hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c7567d0240960680c6649e4c7e58dd953f81f1c2 | 1,654 | py | Python | user_feedback/migrations/0001_initial.py | SALTISES4/UserFeedback | 0d6807a3ea5a9ff4e617fadf2208e04e4e04bd57 | [
"BSD-3-Clause"
] | null | null | null | user_feedback/migrations/0001_initial.py | SALTISES4/UserFeedback | 0d6807a3ea5a9ff4e617fadf2208e04e4e04bd57 | [
"BSD-3-Clause"
] | null | null | null | user_feedback/migrations/0001_initial.py | SALTISES4/UserFeedback | 0d6807a3ea5a9ff4e617fadf2208e04e4e04bd57 | [
"BSD-3-Clause"
] | null | null | null | # Generated by Django 2.2.16 on 2020-09-12 22:38
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration for the ``user_feedback`` app.

    Auto-generated by Django 2.2.16; creates the ``Feedback`` model.
    The code is kept byte-for-byte (migration contents are part of
    Django's recorded migration state), only comments are added.
    """

    # First migration of this app.
    initial = True

    # ``Feedback.author`` references the (swappable) user model.
    dependencies = [migrations.swappable_dependency(settings.AUTH_USER_MODEL)]

    operations = [
        migrations.CreateModel(
            name="Feedback",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                # Free-form feedback text; "null" acts as a string sentinel
                # default rather than a real NULL.
                ("text", models.CharField(default="null", max_length=2000)),
                ("created_on", models.DateTimeField(auto_now_add=True)),
                (
                    "author",
                    models.ForeignKey(
                        null=True,
                        # Keep the feedback row if the author account is deleted.
                        on_delete=django.db.models.deletion.SET_NULL,
                        to=settings.AUTH_USER_MODEL,
                    ),
                ),
                (
                    "type",
                    models.PositiveIntegerField(
                        choices=[
                            (1, "Bug Report"),
                            (2, "Feature Request"),
                            (3, "General Feedback"),
                        ]
                    ),
                ),
                # URL of the page the feedback was submitted from.
                ("url", models.CharField(max_length=200)),
            ],
            options={"verbose_name": "Feedback", "verbose_name_plural": "Feedback"},
        )
    ]
| 31.807692 | 84 | 0.426239 |
0267efbb652e6eff961e2ccab034b36c9c924978 | 1,462 | py | Python | djangoBlog/apps/blog/migrations/0002_auto_20200510_1735.py | blackmonkey121/blog | 938f104d3360c5f7562a2fd5a7d2f2e77c4695c0 | [
"BSD-3-Clause"
] | 4 | 2019-07-20T02:04:11.000Z | 2020-05-02T06:15:22.000Z | djangoBlog/apps/blog/migrations/0002_auto_20200510_1735.py | blackmonkey121/blog | 938f104d3360c5f7562a2fd5a7d2f2e77c4695c0 | [
"BSD-3-Clause"
] | 8 | 2020-05-03T09:01:14.000Z | 2022-01-13T02:13:14.000Z | djangoBlog/apps/blog/migrations/0002_auto_20200510_1735.py | blackmonkey121/blog | 938f104d3360c5f7562a2fd5a7d2f2e77c4695c0 | [
"BSD-3-Clause"
] | null | null | null | # Generated by Django 2.0.8 on 2020-05-10 09:35
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Second migration of the blog app (auto-generated by Django 2.0.8).

    Adds the owner/category/tag relations that depend on the (swappable)
    user model and on the models created in ``blog.0001_initial``.
    Code is kept byte-for-byte; only comments are added.
    """

    initial = True

    dependencies = [
        ('blog', '0001_initial'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    # NOTE: on_delete=DO_NOTHING leaves referential integrity entirely to
    # the database; deleting a referenced user/category will not cascade.
    operations = [
        migrations.AddField(
            model_name='tag',
            name='owner',
            field=models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to=settings.AUTH_USER_MODEL, verbose_name='作者'),
        ),
        migrations.AddField(
            model_name='post',
            name='category',
            field=models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='blog.Category', verbose_name='文章分类'),
        ),
        migrations.AddField(
            model_name='post',
            name='owner',
            field=models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to=settings.AUTH_USER_MODEL, verbose_name='作者'),
        ),
        migrations.AddField(
            model_name='post',
            name='tag',
            field=models.ManyToManyField(to='blog.Tag', verbose_name='标签'),
        ),
        migrations.AddField(
            model_name='category',
            name='owner',
            field=models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to=settings.AUTH_USER_MODEL, verbose_name='作者'),
        ),
    ]
| 33.227273 | 132 | 0.623803 |
e3506b9ac65c6a95b27405e647361d8b995f046c | 450 | py | Python | pytest_reana/version.py | Sinclert/pytest-reana | d35101266ccb9fb0a31ebbd3e96bdb0acadea4a7 | [
"MIT"
] | null | null | null | pytest_reana/version.py | Sinclert/pytest-reana | d35101266ccb9fb0a31ebbd3e96bdb0acadea4a7 | [
"MIT"
] | null | null | null | pytest_reana/version.py | Sinclert/pytest-reana | d35101266ccb9fb0a31ebbd3e96bdb0acadea4a7 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# This file is part of REANA.
# Copyright (C) 2018, 2019, 2020, 2021 CERN.
#
# REANA is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Version information for pytest-REANA.
This file is imported by ``pytest_reana.__init__``
and parsed by ``setup.py``.
"""
from __future__ import absolute_import, print_function
# Single source of the version string: parsed by ``setup.py`` and
# re-exported by ``pytest_reana.__init__`` (see module docstring above).
__version__ = "0.8.0a7"
| 25 | 72 | 0.72 |
2a707b1c7db33291506ee7b26fc466fef54d5be9 | 9,082 | py | Python | salt/modules/extfs.py | aletourneau/salt | d7013a2f64eb4b79592220d76274bc5dde609e08 | [
"Apache-2.0"
] | null | null | null | salt/modules/extfs.py | aletourneau/salt | d7013a2f64eb4b79592220d76274bc5dde609e08 | [
"Apache-2.0"
] | null | null | null | salt/modules/extfs.py | aletourneau/salt | d7013a2f64eb4b79592220d76274bc5dde609e08 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
'''
Module for managing ext2/3/4 file systems
'''
from __future__ import absolute_import
# Import python libs
import logging
# Import salt libs
import salt.utils
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Only load this module on POSIX-like systems.
    '''
    # The ext2/3/4 toolchain (mke2fs, tune2fs, dumpe2fs) does not exist
    # on Windows, so refuse to load there.
    return not salt.utils.is_windows()
def mkfs(device, fs_type, **kwargs):
    '''
    Create a file system on the specified device

    CLI Example:

    .. code-block:: bash

        salt '*' extfs.mkfs /dev/sda1 fs_type=ext4 opts='acl,noexec'

    Valid options are:

    * **block_size**: 1024, 2048 or 4096
    * **check**: check for bad blocks
    * **direct**: use direct IO
    * **ext_opts**: extended file system options (comma-separated)
    * **fragment_size**: size of fragments
    * **force**: setting force to True will cause mke2fs to specify the -F
      option twice (it is already set once); this is truly dangerous
    * **blocks_per_group**: number of blocks in a block group
    * **number_of_groups**: ext4 option for a virtual block group
    * **bytes_per_inode**: set the bytes/inode ratio
    * **inode_size**: size of the inode
    * **journal**: set to True to create a journal (default on ext3/4)
    * **journal_opts**: options for the fs journal (comma separated)
    * **blocks_file**: read bad blocks from file
    * **label**: label to apply to the file system
    * **reserved**: percentage of blocks reserved for super-user
    * **last_dir**: last mounted directory
    * **test**: set to True to not actually create the file system (mke2fs -n)
    * **number_of_inodes**: override default number of inodes
    * **creator_os**: override "creator operating system" field
    * **opts**: mount options (comma separated)
    * **revision**: set the filesystem revision (default 1)
    * **super**: write superblock and group descriptors only
    * **fs_type**: set the filesystem type (REQUIRED)
    * **usage_type**: how the filesystem is going to be used
    * **uuid**: set the UUID for the file system

    See the ``mke2fs(8)`` manpage for a more complete description of these
    options.
    '''
    # Map friendly keyword names onto mke2fs single-letter flags.
    flag_names = {'block_size': 'b',
                  'check': 'c',
                  'direct': 'D',
                  'ext_opts': 'E',
                  'fragment_size': 'f',
                  'force': 'F',
                  'blocks_per_group': 'g',
                  'number_of_groups': 'G',
                  'bytes_per_inode': 'i',
                  'inode_size': 'I',
                  'journal': 'j',
                  'journal_opts': 'J',
                  'blocks_file': 'l',
                  'label': 'L',
                  'reserved': 'm',
                  'last_dir': 'M',
                  'test': 'n',
                  'number_of_inodes': 'N',
                  'creator_os': 'o',
                  'opts': 'O',
                  'revision': 'r',
                  'super': 'S',
                  'usage_type': 'T',
                  'uuid': 'U'}

    option_parts = []
    for name, value in kwargs.items():
        flag = flag_names.get(name)
        if flag is None:
            # Unknown keyword arguments are silently ignored.
            continue
        # The string 'True' marks a bare flag (CLI kwargs arrive as strings).
        if value == 'True':
            option_parts.append('-{0} '.format(flag))
        else:
            option_parts.append('-{0} {1} '.format(flag, value))
    opts = ''.join(option_parts)

    cmd = 'mke2fs -F -t {0} {1}{2}'.format(fs_type, opts, device)
    out = __salt__['cmd.run'](cmd).splitlines()

    # Strip the banner and progress chatter, keep the interesting lines.
    noise_prefixes = (
        'mke2fs',
        'Discarding device blocks',
        'Allocating group tables',
        'Writing inode tables',
        'Creating journal',
        'Writing superblocks',
    )
    ret = []
    for line in out:
        if line and not line.startswith(noise_prefixes):
            ret.append(line)
    return ret
def tune(device, **kwargs):
    '''
    Set attributes for the specified device (using tune2fs)

    CLI Example:

    .. code-block:: bash

        salt '*' extfs.tune /dev/sda1 force=True label=wildstallyns opts='acl,noexec'

    Valid options are:

    * **max**: max mount count
    * **count**: mount count
    * **error**: error behavior
    * **extended_opts**: extended options (comma separated)
    * **force**: force, even if there are errors (set to True)
    * **group**: group name or gid that can use the reserved blocks
    * **interval**: interval between checks
    * **journal**: set to True to create a journal (default on ext3/4)
    * **journal_opts**: options for the fs journal (comma separated)
    * **label**: label to apply to the file system
    * **reserved**: percentage of blocks reserved for super-user
    * **last_dir**: last mounted directory
    * **opts**: mount options (comma separated)
    * **feature**: set or clear a feature (comma separated)
    * **mmp_check**: mmp check interval
    * **reserved**: reserved blocks count
    * **quota_opts**: quota options (comma separated)
    * **time**: time last checked
    * **user**: user or uid who can use the reserved blocks
    * **uuid**: set the UUID for the file system

    See the ``tune2fs(8)`` manpage for a more complete description of these
    options.
    '''
    # Map friendly keyword names onto tune2fs single-letter flags.
    flag_names = {'max': 'c',
                  'count': 'C',
                  'error': 'e',
                  'extended_opts': 'E',
                  'force': 'f',
                  'group': 'g',
                  'interval': 'i',
                  'journal': 'j',
                  'journal_opts': 'J',
                  'label': 'L',
                  'last_dir': 'M',
                  'opts': 'o',
                  'feature': 'O',
                  'mmp_check': 'p',
                  'reserved': 'r',
                  'quota_opts': 'Q',
                  'time': 'T',
                  'user': 'u',
                  'uuid': 'U'}

    option_parts = []
    for name, value in kwargs.items():
        flag = flag_names.get(name)
        if flag is None:
            # Unknown keyword arguments are silently ignored.
            continue
        # The string 'True' marks a bare flag (CLI kwargs arrive as strings).
        if value == 'True':
            option_parts.append('-{0} '.format(flag))
        else:
            option_parts.append('-{0} {1} '.format(flag, value))

    cmd = 'tune2fs {0}{1}'.format(''.join(option_parts), device)
    return __salt__['cmd.run'](cmd).splitlines()
def attributes(device, args=None):
    '''
    Return attributes from dumpe2fs for a specified device

    CLI Example:

    .. code-block:: bash

        salt '*' extfs.attributes /dev/sda1
    '''
    # Delegate to dump() and return only the header section.
    return dump(device, args)['attributes']
def blocks(device, args=None):
    '''
    Return block and inode info from dumpe2fs for a specified device

    CLI Example:

    .. code-block:: bash

        salt '*' extfs.blocks /dev/sda1
    '''
    # Delegate to dump() and return only the per-block-group section.
    return dump(device, args)['blocks']
def dump(device, args=None):
    '''
    Return all contents of dumpe2fs for a specified device

    The output is parsed into a dict with two keys: ``attributes``
    (header key/value pairs) and ``blocks`` (per block-group details).

    CLI Example:

    .. code-block:: bash

        salt '*' extfs.dump /dev/sda1
    '''
    cmd = 'dumpe2fs {0}'.format(device)
    if args:
        cmd = cmd + ' -' + args
    ret = {'attributes': {}, 'blocks': {}}
    out = __salt__['cmd.run'](cmd).splitlines()
    # Two-phase parse: header lines first ('opts' mode); the first line
    # starting with 'Group' switches to per-group parsing ('blocks' mode).
    mode = 'opts'
    group = None
    for line in out:
        if not line:
            continue
        if line.startswith('dumpe2fs'):
            # Skip the tool's version banner.
            continue
        if mode == 'opts':
            line = line.replace('\t', ' ')
            comps = line.split(': ')
            if line.startswith('Filesystem features'):
                # Features are a whitespace-separated list of flags.
                ret['attributes'][comps[0]] = comps[1].split()
            elif line.startswith('Group'):
                # Deliberate fall-through: mode flips and the SAME line is
                # handled again by the 'blocks' branch below.
                mode = 'blocks'
            else:
                ret['attributes'][comps[0]] = comps[1].strip()
        if mode == 'blocks':
            if line.startswith('Group'):
                # e.g. "Group 0: (Blocks 0-32767) [ITABLE_ZEROED]"
                # -- strip punctuation, then split into tokens.
                line = line.replace(':', '')
                line = line.replace('(', '')
                line = line.replace(')', '')
                line = line.replace('[', '')
                line = line.replace(']', '')
                comps = line.split()
                blkgrp = comps[1]
                group = 'Group {0}'.format(blkgrp)
                ret['blocks'][group] = {}
                ret['blocks'][group]['group'] = blkgrp
                ret['blocks'][group]['range'] = comps[3]
                # TODO: comps[4:], which may look one one of the following:
                #   ITABLE_ZEROED
                #   INODE_UNINIT, ITABLE_ZEROED
                # Does anyone know what to call these?
                ret['blocks'][group]['extra'] = []
            elif 'Free blocks:' in line:
                comps = line.split(': ')
                free_blocks = comps[1].split(', ')
                ret['blocks'][group]['free blocks'] = free_blocks
            elif 'Free inodes:' in line:
                comps = line.split(': ')
                inodes = comps[1].split(', ')
                ret['blocks'][group]['free inodes'] = inodes
            else:
                # Anything unrecognized is kept verbatim under 'extra'.
                line = line.strip()
                ret['blocks'][group]['extra'].append(line)
    return ret
| 32.435714 | 85 | 0.519049 |
0079f011f7336f285c0e3f52ad84b3e0d7b29037 | 1,967 | py | Python | lesson-8/ex1.py | alirsamar/intro-ml | 36450b26b7ea09472ccdd2a0abce51b6c3889a20 | [
"MIT"
] | null | null | null | lesson-8/ex1.py | alirsamar/intro-ml | 36450b26b7ea09472ccdd2a0abce51b6c3889a20 | [
"MIT"
] | null | null | null | lesson-8/ex1.py | alirsamar/intro-ml | 36450b26b7ea09472ccdd2a0abce51b6c3889a20 | [
"MIT"
] | null | null | null | # Deploying Clustering
#### Boilerplate #################################################################
import pickle
import numpy
import matplotlib.pyplot as plt
import sys
sys.path.append("../tools/")
from feature_format import featureFormat, targetFeatureSplit
def Draw(pred, features, poi, mark_poi=False, name="image.png", f1_name="feature 1", f2_name="feature 2"):
    """ some plotting code designed to help you visualize your clusters

    pred: cluster label per point; features: [(f1, f2), ...] point list;
    poi: per-point persons-of-interest flags.  Saves the scatter plot to
    ``name`` and also shows it interactively.
    """
    # One matplotlib color per cluster label; supports up to five clusters.
    colors = ["b", "c", "k", "m", "g"]
    for ii, pp in enumerate(pred):
        plt.scatter(features[ii][0], features[ii][1], color = colors[pred[ii]])

    # Overlay persons-of-interest as red stars, if requested.
    if mark_poi:
        for ii, pp in enumerate(pred):
            if poi[ii]:
                plt.scatter(features[ii][0], features[ii][1], color="r", marker="*")
    plt.xlabel(f1_name)
    plt.ylabel(f2_name)
    plt.savefig(name)
    plt.show()
# Load the Enron dataset and drop the spreadsheet "TOTAL" outlier row.
data_dict = pickle.load( open("../ud120-projects/final_project/final_project_dataset.pkl", "r") )
data_dict.pop("TOTAL", 0)

# Cluster on two financial features; ``poi`` is kept as the label.
feature_1 = "salary"
feature_2 = "exercised_stock_options"
poi = "poi"
features_list = [poi, feature_1, feature_2]
data = featureFormat(data_dict, features_list )
poi, finance_features = targetFeatureSplit( data )

### in the "clustering with 3 features" part of the mini-project,
### you'll want to change this line to
### for f1, f2, _ in finance_features:
### (as it's currently written, the line below assumes 2 features)
for f1, f2 in finance_features:
    plt.scatter( f1, f2 )
plt.show()

#### Exercise code #############################################################
# Fit 2-cluster KMeans and predict a cluster index for every point.
from sklearn.cluster import KMeans
kmeans = KMeans(n_clusters=2)
pred = kmeans.fit_predict(finance_features)

#### Boilerplate #################################################################
try:
    Draw(pred, finance_features, poi, mark_poi=False, name="clusters.pdf", f1_name=feature_1, f2_name=feature_2)
except NameError:
    # NOTE: Python 2 print statement -- this script targets Python 2.
    print "no predictions object named pred found, no clusters to plot"
| 33.338983 | 112 | 0.629385 |
b9dbf826fa3663cc7391963c03cce61b71b6ff22 | 26,479 | py | Python | env/lib/python3.8/site-packages/celery/utils/collections.py | crimergio/linux_test | 5e688a06884ab10b4eaaad10a5d0df417a1c9b31 | [
"CC-BY-4.0"
] | null | null | null | env/lib/python3.8/site-packages/celery/utils/collections.py | crimergio/linux_test | 5e688a06884ab10b4eaaad10a5d0df417a1c9b31 | [
"CC-BY-4.0"
] | null | null | null | env/lib/python3.8/site-packages/celery/utils/collections.py | crimergio/linux_test | 5e688a06884ab10b4eaaad10a5d0df417a1c9b31 | [
"CC-BY-4.0"
] | null | null | null | """Custom maps, sets, sequences, and other data structures."""
import sys
import time
from collections import OrderedDict as _OrderedDict
from collections import deque
from collections.abc import (Callable, Mapping, MutableMapping, MutableSet,
Sequence)
from heapq import heapify, heappop, heappush
from itertools import chain, count
from queue import Empty
from .functional import first, uniq
from .text import match_case
try:
# pypy: dicts are ordered in recent versions
from __pypy__ import reversed_dict as _dict_is_ordered
except ImportError:
_dict_is_ordered = None
try:
from django.utils.functional import LazyObject, LazySettings
except ImportError:
class LazyObject: # noqa
pass
LazySettings = LazyObject # noqa
__all__ = (
'AttributeDictMixin', 'AttributeDict', 'BufferMap', 'ChainMap',
'ConfigurationView', 'DictAttribute', 'Evictable',
'LimitedSet', 'Messagebuffer', 'OrderedDict',
'force_mapping', 'lpmerge',
)
REPR_LIMITED_SET = """\
<{name}({size}): maxlen={0.maxlen}, expires={0.expires}, minlen={0.minlen}>\
"""
def force_mapping(m):
    # type: (Any) -> Mapping
    """Wrap *m* in a mapping interface if it does not already provide one."""
    # Django lazy wrappers hide the real object behind ``_wrapped``.
    if isinstance(m, (LazyObject, LazySettings)):
        m = m._wrapped
    if isinstance(m, Mapping):
        return m
    return DictAttribute(m)
def lpmerge(L, R):
    # type: (Mapping, Mapping) -> Mapping
    """In place left precedent dictionary merge.

    Copies items of ``R`` into ``L``, skipping any key whose value in
    ``R`` is :const:`None` -- so existing values in `L` win over ``None``.

    Returns:
        Mapping: ``L``, mutated in place.
    """
    # A plain loop instead of the previous side-effecting list
    # comprehension (which built and discarded a throwaway list).
    for k, v in R.items():
        if v is not None:
            L[k] = v
    return L
class OrderedDict(_OrderedDict):
    """Dict where insertion order matters."""

    def _LRUkey(self):
        # type: () -> Any
        # Return the least-recently-inserted key (first in iteration order).
        # return value of od.keys does not support __next__,
        # but this version will also not create a copy of the list.
        return next(iter(self.keys()))

    # Backport of ``move_to_end`` for interpreters whose OrderedDict
    # lacks it (stdlib has it since Python 3.2).
    if not hasattr(_OrderedDict, 'move_to_end'):
        if _dict_is_ordered:  # pragma: no cover
            # PyPy: dicts are ordered natively, so pop+reinsert moves to end.
            def move_to_end(self, key, last=True):
                # type: (Any, bool) -> None
                if not last:
                    # we don't use this argument, and the only way to
                    # implement this on PyPy seems to be O(n): creating a
                    # copy with the order changed, so we just raise.
                    raise NotImplementedError('no last=True on PyPy')
                self[key] = self.pop(key)
        else:
            # CPython 2: splice the key's node out of OrderedDict's internal
            # doubly-linked list and re-link it at the tail (or the head).
            def move_to_end(self, key, last=True):
                # type: (Any, bool) -> None
                link = self._OrderedDict__map[key]
                link_prev = link[0]
                link_next = link[1]
                link_prev[1] = link_next
                link_next[0] = link_prev
                root = self._OrderedDict__root
                if last:
                    last = root[0]
                    link[0] = last
                    link[1] = root
                    last[1] = root[0] = link
                else:
                    first_node = root[1]
                    link[0] = root
                    link[1] = first_node
                    root[1] = first_node[0] = link
class AttributeDictMixin:
    """Mixin adding attribute access on top of a mapping interface.

    Attribute reads and writes are forwarded to item access, so
    ``d.key`` behaves like ``d[key]`` and ``d.key = v`` like
    ``d[key] = v``.
    """

    def __getattr__(self, name):
        # type: (str) -> Any
        """Resolve ``self[name]``, raising AttributeError when missing."""
        try:
            value = self[name]
        except KeyError:
            raise AttributeError(
                f'{type(self).__name__!r} object has no attribute {name!r}')
        return value

    def __setattr__(self, name, value):
        # type: (str, Any) -> None
        """Store attribute assignments as items: ``self[name] = value``."""
        self[name] = value
class AttributeDict(dict, AttributeDictMixin):
    """Dict subclass with attribute access (``d.key`` == ``d['key']``)."""
class DictAttribute:
    """Dict interface to attributes.

    Wraps an arbitrary object so it can be used as a mapping:

        `obj[k] -> obj.k`
        `obj[k] = val -> obj.k = val`

    Keys are whatever ``dir()`` reports for the wrapped object.
    """

    # The wrapped object (set in __init__).
    obj = None

    def __init__(self, obj):
        # type: (Any) -> None
        # Bypass this class's own __setattr__, which would otherwise
        # forward the write to the wrapped object instead of storing it.
        object.__setattr__(self, 'obj', obj)

    def __getattr__(self, key):
        # type: (Any) -> Any
        # Attribute access falls through to the wrapped object.
        return getattr(self.obj, key)

    def __setattr__(self, key, value):
        # type: (Any, Any) -> None
        return setattr(self.obj, key, value)

    def get(self, key, default=None):
        # type: (Any, Any) -> Any
        try:
            return self[key]
        except KeyError:
            return default

    def setdefault(self, key, default=None):
        # type: (Any, Any) -> None
        if key not in self:
            self[key] = default

    def __getitem__(self, key):
        # type: (Any) -> Any
        # Translate missing attributes into the mapping protocol's KeyError.
        try:
            return getattr(self.obj, key)
        except AttributeError:
            raise KeyError(key)

    def __setitem__(self, key, value):
        # type: (Any, Any) -> Any
        setattr(self.obj, key, value)

    def __contains__(self, key):
        # type: (Any) -> bool
        return hasattr(self.obj, key)

    def _iterate_keys(self):
        # type: () -> Iterable
        return iter(dir(self.obj))
    iterkeys = _iterate_keys

    def __iter__(self):
        # type: () -> Iterable
        return self._iterate_keys()

    def _iterate_items(self):
        # type: () -> Iterable
        for key in self._iterate_keys():
            yield key, getattr(self.obj, key)
    iteritems = _iterate_items

    def _iterate_values(self):
        # type: () -> Iterable
        for key in self._iterate_keys():
            yield getattr(self.obj, key)
    itervalues = _iterate_values

    # Python 3 exposes the lazy iterators directly; the Python 2 branch
    # returns concrete lists, matching dict's Py2 semantics.
    if sys.version_info[0] == 3:  # pragma: no cover
        items = _iterate_items
        keys = _iterate_keys
        values = _iterate_values
    else:

        def keys(self):
            # type: () -> List[Any]
            return list(self)

        def items(self):
            # type: () -> List[Tuple[Any, Any]]
            return list(self._iterate_items())

        def values(self):
            # type: () -> List[Any]
            return list(self._iterate_values())
MutableMapping.register(DictAttribute) # noqa: E305
class ChainMap(MutableMapping):
    """Key lookup on a sequence of maps.

    Reads consult ``maps`` in order and return the first match; writes
    and deletes always target the first map (``changes``), the remaining
    maps acting as read-only ``defaults``.

    Keyword Arguments:
        key_t (Callable): optional key-normalization function applied to
            every key before lookup or store (e.g. ``str``).
    """

    key_t = None
    changes = None
    defaults = None
    maps = None

    #: Class-level fallback kept for backward compatibility only.
    #: BUGFIX: this used to be the *shared* observer list, so a callback
    #: registered via :meth:`bind_to` on one instance fired for updates on
    #: every other ChainMap as well.  Instances now get their own list in
    #: :meth:`__init__`.
    _observers = []

    def __init__(self, *maps, **kwargs):
        # type: (*Mapping, **Any) -> None
        maps = list(maps or [{}])
        self.__dict__.update(
            key_t=kwargs.get('key_t'),
            maps=maps,
            changes=maps[0],
            defaults=maps[1:],
            _observers=[],  # per-instance; see class attribute note above
        )

    def add_defaults(self, d):
        # type: (Mapping) -> None
        """Insert ``d`` as the highest-priority default mapping."""
        d = force_mapping(d)
        self.defaults.insert(0, d)
        self.maps.insert(1, d)

    def pop(self, key, *default):
        # type: (Any, *Any) -> Any
        """Pop ``key`` from the first mapping only."""
        try:
            return self.maps[0].pop(key, *default)
        except KeyError:
            raise KeyError(
                f'Key not found in the first mapping: {key!r}')

    def __missing__(self, key):
        # type: (Any) -> Any
        raise KeyError(key)

    def _key(self, key):
        # type: (Any) -> Any
        # Apply the optional key-normalization function.
        return self.key_t(key) if self.key_t is not None else key

    def __getitem__(self, key):
        # type: (Any) -> Any
        _key = self._key(key)
        for mapping in self.maps:
            try:
                return mapping[_key]
            except KeyError:
                pass
        return self.__missing__(key)

    def __setitem__(self, key, value):
        # type: (Any, Any) -> None
        self.changes[self._key(key)] = value

    def __delitem__(self, key):
        # type: (Any) -> None
        try:
            del self.changes[self._key(key)]
        except KeyError:
            raise KeyError(f'Key not found in first mapping: {key!r}')

    def clear(self):
        # type: () -> None
        """Remove all changes, but keep defaults."""
        self.changes.clear()

    def get(self, key, default=None):
        # type: (Any, Any) -> Any
        try:
            return self[self._key(key)]
        except KeyError:
            return default

    def __len__(self):
        # type: () -> int
        # Number of distinct keys across all maps.
        return len(set().union(*self.maps))

    def __iter__(self):
        return self._iterate_keys()

    def __contains__(self, key):
        # type: (Any) -> bool
        key = self._key(key)
        return any(key in m for m in self.maps)

    def __bool__(self):
        # type: () -> bool
        return any(self.maps)
    __nonzero__ = __bool__  # Py2

    def setdefault(self, key, default=None):
        # type: (Any, Any) -> None
        key = self._key(key)
        if key not in self:
            self[key] = default

    def update(self, *args, **kwargs):
        # type: (*Any, **Any) -> Any
        """Update the first mapping, then notify bound observers."""
        result = self.changes.update(*args, **kwargs)
        for callback in self._observers:
            callback(*args, **kwargs)
        return result

    def __repr__(self):
        # type: () -> str
        return '{0.__class__.__name__}({1})'.format(
            self, ', '.join(map(repr, self.maps)))

    @classmethod
    def fromkeys(cls, iterable, *args):
        # type: (type, Iterable, *Any) -> 'ChainMap'
        """Create a ChainMap with a single dict created from the iterable."""
        return cls(dict.fromkeys(iterable, *args))

    def copy(self):
        # type: () -> 'ChainMap'
        # Shallow: the first map is copied, the defaults are shared.
        return self.__class__(self.maps[0].copy(), *self.maps[1:])
    __copy__ = copy  # Py2

    def _iter(self, op):
        # type: (Callable) -> Iterable
        # defaults must be first in the stream, so values in
        # changes take precedence.
        return chain(*[op(d) for d in reversed(self.maps)])

    def _iterate_keys(self):
        # type: () -> Iterable
        return uniq(self._iter(lambda d: d.keys()))
    iterkeys = _iterate_keys

    def _iterate_items(self):
        # type: () -> Iterable
        return ((key, self[key]) for key in self)
    iteritems = _iterate_items

    def _iterate_values(self):
        # type: () -> Iterable
        return (self[key] for key in self)
    itervalues = _iterate_values

    def bind_to(self, callback):
        """Register ``callback`` to be invoked after every :meth:`update`."""
        self._observers.append(callback)

    # This module already requires Python 3 (it uses f-strings), so the
    # dead Python 2 list-returning variants were removed.
    keys = _iterate_keys
    items = _iterate_items
    values = _iterate_values
class ConfigurationView(ChainMap, AttributeDictMixin):
    """A view over an applications configuration dictionaries.

    Custom (but older) version of :class:`collections.ChainMap`.

    If the key does not exist in ``changes``, the ``defaults``
    dictionaries are consulted.

    Arguments:
        changes (Mapping): Map of configuration changes.
        defaults (List[Mapping]): List of dictionaries containing
            the default configuration.
    """

    def __init__(self, changes, defaults=None, keys=None, prefix=None):
        # type: (Mapping, Mapping, List[str], str) -> None
        defaults = [] if defaults is None else defaults
        super().__init__(changes, *defaults)
        self.__dict__.update(
            # Normalize the prefix so it always ends in exactly one '_'.
            prefix=prefix.rstrip('_') + '_' if prefix else prefix,
            _keys=keys,
        )

    def _to_keys(self, key):
        # type: (str) -> Sequence[str]
        # Candidate key names to try: the prefixed variant (case-matched
        # to the prefix) first, then the key as given.
        prefix = self.prefix
        if prefix:
            pkey = prefix + key if not key.startswith(prefix) else key
            return match_case(pkey, prefix), key
        return key,

    def __getitem__(self, key):
        # type: (str) -> Any
        # Try each candidate name (prefixed, raw, plus any alternate key
        # functions supplied via ``keys=``) against the chain of maps.
        keys = self._to_keys(key)
        getitem = super().__getitem__
        for k in keys + (
                tuple(f(key) for f in self._keys) if self._keys else ()):
            try:
                return getitem(k)
            except KeyError:
                pass
        try:
            # support subclasses implementing __missing__
            return self.__missing__(key)
        except KeyError:
            if len(keys) > 1:
                raise KeyError(
                    'Key not found: {0!r} (with prefix: {0!r})'.format(*keys))
            raise

    def __setitem__(self, key, value):
        # type: (str, Any) -> Any
        # Writes always land in the first (changes) mapping.
        self.changes[self._key(key)] = value

    def first(self, *keys):
        # type: (*str) -> Any
        # Value of the first key that resolves to something non-None.
        return first(None, (self.get(key) for key in keys))

    def get(self, key, default=None):
        # type: (str, Any) -> Any
        try:
            return self[key]
        except KeyError:
            return default

    def clear(self):
        # type: () -> None
        """Remove all changes, but keep defaults."""
        self.changes.clear()

    def __contains__(self, key):
        # type: (str) -> bool
        # A key is present if any candidate name is in any mapping.
        keys = self._to_keys(key)
        return any(any(k in m for k in keys) for m in self.maps)

    def swap_with(self, other):
        # type: (ConfigurationView) -> None
        # Adopt the other view's mappings and settings wholesale
        # (used when replacing the active configuration in place).
        changes = other.__dict__['changes']
        defaults = other.__dict__['defaults']
        self.__dict__.update(
            changes=changes,
            defaults=defaults,
            key_t=other.__dict__['key_t'],
            prefix=other.__dict__['prefix'],
            maps=[changes] + defaults
        )
class LimitedSet:
"""Kind-of Set (or priority queue) with limitations.
Good for when you need to test for membership (`a in set`),
but the set should not grow unbounded.
``maxlen`` is enforced at all times, so if the limit is reached
we'll also remove non-expired items.
You can also configure ``minlen``: this is the minimal residual size
of the set.
All arguments are optional, and no limits are enabled by default.
Arguments:
maxlen (int): Optional max number of items.
Adding more items than ``maxlen`` will result in immediate
removal of items sorted by oldest insertion time.
expires (float): TTL for all items.
Expired items are purged as keys are inserted.
minlen (int): Minimal residual size of this set.
.. versionadded:: 4.0
Value must be less than ``maxlen`` if both are configured.
Older expired items will be deleted, only after the set
exceeds ``minlen`` number of items.
data (Sequence): Initial data to initialize set with.
Can be an iterable of ``(key, value)`` pairs,
a dict (``{key: insertion_time}``), or another instance
of :class:`LimitedSet`.
Example:
>>> s = LimitedSet(maxlen=50000, expires=3600, minlen=4000)
>>> for i in range(60000):
... s.add(i)
... s.add(str(i))
...
>>> 57000 in s # last 50k inserted values are kept
True
>>> '10' in s # '10' did expire and was purged from set.
False
>>> len(s) # maxlen is reached
50000
>>> s.purge(now=time.monotonic() + 7200) # clock + 2 hours
>>> len(s) # now only minlen items are cached
4000
>>>> 57000 in s # even this item is gone now
False
"""
max_heap_percent_overload = 15
def __init__(self, maxlen=0, expires=0, data=None, minlen=0):
# type: (int, float, Mapping, int) -> None
self.maxlen = 0 if maxlen is None else maxlen
self.minlen = 0 if minlen is None else minlen
self.expires = 0 if expires is None else expires
self._data = {}
self._heap = []
if data:
# import items from data
self.update(data)
if not self.maxlen >= self.minlen >= 0:
raise ValueError(
'minlen must be a positive number, less or equal to maxlen.')
if self.expires < 0:
raise ValueError('expires cannot be negative!')
def _refresh_heap(self):
# type: () -> None
"""Time consuming recreating of heap. Don't run this too often."""
self._heap[:] = [entry for entry in self._data.values()]
heapify(self._heap)
def _maybe_refresh_heap(self):
# type: () -> None
if self._heap_overload >= self.max_heap_percent_overload:
self._refresh_heap()
def clear(self):
# type: () -> None
"""Clear all data, start from scratch again."""
self._data.clear()
self._heap[:] = []
def add(self, item, now=None):
# type: (Any, float) -> None
"""Add a new item, or reset the expiry time of an existing item."""
now = now or time.monotonic()
if item in self._data:
self.discard(item)
entry = (now, item)
self._data[item] = entry
heappush(self._heap, entry)
if self.maxlen and len(self._data) >= self.maxlen:
self.purge()
def update(self, other):
# type: (Iterable) -> None
"""Update this set from other LimitedSet, dict or iterable."""
if not other:
return
if isinstance(other, LimitedSet):
self._data.update(other._data)
self._refresh_heap()
self.purge()
elif isinstance(other, dict):
# revokes are sent as a dict
for key, inserted in other.items():
if isinstance(inserted, (tuple, list)):
# in case someone uses ._data directly for sending update
inserted = inserted[0]
if not isinstance(inserted, float):
raise ValueError(
'Expecting float timestamp, got type '
f'{type(inserted)!r} with value: {inserted}')
self.add(key, inserted)
else:
# XXX AVOID THIS, it could keep old data if more parties
# exchange them all over and over again
for obj in other:
self.add(obj)
def discard(self, item):
# type: (Any) -> None
# mark an existing item as removed. If KeyError is not found, pass.
self._data.pop(item, None)
self._maybe_refresh_heap()
pop_value = discard
def purge(self, now=None):
# type: (float) -> None
"""Check oldest items and remove them if needed.
Arguments:
now (float): Time of purging -- by default right now.
This can be useful for unit testing.
"""
now = now or time.monotonic()
now = now() if isinstance(now, Callable) else now
if self.maxlen:
while len(self._data) > self.maxlen:
self.pop()
# time based expiring:
if self.expires:
while len(self._data) > self.minlen >= 0:
inserted_time, _ = self._heap[0]
if inserted_time + self.expires > now:
break # oldest item hasn't expired yet
self.pop()
def pop(self, default=None):
# type: (Any) -> Any
"""Remove and return the oldest item, or :const:`None` when empty."""
while self._heap:
_, item = heappop(self._heap)
try:
self._data.pop(item)
except KeyError:
pass
else:
return item
return default
def as_dict(self):
# type: () -> Dict
"""Whole set as serializable dictionary.
Example:
>>> s = LimitedSet(maxlen=200)
>>> r = LimitedSet(maxlen=200)
>>> for i in range(500):
... s.add(i)
...
>>> r.update(s.as_dict())
>>> r == s
True
"""
return {key: inserted for inserted, key in self._data.values()}
def __eq__(self, other):
# type: (Any) -> bool
return self._data == other._data
def __ne__(self, other):
# type: (Any) -> bool
return not self.__eq__(other)
def __repr__(self):
# type: () -> str
return REPR_LIMITED_SET.format(
self, name=type(self).__name__, size=len(self),
)
def __iter__(self):
# type: () -> Iterable
return (i for _, i in sorted(self._data.values()))
def __len__(self):
# type: () -> int
return len(self._data)
def __contains__(self, key):
# type: (Any) -> bool
return key in self._data
def __reduce__(self):
# type: () -> Any
return self.__class__, (
self.maxlen, self.expires, self.as_dict(), self.minlen)
    def __bool__(self):
        # type: () -> bool
        # Truthy when the set holds at least one live item.
        return bool(self._data)
    __nonzero__ = __bool__  # Py2
    @property
    def _heap_overload(self):
        # type: () -> float
        """Compute how much is heap bigger than data [percents]."""
        # 0 means heap and data are the same size; stale (discarded) entries
        # left in the heap grow this value.
        return len(self._heap) * 100 / max(len(self._data), 1) - 100
# Virtual-subclass registration: makes isinstance(x, MutableSet) true for
# LimitedSet without inheriting the ABC's implementation.
MutableSet.register(LimitedSet)  # noqa: E305
class Evictable:
    """Mixin for classes supporting the ``evict`` method.

    Hosts must provide ``maxsize``, an ``_evictcount`` property and a
    ``_pop_to_evict`` method.
    """

    Empty = Empty

    def evict(self):
        # type: () -> None
        """Force evict until maxsize is enforced."""
        # ``count`` is an unbounded iterator, so eviction only stops once
        # ``_evict1`` signals completion by raising IndexError.
        self._evict(range=count)

    def _evict(self, limit=100, range=range):
        # type: (int) -> None
        # Evict one entry at a time, at most ``limit`` times; ``_evict1``
        # raises IndexError as soon as the size constraint is satisfied.
        try:
            for _ in range(limit):
                self._evict1()
        except IndexError:
            pass

    def _evict1(self):
        # type: () -> None
        if self._evictcount <= self.maxsize:
            raise IndexError()
        try:
            self._pop_to_evict()
        except self.Empty:
            raise IndexError()
class Messagebuffer(Evictable):
    """A buffer of pending messages."""
    Empty = Empty
    def __init__(self, maxsize, iterable=None, deque=deque):
        # type: (int, Iterable, Any) -> None
        self.maxsize = maxsize
        self.data = deque(iterable or [])
        # Bind bound methods once to avoid repeated attribute lookups on the
        # hot put/take paths.
        self._append = self.data.append
        self._pop = self.data.popleft
        self._len = self.data.__len__
        self._extend = self.data.extend
    def put(self, item):
        # type: (Any) -> None
        self._append(item)
        # Evict oldest messages whenever a size limit is configured.
        self.maxsize and self._evict()
    def extend(self, it):
        # type: (Iterable) -> None
        self._extend(it)
        self.maxsize and self._evict()
    def take(self, *default):
        # type: (*Any) -> Any
        # Remove and return the oldest message; fall back to ``default[0]``
        # or raise ``self.Empty`` when the buffer is empty.
        try:
            return self._pop()
        except IndexError:
            if default:
                return default[0]
            raise self.Empty()
    def _pop_to_evict(self):
        # type: () -> None
        # Eviction policy for the Evictable mixin: drop the oldest message.
        return self.take()
    def __repr__(self):
        # type: () -> str
        return f'<{type(self).__name__}: {len(self)}/{self.maxsize}>'
    def __iter__(self):
        # type: () -> Iterable
        # Destructive iteration: consumes messages oldest-first.
        while 1:
            try:
                yield self._pop()
            except IndexError:
                break
    def __len__(self):
        # type: () -> int
        return self._len()
    def __contains__(self, item):
        # type: (Any) -> bool
        return item in self.data
    def __reversed__(self):
        # type: () -> Iterable
        return reversed(self.data)
    def __getitem__(self, index):
        # type: (Any) -> Any
        return self.data[index]
    @property
    def _evictcount(self):
        # type: () -> int
        return len(self)
# Virtual-subclass registration: Messagebuffer behaves like a Sequence.
Sequence.register(Messagebuffer)  # noqa: E305
class BufferMap(OrderedDict, Evictable):
    """Map of buffers.

    An ordered mapping of ``key -> Messagebuffer`` with LRU-style eviction:
    when ``total`` exceeds ``maxsize``, messages are dropped from the least
    recently used buffer.
    """
    Buffer = Messagebuffer
    Empty = Empty
    maxsize = None
    total = 0
    bufmaxsize = None
    def __init__(self, maxsize, iterable=None, bufmaxsize=1000):
        # type: (int, Iterable, int) -> None
        super().__init__()
        self.maxsize = maxsize
        # BUG FIX: previously hard-coded to 1000, silently ignoring the
        # ``bufmaxsize`` argument.
        self.bufmaxsize = bufmaxsize
        if iterable:
            self.update(iterable)
        # BUG FIX: sum the buffer lengths (values), not the (key, buffer)
        # item tuples, whose length is always 2.
        self.total = sum(len(buf) for buf in self.values())
    def put(self, key, item):
        # type: (Any, Any) -> None
        """Add a single message to the buffer for ``key``."""
        self._get_or_create_buffer(key).put(item)
        self.total += 1
        self.move_to_end(key)   # mark as most recently used.
        self.maxsize and self._evict()
    def extend(self, key, it):
        # type: (Any, Iterable) -> None
        """Add several messages (a sized iterable) to the buffer for ``key``."""
        self._get_or_create_buffer(key).extend(it)
        self.total += len(it)
        self.maxsize and self._evict()
    def take(self, key, *default):
        # type: (Any, *Any) -> Any
        """Remove and return the oldest message in the buffer for ``key``.

        Falls back to ``default[0]`` or raises ``self.Empty`` when the
        buffer is missing or empty.
        """
        item, throw = None, False
        try:
            buf = self[key]
        except KeyError:
            throw = True
        else:
            try:
                item = buf.take()
                self.total -= 1
            except self.Empty:
                throw = True
            else:
                self.move_to_end(key)  # mark as most recently used
        if throw:
            if default:
                return default[0]
            raise self.Empty()
        return item
    def _get_or_create_buffer(self, key):
        # type: (Any) -> Messagebuffer
        try:
            return self[key]
        except KeyError:
            buf = self[key] = self._new_buffer()
            return buf
    def _new_buffer(self):
        # type: () -> Messagebuffer
        return self.Buffer(maxsize=self.bufmaxsize)
    def _LRUkey(self):
        # type: () -> Any
        # BUG FIX: this helper was missing although ``_LRUpop`` and
        # ``_pop_to_evict`` call it.  The first key in insertion order is the
        # least recently used, since used keys are moved to the end.
        return next(iter(self))
    def _LRUpop(self, *default):
        # type: (*Any) -> Any
        return self[self._LRUkey()].take(*default)
    def _pop_to_evict(self):
        # type: () -> None
        # Try up to 100 times to remove one message, pruning empty buffers
        # from the mapping along the way.
        for _ in range(100):
            key = self._LRUkey()
            buf = self[key]
            try:
                buf.take()
            except (IndexError, self.Empty):
                # buffer empty, remove it from mapping.
                self.pop(key)
            else:
                # we removed one item
                self.total -= 1
                # if buffer is empty now, remove it from mapping.
                if not len(buf):
                    self.pop(key)
                else:
                    # move to the most recently used end.
                    self.move_to_end(key)
                break
    def __repr__(self):
        # type: () -> str
        return f'<{type(self).__name__}: {self.total}/{self.maxsize}>'
    @property
    def _evictcount(self):
        # type: () -> int
        return self.total
| 29.355876 | 78 | 0.543034 |
9f2697a9f9635a94f72711b059fb047fbd0e31e5 | 13,965 | py | Python | pytorch_lightning/callbacks/finetuning.py | prajakta0111/pytorch-lightning | 3df02b880a6d145ff0aca24ea429c12c0d8f1181 | [
"Apache-2.0"
] | 1 | 2021-08-05T01:45:26.000Z | 2021-08-05T01:45:26.000Z | pytorch_lightning/callbacks/finetuning.py | prajakta0111/pytorch-lightning | 3df02b880a6d145ff0aca24ea429c12c0d8f1181 | [
"Apache-2.0"
] | 1 | 2021-03-01T17:32:12.000Z | 2021-03-01T17:32:12.000Z | pytorch_lightning/callbacks/finetuning.py | prajakta0111/pytorch-lightning | 3df02b880a6d145ff0aca24ea429c12c0d8f1181 | [
"Apache-2.0"
] | 1 | 2021-02-16T00:47:46.000Z | 2021-02-16T00:47:46.000Z | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
Finetuning Callback
^^^^^^^^^^^^^^^^^^^^
Freeze and unfreeze models for finetuning purposes
"""
from typing import Callable, Generator, Iterable, List, Optional, Union
import torch
from torch.nn import Module
from torch.nn.modules.batchnorm import _BatchNorm
from torch.nn.modules.container import Container, ModuleDict, ModuleList, Sequential
from torch.optim.optimizer import Optimizer
from pytorch_lightning import _logger as log
from pytorch_lightning.callbacks.base import Callback
from pytorch_lightning.core.lightning import LightningModule
from pytorch_lightning.utilities import rank_zero_warn
from pytorch_lightning.utilities.exceptions import MisconfigurationException
def multiplicative(epoch):
    """Default ``lambda_func``: scale the backbone learning rate by 2 each epoch."""
    return 2
class BaseFinetuning(Callback):
    r"""
    This class implements the base logic for writing your own Finetuning Callback.
    Override ``freeze_before_training`` and ``finetune_function`` methods with your own logic.
    ``freeze_before_training``: This method is called before ``configure_optimizers``
        and should be used to freeze any modules parameters.
    ``finetune_function``: This method is called on every train epoch start and should be used to
        ``unfreeze`` any parameters. Those parameters needs to be added in a new ``param_group``
        within the optimizer.
    .. note:: Make sure to filter the parameters based on ``requires_grad``.
    Example::
        class MyModel(LightningModule):
            ...
            def configure_optimizers(self):
                # Make sure to filter the parameters based on `requires_grad`
                return Adam(filter(lambda p: p.requires_grad, self.parameters))
        class FeatureExtractorFreezeUnfreeze(BaseFinetuning):
            def __init__(self, unfreeze_at_epoch=10):
                self._unfreeze_at_epoch = unfreeze_at_epoch
            def freeze_before_training(self, pl_module):
                # freeze any module you want
                # Here, we are freezing ``feature_extractor``
                self.freeze(pl_module.feature_extractor)
            def finetune_function(self, pl_module, current_epoch, optimizer, optimizer_idx):
                # When `current_epoch` is 10, feature_extractor will start training.
                if current_epoch == self._unfreeze_at_epoch:
                    self.unfreeze_and_add_param_group(
                        module=pl_module.feature_extractor,
                        optimizer=optimizer,
                        train_bn=True,
                    )
    """
    @staticmethod
    def flatten_modules(modules: Union[Module, Iterable[Union[Module, Iterable]]]) -> List[Module]:
        """
        This function is used to flatten a module or an iterable of modules into a list of its modules.
        Args:
            modules: A given module or an iterable of modules
        Returns:
            List of modules
        """
        if isinstance(modules, Iterable):
            _modules = []
            for m in modules:
                _modules.extend(BaseFinetuning.flatten_modules(m))
        else:
            _modules = modules.modules()
        # Keep leaf/parameterised modules only; pure containers hold no
        # parameters of their own.
        return list(
            filter(
                lambda m: not isinstance(m, (Container, Sequential, ModuleDict, ModuleList, LightningModule)), _modules
            )
        )
    @staticmethod
    def filter_params(
        modules: Union[Module, Iterable[Union[Module, Iterable]]],
        train_bn: bool = True,
        requires_grad: bool = True
    ) -> Generator:
        """Yields the `requires_grad` parameters of a given module or list of modules.
        Args:
            modules: A given module or an iterable of modules
            train_bn: Whether to train BatchNorm module
            requires_grad: Whether to create a generator for trainable or non-trainable parameters.
        Returns:
            Generator
        """
        modules = BaseFinetuning.flatten_modules(modules)
        for mod in modules:
            # Skip BatchNorm parameters entirely when they are not trained.
            if isinstance(mod, _BatchNorm) and not train_bn:
                continue
            for param in mod.parameters():
                if param.requires_grad == requires_grad:
                    yield param
    @staticmethod
    def make_trainable(modules: Union[Module, Iterable[Union[Module, Iterable]]]) -> None:
        """
        Unfreezes the parameters of the provided modules
        Args:
            modules: A given module or an iterable of modules
        """
        modules = BaseFinetuning.flatten_modules(modules)
        for module in modules:
            for param in module.parameters():
                param.requires_grad = True
    @staticmethod
    def freeze(modules: Union[Module, Iterable[Union[Module, Iterable]]], train_bn: bool = True) -> None:
        """
        Freezes the parameters of the provided modules
        Args:
            modules: A given module or an iterable of modules
            train_bn: If True, leave the BatchNorm layers in training mode
        Returns:
            None
        """
        modules = BaseFinetuning.flatten_modules(modules)
        for mod in modules:
            if isinstance(mod, _BatchNorm) and train_bn:
                # BatchNorm stays trainable so running statistics keep adapting.
                BaseFinetuning.make_trainable(mod)
            else:
                for param in mod.parameters():
                    param.requires_grad = False
    @staticmethod
    def filter_on_optimizer(optimizer: Optimizer, params: Iterable) -> List:
        """
        This function is used to exclude any parameter which already exists in
        this optimizer
        Args:
            optimizer: Optimizer used for parameter exclusion
            params: Iterable of parameters used to check against the provided optimizer
        Returns:
            List of parameters not contained in this optimizer param groups
        """
        out_params = []
        removed_params = []
        for param in params:
            # ``torch.equal`` compares by value, so identical tensors already
            # tracked by the optimizer are filtered out.
            if not any(torch.equal(p, param) for group in optimizer.param_groups for p in group["params"]):
                out_params.append(param)
            else:
                removed_params.append(param)
        if removed_params:
            rank_zero_warn(
                "The provided params to be freezed already exist within another group of this optimizer."
                " Those parameters will be skipped.\n"
                "HINT: Did you init your optimizer in `configure_optimizer` as such:\n"
                f" {type(optimizer)}(filter(lambda p: p.requires_grad, self.parameters()), ...) ", UserWarning
            )
        return out_params
    @staticmethod
    def unfreeze_and_add_param_group(
        modules: Union[Module, Iterable[Union[Module, Iterable]]],
        optimizer: Optimizer,
        lr: Optional[float] = None,
        initial_denom_lr: float = 10.,
        train_bn: bool = True,
    ) -> None:
        """
        Unfreezes a module and adds its parameters to an optimizer.
        Args:
            modules: A module or iterable of modules to unfreeze.
                Their parameters will be added to an optimizer as a new param group.
            optimizer: The provided optimizer will receive new parameters and will add them to
                `add_param_group`
            lr: Learning rate for the new param group.
            initial_denom_lr: If no lr is provided, the learning from the first param group will be used
                and divided by initial_denom_lr.
            train_bn: Whether to train the BatchNormalization layers.
        Returns:
            None
        """
        BaseFinetuning.make_trainable(modules)
        params_lr = optimizer.param_groups[0]['lr'] if lr is None else float(lr)
        # When no explicit lr is given, start the new group at a fraction of
        # the current lr (divided by ``initial_denom_lr``).
        denom_lr = initial_denom_lr if lr is None else 1.
        params = BaseFinetuning.filter_params(modules, train_bn=train_bn, requires_grad=True)
        params = BaseFinetuning.filter_on_optimizer(optimizer, params)
        if params:
            optimizer.add_param_group({
                'params': params,
                'lr': params_lr / denom_lr,
            })
    def on_before_accelerator_backend_setup(self, trainer, pl_module):
        # Freeze before ``configure_optimizers`` runs, so frozen parameters
        # can be filtered out of the optimizer.
        self.freeze_before_training(pl_module)
    def on_train_epoch_start(self, trainer, pl_module):
        """Called when the epoch begins."""
        for opt_idx, optimizer in trainer.train_loop.prepare_optimizers():
            self.finetune_function(pl_module, trainer.current_epoch, optimizer, opt_idx)
    def finetune_function(self, pl_module: LightningModule, epoch: int, optimizer: Optimizer, opt_idx: int):
        """
        Override to add your unfreeze logic
        """
        raise NotImplementedError
    def freeze_before_training(self, pl_module: LightningModule):
        """
        Override to add your freeze logic
        """
        raise NotImplementedError
class BackboneFinetuning(BaseFinetuning):
    r"""
    Finetune a backbone model based on a learning rate user-defined scheduling.
    When the backbone learning rate reaches the current model learning rate
    and ``should_align`` is set to True, it will align with it for the rest of the training.
    Args:
        unfreeze_backbone_at_epoch: Epoch at which the backbone will be unfrozen.
        lambda_func: Scheduling function for increasing backbone learning rate.
        backbone_initial_ratio_lr:
            Used to scale down the backbone learning rate compared to rest of model
        backbone_initial_lr: Optional, Initial learning rate for the backbone.
            By default, we will use current_learning / backbone_initial_ratio_lr
        should_align: Whether to align with current learning rate when backbone learning
            reaches it.
        initial_denom_lr: When unfreezing the backbone, the initial learning rate will be
            current_learning_rate / initial_denom_lr.
        train_bn: Whether to make Batch Normalization trainable.
        verbose: Display current learning rate for model and backbone
        round: Precision for displaying learning rate
    Example::
        >>> from pytorch_lightning import Trainer
        >>> from pytorch_lightning.callbacks import BackboneFinetuning
        >>> multiplicative = lambda epoch: 1.5
        >>> backbone_finetuning = BackboneFinetuning(200, multiplicative)
        >>> trainer = Trainer(callbacks=[backbone_finetuning])
    """
    def __init__(
        self,
        unfreeze_backbone_at_epoch: int = 10,
        lambda_func: Callable = multiplicative,
        backbone_initial_ratio_lr: float = 10e-2,
        backbone_initial_lr: Optional[float] = None,
        should_align: bool = True,
        initial_denom_lr: float = 10.,
        train_bn: bool = True,
        verbose: bool = False,
        round: int = 12,
    ):
        self.unfreeze_backbone_at_epoch = unfreeze_backbone_at_epoch
        self.backbone_initial_lr = backbone_initial_lr
        self.lambda_func = lambda_func
        self.backbone_initial_ratio_lr = backbone_initial_ratio_lr
        self.should_align = should_align
        self.initial_denom_lr = initial_denom_lr
        self.train_bn = train_bn
        self.round = round
        self.verbose = verbose
    def on_fit_start(self, trainer, pl_module):
        """
        Raises:
            MisconfigurationException:
                If LightningModule has no nn.Module `backbone` attribute.
        """
        if hasattr(pl_module, "backbone") and isinstance(pl_module.backbone, Module):
            return
        raise MisconfigurationException("The LightningModule should have a nn.Module `backbone` attribute")
    def freeze_before_training(self, pl_module: LightningModule):
        self.freeze(pl_module.backbone)
    def finetune_function(self, pl_module: LightningModule, epoch: int, optimizer: Optimizer, opt_idx: int):
        """Called when the epoch begins."""
        # NOTE(review): ``self.previous_backbone_lr`` is only initialised in
        # the ``epoch ==`` branch below -- resuming training past that epoch
        # would reach the elif with the attribute unset; confirm.
        if epoch == self.unfreeze_backbone_at_epoch:
            current_lr = optimizer.param_groups[0]['lr']
            initial_backbone_lr = self.backbone_initial_lr if self.backbone_initial_lr is not None \
                else current_lr * self.backbone_initial_ratio_lr
            self.previous_backbone_lr = initial_backbone_lr
            self.unfreeze_and_add_param_group(
                pl_module.backbone,
                optimizer,
                initial_backbone_lr,
                train_bn=self.train_bn,
                initial_denom_lr=self.initial_denom_lr
            )
            if self.verbose:
                log.info(
                    f"Current lr: {round(current_lr, self.round)}, "
                    f"Backbone lr: {round(initial_backbone_lr, self.round)}"
                )
        elif epoch > self.unfreeze_backbone_at_epoch:
            current_lr = optimizer.param_groups[0]['lr']
            # Grow the backbone lr each epoch; optionally clamp (align) it to
            # the model lr once it catches up.
            next_current_backbone_lr = self.lambda_func(epoch + 1) * self.previous_backbone_lr
            next_current_backbone_lr = current_lr if (self.should_align and next_current_backbone_lr > current_lr) \
                else next_current_backbone_lr
            optimizer.param_groups[-1]["lr"] = next_current_backbone_lr
            self.previous_backbone_lr = next_current_backbone_lr
            if self.verbose:
                log.info(
                    f"Current lr: {round(current_lr, self.round)}, "
                    f"Backbone lr: {round(next_current_backbone_lr, self.round)}"
                )
| 38.155738 | 119 | 0.64676 |
2dfe315c5d82723ce4996170ee547b761909f737 | 342 | py | Python | LIVRO_FLASK/model/Category.py | marceloprni/FLASK_A_Z | bc85819167c1e7f3d1f4f41d0e50d8cbf5c1b4af | [
"MIT"
] | null | null | null | LIVRO_FLASK/model/Category.py | marceloprni/FLASK_A_Z | bc85819167c1e7f3d1f4f41d0e50d8cbf5c1b4af | [
"MIT"
] | null | null | null | LIVRO_FLASK/model/Category.py | marceloprni/FLASK_A_Z | bc85819167c1e7f3d1f4f41d0e50d8cbf5c1b4af | [
"MIT"
] | null | null | null | from flask_sqlalchemy import SQLAlchemy
from config import app_config, app_active
# Resolve the configuration for the active environment and bind SQLAlchemy
# to the Flask application object it exposes.
config = app_config[app_active]
db = SQLAlchemy(config.APP)
class Category(db.Model):
    """Category table: unique name plus a free-text description."""
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(50), unique=True, nullable=False)
    description = db.Column(db.Text(), nullable=False)
| 26.307692 | 64 | 0.748538 |
73ed1c00261ef469147298dfc8130b56df8ae2c4 | 1,090 | py | Python | experiment_process.py | naummo/swarm_maze_opencl_solver | 1047e1293e90f484ccc4ff77cfe61196fb7cbbc6 | [
"MIT"
] | null | null | null | experiment_process.py | naummo/swarm_maze_opencl_solver | 1047e1293e90f484ccc4ff77cfe61196fb7cbbc6 | [
"MIT"
] | null | null | null | experiment_process.py | naummo/swarm_maze_opencl_solver | 1047e1293e90f484ccc4ff77cfe61196fb7cbbc6 | [
"MIT"
] | null | null | null | """
experiment_process.py contains some post-processing functionality.
Not used anymore.
"""
import csv
import os
import numpy as np
import configs as cfg
# Ensure the reporting directory exists (exist_ok avoids the race-prone
# exists()/makedirs() pair the original used).
os.makedirs(cfg.reporting_dir, exist_ok=True)

# Collisions: one bucket per second of simulation; each collision timestamp
# (recorded in frames) is mapped to the second in which it occurred.
results = [0 for i in range(cfg.seconds)]
for filename in os.listdir(cfg.reporting_dir):
    if "collisions.csv" in filename:
        # ``with`` guarantees the file is closed even if a row is malformed
        # (the original leaked the handle on any exception).
        # ``newline=''`` is the csv-module-documented way to open CSV files.
        with open(os.path.join(cfg.reporting_dir, filename), newline='') as csvfile:
            csvreader = csv.reader(csvfile, delimiter=',', lineterminator='\n',
                                   quotechar='|', quoting=csv.QUOTE_MINIMAL)
            for row in csvreader:
                for value in row:
                    if value != '':
                        results[int(np.trunc(float(value) / cfg.framespersecond))] += 1
print(results)
with open(os.path.join(".", "collisions_total.csv"), 'w', newline='') as csvfile:
    csvwriter = csv.writer(csvfile, delimiter=',', lineterminator='\n',
                           quotechar='|', quoting=csv.QUOTE_MINIMAL)
    # Zero counts are written as empty cells, matching the original output.
    csvwriter.writerow([(str(val) if val != 0 else "") for val in results])
| 32.058824 | 83 | 0.641284 |
eaebc4ce4aef34cb3be9fcf38d4c692a46788494 | 8,786 | py | Python | src/briefcase/platforms/linux/appimage.py | chuckyQ/briefcase | 06e84e7b1c3af016c828a5a640d277809de6644b | [
"BSD-3-Clause"
] | 3 | 2020-10-20T00:59:22.000Z | 2021-02-22T09:22:00.000Z | src/briefcase/platforms/linux/appimage.py | CuPidev/briefcase | 35619cbe4b512c8521ad3733341e6bc3422efb58 | [
"BSD-3-Clause"
] | null | null | null | src/briefcase/platforms/linux/appimage.py | CuPidev/briefcase | 35619cbe4b512c8521ad3733341e6bc3422efb58 | [
"BSD-3-Clause"
] | 1 | 2021-03-26T11:52:02.000Z | 2021-03-26T11:52:02.000Z | import subprocess
from contextlib import contextmanager
from briefcase.commands import (
BuildCommand,
CreateCommand,
PackageCommand,
PublishCommand,
RunCommand,
UpdateCommand
)
from briefcase.config import BaseConfig
from briefcase.exceptions import BriefcaseCommandError
from briefcase.integrations.docker import verify_docker
from briefcase.integrations.linuxdeploy import LinuxDeploy
from briefcase.platforms.linux import LinuxMixin
class LinuxAppImageMixin(LinuxMixin):
    """Paths, CLI options and Docker handling shared by all AppImage commands."""
    output_format = 'appimage'
    def appdir_path(self, app):
        # The AppDir staging directory inside the app bundle.
        return self.bundle_path(app) / "{app.formal_name}.AppDir".format(app=app)
    def binary_path(self, app):
        # AppImage filenames cannot contain spaces.
        binary_name = app.formal_name.replace(' ', '_')
        return self.platform_path / '{binary_name}-{app.version}-{self.host_arch}.AppImage'.format(
            app=app,
            self=self,
            binary_name=binary_name,
        )
    def distribution_path(self, app):
        # The AppImage itself is the distributable artefact.
        return self.binary_path(app)
    def add_options(self, parser):
        super().add_options(parser)
        parser.add_argument(
            '--no-docker',
            dest='use_docker',
            action='store_false',
            help="Don't use Docker for building the AppImage",
            required=False,
        )
    def parse_options(self, extra):
        """Extract the use_docker option"""
        options = super().parse_options(extra)
        self.use_docker = options.pop('use_docker')
        return options
    def clone_options(self, command):
        """Clone the use_docker option"""
        super().clone_options(command)
        self.use_docker = command.use_docker
    def docker_image_tag(self, app):
        "The Docker image tag for an app"
        return 'briefcase/{app.bundle}.{app_name}:py{self.python_version_tag}'.format(
            app=app,
            self=self,
            app_name=app.app_name.lower()
        )
    def verify_tools(self):
        """
        Verify that Docker is available; and if it isn't that we're on Linux.
        """
        super().verify_tools()
        if self.use_docker:
            if self.host_os == 'Windows':
                raise BriefcaseCommandError("""
Linux AppImages cannot be generated on Windows.
""")
            else:
                self.Docker = verify_docker(self)
        else:
            if self.host_os == 'Linux':
                # Use subprocess natively. No Docker wrapper is needed
                self.Docker = None
            else:
                raise BriefcaseCommandError("""
Linux AppImages can only be generated on Linux.
""")
    @contextmanager
    def dockerize(self, app):
        """
        Enter a Docker container based on the properties of the app.
        Provides a context manager for the Docker context. The context
        object is an object that exposes subprocess-analog calls.
        This will replace self.subprocess with a version that proxies all
        subprocess calls into the docker container.
        If the user has selected --no-docker, this is a no-op.
        :param app: The application that will determine the container image.
        """
        if self.use_docker:
            """
            Enter the Docker context.
            """
            print("[{app.app_name}] Entering Docker context...".format(app=app))
            # Swap self.subprocess for the Docker proxy for the duration of
            # the context, restoring the original afterwards.
            orig_subprocess = self.subprocess
            self.subprocess = self.Docker(self, app)
            yield self.subprocess
            print("[{app.app_name}] Leaving Docker context.".format(app=app))
            self.subprocess = orig_subprocess
        else:
            yield self.subprocess
class LinuxAppImageCreateCommand(LinuxAppImageMixin, CreateCommand):
    description = "Create and populate a Linux AppImage."
    @property
    def support_package_url_query(self):
        """
        The query arguments to use in a support package query request.
        """
        return [
            ('platform', self.platform),
            ('version', self.python_version_tag),
            ('arch', self.host_arch),
        ]
    def install_app_dependencies(self, app: BaseConfig):
        """
        Install application dependencies.
        This will be containerized in Docker to ensure that the right
        binary versions are installed.
        """
        with self.dockerize(app=app) as docker:
            docker.prepare()
            # Install dependencies. This will run inside a Docker container.
            super().install_app_dependencies(app=app)
class LinuxAppImageUpdateCommand(LinuxAppImageMixin, UpdateCommand):
    # All behaviour is inherited; only the user-facing description differs.
    description = "Update an existing Linux AppImage."
class LinuxAppImageBuildCommand(LinuxAppImageMixin, BuildCommand):
    description = "Build a Linux AppImage."
    def verify_tools(self):
        # Building additionally requires the linuxdeploy tool.
        super().verify_tools()
        self.linuxdeploy = LinuxDeploy.verify(self)
    def build_app(self, app: BaseConfig, **kwargs):
        """
        Build an application.
        :param app: The application to build
        """
        print()
        print("[{app.app_name}] Building AppImage...".format(app=app))
        try:
            print()
            # Build the AppImage.
            # For some reason, the version has to be passed in as an
            # environment variable, *not* in the configuration...
            env = {
                'VERSION': app.version
            }
            # Find all the .so files in app and app_packages,
            # so they can be passed in to linuxdeploy to have their
            # dependencies added to the AppImage. Looks for any .so file
            # in the application, and make sure it is marked for deployment.
            so_folders = set()
            for so_file in self.appdir_path(app).glob('**/*.so'):
                so_folders.add(so_file.parent)
            deploy_deps_args = []
            for folder in sorted(so_folders):
                deploy_deps_args.extend(["--deploy-deps-only", str(folder)])
            # Build the app image. We use `--appimage-extract-and-run`
            # because AppImages won't run natively inside Docker.
            with self.dockerize(app) as docker:
                docker.run(
                    [
                        str(self.linuxdeploy.appimage_path),
                        "--appimage-extract-and-run",
                        "--appdir={appdir_path}".format(appdir_path=self.appdir_path(app)),
                        "-d", str(
                            self.appdir_path(app) / "{app.bundle}.{app.app_name}.desktop".format(
                                app=app,
                            )
                        ),
                        "-o", "appimage",
                    ] + deploy_deps_args,
                    env=env,
                    check=True,
                    cwd=str(self.platform_path)
                )
            # Make the binary executable.
            self.os.chmod(str(self.binary_path(app)), 0o755)
        except subprocess.CalledProcessError:
            print()
            raise BriefcaseCommandError(
                "Error while building app {app.app_name}.".format(app=app)
            )
class LinuxAppImageRunCommand(LinuxAppImageMixin, RunCommand):
    description = "Run a Linux AppImage."
    def verify_tools(self):
        """
        Verify that we're on Linux.
        """
        super().verify_tools()
        if self.host_os != 'Linux':
            raise BriefcaseCommandError(
                "AppImages can only be executed on Linux."
            )
    def run_app(self, app: BaseConfig, **kwargs):
        """
        Start the application.
        :param app: The config object for the app
        """
        print()
        print('[{app.app_name}] Starting app...'.format(
            app=app
        ))
        try:
            print()
            # Run the AppImage binary directly; check=True raises on a
            # non-zero exit status.
            self.subprocess.run(
                [
                    str(self.binary_path(app)),
                ],
                check=True,
            )
        except subprocess.CalledProcessError:
            print()
            raise BriefcaseCommandError(
                "Unable to start app {app.app_name}.".format(app=app)
            )
class LinuxAppImagePackageCommand(LinuxAppImageMixin, PackageCommand):
    # Bug fix: the description previously said "Publish" -- a copy-paste from
    # the publish command below; this is the *package* command's help text.
    description = "Package a Linux AppImage."
class LinuxAppImagePublishCommand(LinuxAppImageMixin, PublishCommand):
    # All behaviour is inherited; only the user-facing description differs.
    description = "Publish a Linux AppImage."
# Declare the briefcase command bindings
# (briefcase discovers these module-level names by convention).
create = LinuxAppImageCreateCommand  # noqa
update = LinuxAppImageUpdateCommand  # noqa
build = LinuxAppImageBuildCommand  # noqa
run = LinuxAppImageRunCommand  # noqa
package = LinuxAppImagePackageCommand  # noqa
publish = LinuxAppImagePublishCommand  # noqa
| 32.420664 | 99 | 0.590371 |
dde44a70dd9732c7e3eb3597b1425126ff9aff3a | 428 | py | Python | examples/movements/goal.py | norefle/rl-vista | 033979e588f5b3b87a319dcd3789b40c69687a1c | [
"MIT"
] | 1 | 2021-04-06T07:44:47.000Z | 2021-04-06T07:44:47.000Z | examples/movements/goal.py | norefle/rl-vista | 033979e588f5b3b87a319dcd3789b40c69687a1c | [
"MIT"
] | null | null | null | examples/movements/goal.py | norefle/rl-vista | 033979e588f5b3b87a319dcd3789b40c69687a1c | [
"MIT"
] | null | null | null | from rlv.core.engine import Engine
from rlv.core.entity import Entity
from rlv.core.image import Image
from config import config as cf
class Goal(Entity):
    """Goal marker entity, placed on the grid and rendered as an image."""

    def __init__(self, name, x, y, style):
        # Grid coordinates are converted to pixel coordinates via tile size;
        # the z-order places the goal on the "target" layer.
        tile = cf["tile-size"]
        position = (x * tile, y * tile, cf["z-order"]["target"])
        super().__init__(name, position)
        self.add(Engine.get().image(style=style, entity=self))

    def destroy(self):
        # Drop the rendered image component when the goal is removed.
        self.remove("image")
| 28.533333 | 79 | 0.654206 |
b8c813d77542de9ea0a623d8d4637868d6447cfb | 10,767 | py | Python | tonks/vision/models/multi_task_resnet.py | vanderveld/tonks | e87afbd9614b276b443b4a7527fd1fda01a8be4c | [
"BSD-3-Clause"
] | null | null | null | tonks/vision/models/multi_task_resnet.py | vanderveld/tonks | e87afbd9614b276b443b4a7527fd1fda01a8be4c | [
"BSD-3-Clause"
] | null | null | null | tonks/vision/models/multi_task_resnet.py | vanderveld/tonks | e87afbd9614b276b443b4a7527fd1fda01a8be4c | [
"BSD-3-Clause"
] | null | null | null | import copy
from pathlib import Path
import torch
import torch.nn as nn
from torchvision import models as torch_models
from tonks.vision.helpers import _dense_block, _Identity
class ResnetForMultiTaskClassification(nn.Module):
"""
PyTorch image attribute model. This model allows you to load
in some pretrained tasks in addition to creating new ones.
Examples
--------
To instantiate a completely new instance of ResnetForMultiTaskClassification
and load the weights into this architecture you can set `pretrained` to True::
model = ResnetForMultiTaskClassification(
new_task_dict=new_task_dict,
load_pretrained_resnet = True
)
# DO SOME TRAINING
model.save(SOME_FOLDER, SOME_MODEL_ID)
To instantiate an instance of ResnetForMultiTaskClassification that has layers for
pretrained tasks and new tasks, you would do the following::
model = ResnetForMultiTaskClassification(
pretrained_task_dict=pretrained_task_dict,
new_task_dict=new_task_dict
)
model.load(SOME_FOLDER, SOME_MODEL_DICT)
# DO SOME TRAINING
Parameters
----------
pretrained_task_dict: dict
dictionary mapping each pretrained task to the number of labels it has
new_task_dict: dict
dictionary mapping each new task to the number of labels it has
load_pretrained_resnet: boolean
flag for whether or not to load in pretrained weights for ResNet50.
useful for the first round of training before there are fine tuned weights
"""
    def __init__(self, pretrained_task_dict=None, new_task_dict=None, load_pretrained_resnet=False):
        super(ResnetForMultiTaskClassification, self).__init__()
        # ResNet50 backbone with its final fc replaced by an identity, so it
        # emits raw 2048-d pooled features.
        self.resnet = torch_models.resnet50(pretrained=load_pretrained_resnet)
        self.resnet.fc = _Identity()
        # Full-image and cropped-image features (2048 each) are concatenated,
        # hence the 2048*2 input width of the first dense block.
        self.dense_layers = nn.Sequential(
            _dense_block(2048*2, 1024, 2e-3),
            _dense_block(1024, 512, 2e-3),
            _dense_block(512, 256, 2e-3),
        )
        # One linear head per task, sized by that task's label count.
        if pretrained_task_dict is not None:
            pretrained_layers = {}
            for key, task_size in pretrained_task_dict.items():
                pretrained_layers[key] = nn.Linear(256, task_size)
            self.pretrained_classifiers = nn.ModuleDict(pretrained_layers)
        if new_task_dict is not None:
            new_layers = {}
            for key, task_size in new_task_dict.items():
                new_layers[key] = nn.Linear(256, task_size)
            self.new_classifiers = nn.ModuleDict(new_layers)
    def forward(self, x):
        """
        Defines forward pass for image model
        Parameters
        ----------
        x: dict of image tensors containing tensors for
            full and cropped images. the full image tensor
            has the key 'full_img' and the cropped tensor has
            the key 'crop_img'
        Returns
        ----------
        A dictionary mapping each task to its logits
        """
        # NOTE(review): ``.squeeze()`` removes *all* singleton dimensions, so
        # a batch of size 1 would lose its batch dimension before ``cat`` --
        # confirm callers never run single-image batches.
        full_img = self.resnet(x['full_img']).squeeze()
        crop_img = self.resnet(x['crop_img']).squeeze()
        # Concatenate both feature vectors along the feature dimension.
        full_crop_combined = torch.cat((full_img, crop_img), 1)
        dense_layer_output = self.dense_layers(full_crop_combined)
        # All task heads share the same 256-d dense representation.
        logit_dict = {}
        if hasattr(self, 'pretrained_classifiers'):
            for key, classifier in self.pretrained_classifiers.items():
                logit_dict[key] = classifier(dense_layer_output)
        if hasattr(self, 'new_classifiers'):
            for key, classifier in self.new_classifiers.items():
                logit_dict[key] = classifier(dense_layer_output)
        return logit_dict
    def freeze_core(self):
        """Freeze all core (ResNet backbone) model layers"""
        for param in self.resnet.parameters():
            param.requires_grad = False
    def freeze_dense(self):
        """Freeze the shared dense layers"""
        # (Docstring fixed: it previously said "core model layers", copied
        # from ``freeze_core`` -- this method freezes the dense head stack.)
        for param in self.dense_layers.parameters():
            param.requires_grad = False
    def freeze_all_pretrained(self):
        """Freeze pretrained classifier layers and core model layers"""
        self.freeze_core()
        self.freeze_dense()
        if hasattr(self, 'pretrained_classifiers'):
            for param in self.pretrained_classifiers.parameters():
                param.requires_grad = False
        else:
            print('There are no pretrained_classifier layers to be frozen.')
    def unfreeze_pretrained_classifiers(self):
        """Unfreeze pretrained classifier layers"""
        if hasattr(self, 'pretrained_classifiers'):
            for param in self.pretrained_classifiers.parameters():
                param.requires_grad = True
        else:
            print('There are no pretrained_classifier layers to be unfrozen.')
    def unfreeze_pretrained_classifiers_and_core(self):
        """Unfreeze pretrained classifiers and core model layers"""
        for param in self.resnet.parameters():
            param.requires_grad = True
        for param in self.dense_layers.parameters():
            param.requires_grad = True
        self.unfreeze_pretrained_classifiers()
    def save(self, folder, model_id):
        """
        Saves the model state dicts to a specific folder.
        Each part of the model is saved separately to allow for
        new classifiers to be added later.
        Note: if the model has `pretrained_classifiers` and `new_classifers`,
        they will be combined into the `pretrained_classifiers_dict`.
        Parameters
        ----------
        folder: str or Path
            place to store state dictionaries
        model_id: int
            unique id for this model
        Side Effects
        ------------
        saves three files:
            - folder / f'resnet_dict_{model_id}.pth'
            - folder / f'dense_layers_dict_{model_id}.pth'
            - folder / f'pretrained_classifiers_dict_{model_id}.pth'
        """
        # Merge new classifiers into the pretrained set so a later ``load``
        # can restore them all from a single state dict.
        if not hasattr(self, 'pretrained_classifiers'):
            classifiers_to_save = copy.deepcopy(self.new_classifiers)
        else:
            # PyTorch's update method isn't working because it doesn't think ModuleDict is a Mapping
            classifiers_to_save = copy.deepcopy(self.pretrained_classifiers)
            if hasattr(self, 'new_classifiers'):
                for key, module in self.new_classifiers.items():
                    classifiers_to_save[key] = module
        folder = Path(folder)
        folder.mkdir(parents=True, exist_ok=True)
        torch.save(
            self.resnet.state_dict(),
            folder / f'resnet_dict_{model_id}.pth'
        )
        torch.save(
            self.dense_layers.state_dict(),
            folder / f'dense_layers_dict_{model_id}.pth'
        )
        torch.save(
            classifiers_to_save.state_dict(),
            folder / f'pretrained_classifiers_dict_{model_id}.pth'
        )
def load(self, folder, model_id):
    """
    Loads the model state dicts from a specific folder.

    Parameters
    ----------
    folder: str or Path
        place where state dictionaries are stored
    model_id: int
        unique id for this model

    Side Effects
    ------------
    loads from three files:
        - folder / f'resnet_dict_{model_id}.pth'
        - folder / f'dense_layers_dict_{model_id}.pth'
        - folder / f'pretrained_classifiers_dict_{model_id}.pth'
    """
    folder = Path(folder)
    if torch.cuda.is_available():
        map_location = None
    else:
        # Remap tensors that were saved on a GPU onto the CPU.
        map_location = lambda storage, loc: storage
    parts = (
        (self.resnet, f'resnet_dict_{model_id}.pth'),
        (self.dense_layers, f'dense_layers_dict_{model_id}.pth'),
        (self.pretrained_classifiers,
         f'pretrained_classifiers_dict_{model_id}.pth'),
    )
    for module, filename in parts:
        module.load_state_dict(
            torch.load(folder / filename, map_location=map_location))
def export(self, folder, model_id, model_name=None):
    """
    Exports the entire model state dict to a specific folder.

    Note: if the model has `pretrained_classifiers` and `new_classifiers`,
    they will be combined into the `pretrained_classifiers` attribute before
    being saved; the original attributes are restored afterwards.

    Parameters
    ----------
    folder: str or Path
        place to store state dictionaries
    model_id: int
        unique id for this model
    model_name: str (defaults to None)
        Name to store model under. If None, will default to
        `multi_task_resnet_{model_id}.pth`.

    Side Effects
    ------------
    saves one file:
        - folder / model_name
    """
    # Snapshot the current classifier attributes so they can be restored
    # after the temporarily merged state dict has been written out.
    # deepcopy(None) is None, so a missing attribute stays None.
    hold_new_classifiers = copy.deepcopy(getattr(self, 'new_classifiers', None))
    hold_pretrained_classifiers = None
    if not hasattr(self, 'pretrained_classifiers'):
        self.pretrained_classifiers = copy.deepcopy(self.new_classifiers)
    else:
        hold_pretrained_classifiers = copy.deepcopy(self.pretrained_classifiers)
        # PyTorch's update method isn't working because it doesn't think
        # ModuleDict is a Mapping, so merge the entries by hand.
        if hasattr(self, 'new_classifiers'):
            for key, module in self.new_classifiers.items():
                self.pretrained_classifiers[key] = module
    if hasattr(self, 'new_classifiers'):
        del self.new_classifiers
    if model_name is None:
        model_name = f'multi_task_resnet_{model_id}.pth'
    folder = Path(folder)
    folder.mkdir(parents=True, exist_ok=True)
    torch.save(
        self.state_dict(),
        folder / model_name
    )
    # Restore the attributes that were modified for the export.
    if hold_pretrained_classifiers is not None:
        self.pretrained_classifiers = hold_pretrained_classifiers
    else:
        del self.pretrained_classifiers
    if hold_new_classifiers is not None:
        self.new_classifiers = hold_new_classifiers
| 36.252525 | 100 | 0.617349 |
72e24e48125b5eb1605f3edaacea9fbaefd4ca57 | 2,140 | py | Python | setup.py | NSLS-II/bad-seeds | f6499fd1933baabf7cb6321388d95dd69b4fc27b | [
"BSD-3-Clause"
] | null | null | null | setup.py | NSLS-II/bad-seeds | f6499fd1933baabf7cb6321388d95dd69b4fc27b | [
"BSD-3-Clause"
] | 1 | 2021-02-05T23:56:56.000Z | 2021-02-08T18:03:30.000Z | setup.py | NSLS-II/bad-seeds | f6499fd1933baabf7cb6321388d95dd69b4fc27b | [
"BSD-3-Clause"
] | null | null | null | from os import path
from setuptools import setup, find_packages
import sys
import versioneer
# NOTE: This file must remain Python 2 compatible for the foreseeable future,
# to ensure that we error out properly for people with outdated setuptools
# and/or pip.
min_version = (3, 7)
if sys.version_info < min_version:
error = """
bad-seeds does not support Python {0}.{1}.
Python {2}.{3} and above is required. Check your Python version like so:
python3 --version
This may be due to an out-of-date pip. Make sure you have pip >= 9.0.1.
Upgrade pip like so:
pip install --upgrade pip
""".format(*(sys.version_info[:2] + min_version))
sys.exit(error)
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.rst'), encoding='utf-8') as readme_file:
readme = readme_file.read()
with open(path.join(here, 'requirements.txt')) as requirements_file:
# Parse requirements.txt, ignoring any commented-out lines.
requirements = [line for line in requirements_file.read().splitlines()
if not line.startswith('#')]
setup(
name='bad-seeds',
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
description="Learn to efficiently measure samples at a beamline.",
long_description=readme,
author="Brookhaven National Lab",
author_email='',
url='https://github.com/jklynch/bad-seeds',
python_requires='>={}'.format('.'.join(str(n) for n in min_version)),
packages=find_packages(exclude=['docs', 'tests']),
entry_points={
'console_scripts': [
# 'command = some.module:some_function',
],
},
include_package_data=True,
package_data={
'bad_seeds': [
# When adding files here, remember to update MANIFEST.in as well,
# or else they will not be included in the distribution on PyPI!
# 'path/to/data_file',
]
},
install_requires=requirements,
license="BSD (3-clause)",
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Natural Language :: English',
'Programming Language :: Python :: 3',
],
)
| 31.470588 | 77 | 0.664953 |
0d03ac499b99f97313508263450cc568daeeb6ba | 3,028 | py | Python | pygazebo/msg/subscribe_pb2.py | WindhoverLabs/pygazebo | 9c977703be5c04fe931e7ec522fb7aa1e6bbe05e | [
"Apache-2.0"
] | null | null | null | pygazebo/msg/subscribe_pb2.py | WindhoverLabs/pygazebo | 9c977703be5c04fe931e7ec522fb7aa1e6bbe05e | [
"Apache-2.0"
] | null | null | null | pygazebo/msg/subscribe_pb2.py | WindhoverLabs/pygazebo | 9c977703be5c04fe931e7ec522fb7aa1e6bbe05e | [
"Apache-2.0"
] | null | null | null | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: subscribe.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
# Generated protocol-buffer module for the gazebo.msgs.Subscribe message.
# NOTE(review): this is protoc-generated, Python-2-only code (`unicode`
# defaults, `__metaclass__`); regenerate with a modern protoc rather than
# editing by hand.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='subscribe.proto',
  package='gazebo.msgs',
  serialized_pb='\n\x0fsubscribe.proto\x12\x0bgazebo.msgs\"a\n\tSubscribe\x12\r\n\x05topic\x18\x01 \x02(\t\x12\x0c\n\x04host\x18\x02 \x02(\t\x12\x0c\n\x04port\x18\x03 \x02(\r\x12\x10\n\x08msg_type\x18\x04 \x02(\t\x12\x17\n\x08latching\x18\x05 \x01(\x08:\x05\x66\x61lse')

# Message descriptor for Subscribe: topic/host/port/msg_type are required
# (label=2); latching is optional (label=1) and defaults to False.
_SUBSCRIBE = _descriptor.Descriptor(
  name='Subscribe',
  full_name='gazebo.msgs.Subscribe',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='topic', full_name='gazebo.msgs.Subscribe.topic', index=0,
      number=1, type=9, cpp_type=9, label=2,
      has_default_value=False, default_value=unicode("", "utf-8"),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='host', full_name='gazebo.msgs.Subscribe.host', index=1,
      number=2, type=9, cpp_type=9, label=2,
      has_default_value=False, default_value=unicode("", "utf-8"),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='port', full_name='gazebo.msgs.Subscribe.port', index=2,
      number=3, type=13, cpp_type=3, label=2,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='msg_type', full_name='gazebo.msgs.Subscribe.msg_type', index=3,
      number=4, type=9, cpp_type=9, label=2,
      has_default_value=False, default_value=unicode("", "utf-8"),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='latching', full_name='gazebo.msgs.Subscribe.latching', index=4,
      number=5, type=8, cpp_type=7, label=1,
      has_default_value=True, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=32,
  serialized_end=129,
)

DESCRIPTOR.message_types_by_name['Subscribe'] = _SUBSCRIBE

class Subscribe(_message.Message):
  # Concrete message class is generated via the reflection metaclass.
  __metaclass__ = _reflection.GeneratedProtocolMessageType
  DESCRIPTOR = _SUBSCRIBE

  # @@protoc_insertion_point(class_scope:gazebo.msgs.Subscribe)

# @@protoc_insertion_point(module_scope)
| 35.209302 | 270 | 0.730185 |
0c85c9721be4921f511a1e23a0e0de4c508b1d29 | 3,441 | py | Python | src/olympia/translations/forms.py | osamamagdy/addons-server | f7326c94d1d40c71eca991242288edf799146182 | [
"BSD-3-Clause"
] | 2 | 2021-07-19T03:26:43.000Z | 2021-07-24T03:12:52.000Z | src/olympia/translations/forms.py | osamamagdy/addons-server | f7326c94d1d40c71eca991242288edf799146182 | [
"BSD-3-Clause"
] | 760 | 2021-05-17T07:59:30.000Z | 2022-03-31T11:14:15.000Z | src/olympia/translations/forms.py | osamamagdy/addons-server | f7326c94d1d40c71eca991242288edf799146182 | [
"BSD-3-Clause"
] | 1 | 2021-07-19T03:26:52.000Z | 2021-07-19T03:26:52.000Z | from django.conf import settings
from django.db import models
from django.forms import ValidationError
from django.forms.utils import ErrorList
from django.utils.encoding import force_str
from django.utils.html import conditional_escape, format_html, format_html_join
from django.utils.safestring import mark_safe
from django.utils.translation.trans_real import to_language
from .fields import LocaleErrorMessage, _TransField
def default_locale(obj):
    """Get obj's default locale, resolving model-field fallbacks."""
    if not hasattr(obj, 'get_fallback'):
        return settings.LANGUAGE_CODE
    fallback = obj.get_fallback()
    if isinstance(fallback, models.Field):
        # A Field instance names the attribute that holds the actual value.
        fallback = getattr(obj, fallback.name)
    return fallback
class TranslationFormMixin(object):
    """
    A mixin for forms with translations that tells fields about the object's
    default locale.
    """

    # Hack to restore behavior from pre Django 1.10 times.
    # Django 1.10 enabled `required` rendering for required widgets. That
    # wasn't the case before, this should be fixed properly but simplifies
    # the actual Django 1.11 deployment for now.
    # See https://github.com/mozilla/addons-server/issues/8912 for proper fix.
    use_required_attribute = False

    def __init__(self, *args, **kwargs):
        # Force our locale-aware error list onto the form.
        kwargs['error_class'] = LocaleErrorList
        super(TranslationFormMixin, self).__init__(*args, **kwargs)
        self.set_locale_field_defaults()

    def set_locale_field_defaults(self):
        locale = to_language(default_locale(self.instance))
        for name, form_field in self.fields.items():
            if not isinstance(form_field, _TransField):
                continue
            form_field.set_default_values(
                field_name=name, parent_form=self, default_locale=locale
            )

    def add_error(self, field, error):
        if not isinstance(error, LocaleErrorMessage):
            # Didn't come from a translation field, forward
            # to original implementation.
            super(TranslationFormMixin, self).add_error(field, error)
            return
        self._errors.setdefault(field, self.error_class())
        self._errors[field].append(error)
        # Drop the now-invalid cleaned value, if present.
        self.cleaned_data.pop(field, None)

    def full_clean(self):
        self.set_locale_field_defaults()
        return super(TranslationFormMixin, self).full_clean()
class LocaleErrorList(ErrorList):
    """ErrorList that renders a data-lang attribute for locale-aware errors."""

    def as_ul(self):
        if not self.data:
            return ''
        items = []
        for entry in self.data:
            if isinstance(entry, LocaleErrorMessage):
                attr = mark_safe(
                    ' data-lang="%s"' % conditional_escape(entry.locale))
                text = entry.message
            else:
                attr = ''
                text = ''.join(list(entry))
            items.append((attr, conditional_escape(force_str(text))))
        return mark_safe(
            format_html(
                '<ul class="{}">{}</ul>',
                self.error_class,
                format_html_join('', '<li{}>{}</li>', items),
            )
        )

    def __getitem__(self, i):
        entry = self.data[i]
        if isinstance(entry, LocaleErrorMessage):
            return entry.message
        if isinstance(entry, ValidationError):
            return list(entry)[0]
        return force_str(entry)
| 35.112245 | 82 | 0.635571 |
552ed29f65bba7b199e7fee7521d0ad0de577701 | 75,900 | py | Python | tensorflow/python/eager/function.py | zengjia110/tensorflow | 6dd278831a62be829ce6f15039e5b6b368b3727c | [
"Apache-2.0"
] | null | null | null | tensorflow/python/eager/function.py | zengjia110/tensorflow | 6dd278831a62be829ce6f15039e5b6b368b3727c | [
"Apache-2.0"
] | null | null | null | tensorflow/python/eager/function.py | zengjia110/tensorflow | 6dd278831a62be829ce6f15039e5b6b368b3727c | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=unidiomatic-typecheck
"""Defun decorator for defining graph-mode functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import sys
import threading
import numpy as np
import six
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import function_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.eager import context
from tensorflow.python.eager import execute
from tensorflow.python.eager import tape
from tensorflow.python.eager.graph_only_ops import graph_placeholder
from tensorflow.python.framework import c_api_util
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes as dtypes_module
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import cond_v2_impl
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.training import distribution_strategy_context
from tensorflow.python.util import compat
from tensorflow.python.util import nest
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
# This is to avoid a circular dependency with cond_v2_impl
# (function -> gradients_impl -> control_flow_ops -> cond_v2_impl).
cond_v2_impl._function = sys.modules[__name__] # pylint: disable=protected-access
# This is to avoid a circular dependency with gradients_impl
gradients_impl._function = sys.modules[__name__] # pylint: disable=protected-access
# TODO(scottzhu): Update this to allow arbitrary attribute names in future.
WHITELIST_FUNCTION_ATTRIBUTE_PREFIX = "experimental_"
def _create_substitute_placeholder(value, name, dtype=None):
  """Creates a placeholder for `value` and propagates shape info to it.

  Args:
    value: Tensor whose dtype/shape (and, for resource tensors, handle data)
      should be mirrored onto the placeholder.
    name: name for the created placeholder.
    dtype: optional dtype override; defaults to `value.dtype`.

  Returns:
    The new placeholder tensor.
  """
  # Note: setting ops.control_dependencies(None) ensures we always put
  # capturing placeholders outside of any control flow context.
  with ops.control_dependencies(None):
    placeholder = graph_placeholder(
        dtype=dtype or value.dtype, shape=value.shape, name=name)
  if placeholder.dtype == dtypes_module.resource:
    # Resource tensors carry extra handle metadata (shape/dtype of the
    # resource's contents) which must be copied to the placeholder so that
    # shape inference inside the function body works.
    if isinstance(value, ops.EagerTensor):
      handle_data = value._handle_data  # pylint: disable=protected-access
    else:
      handle_data = resource_variable_ops.get_resource_handle_data(value)
    if handle_data is not None and handle_data.is_set:
      # pylint: disable=protected-access
      pywrap_tensorflow.SetResourceHandleShapeAndType(
          placeholder.graph._c_graph, placeholder._as_tf_output(),
          handle_data.SerializeToString())
      # pylint: enable=protected-access
      # Ensure that shapes and dtypes are propagated.
      shapes, types = zip(*[(pair.shape, pair.dtype)
                            for pair in handle_data.shape_and_type])
      # -1 / None encode an unknown rank for the C API wrapper below.
      ranks = [len(s.dim) if not s.unknown_rank else -1 for s in shapes]
      shapes = [[d.size for d in s.dim]
                if not s.unknown_rank else None for s in shapes]
      pywrap_tensorflow.TF_GraphSetOutputHandleShapesAndTypes_wrapper(
          placeholder._op._graph._c_graph,  # pylint: disable=protected-access
          placeholder._as_tf_output(),  # pylint: disable=protected-access
          shapes, ranks, types)
  return placeholder
def _get_device_functions(ctx, graph):
"""Returns a tuple of device functions representing the device stack."""
if ctx.executing_eagerly():
return (pydev.merge_device(ctx.device_name),)
else:
return tuple(graph._device_functions_outer_to_inner) # pylint: disable=protected-access
def _parse_func_attrs(attributes):
"""Convert the keyword arguments into function_def attributes.
Currently only support primitive types: bool, int, float and string.
Args:
attributes: the dictionary of attributes.
Returns:
A dict of attributes where the key is the name of attribute and the value
is the AttrValue proto.
Raises:
ValueError: If the kwargs contains unwhitelisted name or unsupported value
types.
"""
attrs = {}
for key, value in attributes.items():
if not key.startswith(WHITELIST_FUNCTION_ATTRIBUTE_PREFIX):
raise ValueError("Attribute name is not whitelisted. "
"Whitelisted: prefix %s, got: %s" %
(WHITELIST_FUNCTION_ATTRIBUTE_PREFIX, key))
if isinstance(value, attr_value_pb2.AttrValue):
attrs[key] = value
# bool type check has to happen before int since bool is a subclass of int.
elif isinstance(value, bool):
attrs[key] = attr_value_pb2.AttrValue(b=value)
elif isinstance(value, int):
attrs[key] = attr_value_pb2.AttrValue(i=value)
elif isinstance(value, float):
attrs[key] = attr_value_pb2.AttrValue(f=value)
elif isinstance(value, str):
attrs[key] = attr_value_pb2.AttrValue(s=compat.as_bytes(value))
else:
raise ValueError("Unsupported attribute type for %s with type %s" %
(key, type(value)))
return attrs
class FuncGraph(ops.Graph):
  """Graph representing a function body.

  Attributes:
    name: The name of the function.
    inputs: Placeholder tensors representing the inputs to this function. The
      tensors are in this FuncGraph. This represents "regular" inputs as well
      as captured inputs (i.e. the values of self.captures), with the regular
      inputs coming first.
    outputs: Tensors that will be returned by this function. The tensors are
      in this FuncGraph.
    structured_outputs: A possibly-nested python object which will be returned
      by this function. The Tensors in this structure are the same as those of
      self.outputs. Note that this structure might contain Python `None`s.
    variables: Variables that should be watched during function execution.
    outer_graph: The graph this function is defined in. May be another
      FuncGraph or the global default Graph.
    captures: Maps external tensor -> internal tensor (i.e. input
      placeholder). The entries are in the order they were captured.
    seed: The graph-level random seed.
  """

  def __init__(self, name):
    """Construct a new FuncGraph.

    The graph will inherit its graph key, collections, seed, device stack, and
    distribution strategy stack from the current context or graph.

    Args:
      name: the name of the function.
    """
    super(FuncGraph, self).__init__()

    self.name = name
    self.inputs = []
    self.outputs = []
    self.structured_outputs = None
    self.variables = []
    self.outer_graph = ops.get_default_graph()
    self.captures = collections.OrderedDict()

    # Marks this graph as a function body for ops.Graph machinery.
    self._building_function = True
    # Map from resource tensor name to last op (in program order) which uses
    # this tensor. Used to enforce that execution order matches program order
    # for resource tensors.
    self._last_op_using_resource_tensor = {}

    graph = self.outer_graph

    if context.executing_eagerly():
      self.seed = context.global_seed()
      self._xla_compile = (context.context().device_spec.device_type == "TPU")
      self._add_device_to_stack(context.context().device_name)
    else:
      # Inherit graph-mode state from the enclosing graph.
      self.seed = graph.seed
      self._xla_compile = getattr(graph, "_xla_compile", False)
      self._device_function_stack = graph._device_function_stack.copy()  # pylint: disable=protected-access
      self._colocation_stack = graph._colocation_stack.copy()  # pylint: disable=protected-access

    # TODO(b/112165328, b/112906995): summaries depend on inheriting
    # collections from the default graph even in eager mode. It'd be nice to
    # not have a default graph with eager execution, so hopefully this will go
    # away when we remove collections.
    # pylint: disable=protected-access
    self._collections = graph._collections
    # TODO(b/112906995): distribution strategy depends on inheriting this
    # stack from the default graph even in eager mode. Maybe it should be part
    # of the eager context?
    self._distribution_strategy_stack = graph._distribution_strategy_stack
    # Inherit the graph key, since this is used for matching variables in
    # optimizers.
    self._graph_key = graph._graph_key
    # pylint: enable=protected-access

  def create_op(
      self,
      op_type,
      inputs,
      dtypes,
      input_types=None,
      name=None,
      attrs=None,
      op_def=None,
      compute_shapes=True,
      compute_device=True):
    """Like Graph.create_op, except handles external input tensors.

    This overload adds functionality to create_op to "capture" any external
    input tensors, i.e. tensors from the eager context or outer function
    graphs if this is a nested function. See `capture` for more information.

    Args:
      op_type: The `Operation` type to create. This corresponds to the
        `OpDef.name` field for the proto that defines the operation.
      inputs: A list of `Tensor` objects that will be inputs to the
        `Operation`.
      dtypes: A list of `DType` objects that will be the types of the tensors
        that the operation produces.
      input_types: (Optional.) A list of `DType`s that will be the types of
        the tensors that the operation consumes. By default, uses the base
        `DType` of each input in `inputs`. Operations that expect
        reference-typed inputs must specify `input_types` explicitly.
      name: (Optional.) A string name for the operation. If not specified, a
        name is generated based on `op_type`.
      attrs: (Optional.) A dictionary where the key is the attribute name (a
        string) and the value is the respective `attr` attribute of the
        `NodeDef` proto that will represent the operation (an `AttrValue`
        proto).
      op_def: (Optional.) The `OpDef` proto that describes the `op_type` that
        the operation will have.
      compute_shapes: (Optional.) Deprecated. Has no effect (shapes are always
        computed).
      compute_device: (Optional.) If True, device functions will be executed
        to compute the device property of the Operation.

    Returns:
      An `Operation` object.
    """
    # This capturing logic interacts poorly with control flow contexts which
    # want to replace inputs of ops far too late in the process. This can lead
    # the context to get confused and try to create an Enter for an Enter. We
    # can detect this here and skip the additional Enter which can confuse
    # loop validation logic.
    if op_type == "Enter" and inputs[0].op.type == "Enter":
      if inputs[0].op.get_attr("frame_name") == attrs["frame_name"].s:
        return inputs[0].op
    # Calling AddValue on the control flow contexts to force creation of the
    # backward accumulators in the original graph before we create
    # placeholders to capture the inputs.
    ctxt = ops.get_default_graph()._control_flow_context  # pylint: disable=protected-access
    for i, inp in enumerate(inputs):
      # TPU Estimator defines a control flow context with no AddValue method.
      if ctxt is not None and hasattr(ctxt, "AddValue"):
        inp = ctxt.AddValue(inp)
      # Replace any external tensor with a placeholder captured in this graph.
      inp = self.capture(inp)
      inputs[i] = inp
    return super(FuncGraph, self).create_op(
        op_type, inputs, dtypes, input_types, name, attrs, op_def,
        compute_device=compute_device)

  def capture(self, tensor, name=None):
    """Captures `tensor` if it's external to this graph.

    If `tensor` is from a different graph, returns a placeholder for it.
    `tensor` and the placeholder will appear in self.captures, and the
    placeholder will appear in self.inputs. Multiple calls to this method with
    the same `tensor` argument will return the same placeholder. If `tensor`
    is from this graph, returns `tensor`.

    Args:
      tensor: Tensor. May be from this FuncGraph or a different graph.
      name: Optional name if a placeholder is created.

    Returns:
      Tensor from this FuncGraph.
    """
    if isinstance(tensor, ops.EagerTensor):
      if name is None:
        name = str(ops.uid())
      return self._capture_helper(tensor, name)
    if tensor.graph is not self:
      if name is None:
        name = tensor.op.name
      return self._capture_helper(tensor, name)
    return tensor

  def _capture_helper(self, tensor, name):
    # Reuse an existing capture placeholder when the same external tensor is
    # captured more than once.
    captured_tensor = self.captures.get(tensor, None)
    if captured_tensor is None:
      captured_tensor = _create_substitute_placeholder(tensor, name=name,
                                                       dtype=tensor.dtype)
      self.captures[tensor] = captured_tensor
      self.inputs.append(captured_tensor)
    # Record the capture on the tape so gradients flow from the placeholder
    # back to the captured external tensor.
    tape.record_operation("captured_value", [captured_tensor], [tensor],
                          lambda x: [x])
    return captured_tensor

  @property
  def external_captures(self):
    """External tensors captured by this function."""
    return list(self.captures.keys())

  @property
  def internal_captures(self):
    """Placeholders in this function corresponding captured tensors."""
    return list(self.captures.values())
def _forward_name(n):
  """The name of a generated forward defun named n."""
  return "__forward_{}_{}".format(n, ops.uid())
def _backward_name(n):
  """The name of a generated backward defun named n."""
  return "__backward_{}_{}".format(n, ops.uid())
def _inference_name(n):
  """The name of a forward-but-no-gradient defun named n."""
  return "__inference_{}_{}".format(n, ops.uid())
def _register(fn):
  """Registers the function `fn` with the eager context."""
  ctx = context.context()
  ctx.add_function(fn)
# TODO(apassos) get rid of this by splitting framework.function._DefinedFunction
# so it doesn't have the definition-generating logic and is just a container for
# an already-defined function.
class _EagerDefinedFunction(object):
  """Callable with the interface of `framework.function._DefinedFunction.`

  `_EagerDefinedFunction` encapsulates a function definition and its
  properties, and it provides a method for calling the encapsulated function.
  Some Ops take functions as attributes, which have type `func`; an instance
  of this class may be provided as the value of these `func` attributes.
  """

  def __init__(self, name, graph, inputs, outputs, attrs):
    """Initializes an eager defined function.

    Args:
      name: str, the name for the created function.
      graph: Graph, the graph containing the operations in the function
      inputs: the tensors in the graph to be used as inputs to the function
      outputs: the tensors in the graph which will be outputs to the function
      attrs: dict mapping names of attributes to their AttrValue values
    """
    # Every op in the graph except the input placeholders becomes part of the
    # function body.
    operations = [
        op for op in graph.get_operations()
        if op not in set(arg.op for arg in inputs)
    ]
    # Build the TF_Function via the C API.
    fn = pywrap_tensorflow.TF_GraphToFunction_wrapper(
        graph._c_graph,  # pylint: disable=protected-access
        compat.as_str(name),
        False,
        [o._c_op for o in operations],  # pylint: disable=protected-access
        [t._as_tf_output() for t in inputs],  # pylint: disable=protected-access
        [t._as_tf_output() for t in outputs],  # pylint: disable=protected-access
        [],
        None,
        compat.as_str(""))

    for name, attr_value in attrs.items():
      serialized = attr_value.SerializeToString()
      # TODO(iga): this creates and deletes a new TF_Status for every attr.
      # It might be worth creating a convenient way to re-use status.
      pywrap_tensorflow.TF_FunctionSetAttrValueProto(
          fn, compat.as_str(name), serialized)

    # TODO(apassos) avoid creating a FunctionDef (specially to grab the
    # signature, but also in general it's nice not to depend on it.
    with c_api_util.tf_buffer() as buffer_:
      pywrap_tensorflow.TF_FunctionToFunctionDef(fn, buffer_)
      proto_data = pywrap_tensorflow.TF_GetBuffer(buffer_)
    function_def = function_pb2.FunctionDef()
    function_def.ParseFromString(compat.as_bytes(proto_data))
    if context.executing_eagerly():
      _register(fn)
    self.definition = function_def
    self.name = compat.as_bytes(function_def.signature.name)
    self.signature = function_def.signature
    self._num_outputs = len(self.signature.output_arg)
    self._output_types = [o.type for o in self.signature.output_arg]
    self._output_shapes = [o.shape for o in outputs]
    self.grad_func_name = None
    self.python_grad_func = None
    # ScopedTFFunction owns the TF_Function and deletes it on GC.
    self._c_func = c_api_util.ScopedTFFunction(fn)
    self._grad_func = None
    self._graph = graph
    self._stateful_ops = tuple(op for op in operations if op.op_def.is_stateful)

  def add_to_graph(self, g):
    """Adds this function (and any functions it calls) to graph `g`."""
    # pylint: disable=protected-access
    if self.name not in g._functions:
      g._add_function(self)
    for f in self._graph._functions.values():
      if f.name not in g._functions:
        g._add_function(f)
    # pylint: enable=protected-access

  @property
  def stateful_ops(self):
    # Stateful ops captured in the body; callers use this to decide whether
    # the function has side effects.
    return self._stateful_ops

  def call(self, ctx, args):
    """Calls this function with `args` as inputs.

    Function execution respects device annotations only if the function won't
    be compiled with xla.

    Args:
      ctx: a Context object
      args: a list of arguments to supply this function with.

    Returns:
      The outputs of the function call.
    """
    executing_eagerly = ctx.executing_eagerly()

    if self._graph._xla_compile:  # pylint: disable=protected-access
      # XLA compilation relies upon a custom kernel creator to run functions.
      signature = self.signature
      if executing_eagerly:
        outputs = execute.execute(
            str(signature.name),
            num_outputs=self._num_outputs,
            inputs=args,
            attrs=None,
            ctx=ctx)
      else:
        g = ops.get_default_graph()
        self.add_to_graph(g)
        op = g.create_op(
            signature.name,
            [ops.internal_convert_to_tensor(x, ctx=ctx) for x in args],
            tuple(dtypes_module.DType(x.type) for x in signature.output_arg),
            op_def=signature,
            name="FunctionCall",
            compute_shapes=False)
        outputs = op.outputs
        if not outputs:
          return op
        outputs = [outputs] if isinstance(
            outputs, (ops.Tensor, type(None))) else list(outputs)
    else:
      # TODO(akshayka): Either remove this if the FunctionLibraryRuntime
      # creates `PartitionedCallOp` kernels by default, or remove the previous
      # branch if a TPU kernel is registered for `PartitionedCall`.
      outputs = functional_ops.partitioned_call(
          args=args,
          f=self,
          tout=self._output_types,
          executing_eagerly=executing_eagerly)

    if executing_eagerly:
      return outputs
    else:
      # In graph mode the C API does not propagate output shapes; restore
      # them from the shapes recorded at definition time.
      for i, shape in enumerate(self._output_shapes):
        outputs[i].set_shape(shape)
      return outputs
def _flatten(sequence):
  """A wrapper around `nest.flatten` that also unpacks `IndexedSlices`."""
  # TODO(akshayka): Support `SparseTensor` in a similar fashion.
  results = []
  for element in nest.flatten(sequence):
    if not isinstance(element, ops.IndexedSlices):
      results.append(element)
    elif element.dense_shape is not None:
      results.extend([element.values, element.indices, element.dense_shape])
    else:
      results.extend([element.values, element.indices])
  return results
class Function(object):
  """Callable object encapsulating a function definition and its gradient.

  `Function` is a callable that encapsulates a function definition and
  is differentiable under `tf.GradientTape` objects.
  """

  def __init__(self, func_graph, attrs=None):
    """Initialize a Function.

    Args:
      func_graph: An instance of FuncGraph: the function body to wrap.
      attrs: (optional) dict mapping names of attributes to their AttrValue
        values. Attributes in `attrs` will be included in this function's
        definition.

    Raises:
      ValueError: If number of input_placeholders is not equal to the number
        of function inputs.
    """
    self._func_graph = func_graph
    # Tensors captured from the outer graph/eager context; they are appended
    # to the user-supplied arguments on every call.
    self._captured_inputs = list(self._func_graph.captures.keys())
    self._num_outputs = len(self._func_graph.outputs)
    self._output_shapes = tuple(
        output.shape for output in self._func_graph.outputs)
    self._attrs = _parse_func_attrs(attrs)
    # Snapshot of the device stack at definition time; `__call__` rejects
    # invocations made under a different device stack.
    self._device_functions = tuple(
        self._func_graph._device_functions_outer_to_inner)  # pylint: disable=protected-access
    self._inference_function = _EagerDefinedFunction(
        _inference_name(self._func_graph.name), self._func_graph,
        self._func_graph.inputs, self._func_graph.outputs, self._attrs)
    # Built lazily by `_construct_backprop_function` on the first call that
    # is recorded on a tape.
    self._backward_graph_function = None
    # Map holding distributed variables, keyed by resource handle tensors.
    self._distributed_variables = {}
    strategy = distribution_strategy_context.get_distribution_strategy()
    for variable in self._func_graph.variables:
      # If variable is not distributed, unwrap returns [variable].
      component_variables = strategy.unwrap(variable)
      # Only update the dictionary when the variable is actually distributed.
      if (len(component_variables) > 1 or component_variables[0] != variable):
        for component_variable in component_variables:
          self._distributed_variables[component_variable.handle] = variable

  def __call__(self, *args):
    """Executes the wrapped function.

    Args:
      *args: inputs to the traced function; resolved captured inputs are
        appended automatically.

    Returns:
      The structured outputs of the traced function.

    Raises:
      ValueError: if the call-site device stack differs from the one under
        which this function was created.
    """
    ctx = context.context()
    device_functions = _get_device_functions(ctx, ops.get_default_graph())
    if device_functions != self._device_functions:
      raise ValueError(
          "The current device stack does not match the device stack under "
          "which the TensorFlow function '%s' was created.\n"
          "Current device stack: %s\n%s device stack: %s" %
          (self._inference_function.name, device_functions,
           self._inference_function.name, self._device_functions))

    for v in self._func_graph.variables:
      if v.trainable:
        # Notify any active tape that the variable was read, so gradients
        # can flow back to it.
        tape.variable_accessed(v)

    captures = self._resolve_captured_inputs()
    tensor_inputs = [x for x in nest.flatten(args) if isinstance(x, ops.Tensor)]
    args = tensor_inputs + captures

    if tape.should_record(tensor_inputs) or tape.should_record(captures):
      # Some input is being watched: run the forward function that also
      # exposes intermediates, and record the op on the tape.
      return self._backprop_call(args)

    outputs = self._inference_function.call(ctx, args)
    return self._build_call_outputs(outputs)

  @property
  def graph(self):
    """Returns the graph from which this function was constructed."""
    return self._func_graph

  @property
  def variables(self):
    """Returns all variables touched by this function."""
    return self._func_graph.variables

  @property
  def inputs(self):
    """Returns tensors in `self.graph` corresponding to arguments."""
    return self._func_graph.inputs

  @property
  def outputs(self):
    """Returns tensors in `self.graph` corresponding to return values."""
    return self._func_graph.outputs

  @property
  def captured_inputs(self):
    """Returns external Tensors captured by this function.

    self.__call__(*args) passes `args + self.captured_inputs` to the function.
    """
    return self._captured_inputs

  @property
  def function_def(self):
    """Returns a `FunctionDef` object representing this function."""
    return self._inference_function.definition

  @property
  def output_shapes(self):
    """The function's output shapes."""
    # TODO(ebrevdo): Should we only keep the output shapes associated
    # with len(self._python_returns) outputs?
    # TODO(akshayka): Consider removing this.
    outputs_list = nest.flatten(self._func_graph.structured_outputs)
    # `j` walks the flat `self._output_shapes` list, which has one entry per
    # component tensor (IndexedSlices expand to 2 or 3 components).
    j = 0
    for i, o in enumerate(outputs_list):
      if o is not None:
        if isinstance(o, ops.IndexedSlices):
          # Extract the shape of the `IndexedSlices` object's `values` field.
          outputs_list[i] = self._output_shapes[j]  # the `values` shape
          if o.dense_shape is not None:
            j += 3  # skip over shapes for `values`, `indices`, `dense_shape`
          else:
            j += 2  # skip over shapes for `values`, `indices`
        else:
          outputs_list[i] = self._output_shapes[j]
          j += 1
    return nest.pack_sequence_as(self._func_graph.structured_outputs,
                                 outputs_list)

  @property
  def output_dtypes(self):
    # TODO(akshayka): Consider removing this.
    return nest.map_structure(lambda x: x.dtype if x is not None else None,
                              self._func_graph.structured_outputs)

  def _construct_backprop_function(self):
    """Constructs the backprop function object for this function."""
    backwards_graph = FuncGraph(_backward_name(self._func_graph.name))
    with backwards_graph.as_default():
      # One placeholder per forward output, holding the incoming gradient.
      gradients_wrt_outputs = [
          graph_placeholder(x.dtype, x.shape) for x in self._func_graph.outputs
      ]
      gradients_wrt_inputs = gradients_impl._GradientsHelper(  # pylint: disable=protected-access
          self._func_graph.outputs,
          self._func_graph.inputs,
          grad_ys=gradients_wrt_outputs,
          src_graph=self._func_graph)

    # The forward function additionally exposes the tensors the backward
    # graph captured, so they can be fed back in at gradient time.
    self._forward_function = _EagerDefinedFunction(
        _forward_name(
            self._func_graph.name), self._func_graph, self._func_graph.inputs,
        self._func_graph.outputs + list(backwards_graph.captures.keys()),
        self._attrs)

    # The ordering of `backwards_graph.inputs` is important: inputs of
    # `self._backward_graph_function` correspond to outputs of
    # `self._forward_function`.
    backwards_graph.inputs = gradients_wrt_outputs + list(
        backwards_graph.captures.values())
    # Clear captures, since we pass them in as inputs.
    backwards_graph.captures = {}
    backwards_graph.outputs.extend(
        grad for grad in _flatten(gradients_wrt_inputs) if grad is not None)
    backwards_graph.structured_outputs = gradients_wrt_inputs
    self._backward_graph_function = Function(
        backwards_graph, attrs=self._attrs)

  def _backprop_call(self, args):
    """Calls the forward function and records the result on a tape.

    (Only records results on a tape if the function has outputs)

    Args:
      args: All inputs to the function, including resolved captured inputs

    Returns:
      The call output.
    """
    if self._backward_graph_function is None:
      self._construct_backprop_function()

    ctx = context.context()
    outputs = self._forward_function.call(ctx, args)
    if isinstance(outputs, ops.Operation) or outputs is None:
      return outputs

    # `real_outputs` are the actual outputs of the inference graph function;
    # `side_outputs` are the intermediate Tensors that were added as outputs to
    # the forward graph function so that we can compute its gradient.
    real_outputs = outputs[:self._num_outputs]
    side_outputs = outputs[self._num_outputs:]

    def backward_function(*args):
      # The backward function consumes output gradients plus the forward
      # pass's side outputs.
      return self._backward_graph_function(*(list(args) + side_outputs))  # pylint: disable=not-callable

    tape.record_operation(self._forward_function.signature.name, real_outputs,
                          args, backward_function)
    return self._build_call_outputs(real_outputs)

  def _resolve_captured_inputs(self):
    """Resolve captured distributed variables to their current values.

    Some inputs can be distributed variables. Such variables yield a different
    component (i.e. actual tf.Variable) variables depending on the context of
    execution.

    Returns:
      a list of resolved captured input tensors.
    """
    if self._distributed_variables:
      # Loop over each captured input and check if it corresponds to something
      # distributed. If so, get its _distributed_container and fetch the
      # component appropriate for the current execution context.
      resolved_captured_inputs = self._captured_inputs[:]
      for i, captured_input in enumerate(self._captured_inputs):
        distributed_var = self._distributed_variables.get(captured_input, None)
        if distributed_var is not None:
          # distributed variables override __getattr__ and substitute the
          # right component variable. In here, `distributed_var.handle`
          # actually does the equivalent of
          # distributed_var.get_current_component_var().handle.
          resolved_captured_inputs[i] = distributed_var.handle
      return resolved_captured_inputs
    return self._captured_inputs

  def _build_call_outputs(self, result):
    """Maps the fdef output list to actual output structure.

    Args:
      result: Output lists defined by FunctionDef.

    Returns:
      The actual call output.
    """
    if self._func_graph.structured_outputs is None:
      return result

    # Use `nest.flatten` instead of `_flatten` in order to preserve any
    # IndexedSlices in `self._func_graph.structured_outputs`.
    outputs_list = nest.flatten(self._func_graph.structured_outputs)
    # `j` indexes the flat `result` list; IndexedSlices consume 2 or 3
    # consecutive component tensors.
    j = 0
    for i, o in enumerate(outputs_list):
      if o is not None:
        if isinstance(o, ops.IndexedSlices):
          # Repack Tensors for IndexedSlices.
          if o.dense_shape is not None:
            outputs_list[i] = ops.IndexedSlices(
                values=result[j],
                indices=result[j + 1],
                dense_shape=result[j + 2])
            j += 3
          else:
            outputs_list[i] = ops.IndexedSlices(
                values=result[j], indices=result[j + 1])
            j += 2
        else:
          outputs_list[i] = result[j]
          j += 1
    ret = nest.pack_sequence_as(self._func_graph.structured_outputs,
                                outputs_list)
    return ret
def _get_defun_inputs_from_signature(signature):
  """Maps a signature to graph-construction inputs."""
  # Create one placeholder per leaf spec, then restore the signature's
  # nesting structure.
  placeholders = []
  for spec in nest.flatten(signature):
    placeholders.append(graph_placeholder(spec.dtype, spec.shape))
  return nest.pack_sequence_as(signature, placeholders)
def _get_defun_inputs_from_args(args):
  """Maps python function args to graph-construction inputs."""
  # Tensors are replaced by placeholders of matching dtype/shape; any other
  # Python value passes through unchanged.
  placeholders = []
  for arg in nest.flatten(args):
    if isinstance(arg, ops.Tensor):
      placeholders.append(graph_placeholder(arg.dtype, arg.shape))
    else:
      placeholders.append(arg)
  return nest.pack_sequence_as(args, placeholders)
def func_graph_from_py_func(name, python_func, args, kwds, signature=None):
  """Returns a `FuncGraph` generated from `python_func`.

  Args:
    name: an identifier for the function.
    python_func: the Python function to trace.
    args: the positional args with which the Python function should be called;
      ignored if a signature is provided.
    kwds: the keyword args with which the Python function should be called;
      ignored if a signature is provided.
    signature: a possibly nested sequence of `TensorSpecs` specifying the shapes
      and dtypes of the arguments. When a signature is provided, `args` and
      `kwds` are ignored, and `python_func` is traced with Tensors conforming
      to `signature`. If `None`, the shapes and dtypes are inferred from the
      inputs.

  Returns:
    A FuncGraph.

  Raises:
    TypeError: If any of `python_func`'s return values is neither `None` nor a
      `Tensor`.
  """
  func_graph = FuncGraph(name)
  # Trace inside the new graph; `a` adds automatic control dependencies to
  # the traced ops so stateful ops keep their program order.
  with func_graph.as_default(), AutomaticControlDependencies() as a:
    variable_scope.get_variable_scope().set_use_resource(True)

    if signature is None:
      func_args = _get_defun_inputs_from_args(args)
      func_kwds = _get_defun_inputs_from_args(kwds)
    else:
      func_args = _get_defun_inputs_from_signature(signature)
      func_kwds = {}

    # Note: `nest.flatten` sorts by keys, as does `_deterministic_dict_values`.
    func_graph.inputs.extend(
        x for x in nest.flatten(func_args) + nest.flatten(func_kwds)
        if isinstance(x, ops.Tensor))

    # Variables to help check whether mutation happens in calling the function
    # Copy the recursive list, tuple and map structure, but not base objects
    func_args_before = nest.pack_sequence_as(func_args, nest.flatten(func_args))
    func_kwds_before = nest.pack_sequence_as(func_kwds, nest.flatten(func_kwds))

    def convert(x):
      """Converts an argument to a Tensor."""
      if x is None:
        return None
      try:
        x = ops.convert_to_tensor_or_indexed_slices(x)
      except (ValueError, TypeError):
        raise TypeError(
            "To be compatible with tf.contrib.eager.defun, Python functions "
            "must return zero or more Tensors; in compilation of %s, found "
            "return value of type %s, which is not a Tensor." %
            (str(python_func), type(x)))
      # Mark as a return value so automatic control dependencies apply to it.
      x = a.mark_as_return(x)
      return x

    # Push a tape so the variables `python_func` touches can be collected.
    this_tape = tape.push_new_tape()
    try:
      func_outputs = python_func(*func_args, **func_kwds)
      # invariant: `func_outputs` contains only Tensors and `None`s.
      func_outputs = nest.map_structure(convert, func_outputs)

      def check_mutation(n1, n2):
        """Check if two list of arguments are exactly the same."""
        errmsg = ("Function to be traced should not modify structure of input "
                  "arguments. Check if your function has list and dictionary "
                  "operations that alter input arguments, "
                  "such as `list.pop`, `list.append`")
        try:
          nest.assert_same_structure(n1, n2)
        except ValueError:
          raise ValueError(errmsg)

        # Same structure is not enough: the leaf objects must be identical.
        for arg1, arg2 in zip(nest.flatten(n1), nest.flatten(n2)):
          if arg1 is not arg2:
            raise ValueError(errmsg)

      check_mutation(func_args_before, func_args)
      check_mutation(func_kwds_before, func_kwds)
    finally:
      tape.pop_tape(this_tape)

    func_graph.structured_outputs = func_outputs
    # Returning a closed-over tensor does not trigger convert_to_tensor.
    func_graph.outputs.extend(
        func_graph.capture(x)
        for x in _flatten(func_graph.structured_outputs)
        if x is not None)

    # Some captured variables might be components of DistributedValues.
    # Instead of storing non-distributed component variables, we
    # store their distributed containers so we can retrieve the correct
    # component variables at call-time.
    variables = list(this_tape.watched_variables())
    strategy = distribution_strategy_context.get_distribution_strategy()
    for i, variable in enumerate(variables):
      # If variable is not distributed value_container returns itself.
      variables[i] = strategy.value_container(variable)
    func_graph.variables = variables

  # Register any other functions defined in the graph.
  if context.executing_eagerly():
    for f in func_graph._functions.values():  # pylint: disable=protected-access
      # TODO(ashankar): What about the gradient registry?
      _register(f._c_func.func)  # pylint: disable=protected-access

  return func_graph
# Lightweight stand-in for a Tensor in `defun` cache keys: only the dtype and
# the (tuple-form) shape matter for graph specialization, not tensor values.
_TensorType = collections.namedtuple("_TensorType", ["dtype", "shape"])
def _encode_arg(arg):
  """A canonical representation for this argument, for use in a cache key."""
  # `defun` uses dtypes and shapes instead of `Tensors` as cache keys. Dtypes
  # are used because TensorFlow graphs are not parametric w.r.t. dtypes. Shapes
  # are used for both performance reasons, as much TensorFlow code specializes
  # on known shapes to produce slimmer graphs, and correctness, as some
  # high-level APIs require shapes to be fully-known.
  #
  # TODO(akshayka): Add support for sparse tensors.
  #
  # pylint: disable=protected-access
  if isinstance(arg, ops.Tensor):
    return _TensorType(arg.dtype, arg._shape_tuple())
  if isinstance(arg, ops.IndexedSlices):
    # Encode the two mandatory components, then the dense shape if present.
    encoded = [
        _TensorType(arg.values.dtype, arg.values._shape_tuple()),
        _TensorType(arg.indices.dtype, arg.indices._shape_tuple()),
    ]
    if arg.dense_shape is not None:
      encoded.append(
          _TensorType(arg.dense_shape.dtype, arg.dense_shape._shape_tuple()))
    return tuple(encoded)
  # pylint: enable=protected-access
  if isinstance(arg, (list, tuple)):
    # Recurse element-wise, preserving order.
    return tuple(_encode_arg(item) for item in arg)
  if isinstance(arg, dict):
    # Sort by key so logically-equal dicts encode identically.
    return tuple(
        (_encode_arg(key), _encode_arg(arg[key])) for key in sorted(arg))
  # Any other Python value participates in the key as-is.
  return arg
def _deterministic_dict_values(dictionary):
return tuple(dictionary[key] for key in sorted(dictionary))
class PolymorphicFunction(object):
  """Wrapper class for the graph functions defined for a Python function.

  See the documentation for `defun` for more information on the semantics of
  defined functions.

  PolymorphicFunction class is thread-compatible meaning that minimal
  usage of defuns (defining and calling) is thread-safe, but if users call other
  methods or invoke the base `python_function` themselves, external
  synchronization is necessary.
  """

  def __init__(self,
               python_function,
               name,
               input_signature=None,
               attributes=None):
    """Initializes a polymorphic function.

    Args:
      python_function: the function to be wrapped.
      name: the name given to it.
      input_signature: a possibly nested sequence of `TensorSpec` objects
        specifying the input signature of this function. If `None`, a separate
        function is instantiated for each inferred input signature.
      attributes: dict, extra keyword arguments that will be added as attribute
        of the function.

    Raises:
      ValueError: if `input_signature` is not None and the `python_function`'s
        argspec has keyword arguments.
    """
    if isinstance(python_function, functools.partial):
      # Unwrap partials so the argspec below reflects the underlying function;
      # the bound args/kwargs are re-applied during input canonicalization.
      self._python_function = python_function.func
      self._args_to_prepend = python_function.args or tuple()
      self._kwds_to_include = python_function.keywords or {}
    else:
      self._python_function = python_function
      self._args_to_prepend = tuple()
      self._kwds_to_include = {}
    self._name = name
    # Maps cache keys (see `_cache_key`) to traced `Function` objects.
    self._function_cache = collections.OrderedDict()
    self._variables = []
    self._function_attributes = attributes or {}

    # Guards `_function_cache` and `_variables` during `_maybe_define_function`.
    self._lock = threading.Lock()

    fullargspec = tf_inspect.getfullargspec(self._python_function)
    if tf_inspect.ismethod(self._python_function):
      # Remove `self`: default arguments shouldn't be matched to it.
      args = fullargspec.args[1:]
    else:
      args = fullargspec.args

    # A cache mapping from argument name to index, for canonicalizing
    # arguments that are called in a keyword-like fashion.
    self._args_to_indices = {arg: i for i, arg in enumerate(args)}
    # A cache mapping from arg index to default value, for canonicalization.
    offset = len(args) - len(fullargspec.defaults or [])
    self._arg_indices_to_default_values = {
        offset + index: default
        for index, default in enumerate(fullargspec.defaults or [])
    }
    if input_signature is None:
      self._input_signature = None
    else:
      if fullargspec.varkw is not None or fullargspec.kwonlyargs:
        raise ValueError("Cannot define a TensorFlow function from a Python "
                         "function with keyword arguments when "
                         "input_signature is provided.")

      if not isinstance(input_signature, (tuple, list)):
        raise TypeError("input_signature must be either a tuple or a "
                        "list, received " + str(type(input_signature)))

      self._input_signature = tuple(input_signature)
      self._flat_input_signature = tuple(nest.flatten(input_signature))

  def __call__(self, *args, **kwds):
    """Calls a graph function specialized to the inputs."""
    graph_function, inputs = self._maybe_define_function(*args, **kwds)
    return graph_function(*inputs)

  @property
  def python_function(self):
    """Returns the wrapped Python function."""
    return self._python_function

  # TODO(akshayka): Remove this property.
  @property
  def variables(self):
    """Returns the union of all variables referenced by cached `Function`s`."""
    return self._variables

  def get_concrete_function(self, *args, **kwargs):
    """Returns a `Function` object specialized to inputs and execution context.

    `args` and `kwargs` are ignored if this `PolymorphicFunction` was created
    with an `input_signature`.

    Args:
      *args: inputs to specialize on.
      **kwargs: inputs to specialize on.

    Returns:
      A `Function` object traced for the given inputs (from the cache when
      the inputs match a previously seen signature).
    """
    graph_function, _ = self._maybe_define_function(*args, **kwargs)
    return graph_function

  def __get__(self, instance, owner):
    """Makes it possible to defun instance methods."""
    del owner
    # `instance` here is the instance that this `PolymorphicFunction` was
    # accessed through; e.g., for
    #
    #   class Foo(object):
    #
    #     @function.defun
    #     def bar(self):
    #       ...
    #
    #   foo = Foo()
    #   foo.bar()  # `foo.bar` is a `PolymorphicFunction` instance
    #
    # then `instance` will be `foo` (and `owner` will be `Foo`).
    return functools.partial(self.__call__, instance)

  def _cache_key(self, args, kwds, ctx, graph):
    """Computes the cache key given inputs and execution context."""
    if self._input_signature is None:
      inputs = (args, kwds) if kwds else args
      cache_key = tuple(_encode_arg(arg) for arg in inputs)
    else:
      # With an input signature, only the signature itself keys the cache.
      del args, kwds
      cache_key = self._flat_input_signature

    # The graph, or whether we're executing eagerly, should be a part of the
    # cache key so we don't improperly capture tensors such as variables.
    executing_eagerly = ctx.executing_eagerly()
    execution_context = executing_eagerly or graph

    # Putting the device in the cache key ensures that call-site device
    # annotations are respected.
    device_functions = _get_device_functions(ctx, graph)

    # `ops.colocate_with` directives translate into `ops.device` directives when
    # eager execution is enabled.
    colocation_stack = (None if executing_eagerly else
                        tuple(graph._colocation_stack.peek_objs()))  # pylint: disable=protected-access

    return cache_key + (execution_context, device_functions, colocation_stack)

  def _canonicalize_function_inputs(self, *args, **kwds):
    """Canonicalizes `args` and `kwds`.

    Canonicalize the inputs to the Python function using its fullargspec. In
    particular, we parse the varags and kwargs that this
    `PolymorphicFunction` was called with into a tuple corresponding to the
    Python function's positional (named) arguments and a dictionary
    corresponding to its kwargs.

    Args:
      *args: The varargs this object was called with.
      **kwds: The keyword args this function was called with.

    Returns:
      A canonicalized ordering of the inputs.

    Raises:
      ValueError: If a keyword in `kwds` cannot be matched with a positional
        argument when an input signature is specified, or when the inputs
        do not conform to the input signature.
    """
    # Re-apply any args/kwargs bound by a functools.partial wrapper.
    args = self._args_to_prepend + args
    kwds = dict(kwds, **self._kwds_to_include)

    # Maps from index of arg to its corresponding value, according to `args`
    # and `kwds`; seeded with the default values for the named args that aren't
    # in `args`.
    arg_indices_to_values = {
        index: default
        for index, default in six.iteritems(self._arg_indices_to_default_values)
        if index >= len(args)
    }
    consumed_args = []
    for arg, value in six.iteritems(kwds):
      index = self._args_to_indices.get(arg, None)
      if index is not None:
        arg_indices_to_values[index] = value
        consumed_args.append(arg)
      elif self._input_signature is not None:
        raise ValueError("Cannot define a TensorFlow function from a Python "
                         "function with keyword arguments when "
                         "input_signature is provided.")
    for arg in consumed_args:
      # After this loop, `kwds` will only contain true keyword arguments, as
      # opposed to named arguments called in a keyword-like fashion.
      kwds.pop(arg)
    inputs = args + _deterministic_dict_values(arg_indices_to_values)
    flat_inputs = nest.flatten(inputs)

    # Check for NumPy arrays in arguments and convert them to Tensors.
    need_packing = False
    for index, value in enumerate(flat_inputs):
      if isinstance(value, np.ndarray):
        flat_inputs[index] = constant_op.constant(value)
        need_packing = True
    if need_packing:
      inputs = nest.pack_sequence_as(structure=inputs,
                                     flat_sequence=flat_inputs)
    if self._input_signature is None:
      return inputs, kwds
    else:
      assert not kwds
      try:
        nest.assert_same_structure(self._input_signature, inputs)
      except (ValueError, TypeError):
        raise ValueError("Structure of Python function inputs does not match "
                         "input_signature.")
      if any(not isinstance(arg, ops.Tensor) for arg in flat_inputs):
        raise ValueError("When input_signature is provided, all inputs to "
                         "the Python function must be Tensors.")
      tensor_specs = [
          tensor_spec.TensorSpec.from_tensor(tensor) for tensor in flat_inputs
      ]
      if any(not spec.is_compatible_with(other)
             for spec, other in zip(self._flat_input_signature, tensor_specs)):
        raise ValueError("Python inputs incompatible with input_signature: "
                         "inputs (%s), input_signature (%s)" %
                         (str(inputs), str(self._input_signature)))
      return inputs, {}

  def _maybe_define_function(self, *args, **kwds):
    """Gets a function for these inputs, defining it if necessary.

    Args:
      *args: args for the Python function.
      **kwds: keywords for the Python function.

    Returns:
      A graph function corresponding to the input signature implied by args and
      kwds, as well as the inputs that the object should be called with.

    Raises:
      ValueError: If inputs are incompatible with the input signature.
      TypeError: If the function inputs include non-hashable objects
    """
    args, kwds = self._canonicalize_function_inputs(*args, **kwds)
    cache_key = self._cache_key(args, kwds, context.context(),
                                ops.get_default_graph())
    with self._lock:
      try:
        graph_function = self._function_cache.get(cache_key, None)
      except TypeError:
        raise TypeError("Arguments supplied to `defun`-generated functions "
                        "must be hashable.")

      if graph_function is None:
        # Cache miss: trace the Python function for this signature.
        graph_function = Function(
            func_graph_from_py_func(self._name, self._python_function, args,
                                    kwds, self._input_signature),
            self._function_attributes)
        self._variables.extend(
            [v for v in graph_function.variables if v not in self._variables])
        self._function_cache[cache_key] = graph_function
      return graph_function, (args, kwds)
def register(func, *args, **kwargs):
  """Register the defun function into the graph.

  This won't actually call the function with the inputs, and only put the
  function definition into graph. Register function with different input param
  will result into multiple version of functions registered in graph.

  Args:
    func: the PolymorphicFunction instance that generated by a @defun
    *args: input arguments for the Python function.
    **kwargs: input keyword arguments for the Python function.

  Returns:
    a `Function` object specialized to inputs and execution context.

  Raises:
    ValueError: When the input function is not a defun wrapped python function.
  """
  if not isinstance(func, PolymorphicFunction):
    raise ValueError("Only defun function is allowed to be registered. "
                     "Got type: %s" % type(func))
  concrete = func.get_concrete_function(*args, **kwargs)
  # Add only the inference definition to the default graph.
  # pylint: disable=protected-access
  concrete._inference_function.add_to_graph(ops.get_default_graph())
  # pylint: enable=protected-access
  # TODO(scottzhu): support concrete_func._backward_graph_function in future.
  return concrete
def _validate_signature(signature):
  """Checks that every leaf of `signature` is a `TensorSpec`.

  Args:
    signature: a possibly nested sequence of objects to validate.

  Raises:
    TypeError: if any flattened element of `signature` is not a
      `tensor_spec.TensorSpec`.
  """
  if any(not isinstance(arg, tensor_spec.TensorSpec)
         for arg in nest.flatten(signature)):
    # Bug fix: the original message ended with a bare "%s" because the format
    # arguments were never applied; interpolate the offending signature.
    raise TypeError("Invalid input_signature %s; input_signature must be "
                    "a possibly nested sequence of TensorSpec objects." %
                    (signature,))
def defun(func=None, input_signature=None):
"""Compiles a Python function into a callable TensorFlow graph.
`defun` (short for "define function") trace-compiles a Python function
composed of TensorFlow operations into a callable that executes a `tf.Graph`
containing those operations. The callable produced by `defun` contains only
the subgraph of TensorFlow operations that were executed when the Python
function was called with a particular input signature, defined as a list
of the shapes and dtypes of the Python function's Tensor-valued arguments and
the values of its non-Tensor Python objects. In particular, `defun` is _not_ a
compiler for arbitrary Python code.
When eager execution is enabled, the ability to create graphs from Python
functions makes it possible to incrementally trade off debugability and
interactivity for performance. Functions compiled with `defun` cannot be
inspected with `pdb` and `print` statements; however, executing a graph
generated by `defun` sometimes takes less time and memory than eagerly
executing the corresponding Python function, since specifying computations as
graphs allows for optimizations like automatic buffer reuse and
parallelization among ops. Note that executing a `defun`-compiled function
incurs a small constant overhead, so eagerly executing sufficiently small
Python functions might take less time than executing their corresponding
`defun`-generated graphs.
For a Python function to be compatible with `defun`, all of its arguments must
be hashable Python objects or lists thereof. The function itself may not
modify the list/map structure of its arguments. Additionally, it must return
zero or more `tf.Tensor` objects. If the Python function returns
a `tf.Variable`, its compiled version will return the value of that variable
as a `tf.Tensor`.
Executing a graph generated by `defun` respects device annotations (i.e.,
all `with tf.device` directives present in a Python function will also be
present in its corresponding graph), but it is not yet possible to execute the
generated graphs across multiple machines.
_Example Usage_
```python
import tensorflow as tf
tf.enable_eager_execution()
# A simple example.
def f(x, y):
return tf.reduce_mean(tf.multiply(x ** 2, 3) + y)
g = tf.contrib.eager.defun(f)
x = tf.constant([[2.0, 3.0]])
y = tf.constant([[3.0, -2.0]])
# `f` and `g` will return the same value, but `g` will be executed as a
# TensorFlow graph.
assert f(x, y).numpy() == g(x, y).numpy()
# `defun` is capable of compiling Python functions that close over Python
# objects, including Tensors and Variables.
@tf.contrib.eager.defun
def h():
return f(x, y)
assert (h().numpy() == f(x, y).numpy()).all()
# `defun` automatically lifts variables out of the graphs it creates,
# allowing you to compile the `call` methods of `tf.keras.layers.Layer` and
# `tf.keras.Model` objects.
class MyModel(tf.keras.Model):
def __init__(self, keep_probability=0.2):
super(MyModel, self).__init__()
self.dense1 = tf.keras.layers.Dense(4, activation=tf.nn.relu)
self.dense2 = tf.keras.layers.Dense(5, activation=tf.nn.softmax)
self.keep_probability = keep_probability
@tf.contrib.eager.defun
def call(self, inputs, training=True):
x = self.dense2(self.dense1(inputs))
if training:
return tf.nn.dropout(x, self.keep_probability)
else:
return x
model = MyModel()
model(x, training=True) # executes a graph, with dropout
model(x, training=False) # executes a graph, without dropout
# `defun`-compiled functions are differentiable.
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)
with tf.GradientTape() as tape:
outputs = model(x)
gradient = tape.gradient(outputs, model.trainable_variables)
optimizer.apply_gradients((grad, var) for grad, var in zip(gradient,
model.trainable_variables))
```
When using `defun`, there are subtleties regarding inputs, Python control
flow, and variable creation that one should be aware of. For concreteness, let
`f` be a Python function that returns zero or more `tf.Tensor` objects and
let `F = defun(f)`. `F` builds a graph for each unique input signature it
sees, Python control flow is baked into graphs, and operations related to
variable initialization are automatically lifted out of the graphs that `F`
generates and placed in the eager context if executing eagerly or into an
outer graph otherwise.
_Input Signatures_
By default, `F = tf.contrib.eager.defun(f)` instantiates a separate graph
for every unique sequence of the shapes and dtypes of Tensor arguments and
the values of Python objects it is invoked with. For example, calling
`F(tf.random_uniform([2])` will execute a different graph than
`F(tf.random_uniform([3])` because the two inputs have different shapes.
The first time that `F(*args, **kwargs)` is called with a particular sequence
of Tensor shapes and dtypes and Python values, it constructs a graph by
tracing the execution of `f(*args, **kwargs)`; this graph is bound to an
input signature inferred from `(*args, **kwargs)` and cached for future reuse.
NumPy arrays passed as inputs to `F` are converted to `tf.Tensor` objects
before being passed to `f`, and are treated as Tensors for caching. This
allows a function to be called multiple times with NumPy arrays having
different values but the same shape and dtype without re-tracing each time.
`tf.contrib.eager.defun` caches graphs for your convenience, letting you
define TensorFlow functions without explicitly specifying their signatures.
However, this policy is conservative and potentially expensive; for example,
when different invocations of your function have differently-shaped Tensor
inputs, this policy might generate more graph functions than necessary. To
eliminate such costs, `tf.contrib.eager.defun` allows you to supply an
optional `input_signature` argument specifying the shapes and dtypes of the
inputs. In particular, the shapes may be partially unspecified, with `None`s
in the unknown dimensions. When an input signature is provided,
`tf.contrib.eager.defun` will only instantiate a single graph for the
decorated Python function. The following is an example:
```python
import tensorflow as tf
# The first `TensorSpec` below describes the shape and dtype of `words`,
# and the second describes the shape and dtype of `another_tensor`. Note that
# the last dimension of the `words` `TensorSpec` is left unspecified.
@tf.contrib.eager.defun(input_signature=[
tf.contrib.eager.TensorSpec(shape=[50, 300, None], dtype=tf.float32),
tf.contrib.eager.TensorSpec(shape=[300, 100], dtype=tf.float32)
])
def my_sequence_model(words, another_tensor):
...
# Note how the third dimension of the first input can vary freely.
words = tf.random_uniform(([50, 300, 10])
second_input = tf.random_uniform([300, 100])
my_sequence_model(words, second_input)
words = tf.random_uniform(([50, 300, 20])
my_sequence_model(words, second_input)
# Passing an input with an incompatible shape will raise an error.
words = tf.random_uniform(([50, 100, 20])
my_sequence_model(words, second_input) # <---- This will raise an error.
```
Python functions that are compiled with an `input_signature` must only accept
Tensors as arguments and must not take unnamed keyword arguments (**kwargs).
_Tracing_
Be aware that because `F` only logs TensorFlow operations, all the other
Python code that `f` executes will only shape the _construction_ of the graphs
that `F` executes: the Python code won't be executed when the graphs
themselves are executed, though it will be executed every time the Python
function is traced (and a given Python function might be traced multiple
times, once for each input signature it is invoked with). For example, whereas
the Python function
```python
import tensorflow as tf
import numpy as np
tf.enable_eager_execution()
def add_noise():
return tf.eye(5) + np.random.randn(5, 5)
```
will return a different output everytime it is invoked, the compiled function
`compiled = tf.contrib.eager.defun(add_noise)` will return the same value
every time it is called, since a particular random offset generated by NumPy
will be inserted into the graph as a TensorFlow constant. The solution is to
replace the call to `np.random.randn` with `tf.random_normal((5, 5))`.
_Python Side-Effects_
A corollary of the previous discussion on tracing is the following: If a
Python function `f` has Python side-effects, then executing `f` multiple times
will not necessarily be semantically equivalent to executing `F =
tf.contrib.eager.defun(f)` multiple times; this difference is due to the fact
that `defun` only captures the subgraph of TensorFlow operations that is
constructed when `f` is called in a graph-building context.
_Python Control Flow_.
The structure of many machine learning computations depend upon whether one is
training or validating, and it is common to nest specialized logic under `if
training:` blocks. By mapping each input signature to a unique graph, `defun`
lets users transparently compile such code, as the following code snippet
demonstrates:
```python
import tensorflow as tf
tf.enable_eager_execution()
@tf.contrib.eager.defun
def lossy_matmul(W, x, training=True):
outputs = tf.matmul(W, x)
if training:
outputs = tf.nn.dropout(outputs, keep_probability=0.2)
return outputs
W = tf.random_normal((3, 5))
x = tf.random_normal((5, 1))
# Executes a graph that applies dropout.
lossy_outputs = lossy_matmul(W, x, training=True)
# Executes a graph that does not apply dropout.
exact_outputs = lossy_matmul(W, x, training=False)
```
On the other hand, because `defun` generates graphs by tracing and not by
source code analysis, it fully unrolls Python `for` and `while` loops,
potentially creating large graphs. If your Python function has native loops
that run for many iterations, consider replacing them with `tf.while_loop`
operations.
When constructing graphs, `tf.Tensor` objects cannot be used as Python
`bool` objects. This means, for example, that you should replace code in `f`
resembling
```python
if tensor < 10:
true_fn()
else:
false_fn()
```
with `tf.cond(tensor < 10, true_fn, false_fn)`.
_Variables_
TensorFlow operations related to variable creation and initialization are
automatically lifted out of the graphs generated by `defun`. In practice, this
implies that variable creation and initialization only happen the first time
`F` is called, and that variables are reused every time thereafter. Many
TensorFlow APIs, like `tf.keras.layers.Layer` objects, create variables the
first time they are called and reuse them thereafter. Automatic variable
lifting makes it possible to compile these APIs without extra effort, at the
cost of introducing a discrepancy between the semantics of executing Python
functions and their corresponding compiled functions. For example:
```python
import tensorflow as tf
tf.enable_eager_execution()
def fn():
x = tf.Variable(0.0)
x.assign_add(1.0)
return x.read_value()
# `fn` is a Python function, so x is created, initialized, and destroyed upon
# every invocation
assert fn().numpy() == fn().numpy() == 1.0
compiled = tf.contrib.eager.defun(fn)
# Compiling `fn` with `defun` hoists all variables outside of the generated
# graph, so initialization happens exactly once.
assert compiled().numpy() == 1.0
assert compiled().numpy() == 2.0
```
Finally, because each input signature is bound to a unique graph, if your
Python function constructs `tf.Variable` objects, then each graph constructed
for that Python function will reference a unique set of variables. To
circumvent this problem, we recommend against compiling Python functions that
create `tf.Variable` objects. Instead, Python functions should either
lexically close over `tf.Variable` objects or accept them as arguments,
preferably encapsulated in an object-oriented container. If you must create
variables inside your Python function and you want each graph generated for it
to reference the same set of variables, add logic to your Python function that
ensures that variables are only created the first time it is called and are
reused for every subsequent invocation; note that this is precisely what
`tf.keras.layers.Layer` objects do, so we recommend using them to represent
variable-bearing computations whenever possible.
Args:
func: function to be compiled. If `func` is None, returns a
decorator that can be invoked with a single argument - `func`. The
end result is equivalent to providing all the arguments up front.
In other words, defun(input_signature=...)(func) is equivalent to
defun(func, input_signature=...). The former allows
the following use case:
@tf.contrib.eager.defun(input_signature=...)
def foo(...):
...
input_signature: A possibly nested sequence of
`tf.contrib.eager.TensorSpec` objects specifying the shapes and dtypes of
the Tensors that will be supplied to this function. If `None`, a separate
function is instantiated for each inferred input signature. If a
signature is specified, every input to `func` must be a `Tensor`, and
`func` cannot accept `**kwargs`.
Returns:
If `func` is not None, returns a callable that will execute the compiled
function (and return zero or more `tf.Tensor` objects).
If `func` is None, returns a decorator that, when invoked with a single
`func` argument, returns a callable equivalent to the case above.
Raises:
TypeError: If `input_signature` is neither `None` nor a sequence of
`tf.contrib.eager.TensorSpec` objects.
"""
return defun_with_attributes(func=func, input_signature=input_signature)
def defun_with_attributes(func=None, input_signature=None, attributes=None):
  """Compiles a Python function into a callable TensorFlow graph.

  Behaves like `defun` (see its documentation for details), but additionally
  attaches `attributes` to the generated function definition. This is not
  exposed in the public API: attribute names are whitelisted and only
  primitive-typed values are supported; violations surface as ValueError
  when the function is built.

  Args:
    func: function to be compiled, or None to obtain a decorator instead.
    input_signature: same as defun()'s input_signature.
    attributes: optional dictionary of function-def attributes.

  Returns:
    Same as the return value of defun, with attributes added to the function
    in graph.
  """
  if input_signature is not None:
    _validate_signature(input_signature)

  # TODO(apassos): deal with captured global state. Deal with control flow.
  def _wrap(py_func):
    # Callables without a __name__ (e.g. functools.partial) fall back to a
    # generic name.
    func_name = getattr(py_func, "__name__", "function")
    return tf_decorator.make_decorator(
        py_func,
        PolymorphicFunction(py_func, func_name,
                            input_signature=input_signature,
                            attributes=attributes))

  # `foo = tfe.defun(foo, ...)` use case: decorate right away.
  if func is not None:
    return _wrap(func)

  # `@tfe.defun(...)` use case, equivalent to `foo = tfe.defun(...)(foo)`:
  # hand back the decorator itself.
  return _wrap
class AutomaticControlDependencies(object):
  """Context manager to automatically add control dependencies.

  Code under this context manager will act as if a sensible set of control
  dependencies were present. More specifically:
  1. All stateful ops in the scope will execute
  2. Stateful ops which modify the same resource will execute in program order

  Note: creating variables in an automatic control dependencies context is not
  supported (the value of the variables will never change as they will keep
  getting reinitialized).

  NOT THREAD SAFE
  """

  def __init__(self):
    # Tensors flagged via mark_as_return(); __exit__ attaches control
    # dependencies from the stateful ops onto these tensors' producing ops.
    self._returned_tensors = set()

  def mark_as_return(self, tensor):
    """Acts like identity but marks the `Tensor` as a return value.

    This will possibly return a copy of the `Tensor`. Usage:

    ```
    with AutomaticControlDependencies() as a:
      ...
      t = a.mark_as_return(t)
    _ = ...(t...)  # i.e. it's safe to use t here
    ```

    Args:
      tensor: the `Tensor` to be marked

    Returns:
      a copy of the `Tensor`.
    """
    if isinstance(tensor, ops.IndexedSlices):
      # IndexedSlices are marked component-wise: both the values and the
      # indices tensors get an identity copy and are tracked separately.
      values = array_ops.identity(tensor.values)
      indices = array_ops.identity(tensor.indices)
      self._returned_tensors.add(indices)
      self._returned_tensors.add(values)
      return ops.IndexedSlices(values, indices, dense_shape=tensor.dense_shape)
    # We want to make the return values depend on the stateful operations, but
    # we don't want to introduce a cycle, so we make the return value the result
    # of a new identity operation that the stateful operations definitely don't
    # depend on.
    tensor = array_ops.identity(tensor)
    self._returned_tensors.add(tensor)
    return tensor

  def __enter__(self):
    # Eager execution already runs ops in program order; nothing to track.
    if context.executing_eagerly():
      return self
    # This code assumes no other thread is adding ops to the graph while
    # we're adding ops to the graph.
    # TODO(apassos): Fix this by locking the graph or using a temporary
    # graph (but that would mess up devices and collections at least,
    # probably other things as well).
    self._graph = ops.get_default_graph()
    # Remember how many ops exist now so __exit__ only post-processes the
    # ops created inside the `with` block.
    self._n_operations = len(self._graph.get_operations())
    return self

  def _process_switch(self, switch_op, ops_which_must_run,
                      last_op_using_resource_tensor, merge_for_resource):
    """Processes a switch node for a resource input.

    When tensorflow creates a cond, it creates a control flow context for each
    branch of the cond. Each external tensor accessed by that branch is routed
    through a switch op, which gets created in the graph _after_ the op which
    uses that tensor get created.

    If the resource comes from another switch op we process that one first.

    _process_switch creates a corresponding merge node for the switch node. This
    merge node is added to the outer control flow context of the switch
    node. We also ensure that:

      1. The switch node executes after the previous op which used the resource
         tensor

      2. Any op which uses a resource output of the switch node executes before
         the merge for the switch node.

      3. The next op which uses the input resource to the switch node (which
         might be another switch node for the other branch of the conditional)
         will execute after the merge node is done.

      4. The merge node is marked as must_run so it will run even if no
         subsequent operation uses the resource.

    Args:
      switch_op: the switch op to be processed
      ops_which_must_run: the set of ops which must run
      last_op_using_resource_tensor: map from resource tensor to last op using
        it
      merge_for_resource: map from resource tensor to merge which must follow
        all usages of it.
    """
    inp = switch_op.inputs[0]
    if inp.dtype == dtypes_module.resource and inp.op.type == "Switch":
      # Nested cond: the resource itself came through another switch;
      # recurse so the inner switch is wired up first.
      self._process_switch(inp.op, ops_which_must_run,
                           last_op_using_resource_tensor, merge_for_resource)
    if switch_op.outputs[0] in merge_for_resource:
      # Already processed this switch node.
      return
    new_merge = control_flow_ops.merge(switch_op.outputs,
                                       name="artificial_merge")
    new_merge[0].op._control_flow_context = (  # pylint: disable=protected-access
        switch_op._control_flow_context.outer_context)  # pylint: disable=protected-access
    # Ensures the merge always runs
    ops_which_must_run.add(new_merge[0].op)
    if inp in last_op_using_resource_tensor:
      # Ensures the switch executes after the previous op using the resource.
      switch_op._add_control_input(last_op_using_resource_tensor[inp])  # pylint: disable=protected-access
    # Ensure the next op outside the cond happens after the merge.
    last_op_using_resource_tensor[inp] = new_merge[0].op
    if inp in merge_for_resource:
      merge_for_resource[inp]._add_control_input(new_merge[0].op)  # pylint: disable=protected-access
    for o in switch_op.outputs:
      # Ensures the merge will execute after all ops inside the cond
      merge_for_resource[o] = new_merge[0].op

  def __exit__(self, unused_type, unused_value, unused_traceback):
    if context.executing_eagerly():
      return

    if self._graph is not ops.get_default_graph():
      raise RuntimeError(
          "Graph changed while trying to add control dependencies.")

    # map from resource tensor to the last op which used it
    last_op_using_resource_tensor = {}
    # set of conditional and loop exits
    ops_which_must_run = set()
    # merge which must depend on ops which use this resource
    merge_for_resource = {}

    new_operations = self._graph.get_operations()[self._n_operations:]

    # Ensures that uses of resource tensors get serialized properly and all
    # execute. This is done by keeping a map from resource tensor to the last op
    # in graph-construction order which used it (last_op_using_resource_tensor).
    #
    # Conditionals are written in TensorFlow such that every external tensor
    # accessed in the conditional goes through a switch op and every return
    # tensor (it's guaranteed that there will be at least one) goes through a
    # merge op.
    #
    # To handle conditionals, switches are handled in a special way (see
    # comments for _process_switch). Merge nodes created by TF's conditional
    # logic (as opposed to by _process_switch) are forced to run and also get a
    # control dependency added to them to ensure all stateful ops inside their
    # control flow context run.
    #
    # We also ensure that if an op is using a resource output by a switch node
    # (that is, a resource tensor for which there's a value in
    # merge_for_resource) this op will run before the merge for that resource.
    #
    # We try to add control inputs to nodes respecting their control flow
    # contexts to avoid dead nodes propagating everywhere and leading to
    # "retval[0] doesn't have value" errors. If a node gets a control dependency
    # on a dead node (i.e. a note from an untaken control flow branch) that node
    # will be marked as dead unless it's a merge node.
    #
    # TODO(apassos): serialize non-resource-taking stateful ops as well, and
    # test that it works. Support while loops. Support init_scope escaping from
    # this.
    for op in new_operations:
      # TODO(apassos) make this code safely support while loops.
      if isinstance(op._control_flow_context, control_flow_ops.WhileContext):  # pylint: disable=protected-access
        continue
      control_inputs = set()
      # Ensure stateful ops run
      if (op.type not in self._graph._registered_ops  # pylint: disable=protected-access
          or self._graph._registered_ops[op.type].is_stateful):  # pylint: disable=protected-access
        ops_which_must_run.add(op)
      # Ignore switches (they're handled separately)
      if op.type == "Switch" and op.inputs[0].dtype == dtypes_module.resource:
        continue
      # Make merges trigger all other computation which must run
      if op.type == "Merge":
        for o in ops_which_must_run:
          op._add_control_input(o)  # pylint: disable=protected-access
          for inp in o.inputs:
            if inp in last_op_using_resource_tensor:
              last_op_using_resource_tensor[inp] = op
        # The merge now subsumes everything that had to run before it.
        ops_which_must_run = set([op])
        continue
      for inp in op.inputs:
        if inp.dtype == dtypes_module.resource:
          # Deal with switches, finally.
          if inp.op.type == "Switch":
            self._process_switch(inp.op, ops_which_must_run,
                                 last_op_using_resource_tensor,
                                 merge_for_resource)
          # Ensure uses of resources are serialized
          if inp in last_op_using_resource_tensor:
            if (last_op_using_resource_tensor[inp]._control_flow_context  # pylint: disable=protected-access
                is op._control_flow_context):  # pylint: disable=protected-access
              control_inputs.add(last_op_using_resource_tensor[inp])
          # Ensure merges happen after the closing of a cond block
          if inp in merge_for_resource:
            merge_for_resource[inp]._add_control_input(op)  # pylint: disable=protected-access
          last_op_using_resource_tensor[inp] = op
      # Only keep control inputs from the same control flow context to avoid
      # propagating dead tensors from untaken branches.
      control_inputs = [c for c in control_inputs
                        if c._control_flow_context is op._control_flow_context]  # pylint: disable=protected-access
      op._add_control_inputs(control_inputs)  # pylint: disable=protected-access

    # Ensure all ops which must run do run
    for r in self._returned_tensors:
      if ops_which_must_run:
        r.op._add_control_inputs(  # pylint: disable=protected-access
            [o for o in ops_which_must_run
             if o._control_flow_context is r.op._control_flow_context])  # pylint: disable=protected-access
def automatic_control_dependencies(f):
  """Wraps f to automatically insert control dependencies.

  The inserted dependencies ensure that:

  1. All stateful ops in f run when the result of f runs
  2. Updates to the same resources happen in order.

  Args:
    f: the function to be wrapped.

  Returns:
    The wrapped function.
  """

  def _acd_wrapped(*args, **kwargs):
    with AutomaticControlDependencies() as acd:
      outputs = f(*args, **kwargs)
      # Mark every flattened output as a return value so that it picks up
      # control dependencies on the stateful ops recorded by `acd`, then
      # restore the original nesting structure.
      marked = [acd.mark_as_return(t) for t in nest.flatten(outputs)]
      return nest.pack_sequence_as(outputs, marked)

  return tf_decorator.make_decorator(f, _acd_wrapped)
| 41.049216 | 115 | 0.707022 |
fd8701b5d283e4527f54d8ee90af0da8965bf57f | 592 | py | Python | setup.py | kenlowrie/ev | 919441276d385d2f5580bf2652bd4929e804d931 | [
"Apache-2.0"
] | null | null | null | setup.py | kenlowrie/ev | 919441276d385d2f5580bf2652bd4929e804d931 | [
"Apache-2.0"
] | null | null | null | setup.py | kenlowrie/ev | 919441276d385d2f5580bf2652bd4929e804d931 | [
"Apache-2.0"
] | null | null | null | from setuptools import setup
from sys import version_info
# Package metadata for the `ev` encrypted-vault console utility.
setup(name='ev',
      version='0.8.8',
      description='Encrypted Vault Management Console for image stores on macOS',
      url='https://github.com/kenlowrie/ev',
      author='Ken Lowrie',
      author_email='ken@kenlowrie.com',
      license='Apache',
      packages=['ev'],
      install_requires=['kenl380.pylib'],
      entry_points = {
          # Installs both an unversioned `ev` command and a version-suffixed
          # one (e.g. `ev3`) derived from the running interpreter's major
          # version, so Python 2 and 3 installs can coexist.
          'console_scripts': ['ev=ev.ev:ev_entry',
                              'ev{}=ev.ev:ev_entry'.format(version_info.major)
          ],
      },
      zip_safe=False)
| 31.157895 | 81 | 0.584459 |
fcc8488481eacbfb32be70a567fb80331fbaa8b3 | 4,260 | py | Python | aliyun-python-sdk-dyplsapi/aliyunsdkdyplsapi/request/v20170525/BindAxbRequest.py | liuzheng/aliyun-openapi-python-sdk | 1ba6743f3d6f2cef57ec9e3be1754b04293c3150 | [
"Apache-2.0"
] | 1 | 2021-03-08T02:59:17.000Z | 2021-03-08T02:59:17.000Z | aliyun-python-sdk-dyplsapi/aliyunsdkdyplsapi/request/v20170525/BindAxbRequest.py | bricklayer-Liu/aliyun-openapi-python-sdk | 20da2554de22679fc7c5462c483663e4d79512aa | [
"Apache-2.0"
] | null | null | null | aliyun-python-sdk-dyplsapi/aliyunsdkdyplsapi/request/v20170525/BindAxbRequest.py | bricklayer-Liu/aliyun-openapi-python-sdk | 20da2554de22679fc7c5462c483663e4d79512aa | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkdyplsapi.endpoint import endpoint_data
class BindAxbRequest(RpcRequest):
    """RPC request object for the Dyplsapi `BindAxb` API (version 2017-05-25).

    Every API parameter is carried in the request's query-parameter map;
    this class exposes one getter/setter pair per parameter, all routed
    through two small private helpers.
    """

    def __init__(self):
        RpcRequest.__init__(self, 'Dyplsapi', '2017-05-25', 'BindAxb', 'dypls')
        self.set_method('POST')
        # Populate product endpoint data when the core SDK supports it.
        if hasattr(self, "endpoint_map"):
            self.endpoint_map = endpoint_data.getEndpointMap()
        if hasattr(self, "endpoint_regional"):
            self.endpoint_regional = endpoint_data.getEndpointRegional()

    # ---- internal helpers -------------------------------------------------

    def _query_get(self, key):
        """Read a single value from the query-parameter map."""
        return self.get_query_params().get(key)

    def _query_set(self, key, value):
        """Write a single value into the query-parameter map."""
        self.add_query_param(key, value)

    # ---- per-parameter accessors ------------------------------------------

    def get_ResourceOwnerId(self):
        return self._query_get('ResourceOwnerId')

    def set_ResourceOwnerId(self, ResourceOwnerId):
        self._query_set('ResourceOwnerId', ResourceOwnerId)

    def get_CallDisplayType(self):
        return self._query_get('CallDisplayType')

    def set_CallDisplayType(self, CallDisplayType):
        self._query_set('CallDisplayType', CallDisplayType)

    def get_PhoneNoX(self):
        return self._query_get('PhoneNoX')

    def set_PhoneNoX(self, PhoneNoX):
        self._query_set('PhoneNoX', PhoneNoX)

    def get_RingConfig(self):
        return self._query_get('RingConfig')

    def set_RingConfig(self, RingConfig):
        self._query_set('RingConfig', RingConfig)

    def get_ASRStatus(self):
        return self._query_get('ASRStatus')

    def set_ASRStatus(self, ASRStatus):
        self._query_set('ASRStatus', ASRStatus)

    def get_PhoneNoB(self):
        return self._query_get('PhoneNoB')

    def set_PhoneNoB(self, PhoneNoB):
        self._query_set('PhoneNoB', PhoneNoB)

    def get_PhoneNoA(self):
        return self._query_get('PhoneNoA')

    def set_PhoneNoA(self, PhoneNoA):
        self._query_set('PhoneNoA', PhoneNoA)

    def get_ExpectCity(self):
        return self._query_get('ExpectCity')

    def set_ExpectCity(self, ExpectCity):
        self._query_set('ExpectCity', ExpectCity)

    def get_ResourceOwnerAccount(self):
        return self._query_get('ResourceOwnerAccount')

    def set_ResourceOwnerAccount(self, ResourceOwnerAccount):
        self._query_set('ResourceOwnerAccount', ResourceOwnerAccount)

    def get_OwnerId(self):
        return self._query_get('OwnerId')

    def set_OwnerId(self, OwnerId):
        self._query_set('OwnerId', OwnerId)

    def get_OutOrderId(self):
        return self._query_get('OutOrderId')

    def set_OutOrderId(self, OutOrderId):
        self._query_set('OutOrderId', OutOrderId)

    def get_PoolKey(self):
        return self._query_get('PoolKey')

    def set_PoolKey(self, PoolKey):
        self._query_set('PoolKey', PoolKey)

    def get_Expiration(self):
        return self._query_get('Expiration')

    def set_Expiration(self, Expiration):
        self._query_set('Expiration', Expiration)

    def get_IsRecordingEnabled(self):
        return self._query_get('IsRecordingEnabled')

    def set_IsRecordingEnabled(self, IsRecordingEnabled):
        self._query_set('IsRecordingEnabled', IsRecordingEnabled)

    def get_OutId(self):
        return self._query_get('OutId')

    def set_OutId(self, OutId):
        self._query_set('OutId', OutId)

    def get_ASRModelId(self):
        return self._query_get('ASRModelId')

    def set_ASRModelId(self, ASRModelId):
        self._query_set('ASRModelId', ASRModelId)

    def get_CallRestrict(self):
        return self._query_get('CallRestrict')

    def set_CallRestrict(self, CallRestrict):
        self._query_set('CallRestrict', CallRestrict)
67de7da24cf6b9ca7f8e4a0cb992038e8250b070 | 6,556 | py | Python | test/programytest/parser/template/node_tests/test_thatstar.py | cen-ai/program-y | a753667638147544c54dbebd9f1c8f9ae7f2159e | [
"MIT"
] | 5 | 2018-08-21T00:13:45.000Z | 2018-09-01T20:00:55.000Z | test/programytest/parser/template/node_tests/test_thatstar.py | cen-ai/program-y | a753667638147544c54dbebd9f1c8f9ae7f2159e | [
"MIT"
] | 1 | 2018-09-12T18:30:17.000Z | 2018-09-12T18:30:17.000Z | test/programytest/parser/template/node_tests/test_thatstar.py | cen-ai/program-y | a753667638147544c54dbebd9f1c8f9ae7f2159e | [
"MIT"
] | 5 | 2018-08-21T00:08:36.000Z | 2018-09-23T06:11:04.000Z | import xml.etree.ElementTree as ET
from programy.parser.template.nodes.base import TemplateNode
from programy.parser.template.nodes.thatstar import TemplateThatStarNode
from programy.dialog.dialog import Conversation, Question
from programy.parser.pattern.matcher import MatchContext, Match
from programy.parser.pattern.nodes.oneormore import PatternOneOrMoreWildCardNode
from programytest.parser.base import ParserTestsBaseClass
class MockTemplateThatStarNode(TemplateThatStarNode):
    """Test double for TemplateThatStarNode whose resolution always raises,
    used to exercise the template tree's exception-handling path."""

    def __init__(self):
        TemplateThatStarNode.__init__(self)

    def resolve_to_string(self, context):
        # Deliberately fail so callers' error handling can be verified.
        raise Exception("This is an error")
class TemplateThatStarNodeTests(ParserTestsBaseClass):
    """Unit tests for TemplateThatStarNode: string/XML rendering, defaults,
    and resolution against a recorded conversation."""

    def test_to_str_defaults(self):
        star_node = TemplateThatStarNode()
        self.assertEqual("[THATSTAR]", star_node.to_string())

    def test_to_str_no_defaults(self):
        star_node = TemplateThatStarNode(3, 2)
        self.assertEqual("[THATSTAR question=3 sentence=2]", star_node.to_string())

    def test_to_str_star(self):
        star_node = TemplateThatStarNode(1, -1)
        self.assertEqual("[THATSTAR sentence=*]", star_node.to_string())

    def test_to_xml_defaults(self):
        template = TemplateNode()
        template.append(TemplateThatStarNode())

        xml_root = template.xml_tree(self._client_context)
        self.assertIsNotNone(xml_root)
        xml_text = ET.tostring(xml_root, "utf-8").decode("utf-8")
        self.assertEqual("<template><thatstar /></template>", xml_text)

    def test_to_xml_no_defaults(self):
        template = TemplateNode()
        template.append(TemplateThatStarNode(question=3, sentence=2))

        xml_root = template.xml_tree(self._client_context)
        self.assertIsNotNone(xml_root)
        xml_text = ET.tostring(xml_root, "utf-8").decode("utf-8")
        self.assertEqual('<template><thatstar index="3,2" /></template>', xml_text)

    def test_to_xml_no_default_star(self):
        template = TemplateNode()
        template.append(TemplateThatStarNode(question=3, sentence=-1))

        xml_root = template.xml_tree(self._client_context)
        self.assertIsNotNone(xml_root)
        xml_text = ET.tostring(xml_root, "utf-8").decode("utf-8")
        self.assertEqual('<template><thatstar index="3,*" /></template>', xml_text)

    def test_node(self):
        template = TemplateNode()
        self.assertIsNotNone(template)
        self.assertIsNotNone(template.children)
        self.assertEqual(len(template.children), 0)

        star_node = TemplateThatStarNode()
        self.assertIsNotNone(star_node)
        template.append(star_node)
        self.assertEqual(len(template.children), 1)
        # Both indices default to 1.
        self.assertEqual(1, star_node.question)
        self.assertEqual(1, star_node.sentence)

    def test_node_no_defaults(self):
        template = TemplateNode()
        self.assertIsNotNone(template)
        self.assertIsNotNone(template.children)
        self.assertEqual(len(template.children), 0)

        star_node = TemplateThatStarNode(question=3, sentence=2)
        self.assertIsNotNone(star_node)
        template.append(star_node)
        self.assertEqual(len(template.children), 1)
        self.assertEqual(3, star_node.question)
        self.assertEqual(2, star_node.sentence)

    def test_node_no_star(self):
        template = TemplateNode()
        template.append(TemplateThatStarNode())

        convo = Conversation(self._client_context)

        q = Question.create_from_text(self._client_context, "Hello world",
                                      self._client_context.bot.sentence_splitter)
        q.current_sentence()._response = "Hello matey"
        convo.record_dialog(q)

        q = Question.create_from_text(self._client_context, "How are you",
                                      self._client_context.bot.sentence_splitter)
        q.current_sentence()._response = "Very well thanks"
        convo.record_dialog(q)

        self._client_context.bot._conversation_mgr._conversations["testid"] = convo

        # Without a matched context there is no thatstar value to resolve.
        self.assertEqual("", template.resolve(self._client_context))

    def test_node_with_star(self):
        template = TemplateNode()
        template.append(TemplateThatStarNode())

        convo = Conversation(self._client_context)

        q = Question.create_from_text(self._client_context, "Hello world",
                                      self._client_context.bot.sentence_splitter)
        q.current_sentence()._response = "Hello matey"
        convo.record_dialog(q)

        q = Question.create_from_text(self._client_context, "How are you",
                                      self._client_context.bot.sentence_splitter)
        q.current_sentence()._response = "Very well thanks"
        convo.record_dialog(q)

        # Attach a THAT match to the latest question and record it again.
        wildcard = PatternOneOrMoreWildCardNode("*")
        match_ctx = MatchContext(max_search_depth=100, max_search_timeout=-1,
                                 tokenizer=self._client_context.brain.tokenizer)
        match_ctx.add_match(Match(Match.THAT, wildcard, "Matched"))
        q.current_sentence()._matched_context = match_ctx
        convo.record_dialog(q)

        self._client_context.bot._conversation_mgr._conversations["testid"] = convo

        self.assertEqual("Matched", template.resolve(self._client_context))

    def test_node_with_star_with_none(self):
        template = TemplateNode()
        template.append(TemplateThatStarNode())

        convo = Conversation(self._client_context)

        q = Question.create_from_text(self._client_context, "Hello world",
                                      self._client_context.bot.sentence_splitter)
        q.current_sentence()._response = "Hello matey"
        convo.record_dialog(q)

        q = Question.create_from_text(self._client_context, "How are you",
                                      self._client_context.bot.sentence_splitter)
        q.current_sentence()._response = "Very well thanks"
        convo.record_dialog(q)

        # A THAT match with no matched words resolves to the empty string.
        wildcard = PatternOneOrMoreWildCardNode("*")
        match_ctx = MatchContext(max_search_depth=100, max_search_timeout=-1,
                                 tokenizer=self._client_context.brain.tokenizer)
        match_ctx.add_match(Match(Match.THAT, wildcard, None))
        q.current_sentence()._matched_context = match_ctx
        convo.record_dialog(q)

        self._client_context.bot._conversation_mgr._conversations["testid"] = convo

        self.assertEqual("", template.resolve(self._client_context))

    def test_node_exception_handling(self):
        template = TemplateNode()
        template.append(MockTemplateThatStarNode())

        # Resolution errors are swallowed by the tree and yield "".
        outcome = template.resolve(self._client_context)
        self.assertIsNotNone(outcome)
        self.assertEqual("", outcome)
7a7d4c3b51590d519b63f1b733f5a25e65497b9f | 12,380 | py | Python | utils/classify_WHAM_vcf.py | gkno/wham | 2a1f349e73292f01792460664fc06edec2e7874a | [
"MIT"
] | 66 | 2015-11-10T23:33:06.000Z | 2022-02-23T14:08:24.000Z | utils/classify_WHAM_vcf.py | gkno/wham | 2a1f349e73292f01792460664fc06edec2e7874a | [
"MIT"
] | 39 | 2015-11-04T23:14:59.000Z | 2021-09-07T17:55:14.000Z | utils/classify_WHAM_vcf.py | gkno/wham | 2a1f349e73292f01792460664fc06edec2e7874a | [
"MIT"
] | 21 | 2015-12-31T10:04:47.000Z | 2022-01-25T11:32:19.000Z | #!/usr/bin/python
import argparse, csv, os, sys, re #std python imports
import numpy as np
from sklearn.ensemble import RandomForestClassifier #RF classifier from SKlearn
from sklearn.cross_validation import cross_val_score #validation stats from SKlearn
import itertools
import multiprocessing as mp #allows for parallelization of the classification to speed up script.
#########################
#Args
#########################
# Command-line interface: two required positionals plus optional tuning flags.
parser=argparse.ArgumentParser(description="Runs RandomForest classifier on WHAM output VCF files to classify structural variant type. Appends WC and WP flags for user to explore structural variant calls. The output is a VCF file written to standard out.")
parser.add_argument("VCF", type=str, help="User supplied VCF with WHAM variants; VCF needs AT field data")
parser.add_argument("training_matrix", type=str, help="training dataset for classifier derived from simulated read dataset")
parser.add_argument("--filter", type=str, help="optional arg for filtering type one of : ['sensitive', 'specific']; defaults to output all data if filtering if argument is not supplied.")
# NOTE(review): --proc is declared type=str with no default= even though the
# help text promises a default of 1; downstream code presumably converts it
# to int and substitutes 1 when absent -- confirm before changing this to
# type=int, default=1.
parser.add_argument("--proc", type=str, help="optional arg for number of proceses to run with classifier; higher thread number will increase speed of classifier; defaults to 1")
parser.add_argument("--minclassfreq", default=0, type=float, help="optional arg for minimum frequency required for classification, otherwise variant set as unknown (UNK). Default is to classify everything.")
arg=parser.parse_args()
#########################
#Functions
#########################
#class object for processing VCF files.
class vcf:
    """
    class vcf generates an iterator for looping through a vcf
    file. Can add various functionalities here to further process
    the vcf in a number of ways.

    The constructor consumes (and echoes to stdout) the entire VCF header,
    injecting the two WHAM-specific ##INFO definitions (WC and WP) just
    before the first ##FORMAT line. Each next() call then returns a chunk
    of raw data lines for parallel processing.

    chunksize = number of lines to process at once for parallel applications.
    """
    def __init__(self,file):
        # NOTE(review): `file` shadows the Python 2 builtin of the same name,
        # and the handle is never closed explicitly -- it lives for the
        # lifetime of the object / process.
        self.f = open(file,'r')
        #process all of the header lines of the vcf, echoing them to stdout
        #so the script's output is itself a complete VCF.
        header = True #boolean to continue looping through the header
        info_boolean = False #becomes True once the new INFO fields are emitted
        while header:
            self.line = self.f.readline()
            line = self.line.strip()
            line = line.split("\t") #split line on tabs
            if line[0][0] == '#': #process header lines
                if re.search("##FORMAT", line[0]) and info_boolean == False: #first instance of ##FORMAT..
                    #emit the WHAM classifier INFO definitions (WC and WP)
                    #immediately before the first ##FORMAT line
                    print '##INFO=<ID=WC,Number=1,Type=String,Description="WHAM classifier variant type">'
                    print '##INFO=<ID=WP,Number=4,Type=Float,Description="WHAM probability estimate for each structural variant classification from RandomForest model">'
                    info_boolean = True #never emit the INFO lines again
                print "\t".join( line ) #echo the header line to stdout
            else:
                header = False #first data line reached; it is held in self.line
    def __iter__(self):
        return self
    def next(self, chunksize=5000):
        #Python 2 iterator protocol (would be __next__ in Python 3).
        #Returns up to `chunksize` raw (untouched, newline-terminated) lines.
        cnt = 0 #count of lines accumulated in the current chunk
        return_array = [] #raw VCF data lines for this chunk
        #if we were already at EOF on entry (readline returned ""), stop
        #the iteration entirely
        if len(self.line) == 0: #
            raise StopIteration
        while cnt < chunksize:
            line = self.line
            if len( line ) == 0:
                return( return_array ) #EOF mid-chunk: return the partial chunk
                break #unreachable after return; kept from the original code
            else:
                return_array.append( line )
                self.line = self.f.readline()
                cnt += 1
        return( return_array )
#parse the targets for ML. converts text list of classified data
#into a numerical dataset with links to the classified names
def parse_targets( target ):
    """
    Convert a list of SV type labels (e.g. DEL, DUP, INV, INR) into a
    numerical class vector suitable for the scikit-learn classifier.

    target = list (or array) of string class labels to be turned into
             integer factors.

    Returns a dict with:
        'names'  - sorted unique label names (numpy array); the index of a
                   name in this array is its numeric class code, so the
                   integer predictions can be mapped back to text.
        'target' - float array of numeric class codes, parallel to the input.
    """
    target = np.asarray(target)
    # np.unique with return_inverse performs the whole name->integer mapping
    # in one vectorized pass; names come back sorted, which matches the
    # ordering the previous per-name loop produced.
    names, codes = np.unique(target, return_inverse=True)
    # cast to float for backward compatibility with the original np.zeros()
    # result dtype.
    RV = {'names': names, 'target': codes.astype(float)}
    return( RV )
#method to run observed data through the trained model to output
#a vcf friendly return of classified variant call and the prediction
#probabilities for each call
def classify_data( _x, clf, names, minclassfreq=None ):
    """Run one observation through the trained classifier.

    Args:
        _x: feature vector taken from VCF column 8 ('AT' field values).
        clf: fitted model exposing predict() / predict_proba().
        names: array of class labels, zero-indexed by integer class code.
        minclassfreq: optional probability floor; a winning class below
            it is reported as "UKN" (unknown).

    Returns:
        VCF INFO-style string "WC=<label>;WP=<p1,p2,...>".
    """
    features = np.array(_x)
    # Integer class code, then its text label.
    predicted_idx = int( clf.predict(features) )
    label = names[ predicted_idx ]
    # Per-class probability weights (aligned with `names`).
    probabilities = clf.predict_proba(features)[0]
    # Not enough support for the winning class -> downgrade to unknown.
    if minclassfreq and probabilities[predicted_idx] < minclassfreq:
        label = "UKN"
    prob_field = ",".join( [ str(p) for p in probabilities ] )
    return "WC=" + label + ";WP=" + prob_field
#A general parser that takes the data in VCF flag field and parses it into a
#dictionary data structure. Can then obtain whatever data needed by using
# RV['key']; ie. RV['GT'] ...
def parse_vcf_data( vdat ):
    """Parse a VCF INFO column into a {field: value} dict.

    Splits the semicolon-delimited INFO string into KEY=VALUE pairs,
    e.g. "NC=3;ED=nan" -> {'NC': '3', 'ED': 'nan'}.

    Fixes over the original:
      * no longer shadows the builtin ``dict``;
      * drops a dead try/except (``str.split`` never raises there, so the
        "not valid VCF file" branch could never fire);
      * splits on the FIRST '=' only, so values that themselves contain
        '=' are preserved instead of being truncated;
      * flag-style entries with no '=' map to an empty string instead of
        raising IndexError.

    Args:
        vdat: string; column 8 from a VCF line.

    Returns:
        dict mapping INFO field names to their (string) values.
    """
    fields = {}
    for entry in vdat.split(";"):
        key, _, value = entry.partition("=")
        fields[key] = value
    return fields
#takes vcf field data and runs various filtering specs.
def run_filters( vdat, filtering = None ):
    """Apply the chosen filtering policy to one VCF line's INFO fields.

    Args:
        vdat: dict of INFO fields (as returned by parse_vcf_data).
        filtering: None (everything passes), "sensitive", or "specific".

    Returns:
        True when the line passes the policy, False otherwise.

    Raises:
        ValueError: if ``filtering`` is not a recognised policy name.
    """
    if filtering is None:
        # No policy requested: report every line.
        return True
    if filtering == "sensitive":
        # Permissive: only require at least two supporting calls.
        return int(vdat['NC']) >= 2
    if filtering == "specific":
        # Restrictive: reject missing edit distance, weak breakpoint
        # support, or fewer than three supporting calls.
        if vdat['ED'] == 'nan':
            return False
        breakpoints = vdat['BE'].split(',')
        if int(breakpoints[-1]) < 2:
            return False
        return int(vdat['NC']) >= 3
    raise ValueError('Not a valid --filter argumuent\n please try running with --help arg for instructions')
#fuction will process line information and classify variant for a line in VCF file.
def process_vcf( info ):
    """Classify one chunk of VCF lines (multiprocessing worker body).

    Args:
        info: tuple of
            info[0]: list of raw VCF lines (one chunk from the vcf iterator),
            info[1]: fitted classifier object,
            info[2]: dataset dict holding 'target_names',
            info[3]: filtering policy name supplied by the user (or None),
            info[4]: minimum classification frequency (or None).

    Returns:
        list with one entry per input line: the updated tab-joined VCF
        line when it passed filtering, otherwise None.
    """
    raw_lines, clf, dataset, filter_arg, minclassfreq = info
    processed = []
    for raw in raw_lines:
        columns = raw.strip().split("\t")
        # INFO column (index 7) carries all of WHAM's appended data.
        info_fields = parse_vcf_data( columns[7] )
        if not run_filters( info_fields, filtering=filter_arg ):
            processed.append( None ) #filtered out; caller skips None entries
            continue
        # Drop the first 'AT' entry to match the training-matrix layout.
        features = info_fields['AT'].split(",")[1:]
        call = classify_data( features, clf, dataset['target_names'], minclassfreq )
        # Append the WC/WP classification back onto the INFO column.
        columns[7] = columns[7] + ";" + call
        processed.append( "\t".join( columns ) )
    return processed
#########################
#MAIN
#
# Python 2 script body: trains a RandomForest on the supplied training
# matrix, cross-validates it, then classifies every SV call in the VCF
# in parallel and streams the annotated lines to stdout.
#########################
###########
#import and assign training data
###########
#all sklearn data will be in 2D array [ nsamples X nfeatures]
sys.stderr.write("processing training file... \n" )
#iterate over training file. select out the numerical and classifier data
data = []
target = []
with open(arg.training_matrix) as t:
    for line in csv.reader(t,delimiter='\t'):
        #a leading '#' marks a VCF header line, i.e. the user swapped the
        #training-matrix and VCF arguments on the command line.
        if line[0][0] == "#": #add in this statemnt to print error if user supplies files in wrong order.
            raise ValueError('not a valid WHAM training file. perhaps you supplied arguments in the wrong order? \n please try running with --help arg for instructions')
        target.append( line[-1] ) #always have targets [classified SV] as last column
        #exclude first attribute (row label); keep only the numeric features
        d = [ float(i) for i in line[1:-1] ]
        data.append( d )
#populate the training dataset in sciKitLearn friendly structure.
dataset = {} #empty data
dataset[ 'data' ] = np.array( data ) #all training data into 2-D array
#turn our target list into integers and return target names
target_parse = parse_targets( target )
dataset[ 'target' ] = np.array( target_parse['target'] )
dataset[ 'target_names' ] = np.array( target_parse['names'] )
###########
#random forest classification
###########
#setup inital params
clf = RandomForestClassifier( n_estimators=500 )
#run RFC on dataset with target classifiers; runs the model fit
clf = clf.fit( dataset['data'], dataset['target'] )
######
#run some sanity checks here.
######
training_stats = clf.feature_importances_ #array of variable importances for model.
#print training stats to user
train_list = [ str(i) for i in training_stats ] #convert to str for printing to user.
sys.stderr.write("\t Training weights for RandomForest classifier \n\t N = %d training variables\n" %( len(train_list) ) )
sys.stderr.write("\t %s\n" %( "\t".join( train_list ) ) )
#need cross validation here. uses sklearn.cross_validation
scores = cross_val_score( clf, dataset['data'], dataset['target'] )
avg_val = scores.mean() * 100 #average cross validation levels
sys.stderr.write("\t results from cross validation:\n\t %f%s \n" %( avg_val, '%' ) )
######
#prediction and output
######
sys.stderr.write("processing VCF file through classifier... \n" )
sys.stderr.write("...running parent process with job id %d \n can use this ID to exit \n" %(os.getpid() ) )
sys.stderr.write("minclassfreq var is set to = %f \n" %( arg.minclassfreq ) )
#load VCF file into class obj
vcf_file = vcf(arg.VCF)
#parse the number of processes to enact; default to a single worker
if arg.proc == None:
    proc_num = 1
else:
    proc_num = int( arg.proc )
###
#setup multiprocessing for the classification of SVs
###
p = mp.Pool( processes = proc_num )
#NOTE: itertools.izip is Python 2 only (removed in Python 3); each worker
#receives (chunk, clf, dataset, filter, minclassfreq).
results = p.imap(process_vcf, itertools.izip( vcf_file, itertools.repeat(clf), itertools.repeat(dataset), itertools.repeat(arg.filter), itertools.repeat(arg.minclassfreq) ) )
#iterate over the results and feed to stdout
for r in results:
    for rv in r: #iterate over the list of returned results
        if rv != None: #only print results that pass filtering specs.
            print rv #write output to std out
#final output to std err that the run has finished.
sys.stderr.write("...classifier finished \n" )
| 36.093294 | 256 | 0.703554 |
4c554db691be8827be31e9d997757b4001d39fa3 | 670 | py | Python | poetry/console/commands/check.py | maggyero/poetry | 3c7592c2f3d481f5e655a68fc1fa15c5bc024cda | [
"MIT"
] | 2 | 2019-06-19T15:07:58.000Z | 2019-11-24T14:08:55.000Z | poetry/console/commands/check.py | djetelina/poetry | 1aa1ab2962bb8b6aed33c2308cf8352809d91685 | [
"MIT"
] | 1 | 2021-02-15T17:05:58.000Z | 2021-02-15T17:05:58.000Z | poetry/console/commands/check.py | djetelina/poetry | 1aa1ab2962bb8b6aed33c2308cf8352809d91685 | [
"MIT"
] | 1 | 2019-06-19T15:08:05.000Z | 2019-06-19T15:08:05.000Z | from .command import Command
class CheckCommand(Command):
    """
    Checks the validity of the <comment>pyproject.toml</comment> file.

    check
    """

    def handle(self):
        # Validate the project file; strict mode surfaces warnings too.
        result = self.poetry.check(self.poetry.local_config, strict=True)
        errors = result["errors"]
        warnings = result["warnings"]

        if not (errors or warnings):
            self.info("All set!")
            return 0

        for message in errors:
            self.error("Error: {}".format(message))
        for message in warnings:
            self.line("<warning>Warning: {}</warning>".format(message))
        return 1
| 25.769231 | 79 | 0.610448 |
d0b5a855313b4d47754ee4e3daad7573cd9107e4 | 427 | py | Python | feedback_bot/config.py | Ribonney/feedback-bot | d46e7b56911d626a0b12d44c30e32c98bb46af32 | [
"MIT"
] | null | null | null | feedback_bot/config.py | Ribonney/feedback-bot | d46e7b56911d626a0b12d44c30e32c98bb46af32 | [
"MIT"
] | null | null | null | feedback_bot/config.py | Ribonney/feedback-bot | d46e7b56911d626a0b12d44c30e32c98bb46af32 | [
"MIT"
] | null | null | null | import os
from pathlib import Path
from feedback_bot.command_replies import Replies
# NOTE(review): the string passed to os.environ[...] below looks like an
# actual Telegram bot token, not an environment-variable NAME -- a secret
# committed to source AND a lookup that can never succeed. It should almost
# certainly read os.environ["TG_TOKEN"]; the leaked token must be revoked.
TG_TOKEN = os.environ["1327572686:AAFsgdpyubYSw6ZUiw0xQX6sbJ4rfra0o8I"]
PROXY = os.environ.get("RT_HTTPS_PROXY")  # optional HTTPS proxy URL
# NOTE(review): same defect -- "-470600717" is a chat id, not an env var name.
CHAT_ID = int(os.environ["-470600717"])
# Canned replies loaded from one file per command in the given directory.
REPLIES: Replies = Replies.load_from_dir(Path(os.environ["COMMAND_REPLIES_PATH"]))
BOT_TIMEOUT = int(os.environ.get("BOT_TIMEOUT", 10))  # seconds; default 10
BOT_RETRIES = int(os.environ.get("BOT_RETRIES", 5))  # attempts; default 5
| 35.583333 | 82 | 0.791569 |
d2e6c61351830524a79bac3e31a09541627faa65 | 355 | py | Python | friendly_computing_machine/math.py | Yigitcan-MSU/friendly-computing-machine | ccfae25c4e331694572b75b1c95b36595c7067a9 | [
"BSD-3-Clause"
] | null | null | null | friendly_computing_machine/math.py | Yigitcan-MSU/friendly-computing-machine | ccfae25c4e331694572b75b1c95b36595c7067a9 | [
"BSD-3-Clause"
] | null | null | null | friendly_computing_machine/math.py | Yigitcan-MSU/friendly-computing-machine | ccfae25c4e331694572b75b1c95b36595c7067a9 | [
"BSD-3-Clause"
] | null | null | null | """
A small set of functions for doing math operations
"""
def add(arg1, arg2):
    """Return the sum of arg1 and arg2."""
    total = arg1 + arg2
    return total
def mult(arg1, arg2):
    """Return the product of arg1 and arg2."""
    product = arg1 * arg2
    return product
def sub(arg1, arg2):
    """Return arg1 minus arg2.

    Note: the original docstring claimed this subtracts arg1 *from* arg2,
    which contradicted the implementation (arg1 - arg2). The docstring is
    corrected here; the behavior is unchanged so existing callers are safe.
    """
    return arg1 - arg2
| 14.791667 | 50 | 0.633803 |
49b82492f00dcbe167a6abd3d51ffac6e5a5b8ff | 5,208 | py | Python | src/RIOT/dist/tools/mcuboot/imgtool/image.py | ARte-team/ARte | 19f17f57522e1b18ba390718fc94be246451837b | [
"MIT"
] | 2 | 2020-04-30T08:17:45.000Z | 2020-05-23T08:46:54.000Z | src/RIOT/dist/tools/mcuboot/imgtool/image.py | ARte-team/ARte | 19f17f57522e1b18ba390718fc94be246451837b | [
"MIT"
] | null | null | null | src/RIOT/dist/tools/mcuboot/imgtool/image.py | ARte-team/ARte | 19f17f57522e1b18ba390718fc94be246451837b | [
"MIT"
] | 1 | 2020-02-21T09:21:45.000Z | 2020-02-21T09:21:45.000Z | """
Image signing and management.
"""
from . import version as versmod
import hashlib
import struct
IMAGE_MAGIC = 0x96f3b83c  # magic word at the start of the mcuboot image header
IMAGE_HEADER_SIZE = 32  # bytes reserved for the header; must match the fmt below

# Image header flags (bitmask stored in the header's Flags field).
IMAGE_F = {
        'PIC': 0x0000001,
        'SHA256': 0x0000002,
        'PKCS15_RSA2048_SHA256': 0x0000004,
        'ECDSA224_SHA256': 0x0000008,
        'NON_BOOTABLE': 0x0000010,
        'ECDSA256_SHA256': 0x0000020,
        'PKCS1_PSS_RSA2048_SHA256': 0x0000040, }

# Type codes for the TLV records appended after the image payload.
TLV_VALUES = {
        'SHA256': 1,
        'RSA2048': 2,
        'ECDSA224': 3,
        'ECDSA256': 4, }

TLV_HEADER_SIZE = 4  # bytes per TLV record header: type, pad, length

# Sizes of the image trailer, depending on image alignment.
trailer_sizes = {
    1: 402,
    2: 788,
    4: 1560,
    8: 3104, }

# Magic byte sequence the bootloader looks for inside the image trailer.
boot_magic = bytes([
    0x77, 0xc2, 0x95, 0xf3,
    0x60, 0xd2, 0xef, 0x7f,
    0x35, 0x52, 0x50, 0x0f,
    0x2c, 0xb6, 0x79, 0x80, ])
class TLV():
    """Builder for the type-length-value byte buffer appended to an image."""

    def __init__(self):
        # Accumulates little-endian TLV records in insertion order.
        self.buf = bytearray()

    def add(self, kind, payload):
        """Append one TLV record.

        Args:
            kind: string key into TLV_VALUES identifying the record type.
            payload: bytes of the record body.
        """
        header = struct.pack('<BBH', TLV_VALUES[kind], 0, len(payload))
        self.buf.extend(header)
        self.buf.extend(payload)

    def get(self):
        """Return the accumulated records as immutable bytes."""
        return bytes(self.buf)
class Image():
    """In-memory mcuboot firmware image.

    Holds the raw payload plus version/header/padding metadata, and knows
    how to install the mcuboot header, append the SHA256/signature TLVs,
    and pad the image out with the boot trailer magic.
    """

    @classmethod
    def load(cls, path, included_header=False, **kwargs):
        """Load an image from a given file.

        Args:
            path: file holding the raw image payload.
            included_header: True when the file already reserves space for
                the mcuboot header; False prepends header_size zero bytes.
            **kwargs: forwarded to the constructor (version, header_size, pad).
        """
        with open(path, 'rb') as f:
            payload = f.read()
        obj = cls(**kwargs)
        obj.payload = payload

        # Add the image header if needed.
        if not included_header and obj.header_size > 0:
            obj.payload = (b'\000' * obj.header_size) + obj.payload

        obj.check()
        return obj

    def __init__(self, version=None, header_size=IMAGE_HEADER_SIZE, pad=0):
        # Fall back to version "0" / the default header size when unset/falsy.
        self.version = version or versmod.decode_version("0")
        self.header_size = header_size or IMAGE_HEADER_SIZE
        self.pad = pad

    def __repr__(self):
        return "<Image version={}, header_size={}, pad={}, payloadlen=0x{:x}>".format(
                self.version,
                self.header_size,
                self.pad,
                len(self.payload))

    def save(self, path):
        """Write the (possibly signed and padded) payload to `path`."""
        with open(path, 'wb') as f:
            f.write(self.payload)

    def check(self):
        """Perform some sanity checking of the image."""
        # If there is a header requested, make sure that the image
        # starts with all zeros (space for add_header to overwrite).
        if self.header_size > 0:
            if any(v != 0 for v in self.payload[0:self.header_size]):
                raise Exception("Padding requested, but image does not start with zeros")

    def sign(self, key):
        """Install the header, then append the SHA256 (and signature) TLVs.

        Args:
            key: signing key object, or None for a hash-only image.
        """
        self.add_header(key)

        tlv = TLV()

        # Note that ecdsa wants to do the hashing itself, which means
        # we get to hash it twice.
        sha = hashlib.sha256()
        sha.update(self.payload)
        digest = sha.digest()

        tlv.add('SHA256', digest)

        if key is not None:
            sig = key.sign(self.payload)
            tlv.add(key.sig_tlv(), sig)

        self.payload += tlv.get()

    def add_header(self, key):
        """Install the image header.

        The key is needed to know the type of signature, and
        approximate the size of the signature."""

        flags = 0
        tlvsz = 0
        if key is not None:
            flags |= IMAGE_F[key.sig_type()]
            tlvsz += TLV_HEADER_SIZE + key.sig_len()

        # A SHA256 TLV is always present, signature or not.
        flags |= IMAGE_F['SHA256']
        tlvsz += 4 + hashlib.sha256().digest_size

        fmt = ('<' +
            # type ImageHdr struct {
            'I' +    # Magic uint32
            'H' +    # TlvSz uint16
            'B' +    # KeyId uint8
            'B' +    # Pad1  uint8
            'H' +    # HdrSz uint16
            'H' +    # Pad2  uint16
            'I' +    # ImgSz uint32
            'I' +    # Flags uint32
            'BBHI' + # Vers  ImageVersion
            'I'      # Pad3  uint32
            ) # }
        assert struct.calcsize(fmt) == IMAGE_HEADER_SIZE
        header = struct.pack(fmt,
                IMAGE_MAGIC,
                tlvsz, # TlvSz
                0, # KeyId (TODO: allow other ids)
                0,  # Pad1
                self.header_size,
                0, # Pad2
                len(self.payload) - self.header_size, # ImageSz
                flags, # Flags
                self.version.major,
                self.version.minor or 0,
                self.version.revision or 0,
                self.version.build or 0,
                0) # Pad3
        # Overwrite the zero padding reserved at the front of the payload.
        self.payload = bytearray(self.payload)
        self.payload[:len(header)] = header

    def pad_to(self, size, align):
        """Pad the image to the given size, with the given flash alignment."""
        tsize = trailer_sizes[align]
        padding = size - (len(self.payload) + tsize)
        if padding < 0:
            msg = "Image size (0x{:x}) + trailer (0x{:x}) exceeds requested size 0x{:x}".format(
                    len(self.payload), tsize, size)
            raise Exception(msg)

        pbytes = b'\xff' * padding
        pbytes += boot_magic
        # Fill the remainder of the trailer with 0xff after the magic.
        pbytes += b'\xff' * (tsize - len(boot_magic))
        self.payload += pbytes
| 30.104046 | 96 | 0.532834 |
02d5cfe8ec28aa1cbffcac11ff1c8386049d3797 | 69,480 | py | Python | tensorflow/python/eager/context.py | SayyedAdnan/tensorflow | 68062543231ccbeb7e572eb97dffe8690915c9a0 | [
"Apache-2.0"
] | 1 | 2019-12-25T09:44:14.000Z | 2019-12-25T09:44:14.000Z | tensorflow/python/eager/context.py | mauryaganesh/tensorflow | 9e484cdbf47f20802b14f9e3aca07393e5b89547 | [
"Apache-2.0"
] | 2 | 2021-08-25T16:14:24.000Z | 2022-02-10T02:58:17.000Z | tensorflow/python/eager/context.py | mauryaganesh/tensorflow | 9e484cdbf47f20802b14f9e3aca07393e5b89547 | [
"Apache-2.0"
] | 1 | 2020-04-22T01:47:46.000Z | 2020-04-22T01:47:46.000Z | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""State management for eager execution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import contextlib
import copy
import random
import threading
from absl import logging
import numpy as np
import six
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python import pywrap_tfe
from tensorflow.python import tf2
from tensorflow.python.eager import eager_util as c_api_util
from tensorflow.python.eager import executor
from tensorflow.python.eager import monitoring
from tensorflow.python.framework import device as pydev
from tensorflow.python.util import compat
from tensorflow.python.util import is_in_graph_mode
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util.tf_export import tf_export
GRAPH_MODE = 0
EAGER_MODE = 1

# Process-wide default for new threads: eager under TF2, graph under TF1.
default_execution_mode = EAGER_MODE if tf2.enabled() else GRAPH_MODE

# Cache from (old_device_name, partial_new_device_name) -> (new_device_name,
# new_device_spec).
# Note that we do not protect this with a lock and instead rely on python's GIL
# and the idempotent nature of writes to provide thread safety.
_device_parsing_cache = {}
_starting_device_spec = pydev.DeviceSpec.from_string("")

_MAXINT32 = 2**31 - 1  # upper bound for generated per-op random seeds

# Device placement policies (Python aliases of the TFE_* C constants).
DEVICE_PLACEMENT_EXPLICIT = pywrap_tfe.TFE_DEVICE_PLACEMENT_EXPLICIT
DEVICE_PLACEMENT_WARN = pywrap_tfe.TFE_DEVICE_PLACEMENT_WARN
DEVICE_PLACEMENT_SILENT = pywrap_tfe.TFE_DEVICE_PLACEMENT_SILENT
DEVICE_PLACEMENT_SILENT_FOR_INT32 = (
    pywrap_tfe.TFE_DEVICE_PLACEMENT_SILENT_FOR_INT32)

# Op execution modes.
SYNC = 0
ASYNC = 1

# Remote tensor mirroring policies.
MIRRORING_NONE = pywrap_tfe.TFE_MIRRORING_NONE
MIRRORING_ALL = pywrap_tfe.TFE_MIRRORING_ALL

_KEEP_ALIVE_SECS = 600  # keep-alive period passed with remote server defs

_python_eager_context_create_counter = monitoring.Counter(
    "/tensorflow/api/python/eager_context_create_counter",
    "Counter for number of eager contexts created in Python.")
class _EagerTensorCache(object):
"""Simple cache which evicts items based on length in a FIFO manner."""
def __init__(self, max_items=256, max_tensor_size=10000):
self._data = collections.OrderedDict()
self._max_items = max_items
self._max_tensor_size = max_tensor_size
def put(self, key, value):
if value._num_elements() > self._max_tensor_size: # pylint: disable=protected-access
return
self._data[key] = value
if len(self._data) > self._max_items:
self._data.popitem(last=False)
def get(self, key):
return self._data.get(key, None)
def flush(self):
self._data = {}
class FunctionCallOptions(object):
  """Options applied at call sites of eager functions.

  Eager functions are functions decorated with tf.contrib.eager.defun.
  """

  def __init__(self, executor_type=None, config_proto=None):
    """Constructor.

    Args:
      executor_type: (optional) name of the executor to be used to execute the
        eager function. If None or an empty string, the default Tensorflow
        executor will be used.
      config_proto: (optional) a `config_pb2.ConfigProto` proto or
        a serialized string of that proto.
        The config used by Grappler when optimizing the function graph.
        Each concrete function is optimized the first time is called. Changing
        config_proto after the first call has no effect.
        If config_proto is None, an empty RewriterConfig will be used.
    """
    # Both assignments route through the property setters below, which
    # normalize/validate the values.
    self.config_proto_serialized = config_proto
    self.executor_type = executor_type

  @property
  def executor_type(self):
    return self._executor_type

  @executor_type.setter
  def executor_type(self, executor_type):
    self._executor_type = executor_type

  @property
  def config_proto_serialized(self):
    return self._config_proto_serialized

  @config_proto_serialized.setter
  def config_proto_serialized(self, config):
    # Accept a ConfigProto, an already-serialized string, or None (meaning
    # an empty proto); store the serialized form in every case.
    if isinstance(config, config_pb2.ConfigProto):
      self._config_proto_serialized = config.SerializeToString()
    elif isinstance(config, str):
      self._config_proto_serialized = config
    elif config is None:
      self._config_proto_serialized = (
          config_pb2.ConfigProto().SerializeToString())
    else:
      raise ValueError("the rewriter config must be either a "
                       "config_pb2.ConfigProto, or a serialized string of that "
                       "proto or None. got: {}".format(type(config)))
# Map from context_id (an int) to _TensorCaches.
# Dicts are thread safe in CPython.
# Entries are removed by _TensorCacheDeleter when a context is finalized.
# TODO(iga): Remove this once TensorCaches are moved to C++.
_tensor_caches_map = {}
class _TensorCaches(threading.local):
  """Per-thread caches of commonly used eager tensors."""

  def __init__(self):
    super(_TensorCaches, self).__init__()
    # Both caches are created lazily on first property access.
    self._ones_rank_cache = None
    self._zeros_cache = None

  @property
  def ones_rank_cache(self):
    """Cache for ones tensors keyed by rank, built on first use."""
    if self._ones_rank_cache is None:
      self._ones_rank_cache = _EagerTensorCache()
    return self._ones_rank_cache

  @property
  def zeros_cache(self):
    """Cache for zeros tensors, built on first use."""
    if self._zeros_cache is None:
      self._zeros_cache = _EagerTensorCache()
    return self._zeros_cache
class _ThreadLocalData(threading.local):
  """Thread local storage for the eager context."""

  def __init__(self):
    super(_ThreadLocalData, self).__init__()
    # threading.local re-runs __init__ in every thread that touches the
    # object, so each thread starts from these defaults.
    self.device_spec = _starting_device_spec
    self.device_name = ""
    # New threads follow the process-wide default execution mode.
    self.is_eager = default_execution_mode == EAGER_MODE
    self.scope_name = ""
    self.function_call_options = None
    self.executor = None
    self.op_callbacks = []
    self.invoking_op_callbacks = False
# Record of one entry on the context-switch stack:
#   is_building_function: bool, whether the entered context builds a function.
#   enter_context_fn: callable that performs the switch (e.g. graph.as_default
#     or eager_mode).
#   device_stack: the graph's device function stack, or None for eager.
ContextSwitch = collections.namedtuple(
    "ContextSwitch", ["is_building_function", "enter_context_fn",
                      "device_stack"])
# `_ContextSwitchStack` is a `threading.local` to match the semantics of
# ``DefaultGraphStack`, which is also a `threading.local`.
class _ContextSwitchStack(threading.local):
"""A thread-local stack of context switches."""
def __init__(self, eager):
super(_ContextSwitchStack, self).__init__()
self.stack = []
if eager:
# Initialize the stack with a pointer to enter the eager context; this
# ensures that the fact that eager execution was enabled is propagated
# across threads, since (1) `enable_eager_execution` modifies a
# process-level flag (`default_execution_mode`) and (2) `__init__` is
# called each time a threading.local object is used in a separate thread.
self.push(is_building_function=False, enter_context_fn=eager_mode,
device_stack=None)
def push(self, is_building_function, enter_context_fn, device_stack):
"""Push metadata about a context switch onto the stack.
A context switch can take any one of the two forms: installing a graph as
the default graph, or entering the eager context. For each context switch,
we record whether or not the entered context is building a function.
Args:
is_building_function: (bool.) Whether the context is building a function.
enter_context_fn: (function.) A callable that executes the context switch.
For example, `graph.as_default` or `eager_mode`.
device_stack: If applicable, the device function stack for this
graph. When breaking out of graphs in init_scope, the innermost nonempty
device stack is used. Eager contexts put `None` here and the value is
never used.
"""
self.stack.append(
ContextSwitch(is_building_function, enter_context_fn, device_stack))
def pop(self):
"""Pop the stack."""
self.stack.pop()
@tf_export("config.LogicalDevice")
class LogicalDevice(
    collections.namedtuple("LogicalDevice", ["name", "device_type"])):
  """Abstraction for a logical device initialized by the runtime.

  A `tf.config.LogicalDevice` corresponds to an initialized logical device on a
  `tf.config.PhysicalDevice` or a remote device visible to the cluster. Tensors
  and operations can be placed on a specific logical device by calling
  `tf.device` with a specified `tf.config.LogicalDevice`.

  Fields:
    name: The fully qualified name of the device. Can be used for Op or function
      placement.
    device_type: String declaring the type of device such as "CPU" or "GPU".
  """
  # The namedtuple base supplies the fields, equality, and immutability.
  pass
@tf_export("config.LogicalDeviceConfiguration",
           "config.experimental.VirtualDeviceConfiguration")
class LogicalDeviceConfiguration(
    collections.namedtuple("LogicalDeviceConfiguration", ["memory_limit"])):
  """Configuration class for a logical devices.

  The class specifies the parameters to configure a `tf.config.PhysicalDevice`
  as it is initialized to a `tf.config.LogicalDevice` during runtime
  initialization. Not all fields are valid for all device types.

  See `tf.config.get_logical_device_configuration` and
  `tf.config.set_logical_device_configuration` for usage examples.

  Fields:
    memory_limit: (optional) Maximum memory (in MB) to allocate on the virtual
      device. Currently only supported for GPUs.
  """

  def __new__(cls, memory_limit=None):
    # __new__ (not __init__) because namedtuples are immutable; it exists
    # only to make `memory_limit` optional with a None default.
    return super(LogicalDeviceConfiguration, cls).__new__(cls, memory_limit)
@tf_export("config.PhysicalDevice")
class PhysicalDevice(
    collections.namedtuple("PhysicalDevice", ["name", "device_type"])):
  """Abstraction for a locally visible physical device.

  TensorFlow can utilize various devices such as the CPU or multiple GPUs
  for computation. Before initializing a local device for use, the user can
  customize certain properties of the device such as it's visibility or memory
  configuration.

  Once a visible `tf.config.PhysicalDevice` is initialized one or more
  `tf.config.LogicalDevice` objects are created. Use
  `tf.config.set_visible_devices` to configure the visibility of a physical
  device and `tf.config.set_logical_device_configuration` to configure multiple
  `tf.config.LogicalDevice` objects for a `tf.config.PhysicalDevice`. This is
  useful when separation between models is needed or to simulate a multi-device
  environment.

  Fields:
    name: Unique identifier for device.
    device_type: String declaring the type of device such as "CPU" or "GPU".
  """
  # The namedtuple base supplies the fields, equality, and immutability.
  pass
class _AtomicCounter(object):
"""A simple atomic counter."""
def __init__(self):
self._value = 0
self._lock = threading.Lock()
def increment_and_get(self):
with self._lock:
self._value += 1
return self._value
# Process-wide source of unique ids used to index _tensor_caches_map.
_context_id_counter = _AtomicCounter()
class _TensorCacheDeleter(object):
"""Deletes tensor caches for a given context."""
def __init__(self, context_id):
self._context_id = context_id
def __del__(self):
if _tensor_caches_map is None:
return
if self._context_id in _tensor_caches_map:
del _tensor_caches_map[self._context_id]
# TODO(agarwal): rename to EagerContext / EagerRuntime ?
# TODO(agarwal): consider keeping the corresponding Graph here.
class Context(object):
"""Environment in which eager operations execute."""
# TODO(agarwal): create and link in some documentation for `execution_mode`.
# pylint: disable=redefined-outer-name
def __init__(self,
config=None,
device_policy=None,
execution_mode=None,
server_def=None):
"""Creates a new Context.
Args:
config: (Optional.) A `ConfigProto` protocol buffer with configuration
options for the Context. Note that a lot of these options may be
currently unimplemented or irrelevant when eager execution is enabled.
device_policy: (Optional.) What policy to use when trying to run an
operation on a device with inputs which are not on that device.
When set to None, an appropriate value will be picked automatically.
The value picked may change between TensorFlow releases.
Defaults to DEVICE_PLACEMENT_SILENT.
Valid values:
- DEVICE_PLACEMENT_EXPLICIT: raises an error if the placement is
not correct.
- DEVICE_PLACEMENT_WARN: copies the tensors which are not on the
right device but raises a warning.
- DEVICE_PLACEMENT_SILENT: silently copies the tensors. This might
hide performance problems.
- DEVICE_PLACEMENT_SILENT_FOR_INT32: silently copies int32 tensors,
raising errors on the other ones.
execution_mode: (Optional.) Policy controlling how operations dispatched
are actually executed. When set to None, an appropriate value will be
picked automatically. The value picked may change between TensorFlow
releases.
Valid values:
- SYNC: executes each operation synchronously.
- ASYNC: executes each operation asynchronously. These
operations may return "non-ready" handles.
server_def: (Optional.) A tensorflow::ServerDef proto.
Enables execution on remote devices. GrpcServers need to be started by
creating an identical server_def to this, and setting the appropriate
task_indexes, so that the servers can communicate. It will then be
possible to execute operations on remote devices.
Raises:
ValueError: If execution_mode is not valid.
"""
# This _id is used only to index the tensor caches.
# TODO(iga): Remove this when tensor caches are moved to C++.
self._id = _context_id_counter.increment_and_get()
self._tensor_cache_deleter = _TensorCacheDeleter(self._id)
_tensor_caches_map[self._id] = _TensorCaches()
self._config = config
self._thread_local_data = _ThreadLocalData()
self._context_switches = _ContextSwitchStack(self.executing_eagerly())
self._context_handle = None
self._context_devices = None
self._seed = None
self._initialize_lock = threading.Lock()
self._initialized = False
if device_policy is None:
device_policy = DEVICE_PLACEMENT_SILENT
self._device_policy = device_policy
self._mirroring_policy = None
if execution_mode not in (None, SYNC, ASYNC):
raise ValueError(
"execution_mode should be None/SYNC/ASYNC. Got %s" % execution_mode)
if execution_mode is None:
execution_mode = SYNC
self._default_is_async = execution_mode == ASYNC
self._lazy_remote_inputs_copy = None
self._server_def = server_def
self._collective_ops_server_def = None
self._collective_leader = None
self._collective_scoped_allocator_enabled_ops = None
self._collective_use_nccl_communication = None
self._collective_device_filters = None
self._device_lock = threading.Lock()
self._physical_devices = None
self._visible_device_list = []
self._memory_growth_map = None
self._virtual_device_map = {}
# Values set after construction
self._optimizer_jit = None
self._intra_op_parallelism_threads = None
self._inter_op_parallelism_threads = None
self._soft_device_placement = None
self._log_device_placement = None
self._enable_mlir_bridge = None
self._optimizer_experimental_options = {}
_python_eager_context_create_counter.get_cell().increase_by(1)
# pylint: enable=redefined-outer-name
def _set_global_seed(self, seed):
"""Set a global eager mode seed for random ops."""
self._seed = seed
# `random.Random(seed)` needs `seed` to be hashable, while values of type
# e.g. `np.int64` or `np.ndarray` are not. We use `int(...)` to convert them
# to int.
try:
hash(seed)
except TypeError:
seed = int(np.array(seed))
self._rng = random.Random(seed)
# Also clear the kernel cache, to reset any existing seeds
if self._context_handle is not None:
pywrap_tfe.TFE_ContextClearCaches(self._context_handle)
def _internal_operation_seed(self):
"""Returns a fake operation seed.
In eager mode, user shouldn't set or depend on operation seed.
Here, we generate a random seed based on global seed to make
operation's randomness different and depend on the global seed.
Returns:
A fake operation seed based on global seed.
"""
return self._rng.randint(0, _MAXINT32)
def _initialize_logical_devices(self):
"""Helper to initialize devices."""
# Store list of devices
logical_devices = []
context_devices = []
device_list = pywrap_tfe.TFE_ContextListDevices(self._context_handle)
try:
self._num_gpus = 0
for i in range(pywrap_tfe.TF_DeviceListCount(device_list)):
dev_name = pywrap_tfe.TF_DeviceListName(device_list, i)
context_devices.append(pydev.canonical_name(dev_name))
spec = pydev.DeviceSpec.from_string(dev_name)
# If the job is localhost, we assume that the cluster has not yet been
# configured and thus clear the job, replica & task.
if spec.job == "localhost":
spec = spec.replace(job=None, replica=None, task=None)
logical_devices.append(
LogicalDevice(name=spec.to_string(), device_type=spec.device_type))
dev_type = pywrap_tfe.TF_DeviceListType(device_list, i)
if dev_type == "GPU":
self._num_gpus += 1
finally:
self._logical_devices = logical_devices
self._context_devices = context_devices
pywrap_tfe.TF_DeleteDeviceList(device_list)
  def ensure_initialized(self):
    """Initialize handle and devices if not already done so.

    Idempotent and thread-safe: uses double-checked locking around
    `self._initialized` so concurrent callers create at most one C context.
    """
    # Fast path: already initialized, no lock needed.
    if self._initialized:
      return
    with self._initialize_lock:
      # Re-check under the lock in case another thread initialized first.
      if self._initialized:
        return
      assert self._context_devices is None
      # Build a TFE_ContextOptions, apply all Python-side settings, create the
      # context, then free the options in `finally` regardless of outcome.
      opts = pywrap_tfe.TFE_NewContextOptions()
      try:
        config_str = self.config.SerializeToString()
        pywrap_tfe.TFE_ContextOptionsSetConfig(opts, config_str)
        if self._device_policy is not None:
          pywrap_tfe.TFE_ContextOptionsSetDevicePlacementPolicy(
              opts, self._device_policy)
        if self._mirroring_policy is not None:
          pywrap_tfe.TFE_ContextOptionsSetMirroringPolicy(
              opts, self._mirroring_policy)
        if self._default_is_async == ASYNC:
          pywrap_tfe.TFE_ContextOptionsSetAsync(opts, True)
        if self._lazy_remote_inputs_copy is not None:
          pywrap_tfe.TFE_ContextOptionsSetLazyRemoteInputsCopy(
              opts, self._lazy_remote_inputs_copy)
        context_handle = pywrap_tfe.TFE_NewContext(opts)
      finally:
        pywrap_tfe.TFE_DeleteContextOptions(opts)
      # Remote execution and collective ops are mutually exclusive here.
      # NOTE(review): `assert` is stripped under `python -O`; kept as-is to
      # preserve behavior.
      assert not (self._server_def and self._collective_ops_server_def), (
          "Cannot enable remote execution as well as collective ops at the "
          "moment. If this is important to you, please file an issue.")
      if self._server_def is not None:
        server_def_str = self._server_def.SerializeToString()
        pywrap_tfe.TFE_ContextSetServerDef(context_handle, _KEEP_ALIVE_SECS,
                                           server_def_str)
      elif self._collective_ops_server_def is not None:
        server_def_str = self._collective_ops_server_def.SerializeToString()
        pywrap_tfe.TFE_EnableCollectiveOps(context_handle, server_def_str)
      self._context_handle = context_handle
      self._initialize_logical_devices()
      # Set last so readers of `_initialized` see a fully-set-up context.
      self._initialized = True
def _clear_caches(self):
self.ones_rank_cache().flush()
self.zeros_cache().flush()
pywrap_tfe.TFE_ClearScalarCache()
def get_server_def(self):
return self._server_def
  def set_server_def(self, server_def, keep_alive_secs=_KEEP_ALIVE_SECS):
    """Allow setting a server_def on the context.

    When a server def is replaced, it effectively clears a bunch of caches
    within the context. If you attempt to use a tensor object that was pointing
    to a tensor on the remote device, it will raise an error.

    Args:
      server_def: A tensorflow::ServerDef proto.
        Enables execution on remote devices.
      keep_alive_secs: Num. seconds after which the remote end will hang up.
        As long as the client is still alive, the server state for the context
        will be kept alive. If the client is killed (or there is some failure),
        the server will clean up its context keep_alive_secs after the final RPC
        it receives.

    Raises:
      ValueError: if server_def is None.
    """
    if not server_def:
      raise ValueError("server_def is None.")
    # Record the setting even if the C context does not exist yet; it will be
    # applied by `ensure_initialized`.
    self._server_def = server_def
    if self._context_handle:
      # Context is already live: push the new server def down to the runtime
      # and refresh the device list, which may now include remote devices.
      server_def_str = server_def.SerializeToString()
      pywrap_tfe.TFE_ContextSetServerDef(self._context_handle, keep_alive_secs,
                                         server_def_str)
      self._initialize_logical_devices()
    # Clear all the caches in case there are remote tensors in them.
    self._clear_caches()
  def update_server_def(self, server_def, keep_alive_secs=_KEEP_ALIVE_SECS):
    """Update a server_def on the context.

    Args:
      server_def: A tensorflow::ServerDef proto. Enables execution on remote
        devices.
      keep_alive_secs: Num. seconds after which the remote end will hang up. As
        long as the client is still alive, the server state for the context will
        be kept alive. If the client is killed (or there is some failure), the
        server will clean up its context keep_alive_secs after the final RPC it
        receives.

    Raises:
      ValueError: if server_def is None.
    """
    if not server_def:
      raise ValueError("server_def is None.")
    self._server_def = server_def
    if self._context_handle:
      # Unlike `set_server_def`, this uses the C API's *update* entry point;
      # the refresh of logical devices and cache clearing mirror it.
      server_def_str = server_def.SerializeToString()
      pywrap_tfe.TFE_ContextUpdateServerDef(self._context_handle,
                                            keep_alive_secs, server_def_str)
      self._initialize_logical_devices()
    self._clear_caches()
def check_alive(self, worker_name):
"""Checks whether a remote worker is alive or not.
Args:
worker_name: a string representing the remote worker. It must be a fully
specified name like "/job:worker/replica:0/task:0".
Returns:
a boolean indicating whether the remote worker is alive or not.
Raises:
ValueError: if context is not initialized.
"""
# TODO(yuefengz): support checking multiple workers.
if self._context_handle:
return pywrap_tfe.TFE_ContextCheckAlive(self._context_handle, worker_name)
else:
raise ValueError("Context is not initialized.")
def enable_collective_ops(self, server_def):
"""Enable distributed collective ops with an appropriate server_def.
Args:
server_def: A tensorflow::ServerDef proto. Enables execution on remote
devices.
Raises:
ValueError: if server_def is None.
RuntimeError: if this method is not called at program startup.
"""
if not server_def:
raise ValueError("server_def is None.")
# TODO(b/129298253): Allow creating datasets/tensors before enabling
# collective ops.
if self._context_handle is not None:
logging.warning("Enabling collective ops after program startup may cause "
"error when accessing previously created tensors.")
self._collective_ops_server_def = server_def
def configure_collective_ops(
self,
collective_leader="",
scoped_allocator_enabled_ops=("CollectiveReduce",),
use_nccl_communication=False,
device_filters=None):
"""Configure collective ops.
Collective group leader is necessary for collective ops to run, other
configurations are mainly for the purpose of performance.
Args:
collective_leader: a device string for collective leader, e.g.
"/job:worker/replica:0/task:0"; empty string means local execution of
collective ops.
scoped_allocator_enabled_ops: a tuple or a list of op names for scoped
allocator to run with.
use_nccl_communication: whether to use nccl communication for collective
ops.
device_filters: a tuple or a list of device strings. If set, corresponding
task can only see the devices filtered by these device filters.
Raises:
RuntimeError: if this method is not called at program startup.
"""
if self._collective_leader is not None:
if (self._collective_leader != collective_leader or
self._collective_scoped_allocator_enabled_ops !=
scoped_allocator_enabled_ops or
self._collective_use_nccl_communication != use_nccl_communication or
self._collective_device_filters != device_filters):
raise ValueError("Collective ops are already configured.")
else:
return
if self._context_handle is not None:
raise RuntimeError("Collective ops must be configured at program startup")
self._collective_leader = collective_leader
self._collective_scoped_allocator_enabled_ops = scoped_allocator_enabled_ops
self._collective_use_nccl_communication = use_nccl_communication
self._collective_device_filters = device_filters
  @property
  def _handle(self):
    # Raw TFE context handle; valid only after `ensure_initialized` has run.
    if self._context_handle is None:
      raise AssertionError("Context must be initialized first.")
    return self._context_handle

  @property
  def _devices(self):
    # Canonical device-name strings, populated by
    # `_initialize_logical_devices`.
    if self._context_devices is None:
      raise AssertionError("Context must be initialized first.")
    return self._context_devices
def __str__(self):
if self._context_handle is None:
return "Eager TensorFlow Context. Devices currently uninitialized."
else:
devices = self._devices
lines = ["Eager TensorFlow Context with %d devices" % (len(devices))]
for i, d in enumerate(devices):
lines.append(" Device %d: %s" % (i, d))
return "\n".join(lines)
  @tf_contextlib.contextmanager
  def _mode(self, mode):
    """A context manager to allow setting the mode to EAGER/GRAPH."""
    ctx = self._thread_local_data
    old_is_eager = ctx.is_eager
    ctx.is_eager = mode == EAGER_MODE
    if mode == EAGER_MODE:
      # Entering graph mode does not provide us with sufficient information to
      # record a context switch; graph-based context switches are only logged
      # when a graph is registered as the default graph.
      # NOTE(review): `eager_mode` here is a module-level name (not defined in
      # this class) — presumably the eager-mode context-manager function;
      # confirm against the rest of the module.
      self.context_switches.push(False, eager_mode, None)
    try:
      yield
    finally:
      # Restore the previous eager flag and, for eager entries, pop the
      # matching context-switch record pushed above.
      ctx.is_eager = old_is_eager
      if mode == EAGER_MODE:
        self.context_switches.pop()
def executing_eagerly(self):
"""Returns True if current thread has eager executing enabled."""
return self._thread_local_data.is_eager
def ones_rank_cache(self):
"""Per-device cache for scalars."""
return _tensor_caches_map[self._id].ones_rank_cache
def zeros_cache(self):
"""Per-device cache for scalars."""
return _tensor_caches_map[self._id].zeros_cache
@property
def scope_name(self):
"""Returns scope name for the current thread."""
return self._thread_local_data.scope_name
@scope_name.setter
def scope_name(self, s):
"""Sets scope name for the current thread."""
self._thread_local_data.scope_name = s
@property
def device_name(self):
"""Returns the device name for the current thread."""
return self._thread_local_data.device_name
@property
def device_spec(self):
"""Returns the device spec for the current thread."""
return self._thread_local_data.device_spec
def _set_device(self, device_name, device_spec):
self._thread_local_data.device_name = device_name
self._thread_local_data.device_spec = device_spec
  def device(self, name):
    """Context-manager to force placement of operations and Tensors on a device.

    Args:
      name: Name of the device or None to get default placement.

    Returns:
      Context manager that forces device placement.

    Raises:
      ValueError: If name is not a string or is an invalid device name.
      RuntimeError: If device scopes are not properly nested.
    """
    # Normalize LogicalDevice / DeviceSpec arguments to a plain string; the
    # string (or None) is validated lazily by _EagerDeviceContext.__enter__.
    if isinstance(name, LogicalDevice):
      name = name.name
    elif pydev.is_device_spec(name):
      name = name.to_string()
    return _EagerDeviceContext(self, name)

  def devices(self):
    """List of the names of devices available to execute operations."""
    # Delegates to the `_devices` property, which raises AssertionError if
    # the context has not been initialized yet.
    return self._devices
  # TODO(fishx): remove this property.
  @property
  def execution_mode(self):
    """Gets execution mode for current thread."""
    return ASYNC if self.is_async() else SYNC

  @execution_mode.setter
  def execution_mode(self, mode):
    """Sets execution mode for current thread.

    Accepts None (treated as SYNC), SYNC, or ASYNC; anything else raises
    ValueError.
    """
    if mode not in (None, SYNC, ASYNC):
      raise ValueError(
          "Execution mode should be None/SYNC/ASYNC. Got %s" % mode)
    if mode is None:
      mode = SYNC
    enable_async = (mode == ASYNC)
    # No-op when the requested mode matches the current one.
    if self.is_async() != enable_async:
      # Only set the execution mode if the context has already been initialized
      if self._context_handle is not None:
        # Drain the current executor before swapping in a new one so pending
        # ops finish under the old mode.
        self.executor.wait()
        executor_new = executor.new_executor(enable_async)
        self._thread_local_data.executor = executor_new
        pywrap_tfe.TFE_ContextSetExecutorForThread(self._context_handle,
                                                   executor_new.handle())
      else:
        # Context not yet created: remember the mode for initialization time.
        self._default_is_async = enable_async
def is_async(self):
if self._context_handle is not None:
return self.executor.is_async()
else:
return self._default_is_async
  @property
  def executor(self):
    """Returns the `Executor` bound to the current thread.

    NOTE(review): this calls the module-level `ensure_initialized()` helper,
    not `self.ensure_initialized()` — presumably both initialize the same
    process-global context; confirm against the rest of the module.
    """
    ensure_initialized()
    return executor.Executor(
        pywrap_tfe.TFE_ContextGetExecutorForThread(self._context_handle))

  @executor.setter
  def executor(self, e):
    # Install `e` as this thread's executor in the C runtime.
    ensure_initialized()
    pywrap_tfe.TFE_ContextSetExecutorForThread(self._context_handle, e.handle())
  @property
  def config(self):
    """Return the ConfigProto with all runtime deltas applied.

    Builds a fresh ConfigProto each call: starts from the constructor-supplied
    config (if any), then layers on every Python-side override recorded on
    this object (JIT level, thread pools, placement flags, optimizer options,
    device counts, GPU options, collective-ops settings).
    """
    # Ensure physical devices have been discovered and config has been imported
    self._initialize_physical_devices()
    config = config_pb2.ConfigProto()
    if self._config is not None:
      config.CopyFrom(self._config)
    if self._optimizer_jit is not None:
      config.graph_options.optimizer_options.global_jit_level = (
          config_pb2.OptimizerOptions.ON_1
          if self._optimizer_jit else config_pb2.OptimizerOptions.OFF)
    if self._intra_op_parallelism_threads is not None:
      config.intra_op_parallelism_threads = self._intra_op_parallelism_threads
    if self._inter_op_parallelism_threads is not None:
      config.inter_op_parallelism_threads = self._inter_op_parallelism_threads

    if self._soft_device_placement is not None:
      config.allow_soft_placement = self._soft_device_placement
    else:
      # Unset: default soft placement to whether we are executing eagerly.
      config.allow_soft_placement = self.executing_eagerly()

    if self._log_device_placement is not None:
      config.log_device_placement = self._log_device_placement

    if self._enable_mlir_bridge is not None:
      config.experimental.enable_mlir_bridge = self._enable_mlir_bridge

    def rewriter_toggle(option):
      # Copy a tri-state (unset/ON/OFF) optimizer option into the proto.
      toggle = self._optimizer_experimental_options.get(option, None)
      if toggle is None:
        return

      setattr(config.graph_options.rewrite_options,
              option,
              (rewriter_config_pb2.RewriterConfig.ON
               if toggle else rewriter_config_pb2.RewriterConfig.OFF))

    def rewriter_bool(option):
      # Copy a plain boolean optimizer option into the proto.
      toggle = self._optimizer_experimental_options.get(option, None)
      if toggle is None:
        return

      setattr(config.graph_options.rewrite_options,
              option,
              toggle)

    rewriter_toggle("layout_optimizer")
    rewriter_toggle("constant_folding")
    rewriter_toggle("shape_optimization")
    rewriter_toggle("remapping")
    rewriter_toggle("arithmetic_optimization")
    rewriter_toggle("dependency_optimization")
    rewriter_toggle("loop_optimization")
    rewriter_toggle("function_optimization")
    rewriter_toggle("debug_stripper")
    rewriter_bool("disable_model_pruning")
    rewriter_toggle("scoped_allocator_optimization")
    rewriter_toggle("pin_to_host_optimization")
    rewriter_toggle("implementation_selector")
    rewriter_toggle("auto_mixed_precision")
    rewriter_bool("disable_meta_optimizer")
    nodes = self._optimizer_experimental_options.get("min_graph_nodes", None)
    if nodes is not None:
      config.graph_options.rewrite_options.min_graph_nodes = nodes

    # Compute device counts
    config.device_count["CPU"] = 0
    config.device_count["GPU"] = 0
    for dev in self._physical_devices:
      if dev not in self._visible_device_list:
        continue
      virtual_devices = self._virtual_device_map.get(dev)
      if virtual_devices is None:
        config.device_count[dev.device_type] += 1
      else:
        # Each virtual device counts as a separate device of that type.
        config.device_count[dev.device_type] += len(virtual_devices)

    # Configure gpu_options
    gpu_options = self._compute_gpu_options()
    config.gpu_options.MergeFrom(gpu_options)

    # Configure collective ops
    if self._collective_leader:
      config.experimental.collective_group_leader = self._collective_leader
    if self._collective_scoped_allocator_enabled_ops:
      rewrite_options = config.graph_options.rewrite_options
      rewrite_options.scoped_allocator_optimization = (
          rewriter_config_pb2.RewriterConfig.ON)
      # Replace (not extend) any pre-existing enable_op list.
      del rewrite_options.scoped_allocator_opts.enable_op[:]
      for op in self._collective_scoped_allocator_enabled_ops:
        rewrite_options.scoped_allocator_opts.enable_op.append(op)
    if self._collective_use_nccl_communication:
      config.experimental.collective_nccl = True
    if self._collective_device_filters:
      del config.device_filters[:]
      for f in self._collective_device_filters:
        config.device_filters.append(f)

    return config
  def _compute_gpu_options(self):
    """Build the GPUOptions proto.

    Derives the visible-device-list string, per-GPU virtual-device configs,
    and the allow_growth flag from this object's recorded GPU settings.
    """
    visible_device_list = []
    virtual_devices = []
    # `gpu_index` counts over *all* physical GPUs so that the visible-device
    # indices refer to positions in the full physical GPU list.
    gpu_index = -1
    memory_growths = set()
    for dev in self.list_physical_devices("GPU"):
      gpu_index += 1

      if dev not in self._visible_device_list:
        continue

      growth = self._memory_growth_map[dev]
      memory_growths.add(growth)
      visible_device_list.append(str(gpu_index))

      if self._virtual_device_map:
        vdevs = self._virtual_device_map.get(dev, [])
        device_limits = []
        for virt_dev in vdevs:
          device_limits.append(virt_dev.memory_limit)

        virtual_devices.append(
            config_pb2.GPUOptions.Experimental.VirtualDevices(
                memory_limit_mb=device_limits))

    # Only compute growth if virtual devices have not been configured and we
    # have GPUs
    if not virtual_devices and memory_growths:
      # All visible GPUs must agree on the growth setting.
      if len(memory_growths) > 1:
        raise ValueError("Memory growth cannot differ between GPU devices")
      allow_growth = memory_growths.pop()
    else:
      allow_growth = None

    return config_pb2.GPUOptions(
        allow_growth=allow_growth,
        visible_device_list=",".join(visible_device_list),
        experimental=config_pb2.GPUOptions.Experimental(
            virtual_devices=virtual_devices))
@property
def function_call_options(self):
"""Returns function call options for current thread.
Note that the returned object is still referenced by the eager context.
Returns: the FunctionCallOptions for current thread.
"""
if self._thread_local_data.function_call_options is None:
config = self.config
# Default to soft placement for functions unless specified
if self._soft_device_placement is None:
config.allow_soft_placement = True
self._thread_local_data.function_call_options = FunctionCallOptions(
config_proto=config)
return self._thread_local_data.function_call_options
@function_call_options.setter
def function_call_options(self, options):
"""Returns function call options for current thread."""
self._thread_local_data.function_call_options = options
  def num_gpus(self):
    """The number of GPUs available to execute operations."""
    # Forces context creation; `_num_gpus` is counted by
    # `_initialize_logical_devices`.
    self.ensure_initialized()
    return self._num_gpus
  def add_function(self, fn):
    """Add a function definition to the context.

    Once added, the function (identified by its name) can be executed like any
    other operation.

    Args:
      fn: A wrapped TF_Function (returned from TF_GraphToFunction_wrapper).
    """
    self.ensure_initialized()
    pywrap_tfe.TFE_ContextAddFunction(self._handle, fn)

  def add_function_def(self, fdef):
    """Add a function definition to the context.

    Once added, the function (identified by its name) can be executed like any
    other operation.

    Args:
      fdef: A FunctionDef protocol buffer message.
    """
    self.ensure_initialized()
    # The C API takes the serialized proto plus its byte length.
    fdef_string = fdef.SerializeToString()
    pywrap_tfe.TFE_ContextAddFunctionDef(self._handle, fdef_string,
                                         len(fdef_string))

  def remove_function(self, name):
    """Remove a function from the context.

    Once removed, the function cannot be executed anymore.

    Args:
      name: function signature name.
    """
    self.ensure_initialized()
    pywrap_tfe.TFE_ContextRemoveFunction(self._handle, name)

  def has_function(self, name):
    """Check if a function `name` is registered."""
    self.ensure_initialized()
    return bool(pywrap_tfe.TFE_ContextHasFunction(self._handle, name))
def add_op_callback(self, callback):
"""Add a post-op callback to the context.
A post-op callback is invoked immediately after an eager operation or
function has finished execution or after a op has been added to a graph,
providing access to the op's type, name input and output tensors. Multiple
op callbacks can be added, in which case the callbacks will be invoked in
the order in which they are added.
Args:
callback: a callable of the signature
`f(op_type, inputs, attrs, outputs, op_name=None, graph=None)`.
See doc strings in `op_callbacks.py` for details on the function
signature and its semantics.
"""
if callback not in self._thread_local_data.op_callbacks:
self._thread_local_data.op_callbacks.append(callback)
def remove_op_callback(self, callback):
"""Remove an already-registered op callback.
Args:
callback: The op callback to be removed.
Raises:
KeyError: If `callback` is not already registered.
"""
if callback not in self._thread_local_data.op_callbacks:
raise KeyError(
"The specified op callback has not been registered, "
"and hence cannot be removed.")
del self._thread_local_data.op_callbacks[
self._thread_local_data.op_callbacks.index(callback)]
@property
def op_callbacks(self):
return self._thread_local_data.op_callbacks
@property
def invoking_op_callbacks(self):
return self._thread_local_data.invoking_op_callbacks
@invoking_op_callbacks.setter
def invoking_op_callbacks(self, value):
self._thread_local_data.invoking_op_callbacks = value
  def _initialize_physical_devices(self):
    """Get local devices visible to the system.

    Lazily populates `_physical_devices`, `_visible_device_list` and
    `_memory_growth_map`, then applies any constructor-supplied ConfigProto
    via `_import_config`. Idempotent; guarded by `_device_lock`.
    """
    # We lazy initialize self._physical_devices since we do not want to do this
    # the constructor since the backend may not be initialized yet.
    with self._device_lock:
      if self._physical_devices is not None:
        return

      devs = pywrap_tfe.TF_ListPhysicalDevices()
      # NOTE(review): `d.decode().split(":")[1]` assumes names of the form
      # "<prefix>:<device_type>:<index>" — confirm against the C API.
      self._physical_devices = [
          PhysicalDevice(name=d.decode(),
                         device_type=d.decode().split(":")[1]) for d in devs]
      # Construct the visible device list from all physical devices but ignore
      # XLA devices
      self._visible_device_list = [
          d for d in self._physical_devices
          if not d.device_type.startswith("XLA")
      ]
      # Memory growth is tracked per GPU only; None means "not configured".
      self._memory_growth_map = {
          d: None for d in self._physical_devices if d.device_type == "GPU"
      }

    # Import device settings that may have been passed into the constructor
    self._import_config()
def list_physical_devices(self, device_type=None):
"""List local devices visible to the system.
This API allows a client to query the devices before they have been
initialized by the eager runtime. Additionally a user can filter by device
type, to get only CPUs or GPUs.
Args:
device_type: Optional device type to limit results to
Returns:
List of PhysicalDevice objects.
"""
self._initialize_physical_devices()
if device_type is None:
return list(self._physical_devices)
return [d for d in self._physical_devices if d.device_type == device_type]
  def _import_config(self):
    """Import config if passed in during construction.

    If Context was created with a ConfigProto such as when calling
    tf.compat.v1.enable_eager_execution(), then we need to pull out the
    various pieces we might be replacing and import then into our internal
    class representation.
    """
    if self._config is None:
      return

    num_cpus = self._config.device_count.get("CPU", 1)
    if num_cpus != 1:
      cpus = [d for d in self._physical_devices if d.device_type == "CPU"]
      if num_cpus == 0:
        # Hide all CPU devices.
        self.set_visible_devices([], "CPU")
      elif num_cpus > 1:
        # Split the first physical CPU into `num_cpus` virtual devices.
        # NOTE(review): raises IndexError if no CPU physical device exists —
        # presumably there is always at least one; confirm.
        self.set_logical_device_configuration(
            cpus[0], [LogicalDeviceConfiguration() for _ in range(num_cpus)])

    # Parse GPU options
    gpus = [d for d in self._physical_devices if d.device_type == "GPU"]

    # If there are no GPUs detected, simply ignore all the GPU options passed in
    # rather than doing any validation checks.
    if not gpus:
      return

    gpu_count = self._config.device_count.get("GPU", None)

    visible_gpus = []
    # TODO(gjn): Handle importing existing virtual GPU configuration
    visible_indices = self._config.gpu_options.visible_device_list
    if visible_indices:
      # visible_device_list is a comma-separated list of indices into `gpus`.
      for index in visible_indices.split(","):
        if int(index) >= len(gpus):
          raise ValueError("Invalid visible device index: %s" % index)
        visible_gpus.append(gpus[int(index)])
    else:
      visible_gpus = gpus

    if gpu_count is not None:
      # device_count caps how many of the visible GPUs are actually used.
      visible_gpus = visible_gpus[:gpu_count]

    self.set_visible_devices(visible_gpus, "GPU")
def list_logical_devices(self, device_type=None):
"""Return logical devices."""
self.ensure_initialized()
if device_type is None:
return list(self._logical_devices)
return [d for d in self._logical_devices if d.device_type == device_type]
def get_visible_devices(self, device_type=None):
"""Get the list of visible devices."""
self._initialize_physical_devices()
if device_type is None:
return list(self._visible_device_list)
return [
d for d in self._visible_device_list if d.device_type == device_type
]
def set_visible_devices(self, devices, device_type=None):
"""Set the list of visible devices."""
self._initialize_physical_devices()
if not isinstance(devices, list):
devices = [devices]
for d in devices:
if d not in self._physical_devices:
raise ValueError("Unrecognized device: %s" % repr(d))
if device_type is not None and d.device_type != device_type:
raise ValueError("Unrecognized device: %s" % repr(d))
visible_device_list = []
if device_type is not None:
visible_device_list = [
d for d in self._visible_device_list if d.device_type != device_type
]
visible_device_list += devices
if self._visible_device_list == visible_device_list:
return
if self._context_handle is not None:
raise RuntimeError(
"Visible devices cannot be modified after being initialized")
self._visible_device_list = visible_device_list
def get_memory_growth(self, dev):
"""Get if memory growth is enabled for a PhysicalDevice."""
self._initialize_physical_devices()
if dev not in self._physical_devices:
raise ValueError("Unrecognized device: %s" % repr(dev))
return self._memory_growth_map[dev]
def set_memory_growth(self, dev, enable):
"""Set if memory growth should be enabled for a PhysicalDevice."""
self._initialize_physical_devices()
if dev not in self._physical_devices:
raise ValueError("Unrecognized device: %s" % repr(dev))
if dev in self._virtual_device_map:
raise ValueError(
"Cannot set memory growth on device when virtual devices configured")
if dev.device_type != "GPU":
raise ValueError("Cannot set memory growth on non-GPU devices")
if self._memory_growth_map.get(dev) == enable:
return
if self._context_handle is not None:
raise RuntimeError(
"Physical devices cannot be modified after being initialized")
self._memory_growth_map[dev] = enable
def get_logical_device_configuration(self, dev):
"""Get the virtual device configuration for a PhysicalDevice."""
self._initialize_physical_devices()
if dev not in self._physical_devices:
raise ValueError("Unrecognized device: %s" % repr(dev))
return self._virtual_device_map.get(dev)
def set_logical_device_configuration(self, dev, virtual_devices):
"""Set the virtual device configuration for a PhysicalDevice."""
self._initialize_physical_devices()
if dev not in self._physical_devices:
raise ValueError("Unrecognized device: %s" % repr(dev))
if dev.device_type == "CPU":
for vdev in virtual_devices:
if vdev.memory_limit is not None:
raise ValueError("Setting memory limit on CPU virtual devices is "
"currently not supported")
elif dev.device_type == "GPU":
for vdev in virtual_devices:
if vdev.memory_limit is None:
raise ValueError(
"Setting memory limit is required for GPU virtual devices")
else:
raise ValueError("Virtual devices are not supported for %s" %
dev.device_type)
if self._virtual_device_map.get(dev) == virtual_devices:
return
if self._context_handle is not None:
raise RuntimeError(
"Virtual devices cannot be modified after being initialized")
self._virtual_device_map[dev] = virtual_devices
@property
def enable_mlir_bridge(self):
return self._enable_mlir_bridge
@enable_mlir_bridge.setter
def enable_mlir_bridge(self, enabled):
self._enable_mlir_bridge = enabled
self._thread_local_data.function_call_options = None
@property
def optimizer_jit(self):
level = self.config.graph_options.optimizer_options.global_jit_level
return (level == config_pb2.OptimizerOptions.ON_1 or
level == config_pb2.OptimizerOptions.ON_2)
@optimizer_jit.setter
def optimizer_jit(self, enabled):
self._optimizer_jit = enabled
self._thread_local_data.function_call_options = None
def get_optimizer_experimental_options(self):
"""Get experimental options for the optimizer.
Returns:
Dictionary of current option values
"""
rewrite_options = self.config.graph_options.rewrite_options
options = {}
def rewriter_toggle(option):
attr = getattr(rewrite_options, option)
if attr != 0:
options[option] = (attr == rewriter_config_pb2.RewriterConfig.ON)
def rewriter_bool(option):
options[option] = getattr(rewrite_options, option)
rewriter_toggle("layout_optimizer")
rewriter_toggle("constant_folding")
rewriter_toggle("shape_optimization")
rewriter_toggle("remapping")
rewriter_toggle("arithmetic_optimization")
rewriter_toggle("dependency_optimization")
rewriter_toggle("loop_optimization")
rewriter_toggle("function_optimization")
rewriter_toggle("debug_stripper")
rewriter_bool("disable_model_pruning")
rewriter_toggle("scoped_allocator_optimization")
rewriter_toggle("pin_to_host_optimization")
rewriter_toggle("implementation_selector")
rewriter_toggle("auto_mixed_precision")
rewriter_bool("disable_meta_optimizer")
if rewrite_options.min_graph_nodes != 0:
options["min_graph_nodes"] = rewrite_options.min_graph_nodes
return options
def set_optimizer_experimental_options(self, options):
"""Set experimental options for the optimizer.
Args:
options: Dictionary of options to modify
"""
self._optimizer_experimental_options.update(options)
self._thread_local_data.function_call_options = None
@property
def intra_op_parallelism_threads(self):
return self.config.intra_op_parallelism_threads
@intra_op_parallelism_threads.setter
def intra_op_parallelism_threads(self, num_threads):
if self._intra_op_parallelism_threads == num_threads:
return
if self._context_handle is not None:
raise RuntimeError(
"Intra op parallelism cannot be modified after initialization.")
self._intra_op_parallelism_threads = num_threads
@property
def inter_op_parallelism_threads(self):
return self.config.inter_op_parallelism_threads
@inter_op_parallelism_threads.setter
def inter_op_parallelism_threads(self, num_threads):
if self._inter_op_parallelism_threads == num_threads:
return
if self._context_handle is not None:
raise RuntimeError(
"Inter op parallelism cannot be modified after initialization.")
self._inter_op_parallelism_threads = num_threads
@property
def soft_device_placement(self):
return self.config.allow_soft_placement
@soft_device_placement.setter
def soft_device_placement(self, enabled):
self._soft_device_placement = enabled
self._thread_local_data.function_call_options = None
@property
def log_device_placement(self):
return self.config.log_device_placement
@log_device_placement.setter
def log_device_placement(self, enabled):
if self._log_device_placement == enabled:
return
if self._context_handle is not None:
raise RuntimeError(
"Device placement logging must be set at program startup")
self._log_device_placement = enabled
self._thread_local_data.function_call_options = None
@property
def device_policy(self):
# Only get the policy from the context if it has already been initialized
if self._context_handle is not None:
return pywrap_tfe.TFE_ContextGetDevicePlacementPolicy(self._handle)
return self._device_policy
@device_policy.setter
def device_policy(self, policy):
if policy is None:
policy = DEVICE_PLACEMENT_SILENT
if self._device_policy != policy:
self._device_policy = policy
# Only set the policy if the context has already been initialized
if self._context_handle is not None:
pywrap_tfe.TFE_ContextSetThreadLocalDevicePlacementPolicy(
self._handle, self._device_policy)
@property
def mirroring_policy(self):
# Only get the policy from the context if it has already been initialized
if self._context_handle is not None:
return pywrap_tfe.TFE_ContextGetMirroringPolicy(self._handle)
return self._mirroring_policy
@mirroring_policy.setter
def mirroring_policy(self, policy):
if policy is None:
policy = MIRRORING_NONE
if self._mirroring_policy is None or self._mirroring_policy != policy:
self._mirroring_policy = policy
# Only set the policy if the context has already been initialized
if self._context_handle is not None:
pywrap_tfe.TFE_ContextSetThreadLocalMirroringPolicy(
self._handle, self._mirroring_policy)
@property
def lazy_remote_inputs_copy(self):
return self._lazy_remote_inputs_copy
@lazy_remote_inputs_copy.setter
def lazy_remote_inputs_copy(self, lazy_copy):
"""Sets whether to copy remote inputs lazily for functions."""
if not isinstance(lazy_copy, bool):
raise ValueError("Expecting a boolean but got %s" % type(lazy_copy))
if self._lazy_remote_inputs_copy != lazy_copy:
if self._initialized:
raise ValueError(
"lazy_remote_inputs_copy should be set before being initialized.")
self._lazy_remote_inputs_copy = lazy_copy
  def enable_run_metadata(self):
    """Enables tracing of op execution via RunMetadata.

    To retrieve the accumulated metadata call context.export_run_metadata()
    and to stop tracing call context.disable_run_metadata().
    """
    self.ensure_initialized()
    pywrap_tfe.TFE_ContextEnableRunMetadata(self._handle)

  def disable_run_metadata(self):
    """Disables tracing of op execution via RunMetadata."""
    # No context means tracing was never enabled; nothing to do.
    if not self._context_handle:
      return
    pywrap_tfe.TFE_ContextDisableRunMetadata(self._context_handle)

  def enable_graph_collection(self):
    """Enables graph collection of executed functions.

    To retrieve the accumulated graphs call context.export_run_metadata()
    and to stop collecting graphs call context.disable_graph_collection().
    """
    self.ensure_initialized()
    pywrap_tfe.TFE_ContextEnableGraphCollection(self._handle)

  def disable_graph_collection(self):
    """Disables graph collection of executed functions."""
    # No context means collection was never enabled; nothing to do.
    if not self._context_handle:
      return
    pywrap_tfe.TFE_ContextDisableGraphCollection(self._context_handle)
  def export_run_metadata(self):
    """Returns a RunMetadata proto with accumulated information.

    The returned protocol buffer contains information since the most recent call
    to either enable_run_metadata or export_run_metadata.

    Returns:
      A RunMetadata protocol buffer. Or None if not enabled.
    """
    if not self._context_handle:
      return None
    # The C buffer is only valid inside the `with`; copy its contents out
    # before it is released.
    with c_api_util.tf_buffer() as buffer_:
      pywrap_tfe.TFE_ContextExportRunMetadata(self._context_handle, buffer_)
      proto_data = pywrap_tfe.TF_GetBuffer(buffer_)
    run_metadata = config_pb2.RunMetadata()
    run_metadata.ParseFromString(compat.as_bytes(proto_data))
    return run_metadata
@property
def context_switches(self):
"""Returns a stack of context switches."""
return self._context_switches
  def start_step(self):
    # Notifies the runtime that a step has started; presumably used to scope
    # per-step resources in the C runtime — confirm against the C API docs.
    pywrap_tfe.TFE_ContextStartStep(self._handle)

  def end_step(self):
    # Notifies the runtime that the current step has ended.
    pywrap_tfe.TFE_ContextEndStep(self._handle)
class _EagerDeviceContext(object):
  """Context-manager forcing placement of ops and Tensors on a device.

  Parsed/merged device specs are memoized in the module-level
  `_device_parsing_cache`, keyed by (old device name, requested device name),
  so repeated `with device(...)` blocks avoid re-parsing.
  """

  def __init__(self, ctx, device_name):
    self._device_name = device_name
    self._ctx = ctx
    # Stack of (old_name, old_spec, new_spec) to support re-entrant use of
    # the same manager instance.
    self._stack = []

  def __enter__(self):
    ctx = self._ctx
    old_device_name = ctx.device_name
    old_device_spec = ctx.device_spec
    new_device_name = self._device_name
    cache_key = (old_device_name, new_device_name)
    try:
      new_device_name, new_device_spec = _device_parsing_cache[cache_key]
    except TypeError:
      # Error while trying to compute the cache key.
      raise ValueError("Expecting a string device name. Got %s(%s)" %
                       (type(new_device_name), new_device_name))
    except KeyError:
      # Handle a cache miss.
      if new_device_name is not None:
        if not isinstance(new_device_name, six.string_types):
          raise ValueError("Expecting a string device name. Got %s(%s)" %
                           (type(new_device_name), new_device_name))
        device_spec = pydev.DeviceSpec.from_string(new_device_name)
        if old_device_name:
          # Merge the requested device onto the current device spec so that
          # unspecified fields are inherited from the enclosing scope.
          new_device_spec = copy.copy(old_device_spec)
        else:
          ctx.ensure_initialized()
          new_device_spec = pydev.DeviceSpec.from_string(
              ctx._context_devices[0])  # pylint: disable=protected-access
        new_device_spec = new_device_spec.make_merged_spec(device_spec)
      else:
        # device(None) clears the device scope.
        new_device_spec = pydev.DeviceSpec.from_string("")
      new_device_name = new_device_spec.to_string()
      _device_parsing_cache[cache_key] = (new_device_name, new_device_spec)
    ctx._set_device(new_device_name, new_device_spec)  # pylint: disable=protected-access
    self._stack.append((old_device_name, old_device_spec, new_device_spec))

  def __exit__(self, *ex_info):
    ctx = self._ctx
    old_device_name, old_device_spec, new_device_spec = self._stack[-1]
    # Identity check (`is not`) deliberately detects improperly interleaved
    # device scopes rather than merely equal-looking specs.
    if ctx.device_spec is not new_device_spec:
      raise RuntimeError(
          "Exiting device scope without proper scope nesting")
    del self._stack[-1]
    ctx._set_device(old_device_name, old_device_spec)  # pylint: disable=protected-access
# Do not set directly. Use _set_context.
_context = None
_context_lock = threading.Lock()


def _set_context_locked(ctx):
  """Installs `ctx` as the singleton context. Caller must hold _context_lock."""
  global _context
  # Also register the context with the C extension so fast-path code sees it.
  pywrap_tfe.TFE_Py_SetEagerContext(ctx)
  _context = ctx


def _set_context(ctx):
  with _context_lock:
    _set_context_locked(ctx)


def _create_context():
  # Double-checked creation: the unlocked check in context() is re-verified
  # here under the lock before constructing a new Context.
  with _context_lock:
    if _context is None:
      ctx = Context()
      _set_context_locked(ctx)


def _reset_context():
  """Clears and re-initializes the singleton context.

  Should only be used for testing.
  """
  global _context
  with _context_lock:
    if _context is not None:
      _context = None
  # NOTE: _create_context re-acquires the (non-reentrant) lock, so it must be
  # called outside the `with` block above.
  _create_context()
def context():
  """Returns a singleton context object."""
  # Unlocked fast path; _create_context re-checks under the lock.
  if _context is None:
    _create_context()
  return _context


def context_safe():
  """Returns current context (or None if one hasn't been initialized)."""
  return _context


def ensure_initialized():
  """Initialize the context."""
  context().ensure_initialized()


def set_global_seed(seed):
  """Sets the eager mode seed."""
  context()._set_global_seed(seed)  # pylint: disable=protected-access


def global_seed():
  """Returns the eager mode seed."""
  return context()._seed  # pylint: disable=protected-access


def internal_operation_seed():
  """Returns the operation seed generated based on global seed."""
  return context()._internal_operation_seed()  # pylint: disable=protected-access
@tf_export("executing_eagerly", v1=[])
def executing_eagerly():
  """Checks whether the current thread has eager execution enabled.

  Eager execution is enabled by default and this API returns `True`
  in most of cases. However, this API might return `False` in the following use
  cases.

  * Executing inside `tf.function`, unless under `tf.init_scope` or
    `tf.config.experimental_run_functions_eagerly(True)` is previously called.
  * Executing inside a transformation function for `tf.dataset`.
  * `tf.compat.v1.disable_eager_execution()` is called.

  General case:

  >>> print(tf.executing_eagerly())
  True

  Inside `tf.function`:

  >>> @tf.function
  ... def fn():
  ...   with tf.init_scope():
  ...     print(tf.executing_eagerly())
  ...   print(tf.executing_eagerly())
  >>> fn()
  True
  False

  Inside `tf.function` after
  `tf.config.experimental_run_functions_eagerly(True)` is called:

  >>> tf.config.experimental_run_functions_eagerly(True)
  >>> @tf.function
  ... def fn():
  ...   with tf.init_scope():
  ...     print(tf.executing_eagerly())
  ...   print(tf.executing_eagerly())
  >>> fn()
  True
  True
  >>> tf.config.experimental_run_functions_eagerly(False)

  Inside a transformation function for `tf.dataset`:

  >>> def data_fn(x):
  ...   print(tf.executing_eagerly())
  ...   return x
  >>> dataset = tf.data.Dataset.range(100)
  >>> dataset = dataset.map(data_fn)
  False

  Returns:
    `True` if the current thread has eager execution enabled.
  """
  ctx = context_safe()
  if ctx is None:
    # No context yet: fall back to the process-wide default execution mode.
    return default_execution_mode == EAGER_MODE
  return ctx.executing_eagerly()
@tf_export(v1=["executing_eagerly"])
def executing_eagerly_v1():
  """Checks whether the current thread has eager execution enabled.

  Eager execution is typically enabled via
  `tf.compat.v1.enable_eager_execution`, but may also be enabled within the
  context of a Python function via tf.contrib.eager.py_func.

  When eager execution is enabled, returns `True` in most cases. However,
  this API might return `False` in the following use cases.

  * Executing inside `tf.function`, unless under `tf.init_scope` or
    `tf.config.experimental_run_functions_eagerly(True)` is previously called.
  * Executing inside a transformation function for `tf.dataset`.
  * `tf.compat.v1.disable_eager_execution()` is called.

  >>> tf.compat.v1.enable_eager_execution()

  General case:

  >>> print(tf.executing_eagerly())
  True

  Inside `tf.function`:

  >>> @tf.function
  ... def fn():
  ...   with tf.init_scope():
  ...     print(tf.executing_eagerly())
  ...   print(tf.executing_eagerly())
  >>> fn()
  True
  False

  Inside `tf.function`
  after `tf.config.experimental_run_functions_eagerly(True)` is called:

  >>> tf.config.experimental_run_functions_eagerly(True)
  >>> @tf.function
  ... def fn():
  ...   with tf.init_scope():
  ...     print(tf.executing_eagerly())
  ...   print(tf.executing_eagerly())
  >>> fn()
  True
  True
  >>> tf.config.experimental_run_functions_eagerly(False)

  Inside a transformation function for `tf.dataset`:

  >>> def data_fn(x):
  ...   print(tf.executing_eagerly())
  ...   return x
  >>> dataset = tf.data.Dataset.range(100)
  >>> dataset = dataset.map(data_fn)
  False

  Returns:
    `True` if the current thread has eager execution enabled.
  """
  # The v1 symbol is an alias for the v2 behavior.
  return executing_eagerly()


def in_eager_mode():
  """Use executing_eagerly() instead. This function will be removed."""
  return executing_eagerly()
def shared_name(name=None):
  """Returns the anonymous shared name GUID if no shared name is specified.

  In eager mode we need to use a unique shared name to avoid spurious sharing
  issues. The runtime generates a unique name on our behalf when the reserved
  GUID is used as a shared name.

  Args:
    name: Optional shared name

  Returns:
    Eager compatible shared name.
  """
  if not name and executing_eagerly():
    # Reserved GUID: the runtime substitutes a unique name for it, avoiding
    # spurious sharing when eager execution is enabled.
    return "cd2c89b7-88b7-44c8-ad83-06c2a9158347"
  return name
def graph_mode():
  """Context-manager to disable eager execution for the current thread."""
  return context()._mode(GRAPH_MODE)  # pylint: disable=protected-access


def eager_mode():
  """Context-manager to enable eager execution for the current thread."""
  return context()._mode(EAGER_MODE)  # pylint: disable=protected-access


def scope_name():
  """Name of the current scope."""
  return context().scope_name
def device(name):
  """Context-manager to force placement of operations and Tensors on a device.

  Example:
  ```python
  with tf.device('gpu:0'):
    with tf.device('cpu:0'):
      shape = tf.constant([], dtype=tf.int32)
    x = tf.random.truncated_normal(shape, tf.float32)
  ```
  will ensure that the `shape` Tensor is on CPU but the `truncated_normal`
  operation runs on GPU 0.

  Args:
    name: Name of the device (see context().devices()), or None to
      perform automatic placement.

  Returns:
    Context manager for setting the device.
  """
  # Device placement needs the context's device list, so initialize eagerly.
  ensure_initialized()
  return context().device(name)
@tf_export("debugging.get_log_device_placement")
def get_log_device_placement():
  """Get if device placements are logged.

  Returns:
    If device placements are logged.
  """
  return context().log_device_placement


@tf_export("debugging.set_log_device_placement")
def set_log_device_placement(enabled):
  """Set if device placements should be logged.

  Args:
    enabled: Whether to enabled device placement logging.
  """
  context().log_device_placement = enabled
@tf_contextlib.contextmanager
def device_policy(policy):
  """Context manager for setting device placement policy for current thread."""
  ctx = context()
  old_policy = ctx.device_policy
  try:
    ctx.device_policy = policy
    yield
  finally:
    # Restore the previous policy even if the body raised.
    ctx.device_policy = old_policy


@tf_contextlib.contextmanager
def mirroring_policy(policy):
  """Context manager for setting mirroring policy for current thread."""
  ctx = context()
  old_policy = ctx.mirroring_policy
  try:
    ctx.mirroring_policy = policy
    yield
  finally:
    ctx.mirroring_policy = old_policy


def set_execution_mode(mode):
  """Sets execution mode for the current thread."""
  context().execution_mode = mode
# TODO(fishx): remove this method.
@tf_contextlib.contextmanager
def execution_mode(mode):
  """Context manager for setting execution mode for current thread."""
  ctx = context()
  # Swap in a fresh executor (async iff mode == ASYNC) for the scope's
  # duration, draining pending work on both transitions.
  executor_new = executor.new_executor(mode == ASYNC)
  executor_old = ctx.executor
  try:
    executor_old.wait()
    ctx.executor = executor_new
    yield
  finally:
    ctx.executor = executor_old
    executor_new.wait()
@tf_contextlib.contextmanager
def executor_scope(e):
  """Context manager for changing executor for current thread.

  Args:
    e: A Executor to execute eager ops under this scope. Setting it to None will
      switch back to use the default executor for the context.

  Yields:
    Context manager for setting the executor for current thread.
  """
  ctx = context()
  executor_old = ctx.executor
  try:
    ctx.executor = e
    yield
  finally:
    # Always restore the previous executor.
    ctx.executor = executor_old
@tf_export("experimental.function_executor_type")
@tf_contextlib.contextmanager
def function_executor_type(executor_type):
  """Context manager for setting the executor of eager defined functions.

  Eager defined functions are functions decorated by tf.contrib.eager.defun.

  Args:
    executor_type: a string for the name of the executor to be used to execute
      functions defined by tf.contrib.eager.defun.

  Yields:
    Context manager for setting the executor of eager defined functions.
  """
  # Snapshot the current options so any nested mutations are rolled back too.
  current_options = context().function_call_options
  old_options = copy.copy(current_options)
  try:
    current_options.executor_type = executor_type
    yield
  finally:
    context().function_call_options = old_options
def is_async():
  """Returns true if current thread is in async mode."""
  return context().is_async()


def async_wait():
  """Waits for ops dispatched in ASYNC mode to finish."""
  return context().executor.wait()


def async_clear_error():
  """Clears errors raised during ASYNC execution mode."""
  return context().executor.clear_error()


def num_gpus():
  """Get the number of available GPU devices.

  Returns:
    The number of available GPU devices.
  """
  return context().num_gpus()


def enable_run_metadata():
  """Enables tracing of op execution via RunMetadata.

  To retrieve the accumulated metadata call context.export_run_metadata()
  and to stop tracing call context.disable_run_metadata().
  """
  context().enable_run_metadata()


def disable_run_metadata():
  """Disables tracing of op execution via RunMetadata."""
  context().disable_run_metadata()


def enable_graph_collection():
  """Enables graph collection of executed functions.

  To retrieve the accumulated graphs call context.export_run_metadata()
  and to stop collecting graphs call context.disable_graph_collection().
  """
  context().enable_graph_collection()


def disable_graph_collection():
  """Disables graph collection of executed functions."""
  context().disable_graph_collection()


def export_run_metadata():
  """Returns a RunMetadata proto with accumulated information.

  The returned protocol buffer contains information since the most recent call
  to either enable_run_metadata or export_run_metadata.

  Returns:
    A RunMetadata protocol buffer.
  """
  return context().export_run_metadata()
@contextlib.contextmanager
def collect_optimized_graphs():
  """Collects a flat list of post-optimization graphs.

  The collected graphs include device placements, which can be useful for
  testing.

  Usage:

  ```
  @def_function.function
  def f(x):
    return x + constant_op.constant(1.)

  with context.collect_optimized_graphs() as graphs:
    with ops.device("CPU:0"):
      f(constant_op.constant(1.))

  graph, = graphs  # `graph` contains a single GraphDef for inspection
  ```

  Yields:
    A list of GraphDefs, populated when the context manager exits.
  """
  ctx = context()
  ctx.enable_graph_collection()
  try:
    graphs = []
    yield graphs
    # Export while collection is still enabled; the yielded list is filled in
    # below, i.e. only after the `with` body completes.
    metadata = ctx.export_run_metadata()
  finally:
    ctx.disable_graph_collection()
  for graph in metadata.function_graphs:
    graphs.append(graph.post_optimization_graph)
def get_server_def():
  # Thin delegators to the singleton context's distributed-runtime API.
  return context().get_server_def()


def set_server_def(server_def):
  context().set_server_def(server_def)


def update_server_def(server_def):
  context().update_server_def(server_def)


def check_alive(worker_name):
  return context().check_alive(worker_name)


def add_function(fdef):
  """Add a function definition to the context."""
  context().add_function(fdef)


def remove_function(name):
  """Remove a function from the context."""
  context().remove_function(name)
# Not every user creates a Context via context.context()
# (for example, enable_eager_execution in python/framework/ops.py),
# but they do all import this file. Note that IS_IN_GRAPH_MODE and
# in_graph_mode are both parameterless functions.
def _tmp_in_graph_mode():
  if context_safe() is None:
    # Context not yet initialized. Assume graph mode following the
    # default implementation in `is_in_graph_mode`.
    return True
  return not executing_eagerly()


# Install the eager-aware check as the process-wide graph-mode predicate.
is_in_graph_mode.IS_IN_GRAPH_MODE = _tmp_in_graph_mode
| 33.054234 | 89 | 0.720078 |
1aeafcb308af7baaf4d71709f85254fea9e11e72 | 4,195 | py | Python | tacker/vnfm/mgmt_drivers/openwrt/openwrt.py | takahashi-tsc/tacker | a0ae01a13dcc51bb374060adcbb4fd484ab37156 | [
"Apache-2.0"
] | 116 | 2015-10-18T02:57:08.000Z | 2022-03-15T04:09:18.000Z | tacker/vnfm/mgmt_drivers/openwrt/openwrt.py | takahashi-tsc/tacker | a0ae01a13dcc51bb374060adcbb4fd484ab37156 | [
"Apache-2.0"
] | 6 | 2016-11-07T22:15:54.000Z | 2021-05-09T06:13:08.000Z | tacker/vnfm/mgmt_drivers/openwrt/openwrt.py | takahashi-tsc/tacker | a0ae01a13dcc51bb374060adcbb4fd484ab37156 | [
"Apache-2.0"
] | 166 | 2015-10-20T15:31:52.000Z | 2021-11-12T08:39:49.000Z | # Copyright 2015 Intel Corporation.
# All Rights Reserved.
#
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
import yaml
from tacker._i18n import _
from tacker.common import cmd_executer
from tacker.common import exceptions
from tacker.common import log
from tacker.vnfm.mgmt_drivers import abstract_driver
from tacker.vnfm.mgmt_drivers import constants as mgmt_constants
LOG = logging.getLogger(__name__)
# oslo.config options for SSH access to the OpenWRT guest, registered under
# the [openwrt] group.
OPTS = [
    cfg.StrOpt('user', default='root', help=_('User name to login openwrt')),
    cfg.StrOpt('password', default='', help=_('Password to login openwrt')),
]
cfg.CONF.register_opts(OPTS, 'openwrt')


def config_opts():
    # Exposes this module's options for oslo.config option discovery
    # (e.g. sample-config generation).
    return [('openwrt', OPTS)]
class VnfMgmtOpenWRT(abstract_driver.VnfMGMTAbstractDriver):
    """Management driver that pushes UCI configuration to OpenWRT VDUs.

    Config snippets found in the VNF attributes are imported via ``uci`` over
    SSH and the corresponding service is restarted.
    """

    # Services whose configuration this driver knows how to apply.
    _KNOWN_SERVICES = ('firewall', 'network', 'dhcp', 'qos')

    def get_type(self):
        return 'openwrt'

    def get_name(self):
        return 'openwrt'

    def get_description(self):
        return 'Tacker VNFMgmt OpenWRT Driver'

    def mgmt_ip_address(self, plugin, context, vnf):
        """Return the VNF's management IP address attribute ('' if absent)."""
        LOG.debug('mgmt_ip_address %s', vnf)
        return vnf.get('mgmt_ip_address', '')

    @log.log
    def _config_service(self, mgmt_ip_address, service, config):
        """Import `config` for `service` on the host and restart the service.

        Args:
            mgmt_ip_address: SSH-reachable management address of the VDU.
            service: UCI config name ('firewall', 'network', 'dhcp', 'qos').
            config: UCI configuration text fed to ``uci import`` via stdin.

        Raises:
            exceptions.MgmtDriverException: on any remote-execution failure.
        """
        user = cfg.CONF.openwrt.user
        password = cfg.CONF.openwrt.password
        # The 'dhcp' UCI config is served by the dnsmasq init script.
        package = 'dnsmasq' if service == 'dhcp' else service
        try:
            cmd = "uci import %s; /etc/init.d/%s restart" % (service, package)
            LOG.debug('execute command: %(cmd)s on mgmt_ip_address '
                      '%(mgmt_ip)s',
                      {'cmd': cmd,
                       'mgmt_ip': mgmt_ip_address})
            commander = cmd_executer.RemoteCommandExecutor(
                user, password, mgmt_ip_address)
            commander.execute_command(cmd, input_data=config)
        except Exception as ex:
            LOG.error("While executing command on remote "
                      "%(mgmt_ip)s: %(exception)s",
                      {'mgmt_ip': mgmt_ip_address,
                       'exception': ex})
            raise exceptions.MgmtDriverException()

    @log.log
    def mgmt_call(self, plugin, context, vnf, kwargs):
        """Apply per-VDU service configuration when a VNF is updated."""
        if (kwargs[mgmt_constants.KEY_ACTION] !=
                mgmt_constants.ACTION_UPDATE_VNF):
            return
        dev_attrs = vnf.get('attributes', {})
        mgmt_ip_address = jsonutils.loads(vnf.get('mgmt_ip_address', '{}'))
        if not mgmt_ip_address:
            return
        vdus_config = dev_attrs.get('config', '')
        config_yaml = yaml.safe_load(vdus_config)
        if not config_yaml:
            return
        vdus_config_dict = config_yaml.get('vdus', {})
        for vdu, vdu_dict in vdus_config_dict.items():
            # Bug fix: the original rebound ``mgmt_ip_address`` (the mapping
            # of VDU name -> address) to a single VDU's address inside the
            # inner loop, which broke the second configured service of a VDU
            # and every subsequent VDU. Use a dedicated local instead.
            vdu_mgmt_ip = mgmt_ip_address.get(vdu, '')
            config = vdu_dict.get('config', {})
            for key, conf_value in config.items():
                if key not in self._KNOWN_SERVICES:
                    continue
                if not vdu_mgmt_ip:
                    LOG.warning('tried to configure unknown mgmt '
                                'address on VNF %(vnf)s VDU %(vdu)s',
                                {'vnf': vnf.get('name'),
                                 'vdu': vdu})
                    continue
                if isinstance(vdu_mgmt_ip, list):
                    # A VDU may expose several management addresses.
                    for ip_address in vdu_mgmt_ip:
                        self._config_service(ip_address, key, conf_value)
                else:
                    self._config_service(vdu_mgmt_ip, key, conf_value)
7b90b8624cb3dd676dd8c160b0291b7fab121376 | 12,597 | py | Python | src/smbpo.py | gwthomas/Safe-MBPO | 416dd13b6d4eceec7ac89ece161627e166ea54d8 | [
"MIT"
] | 6 | 2021-12-08T04:18:10.000Z | 2022-02-16T07:54:02.000Z | src/smbpo.py | gwthomas/Safe-MBPO | 416dd13b6d4eceec7ac89ece161627e166ea54d8 | [
"MIT"
] | 1 | 2022-02-25T12:53:50.000Z | 2022-03-03T08:55:27.000Z | src/smbpo.py | gwthomas/Safe-MBPO | 416dd13b6d4eceec7ac89ece161627e166ea54d8 | [
"MIT"
] | null | null | null | import numpy as np
import torch
from tqdm import trange
from .config import BaseConfig, Configurable
from .dynamics import BatchedGaussianEnsemble
from .env.batch import ProductEnv
from .env.util import env_dims, get_max_episode_steps
from .log import default_log as log, TabularLog
from .policy import UniformPolicy
from .sampling import sample_episodes_batched
from .shared import SafetySampleBuffer
from .ssac import SSAC
from .torch_util import Module, DummyModuleWrapper, device, torchify, random_choice, gpu_mem_info, deciles
from .util import pythonic_mean, batch_map
N_EVAL_TRAJ = 10
LOSS_AVERAGE_WINDOW = 10
class SMBPO(Configurable, Module):
    """Safe model-based policy optimization (SMBPO) training loop.

    Collects real transitions, fits a Gaussian dynamics ensemble, generates
    short model ("virtual") rollouts, and trains a safe SAC solver on a
    mixture of real and virtual samples.
    """

    class Config(BaseConfig):
        sac_cfg = SSAC.Config()
        model_cfg = BatchedGaussianEnsemble.Config()
        model_initial_steps = 10000
        model_steps = 2000
        model_update_period = 250  # how many steps between updating the models
        save_trajectories = True
        horizon = 10
        alive_bonus = 0.0  # alternative: positive, rather than negative, reinforcement
        buffer_min = 5000
        buffer_max = 10**6
        steps_per_epoch = 1000
        rollout_batch_size = 100
        solver_updates_per_step = 10
        real_fraction = 0.1
        action_clip_gap = 1e-6  # for clipping to avoid numerical instability in logprob

    def __init__(self, config, env_factory, data):
        Configurable.__init__(self, config)
        Module.__init__(self)
        self.data = data
        self.episode_log = TabularLog(log.dir, 'episodes.csv')
        self.real_env = env_factory()
        # Batched eval env: one sub-env per evaluation trajectory.
        self.eval_env = ProductEnv([env_factory() for _ in range(N_EVAL_TRAJ)])
        self.state_dim, self.action_dim = env_dims(self.real_env)
        # Done/violation checks are delegated to the env's numpy-side API.
        self.check_done = lambda states: torchify(self.real_env.check_done(states.cpu().numpy()))
        self.check_violation = lambda states: torchify(self.real_env.check_violation(states.cpu().numpy()))
        self.solver = SSAC(self.sac_cfg, self.state_dim, self.action_dim, self.horizon)
        self.model_ensemble = BatchedGaussianEnsemble(self.model_cfg, self.state_dim, self.action_dim)
        self.replay_buffer = self._create_buffer(self.buffer_max)  # real transitions
        self.virt_buffer = self._create_buffer(self.buffer_max)    # model-generated transitions
        self.uniform_policy = UniformPolicy(self.real_env)
        # Registered as buffers so counters persist through checkpointing.
        self.register_buffer('episodes_sampled', torch.tensor(0))
        self.register_buffer('steps_sampled', torch.tensor(0))
        self.register_buffer('n_violations', torch.tensor(0))
        self.register_buffer('epochs_completed', torch.tensor(0))
        self.recent_critic_losses = []
        self.stepper = None

    @property
    def actor(self):
        return self.solver.actor

    def _create_buffer(self, capacity):
        # DummyModuleWrapper keeps the buffer out of the nn.Module state dict.
        buffer = SafetySampleBuffer(self.state_dim, self.action_dim, capacity)
        buffer.to(device)
        return DummyModuleWrapper(buffer)

    def _log_tabular(self, row):
        # Mirror each row into both the run's data store and the episode CSV.
        for k, v in row.items():
            self.data.append(k, v, verbose=True)
        self.episode_log.row(row)

    def step_generator(self):
        """Yields once per environment step; drives collection and training."""
        max_episode_steps = get_max_episode_steps(self.real_env)
        episode = self._create_buffer(max_episode_steps)
        state = self.real_env.reset()
        while True:
            t = self.steps_sampled.item()
            if t >= self.buffer_min:
                # Enough real data: act with the learned policy and train.
                policy = self.actor
                if t % self.model_update_period == 0:
                    self.update_models(self.model_steps)
                self.rollout_and_update()
            else:
                # Warm-up: explore uniformly until the buffer is seeded.
                policy = self.uniform_policy
            action = policy.act1(state, eval=False)
            next_state, reward, done, info = self.real_env.step(action)
            violation = info['violation']
            # Sanity-check that env step flags agree with the batch checkers.
            assert done == self.check_done(next_state.unsqueeze(0))[0]
            assert violation == self.check_violation(next_state.unsqueeze(0))[0]
            for buffer in [episode, self.replay_buffer]:
                buffer.append(states=state, actions=action, next_states=next_state,
                              rewards=reward, dones=done, violations=violation)
            self.steps_sampled += 1
            if done or violation or (len(episode) == max_episode_steps):
                episode_return = episode.get('rewards').sum().item()
                episode_length = len(episode)
                episode_return_plus_bonus = episode_return + episode_length * self.alive_bonus
                episode_safe = not episode.get('violations').any()
                self.episodes_sampled += 1
                if not episode_safe:
                    self.n_violations += 1
                self._log_tabular({
                    'episodes sampled': self.episodes_sampled.item(),
                    'total violations': self.n_violations.item(),
                    'steps sampled': self.steps_sampled.item(),
                    'collect return': episode_return,
                    'collect return (+bonus)': episode_return_plus_bonus,
                    'collect length': episode_length,
                    'collect safe': episode_safe,
                    **self.evaluate()
                })
                if self.save_trajectories:
                    episode_num = self.episodes_sampled.item()
                    save_path = self.episodes_dir/f'episode-{episode_num}.h5py'
                    episode.save_h5py(save_path)
                    log.message(f'Saved episode to {save_path}')
                episode = self._create_buffer(max_episode_steps)
                state = self.real_env.reset()
            else:
                state = next_state
            yield t

    def update_models(self, model_steps):
        """Fit the dynamics ensemble and refresh the solver's reward bounds."""
        log.message(f'Fitting models @ t = {self.steps_sampled.item()}')
        model_losses = self.model_ensemble.fit(self.replay_buffer, steps=model_steps)
        start_loss_average = np.mean(model_losses[:LOSS_AVERAGE_WINDOW])
        end_loss_average = np.mean(model_losses[-LOSS_AVERAGE_WINDOW:])
        log.message(f'Loss statistics:')
        log.message(f'\tFirst {LOSS_AVERAGE_WINDOW}: {start_loss_average}')
        log.message(f'\tLast {LOSS_AVERAGE_WINDOW}: {end_loss_average}')
        log.message(f'\tDeciles: {deciles(model_losses)}')
        # Empirical reward bounds (with alive bonus) inform the safe critic.
        buffer_rewards = self.replay_buffer.get('rewards')
        r_min = buffer_rewards.min().item() + self.alive_bonus
        r_max = buffer_rewards.max().item() + self.alive_bonus
        self.solver.update_r_bounds(r_min, r_max)

    def rollout(self, policy, initial_states=None):
        """Roll `policy` through the learned model for up to `horizon` steps.

        Transitions are appended to the virtual buffer; rollouts branch from
        random real states unless `initial_states` is given.
        """
        if initial_states is None:
            initial_states = random_choice(self.replay_buffer.get('states'), size=self.rollout_batch_size)
        buffer = self._create_buffer(self.rollout_batch_size * self.horizon)
        states = initial_states
        for t in range(self.horizon):
            with torch.no_grad():
                actions = policy.act(states, eval=False)
            next_states, rewards = self.model_ensemble.sample(states, actions)
            dones = self.check_done(next_states)
            violations = self.check_violation(next_states)
            buffer.extend(states=states, actions=actions, next_states=next_states,
                          rewards=rewards, dones=dones, violations=violations)
            # Only non-terminal, non-violating states continue the rollout.
            continues = ~(dones | violations)
            if continues.sum() == 0:
                break
            states = next_states[continues]
        self.virt_buffer.extend(**buffer.get(as_dict=True))
        return buffer

    def update_solver(self, update_actor=True):
        """One SAC update on a real/virtual mixture of samples."""
        solver = self.solver
        n_real = int(self.real_fraction * solver.batch_size)
        real_samples = self.replay_buffer.sample(n_real)
        virt_samples = self.virt_buffer.sample(solver.batch_size - n_real)
        combined_samples = [
            torch.cat([real, virt]) for real, virt in zip(real_samples, virt_samples)
        ]
        if self.alive_bonus != 0:
            # Shift rewards by the alive bonus before the critic update.
            REWARD_INDEX = 3
            assert combined_samples[REWARD_INDEX].ndim == 1
            combined_samples[REWARD_INDEX] = combined_samples[REWARD_INDEX] + self.alive_bonus
        critic_loss = solver.update_critic(*combined_samples)
        self.recent_critic_losses.append(critic_loss)
        if update_actor:
            solver.update_actor_and_alpha(combined_samples[0])

    def rollout_and_update(self):
        # One model rollout followed by several solver updates per env step.
        self.rollout(self.actor)
        for _ in range(self.solver_updates_per_step):
            self.update_solver()

    def setup(self):
        """Prepare for training: restore episodes, warm-up data, initial model."""
        if self.save_trajectories:
            self.episodes_dir = log.dir/'episodes'
            self.episodes_dir.mkdir(exist_ok=True)
            episodes_to_load = self.episodes_sampled.item()
            if episodes_to_load > 0:
                # Resuming: rebuild the replay buffer from saved episodes.
                log.message(f'Loading existing {episodes_to_load} episodes')
                for i in trange(1, self.episodes_sampled + 1):
                    episode = SafetySampleBuffer.from_h5py(self.episodes_dir/f'episode-{i}.h5py')
                    self.replay_buffer.extend(*episode.get())
                assert len(self.replay_buffer) == self.steps_sampled
        self.stepper = self.step_generator()
        if len(self.replay_buffer) < self.buffer_min:
            log.message(f'Collecting initial data')
            while len(self.replay_buffer) < self.buffer_min:
                next(self.stepper)
        log.message('Initial model training')
        self.update_models(self.model_initial_steps)
        log.message(f'Collecting initial virtual data')
        while len(self.virt_buffer) < self.buffer_min:
            self.rollout(self.uniform_policy)

    def epoch(self):
        """Run one epoch of steps, then log diagnostics."""
        for _ in trange(self.steps_per_epoch):
            next(self.stepper)
        self.log_statistics()
        self.epochs_completed += 1

    def evaluate_models(self):
        """Log per-model one-step prediction error deciles on the replay buffer."""
        states, actions, next_states = self.replay_buffer.get('states', 'actions', 'next_states')
        # Errors are normalized by the per-dimension state std.
        state_std = states.std(dim=0)
        with torch.no_grad():
            predicted_states = batch_map(lambda s, a: self.model_ensemble.means(s, a)[0],
                                         [states, actions], cat_dim=1)
        for i in range(self.model_cfg.ensemble_size):
            errors = torch.norm((predicted_states[i] - next_states) / state_std, dim=1)
            log.message(f'Model {i+1} error deciles: {deciles(errors)}')

    def log_statistics(self):
        """Log model errors, critic losses, buffer sizes, and mean Q values."""
        self.evaluate_models()
        avg_critic_loss = pythonic_mean(self.recent_critic_losses)
        log.message(f'Average recent critic loss: {avg_critic_loss}')
        self.data.append('critic loss', avg_critic_loss)
        self.recent_critic_losses.clear()
        log.message('Buffer sizes:')
        log.message(f'\tReal: {len(self.replay_buffer)}')
        log.message(f'\tVirtual: {len(self.virt_buffer)}')
        real_states, real_actions, real_violations = self.replay_buffer.get('states', 'actions', 'violations')
        virt_states, virt_violations = self.virt_buffer.get('states', 'violations')
        virt_actions = self.actor.act(virt_states, eval=True).detach()
        # Mean Q split by (real/virtual) x (violating/non-violating) states.
        sa_data = {
            'real (done)': (real_states[real_violations], real_actions[real_violations]),
            'real (~done)': (real_states[~real_violations], real_actions[~real_violations]),
            'virtual (done)': (virt_states[virt_violations], virt_actions[virt_violations]),
            'virtual (~done)': (virt_states[~virt_violations], virt_actions[~virt_violations])
        }
        for which, (states, actions) in sa_data.items():
            if len(states) == 0:
                mean_q = None
            else:
                with torch.no_grad():
                    qs = batch_map(lambda s, a: self.solver.critic.mean(s, a), [states, actions])
                mean_q = qs.mean()
            log.message(f'Average Q {which}: {mean_q}')
            self.data.append(f'Average Q {which}', mean_q)
        if torch.cuda.is_available():
            log.message(f'GPU memory info: {gpu_mem_info()}')

    def evaluate(self):
        """Run N_EVAL_TRAJ deterministic episodes; return length/return stats."""
        eval_traj = sample_episodes_batched(self.eval_env, self.solver, N_EVAL_TRAJ, eval=True)
        lengths = [len(traj) for traj in eval_traj]
        length_mean, length_std = float(np.mean(lengths)), float(np.std(lengths))
        returns = [traj.get('rewards').sum().item() for traj in eval_traj]
        return_mean, return_std = float(np.mean(returns)), float(np.std(returns))
        return {
            'eval return mean': return_mean,
            'eval return std': return_std,
            'eval length mean': length_mean,
            'eval length std': length_std
        }
3ea2d602bcb98f5670f508a5276f930aa6369e3b | 1,485 | py | Python | mindspore/python/mindspore/ops/_op_impl/tbe/diag.py | PowerOlive/mindspore | bda20724a94113cedd12c3ed9083141012da1f15 | [
"Apache-2.0"
] | 3,200 | 2020-02-17T12:45:41.000Z | 2022-03-31T20:21:16.000Z | mindspore/python/mindspore/ops/_op_impl/tbe/diag.py | zimo-geek/mindspore | 665ec683d4af85c71b2a1f0d6829356f2bc0e1ff | [
"Apache-2.0"
] | 176 | 2020-02-12T02:52:11.000Z | 2022-03-28T22:15:55.000Z | mindspore/python/mindspore/ops/_op_impl/tbe/diag.py | zimo-geek/mindspore | 665ec683d4af85c71b2a1f0d6829356f2bc0e1ff | [
"Apache-2.0"
] | 621 | 2020-03-09T01:31:41.000Z | 2022-03-30T03:43:19.000Z | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""DiagD op"""
from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType
# TBE registration for the Diag op: builds a diagonal matrix from the input
# and an assist tensor; supported on float16/float32/int32 (default format).
diag_d_op_info = TBERegOp("Diag") \
    .fusion_type("ELEMWISE") \
    .async_flag(False) \
    .binfile_name("diag_d.so") \
    .compute_cost(10) \
    .kernel_name("diag_d") \
    .partial_flag(True) \
    .input(0, "x", False, "required", "all") \
    .input(1, "assist", False, "required", "all") \
    .output(0, "y", False, "required", "all") \
    .dtype_format(DataType.F16_Default, DataType.F16_Default, DataType.F16_Default) \
    .dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.F32_Default) \
    .dtype_format(DataType.I32_Default, DataType.I32_Default, DataType.I32_Default) \
    .get_op_info()


@op_info_register(diag_d_op_info)
def _diag_tbe():
    """DiagD TBE register"""
    # Registration happens via the decorator; the function body is a no-op.
    return
| 38.076923 | 85 | 0.682155 |
68e251e08fe48f5e2617443ef88b4cb14e01ff89 | 6,864 | py | Python | hdf_compass/compass_viewer/array/plot.py | giumas/hdf-compass | 945d9acd6d4d676db8bf81e0af694b6eefb7dc25 | [
"IJG"
] | 1 | 2015-10-17T17:45:20.000Z | 2015-10-17T17:45:20.000Z | hdf_compass/compass_viewer/array/plot.py | giumas/hdf-compass | 945d9acd6d4d676db8bf81e0af694b6eefb7dc25 | [
"IJG"
] | null | null | null | hdf_compass/compass_viewer/array/plot.py | giumas/hdf-compass | 945d9acd6d4d676db8bf81e0af694b6eefb7dc25 | [
"IJG"
] | null | null | null | ##############################################################################
# Copyright by The HDF Group. #
# All rights reserved. #
# #
# This file is part of the HDF Compass Viewer. The full HDF Compass #
# copyright notice, including terms governing use, modification, and #
# terms governing use, modification, and redistribution, is contained in #
# the file COPYING, which can be found at the root of the source code #
# distribution tree. If you do not have access to this file, you may #
# request a copy from help@hdfgroup.org. #
##############################################################################
"""
Matplotlib window with toolbar.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
import wx
import matplotlib.pyplot as plt
from matplotlib.figure import Figure
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigCanvas
from matplotlib.backends.backend_wx import NavigationToolbar2Wx as NavigationToolbar
import logging
log = logging.getLogger(__name__)
from ..frame import BaseFrame
ID_VIEW_CMAP_JET = wx.NewId() # default
ID_VIEW_CMAP_BONE = wx.NewId()
ID_VIEW_CMAP_GIST_EARTH = wx.NewId()
ID_VIEW_CMAP_OCEAN = wx.NewId()
ID_VIEW_CMAP_RAINBOW = wx.NewId()
ID_VIEW_CMAP_RDYLGN = wx.NewId()
ID_VIEW_CMAP_WINTER = wx.NewId()
class PlotFrame(BaseFrame):
    """ Base class for Matplotlib plot windows.
    Override draw_figure() to plot your figure on the provided axes.
    """
    def __init__(self, data, title="a title"):
        """ Create a new Matplotlib plotting window for a 1D line plot """
        log.debug(self.__class__.__name__)
        BaseFrame.__init__(self, id=wx.ID_ANY, title=title, size=(800, 400))
        self.data = data
        self.panel = wx.Panel(self)
        self.dpi = 100
        # Embed a Matplotlib figure/canvas inside the wx panel.
        self.fig = Figure((6.0, 4.0), dpi=self.dpi)
        self.canvas = FigCanvas(self.panel, -1, self.fig)
        self.axes = self.fig.add_subplot(111)
        self.toolbar = NavigationToolbar(self.canvas)
        # Vertical layout: canvas grows with the window, toolbar stays fixed below.
        self.vbox = wx.BoxSizer(wx.VERTICAL)
        self.vbox.Add(self.canvas, 1, wx.LEFT | wx.TOP | wx.GROW)
        self.vbox.Add(self.toolbar, 0, wx.EXPAND)
        self.panel.SetSizer(self.vbox)
        self.vbox.Fit(self)
        # Subclasses render their plot here during construction.
        self.draw_figure()
    def draw_figure(self):
        """Render the plot on self.axes; must be overridden by subclasses."""
        raise NotImplementedError
class LinePlotFrame(PlotFrame):
    """Matplotlib window that renders one line per 1-D dataset in *data*.

    :param data: iterable of 1-D sequences, one plotted line each.
    :param names: optional legend labels, one per dataset.
    :param title: window title.
    """

    def __init__(self, data, names=None, title="Line Plot"):
        self.names = names
        PlotFrame.__init__(self, data, title)

    def draw_figure(self):
        """Plot each dataset as a line; add the legend once if names were given."""
        lines = [self.axes.plot(d)[0] for d in self.data]
        # Bug fix: the legend used to be re-created once per name inside a
        # `for n in self.names:` loop with identical arguments each time; a
        # single call with all lines/names is sufficient.  The truthiness
        # check also keeps the old behaviour of drawing no legend for an
        # empty names list.
        if self.names:
            self.axes.legend(tuple(lines), tuple(self.names))
class ContourPlotFrame(PlotFrame):
    """Matplotlib window showing a filled-contour plot of a 2-D dataset,
    with a colormap menu, colorbar and a status bar echoing cursor values."""
    def __init__(self, data, names=None, title="Contour Plot"):
        # need to be set before calling the parent (need for plotting)
        self.colormap = "jet"
        self.cb = None  # matplotlib color-bar
        PlotFrame.__init__(self, data, title)
        # Radio menu: exactly one colormap active at a time.
        self.cmap_menu = wx.Menu()
        self.cmap_menu.Append(ID_VIEW_CMAP_JET, "Jet", kind=wx.ITEM_RADIO)
        self.cmap_menu.Append(ID_VIEW_CMAP_BONE, "Bone", kind=wx.ITEM_RADIO)
        self.cmap_menu.Append(ID_VIEW_CMAP_GIST_EARTH, "Gist Earth", kind=wx.ITEM_RADIO)
        self.cmap_menu.Append(ID_VIEW_CMAP_OCEAN, "Ocean", kind=wx.ITEM_RADIO)
        self.cmap_menu.Append(ID_VIEW_CMAP_RAINBOW, "Rainbow", kind=wx.ITEM_RADIO)
        self.cmap_menu.Append(ID_VIEW_CMAP_RDYLGN, "Red-Yellow-Green", kind=wx.ITEM_RADIO)
        self.cmap_menu.Append(ID_VIEW_CMAP_WINTER, "Winter", kind=wx.ITEM_RADIO)
        self.add_menu(self.cmap_menu, "Colormap")
        self.Bind(wx.EVT_MENU, self.on_cmap_jet, id=ID_VIEW_CMAP_JET)
        self.Bind(wx.EVT_MENU, self.on_cmap_bone, id=ID_VIEW_CMAP_BONE)
        self.Bind(wx.EVT_MENU, self.on_cmap_gist_earth, id=ID_VIEW_CMAP_GIST_EARTH)
        self.Bind(wx.EVT_MENU, self.on_cmap_ocean, id=ID_VIEW_CMAP_OCEAN)
        self.Bind(wx.EVT_MENU, self.on_cmap_rainbow, id=ID_VIEW_CMAP_RAINBOW)
        self.Bind(wx.EVT_MENU, self.on_cmap_rdylgn, id=ID_VIEW_CMAP_RDYLGN)
        self.Bind(wx.EVT_MENU, self.on_cmap_winter, id=ID_VIEW_CMAP_WINTER)
        # Second status-bar field shows x/y/z under the cursor.
        self.status_bar = wx.StatusBar(self, -1)
        self.status_bar.SetFieldsCount(2)
        self.SetStatusBar(self.status_bar)
        self.canvas.mpl_connect('motion_notify_event', self.update_status_bar)
        self.canvas.Bind(wx.EVT_ENTER_WINDOW, self.change_cursor)
    def on_cmap_jet(self, evt):
        log.debug("cmap: jet")
        self.colormap = "jet"
        self._refresh_plot()
    def on_cmap_bone(self, evt):
        log.debug("cmap: bone")
        self.colormap = "bone"
        self._refresh_plot()
    def on_cmap_gist_earth(self, evt):
        log.debug("cmap: gist_earth")
        self.colormap = "gist_earth"
        self._refresh_plot()
    def on_cmap_ocean(self, evt):
        log.debug("cmap: ocean")
        self.colormap = "ocean"
        self._refresh_plot()
    def on_cmap_rainbow(self, evt):
        log.debug("cmap: rainbow")
        self.colormap = "rainbow"
        self._refresh_plot()
    def on_cmap_rdylgn(self, evt):
        log.debug("cmap: RdYlGn")
        self.colormap = "RdYlGn"
        self._refresh_plot()
    def on_cmap_winter(self, evt):
        log.debug("cmap: winter")
        self.colormap = "winter"
        self._refresh_plot()
    def _refresh_plot(self):
        # Redraw with the currently selected colormap.
        self.draw_figure()
        self.canvas.draw()
    def draw_figure(self):
        max_elements = 500  # don't attempt plot more than 500x500 elements
        rows = self.data.shape[0]
        cols = self.data.shape[1]
        # Integer strides downsample the data so each axis stays <= max_elements.
        row_stride = rows // max_elements + 1
        col_stride = cols // max_elements + 1
        data = self.data[::row_stride, ::col_stride]
        xx = np.arange(0, self.data.shape[1], col_stride)
        yy = np.arange(0, self.data.shape[0], row_stride)
        # NOTE(review): plt.cm.get_cmap is deprecated in newer matplotlib
        # (use matplotlib.colormaps[...]) — confirm the pinned version.
        img = self.axes.contourf(xx, yy, data, 25, cmap=plt.cm.get_cmap(self.colormap))
        self.axes.set_aspect('equal')
        if self.cb:
            # Reuse the existing colorbar rather than stacking a new one per redraw.
            self.cb.on_mappable_changed(img)
        else:
            self.cb = plt.colorbar(img, ax=self.axes)
            self.cb.ax.tick_params(labelsize=8)
    def change_cursor(self, event):
        # NOTE(review): wx.StockCursor is the classic-wx API; wxPython 4 uses
        # wx.Cursor — confirm which wx generation this targets.
        self.canvas.SetCursor(wx.StockCursor(wx.CURSOR_CROSS))
    def update_status_bar(self, event):
        """Show x, y and the data value under the cursor in the status bar."""
        msg = str()
        if event.inaxes:
            x, y = int(event.xdata), int(event.ydata)
            # NOTE(review): indexes the full-resolution array; values at the
            # very edge could fall out of range — confirm axes limits clamp this.
            z = self.data[y, x]
            msg = "x= %d, y= %d, z= %f" % (x, y, z)
        self.status_bar.SetStatusText(msg, 1)
| 36.705882 | 90 | 0.624709 |
fff2bc7ce424a661c2c8c52812e29be93acf549a | 9,664 | py | Python | pydocstringformatter/formatting/formatter.py | DanielNoord/pydocstringformatter | a69302cee6bd32b9b5cc48912a47d0e8ad3f7abe | [
"MIT"
] | 4 | 2022-01-02T22:50:59.000Z | 2022-02-09T09:04:37.000Z | pydocstringformatter/formatting/formatter.py | DanielNoord/pydocstringformatter | a69302cee6bd32b9b5cc48912a47d0e8ad3f7abe | [
"MIT"
] | 80 | 2022-01-02T09:02:50.000Z | 2022-03-30T13:34:10.000Z | pydocstringformatter/formatting/formatter.py | DanielNoord/pydocstringformatter | a69302cee6bd32b9b5cc48912a47d0e8ad3f7abe | [
"MIT"
] | 2 | 2022-01-02T11:58:29.000Z | 2022-01-04T18:53:29.000Z | from __future__ import annotations
import re
import textwrap
import tokenize
from typing import Literal
from pydocstringformatter.formatting import _utils
from pydocstringformatter.formatting.base import (
StringAndQuotesFormatter,
StringFormatter,
SummaryFormatter,
)
class BeginningQuotesFormatter(StringFormatter):
    """Fix the position of the opening quotes."""
    name = "beginning-quotes"
    potential_single_line = re.compile(
        r"""
        ['"]{1,3} # 3 opening quotes
        \n\s*.+ # A line with any length of characters
        \n\s* # A line with only whitespace
        ['"]{1,3} # 3 ending quote
        """,
        re.X,
    )
    """Regex pattern to match against a potential single line docstring."""
    def _treat_string(self, tokeninfo: tokenize.TokenInfo, _: int) -> str:
        """Pull text that starts on the line after the opening quotes up onto them."""
        new_string = tokeninfo.string
        # Index 3 assumes triple-quoted docstrings; a newline there means the
        # text starts on the next line.
        if new_string[3] == "\n":
            if (
                new_string.count("\n") == 1  # Single line docstring
                or self.config.summary_quotes_same_line  # Config for multi-line
                or self.potential_single_line.match(new_string)  # Potential single line
            ):
                # Remove only the first newline (and its indentation).
                new_string = re.sub(r"\n *", "", new_string, 1)
        return new_string
class CapitalizeFirstLetterFormatter(StringFormatter):
    """Uppercase the first word character of the docstring when one is found."""

    name = "capitalize-first-letter"

    # Opening quotes (1-3), optional whitespace, then the first word character.
    first_letter_re = re.compile(r"""['"]{1,3}\s*(\w)""", re.DOTALL)

    def _treat_string(self, tokeninfo: tokenize.TokenInfo, _: int) -> str:
        """Return the docstring with its first word character upper-cased."""
        original = tokeninfo.string
        match = self.first_letter_re.match(original)
        if match is None:
            # No leading word character found: leave the docstring untouched.
            return original
        index = match.end() - 1
        return original[:index] + original[index].upper() + original[index + 1 :]
class LineWrapperFormatter(SummaryFormatter):
    """Linewrap the docstring by the pre-defined line length."""
    name = "linewrap-full-docstring"
    optional = True
    def _treat_summary(
        self,
        summary: str,
        indent_length: int,
        quotes_length: Literal[1, 3],
        description_exists: bool,
    ) -> str:
        """Wrap the summary of a docstring.

        The first line is wrapped accounting for the indentation plus the
        opening quotes; subsequent lines only for the indentation.
        """
        line_length = 88
        # Without a description we need to consider the length including closing quotes
        if not description_exists:
            # Calculate length without the ending quotes
            length_without_ending = indent_length + quotes_length + len(summary)
            # If potential length is less than line length we need to consider ending
            # quotes as well for the line length
            if length_without_ending < line_length:
                # We substract one more because we don't want a new line with just the
                # ending quotes
                line_length -= quotes_length + 1
        summary_lines = summary.splitlines()
        # Wrap the first line with a phantom prefix for indent + quotes, then
        # slice that prefix back off so the caller can re-attach real quotes.
        new_summary = "\n".join(
            textwrap.wrap(
                summary_lines[0],
                width=line_length,
                initial_indent=" " * (indent_length + quotes_length),
                subsequent_indent=" " * indent_length,
                replace_whitespace=True,
            )
        )[indent_length + quotes_length :]
        if len(summary_lines) > 1:
            # Remaining lines are wrapped individually, preserving line breaks.
            for line in summary_lines[1:]:
                new_summary += "\n"
                new_summary += "\n".join(
                    textwrap.wrap(
                        line,
                        width=line_length,
                        subsequent_indent=" " * indent_length,
                        replace_whitespace=True,
                    )
                )
        return new_summary
class ClosingQuotesFormatter(StringFormatter):
    """Fix the position of the closing quotes."""
    name = "closing-quotes"
    def _treat_string(self, tokeninfo: tokenize.TokenInfo, _: int) -> str:
        """Fix the position of end quotes for multi-line docstrings."""
        new_string = tokeninfo.string
        if "\n" not in new_string:
            # Not a multiline docstring, nothing to do
            return new_string
        # The desired final line: the docstring's indentation followed by
        # three closing quote characters (same quote char as the opening).
        good_end = f"{(tokeninfo.start[1]) * ' '}{(new_string[0]) * 3}"
        split_string = new_string.split("\n")
        # Add new line with only quotes
        if not new_string.endswith("\n" + good_end):
            new_string = new_string[:-3] + "\n" + good_end
        # Remove line with only quotes for potential single line string
        elif len(split_string) == 2 and split_string[-1] == good_end:
            new_string = "\n".join(split_string[:-1]) + tokeninfo.string[0] * 3
        return new_string
class FinalPeriodFormatter(SummaryFormatter):
    """Ensure single-line docstrings and summaries end in a period."""

    name = "final-period"

    END_OF_SENTENCE_PUNCTUATION = {".", "?", "!", "‽", ":", ";"}

    def _treat_summary(
        self,
        summary: str,
        indent_length: int,
        quotes_length: Literal[1, 3],
        description_exists: bool,
    ) -> str:
        """Append a period unless already punctuated or the summary is an rST title."""
        needs_period = (
            summary[-1] not in self.END_OF_SENTENCE_PUNCTUATION
            and not _utils.is_rst_title(summary)
        )
        return summary + "." if needs_period else summary
class SplitSummaryAndDocstringFormatter(SummaryFormatter):
    """Split the summary and body of a docstring based on a period and max length.
    The maximum length of a summary can be set with the --max-summary-lines option.
    """
    name = "split-summary-body"
    end_of_sentence_period = re.compile(
        r"""
        (?<!e.g|i.e|etc) # Not preceded by 'e.g', 'i.e', 'etc'
        \. # A dot
        (?!\w) # Not followed by a letter
        """,
        re.X,
    )
    """Pattern to match against an end of sentence period."""
    # pylint: disable-next=too-many-branches
    def _treat_summary(
        self,
        summary: str,
        indent_length: int,
        quotes_length: Literal[1, 3],
        description_exists: bool,
    ) -> str:
        """Split a summary and body if there is a period after the summary."""
        new_summary = None
        # rST titles must never be split.
        if _utils.is_rst_title(summary):
            return summary
        # Try to split on period
        if match := re.search(self.end_of_sentence_period, summary):
            index = match.start()
            # Only split if the resulting summary stays within the configured limit.
            if summary[:index].count("\n") < self.config.max_summary_lines:
                if len(summary) == index + 1:
                    # Period is the last character: nothing follows, keep as-is.
                    new_summary = summary
                # Handle summaries with more text on same line after the period
                elif summary[index + 1] == " ":
                    new_summary = (
                        summary[:index]
                        + f"\n\n{' ' * indent_length}"
                        + summary[index + 2 :]
                    )
                # Handle summaries that end with a period and a direct new line
                elif summary[index + 1] == "\n":
                    new_summary = summary[:index] + ".\n\n" + summary[index + 2 :]
        # Try to split on max length
        if not new_summary and summary.count("\n") > self.config.max_summary_lines - 1:
            lines = summary.splitlines()
            # Insert a blank line after max_summary_lines lines.
            new_summary = (
                "\n".join(lines[: self.config.max_summary_lines])
                + "\n\n"
                + "\n".join(lines[self.config.max_summary_lines :])
            )
        return new_summary or summary
class StripWhitespacesFormatter(StringAndQuotesFormatter):
    """Strip 1) docstring start, 2) docstring end and 3) end of line."""
    name = "strip-whitespaces"
    def _treat_string(
        self,
        tokeninfo: tokenize.TokenInfo,
        indent_length: int,
        quotes: str,
        quotes_length: Literal[1, 3],
    ) -> str:
        """Strip whitespaces.

        Works on the body between the quotes; re-attaches the quotes at the end.
        """
        lines = tokeninfo.string[quotes_length:-quotes_length].split("\n")
        new_lines: list[str] = []
        for index, line in enumerate(lines):
            if line == "":
                # Remove double white lines
                if index and lines[index - 1] == "":
                    continue
            # On the first line strip from both sides
            if index == 0:  # pylint: disable=compare-to-zero
                new_lines.append(line.lstrip().rstrip())
            # Check last line
            elif index == len(lines) - 1:
                # If completely whitespace, just return the indent_length
                if line.count(" ") == len(line):
                    new_lines.append(indent_length * " ")
                else:
                    new_lines.append(line)
            # Else, only strip right side
            else:
                new_lines.append(line.rstrip())
        # Remove a final white line
        if len(new_lines) > 3 and new_lines[-2] == "":
            new_lines.pop(-2)
        return quotes + "\n".join(new_lines) + quotes
class QuotesTypeFormatter(StringAndQuotesFormatter):
    """Normalize every docstring to use triple double quotes as delimiters."""

    name = "quotes-type"

    def _treat_string(
        self,
        tokeninfo: tokenize.TokenInfo,
        _: int,
        __: str,
        quotes_length: Literal[1, 3],
    ) -> str:
        """Re-wrap the docstring body in triple double quotes."""
        body = tokeninfo.string[quotes_length:-quotes_length]
        return '"""' + body + '"""'
| 33.672474 | 88 | 0.563845 |
9b9900722a965d234b4cc69c3e98e15a965b38a3 | 33,161 | py | Python | apps/agentcontroller/controller.py | jumpscale7/jumpscale_core7 | c3115656214cab1bd32f7a1e092c0bffc84a00cd | [
"Apache-2.0"
] | null | null | null | apps/agentcontroller/controller.py | jumpscale7/jumpscale_core7 | c3115656214cab1bd32f7a1e092c0bffc84a00cd | [
"Apache-2.0"
] | 4 | 2016-08-25T12:08:39.000Z | 2018-04-12T12:36:01.000Z | apps/agentcontroller/controller.py | jumpscale7/jumpscale_core7 | c3115656214cab1bd32f7a1e092c0bffc84a00cd | [
"Apache-2.0"
] | 3 | 2016-03-08T07:49:34.000Z | 2018-10-19T13:56:43.000Z | #this must be in the beginning so things are patched before ever imported by other libraries
from gevent import monkey
import gevent
monkey.patch_socket()
monkey.patch_thread()
monkey.patch_time()
from JumpScale import j
from JumpScale.baselib.cmdutils import argparse
import importlib
import copy
import crontab
try:
import ujson as json
except:
import json
import time
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--instance', help="Agentcontroller instance", required=True)
opts = parser.parse_args()
j.application.instanceconfig = j.application.getAppInstanceHRD(name="agentcontroller",instance=opts.instance)
while not j.clients.redis.isRunning('system'):
time.sleep(0.1)
print("cannot connect to redis system, will keep on trying forever, please start redis system")
j.application.start("jumpscale:agentcontroller")
j.application.initGrid()
j.logger.consoleloglevel = 2
from JumpScale.grid.jumpscripts.JumpscriptFactory import Jumpscript
from watchdog.events import FileSystemEventHandler
from watchdog.observers.polling import PollingObserver as Observer
from base64 import b64encode
class JumpscriptHandler(FileSystemEventHandler):
    """Watchdog handler that hot-reloads a jumpscript whenever its .py file changes."""
    def __init__(self, agentcontroller):
        # The ControllerCMDS instance whose reloadjumpscript() we trigger.
        self.agentcontroller = agentcontroller
    def on_any_event(self, event):
        # React only to file events on python sources; reload failures are
        # reported but never allowed to kill the observer thread.
        if event.src_path and not event.is_directory and event.src_path.endswith('.py'):
            try:
                self.agentcontroller.reloadjumpscript(event.src_path)
            except Exception as e:
                print("Failed to reload jumpscript", e)
class ControllerCMDS():
    def __init__(self, daemon):
        """Wire up OSIS clients, redis and in-memory role maps, then start background tasks."""
        self.debug = False  # set true for verbose output
        j.application.initGrid()
        self.daemon = daemon
        # Used as a prefix to make job guids unique across the grid.
        self.acuniquekey = j.application.getUniqueMachineId()
        self.jumpscripts = {}
        self.jumpscriptsFromKeys = {}
        # OSIS categories used for persistence of jobs/nodes/health/jumpscripts.
        self.osisclient = j.clients.osis.getByInstance(gevent=True)
        self.jobclient = j.clients.osis.getCategory(self.osisclient, 'system', 'job')
        self.nodeclient = j.clients.osis.getCategory(self.osisclient, 'system', 'node')
        self.healthclient = j.clients.osis.getCategory(self.osisclient, 'system', 'health')
        self.jumpscriptclient = j.clients.osis.getCategory(self.osisclient, 'system', 'jumpscript')
        self.redis = j.clients.redis.getByInstance('system')
        # role name -> list of "gid_nid" agent ids, and the reverse mapping.
        self.roles2agents = dict()
        self.sessionsUpdateTime = dict()
        self.agents2roles = dict()
        self.start()
    def start(self):
        """Launch the periodic job cleaner and the jumpscript file watcher."""
        # Scheduled jobs older than one day are purged (greenlet loops forever).
        gevent.spawn(self._cleanScheduledJobs, 3600*24)
        observer = Observer()
        handler = JumpscriptHandler(self)
        observer.schedule(handler, "jumpscripts", recursive=True)
        observer.start()
    def _adminAuth(self,user,passwd):
        """Authenticate a user/password pair against the node OSIS client."""
        return self.nodeclient.authenticate(user, passwd)
    def authenticate(self, session):
        """Authenticate the credentials carried by a daemon session."""
        return self.nodeclient.authenticate(session.user, session.passwd)
def scheduleCmd(self,gid,nid,cmdcategory,cmdname,args={},jscriptid=None,queue="",log=True,timeout=None,roles=[],wait=False,errorreport=False,tags=None,session=None):
"""
new preferred method for scheduling work
@name is name of cmdserver method or name of jumpscript
"""
self._log("schedule cmd:%s_%s %s %s"%(gid,nid,cmdcategory,cmdname))
if not nid and not roles:
raise RuntimeError("Either nid or roles should be given")
action = None
if jscriptid is None and session is not None:
action = self.getJumpscript(cmdcategory, cmdname, session=session)
jscriptid = action.id
if session is not None:
sessionid=session.id
else:
sessionid=None
self._log("getjob osis client")
if not timeout:
if action:
timeout = action.timeout if action.timeout else 600
else:
timeout = 600
if action and action.category == 'monitor.healthcheck':
log = False
job=self.jobclient.new(sessionid=sessionid,gid=gid,nid=nid,category=cmdcategory,cmd=cmdname,queue=queue,args=args,log=log,timeout=timeout,roles=roles,wait=wait,errorreport=errorreport,tags=tags)
self._log("redis incr for job")
jobid=self.redis.hincrby("jobs:last", str(gid), 1)
self._log("jobid found (incr done):%s"%jobid)
job.id=int(jobid)
job.jscriptid = jscriptid
saveinosis = True if nid and log else False
self._log("save 2 osis {}".format(saveinosis))
jobdict = job.dump()
self._setJob(jobdict, osis=saveinosis)
jobs=json.dumps(jobdict)
self._log("getqueue")
role = roles[0] if roles else None
q = self._getCmdQueue(gid=gid, nid=nid, role=role)
self._log("put on queue")
q.put(jobs)
self._log("schedule done")
return jobdict
    def _cleanScheduledJobs(self, expiretime):
        """Background loop: purge expired SCHEDULED jobs once per hour, forever."""
        while True:
            self.cleanScheduledJobs(expiretime)
            time.sleep(3600)
    def cleanScheduledJobs(self, expiretime):
        """Remove SCHEDULED jobs whose timeout (or *expiretime* fallback) elapsed.

        @return: number of jobs removed from the command queues
        """
        queues = self.redis.keys('queues:commands:queue*')
        count = 0
        now = time.time()
        for qname in queues:
            jobstrings = self.redis.lrange(qname, 0, -1)
            for jobstring in jobstrings:
                job = json.loads(jobstring)
                # A job with no explicit timeout expires after *expiretime*.
                timeout = job['timeout'] or expiretime
                if job['state'] == 'SCHEDULED' and job['timeStart'] + timeout < now:
                    self.redis.lrem(qname, jobstring)
                    count += 1
        return count
    # NOTE(review): the four methods below call the module-level `cmds` object
    # (presumably the ControllerCMDS singleton created elsewhere in this file)
    # instead of `self` — confirm that is intentional.
    def restartProcessmanagerWorkers(self,session=None):
        """Ask each processmanager in the caller's grid to stop (and be restarted)."""
        for item in self.osisclient.list("system","node"):
            gid,nid=item.split("_")
            if int(gid)==session.gid:
                cmds.scheduleCmd(gid,nid,cmdcategory="pm",jscriptid=0,cmdname="stop",args={},queue="internal",log=False,timeout=60,roles=[],session=session)
    def reloadjumpscripts(self,session=None):
        """Reload every jumpscript locally, then tell all processmanagers to do the same."""
        self.jumpscripts = {}
        self.jumpscriptsFromKeys = {}
        self.loadJumpscripts()
        self.loadLuaJumpscripts()
        print("want processmanagers to reload js:")
        for item in self.osisclient.list("system","node"):
            gid,nid=item.split("_")
            cmds.scheduleCmd(gid,nid,cmdcategory="pm",jscriptid=0,cmdname="reloadjumpscripts",args={},queue="internal",log=False,timeout=60,roles=[],session=session)
        print("OK")
    def reloadjumpscript(self, path, session=None):
        """Reload a single jumpscript file and push its new definition to all nodes."""
        script = self.loadJumpscript(path)
        print("want processmanagers to reload js:")
        for item in self.osisclient.list("system","node"):
            gid,nid=item.split("_")
            cmds.scheduleCmd(gid, nid, cmdcategory="jumpscripts", jscriptid=0, cmdname="loadJumpscript",
                args={'jumpscript': script.getDict()},
                queue="internal", log=False, timeout=60, roles=[], session=session)
    def restartWorkers(self,session=None):
        """Ask each processmanager in the caller's grid to restart its workers."""
        for item in self.osisclient.list("system","node"):
            gid,nid=item.split("_")
            if int(gid)==session.gid:
                cmds.scheduleCmd(gid,nid,cmdcategory="pm",jscriptid=0,cmdname="restartWorkers",args={},queue="internal",log=False,timeout=60,roles=[],session=session)
    def _setJob(self, job, osis=False):
        """Cache a job dict in redis (and optionally persist it to OSIS)."""
        if not j.basetype.dictionary.check(job):
            raise RuntimeError("job needs to be dict")
        # job guid needs to be unique across the grid, structure: $acuniquekey_$gid_$jobid
        if not job['guid']:
            job["guid"]="%s_%s_%s"%(self.acuniquekey, job["gid"],job["id"])
        jobs=json.dumps(job)
        self.redis.hset("jobs:%s"%job["gid"], job["guid"], jobs)
        if osis:
            self.saveJob(job)
    def saveJob(self, job, session=None):
        """Persist a job to OSIS; complex fields are serialized to JSON strings first."""
        # Deep copy so the caller's dict is not mutated by the serialization below.
        job = copy.deepcopy(job)
        if 'result' in job and not isinstance(job["result"],str):
            job['result'] = json.dumps(job['result'])
        for key in ('args', 'kwargs'):
            if key in job:
                job[key] = json.dumps(job[key])
        self.jobclient.set(job)
    def _deleteJobFromCache(self, job):
        """Remove a job from the per-grid redis hash."""
        self.redis.hdel("jobs:%s"%job["gid"], job["guid"])
    def _getJobFromRedis(self, jobguid):
        """Look a job up in redis by guid; returns the job dict or None."""
        if not len(jobguid.split("_")) == 3:
            raise RuntimeError("Jobguid needs to be of format: '$acuniquekey_$gid_$jobid' ")
        gid = jobguid.split("_")[1]
        jobstring = self.redis.hget("jobs:%s" % gid, jobguid)
        if jobstring:
            return json.loads(jobstring)
        else:
            return None
    def _getCmdQueue(self, gid=None, nid=None, role=None, session=None):
        """
        Queue where commands are scheduled for a processmanager to be picked up.

        Routing: a role queue is used when *role* is given, otherwise the nid queue.
        """
        if gid is None or (nid is None and not role):
            raise RuntimeError("gid or nid cannot be None")
        if session==None:
            self._log("get cmd queue NOSESSION")
        qname = role or nid
        self._log("get cmd queue for %s %s"%(gid,qname))
        queuename = "commands:queue:%s:%s" % (gid, qname)
        return self.redis.getQueue(queuename)
    def _getWorkQueue(self, session):
        """Build a blocking multi-key queue covering the session's nid and role queues."""
        rediscl = j.clients.redis.getByInstance('system')
        class MultiKeyQueue(object):
            # Thin wrapper: BLPOP over several redis lists at once.
            def __init__(self, keys):
                self.keys = keys
            def get(self, timeout=None):
                data = rediscl.blpop(self.keys, timeout=timeout)
                if data:
                    # blpop returns (key, value); only the value is needed.
                    return data[1]
                return None
        queues = list()
        queues.append("queues:commands:queue:%s:%s" % (session.gid, session.nid))
        for role in session.roles:
            queues.append("queues:commands:queue:%s:%s" % (session.gid, role))
        return MultiKeyQueue(queues)
    def _getJobQueue(self, jobguid):
        """Queue on which a job's result is published for waiters."""
        queuename = "jobqueue:%s" % jobguid
        self._log("get job queue for job:%s"%(jobguid))
        return self.redis.getQueue(queuename)  # fromcache=False)c
    def _setRoles(self,roles, agent):
        """Replace the set of roles known for *agent* ("gid_nid") in both maps."""
        # Drop the agent from every role it was previously registered under.
        for role, agents in self.roles2agents.iteritems():
            if agent in agents:
                agents.remove(agent)
        for role in roles:
            self.roles2agents.setdefault(role, list()).append(agent)
        self.agents2roles[agent] = roles
    def _updateNetInfo(self, netinfo, node):
        """Store raw netinfo on the node and rebuild its flat ip address list."""
        node.netaddr = netinfo
        node.ipaddr = list()
        for netitem in netinfo:
            # Skip interfaces without a real MAC, ip or name.
            if netitem['mac'] != "00:00:00:00:00:00" and netitem['ip'] and netitem['name']:
                # 'ip' may be a single address or a list of them.
                ip = [netitem['ip']] if isinstance(netitem['ip'], basestring) else netitem['ip']
                node.ipaddr.extend(ip)
    def registerNode(self, hostname, machineguid, memory, session):
        """Create or update a node record for a newly seen machine (root only)."""
        if session.user != 'root' or not self._adminAuth(session.user, session.passwd):
            raise RuntimeError("Only admin can register new nodes")
        node = self.nodeclient.new()
        # Reuse an existing record for this machineguid when present.
        nodedata = self.nodeclient.searchOne({'machineguid': machineguid})
        if nodedata:
            node.load(nodedata)
        node.roles = session.roles
        node.gid = session.gid
        node.name = hostname
        node.machineguid = machineguid
        node.memory = memory
        self._updateNetInfo(session.netinfo, node)
        nodeid, new, changed = self.nodeclient.set(node)
        node = self.nodeclient.get(nodeid)
        # Let the daemon attach the assigned nid to the session.
        self.daemon.notifyOfNewNode(node, session.id)
        self._setRoles(node.roles, nodeid)
        self.sessionsUpdateTime[nodeid]=j.base.time.getTimeEpoch()
        result = {'node': node.dump()}
        return result
    def register(self,session):
        """Re-register an already known agent session; refresh roles and netinfo."""
        self._log("new agent:")
        nodeid="%s_%s"%(session.gid,session.nid)
        if session.nid:
            node = self.nodeclient.get(nodeid)
            self._setRoles(node.roles, nodeid)
            self.sessionsUpdateTime[nodeid]=j.base.time.getTimeEpoch()
            self._log("register done:%s"%nodeid)
            self._updateNetInfo(session.netinfo, node)
            self.nodeclient.set(node)
            return node.dump()
        raise RuntimeError("Node is not registered properly please call registerNode.\n Session: %s" % session )
    def escalateError(self, eco, session=None):
        """Process an error condition object (accepts a dict or an eco instance)."""
        if isinstance(eco, dict):
            eco = j.errorconditionhandler.getErrorConditionObject(eco)
        eco.process()
    def loadLuaJumpscripts(self):
        """
        Like self.loadJumpscripts() but for Lua jumpscripts.
        """
        lua_jumpscript_path = 'luajumpscripts'
        available_jumpscripts = list()
        if j.system.fs.exists(lua_jumpscript_path):
            available_jumpscripts =\
                j.system.fs.listFilesInDir(path=lua_jumpscript_path, recursive=True, filter='*.lua', followSymlinks=True)
        for jumpscript_path in available_jumpscripts:
            jumpscript_metadata = j.core.jumpscripts.introspectLuaJumpscript(jumpscript_path)
            # Lua scripts are keyed the same way as python ones: "<org>_<name>".
            key = "%(organization)s_%(name)s" % {
                'organization': jumpscript_metadata.organization,
                'name': jumpscript_metadata.name
            }
            self.jumpscripts[key] = jumpscript_metadata
    def loadJumpscripts(self, path="jumpscripts", session=None):
        """Load every python jumpscript found under *path* (recursively)."""
        if session is not None:
            self._adminAuth(session.user,session.passwd)
        for filepath in j.system.fs.listFilesInDir(path=path, recursive=True, filter="*.py", followSymlinks=True):
            self.loadJumpscript(filepath)
    def loadJumpscript(self, path):
        """Load one jumpscript file, register it in OSIS and the in-memory cache.

        @return: the Jumpscript instance, or None when skipped/failed
        """
        if j.system.fs.getDirName(path,True)[0]=="_":  # skip dirs starting with _
            return
        try:
            script = Jumpscript(path=path)
        except Exception as e:
            msg="Could not load jumpscript:%s\n" % path
            msg+="Error was:%s\n" % e
            # print msg
            j.errorconditionhandler.raiseInputError(msgpub="",message=msg,category="agentcontroller.load",tags="",die=False)
            # NOTE(review): stopping the whole application because one script
            # failed to load (after die=False above) looks inconsistent — confirm.
            j.application.stop()
            return
        # Fall back to the file name when the script declares no name.
        name = getattr(script, 'name', "")
        if name=="":
            name=j.system.fs.getBaseName(path)
            name=name.replace(".py","").lower()
        t=self.jumpscriptclient.new(name=name, action=script.module.action)
        t.__dict__.update(script.getDict())
        self.jumpscriptclient.set(t)
        self._log("found jumpscript:{}:{}".format(t.organization, t.name))
        # Cache key matches getJumpscript(): "<org>_<name>".
        namekey = "%s_%s" % (t.organization, t.name)
        self.jumpscripts[namekey] = t
        return script
    def getJumpscript(self, organization, name,gid=None,reload=False, session=None):
        """Look a jumpscript up in the cache by "<org>_<name>" (name lowercased).

        @return: the jumpscript object, or "" when not found
        """
        if session is not None:
            self._adminAuth(session.user,session.passwd)
        if gid is None and session is not None:
            gid = session.gid
        key = "%s_%s" % (organization, name.lower())
        if key in self.jumpscripts:
            if reload:
                # from IPython import embed
                # print "DEBUG NOW getJumpscript reload"
                # embed()
                # NOTE(review): reload=True falls through to `return ""` below —
                # looks like an unfinished code path; confirm intent.
                pass
            else:
                return self.jumpscripts[key]
        else:
            j.errorconditionhandler.raiseOperationalCritical("Cannot find jumpscript %s:%s" % (organization, name), category="action.notfound", die=False)
        return ""
    def listJumpscripts(self, organization=None, cat=None, session=None):
        """
        List enabled jumpscripts, optionally filtered by organization/category.

        @return [[id, org, name, roles],...]
        """
        if session is not None:
            self._adminAuth(session.user,session.passwd)
        def myfilter(entry):
            # Keep only enabled entries matching the requested org/category.
            if not entry.enable:
                return False
            if organization and entry.organization != organization:
                return False
            if not hasattr(entry, 'category'):
                return False
            if cat and entry.category != cat:
                return False
            return True
        return [[t.id,t.organization, t.name, (t.roles)] for t in filter(myfilter, self.jumpscripts.values()) ]
    def executeJumpscript(self, organization, name, nid=None, role=None, args={},all=False, \
            timeout=600,wait=True,queue="", gid=None,errorreport=True, tags=None, session=None):
        """
        Execute a jumpscript on a node (by nid) or on agents matching *role*.

        @param role: defines which of the agents need to execute this action
        @param all: if False will be executed only once by the first found agent,
                    if True will be executed by all matched agents
        @return: job dict, list of job dicts (role + all), or a NOWORK job dict
        """
        # validate params
        if not nid and not gid and not role:
            j.events.inputerror_critical("executeJumpscript requires either nid and gid or role")
        def noWork():
            # Record a synthetic NOWORK job so callers always get a job dict back.
            sessionid = session.id
            ngid = gid or j.application.whoAmI.gid
            job=self.jobclient.new(sessionid=sessionid,gid=ngid,nid=nid,category=organization,cmd=name,queue=queue,args=args,log=True,timeout=timeout,roles=[role],wait=wait,errorreport=errorreport,tags=tags)
            self._log("nothingtodo")
            job.state="NOWORK"
            job.timeStop=job.timeStart
            self._setJob(job.__dict__, osis=True)
            return job.__dict__
        self._adminAuth(session.user,session.passwd)
        self._log("AC:get request to exec JS:%s %s on node:%s"%(organization,name,nid))
        action = self.getJumpscript(organization, name, session=session)
        if action==None or str(action).strip()=="":
            raise RuntimeError("Cannot find jumpscript %s %s"%(organization,name))
        if not queue:
            queue = action.queue
        if role is not None:
            self._log("ROLE NOT NONE")
            role = role.lower()
            if role in self.roles2agents:
                if not all:
                    # Single execution: route via the role queue, first agent wins.
                    job=self.scheduleCmd(gid,nid,organization,name,args=args,queue=queue,log=action.log,roles=[role],session=session,jscriptid=action.id, wait=wait, tags=tags)
                    if wait:
                        return self.waitJumpscript(job=job,session=session, timeout=timeout)
                    else:
                        return job
                else:
                    # Fan out: one job per agent currently holding the role.
                    job = list()
                    for node_guid in self.roles2agents[role]:
                        if len(node_guid.split("_")) != 2:
                            raise RuntimeError("node_guid needs to be of format: '$gid_$nid' ")
                        ngid,nid=node_guid.split("_")
                        if gid is None or int(gid) == int(ngid):
                            jobd=self.scheduleCmd(gid=ngid,nid=nid,cmdcategory=organization,cmdname=name,args=args,queue=queue,log=action.log,timeout=timeout,roles=[role],session=session,jscriptid=action.id, wait=wait,errorreport=errorreport,tags=tags)
                            job.append(jobd)
                    if wait:
                        results = list()
                        for jobitem in job:
                            results.append(self.waitJumpscript(job=jobitem,session=session))
                        return results
            # NOTE(review): with all=True and wait=False the scheduled job list is
            # discarded and a NOWORK job is returned instead — confirm intended.
            return noWork()
        elif nid is not None:
            self._log("NID KNOWN")
            gid = gid or session.gid
            job=self.scheduleCmd(gid,nid,organization,name,args=args,queue=queue,log=action.log,timeout=timeout,session=session,jscriptid=action.id,wait=wait,tags=tags)
            if wait:
                return self.waitJumpscript(job=job,session=session)
            return job
        else:
            return noWork()
    def waitJumpscript(self,jobguid=None,job=None, timeout=None, session=None):
        """
        Block until a scheduled job produces a result (or times out).

        @param timeout: if given overrules job.timeout; makes it possible to wait for 0 seconds
        @type timeout: int
        @return job as dict
        """
        if job==None:
            if jobguid==None:
                raise RuntimeError("job or jobid need to be given as argument")
            job = self._getJobFromRedis(jobguid)
            if not job:
                # job = self.jobclient.get(jobguid).__dict__
                # job['result'] = json.loads(job['result'])
                raise RuntimeError("Cannot find job in redis.")
        # Anything past SCHEDULED already carries its result.
        if job['state'] != 'SCHEDULED':
            return job
        q = self._getJobQueue(job["guid"])
        if timeout is None:
            timeout = job['timeout']
        if timeout == 0:
            # Non-blocking poll.
            res = q.fetch(False)
        elif timeout is not None:
            res = q.fetch(timeout=timeout)
        else:
            res = q.fetch()
        self._deleteJobFromCache(job)
        q.set_expire(5)  # @todo ????
        if res is not None:
            return json.loads(res)
        else:
            # No result in time: mark the job as timed out and persist that.
            job["resultcode"]=1
            job["state"]="TIMEOUT"
            if job['nid'] is None:
                job['nid'] = 0
            else:
                # Also drop the still-pending command from the node's queue.
                self._deletelJobFromQueue(job)
            self._setJob(job, osis=True)
            self._log("timeout on execution")
            return job
    def _deletelJobFromQueue(self, job):
        """Remove a job's queued command entry (matched by guid) from its cmd queue."""
        cmdqueue = self._getCmdQueue(job['gid'], job['nid'])
        for jobqueue in self.redis.lrange(cmdqueue.key, 0, -1):
            qjob = json.loads(jobqueue)
            if qjob['guid'] == job['guid']:
                self.redis.lrem(cmdqueue.key, jobqueue)
                return
    def getWork(self, session=None):
        """
        Called by an agent to ask for work (long-polls up to 30s).

        @return: job as dict, None when no work, or -1 when the agent is unknown
        """
        nodeid = "%s_%s" % (session.gid, session.nid)
        # Agents must (re)register before they may pull work.
        if nodeid not in self.agents2roles:
            return -1
        self.sessionsUpdateTime[nodeid]=j.base.time.getTimeEpoch()
        self._log("getwork %s" % session)
        q = self._getWorkQueue(session)
        jobstr=q.get(timeout=30)
        if jobstr==None:
            self._log("NO WORK")
            return None
        job=json.loads(jobstr)
        if job is not None:
            # Role-routed jobs get the executing node's nid filled in here.
            job['nid'] = session.nid
            saveinosis = job['log']
            job['state'] = 'STARTED'
            if saveinosis or job['wait']:
                self._setJob(job, saveinosis)
            else:
                # Fire-and-forget jobs are dropped from the cache immediately.
                self._deleteJobFromCache(job)
            self._log("getwork found for node:%s for jsid:%s"%(session.nid,job["jscriptid"]))
        return job
    def send_healthcheck_eco(self, health, message, state="NEW"):
        """Turn one healthcheck message into an error condition object and process it."""
        eco = j.errorconditionhandler.getErrorConditionObject(
            msg=message['message'],
            category='healthcheck.{}'.format(message['category']),
            type='OPERATIONS',
            level=j.errorconditionhandler.getLevelByName(message['state'])
        )
        eco.state = state
        eco.gid = health['gid']
        eco.nid = health['nid']
        eco.backtrace = ""
        eco.process()
def saveHealth(self, job, jumpscript):
import copy
interval = 0
if jumpscript.period:
if isinstance(jumpscript.period, int):
interval = jumpscript.period
else:
cron = crontab.CronTab(jumpscript.period)
interval = cron.next() - cron.previous()
health_checks = []
health_tmpl = {'nid': job['nid'],
'gid': job['gid'],
'interval': interval,
'lastchecked': job['timeStop'],
'cmd': '%(category)s_%(cmd)s' % job,
'messages': []}
if job['state'] != 'OK':
health = copy.deepcopy(health_tmpl)
health['jobguid'] = job['guid']
health['messages'].append({'state': 'ERROR',
'message': 'Failed executing job %s' % job['cmd'],
'category': job['cmd'],
'lasterror': job['timeStop'],
'uid': 'execution_failed'})
health_checks.append(health)
else:
if job['result']:
msgs = copy.deepcopy(job['result'])
for msg in msgs:
if 'nid' in msg:
health = copy.deepcopy(health_tmpl)
health['nid'] = msg['nid']
health['eid'] = job['nid']
health['messages'] = [msg]
health['jobguid'] = None
health_checks.append(health)
job['result'].remove(msg)
else:
health = copy.deepcopy(health_tmpl)
health['jobguid'] = None # job is not saved so dont store job guid
health['messages'] = []
health_checks.append(health)
if job['result']:
health = copy.deepcopy(health_tmpl)
health['jobguid'] = None
health['messages'] = job['result']
health_checks.append(health)
try:
ok_states = ['OK', 'SKIPPED']
for health in health_checks:
last = self.healthclient.get('%(gid)s_%(nid)s_%(cmd)s' % health)
for new_message in health['messages']:
send_eco = True
eco_state = "NEW"
for old_message in last.messages[::-1]:
if new_message['uid'] == old_message.get('uid', ''):
last.messages.remove(old_message)
# if new msg not ok, then set last error
if new_message['state'] not in ok_states:
new_message['lasterror'] = old_message.get('lasterror', job['timeStop'])
message = new_message
state = new_message['state']
break
elif old_message.get('state', 'OK') not in ok_states:
# if new msg is ok, then check for old msg if not ok, then set state of eco to CLOSED
# Here we use the old message to get the old eco
message = old_message
eco_state = "CLOSED"
break
else:
send_eco = False
new_message['lasterror'] = ''
if send_eco:
self.send_healthcheck_eco(health, message, eco_state)
# CLOSE all remaining old ecos in error state
for message in last.messages:
if message.get('state', 'OK') not in ok_states:
self.send_healthcheck_eco(health, message, "CLOSED")
except:
pass
for health in health_checks:
self.healthclient.set(health)
def notifyWorkCompleted(self, job,session=None):
"""
job here is a dict
"""
self._log("NOTIFY WORK COMPLETED: jobid:%s"%job["id"])
if not j.basetype.dictionary.check(job):
raise RuntimeError("job needs to be dict")
# don't try to get jumpscripts for internal jobs and jobs directly scheduled to the workers
if job['category'] != 'pm' and job['queue'] != 'internal' and job['category']:
jscript = self.getJumpscript(job['category'], job['cmd'])
if jscript and jscript.category == 'monitor.healthcheck':
job['log'] = False
self.saveHealth(job, jscript)
saveinosis = job['log'] or job['state'] != 'OK'
self._setJob(job, osis=saveinosis)
if job['wait']:
q=self._getJobQueue(job["guid"])
q.put(json.dumps(job))
q.set_expire(60) # if result is not fetched in 60 seconds we can delete this
self._deleteJobFromCache(job)
self._log("completed job")
return
def getScheduledWork(self,agentid,session=None):
"""
list all work scheduled for 1 agent
"""
raise RuntimeError("need to be implemented")
self._adminAuth(session.user,session.passwd)
result=[]
for sessionid in self.agent2session[agentid]:
if self.workqueue.has_key(sessionid):
if len(self.workqueue[sessionid])>0:
result=[item.__dict__ for item in self.workqueue[sessionid]]
return result
def getActiveWork(self,agentid,session=None):
"""
list all work active for 1 agent
"""
if session is not None:
self._adminAuth(session.user,session.passwd)
jobs = list()
qname = 'queues:commands:queue:%s:%s' % (session.gid, agentid)
jobstrings = self.redis.lrange(qname, 0, -1)
for jobstring in jobstrings:
jobs.append(json.loads(jobstring))
return jobs
def getActiveJobs(self, session=None):
queues = self.redis.keys('queues:commands:queue*')
jobs = list()
for qname in queues:
jobstrings = self.redis.lrange(qname, 0, -1)
for jobstring in jobstrings:
job = json.loads(jobstring)
job['acqueue'] = qname[22:]
jobs.append(job)
return jobs
def log(self, logs, session=None):
#normally not required to use on this layer
for log in logs:
j.logger.logTargetLogForwarder.log(log)
def _log(self, msg):
if self.debug:
print(msg)
def listSessions(self,session=None):
agents = self.agents2roles.copy()
times = self.sessionsUpdateTime.copy()
for key, value in times.iteritems():
times[key] = [value] + agents.get(key, list())
return times
def getJobInfo(self, jobguid, session=None):
if jobguid==None:
raise RuntimeError("job or jobid need to be given as argument")
job = self._getJobFromRedis(jobguid)
if not job:
job = self.jobclient.get(jobguid).__dict__
return job
def listJobs(self, session=None):
"""
list all jobs waiting for which roles, show for each role which agents should be answering
also list jobs which are running and running in which sessions
"""
raise RuntimeError("need to be implemented")
result = []
jobresult = {}
for jobid in self.jobs.keys():
job = self.jobs[jobid]
jobresult['id'] = jobid
jobresult['jsname'] = job.db.jsname
jobresult['jsorganization'] = job.db.jsorganization
jobresult['roles'] = job.db.roles
# jobresult['args'] = job.db.args
jobresult['timeout'] = job.db.timeout
jobresult['result'] = job.db.result
jobresult['sessionid'] = job.db.sessionid
jobresult['jscriptid'] = job.db.jscriptid
jobresult['children'] = job.db.children
jobresult['childrenActive'] = job.db.childrenActive
jobresult['parent'] = job.db.parent
jobresult['resultcode'] = job.db.resultcode
if self.activeJobSessions.has_key(session.id):
jobresult["isactive"] == jobid in self.activeJobSessions[session.id]
else:
jobresult["isactive"] = False
result.append(jobresult)
return result
def getJumpscripts(self, bz2_compressed=True, types=('processmanager', 'jumpscripts'), session=None):
"""
Returns the available jumpscripts as a Base64-encoded TAR archive that is optionally compressed using bzip2.
Args:
bz2_compressed (boolean): If True then the returned TAR is bzip2-compressed
types (sequence of str): A sequence of the types of jumpscripts to be packed in the returned archive.
possible values in the sequence are 'processmanager', 'jumpscripts', and 'luajumpscripts'.
"""
scripts_tar_content = \
j.core.jumpscripts.getArchivedJumpscripts(bz2_compressed=bz2_compressed, types=types)
return b64encode(scripts_tar_content)
# will reinit for testing everytime, not really needed
# j.servers.geventws.initSSL4Server("myorg", "controller1")
port = 4444
daemon = j.servers.geventws.getServer(port=port)
daemon.addCMDsInterface(ControllerCMDS, category="agent") # pass as class not as object !!! chose category if only 1 then can leave ""
print("load processmanager cmds")
# j.system.fs.changeDir("processmanager")
import sys
sys.path.append(j.system.fs.joinPaths(j.system.fs.getcwd(),"processmanager"))
for item in j.system.fs.listFilesInDir("processmanager/processmanagercmds",filter="*.py"):
name=j.system.fs.getBaseName(item).replace(".py","")
if name[0] != "_":
module = importlib.import_module('processmanagercmds.%s' % name)
classs = getattr(module, name)
print("load cmds:%s"%name)
tmp=classs()
daemon.addCMDsInterface(classs, category="processmanager_%s" % tmp._name, proxy=True)
# j.system.fs.changeDir("..")
cmds=daemon.daemon.cmdsInterfaces["agent"]
cmds.reloadjumpscripts()
# cmds.restartProcessmanagerWorkers()
daemon.start()
j.application.stop()
| 41.245025 | 252 | 0.585839 |
455521d7bacad78291df359a13b582f5c4391dc6 | 1,490 | py | Python | vendor/paypal/standard/ipn/models.py | aragilar/NewsBlur | 64ecd83bf4cea175f1bdeeb6e475fd5cadb679c9 | [
"MIT"
] | 2 | 2015-09-05T10:40:30.000Z | 2017-03-05T12:31:21.000Z | vendor/paypal/standard/ipn/models.py | aragilar/NewsBlur | 64ecd83bf4cea175f1bdeeb6e475fd5cadb679c9 | [
"MIT"
] | null | null | null | vendor/paypal/standard/ipn/models.py | aragilar/NewsBlur | 64ecd83bf4cea175f1bdeeb6e475fd5cadb679c9 | [
"MIT"
] | 1 | 2021-09-27T06:02:59.000Z | 2021-09-27T06:02:59.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import urllib2
from vendor.paypal.standard.models import PayPalStandardBase
from vendor.paypal.standard.ipn.signals import *
class PayPalIPN(PayPalStandardBase):
"""Logs PayPal IPN interactions."""
format = u"<IPN: %s %s>"
class Meta:
db_table = "paypal_ipn"
verbose_name = "PayPal IPN"
def _postback(self):
"""Perform PayPal Postback validation."""
return urllib2.urlopen(self.get_endpoint(), "cmd=_notify-validate&%s" % self.query).read()
def _verify_postback(self):
if self.response != "VERIFIED":
self.set_flag("Invalid postback. (%s)" % self.response)
def send_signals(self):
"""Shout for the world to hear whether a txn was successful."""
# Transaction signals:
if self.is_transaction():
if self.flag:
payment_was_flagged.send(sender=self)
else:
payment_was_successful.send(sender=self)
# Subscription signals:
else:
if self.is_subscription_cancellation():
subscription_cancel.send(sender=self)
elif self.is_subscription_signup():
subscription_signup.send(sender=self)
elif self.is_subscription_end_of_term():
subscription_eot.send(sender=self)
elif self.is_subscription_modified():
subscription_modify.send(sender=self) | 36.341463 | 98 | 0.616107 |
4396902ad44b06a79fcc62f78ff44f9454a1a285 | 11,294 | py | Python | ibis/expr/rules.py | ivergara/ibis | 212dc656c0121b01f1762afd9313aab5dfb19764 | [
"Apache-2.0"
] | null | null | null | ibis/expr/rules.py | ivergara/ibis | 212dc656c0121b01f1762afd9313aab5dfb19764 | [
"Apache-2.0"
] | 1 | 2021-03-25T14:07:29.000Z | 2021-03-25T14:07:29.000Z | ibis/expr/rules.py | ivergara/ibis | 212dc656c0121b01f1762afd9313aab5dfb19764 | [
"Apache-2.0"
] | null | null | null | import collections
import enum
import functools
from contextlib import suppress
from itertools import product, starmap
import ibis.common.exceptions as com
import ibis.expr.datatypes as dt
import ibis.expr.schema as sch
import ibis.expr.types as ir
import ibis.util as util
try:
from cytoolz import compose, curry, identity
except ImportError:
from toolz import compose, curry, identity
def highest_precedence_dtype(exprs):
"""Return the highest precedence type from the passed expressions
Also verifies that there are valid implicit casts between any of the types
and the selected highest precedence type.
This is a thin wrapper around datatypes highest precedence check.
Parameters
----------
exprs : Iterable[ir.ValueExpr]
A sequence of Expressions
Returns
-------
dtype: DataType
The highest precedence datatype
"""
if not exprs:
raise ValueError('Must pass at least one expression')
return dt.highest_precedence(expr.type() for expr in exprs)
def castable(source, target):
"""Return whether source ir type is implicitly castable to target
Based on the underlying datatypes and the value in case of Literals
"""
op = source.op()
value = getattr(op, 'value', None)
return dt.castable(source.type(), target.type(), value=value)
def comparable(left, right):
return castable(left, right) or castable(right, left)
def cast(source, target):
"""Currently Literal to *Scalar implicit casts are allowed"""
import ibis.expr.operations as ops # TODO: don't use ops here
if not castable(source, target):
raise com.IbisTypeError('Source is not castable to target type!')
# currently it prevents column -> scalar implicit castings
# however the datatypes are matching
op = source.op()
if not isinstance(op, ops.Literal):
raise com.IbisTypeError('Only able to implicitly cast literals!')
out_type = target.type().scalar_type()
return out_type(op)
# ---------------------------------------------------------------------
# Input type validators / coercion functions
class validator(curry):
def __repr__(self):
return '{}({}{})'.format(
self.func.__name__,
repr(self.args)[1:-1],
', '.join(
'{}={!r}'.format(k, v) for k, v in self.keywords.items()
),
)
noop = validator(identity)
@validator
def one_of(inners, arg):
"""At least one of the inner validators must pass"""
for inner in inners:
with suppress(com.IbisTypeError, ValueError):
return inner(arg)
rules_formatted = ', '.join(map(repr, inners))
raise com.IbisTypeError(
'Arg passes neither of the following rules: {}'.format(rules_formatted)
)
@validator
def all_of(inners, arg):
"""All of the inner validators must pass.
The order of inner validators matters.
Parameters
----------
inners : List[validator]
Functions are applied from right to left so allof([rule1, rule2], arg) is
the same as rule1(rule2(arg)).
arg : Any
Value to be validated.
Returns
-------
arg : Any
Value maybe coerced by inner validators to the appropiate types
"""
return compose(*inners)(arg)
@validator
def isin(values, arg):
if arg not in values:
raise ValueError(
'Value with type {} is not in {!r}'.format(type(arg), values)
)
if isinstance(values, dict): # TODO check for mapping instead
return values[arg]
else:
return arg
@validator
def member_of(obj, arg):
if isinstance(arg, ir.EnumValue):
arg = arg.op().value
if isinstance(arg, enum.Enum):
enum.unique(obj) # check that enum has unique values
arg = arg.name
if not hasattr(obj, arg):
raise com.IbisTypeError(
'Value with type {} is not a member of {}'.format(type(arg), obj)
)
return getattr(obj, arg)
@validator
def list_of(inner, arg, min_length=0):
if isinstance(arg, str) or not isinstance(
arg, (collections.abc.Sequence, ir.ListExpr)
):
raise com.IbisTypeError('Argument must be a sequence')
if len(arg) < min_length:
raise com.IbisTypeError(
'Arg must have at least {} number of elements'.format(min_length)
)
return ir.sequence(list(map(inner, arg)))
@validator
def datatype(arg):
return dt.dtype(arg)
@validator
def instance_of(klass, arg):
"""Require that a value has a particular Python type."""
if not isinstance(arg, klass):
raise com.IbisTypeError(
'Given argument with type {} is not an instance of {}'.format(
type(arg), klass
)
)
return arg
@validator
def value(dtype, arg):
"""Validates that the given argument is a Value with a particular datatype
Parameters
----------
dtype : DataType subclass or DataType instance
arg : python literal or an ibis expression
If a python literal is given the validator tries to coerce it to an ibis
literal.
Returns
-------
arg : AnyValue
An ibis value expression with the specified datatype
"""
if not isinstance(arg, ir.Expr):
# coerce python literal to ibis literal
arg = ir.literal(arg)
if not isinstance(arg, ir.AnyValue):
raise com.IbisTypeError(
'Given argument with type {} is not a value '
'expression'.format(type(arg))
)
# retrieve literal values for implicit cast check
value = getattr(arg.op(), 'value', None)
if isinstance(dtype, type) and isinstance(arg.type(), dtype):
# dtype class has been specified like dt.Interval or dt.Decimal
return arg
elif dt.castable(arg.type(), dt.dtype(dtype), value=value):
# dtype instance or string has been specified and arg's dtype is
# implicitly castable to it, like dt.int8 is castable to dt.int64
return arg
else:
raise com.IbisTypeError(
'Given argument with datatype {} is not '
'subtype of {} nor implicitly castable to '
'it'.format(arg.type(), dtype)
)
@validator
def scalar(inner, arg):
return instance_of(ir.ScalarExpr, inner(arg))
@validator
def column(inner, arg):
return instance_of(ir.ColumnExpr, inner(arg))
@validator
def array_of(inner, arg):
val = arg if isinstance(arg, ir.Expr) else ir.literal(arg)
argtype = val.type()
if not isinstance(argtype, dt.Array):
raise com.IbisTypeError(
'Argument must be an array, got expression {} which is of type '
'{}'.format(val, val.type())
)
return value(dt.Array(inner(val[0]).type()), val)
any = value(dt.any)
double = value(dt.double)
string = value(dt.string)
boolean = value(dt.boolean)
integer = value(dt.int64)
decimal = value(dt.Decimal)
floating = value(dt.float64)
date = value(dt.date)
time = value(dt.time)
timestamp = value(dt.Timestamp)
category = value(dt.category)
temporal = one_of([timestamp, date, time])
strict_numeric = one_of([integer, floating, decimal])
soft_numeric = one_of([integer, floating, decimal, boolean])
numeric = soft_numeric
set_ = value(dt.Set)
array = value(dt.Array)
struct = value(dt.Struct)
mapping = value(dt.Map(dt.any, dt.any))
geospatial = value(dt.GeoSpatial)
point = value(dt.Point)
linestring = value(dt.LineString)
polygon = value(dt.Polygon)
multilinestring = value(dt.MultiLineString)
multipoint = value(dt.MultiPoint)
multipolygon = value(dt.MultiPolygon)
@validator
def interval(arg, units=None):
arg = value(dt.Interval, arg)
unit = arg.type().unit
if units is not None and unit not in units:
msg = 'Interval unit `{}` is not among the allowed ones {}'
raise com.IbisTypeError(msg.format(unit, units))
return arg
@validator
def client(arg):
from ibis.client import Client
return instance_of(Client, arg)
# ---------------------------------------------------------------------
# Ouput type promoter functions
def promoter(fn):
@functools.wraps(fn)
def wrapper(name_or_value, *args, **kwargs):
if isinstance(name_or_value, str):
return lambda self: fn(
getattr(self, name_or_value), *args, **kwargs
)
else:
return fn(name_or_value, *args, **kwargs)
return wrapper
@promoter
def shape_like(arg, dtype=None):
if isinstance(arg, (tuple, list, ir.ListExpr)):
datatype = dtype or highest_precedence_dtype(arg)
columnar = util.any_of(arg, ir.AnyColumn)
else:
datatype = dtype or arg.type()
columnar = isinstance(arg, ir.AnyColumn)
dtype = dt.dtype(datatype)
if columnar:
return dtype.column_type()
else:
return dtype.scalar_type()
@promoter
def scalar_like(arg):
output_dtype = arg.type()
return output_dtype.scalar_type()
@promoter
def array_like(arg):
output_dtype = arg.type()
return output_dtype.column_type()
column_like = array_like
@promoter
def typeof(arg):
return arg._factory
@validator
def table(schema, arg):
"""A table argument.
Parameters
----------
schema : Union[sch.Schema, List[Tuple[str, dt.DataType]]
A validator for the table's columns. Only column subset validators are
currently supported. Accepts any arguments that `sch.schema` accepts.
See the example for usage.
arg : The validatable argument.
Examples
--------
The following op will accept an argument named ``'table'``. Note that the
``schema`` argument specifies rules for columns that are required to be in
the table: ``time``, ``group`` and ``value1``. These must match the types
specified in the column rules. Column ``value2`` is optional, but if
present it must be of the specified type. The table may have extra columns
not specified in the schema.
"""
assert isinstance(arg, ir.TableExpr)
if arg.schema() >= sch.schema(schema):
return arg
raise com.IbisTypeError(
'Argument is not a table with column subset of {}'.format(schema)
)
# TODO: might just use bounds instead of actual literal values
# that could simplify interval binop output_type methods
def _promote_numeric_binop(exprs, op):
bounds, dtypes = [], []
for arg in exprs:
dtypes.append(arg.type())
if hasattr(arg.op(), 'value'):
# arg.op() is a literal
bounds.append([arg.op().value])
else:
bounds.append(arg.type().bounds)
# In some cases, the bounding type might be int8, even though neither
# of the types are that small. We want to ensure the containing type is
# _at least_ as large as the smallest type in the expression.
values = starmap(op, product(*bounds))
dtypes += [dt.infer(value, allow_overflow=True) for value in values]
return dt.highest_precedence(dtypes)
@promoter
def numeric_like(args, op):
if util.all_of(args, ir.IntegerValue):
dtype = _promote_numeric_binop(args, op)
return shape_like(args, dtype=dtype)
else:
return shape_like(args)
# TODO: create varargs marker for impala udfs
| 27.280193 | 79 | 0.646715 |
afe14325479b72edb129b06c60c5f2b899471a3d | 1,166 | py | Python | mmdet/models/detectors/__init__.py | QUAPNH/NucDetSeg | ad4040a359e52c611780b409f84b601bfa9c94e2 | [
"Apache-2.0"
] | 1 | 2022-02-21T11:05:09.000Z | 2022-02-21T11:05:09.000Z | mmdet/models/detectors/__init__.py | lh0515/cas-dc-template | 5b0400ca5dc98d09beca36d46cc55bfabb9ce4e0 | [
"Apache-2.0"
] | null | null | null | mmdet/models/detectors/__init__.py | lh0515/cas-dc-template | 5b0400ca5dc98d09beca36d46cc55bfabb9ce4e0 | [
"Apache-2.0"
] | 1 | 2022-02-21T11:05:01.000Z | 2022-02-21T11:05:01.000Z | from .atss import ATSS
from .base import BaseDetector
from .bbox_scoring_rcnn import BBoxScoringRCNN
from .cascade_rcnn import CascadeRCNN
from .cornernet import CornerNet
from .fast_rcnn import FastRCNN
from .faster_rcnn import FasterRCNN
from .fcos import FCOS
from .fovea import FOVEA
from .fsaf import FSAF
from .gfl import GFL
from .grid_rcnn import GridRCNN
from .htc import HybridTaskCascade
from .mask_rcnn import MaskRCNN
from .mask_scoring_rcnn import MaskScoringRCNN
from .nasfcos import NASFCOS
from .paa import PAA
from .point_rend import PointRend
from .reppoints_detector import RepPointsDetector
from .retinanet import RetinaNet
from .rpn import RPN
from .single_stage import SingleStageDetector
from .two_stage import TwoStageDetector
from .yolact import YOLACT
from .yolo import YOLOV3
__all__ = [
'ATSS', 'BaseDetector', 'BBoxScoringRCNN', 'SingleStageDetector', 'TwoStageDetector', 'RPN',
'FastRCNN', 'FasterRCNN', 'MaskRCNN', 'CascadeRCNN', 'HybridTaskCascade',
'RetinaNet', 'FCOS', 'GridRCNN', 'MaskScoringRCNN', 'RepPointsDetector',
'FOVEA', 'FSAF', 'NASFCOS', 'PointRend', 'GFL', 'CornerNet', 'PAA',
'YOLOV3', 'YOLACT'
]
| 34.294118 | 96 | 0.783877 |
8b0f6bef9aa2166c6153ca52820ed5995c92c98f | 1,066 | py | Python | rqalpha/mod/rqalpha_mod_sys_benchmark/__init__.py | HaidongHe/rqalpha | bb824178425909e051c456f6062a6c5bdc816421 | [
"Apache-2.0"
] | 1 | 2020-11-10T05:44:39.000Z | 2020-11-10T05:44:39.000Z | rqalpha/mod/rqalpha_mod_sys_benchmark/__init__.py | HaidongHe/rqalpha | bb824178425909e051c456f6062a6c5bdc816421 | [
"Apache-2.0"
] | null | null | null | rqalpha/mod/rqalpha_mod_sys_benchmark/__init__.py | HaidongHe/rqalpha | bb824178425909e051c456f6062a6c5bdc816421 | [
"Apache-2.0"
] | 1 | 2020-03-05T05:06:45.000Z | 2020-03-05T05:06:45.000Z | # -*- coding: utf-8 -*-
# 版权所有 2019 深圳米筐科技有限公司(下称“米筐科技”)
#
# 除非遵守当前许可,否则不得使用本软件。
#
# * 非商业用途(非商业用途指个人出于非商业目的使用本软件,或者高校、研究所等非营利机构出于教育、科研等目的使用本软件):
# 遵守 Apache License 2.0(下称“Apache 2.0 许可”),您可以在以下位置获得 Apache 2.0 许可的副本:http://www.apache.org/licenses/LICENSE-2.0。
# 除非法律有要求或以书面形式达成协议,否则本软件分发时需保持当前许可“原样”不变,且不得附加任何条件。
#
# * 商业用途(商业用途指个人出于任何商业目的使用本软件,或者法人或其他组织出于任何目的使用本软件):
# 未经米筐科技授权,任何个人不得出于任何商业目的使用本软件(包括但不限于向第三方提供、销售、出租、出借、转让本软件、本软件的衍生产品、引用或借鉴了本软件功能或源代码的产品或服务),任何法人或其他组织不得出于任何目的使用本软件,否则米筐科技有权追究相应的知识产权侵权责任。
# 在此前提下,对本软件的使用同样需要遵守 Apache 2.0 许可,Apache 2.0 许可与本许可冲突之处,以本许可为准。
# 详细的授权流程,请联系 public@ricequant.com 获取。
import click
from rqalpha import cli
__config__ = {
"order_book_id": None
}
def load_mod():
from .mod import BenchmarkMod
return BenchmarkMod()
cli_prefix = "mod__sys_benchmark__"
cli.commands["run"].params.append(
click.Option(
("-bm", "--benchmark", cli_prefix + "order_book_id"),
type=click.STRING,
help="[sys_benchmark] order_book_id of benchmark"
)
)
| 28.810811 | 144 | 0.703565 |
0014a17e42118306b9ac1c94b907d5f9dbc2bed1 | 3,298 | py | Python | student_management_system/settings.py | jatinvats636/SMS | eeefe16c95a9b42cef5861584693090cb5efe1da | [
"MIT"
] | 1 | 2021-04-08T06:21:08.000Z | 2021-04-08T06:21:08.000Z | student_management_system/settings.py | jatinvats636/SMS_Django | eeefe16c95a9b42cef5861584693090cb5efe1da | [
"MIT"
] | null | null | null | student_management_system/settings.py | jatinvats636/SMS_Django | eeefe16c95a9b42cef5861584693090cb5efe1da | [
"MIT"
] | null | null | null |
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '(i#*06f#keydy_fh17bf=$0f6v)^wr^l7*u4gq42m*sztu#2_m'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'student_management_app',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'student_management_app.LoginCheckMiddleWare.LoginCheckMiddleWare',
]
ROOT_URLCONF = 'student_management_system.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'student_management_system.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
#STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static'),
]
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
#For Custom USER
AUTH_USER_MODEL = "student_management_app.CustomUser"
# Registering Custom Backend "EmailBackEnd"
AUTHENTICATION_BACKENDS = ['student_management_app.EmailBackEnd.EmailBackEnd']
| 25.765625 | 91 | 0.704973 |
ae365d310ea5a252e0e9cd373199da3bb0eee5b5 | 1,003 | py | Python | time_zone.py | dave-2/chatterbox | 5e8456f1f80858ac0e1a91e2e4f3322b7f33220c | [
"Apache-2.0"
] | null | null | null | time_zone.py | dave-2/chatterbox | 5e8456f1f80858ac0e1a91e2e4f3322b7f33220c | [
"Apache-2.0"
] | null | null | null | time_zone.py | dave-2/chatterbox | 5e8456f1f80858ac0e1a91e2e4f3322b7f33220c | [
"Apache-2.0"
] | 1 | 2016-08-10T05:04:34.000Z | 2016-08-10T05:04:34.000Z | import datetime
class PacificTimeZone(datetime.tzinfo):
"""Implementation of the Pacific timezone."""
def utcoffset(self, dt: datetime.datetime) -> datetime.timedelta:
return datetime.timedelta(hours=-8) + self.dst(dt)
def dst(self, dt: datetime.datetime) -> datetime.timedelta:
# 2 am on the second Sunday in March
dst_start = _first_sunday(datetime.datetime(dt.year, 3, 8, 2))
# 1 am on the first Sunday in November
dst_end = _first_sunday(datetime.datetime(dt.year, 11, 1, 1))
if dst_start <= dt.replace(tzinfo=None) < dst_end:
return datetime.timedelta(hours=1)
return datetime.timedelta(hours=0)
def tzname(self, dt: datetime.datetime) -> str:
if self.dst(dt) == datetime.timedelta(hours=0):
return 'PST'
return 'PDT'
def _first_sunday(dt: datetime.datetime) -> datetime.datetime:
"""First Sunday on or after dt."""
return dt + datetime.timedelta(days=(6-dt.weekday()))
| 34.586207 | 70 | 0.655035 |
6e1476d749583a2fd863b425277ff74c5fd339d4 | 591 | py | Python | setup.py | TheAmmiR/bdtheme | 3dd92e55bb9f73649dda5d9e7fa889dc152f3977 | [
"MIT"
] | 2 | 2022-01-10T17:44:33.000Z | 2022-01-11T11:51:22.000Z | setup.py | TheAmmiR/bdtheme | 3dd92e55bb9f73649dda5d9e7fa889dc152f3977 | [
"MIT"
] | null | null | null | setup.py | TheAmmiR/bdtheme | 3dd92e55bb9f73649dda5d9e7fa889dc152f3977 | [
"MIT"
] | null | null | null | from setuptools import setup, find_packages
import sys
requirements = ["bs4", "requests", "lxml",
"beaudis"]
if sys.platform == "win32":
requirements.append("windows-curses")
setup(
name="bdtheme",
author="TheAmmiR",
license="MIT",
description="Console BeautifulDiscord theme manager",
version="1.3.1",
packages=find_packages(
exclude=["themes", "venv"]),
install_requires=requirements,
python_requires=">=3.8",
entry_points={
"console_scripts": [
"bdtheme=bdtheme.src.main:cmd_bdtheme"
]
}
)
| 23.64 | 57 | 0.622673 |
bff76b5d5ec84e449e8c67f7fc9a3e990901f8a5 | 2,789 | py | Python | hc/front/templatetags/hc_extras.py | aksalj/healthchecks | f588959dc1a1bc16d631aa034a37ec22a7189fc8 | [
"BSD-3-Clause"
] | 1 | 2021-09-08T17:21:25.000Z | 2021-09-08T17:21:25.000Z | hc/front/templatetags/hc_extras.py | aksalj/healthchecks | f588959dc1a1bc16d631aa034a37ec22a7189fc8 | [
"BSD-3-Clause"
] | null | null | null | hc/front/templatetags/hc_extras.py | aksalj/healthchecks | f588959dc1a1bc16d631aa034a37ec22a7189fc8 | [
"BSD-3-Clause"
] | null | null | null | import re
from django import template
from django.conf import settings
from django.utils.html import escape
from django.utils.safestring import mark_safe
from hc.lib.date import format_duration, format_hms
register = template.Library()
@register.filter
def hc_duration(td):
return format_duration(td)
@register.filter
def hms(td):
return format_hms(td)
@register.simple_tag
def site_name():
return settings.SITE_NAME
@register.filter
def mangle_link(s):
return mark_safe(escape(s).replace(".", "<span>.</span>"))
@register.simple_tag
def site_root():
return settings.SITE_ROOT
@register.simple_tag
def debug_warning():
if settings.DEBUG:
return mark_safe(
"""
<div id="debug-warning">
Running in debug mode, do not use in production.
</div>
"""
)
return ""
def naturalize_int_match(match):
return "%08d" % (int(match.group(0)),)
def natural_name_key(check):
s = check.name.lower().strip()
return re.sub(r"\d+", naturalize_int_match, s)
def last_ping_key(check):
return check.last_ping.isoformat() if check.last_ping else "9999"
def not_down_key(check):
return check.get_status() != "down"
@register.filter
def sortchecks(checks, key):
"""Sort the list of checks in-place by given key, then by status=down. """
if key == "created":
checks.sort(key=lambda check: check.created)
elif key.endswith("name"):
checks.sort(key=natural_name_key, reverse=key.startswith("-"))
elif key.endswith("last_ping"):
checks.sort(key=last_ping_key, reverse=key.startswith("-"))
# Move failed checks to the beginning. Sorts in python are stable
# so this does not mess up the previous sort.
checks.sort(key=not_down_key)
return checks
@register.filter
def num_down_title(num_down):
if num_down:
return "%d down – %s" % (num_down, settings.SITE_NAME)
else:
return settings.SITE_NAME
@register.filter
def down_title(check):
""" Prepare title tag for the Details page.
If the check is down, return "DOWN - Name - site_name".
Otherwise, return "Name - site_name".
"""
s = "%s – %s" % (check.name_then_code(), settings.SITE_NAME)
if check.get_status() == "down":
s = "DOWN – " + s
return s
@register.filter
def break_underscore(s):
""" Add non-breaking-space characters after underscores. """
if len(s) > 30:
s = s.replace("_", "_\u200b")
return s
@register.filter
def fix_asterisks(s):
""" Prepend asterisks with "Combining Grapheme Joiner" characters. """
return s.replace("*", "\u034f*")
@register.filter
def format_headers(headers):
    """Render a headers mapping as newline-separated "Key: value" lines."""
    lines = ["%s: %s" % (key, value) for key, value in headers.items()]
    return "\n".join(lines)
| 21.290076 | 78 | 0.659018 |
3fabebc3e44d3361b276d05dfeb1a51d6825b544 | 28,840 | py | Python | xray/test/test_backends.py | akleeman/xray | 48ce8c23c31b9a5d092f29974715aa1888b95044 | [
"Apache-2.0"
] | 1 | 2017-09-18T02:08:09.000Z | 2017-09-18T02:08:09.000Z | xray/test/test_backends.py | akleeman/xray | 48ce8c23c31b9a5d092f29974715aa1888b95044 | [
"Apache-2.0"
] | null | null | null | xray/test/test_backends.py | akleeman/xray | 48ce8c23c31b9a5d092f29974715aa1888b95044 | [
"Apache-2.0"
] | null | null | null | from io import BytesIO
import contextlib
import os.path
import pickle
import tempfile
import unittest
import sys
import numpy as np
import pandas as pd
import xray
from xray import Dataset, open_dataset, backends
from xray.core.pycompat import iteritems, PY3
from . import (TestCase, requires_scipy, requires_netCDF4, requires_pydap,
requires_scipy_or_netCDF4, has_netCDF4, has_scipy)
from .test_dataset import create_test_data
# netCDF4 is an optional dependency; tests needing it are skipped via
# the @requires_netCDF4 decorator, so a missing module is tolerated here.
try:
    import netCDF4 as nc4
except ImportError:
    pass
def open_example_dataset(name, *args, **kwargs):
    """Open a dataset from the ``data`` directory next to this test file."""
    return open_dataset(os.path.join(os.path.dirname(__file__), 'data', name),
                        *args, **kwargs)
def create_masked_and_scaled_data():
    """Decoded fixture: floats with NaNs plus CF mask/scale encoding metadata."""
    x = np.array([np.nan, np.nan, 10, 10.1, 10.2])
    encoding = {'_FillValue': -1, 'add_offset': 10,
                'scale_factor': np.float32(0.1), 'dtype': 'i2'}
    return Dataset({'x': ('t', x, {}, encoding)})
def create_encoded_masked_and_scaled_data():
    """Encoded (on-disk) twin of create_masked_and_scaled_data: raw int16 values."""
    attributes = {'_FillValue': -1, 'add_offset': 10,
                  'scale_factor': np.float32(0.1)}
    return Dataset({'x': ('t', [-1, -1, 0, 1, 2], attributes)})
class Only32BitTypes(object):
    # Marker mixin: the backend only supports netCDF3-style types, so the
    # tests expect strings to round-trip as bytes ('S' dtype) instead of
    # unicode (checked via isinstance(self, Only32BitTypes)).
    pass
class DatasetIOTestCases(object):
    """Backend-agnostic round-trip tests.

    Subclasses provide ``create_store`` and ``roundtrip`` for one concrete
    storage backend; every test below then exercises that backend.
    """
    def create_store(self):
        # Context manager yielding a writable backend store (subclass hook).
        raise NotImplementedError
    def roundtrip(self, data, **kwargs):
        # Context manager: write ``data`` with the backend and yield it
        # re-opened (subclass hook).
        raise NotImplementedError
    def test_zero_dimensional_variable(self):
        expected = create_test_data()
        expected['float_var'] = ([], 1.0e9, {'units': 'units of awesome'})
        expected['string_var'] = ([], np.array('foobar', dtype='S'))
        with self.roundtrip(expected) as actual:
            self.assertDatasetAllClose(expected, actual)
    def test_write_store(self):
        expected = create_test_data()
        with self.create_store() as store:
            expected.dump_to_store(store)
            # we need to cf decode the store because it has time and
            # non-dimension coordinates
            actual = xray.decode_cf(store)
            self.assertDatasetAllClose(expected, actual)
    def test_roundtrip_test_data(self):
        expected = create_test_data()
        with self.roundtrip(expected) as actual:
            self.assertDatasetAllClose(expected, actual)
    def test_load_data(self):
        expected = create_test_data()
        @contextlib.contextmanager
        def assert_loads(vars=None):
            # Asserts that nothing is in memory before the block runs and
            # that exactly ``vars`` got loaded afterwards.
            if vars is None:
                vars = expected
            with self.roundtrip(expected) as actual:
                for v in actual.values():
                    self.assertFalse(v._in_memory)
                yield actual
                for k, v in actual.items():
                    if k in vars:
                        self.assertTrue(v._in_memory)
                self.assertDatasetAllClose(expected, actual)
        with self.assertRaises(AssertionError):
            # make sure the contextmanager works!
            with assert_loads() as ds:
                pass
        with assert_loads() as ds:
            ds.load_data()
        with assert_loads(['var1', 'dim1', 'dim2']) as ds:
            ds['var1'].load_data()
        # verify we can read data even after closing the file
        with self.roundtrip(expected) as ds:
            actual = ds.load_data()
        self.assertDatasetAllClose(expected, actual)
    def test_roundtrip_None_variable(self):
        expected = Dataset({None: (('x', 'y'), [[0, 1], [2, 3]])})
        with self.roundtrip(expected) as actual:
            self.assertDatasetAllClose(expected, actual)
    def test_roundtrip_object_dtype(self):
        floats = np.array([0.0, 0.0, 1.0, 2.0, 3.0], dtype=object)
        floats_nans = np.array([np.nan, np.nan, 1.0, 2.0, 3.0], dtype=object)
        letters = np.array(['ab', 'cdef', 'g'], dtype=object)
        letters_nans = np.array(['ab', 'cdef', np.nan], dtype=object)
        all_nans = np.array([np.nan, np.nan], dtype=object)
        original = Dataset({'floats': ('a', floats),
                            'floats_nans': ('a', floats_nans),
                            'letters': ('b', letters),
                            'letters_nans': ('b', letters_nans),
                            'all_nans': ('c', all_nans),
                            'nan': ([], np.nan)})
        expected = original.copy(deep=True)
        if isinstance(self, Only32BitTypes):
            # for netCDF3 tests, expect the results to come back as characters
            expected['letters_nans'] = expected['letters_nans'].astype('S')
            expected['letters'] = expected['letters'].astype('S')
        with self.roundtrip(original) as actual:
            try:
                self.assertDatasetIdentical(expected, actual)
            except AssertionError:
                # Most stores use '' for nans in strings, but some don't
                # first try the ideal case (where the store returns exactly)
                # the original Dataset), then try a more realistic case.
                # ScipyDataTest, NetCDF3ViaNetCDF4DataTest and NetCDF4DataTest
                # all end up using this case.
                expected['letters_nans'][-1] = ''
                self.assertDatasetIdentical(expected, actual)
    def test_roundtrip_string_data(self):
        expected = Dataset({'x': ('t', ['ab', 'cdef'])})
        with self.roundtrip(expected) as actual:
            if isinstance(self, Only32BitTypes):
                expected['x'] = expected['x'].astype('S')
            self.assertDatasetIdentical(expected, actual)
    def test_roundtrip_datetime_data(self):
        times = pd.to_datetime(['2000-01-01', '2000-01-02', 'NaT'])
        expected = Dataset({'t': ('t', times)})
        with self.roundtrip(expected) as actual:
            self.assertDatasetIdentical(expected, actual)
    def test_roundtrip_timedelta_data(self):
        time_deltas = pd.to_timedelta(['1h', '2h', 'NaT'])
        expected = Dataset({'td': ('td', time_deltas)})
        with self.roundtrip(expected) as actual:
            self.assertDatasetIdentical(expected, actual)
    def test_roundtrip_example_1_netcdf(self):
        expected = open_example_dataset('example_1.nc')
        with self.roundtrip(expected) as actual:
            # we allow the attributes to differ since that
            # will depend on the encoding used. For example,
            # without CF encoding 'actual' will end up with
            # a dtype attribute.
            self.assertDatasetEqual(expected, actual)
    def test_roundtrip_example_1_netcdf_gz(self):
        # gzipped netCDF support needs Python >= 2.7
        if sys.version_info[:2] < (2, 7):
            with self.assertRaisesRegexp(ValueError,
                                         'gzipped netCDF not supported'):
                open_example_dataset('example_1.nc.gz')
        else:
            with open_example_dataset('example_1.nc.gz') as expected:
                with open_example_dataset('example_1.nc') as actual:
                    self.assertDatasetIdentical(expected, actual)
    def test_roundtrip_coordinates(self):
        original = Dataset({'foo': ('x', [0, 1])},
                           {'x': [2, 3], 'y': ('a', [42]), 'z': ('x', [4, 5])})
        with self.roundtrip(original) as actual:
            self.assertDatasetIdentical(original, actual)
        expected = original.drop('foo')
        with self.roundtrip(expected) as actual:
            self.assertDatasetIdentical(expected, actual)
        # a user-set 'coordinates' attribute clashes with the CF convention
        # attribute the writer needs, so serialization must fail loudly
        expected = original.copy()
        expected.attrs['coordinates'] = 'something random'
        with self.assertRaisesRegexp(ValueError, 'cannot serialize'):
            with self.roundtrip(expected):
                pass
        expected = original.copy(deep=True)
        expected['foo'].attrs['coordinates'] = 'something random'
        with self.assertRaisesRegexp(ValueError, 'cannot serialize'):
            with self.roundtrip(expected):
                pass
    def test_orthogonal_indexing(self):
        in_memory = create_test_data()
        with self.roundtrip(in_memory) as on_disk:
            indexers = {'dim1': np.arange(3), 'dim2': np.arange(4),
                        'dim3': np.arange(5)}
            expected = in_memory.isel(**indexers)
            actual = on_disk.isel(**indexers)
            self.assertDatasetAllClose(expected, actual)
            # do it twice, to make sure we're switched from orthogonal -> numpy
            # when we cached the values
            actual = on_disk.isel(**indexers)
            self.assertDatasetAllClose(expected, actual)
    def test_pickle(self):
        on_disk = open_example_dataset('bears.nc')
        unpickled = pickle.loads(pickle.dumps(on_disk))
        self.assertDatasetIdentical(on_disk, unpickled)
class CFEncodedDataTest(DatasetIOTestCases):
    """Additional round-trip tests for backends that apply CF encoding."""
    def test_roundtrip_strings_with_fill_value(self):
        values = np.array(['ab', 'cdef', np.nan], dtype=object)
        encoding = {'_FillValue': np.string_('X'), 'dtype': np.dtype('S1')}
        original = Dataset({'x': ('t', values, {}, encoding)})
        expected = original.copy(deep=True)
        expected['x'][:2] = values[:2].astype('S')
        with self.roundtrip(original) as actual:
            self.assertDatasetIdentical(expected, actual)
        original = Dataset({'x': ('t', values, {}, {'_FillValue': '\x00'})})
        if not isinstance(self, Only32BitTypes):
            # these stores can save unicode strings
            expected = original.copy(deep=True)
        if type(self) is NetCDF4DataTest:
            # the netCDF4 library can't keep track of an empty _FillValue for
            # VLEN variables:
            expected['x'][-1] = ''
        elif (type(self) is NetCDF3ViaNetCDF4DataTest
              or (has_netCDF4 and type(self) is GenericNetCDFDataTest)):
            # netCDF4 can't keep track of an empty _FillValue for nc3, either:
            # https://github.com/Unidata/netcdf4-python/issues/273
            expected['x'][-1] = np.string_('')
        with self.roundtrip(original) as actual:
            self.assertDatasetIdentical(expected, actual)
    def test_roundtrip_mask_and_scale(self):
        decoded = create_masked_and_scaled_data()
        encoded = create_encoded_masked_and_scaled_data()
        with self.roundtrip(decoded) as actual:
            self.assertDatasetAllClose(decoded, actual)
        with self.roundtrip(decoded, decode_cf=False) as actual:
            # TODO: this assumes that all roundtrips will first
            # encode.  Is that something we want to test for?
            self.assertDatasetAllClose(encoded, actual)
        with self.roundtrip(encoded, decode_cf=False) as actual:
            self.assertDatasetAllClose(encoded, actual)
        # make sure roundtrip encoding didn't change the
        # original dataset.
        self.assertDatasetIdentical(encoded,
                                    create_encoded_masked_and_scaled_data())
        with self.roundtrip(encoded) as actual:
            self.assertDatasetAllClose(decoded, actual)
        with self.roundtrip(encoded, decode_cf=False) as actual:
            self.assertDatasetAllClose(encoded, actual)
@contextlib.contextmanager
def create_tmp_file(suffix='.nc'):
    """Context manager yielding the path of a fresh temporary file.

    The file is created up front (so backends can reopen it by path)
    and unconditionally deleted on exit.
    """
    handle, path = tempfile.mkstemp(suffix=suffix)
    # mkstemp leaves the descriptor open; close it so the path can be
    # reopened for writing on every platform.
    os.close(handle)
    try:
        yield path
    finally:
        os.remove(path)
@requires_netCDF4
class NetCDF4DataTest(CFEncodedDataTest, TestCase):
    """Round-trip tests against the netCDF4-python backend.

    Note: this class previously defined ``test_open_encodings`` and
    ``test_dump_and_open_encodings`` twice each; the earlier definitions
    were silently shadowed by the later ones and never ran, so the dead
    duplicates have been removed.
    """
    @contextlib.contextmanager
    def create_store(self):
        with create_tmp_file() as tmp_file:
            with backends.NetCDF4DataStore(tmp_file, mode='w') as store:
                yield store
    @contextlib.contextmanager
    def roundtrip(self, data, **kwargs):
        with create_tmp_file() as tmp_file:
            data.to_netcdf(tmp_file)
            with open_dataset(tmp_file, **kwargs) as ds:
                yield ds
    def test_open_group(self):
        # Create a netCDF file with a dataset stored within a group
        with create_tmp_file() as tmp_file:
            with nc4.Dataset(tmp_file, 'w') as rootgrp:
                foogrp = rootgrp.createGroup('foo')
                ds = foogrp
                ds.createDimension('time', size=10)
                x = np.arange(10)
                ds.createVariable('x', np.int32, dimensions=('time',))
                ds.variables['x'][:] = x
            expected = Dataset()
            expected['x'] = ('time', x)
            # check equivalent ways to specify group
            for group in 'foo', '/foo', 'foo/', '/foo/':
                with open_dataset(tmp_file, group=group) as actual:
                    self.assertVariableEqual(actual['x'], expected['x'])
            # check that missing group raises appropriate exception
            with self.assertRaises(IOError):
                open_dataset(tmp_file, group='bar')
    def test_open_subgroup(self):
        # Create a netCDF file with a dataset stored within a group within a group
        with create_tmp_file() as tmp_file:
            rootgrp = nc4.Dataset(tmp_file, 'w')
            foogrp = rootgrp.createGroup('foo')
            bargrp = foogrp.createGroup('bar')
            ds = bargrp
            ds.createDimension('time', size=10)
            x = np.arange(10)
            ds.createVariable('x', np.int32, dimensions=('time',))
            ds.variables['x'][:] = x
            rootgrp.close()
            expected = Dataset()
            expected['x'] = ('time', x)
            # check equivalent ways to specify group
            for group in 'foo/bar', '/foo/bar', 'foo/bar/', '/foo/bar/':
                with open_dataset(tmp_file, group=group) as actual:
                    self.assertVariableEqual(actual['x'], expected['x'])
    def test_write_groups(self):
        data1 = create_test_data()
        data2 = data1 * 2
        with create_tmp_file() as tmp_file:
            data1.to_netcdf(tmp_file, group='data/1')
            # mode='a' appends the second group to the same file
            data2.to_netcdf(tmp_file, group='data/2', mode='a')
            with open_dataset(tmp_file, group='data/1') as actual1:
                self.assertDatasetIdentical(data1, actual1)
            with open_dataset(tmp_file, group='data/2') as actual2:
                self.assertDatasetIdentical(data2, actual2)
    def test_compression_encoding(self):
        data = create_test_data()
        data['var2'].encoding.update({'zlib': True,
                                      'chunksizes': (5, 5),
                                      'least_significant_digit': 2})
        with self.roundtrip(data) as actual:
            for k, v in iteritems(data['var2'].encoding):
                self.assertEqual(v, actual['var2'].encoding[k])
        # regression test for #156
        expected = data.isel(dim1=0)
        with self.roundtrip(expected) as actual:
            self.assertDatasetEqual(expected, actual)
    def test_mask_and_scale(self):
        with create_tmp_file() as tmp_file:
            with nc4.Dataset(tmp_file, mode='w') as nc:
                nc.createDimension('t', 5)
                nc.createVariable('x', 'int16', ('t',), fill_value=-1)
                v = nc.variables['x']
                v.set_auto_maskandscale(False)
                v.add_offset = 10
                v.scale_factor = 0.1
                v[:] = np.array([-1, -1, 0, 1, 2])
            # first make sure netCDF4 reads the masked and scaled data correctly
            with nc4.Dataset(tmp_file, mode='r') as nc:
                expected = np.ma.array([-1, -1, 10, 10.1, 10.2],
                                       mask=[True, True, False, False, False])
                actual = nc.variables['x'][:]
                self.assertArrayEqual(expected, actual)
            # now check xray
            with open_dataset(tmp_file) as ds:
                expected = create_masked_and_scaled_data()
                self.assertDatasetIdentical(expected, ds)
    def test_0dimensional_variable(self):
        # This fix verifies our work-around to this netCDF4-python bug:
        # https://github.com/Unidata/netcdf4-python/pull/220
        with create_tmp_file() as tmp_file:
            with nc4.Dataset(tmp_file, mode='w') as nc:
                v = nc.createVariable('x', 'int16')
                v[...] = 123
            with open_dataset(tmp_file) as ds:
                expected = Dataset({'x': ((), 123)})
                self.assertDatasetIdentical(expected, ds)
    def test_variable_len_strings(self):
        with create_tmp_file() as tmp_file:
            values = np.array(['foo', 'bar', 'baz'], dtype=object)
            with nc4.Dataset(tmp_file, mode='w') as nc:
                nc.createDimension('x', 3)
                v = nc.createVariable('x', str, ('x',))
                v[:] = values
            expected = Dataset({'x': ('x', values)})
            for kwargs in [{}, {'decode_cf': True}]:
                with open_dataset(tmp_file, **kwargs) as actual:
                    self.assertDatasetIdentical(expected, actual)
    def test_roundtrip_endian(self):
        ds = Dataset({'x': np.arange(3, 10, dtype='>i2'),
                      'y': np.arange(3, 20, dtype='<i4'),
                      'z': np.arange(3, 30, dtype='=i8'),
                      'w': ('x', np.arange(3, 10, dtype=np.float))})
        with self.roundtrip(ds) as actual:
            # technically these datasets are slightly different,
            # one hold mixed endian data (ds) the other should be
            # all big endian (actual). assertDatasetIdentical
            # should still pass though.
            self.assertDatasetIdentical(ds, actual)
        ds['z'].encoding['endian'] = 'big'
        with self.assertRaises(NotImplementedError):
            with self.roundtrip(ds) as actual:
                pass
    def test_roundtrip_character_array(self):
        with create_tmp_file() as tmp_file:
            values = np.array([['a', 'b', 'c'], ['d', 'e', 'f']], dtype='S')
            with nc4.Dataset(tmp_file, mode='w') as nc:
                nc.createDimension('x', 2)
                nc.createDimension('string3', 3)
                v = nc.createVariable('x', np.dtype('S1'), ('x', 'string3'))
                v[:] = values
            values = np.array(['abc', 'def'], dtype='S')
            expected = Dataset({'x': ('x', values)})
            with open_dataset(tmp_file) as actual:
                self.assertDatasetIdentical(expected, actual)
                # regression test for #157
                with self.roundtrip(actual) as roundtripped:
                    self.assertDatasetIdentical(expected, roundtripped)
    def test_default_to_char_arrays(self):
        data = Dataset({'x': np.array(['foo', 'zzzz'], dtype='S')})
        with self.roundtrip(data) as actual:
            self.assertDatasetIdentical(data, actual)
            self.assertEqual(actual['x'].dtype, np.dtype('S4'))
    def test_open_encodings(self):
        # Create a netCDF file with explicit time units
        # and make sure it makes it into the encodings
        # and survives a round trip
        with create_tmp_file() as tmp_file:
            with nc4.Dataset(tmp_file, 'w') as ds:
                ds.createDimension('time', size=10)
                ds.createVariable('time', np.int32, dimensions=('time',))
                units = 'days since 1999-01-01'
                ds.variables['time'].setncattr('units', units)
                ds.variables['time'][:] = np.arange(10) + 4
            expected = Dataset()
            time = pd.date_range('1999-01-05', periods=10)
            encoding = {'units': units, 'dtype': np.dtype('int32')}
            expected['time'] = ('time', time, {}, encoding)
            with open_dataset(tmp_file) as actual:
                self.assertVariableEqual(actual['time'], expected['time'])
                actual_encoding = dict((k, v) for k, v
                                       in iteritems(actual['time'].encoding)
                                       if k in expected['time'].encoding)
                self.assertDictEqual(actual_encoding,
                                     expected['time'].encoding)
    def test_dump_and_open_encodings(self):
        # Create a netCDF file with explicit time units
        # and make sure it makes it into the encodings
        # and survives a round trip
        with create_tmp_file() as tmp_file:
            with nc4.Dataset(tmp_file, 'w') as ds:
                ds.createDimension('time', size=10)
                ds.createVariable('time', np.int32, dimensions=('time',))
                units = 'days since 1999-01-01'
                ds.variables['time'].setncattr('units', units)
                ds.variables['time'][:] = np.arange(10) + 4
            # use a context manager so the re-opened dataset is closed
            # (the previous version leaked the file handle)
            with open_dataset(tmp_file) as xray_dataset:
                with create_tmp_file() as tmp_file2:
                    xray_dataset.to_netcdf(tmp_file2)
                    with nc4.Dataset(tmp_file2, 'r') as ds:
                        self.assertEqual(ds.variables['time'].getncattr('units'), units)
                        self.assertArrayEqual(ds.variables['time'], np.arange(10) + 4)
    def test_coordinates_encoding(self):
        def equals_latlon(obj):
            # the 'coordinates' attribute is order-insensitive
            return obj == 'lat lon' or obj == 'lon lat'
        original = Dataset({'temp': ('x', [0, 1]), 'precip': ('x', [0, -1])},
                           {'lat': ('x', [2, 3]), 'lon': ('x', [4, 5])})
        with self.roundtrip(original) as actual:
            self.assertDatasetIdentical(actual, original)
        with create_tmp_file() as tmp_file:
            original.to_netcdf(tmp_file)
            with open_dataset(tmp_file, decode_coords=False) as ds:
                self.assertTrue(equals_latlon(ds['temp'].attrs['coordinates']))
                self.assertTrue(equals_latlon(ds['precip'].attrs['coordinates']))
                self.assertNotIn('coordinates', ds.attrs)
                self.assertNotIn('coordinates', ds['lat'].attrs)
                self.assertNotIn('coordinates', ds['lon'].attrs)
        modified = original.drop(['temp', 'precip'])
        with self.roundtrip(modified) as actual:
            self.assertDatasetIdentical(actual, modified)
        with create_tmp_file() as tmp_file:
            modified.to_netcdf(tmp_file)
            with open_dataset(tmp_file, decode_coords=False) as ds:
                self.assertTrue(equals_latlon(ds.attrs['coordinates']))
                self.assertNotIn('coordinates', ds['lat'].attrs)
                self.assertNotIn('coordinates', ds['lon'].attrs)
@requires_scipy
class ScipyInMemoryDataTest(CFEncodedDataTest, Only32BitTypes, TestCase):
    """Round-trips through scipy's netCDF3 writer on an in-memory BytesIO."""
    @contextlib.contextmanager
    def create_store(self):
        fobj = BytesIO()
        yield backends.ScipyDataStore(fobj, 'w')
    @contextlib.contextmanager
    def roundtrip(self, data, **kwargs):
        # to_netcdf() with no path returns the serialized bytes directly
        serialized = data.to_netcdf()
        with open_dataset(BytesIO(serialized), **kwargs) as ds:
            yield ds
@requires_scipy
class ScipyOnDiskDataTest(CFEncodedDataTest, Only32BitTypes, TestCase):
    """Round-trips through scipy's netCDF3 writer on a real temp file."""
    @contextlib.contextmanager
    def create_store(self):
        with create_tmp_file() as tmp_file:
            with backends.ScipyDataStore(tmp_file, mode='w') as store:
                yield store
    @contextlib.contextmanager
    def roundtrip(self, data, **kwargs):
        with create_tmp_file() as tmp_file:
            data.to_netcdf(tmp_file, engine='scipy')
            with open_dataset(tmp_file, engine='scipy', **kwargs) as ds:
                yield ds
@requires_netCDF4
class NetCDF3ViaNetCDF4DataTest(CFEncodedDataTest, Only32BitTypes, TestCase):
    """Writes netCDF3 (NETCDF3_CLASSIC) files via the netCDF4 library."""
    @contextlib.contextmanager
    def create_store(self):
        with create_tmp_file() as tmp_file:
            with backends.NetCDF4DataStore(tmp_file, mode='w',
                                           format='NETCDF3_CLASSIC') as store:
                yield store
    @contextlib.contextmanager
    def roundtrip(self, data, **kwargs):
        with create_tmp_file() as tmp_file:
            data.to_netcdf(tmp_file, format='NETCDF3_CLASSIC',
                           engine='netcdf4')
            with open_dataset(tmp_file, engine='netcdf4', **kwargs) as ds:
                yield ds
@requires_scipy_or_netCDF4
class GenericNetCDFDataTest(CFEncodedDataTest, Only32BitTypes, TestCase):
    # verify that we can read and write netCDF3 files as long as we have scipy
    # or netCDF4-python installed
    def test_write_store(self):
        # there's no specific store to test here
        pass
    @contextlib.contextmanager
    def roundtrip(self, data, **kwargs):
        # let xray auto-select whichever engine is available
        with create_tmp_file() as tmp_file:
            data.to_netcdf(tmp_file, format='NETCDF3_64BIT')
            with open_dataset(tmp_file, **kwargs) as ds:
                yield ds
    def test_engine(self):
        """Invalid engine names must raise clear errors for read and write."""
        data = create_test_data()
        with self.assertRaisesRegexp(ValueError, 'unrecognized engine'):
            data.to_netcdf('foo.nc', engine='foobar')
        with self.assertRaisesRegexp(ValueError, 'invalid engine'):
            # writing to bytes (no path) only works with the scipy engine
            data.to_netcdf(engine='netcdf4')
        with create_tmp_file() as tmp_file:
            data.to_netcdf(tmp_file)
            with self.assertRaisesRegexp(ValueError, 'unrecognized engine'):
                open_dataset(tmp_file, engine='foobar')
        netcdf_bytes = data.to_netcdf()
        with self.assertRaisesRegexp(ValueError, 'can only read'):
            open_dataset(BytesIO(netcdf_bytes), engine='foobar')
    def test_cross_engine_read_write(self):
        """Every installed engine must read files written by every other."""
        data = create_test_data()
        valid_engines = set()
        if has_netCDF4:
            valid_engines.add('netcdf4')
        if has_scipy:
            valid_engines.add('scipy')
        for write_engine in valid_engines:
            for format in ['NETCDF3_CLASSIC', 'NETCDF3_64BIT']:
                with create_tmp_file() as tmp_file:
                    data.to_netcdf(tmp_file, format=format,
                                   engine=write_engine)
                    for read_engine in valid_engines:
                        with open_dataset(tmp_file,
                                          engine=read_engine) as actual:
                            self.assertDatasetAllClose(data, actual)
@requires_netCDF4
@requires_pydap
class PydapTest(TestCase):
    """Compare a remote OPeNDAP dataset (via pydap) with the local copy.

    NOTE(review): requires network access to test.opendap.org.
    """
    def test_cmp_local_file(self):
        url = 'http://test.opendap.org/opendap/hyrax/data/nc/bears.nc'
        actual = Dataset.load_store(backends.PydapDataStore(url))
        with open_example_dataset('bears.nc') as expected:
            # don't check attributes since pydap doesn't serialize them correctly
            # also skip the "bears" variable since the test DAP server incorrectly
            # concatenates it.
            self.assertDatasetEqual(actual.drop('bears'),
                                    expected.drop('bears'))
| 42.163743 | 92 | 0.585922 |
6aec69e359b4826ec03a6e9f0acc04b3b03f96a2 | 1,766 | py | Python | examples/series/str/series_str_istitle.py | densmirn/sdc | 30e53955a88506a5134d75d843205dbd5d576051 | [
"BSD-2-Clause"
] | 540 | 2017-06-19T16:29:24.000Z | 2019-05-21T09:30:07.000Z | examples/series/str/series_str_istitle.py | densmirn/sdc | 30e53955a88506a5134d75d843205dbd5d576051 | [
"BSD-2-Clause"
] | 389 | 2019-10-30T18:56:46.000Z | 2022-03-09T08:21:36.000Z | examples/series/str/series_str_istitle.py | densmirn/sdc | 30e53955a88506a5134d75d843205dbd5d576051 | [
"BSD-2-Clause"
] | 36 | 2017-06-19T16:29:15.000Z | 2019-04-26T09:22:39.000Z | # *****************************************************************************
# Copyright (c) 2020, Intel Corporation All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# *****************************************************************************
import pandas as pd
from numba import njit
@njit
def series_str_istitle():
    """Element-wise str.istitle() on a pandas Series, compiled with Numba."""
    series = pd.Series(['Cat', 'dog', 'Bird'])
    out_series = series.str.istitle()
    return out_series  # Expect series of True, False, True
print(series_str_istitle())
| 44.15 | 79 | 0.698188 |
810132f460bfdba38455d42164c74ce4c2a9fa9c | 237 | py | Python | pytorch_toolbelt/modules/encoders/timm/__init__.py | azkalot1/pytorch-toolbelt | 9d7544fa32a6c6588f9f8c4525ba702700ac01cc | [
"MIT"
] | 1,281 | 2019-03-17T18:32:39.000Z | 2022-03-31T03:47:22.000Z | pytorch_toolbelt/modules/encoders/timm/__init__.py | azkalot1/pytorch-toolbelt | 9d7544fa32a6c6588f9f8c4525ba702700ac01cc | [
"MIT"
] | 28 | 2019-04-05T10:49:25.000Z | 2022-03-11T10:40:28.000Z | pytorch_toolbelt/modules/encoders/timm/__init__.py | azkalot1/pytorch-toolbelt | 9d7544fa32a6c6588f9f8c4525ba702700ac01cc | [
"MIT"
] | 99 | 2019-03-18T08:40:18.000Z | 2022-03-26T10:52:57.000Z | from .common import *
from .dpn import *
from .efficient_net import *
from .efficient_net_v2 import *
from .hrnet import *
from .nf_regnet import *
from .nfnet import *
from .nfnet_s import *
from .res2net import *
from .resnet import *
| 21.545455 | 31 | 0.746835 |
4f8b992accdbc736c50bfa0bcc8f3445ee9d94b9 | 825 | py | Python | gallery/urls.py | MutuaFranklin/Photofolio | 80e8326663c7cd1288d8db2e5fd1542855aad7fd | [
"MIT"
] | null | null | null | gallery/urls.py | MutuaFranklin/Photofolio | 80e8326663c7cd1288d8db2e5fd1542855aad7fd | [
"MIT"
] | null | null | null | gallery/urls.py | MutuaFranklin/Photofolio | 80e8326663c7cd1288d8db2e5fd1542855aad7fd | [
"MIT"
] | null | null | null | from django.urls import re_path, path
from . import views
from django.conf import settings
from django.conf.urls.static import static
from .views import galleryView, imageDetailsView, image_location
# URL routes for the gallery app.
urlpatterns=[
    path('gallery', galleryView.as_view(), name = 'gallery'),
    # path('gallerydetails/<int:pk>', imageDetailsView.as_view(), name ='imageDetails'),
    re_path(r'^$', views.home, name='home'),
    # re_path(r'gallery', views.gallery, name='gallery'),
    re_path(r'location/(?P<location_name>\w+)/', views.image_location, name='location'),
    re_path(r'category/(?P<category>\w+)/', views.image_category, name='category'),
    re_path(r'^search/', views.search_results, name='search_results'),
]
# Serve user-uploaded media through Django only in development;
# production should serve MEDIA_ROOT via the web server.
if settings.DEBUG:
    urlpatterns+= static(settings.MEDIA_URL, document_root = settings.MEDIA_ROOT)
36f9708cb919f8ec3dc241e198aff77bd42ea8c7 | 473 | py | Python | sandbox/catalogue/migrations/0023_auto_20210527_2042.py | IhateTrains/django-oscar | 072fb7ef19b2bc72894be4c8add746cb79f87e93 | [
"BSD-3-Clause"
] | null | null | null | sandbox/catalogue/migrations/0023_auto_20210527_2042.py | IhateTrains/django-oscar | 072fb7ef19b2bc72894be4c8add746cb79f87e93 | [
"BSD-3-Clause"
] | null | null | null | sandbox/catalogue/migrations/0023_auto_20210527_2042.py | IhateTrains/django-oscar | 072fb7ef19b2bc72894be4c8add746cb79f87e93 | [
"BSD-3-Clause"
] | null | null | null | # Generated by Django 3.1.11 on 2021-05-27 18:42
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: make ProductImage.original blank/nullable."""
    dependencies = [
        ('catalogue', '0022_auto_20210527_1937'),
    ]
    operations = [
        migrations.AlterField(
            model_name='productimage',
            name='original',
            field=models.ImageField(blank=True, null=True, upload_to='catalogue.ProductImageStorage/bytes/filename/mimetype'),
        ),
    ]
| 24.894737 | 126 | 0.646934 |
e913ecbf1b663e6f7a5f1415a29773bf9a7a2e39 | 2,937 | py | Python | publichealth/home/migrations/0022_auto_20180525_1520.py | pcoder/public-health-ch | cebc4849653560c54238b67814074353ff7c01f3 | [
"MIT"
] | 2 | 2020-10-29T16:27:21.000Z | 2021-06-07T12:47:46.000Z | publichealth/home/migrations/0022_auto_20180525_1520.py | pcoder/public-health-ch | cebc4849653560c54238b67814074353ff7c01f3 | [
"MIT"
] | 11 | 2017-05-09T10:50:28.000Z | 2021-12-15T17:01:23.000Z | publichealth/home/migrations/0022_auto_20180525_1520.py | pcoder/public-health-ch | cebc4849653560c54238b67814074353ff7c01f3 | [
"MIT"
] | 4 | 2017-04-24T13:06:55.000Z | 2021-06-04T02:18:32.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-05-25 13:20
from __future__ import unicode_literals
from django.db import migrations, models
import wagtail.core.blocks
import wagtail.core.fields
import wagtail.images.blocks
class Migration(migrations.Migration):
    """Auto-generated: add English-language (``*_en``) fields to page models."""
    dependencies = [
        ('home', '0021_auto_20171013_2321'),
    ]
    operations = [
        migrations.AddField(
            model_name='articleindexpage',
            name='intro_en',
            field=wagtail.core.fields.RichTextField(blank=True, default=''),
        ),
        migrations.AddField(
            model_name='articleindexpage',
            name='title_en',
            field=models.CharField(blank=True, default='', max_length=255),
        ),
        migrations.AddField(
            model_name='articlepage',
            name='body_en',
            field=wagtail.core.fields.StreamField((('paragraph', wagtail.core.blocks.RichTextBlock()), ('section', wagtail.core.blocks.CharBlock(classname='full title')), ('info', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('photo', wagtail.images.blocks.ImageChooserBlock(required=True)), ('summary', wagtail.core.blocks.RichTextBlock(required=True)), ('action', wagtail.core.blocks.CharBlock(required=False)), ('url', wagtail.core.blocks.URLBlock(required=False))), icon='help')), ('media', wagtail.core.blocks.ChoiceBlock(choices=[('gallery', 'Image gallery')], icon='media'))), blank=True, null=True),
        ),
        migrations.AddField(
            model_name='articlepage',
            name='intro_en',
            field=wagtail.core.fields.RichTextField(blank=True, default=''),
        ),
        migrations.AddField(
            model_name='articlepage',
            name='title_en',
            field=models.CharField(blank=True, default='', max_length=255),
        ),
        migrations.AddField(
            model_name='contact',
            name='title_en',
            field=models.CharField(default='', max_length=255),
        ),
        migrations.AddField(
            model_name='homepage',
            name='body_en',
            field=wagtail.core.fields.RichTextField(blank=True, default=''),
        ),
        migrations.AddField(
            model_name='homepage',
            name='infos_en',
            field=wagtail.core.fields.StreamField((('info', wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(required=True)), ('photo', wagtail.images.blocks.ImageChooserBlock(required=True)), ('summary', wagtail.core.blocks.RichTextBlock(required=True)), ('action', wagtail.core.blocks.CharBlock(required=False)), ('url', wagtail.core.blocks.URLBlock(required=False))))),), blank=True, null=True),
        ),
        migrations.AddField(
            model_name='homepage',
            name='intro_en',
            field=wagtail.core.fields.RichTextField(blank=True, default=''),
        ),
    ]
| 45.890625 | 652 | 0.632278 |
9190b4509e7fd57e1b1967803ddf45a3ddd3cbe7 | 1,241 | py | Python | src/bot/handler/groupsHandler.py | miloszowi/everyone-mention-telegram-bot | a6b441b197b743f57e089dbe32d262b87a155140 | [
"MIT"
] | 13 | 2021-09-20T17:04:28.000Z | 2022-03-15T09:27:25.000Z | src/bot/handler/groupsHandler.py | miloszowi/everyone-mention-telegram-bot | a6b441b197b743f57e089dbe32d262b87a155140 | [
"MIT"
] | null | null | null | src/bot/handler/groupsHandler.py | miloszowi/everyone-mention-telegram-bot | a6b441b197b743f57e089dbe32d262b87a155140 | [
"MIT"
] | null | null | null | from telegram.ext.callbackcontext import CallbackContext
from telegram.ext.commandhandler import CommandHandler
from telegram.update import Update
from bot.handler.abstractHandler import AbstractHandler
from bot.message.replier import Replier
from config.contents import no_groups
from exception.invalidActionException import InvalidActionException
from exception.notFoundException import NotFoundException
from repository.chatRepository import ChatRepository
from utils.messageBuilder import MessageBuilder
class GroupsHandler(AbstractHandler):
bot_handler: CommandHandler
chat_repository: ChatRepository
action: str = 'groups'
def __init__(self) -> None:
self.bot_handler = CommandHandler(self.action, self.wrap)
self.chat_repository = ChatRepository()
def handle(self, update: Update, context: CallbackContext) -> None:
try:
chat = self.chat_repository.get(self.inbound.chat_id)
if not chat.groups:
raise NotFoundException
Replier.html(update, MessageBuilder.group_message(chat.groups))
except NotFoundException:
raise InvalidActionException(no_groups)
def is_group_specific(self) -> bool:
return False
| 35.457143 | 75 | 0.759065 |
8ea80f33f5b68ffb168d4f7c71708fd888c90466 | 368 | py | Python | djautotask/migrations/0095_auto_20210726_1251.py | KerkhoffTechnologies/django-autotask | 458ff0bf65e3ca85fb954f907f05c4c614904afc | [
"MIT"
] | 4 | 2019-04-18T17:12:07.000Z | 2021-12-30T21:42:10.000Z | djautotask/migrations/0095_auto_20210726_1251.py | KerkhoffTechnologies/django-autotask | 458ff0bf65e3ca85fb954f907f05c4c614904afc | [
"MIT"
] | 32 | 2018-05-30T20:31:22.000Z | 2022-02-17T21:36:50.000Z | djautotask/migrations/0095_auto_20210726_1251.py | KerkhoffTechnologies/django-autotask | 458ff0bf65e3ca85fb954f907f05c4c614904afc | [
"MIT"
] | 5 | 2018-05-25T23:33:45.000Z | 2022-01-04T22:01:46.000Z | # Generated by Django 3.1.7 on 2021-07-26 12:51
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('djautotask', '0094_auto_20210723_1018'),
]
operations = [
migrations.AlterModelOptions(
name='contact',
options={'ordering': ('first_name', 'last_name')},
),
]
| 20.444444 | 62 | 0.603261 |
d15f874185a0a0e5e2113fe36e14a9cbfccc1913 | 7,344 | py | Python | test/config_test.py | jasmine/jasmine-py | cd2e5dc1f165bfda05efb7fbdaac66b97cc084e6 | [
"MIT"
] | 87 | 2015-02-10T00:27:48.000Z | 2022-01-07T02:11:39.000Z | test/config_test.py | jasmine/jasmine-py | cd2e5dc1f165bfda05efb7fbdaac66b97cc084e6 | [
"MIT"
] | 34 | 2015-01-19T10:10:45.000Z | 2022-01-01T19:28:11.000Z | test/config_test.py | jasmine/jasmine-py | cd2e5dc1f165bfda05efb7fbdaac66b97cc084e6 | [
"MIT"
] | 28 | 2015-02-05T16:08:39.000Z | 2022-02-26T14:06:38.000Z | import os
import tempfile
import pkg_resources
from contextlib import contextmanager
from jasmine.config import Config
default_jasmine_yml = """
src_files:
- src/player.js
- src/**/*.js
- http://cdn.jquery.com/jquery.js
- vendor/test.js
- vendor/**/*.{js,coffee}
"""
def create_files(jasmine_yml=default_jasmine_yml):
files = {
"jasmine.yml": jasmine_yml,
"spec/javascripts/helpers/spec_helper.js": '',
"lib/jams/jam_spec.js": '',
"src/player.js": '',
"src/mixer/mixer.js": '',
"src/tuner/fm/fm_tuner.js": '',
"spec/javascripts/player_spec.js": '',
"spec/javascripts/mixer/mixer_spec.js": '',
"spec/javascripts/tuner/fm/fm_tuner_spec.js": '',
"spec/javascripts/tuner/am/AMSpec.js": '',
"vendor/test.js": '',
"vendor/pants.coffee": '',
"vendor_spec/pantsSpec.js": '',
"main.css": '',
}
for k in files:
parent = os.path.dirname(k)
if parent and not os.path.exists(parent):
os.makedirs(parent)
with open(k, 'w') as f:
f.write(files[k])
@contextmanager
def pushd(dest):
src = os.getcwd()
os.chdir(dest)
try:
yield
finally:
os.chdir(src)
@contextmanager
def in_temp_dir():
with tempfile.TemporaryDirectory() as root:
with pushd(root):
yield
def test_src_files():
with in_temp_dir():
create_files()
config = Config("jasmine.yml")
src_files = config.src_files()
assert src_files[0] == "src/player.js"
assert src_files.index("vendor/test.js") < src_files.index("vendor/pants.coffee")
assert 'http://cdn.jquery.com/jquery.js' in src_files
assert 'src/mixer/mixer.js' in src_files
assert 'src/tuner/fm/fm_tuner.js' in src_files
assert 'vendor/pants.coffee' in src_files
def test_stylesheets_default():
with in_temp_dir():
create_files()
config = Config("jasmine.yml")
assert config.stylesheets() == []
def test_helpers_default():
with in_temp_dir():
create_files()
config = Config("jasmine.yml")
assert config.helpers() == ['helpers/spec_helper.js']
def test_spec_files_default():
with in_temp_dir():
create_files()
config = Config("jasmine.yml")
# sort because all of the specified paths are globs, order does not matter
assert sorted(config.spec_files()) == [
'mixer/mixer_spec.js',
'player_spec.js',
'tuner/am/AMSpec.js',
'tuner/fm/fm_tuner_spec.js',
]
def test_src_dir_spec_dir():
with in_temp_dir():
create_files("""
src_dir: src
spec_dir: spec
src_files:
- ./**/*.js
- player.js
- vendor/test.js
- vendor/**/*.{js,coffee}
""")
config = Config("jasmine.yml")
src_files = config.src_files()
assert 'player.js' in src_files
assert 'mixer/mixer.js' in src_files
assert 'tuner/fm/fm_tuner.js' in src_files
# noinspection PySetFunctionToLiteral
assert set(config.spec_files()) == set([
"javascripts/player_spec.js",
"javascripts/mixer/mixer_spec.js",
"javascripts/tuner/am/AMSpec.js",
"javascripts/tuner/fm/fm_tuner_spec.js",
])
def test_script_urls(monkeypatch):
monkeypatch.setattr(
pkg_resources,
'resource_listdir',
lambda package, directory: [
'json2.js',
'jasmine.js',
'boot.js',
'boot0.js',
'boot1.js',
'node_boot.js',
'jasmine-html.js',
'jasmine.css'
]
)
with in_temp_dir():
create_files()
config = Config("jasmine.yml")
script_urls = config.script_urls()
assert script_urls[:6] == [
"/__jasmine__/jasmine.js",
"/__jasmine__/jasmine-html.js",
"/__jasmine__/json2.js",
"/__jasmine__/boot0.js",
"/__jasmine__/boot1.js",
"/__src__/src/player.js"
]
assert 'http://cdn.jquery.com/jquery.js' in script_urls
assert '/__src__/src/mixer/mixer.js' in script_urls
assert '/__src__/src/tuner/fm/fm_tuner.js' in script_urls
assert '/__src__/vendor/pants.coffee' in script_urls
def test_stylesheet_urls():
with in_temp_dir():
create_files("""
stylesheets:
- ./**/*.css
""")
with open("main.css", "r"):
pass
config = Config("jasmine.yml")
stylesheet_urls = config.stylesheet_urls()
assert stylesheet_urls == [
"/__jasmine__/jasmine.css",
"/__src__/main.css"
]
def test_stop_spec_on_expectation_failure_default():
with in_temp_dir():
create_files()
config = Config("jasmine.yml")
assert config.stop_spec_on_expectation_failure() is False
def test_stop_spec_on_expectation_failure_invalid():
with in_temp_dir():
create_files("""
stop_spec_on_expectation_failure: pants
""")
config = Config("jasmine.yml")
assert config.stop_spec_on_expectation_failure() is False
def test_stop_spec_on_expectation_failure_set():
with in_temp_dir():
create_files("""
stop_spec_on_expectation_failure: true
""")
config = Config("jasmine.yml")
assert config.stop_spec_on_expectation_failure() is True
def test_stop_on_spec_failure_default():
with in_temp_dir():
create_files()
config = Config("jasmine.yml")
assert config.stop_on_spec_failure() is False
def test_stop_on_spec_failure_invalid():
with in_temp_dir():
create_files("""
stop_on_spec_failure: pants
""")
config = Config("jasmine.yml")
assert config.stop_on_spec_failure() is False
def test_stop_on_spec_failure_set():
with in_temp_dir():
create_files("""
stop_on_spec_failure: true
""")
config = Config("jasmine.yml")
assert config.stop_on_spec_failure() is True
def test_random_default():
with in_temp_dir():
create_files()
config = Config("jasmine.yml")
assert config.random() is True
def test_random_invalid():
with in_temp_dir():
create_files("""
random: pants
""")
config = Config("jasmine.yml")
assert config.random() is True
def test_random_set_false():
with in_temp_dir():
create_files("""
random: false
""")
config = Config("jasmine.yml")
assert config.random() is False
def test_reload():
with in_temp_dir():
create_files()
config = Config("jasmine.yml")
assert config.src_files() != ['pants.txt']
with open("jasmine.yml", "w") as f:
f.write("""
src_files:
- pants.txt
""")
with open("pants.txt", "w"):
pass
config.reload()
assert config.src_files() == ['pants.txt']
| 27.402985 | 89 | 0.570125 |
76573242a2506a3bdfc9b890cdebd4347cabfdbc | 13,107 | py | Python | pandas/tests/extension/test_numpy.py | dequadras/pandas | 8a7fbbeb8e3a88f8e355093eb1b68f361e65b6aa | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | pandas/tests/extension/test_numpy.py | dequadras/pandas | 8a7fbbeb8e3a88f8e355093eb1b68f361e65b6aa | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | pandas/tests/extension/test_numpy.py | dequadras/pandas | 8a7fbbeb8e3a88f8e355093eb1b68f361e65b6aa | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | import numpy as np
import pytest
from pandas.compat.numpy import _np_version_under1p16
import pandas as pd
import pandas._testing as tm
from pandas.core.arrays.numpy_ import PandasArray, PandasDtype
from . import base
@pytest.fixture(params=["float", "object"])
def dtype(request):
return PandasDtype(np.dtype(request.param))
@pytest.fixture
def allow_in_pandas(monkeypatch):
"""
A monkeypatch to tells pandas to let us in.
By default, passing a PandasArray to an index / series / frame
constructor will unbox that PandasArray to an ndarray, and treat
it as a non-EA column. We don't want people using EAs without
reason.
The mechanism for this is a check against ABCPandasArray
in each constructor.
But, for testing, we need to allow them in pandas. So we patch
the _typ of PandasArray, so that we evade the ABCPandasArray
check.
"""
with monkeypatch.context() as m:
m.setattr(PandasArray, "_typ", "extension")
yield
@pytest.fixture
def data(allow_in_pandas, dtype):
if dtype.numpy_dtype == "object":
return pd.Series([(i,) for i in range(100)]).array
return PandasArray(np.arange(1, 101, dtype=dtype._dtype))
@pytest.fixture
def data_missing(allow_in_pandas, dtype):
# For NumPy <1.16, np.array([np.nan, (1,)]) raises
# ValueError: setting an array element with a sequence.
if dtype.numpy_dtype == "object":
if _np_version_under1p16:
raise pytest.skip("Skipping for NumPy <1.16")
return PandasArray(np.array([np.nan, (1,)], dtype=object))
return PandasArray(np.array([np.nan, 1.0]))
@pytest.fixture
def na_value():
return np.nan
@pytest.fixture
def na_cmp():
def cmp(a, b):
return np.isnan(a) and np.isnan(b)
return cmp
@pytest.fixture
def data_for_sorting(allow_in_pandas, dtype):
"""Length-3 array with a known sort order.
This should be three items [B, C, A] with
A < B < C
"""
if dtype.numpy_dtype == "object":
# Use an empty tuple for first element, then remove,
# to disable np.array's shape inference.
return PandasArray(np.array([(), (2,), (3,), (1,)], dtype=object)[1:])
return PandasArray(np.array([1, 2, 0]))
@pytest.fixture
def data_missing_for_sorting(allow_in_pandas, dtype):
"""Length-3 array with a known sort order.
This should be three items [B, NA, A] with
A < B and NA missing.
"""
if dtype.numpy_dtype == "object":
return PandasArray(np.array([(1,), np.nan, (0,)], dtype=object))
return PandasArray(np.array([1, np.nan, 0]))
@pytest.fixture
def data_for_grouping(allow_in_pandas, dtype):
"""Data for factorization, grouping, and unique tests.
Expected to be like [B, B, NA, NA, A, A, B, C]
Where A < B < C and NA is missing
"""
if dtype.numpy_dtype == "object":
a, b, c = (1,), (2,), (3,)
else:
a, b, c = np.arange(3)
return PandasArray(
np.array([b, b, np.nan, np.nan, a, a, b, c], dtype=dtype.numpy_dtype)
)
@pytest.fixture
def skip_numpy_object(dtype):
"""
Tests for PandasArray with nested data. Users typically won't create
these objects via `pd.array`, but they can show up through `.array`
on a Series with nested data. Many of the base tests fail, as they aren't
appropriate for nested data.
This fixture allows these tests to be skipped when used as a usefixtures
marker to either an individual test or a test class.
"""
if dtype == "object":
raise pytest.skip("Skipping for object dtype.")
skip_nested = pytest.mark.usefixtures("skip_numpy_object")
class BaseNumPyTests:
pass
class TestCasting(BaseNumPyTests, base.BaseCastingTests):
@skip_nested
def test_astype_str(self, data):
# ValueError: setting an array element with a sequence
super().test_astype_str(data)
class TestConstructors(BaseNumPyTests, base.BaseConstructorsTests):
@pytest.mark.skip(reason="We don't register our dtype")
# We don't want to register. This test should probably be split in two.
def test_from_dtype(self, data):
pass
@skip_nested
def test_array_from_scalars(self, data):
# ValueError: PandasArray must be 1-dimensional.
super().test_array_from_scalars(data)
class TestDtype(BaseNumPyTests, base.BaseDtypeTests):
@pytest.mark.skip(reason="Incorrect expected.")
# we unsurprisingly clash with a NumPy name.
def test_check_dtype(self, data):
pass
class TestGetitem(BaseNumPyTests, base.BaseGetitemTests):
@skip_nested
def test_getitem_scalar(self, data):
# AssertionError
super().test_getitem_scalar(data)
@skip_nested
def test_take_series(self, data):
# ValueError: PandasArray must be 1-dimensional.
super().test_take_series(data)
@pytest.mark.xfail(reason="astype doesn't recognize data.dtype")
def test_loc_iloc_frame_single_dtype(self, data):
super().test_loc_iloc_frame_single_dtype(data)
class TestGroupby(BaseNumPyTests, base.BaseGroupbyTests):
@skip_nested
def test_groupby_extension_apply(self, data_for_grouping, groupby_apply_op):
# ValueError: Names should be list-like for a MultiIndex
super().test_groupby_extension_apply(data_for_grouping, groupby_apply_op)
class TestInterface(BaseNumPyTests, base.BaseInterfaceTests):
@skip_nested
def test_array_interface(self, data):
# NumPy array shape inference
super().test_array_interface(data)
class TestMethods(BaseNumPyTests, base.BaseMethodsTests):
@pytest.mark.skip(reason="TODO: remove?")
def test_value_counts(self, all_data, dropna):
pass
@pytest.mark.skip(reason="Incorrect expected")
# We have a bool dtype, so the result is an ExtensionArray
# but expected is not
def test_combine_le(self, data_repeated):
super().test_combine_le(data_repeated)
@skip_nested
def test_combine_add(self, data_repeated):
# Not numeric
super().test_combine_add(data_repeated)
@skip_nested
def test_shift_fill_value(self, data):
# np.array shape inference. Shift implementation fails.
super().test_shift_fill_value(data)
@skip_nested
@pytest.mark.parametrize("box", [pd.Series, lambda x: x])
@pytest.mark.parametrize("method", [lambda x: x.unique(), pd.unique])
def test_unique(self, data, box, method):
# Fails creating expected
super().test_unique(data, box, method)
@skip_nested
def test_fillna_copy_frame(self, data_missing):
# The "scalar" for this array isn't a scalar.
super().test_fillna_copy_frame(data_missing)
@skip_nested
def test_fillna_copy_series(self, data_missing):
# The "scalar" for this array isn't a scalar.
super().test_fillna_copy_series(data_missing)
@skip_nested
def test_hash_pandas_object_works(self, data, as_frame):
# ndarray of tuples not hashable
super().test_hash_pandas_object_works(data, as_frame)
@skip_nested
def test_searchsorted(self, data_for_sorting, as_series):
# Test setup fails.
super().test_searchsorted(data_for_sorting, as_series)
@skip_nested
def test_where_series(self, data, na_value, as_frame):
# Test setup fails.
super().test_where_series(data, na_value, as_frame)
@skip_nested
@pytest.mark.parametrize("repeats", [0, 1, 2, [1, 2, 3]])
def test_repeat(self, data, repeats, as_series, use_numpy):
# Fails creating expected
super().test_repeat(data, repeats, as_series, use_numpy)
@pytest.mark.xfail(reason="PandasArray.diff may fail on dtype")
def test_diff(self, data, periods):
return super().test_diff(data, periods)
@skip_nested
class TestArithmetics(BaseNumPyTests, base.BaseArithmeticOpsTests):
divmod_exc = None
series_scalar_exc = None
frame_scalar_exc = None
series_array_exc = None
def test_divmod_series_array(self, data):
s = pd.Series(data)
self._check_divmod_op(s, divmod, data, exc=None)
@pytest.mark.skip("We implement ops")
def test_error(self, data, all_arithmetic_operators):
pass
def test_arith_series_with_scalar(self, data, all_arithmetic_operators):
super().test_arith_series_with_scalar(data, all_arithmetic_operators)
def test_arith_series_with_array(self, data, all_arithmetic_operators):
super().test_arith_series_with_array(data, all_arithmetic_operators)
class TestPrinting(BaseNumPyTests, base.BasePrintingTests):
pass
@skip_nested
class TestNumericReduce(BaseNumPyTests, base.BaseNumericReduceTests):
def check_reduce(self, s, op_name, skipna):
result = getattr(s, op_name)(skipna=skipna)
# avoid coercing int -> float. Just cast to the actual numpy type.
expected = getattr(s.astype(s.dtype._dtype), op_name)(skipna=skipna)
tm.assert_almost_equal(result, expected)
@skip_nested
class TestBooleanReduce(BaseNumPyTests, base.BaseBooleanReduceTests):
pass
class TestMissing(BaseNumPyTests, base.BaseMissingTests):
@skip_nested
def test_fillna_scalar(self, data_missing):
# Non-scalar "scalar" values.
super().test_fillna_scalar(data_missing)
@skip_nested
def test_fillna_series_method(self, data_missing, fillna_method):
# Non-scalar "scalar" values.
super().test_fillna_series_method(data_missing, fillna_method)
@skip_nested
def test_fillna_series(self, data_missing):
# Non-scalar "scalar" values.
super().test_fillna_series(data_missing)
@skip_nested
def test_fillna_frame(self, data_missing):
# Non-scalar "scalar" values.
super().test_fillna_frame(data_missing)
class TestReshaping(BaseNumPyTests, base.BaseReshapingTests):
@pytest.mark.skip("Incorrect parent test")
# not actually a mixed concat, since we concat int and int.
def test_concat_mixed_dtypes(self, data):
super().test_concat_mixed_dtypes(data)
@skip_nested
def test_merge(self, data, na_value):
# Fails creating expected
super().test_merge(data, na_value)
@skip_nested
def test_merge_on_extension_array(self, data):
# Fails creating expected
super().test_merge_on_extension_array(data)
@skip_nested
def test_merge_on_extension_array_duplicates(self, data):
# Fails creating expected
super().test_merge_on_extension_array_duplicates(data)
@skip_nested
def test_transpose(self, data):
super().test_transpose(data)
class TestSetitem(BaseNumPyTests, base.BaseSetitemTests):
@skip_nested
def test_setitem_scalar_series(self, data, box_in_series):
# AssertionError
super().test_setitem_scalar_series(data, box_in_series)
@skip_nested
def test_setitem_sequence(self, data, box_in_series):
# ValueError: shape mismatch: value array of shape (2,1) could not
# be broadcast to indexing result of shape (2,)
super().test_setitem_sequence(data, box_in_series)
@skip_nested
def test_setitem_sequence_mismatched_length_raises(self, data, as_array):
# ValueError: PandasArray must be 1-dimensional.
super().test_setitem_sequence_mismatched_length_raises(data, as_array)
@skip_nested
def test_setitem_sequence_broadcasts(self, data, box_in_series):
# ValueError: cannot set using a list-like indexer with a different
# length than the value
super().test_setitem_sequence_broadcasts(data, box_in_series)
@skip_nested
def test_setitem_loc_scalar_mixed(self, data):
# AssertionError
super().test_setitem_loc_scalar_mixed(data)
@skip_nested
def test_setitem_loc_scalar_multiple_homogoneous(self, data):
# AssertionError
super().test_setitem_loc_scalar_multiple_homogoneous(data)
@skip_nested
def test_setitem_iloc_scalar_mixed(self, data):
# AssertionError
super().test_setitem_iloc_scalar_mixed(data)
@skip_nested
def test_setitem_iloc_scalar_multiple_homogoneous(self, data):
# AssertionError
super().test_setitem_iloc_scalar_multiple_homogoneous(data)
@skip_nested
@pytest.mark.parametrize("setter", ["loc", None])
def test_setitem_mask_broadcast(self, data, setter):
# ValueError: cannot set using a list-like indexer with a different
# length than the value
super().test_setitem_mask_broadcast(data, setter)
@skip_nested
def test_setitem_scalar_key_sequence_raise(self, data):
# Failed: DID NOT RAISE <class 'ValueError'>
super().test_setitem_scalar_key_sequence_raise(data)
@skip_nested
def test_setitem_slice(self, data, box_in_series):
super().test_setitem_slice(data, box_in_series)
@skip_nested
def test_setitem_loc_iloc_slice(self, data):
super().test_setitem_loc_iloc_slice(data)
@skip_nested
class TestParsing(BaseNumPyTests, base.BaseParsingTests):
pass
| 31.890511 | 81 | 0.703288 |
a29821566644c79c9f404d4977643cad6519b478 | 2,452 | py | Python | aliyun-python-sdk-unimkt/aliyunsdkunimkt/request/v20181212/QueryTenantUserByUserIdRequest.py | leafcoder/aliyun-openapi-python-sdk | 26b441ab37a5cda804de475fd5284bab699443f1 | [
"Apache-2.0"
] | 1,001 | 2015-07-24T01:32:41.000Z | 2022-03-25T01:28:18.000Z | aliyun-python-sdk-unimkt/aliyunsdkunimkt/request/v20181212/QueryTenantUserByUserIdRequest.py | leafcoder/aliyun-openapi-python-sdk | 26b441ab37a5cda804de475fd5284bab699443f1 | [
"Apache-2.0"
] | 363 | 2015-10-20T03:15:00.000Z | 2022-03-08T12:26:19.000Z | aliyun-python-sdk-unimkt/aliyunsdkunimkt/request/v20181212/QueryTenantUserByUserIdRequest.py | leafcoder/aliyun-openapi-python-sdk | 26b441ab37a5cda804de475fd5284bab699443f1 | [
"Apache-2.0"
] | 682 | 2015-09-22T07:19:02.000Z | 2022-03-22T09:51:46.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkunimkt.endpoint import endpoint_data
class QueryTenantUserByUserIdRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'UniMkt', '2018-12-12', 'QueryTenantUserByUserId')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_Business(self):
return self.get_query_params().get('Business')
def set_Business(self,Business):
self.add_query_param('Business',Business)
def get_UserId(self):
return self.get_query_params().get('UserId')
def set_UserId(self,UserId):
self.add_query_param('UserId',UserId)
def get_OriginSiteUserId(self):
return self.get_query_params().get('OriginSiteUserId')
def set_OriginSiteUserId(self,OriginSiteUserId):
self.add_query_param('OriginSiteUserId',OriginSiteUserId)
def get_Environment(self):
return self.get_query_params().get('Environment')
def set_Environment(self,Environment):
self.add_query_param('Environment',Environment)
def get_AppName(self):
return self.get_query_params().get('AppName')
def set_AppName(self,AppName):
self.add_query_param('AppName',AppName)
def get_TenantId(self):
return self.get_query_params().get('TenantId')
def set_TenantId(self,TenantId):
self.add_query_param('TenantId',TenantId)
def get_UserSite(self):
return self.get_query_params().get('UserSite')
def set_UserSite(self,UserSite):
self.add_query_param('UserSite',UserSite) | 33.135135 | 79 | 0.762235 |
1c32b63182e3843d4781fc4ffb7434839531e4cc | 1,011 | py | Python | openproblems/tasks/_batch_integration/batch_integration_graph/metrics/ari.py | scottgigante-immunai/openproblems | d093c1a2f21715d98e07ec760eff2c8f50c68373 | [
"MIT"
] | null | null | null | openproblems/tasks/_batch_integration/batch_integration_graph/metrics/ari.py | scottgigante-immunai/openproblems | d093c1a2f21715d98e07ec760eff2c8f50c68373 | [
"MIT"
] | null | null | null | openproblems/tasks/_batch_integration/batch_integration_graph/metrics/ari.py | scottgigante-immunai/openproblems | d093c1a2f21715d98e07ec760eff2c8f50c68373 | [
"MIT"
] | null | null | null | from .....tools.decorators import metric
"""
The Rand index compares the overlap of two clusterings;
it considers both correct clustering overlaps while also counting correct
disagreements between two clusterings.
Similar to NMI, we compared the cell-type labels with the NMI-optimized
Louvain clustering computed on the integrated dataset.
The adjustment of the Rand index corrects for randomly correct labels.
An ARI of 0 or 1 corresponds to random labeling or a perfect match, respectively.
We also used the scikit-learn (v.0.22.1) implementation of the ARI.
"""
@metric(
metric_name="ARI",
maximize=True,
image="openproblems-python-batch-integration", # only if required
)
def ari(adata):
from scib.metrics import ari
from scib.metrics.clustering import opt_louvain
opt_louvain(
adata,
label_key="labels",
cluster_key="cluster",
plot=False,
inplace=True,
force=True,
)
return ari(adata, group1="cluster", group2="labels")
| 30.636364 | 81 | 0.726014 |
d1476b1ab21d40934db6eb0cc0d2174d41b1df72 | 6,327 | py | Python | hooks/charmhelpers/contrib/openstack/ip.py | ryan-beisner/charm-nova-compute-ppc64el | c5448acb58f143bd0e018dd361d98003d23396e0 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | hooks/charmhelpers/contrib/openstack/ip.py | ryan-beisner/charm-nova-compute-ppc64el | c5448acb58f143bd0e018dd361d98003d23396e0 | [
"ECL-2.0",
"Apache-2.0"
] | 3 | 2017-06-15T22:41:35.000Z | 2018-04-12T20:12:28.000Z | hooks/charmhelpers/contrib/openstack/ip.py | ryan-beisner/charm-nova-compute-ppc64el | c5448acb58f143bd0e018dd361d98003d23396e0 | [
"ECL-2.0",
"Apache-2.0"
] | 2 | 2019-08-05T19:37:03.000Z | 2019-08-05T20:06:01.000Z | # Copyright 2014-2015 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from charmhelpers.core.hookenv import (
config,
unit_get,
service_name,
network_get_primary_address,
)
from charmhelpers.contrib.network.ip import (
get_address_in_network,
is_address_in_network,
is_ipv6,
get_ipv6_addr,
resolve_network_cidr,
)
from charmhelpers.contrib.hahelpers.cluster import is_clustered
PUBLIC = 'public'
INTERNAL = 'int'
ADMIN = 'admin'
ACCESS = 'access'
ADDRESS_MAP = {
PUBLIC: {
'binding': 'public',
'config': 'os-public-network',
'fallback': 'public-address',
'override': 'os-public-hostname',
},
INTERNAL: {
'binding': 'internal',
'config': 'os-internal-network',
'fallback': 'private-address',
'override': 'os-internal-hostname',
},
ADMIN: {
'binding': 'admin',
'config': 'os-admin-network',
'fallback': 'private-address',
'override': 'os-admin-hostname',
},
ACCESS: {
'binding': 'access',
'config': 'access-network',
'fallback': 'private-address',
'override': 'os-access-hostname',
},
}
def canonical_url(configs, endpoint_type=PUBLIC):
"""Returns the correct HTTP URL to this host given the state of HTTPS
configuration, hacluster and charm configuration.
:param configs: OSTemplateRenderer config templating object to inspect
for a complete https context.
:param endpoint_type: str endpoint type to resolve.
:param returns: str base URL for services on the current service unit.
"""
scheme = _get_scheme(configs)
address = resolve_address(endpoint_type)
if is_ipv6(address):
address = "[{}]".format(address)
return '%s://%s' % (scheme, address)
def _get_scheme(configs):
"""Returns the scheme to use for the url (either http or https)
depending upon whether https is in the configs value.
:param configs: OSTemplateRenderer config templating object to inspect
for a complete https context.
:returns: either 'http' or 'https' depending on whether https is
configured within the configs context.
"""
scheme = 'http'
if configs and 'https' in configs.complete_contexts():
scheme = 'https'
return scheme
def _get_address_override(endpoint_type=PUBLIC):
"""Returns any address overrides that the user has defined based on the
endpoint type.
Note: this function allows for the service name to be inserted into the
address if the user specifies {service_name}.somehost.org.
:param endpoint_type: the type of endpoint to retrieve the override
value for.
:returns: any endpoint address or hostname that the user has overridden
or None if an override is not present.
"""
override_key = ADDRESS_MAP[endpoint_type]['override']
addr_override = config(override_key)
if not addr_override:
return None
else:
return addr_override.format(service_name=service_name())
def resolve_address(endpoint_type=PUBLIC, override=True):
"""Return unit address depending on net config.
If unit is clustered with vip(s) and has net splits defined, return vip on
correct network. If clustered with no nets defined, return primary vip.
If not clustered, return unit address ensuring address is on configured net
split if one is configured, or a Juju 2.0 extra-binding has been used.
:param endpoint_type: Network endpoing type
:param override: Accept hostname overrides or not
"""
resolved_address = None
if override:
resolved_address = _get_address_override(endpoint_type)
if resolved_address:
return resolved_address
vips = config('vip')
if vips:
vips = vips.split()
net_type = ADDRESS_MAP[endpoint_type]['config']
net_addr = config(net_type)
net_fallback = ADDRESS_MAP[endpoint_type]['fallback']
binding = ADDRESS_MAP[endpoint_type]['binding']
clustered = is_clustered()
if clustered and vips:
if net_addr:
for vip in vips:
if is_address_in_network(net_addr, vip):
resolved_address = vip
break
else:
# NOTE: endeavour to check vips against network space
# bindings
try:
bound_cidr = resolve_network_cidr(
network_get_primary_address(binding)
)
for vip in vips:
if is_address_in_network(bound_cidr, vip):
resolved_address = vip
break
except NotImplementedError:
# If no net-splits configured and no support for extra
# bindings/network spaces so we expect a single vip
resolved_address = vips[0]
else:
if config('prefer-ipv6'):
fallback_addr = get_ipv6_addr(exc_list=vips)[0]
else:
fallback_addr = unit_get(net_fallback)
if net_addr:
resolved_address = get_address_in_network(net_addr, fallback_addr)
else:
# NOTE: only try to use extra bindings if legacy network
# configuration is not in use
try:
resolved_address = network_get_primary_address(binding)
except NotImplementedError:
resolved_address = fallback_addr
if resolved_address is None:
raise ValueError("Unable to resolve a suitable IP address based on "
"charm state and configuration. (net_type=%s, "
"clustered=%s)" % (net_type, clustered))
return resolved_address
| 33.834225 | 79 | 0.644223 |
da01cacf0490d101d3ba7fcf8061b65803d5eede | 123 | py | Python | lang/Python/sum-of-squares-1.py | ethansaxenian/RosettaDecode | 8ea1a42a5f792280b50193ad47545d14ee371fb7 | [
"MIT"
] | 1 | 2021-05-05T13:42:20.000Z | 2021-05-05T13:42:20.000Z | lang/Python/sum-of-squares-1.py | ethansaxenian/RosettaDecode | 8ea1a42a5f792280b50193ad47545d14ee371fb7 | [
"MIT"
] | null | null | null | lang/Python/sum-of-squares-1.py | ethansaxenian/RosettaDecode | 8ea1a42a5f792280b50193ad47545d14ee371fb7 | [
"MIT"
] | null | null | null | sum(x * x for x in [1, 2, 3, 4, 5])
# or
sum(x ** 2 for x in [1, 2, 3, 4, 5])
# or
sum(pow(x, 2) for x in [1, 2, 3, 4, 5])
| 20.5 | 39 | 0.447154 |
9e59e7d4389c7b3f8f9577f953d09de99dbe5b80 | 1,125 | py | Python | portal/migrations/versions/ed70c144ae46_.py | ivan-c/truenth-portal | 0b9d39ae43f42ea3413ed9634f295f5d856cbc77 | [
"BSD-3-Clause"
] | 3 | 2017-01-15T10:11:57.000Z | 2018-10-02T23:46:44.000Z | portal/migrations/versions/ed70c144ae46_.py | pep8speaks/true_nth_usa_portal | 31ff755b0cfe61ab908e2a399e3c41ef17ca8c16 | [
"BSD-3-Clause"
] | 876 | 2016-04-04T20:45:11.000Z | 2019-02-28T00:10:36.000Z | portal/migrations/versions/ed70c144ae46_.py | pep8speaks/true_nth_usa_portal | 31ff755b0cfe61ab908e2a399e3c41ef17ca8c16 | [
"BSD-3-Clause"
] | 9 | 2016-04-13T01:18:55.000Z | 2018-09-19T20:44:23.000Z | """Eliminate internal identifiers
Revision ID: ed70c144ae46
Revises: 3da0caa42c62
Create Date: 2018-05-02 17:39:24.558043
"""
from alembic import op
from sqlalchemy import text
from portal.models.user import internal_identifier_systems
# Revision identifiers, used by Alembic to order this migration in the chain.
revision = 'ed70c144ae46'
down_revision = '3da0caa42c62'
def upgrade():
    """Purge identifiers whose system is one of the internal systems.

    Internal identifiers don't belong in the database - they're at best
    duplicate data, and at worst, conflicting.  Removes the identifier
    rows and their user_identifiers join rows.
    """
    bind = op.get_bind()
    select_stmt = text(
        "SELECT id FROM identifiers WHERE system IN :internal_systems")
    rows = bind.execute(
        select_stmt,
        internal_systems=tuple(internal_identifier_systems))
    stale_ids = tuple(row[0] for row in rows)
    if not stale_ids:
        return
    # Delete join-table rows first to satisfy the foreign key constraint.
    bind.execute(
        text("DELETE FROM user_identifiers WHERE identifier_id IN :bad_ids"),
        bad_ids=stale_ids)
    bind.execute(
        text("DELETE FROM identifiers WHERE id IN :bad_ids"),
        bad_ids=stale_ids)
def downgrade():
    """Intentional no-op: the purged bogus identifiers are not restored."""
    return None
| 28.846154 | 75 | 0.709333 |
308270a836f618e08e5132a27ad9a24a74586062 | 7,536 | py | Python | src/parsetab.py | videoutpl/Videout | 94ac2643a655fb7d53b2bc1af500c1f81b9e728d | [
"MIT"
] | 2 | 2019-02-27T02:08:31.000Z | 2019-05-10T01:32:10.000Z | src/parsetab.py | videoutpl/Videout | 94ac2643a655fb7d53b2bc1af500c1f81b9e728d | [
"MIT"
] | 3 | 2019-04-03T18:04:19.000Z | 2019-04-22T22:38:23.000Z | src/parsetab.py | videoutpl/Videout | 94ac2643a655fb7d53b2bc1af500c1f81b9e728d | [
"MIT"
] | null | null | null |
# parsetab.py
# This file is automatically generated. Do not edit.
# pylint: disable=W,C,R
_tabversion = '3.10'
_lr_method = 'LALR'
_lr_signature = 'ADD_AUDIO ADD_TEXT AND ASPECT_RATIO ASSIGN BETWEEN BOOL BY COMMA CONCATENATE_CLIP CROP EXTRACT_AUDIO FLOAT FROM IDENTIFIER INT LASTING LPAREN PATH PHOTO POSITION RENDER_GIF RENDER_VIDEO RESIZE RPAREN SHOW_VARS STRING TO TRIM VIDEO\n videout : var_assign\n | methodcall\n | NUMBER\n | BOOLEAN\n | empty\n \n var_assign : IDENTIFIER ASSIGN Init\n | IDENTIFIER ASSIGN STRING\n | IDENTIFIER ASSIGN NUMBER\n | IDENTIFIER ASSIGN BOOLEAN\n | IDENTIFIER ASSIGN IDENTIFIER\n\n \n Init : videoInit\n | photoInit\n | concatenateClip\n \n videoInit : VIDEO FROM STRING BETWEEN INT COMMA INT AND INT COMMA INT\n \n photoInit : PHOTO FROM STRING LASTING INT\n \n concatenateClip : CONCATENATE_CLIP IDENTIFIER AND IDENTIFIER\n \n methodcall : resizemethod\n | addTextmethod\n | renderVideo\n | renderGif\n | cropmethod\n | addAudiomethod\n | addExtractedAudiomethod\n | showVarmethod\n | showAllVarsmethod\n\n \n showVarmethod : IDENTIFIER\n\n \n showAllVarsmethod : SHOW_VARS\n\n \n resizemethod : RESIZE IDENTIFIER BY NUMBER\n \n cropmethod : CROP IDENTIFIER BY ASPECT_RATIO\n \n addAudiomethod : ADD_AUDIO STRING TO IDENTIFIER BETWEEN NUMBER COMMA NUMBER\n \n addExtractedAudiomethod : EXTRACT_AUDIO IDENTIFIER TO IDENTIFIER BETWEEN NUMBER COMMA NUMBER\n\n \n addTextmethod : ADD_TEXT STRING TO IDENTIFIER TO POSITION\n \n renderVideo : RENDER_VIDEO IDENTIFIER\n \n renderGif : RENDER_GIF IDENTIFIER\n \n BOOLEAN : BOOL\n \n NUMBER : INT\n | FLOAT\n \n empty :\n '
_lr_action_items = {'IDENTIFIER':([0,20,22,23,24,26,28,46,48,50,51,62,],[7,29,31,32,33,35,36,54,56,58,59,68,]),'INT':([0,28,47,64,65,66,67,74,75,76,80,82,],[17,17,17,17,17,72,73,17,17,79,81,83,]),'FLOAT':([0,28,47,64,65,74,75,],[18,18,18,18,18,18,18,]),'BOOL':([0,28,],[19,19,]),'$end':([0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,27,31,32,36,37,38,39,40,41,42,43,55,57,68,69,73,77,78,83,],[-38,0,-1,-2,-3,-4,-5,-26,-17,-18,-19,-20,-21,-22,-23,-24,-25,-36,-37,-35,-27,-33,-34,-10,-6,-7,-8,-9,-11,-12,-13,-28,-29,-16,-32,-15,-30,-31,-14,]),'RESIZE':([0,],[20,]),'ADD_TEXT':([0,],[21,]),'RENDER_VIDEO':([0,],[22,]),'RENDER_GIF':([0,],[23,]),'CROP':([0,],[24,]),'ADD_AUDIO':([0,],[25,]),'EXTRACT_AUDIO':([0,],[26,]),'SHOW_VARS':([0,],[27,]),'ASSIGN':([7,],[28,]),'COMMA':([17,18,70,71,72,81,],[-36,-37,74,75,76,82,]),'STRING':([21,25,28,52,53,],[30,34,38,60,61,]),'VIDEO':([28,],[44,]),'PHOTO':([28,],[45,]),'CONCATENATE_CLIP':([28,],[46,]),'BY':([29,33,],[47,49,]),'TO':([30,34,35,56,],[48,50,51,63,]),'FROM':([44,45,],[52,53,]),'ASPECT_RATIO':([49,],[57,]),'AND':([54,79,],[62,80,]),'BETWEEN':([58,59,60,],[64,65,66,]),'LASTING':([61,],[67,]),'POSITION':([63,],[69,]),}
_lr_action = {}
for _k, _v in _lr_action_items.items():
for _x,_y in zip(_v[0],_v[1]):
if not _x in _lr_action: _lr_action[_x] = {}
_lr_action[_x][_k] = _y
del _lr_action_items
_lr_goto_items = {'videout':([0,],[1,]),'var_assign':([0,],[2,]),'methodcall':([0,],[3,]),'NUMBER':([0,28,47,64,65,74,75,],[4,39,55,70,71,77,78,]),'BOOLEAN':([0,28,],[5,40,]),'empty':([0,],[6,]),'resizemethod':([0,],[8,]),'addTextmethod':([0,],[9,]),'renderVideo':([0,],[10,]),'renderGif':([0,],[11,]),'cropmethod':([0,],[12,]),'addAudiomethod':([0,],[13,]),'addExtractedAudiomethod':([0,],[14,]),'showVarmethod':([0,],[15,]),'showAllVarsmethod':([0,],[16,]),'Init':([28,],[37,]),'videoInit':([28,],[41,]),'photoInit':([28,],[42,]),'concatenateClip':([28,],[43,]),}
_lr_goto = {}
for _k, _v in _lr_goto_items.items():
for _x, _y in zip(_v[0], _v[1]):
if not _x in _lr_goto: _lr_goto[_x] = {}
_lr_goto[_x][_k] = _y
del _lr_goto_items
_lr_productions = [
("S' -> videout","S'",1,None,None,None),
('videout -> var_assign','videout',1,'p_videout','lparser.py',30),
('videout -> methodcall','videout',1,'p_videout','lparser.py',31),
('videout -> NUMBER','videout',1,'p_videout','lparser.py',32),
('videout -> BOOLEAN','videout',1,'p_videout','lparser.py',33),
('videout -> empty','videout',1,'p_videout','lparser.py',34),
('var_assign -> IDENTIFIER ASSIGN Init','var_assign',3,'p_var_assign','lparser.py',43),
('var_assign -> IDENTIFIER ASSIGN STRING','var_assign',3,'p_var_assign','lparser.py',44),
('var_assign -> IDENTIFIER ASSIGN NUMBER','var_assign',3,'p_var_assign','lparser.py',45),
('var_assign -> IDENTIFIER ASSIGN BOOLEAN','var_assign',3,'p_var_assign','lparser.py',46),
('var_assign -> IDENTIFIER ASSIGN IDENTIFIER','var_assign',3,'p_var_assign','lparser.py',47),
('Init -> videoInit','Init',1,'p_init','lparser.py',58),
('Init -> photoInit','Init',1,'p_init','lparser.py',59),
('Init -> concatenateClip','Init',1,'p_init','lparser.py',60),
('videoInit -> VIDEO FROM STRING BETWEEN INT COMMA INT AND INT COMMA INT','videoInit',11,'p_videoInit','lparser.py',69),
('photoInit -> PHOTO FROM STRING LASTING INT','photoInit',5,'p_photoInit','lparser.py',83),
('concatenateClip -> CONCATENATE_CLIP IDENTIFIER AND IDENTIFIER','concatenateClip',4,'p_concatenateClip','lparser.py',96),
('methodcall -> resizemethod','methodcall',1,'p_methodcall','lparser.py',113),
('methodcall -> addTextmethod','methodcall',1,'p_methodcall','lparser.py',114),
('methodcall -> renderVideo','methodcall',1,'p_methodcall','lparser.py',115),
('methodcall -> renderGif','methodcall',1,'p_methodcall','lparser.py',116),
('methodcall -> cropmethod','methodcall',1,'p_methodcall','lparser.py',117),
('methodcall -> addAudiomethod','methodcall',1,'p_methodcall','lparser.py',118),
('methodcall -> addExtractedAudiomethod','methodcall',1,'p_methodcall','lparser.py',119),
('methodcall -> showVarmethod','methodcall',1,'p_methodcall','lparser.py',120),
('methodcall -> showAllVarsmethod','methodcall',1,'p_methodcall','lparser.py',121),
('showVarmethod -> IDENTIFIER','showVarmethod',1,'p_showVarmethod','lparser.py',128),
('showAllVarsmethod -> SHOW_VARS','showAllVarsmethod',1,'p_showAllVarsmethod','lparser.py',136),
('resizemethod -> RESIZE IDENTIFIER BY NUMBER','resizemethod',4,'p_resizemethod','lparser.py',144),
('cropmethod -> CROP IDENTIFIER BY ASPECT_RATIO','cropmethod',4,'p_cropmethod','lparser.py',157),
('addAudiomethod -> ADD_AUDIO STRING TO IDENTIFIER BETWEEN NUMBER COMMA NUMBER','addAudiomethod',8,'p_addAudiomethod','lparser.py',170),
('addExtractedAudiomethod -> EXTRACT_AUDIO IDENTIFIER TO IDENTIFIER BETWEEN NUMBER COMMA NUMBER','addExtractedAudiomethod',8,'p_addExtractedAudiomethod','lparser.py',186),
('addTextmethod -> ADD_TEXT STRING TO IDENTIFIER TO POSITION','addTextmethod',6,'p_addTextmethod','lparser.py',201),
('renderVideo -> RENDER_VIDEO IDENTIFIER','renderVideo',2,'p_renderVideo','lparser.py',215),
('renderGif -> RENDER_GIF IDENTIFIER','renderGif',2,'p_renderGif','lparser.py',228),
('BOOLEAN -> BOOL','BOOLEAN',1,'p_BOOLEAN','lparser.py',242),
('NUMBER -> INT','NUMBER',1,'p_NUMBER','lparser.py',248),
('NUMBER -> FLOAT','NUMBER',1,'p_NUMBER','lparser.py',249),
('empty -> <empty>','empty',0,'p_empty','lparser.py',256),
]
| 109.217391 | 1,827 | 0.635748 |
3bbac16236e61631387703fef505552fc168bc2d | 3,057 | py | Python | lifelong_rl/trainers/lstm_memory/state_predictor.py | nakamotoo/lifelong_rl | a8376a57cdeff158810e71cd31cba089852399b7 | [
"MIT"
] | null | null | null | lifelong_rl/trainers/lstm_memory/state_predictor.py | nakamotoo/lifelong_rl | a8376a57cdeff158810e71cd31cba089852399b7 | [
"MIT"
] | null | null | null | lifelong_rl/trainers/lstm_memory/state_predictor.py | nakamotoo/lifelong_rl | a8376a57cdeff158810e71cd31cba089852399b7 | [
"MIT"
] | null | null | null | import numpy as np
import torch
import lifelong_rl.torch.pytorch_util as ptu
# q(s | o, m)
class StatePredictor(torch.nn.Module):
    """Models q(s | o, m): predicts a hidden state from an observation
    and a latent code.

    Architecture: concat(obs, latent) -> [optional BatchNorm1d] -> Linear ->
    hidden MLP trunk -> Linear head giving the mean of a unit-variance
    diagonal Gaussian over the hidden state.
    """
    def __init__(
        self,
        observation_size,
        latent_size,
        hidden_state_size,
        normalize_observations=True,
        fc_layer_params=(256, 256),
        fix_variance=True,
        activation_func=torch.nn.ReLU,
    ):
        """
        Parameters:
            observation_size (int): width of the observation vector.
            latent_size (int): width of the latent code concatenated to the
                observation.
            hidden_state_size (int): width of the hidden state to predict.
            normalize_observations (bool): if True, batch-normalize the
                (obs, latent) input and (affine=False) the target hidden
                state inside get_log_prob.
            fc_layer_params (tuple): widths of the fully-connected trunk.
            fix_variance (bool): only True is implemented (unit variance);
                False raises NotImplementedError below.
            activation_func: activation module class placed between layers.
        """
        super().__init__()
        self._observation_size = observation_size
        self._latent_size = latent_size
        self._hidden_state_size = hidden_state_size
        self._normalize_observations = normalize_observations
        self._fc_layer_params = fc_layer_params
        self._fix_variance = fix_variance
        # Hidden trunk: for each consecutive pair of widths add
        # Linear + activation; an extra leading activation follows in_func.
        layers = []
        for i in range(len(fc_layer_params)-1):
            if i == 0:
                layers.append(activation_func())
            layers.append(torch.nn.Linear(fc_layer_params[i], fc_layer_params[i+1]))
            layers.append(activation_func())
        self.model = torch.nn.Sequential(*layers)
        # Input head: optional BatchNorm over the concatenated (obs, latent)
        # vector, then a Linear into the first trunk width.
        # NOTE(review): BatchNorm1d implies 2-D (batch, features) inputs —
        # confirm against callers.
        in_layers = []
        if self._normalize_observations:
            in_layers.append(torch.nn.BatchNorm1d(observation_size + latent_size))
            # Non-affine normalizer applied to the *target* hidden states
            # in get_log_prob (not part of the forward path).
            self.out_preproc = torch.nn.BatchNorm1d(hidden_state_size, affine=False)
        else:
            print('not normalization observations')
        in_layers.append(torch.nn.Linear(observation_size + latent_size, fc_layer_params[0]))
        self.in_func = torch.nn.Sequential(*in_layers)
        # Output head producing the Gaussian mean.
        self.out_mean = torch.nn.Linear(fc_layer_params[-1], hidden_state_size)
        if not self._fix_variance:
            self.out_std = torch.nn.Linear(fc_layer_params[-1], hidden_state_size)
            # TODO: implement clipping
            raise NotImplementedError
        self._normalize_output = True
    def forward(self, obs, latents):
        """Return the predicted hidden-state mean for concat(obs, latents).

        The non-fixed-variance branch is unreachable in practice:
        __init__ raises NotImplementedError when fix_variance is False.
        """
        x = torch.cat([obs, latents], dim=-1)
        x = self.in_func(x)
        x = self.model(x)
        if self._fix_variance:
            return self.out_mean(x)
        else:
            return self.out_mean(x), self.out_std(x)
    def _get_distribution(self, obs, latents):
        """Build a diagonal unit-variance Gaussian centered at the
        predicted mean (Independent over the last event dimension)."""
        x = torch.cat([obs, latents], dim=-1)
        x = self.in_func(x)
        x = self.model(x)
        mean = self.out_mean(x)
        if self._fix_variance:
            std = ptu.ones(*mean.shape)
            dist = torch.distributions.independent.Independent(
                torch.distributions.Normal(mean, std), 1
            )
        else:
            raise NotImplementedError
        return dist
    def get_log_prob(self, obs, latents, hidden_states):
        """Log-likelihood of hidden_states under q(s | o, m).

        Targets are batch-normalized first when normalize_observations
        is enabled.
        """
        if self._normalize_observations:
            hidden_states = self.out_preproc(hidden_states)
        dist = self._get_distribution(obs, latents)
        return dist.log_prob(hidden_states)
    def get_loss(self, obs, latents, hidden_states, weights=None):
        """Negative mean log-likelihood, optionally weighted per sample."""
        log_probs = self.get_log_prob(obs, latents, hidden_states)
        if weights is not None:
            log_probs = log_probs * weights
        return -log_probs.mean()
| 33.228261 | 93 | 0.622833 |
5681976f5cdf4105c0ab33f9b45c8a0bf7218ba9 | 3,052 | py | Python | 2022/Python-FreeCodeCamp/Scientific_computing_with_python/test_module.py | millennialseb/EDU_python | 806bb21f873170c29d45d5279af5bd83c8b27dc9 | [
"MIT"
] | 1 | 2021-09-10T23:39:55.000Z | 2021-09-10T23:39:55.000Z | 2022/Python-FreeCodeCamp/Scientific_computing_with_python/test_module.py | millennialseb/EDU_python | 806bb21f873170c29d45d5279af5bd83c8b27dc9 | [
"MIT"
] | null | null | null | 2022/Python-FreeCodeCamp/Scientific_computing_with_python/test_module.py | millennialseb/EDU_python | 806bb21f873170c29d45d5279af5bd83c8b27dc9 | [
"MIT"
] | null | null | null | """
Rules
The function will return the correct conversion if the supplied problems are properly formatted, otherwise, it will return a string that describes an error that is meaningful to the user.
Situations that will return an error:
If there are too many problems supplied to the function. The limit is five, anything more will return: Error: Too many problems.
The appropriate operators the function will accept are addition and subtraction. Multiplication and division will return an error. Other operators not mentioned in this bullet point will not need to be tested. The error returned will be: Error: Operator must be '+' or '-'.
Each number (operand) should only contain digits. Otherwise, the function will return: Error: Numbers must only contain digits.
Each operand (aka number on each side of the operator) has a max of four digits in width. Otherwise, the error string returned will be: Error: Numbers cannot be more than four digits.
If the user supplied the correct format of problems, the conversion you return will follow these rules:
There should be a single space between the operator and the longest of the two operands, the operator will be on the same line as the second operand, both operands will be in the same order as provided (the first will be the top one and the second will be the bottom.
Numbers should be right-aligned.
There should be four spaces between each problem.
There should be dashes at the bottom of each problem. The dashes should run along the entire length of each problem individually. (The example above shows what this should look like.)
Development
Write your code in arithmetic_arranger.py. For development, you can use main.py to test your arithmetic_arranger() function. Click the "run" button and main.py will run.
Testing
The unit tests for this project are in test_module.py. We are running the tests from test_module.py in main.py for your convenience. The tests will run automatically whenever you hit the "run" button. Alternatively you may run the tests by inputting pytest in the console.
Submitting
Copy your project's URL and submit it below.
"""
import pandas as pd
import operator
from test_1 import arrange
# Accumulators for the user-entered first and second addends.
lst1 = []
lst2 = []
# converting list of numebert to digits
def magic(lst): # [1,2,3]
s = map(str, lst) # ['1','2','3']
s = "".join(s) # '123'
s = int(s) # 123
return s
# Ask how many addition problems the user wants to enter.
n = int(input("Enter number of operation cases that you want to sum: "))
print(f"Now, enter the {n} sum operations that you want to sum:")
# Collect both addends for each of the n problems.
for i in range(0, n):
    x1 = int(input("Enter the first addend: "))
    x2 = int(input("Enter the second addend: "))
    lst1.append(x1)  # first operand of problem i
    lst2.append(x2)  # second operand of problem i
print(f" the first addend list is: {lst1}")
print(f" the second addend list is: {lst2}")
# NOTE(review): these loops mutate the lists while iterating AND pass a
# single int (lst.pop()) to magic(), which expects an iterable — this
# raises TypeError at runtime for non-empty input. Verify intent; it
# likely should be a = magic(lst1), b = magic(lst2) without the loops.
for x in lst1:
    a = magic(lst1.pop())
for x in lst2:
    b = magic(lst2.pop())
print(arrange([f"{a} + {b}"]))
# NOTE(review): 'sum' shadows the builtin, and by this point the pops above
# have drained elements from lst1/lst2, so these pairwise sums are computed
# over partially-emptied lists — confirm this is intended.
sum = list(map(operator.add, lst1, lst2))
print(f" the sum are: {sum}")
| 49.225806 | 273 | 0.74574 |
97e474c8d38d4d0e5eb656dca517f26fe5ad00ef | 3,604 | py | Python | tests/gold_tests/pluginTest/sslheaders/sslheaders.test.py | zds05/trafficserver | 258c69b7628f5a4b90488e147c244a582222b5c8 | [
"Apache-2.0"
] | null | null | null | tests/gold_tests/pluginTest/sslheaders/sslheaders.test.py | zds05/trafficserver | 258c69b7628f5a4b90488e147c244a582222b5c8 | [
"Apache-2.0"
] | null | null | null | tests/gold_tests/pluginTest/sslheaders/sslheaders.test.py | zds05/trafficserver | 258c69b7628f5a4b90488e147c244a582222b5c8 | [
"Apache-2.0"
] | null | null | null | '''
Test the sslheaders plugin.
'''
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
Test.Summary = '''
Test sslheaders plugin.
'''
# The test needs curl with HTTP/2 support to drive the TLS connection.
Test.SkipUnless(
    Condition.HasCurlFeature('http2'),
)
# Compare the plugin's log output against the golden file.
Test.Disk.File('sslheaders.log').Content = 'sslheaders.gold'
# Origin server; observer.py inspects the proxied request headers.
server = Test.MakeOriginServer("server", options={'--load': Test.TestDirectory + '/observer.py'})
request_header = {
    "headers": "GET / HTTP/1.1\r\nHost: doesnotmatter\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
server.addResponse("sessionlog.json", request_header, response_header)
# Disable the cache to make sure each request is forwarded to the origin
# server.
ts = Test.MakeATSProcess("ts", enable_tls=True, enable_cache=False)
ts.addSSLfile("ssl/server.pem")
ts.addSSLfile("ssl/server.key")
# ts.addSSLfile("ssl/signer.pem")
ts.Disk.records_config.update({
    'proxy.config.diags.debug.enabled': 0,
    'proxy.config.diags.debug.tags': 'http',
    'proxy.config.proxy_name': 'Poxy_Proxy', # This will be the server name.
    'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir),
    'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir),
    'proxy.config.http.server_ports': (
        'ipv4:{0} ipv4:{1}:proto=http2;http:ssl ipv6:{0} ipv6:{1}:proto=http2;http:ssl'
        .format(ts.Variables.port, ts.Variables.ssl_port)),
    # 'proxy.config.ssl.client.verify.server': 0,
    # 'proxy.config.ssl.server.cipher_suite': 'ECDHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-SHA256:ECDHE-RSA-AES256-SHA384:AES128-GCM-SHA256:AES256-GCM-SHA384:ECDHE-RSA-RC4-SHA:ECDHE-RSA-AES128-SHA:ECDHE-RSA-AES256-SHA:RC4-SHA:RC4-MD5:AES128-SHA:AES256-SHA:DES-CBC3-SHA!SRP:!DSS:!PSK:!aNULL:!eNULL:!SSLv2',
    # 'proxy.config.url_remap.pristine_host_hdr' : 1,
    # 'proxy.config.ssl.client.certification_level': 2,
    # 'proxy.config.ssl.CA.cert.filename': '{0}/signer.pem'.format(ts.Variables.SSLDir),
    # 'proxy.config.ssl.TLSv1_3': 0
})
ts.Disk.ssl_multicert_config.AddLine(
    'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key'
)
# Map both plain and TLS hostnames to the origin.
ts.Disk.remap_config.AddLine(
    'map http://bar.com http://127.0.0.1:{0}'.format(server.Variables.Port)
)
ts.Disk.remap_config.AddLine(
    'map https://bar.com http://127.0.0.1:{0}'.format(server.Variables.Port)
)
# Load the plugin under test; inject the client cert subject as a header.
ts.Disk.plugin_config.AddLine(
    'sslheaders.so SSL-Client-ID=client.subject'
)
# Single test run: curl over TLS through ATS; the client-supplied
# SSL-Client-ID header should be overwritten by the plugin.
tr = Test.AddTestRun()
tr.Processes.Default.StartBefore(server)
tr.Processes.Default.StartBefore(Test.Processes.ts)
tr.Processes.Default.Command = (
    'curl -H "SSL-Client-ID: My Fake Client ID" --verbose --ipv4 --insecure --header "Host: bar.com"' +
    ' https://localhost:{}'.format(ts.Variables.ssl_port)
)
tr.Processes.Default.ReturnCode = 0
| 42.4 | 333 | 0.720866 |
bbed173dd6fcf655e09cdecba3a84bd4863db2f1 | 4,386 | py | Python | user.py | taindp98/deep_reinforcement_learning | 2f5c734280fbc242267a8fea72cf59c9fff54966 | [
"MIT"
] | null | null | null | user.py | taindp98/deep_reinforcement_learning | 2f5c734280fbc242267a8fea72cf59c9fff54966 | [
"MIT"
] | null | null | null | user.py | taindp98/deep_reinforcement_learning | 2f5c734280fbc242267a8fea72cf59c9fff54966 | [
"MIT"
] | null | null | null | from dialogue_config import FAIL, SUCCESS, usersim_intents, all_slots
from utils import reward_function
class User:
    """Connects a real user to the conversation through the console.

    The user types responses in the form
        intent/inform pairs/request keys
    e.g. ``request/moviename:: room**date:: friday/starttime, city, theater``
    or ``inform/moviename:: zootopia/`` or ``done//``.
    """

    def __init__(self, constants):
        """
        Parameters:
            constants (dict): Loaded constants as dict; reads
                constants['run']['max_round_num'].
        """
        self.max_round = constants['run']['max_round_num']

    def reset(self):
        """
        Reset the user.

        Returns:
            dict: The user response
        """
        return self._return_response()

    def _return_response(self):
        """
        Asks user in console for response then receives a response as input.

        Format must be like this: request/moviename:: room**date:: friday/starttime, city, theater
        or inform/moviename:: zootopia/
        or request//starttime
        or done//
        intents, informs keys and values, and request keys and values cannot contain / , :

        Re-prompts (instead of crashing) on malformed input.

        Returns:
            dict: The response of the user
        """
        while True:
            # Rebuild the response from scratch on every attempt so a
            # rejected entry cannot leak partially-parsed slots into the
            # next attempt (the original reused one dict across retries).
            response = {'intent': '', 'inform_slots': {}, 'request_slots': {}}
            chunks = input('Response: ').split('/')
            if len(chunks) < 3:
                # Not enough '/' separators to hold intent/informs/requests;
                # previously this raised IndexError.
                continue
            if chunks[0] not in usersim_intents:
                continue
            response['intent'] = chunks[0]
            if not self._parse_informs(chunks[1], response['inform_slots']):
                continue
            if not self._parse_requests(chunks[2], response['request_slots']):
                continue
            return response

    @staticmethod
    def _parse_informs(informs_str, inform_slots):
        """Parse 'key:: v1||v2**key2:: v3' into inform_slots.

        Returns False (caller re-prompts) on an unknown slot key or an
        item missing the ':: ' separator.
        """
        if not informs_str:
            return True
        print(informs_str)
        informs_items_list = informs_str.split('**')
        print(informs_items_list)
        for item in informs_items_list:
            parts = item.split(':: ')
            if len(parts) < 2 or parts[0] not in all_slots:
                return False
            inform_slots[parts[0]] = parts[1].split('||')
        return True

    @staticmethod
    def _parse_requests(requests_str, request_slots):
        """Parse a ', '-separated list of request keys into request_slots.

        Returns False (caller re-prompts) on an unknown slot key.
        """
        if not requests_str:
            return True
        for req in requests_str.split(', '):
            if req not in all_slots:
                return False
            request_slots[req] = 'UNK'
        return True

    def _return_success(self):
        """
        Ask the user in console to input success (-1, 0 or 1) for (loss, neither loss nor win, win).

        Re-prompts on non-numeric input instead of crashing.

        Returns:
            int: Success: -1, 0 or 1
        """
        while True:
            try:
                success = int(input('Success?: '))
            except ValueError:
                # Non-numeric entry: ask again rather than raising.
                continue
            if success in (-1, 0, 1):
                return success

    def step(self, agent_action):
        """
        Return the user's response, reward, done and success.

        Parameters:
            agent_action (dict): The current action of the agent

        Returns:
            dict: User response
            int: Reward
            bool: Done flag
            bool: True on win (success == 1), else False
        """
        # Assertions ----
        # No unk in agent action informs
        for value in agent_action['inform_slots'].values():
            assert value != 'UNK'
            assert value != 'PLACEHOLDER'
        # No PLACEHOLDER in agent_action at all
        for value in agent_action['request_slots'].values():
            assert value != 'PLACEHOLDER'
        # ---------------

        print('Agent Action: {}'.format(agent_action))

        if agent_action['round'] == self.max_round:
            # Out of turns: the episode fails without asking the user.
            success = FAIL
            user_response = {'intent': 'done', 'request_slots': {}, 'inform_slots': {}}
        else:
            user_response = self._return_response()
            success = self._return_success()

        # Episode terminates on a definite win or loss.
        done = success == FAIL or success == SUCCESS

        assert 'UNK' not in user_response['inform_slots'].values()
        assert 'PLACEHOLDER' not in user_response['request_slots'].values()

        reward = reward_function(success, self.max_round)
        # 'success == 1' replaces the original 'success is 1': identity
        # comparison with an int literal is implementation-defined and
        # emits SyntaxWarning on Python >= 3.8.
        return user_response, reward, done, success == 1
| 31.328571 | 100 | 0.53648 |
5aa568a0d1337e9b047fc93f33e40cc25c7c624b | 2,507 | py | Python | run_continuation.py | RasmooL/dqn-tf | d928d5c83a5587dac2ebe1af715038e9ba0fddb2 | [
"MIT"
] | 1 | 2016-08-19T21:38:56.000Z | 2016-08-19T21:38:56.000Z | run_continuation.py | RasmooL/dqn-tf | d928d5c83a5587dac2ebe1af715038e9ba0fddb2 | [
"MIT"
] | null | null | null | run_continuation.py | RasmooL/dqn-tf | d928d5c83a5587dac2ebe1af715038e9ba0fddb2 | [
"MIT"
] | null | null | null | """
Copyright 2016 Rasmus Larsen
This software may be modified and distributed under the terms
of the MIT license. See the LICENSE.txt file for details.
"""
import sys
import time
from sacred import Experiment
from core.ALEEmulator import ALEEmulator
from continuation.OriginalNet import OriginalNet
from core.ScreenBuffer import ScreenBuffer
import numpy as np
import cv2
ex = Experiment('continuation')
@ex.config
def net_config():
conv_layers = 3
conv_units = [32, 64, 64]
filter_sizes = [8, 4, 2]
strides = [4, 2, 1]
hidden_units = 512
num_heads = 3
gate_noise = 0.01
sharpening_slope = 10
in_width = 84
in_height = 84
device = '/gpu:0'
lr = 0.0001
opt_decay = 0.95
momentum = 0.5
opt_eps = 0.01
tensorboard = False
tensorboard_freq = 50
@ex.config
def emu_config():
rom_path = '../ale-git/roms/'
rom_name = 'breakout'
display_screen = True
frame_skip = 4
repeat_prob = 0.0
color_avg = True
random_seed = 42
random_start = 30
@ex.config
def agent_config():
batch_size = 16
train_start = 5e3
train_frames = 5e6
test_freq = 5e4
test_frames = 5e3
save_freq = 5e3
@ex.automain
def main(_config, _log):
sys.stdout = open('log_' + _config['rom_name'] + time.strftime('%H%M%d%m', time.gmtime()), 'w', buffering=True)
print "#{}".format(_config)
emu = ALEEmulator(_config)
_config['num_actions'] = emu.num_actions
net = OriginalNet(_config)
cv2.startWindowThread()
cv2.namedWindow("prediction")
# fill screen history up to batch size
buf = ScreenBuffer(_config, _config['batch_size'])
for n in range(_config['batch_size']):
emu.act(emu.actions[np.random.randint(0, emu.num_actions)]) # act randomly
buf.insert(emu.get_screen_rgb())
# train
step = 0
while step < _config['train_frames']:
cost = net.train(buf.get(), [step])
print step, cost
# predict next frame
hidden = net.encode(buf.get()[np.newaxis, -1])
pred = net.predict_from_hidden(hidden)
emu.act(emu.actions[np.random.randint(0, emu.num_actions)]) # act randomly
buf.insert(emu.get_screen_rgb())
# display difference between prediction and true frame
cv2.imshow('prediction', cv2.resize(pred[0], (84 * 4, 84 * 4)))
if emu.terminal():
emu.new_game()
if step % _config['save_freq'] == 0:
net.save('cont')
step += 1
| 23.212963 | 115 | 0.640207 |
87e830a13dddc4cc79a804fad5c7d2918ad0f54a | 18,797 | py | Python | training/src/tests/tests/python/PaddingLayer.py | steelONIONknight/bolt | 9bd3d08f2abb14435ca3ad0179889e48fa7e9b47 | [
"MIT"
] | null | null | null | training/src/tests/tests/python/PaddingLayer.py | steelONIONknight/bolt | 9bd3d08f2abb14435ca3ad0179889e48fa7e9b47 | [
"MIT"
] | null | null | null | training/src/tests/tests/python/PaddingLayer.py | steelONIONknight/bolt | 9bd3d08f2abb14435ca3ad0179889e48fa7e9b47 | [
"MIT"
] | null | null | null | # Copyright (C) 2022. Huawei Technologies Co., Ltd. All rights reserved.
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#!/usr/bin/env python
import unittest
import torch
import torch.nn as nn
class PaddingLayerTests(unittest.TestCase):
    def setUp(self):
        """Build the shared fixture tensors.

        All fixtures are 4-D (N, C, H, W) tensors derived from a 1x1x3x5
        input; `fv` is the constant fill value used by ConstantPad2d cases.
        Gradient fixtures mirror torch.autograd's accumulation semantics.
        """
        self.filling_value = 5.0
        fv = self.filling_value
        # Expected output of ConstantPad2d(3, fv) on a 1x1x3x5 tensor of ones.
        self.symmetric_padding_result = torch.tensor(
            [
                [
                    [
                        [fv, fv, fv, fv, fv, fv, fv, fv, fv, fv, fv],
                        [fv, fv, fv, fv, fv, fv, fv, fv, fv, fv, fv],
                        [fv, fv, fv, fv, fv, fv, fv, fv, fv, fv, fv],
                        [fv, fv, fv, 1.0, 1.0, 1.0, 1.0, 1.0, fv, fv, fv],
                        [fv, fv, fv, 1.0, 1.0, 1.0, 1.0, 1.0, fv, fv, fv],
                        [fv, fv, fv, 1.0, 1.0, 1.0, 1.0, 1.0, fv, fv, fv],
                        [fv, fv, fv, fv, fv, fv, fv, fv, fv, fv, fv],
                        [fv, fv, fv, fv, fv, fv, fv, fv, fv, fv, fv],
                        [fv, fv, fv, fv, fv, fv, fv, fv, fv, fv, fv],
                    ]
                ]
            ]
        )
        # Expected output of ConstantPad2d([1, 2, 3, 4], fv).
        self.asymmetric_padding_result = torch.tensor(
            [
                [
                    [
                        [fv, fv, fv, fv, fv, fv, fv, fv],
                        [fv, fv, fv, fv, fv, fv, fv, fv],
                        [fv, fv, fv, fv, fv, fv, fv, fv],
                        [fv, 1.0, 1.0, 1.0, 1.0, 1.0, fv, fv],
                        [fv, 1.0, 1.0, 1.0, 1.0, 1.0, fv, fv],
                        [fv, 1.0, 1.0, 1.0, 1.0, 1.0, fv, fv],
                        [fv, fv, fv, fv, fv, fv, fv, fv],
                        [fv, fv, fv, fv, fv, fv, fv, fv],
                        [fv, fv, fv, fv, fv, fv, fv, fv],
                        [fv, fv, fv, fv, fv, fv, fv, fv],
                    ]
                ]
            ]
        )
        # Expected output of ConstantPad2d([1, 2], fv) — width-only padding.
        self.asymmetric_padding_for_W_result = torch.tensor(
            [
                [
                    [
                        [fv, 1.0, 1.0, 1.0, 1.0, 1.0, fv, fv],
                        [fv, 1.0, 1.0, 1.0, 1.0, 1.0, fv, fv],
                        [fv, 1.0, 1.0, 1.0, 1.0, 1.0, fv, fv],
                    ]
                ]
            ]
        )
        # Upstream gradient fed into a constant-pad backward pass.
        self.delta1_for_constant_pad = torch.tensor(
            [
                [
                    [
                        [fv, fv, fv, fv, fv, fv, fv, fv, fv, fv, fv, fv],
                        [fv, fv, fv, fv, fv, fv, fv, fv, fv, fv, fv, fv],
                        [fv, fv, fv, fv, 1.0, 1.0, 1.0, 1.0, 1.0, fv, fv, fv],
                        [fv, fv, fv, fv, 1.0, 1.0, 1.0, 1.0, 1.0, fv, fv, fv],
                        [fv, fv, fv, fv, 1.0, 1.0, 1.0, 1.0, 1.0, fv, fv, fv],
                        [fv, fv, fv, fv, fv, fv, fv, fv, fv, fv, fv, fv],
                    ]
                ]
            ]
        )
        self.constant_pad_backward_pass_result1 = torch.tensor(
            [
                [
                    [
                        [1.0, 1.0, 1.0, 1.0, 1.0],
                        [1.0, 1.0, 1.0, 1.0, 1.0],
                        [1.0, 1.0, 1.0, 1.0, 1.0],
                    ]
                ]
            ]
        )
        # Second upstream gradient (interior filled with 2.0).
        self.delta2_for_constant_pad = torch.tensor(
            [
                [
                    [
                        [fv, fv, fv, fv, fv, fv, fv, fv, fv, fv, fv, fv],
                        [fv, fv, fv, fv, fv, fv, fv, fv, fv, fv, fv, fv],
                        [fv, fv, fv, fv, 2.0, 2.0, 2.0, 2.0, 2.0, fv, fv, fv],
                        [fv, fv, fv, fv, 2.0, 2.0, 2.0, 2.0, 2.0, fv, fv, fv],
                        [fv, fv, fv, fv, 2.0, 2.0, 2.0, 2.0, 2.0, fv, fv, fv],
                        [fv, fv, fv, fv, fv, fv, fv, fv, fv, fv, fv, fv],
                    ]
                ]
            ]
        )
        # self.backward_pass_result2 filled by 3.0 value because of torch.autograd package calculates gradient
        # as sum of all results of backward() function calls
        self.constant_pad_backward_pass_result2 = torch.tensor(
            [
                [
                    [
                        [3.0, 3.0, 3.0, 3.0, 3.0],
                        [3.0, 3.0, 3.0, 3.0, 3.0],
                        [3.0, 3.0, 3.0, 3.0, 3.0],
                    ]
                ]
            ]
        )
        # ReflectionPad2d expectations (values mirror across the border).
        self.symmetric_reflection_padding_result = torch.tensor(
            [
                [
                    [
                        [10.0, 9.0, 8.0, 9.0, 10.0, 11.0, 10.0, 9.0],
                        [6.0, 5.0, 4.0, 5.0, 6.0, 7.0, 6.0, 5.0],
                        [2.0, 1.0, 0.0, 1.0, 2.0, 3.0, 2.0, 1.0],
                        [6.0, 5.0, 4.0, 5.0, 6.0, 7.0, 6.0, 5.0],
                        [10.0, 9.0, 8.0, 9.0, 10.0, 11.0, 10.0, 9.0],
                        [6.0, 5.0, 4.0, 5.0, 6.0, 7.0, 6.0, 5.0],
                        [2.0, 1.0, 0.0, 1.0, 2.0, 3.0, 2.0, 1.0],
                    ]
                ]
            ]
        )
        self.asymmetric_reflection_padding_result = torch.tensor(
            [
                [
                    [
                        [7.0, 6.0, 5.0, 4.0, 5.0, 6.0, 7.0, 6.0, 5.0],
                        [3.0, 2.0, 1.0, 0.0, 1.0, 2.0, 3.0, 2.0, 1.0],
                        [7.0, 6.0, 5.0, 4.0, 5.0, 6.0, 7.0, 6.0, 5.0],
                        [11.0, 10.0, 9.0, 8.0, 9.0, 10.0, 11.0, 10.0, 9.0],
                    ]
                ]
            ]
        )
        self.reflection_pad_backward_pass_result = torch.tensor(
            [
                [
                    [
                        [1.0, 3.0, 3.0, 3.0, 2.0],
                        [3.0, 9.0, 9.0, 9.0, 6.0],
                        [2.0, 6.0, 6.0, 6.0, 4.0],
                    ]
                ]
            ]
        )
        # ReplicationPad2d expectations (edge values repeated).
        self.symmetric_replication_padding_result = torch.tensor(
            [
                [
                    [
                        [0.0, 0.0, 0.0, 1.0, 2.0, 3.0, 3.0, 3.0],
                        [0.0, 0.0, 0.0, 1.0, 2.0, 3.0, 3.0, 3.0],
                        [0.0, 0.0, 0.0, 1.0, 2.0, 3.0, 3.0, 3.0],
                        [4.0, 4.0, 4.0, 5.0, 6.0, 7.0, 7.0, 7.0],
                        [8.0, 8.0, 8.0, 9.0, 10.0, 11.0, 11.0, 11.0],
                        [8.0, 8.0, 8.0, 9.0, 10.0, 11.0, 11.0, 11.0],
                        [8.0, 8.0, 8.0, 9.0, 10.0, 11.0, 11.0, 11.0],
                    ]
                ]
            ]
        )
        self.asymmetric_replication_padding_result = torch.tensor(
            [
                [
                    [
                        [0.0, 0.0, 0.0, 0.0, 1.0, 2.0, 3.0, 3.0, 3.0],
                        [0.0, 0.0, 0.0, 0.0, 1.0, 2.0, 3.0, 3.0, 3.0],
                        [4.0, 4.0, 4.0, 4.0, 5.0, 6.0, 7.0, 7.0, 7.0],
                        [8.0, 8.0, 8.0, 8.0, 9.0, 10.0, 11.0, 11.0, 11.0],
                    ]
                ]
            ]
        )
        self.replication_pad_backward_pass_result = torch.tensor(
            [
                [
                    [
                        [15.0, 3.0, 3.0, 3.0, 12.0],
                        [5.0, 1.0, 1.0, 1.0, 4.0],
                        [10.0, 2.0, 2.0, 2.0, 8.0],
                    ]
                ]
            ]
        )
def test_symmetric_padding_for_each_side_of_H_and_W(self):
common_padding = 3
input = torch.ones(1, 1, 3, 5, dtype=torch.float)
model = nn.ConstantPad2d(common_padding, self.filling_value)
output = model(input)
added_from_left = common_padding
added_from_right = common_padding
added_from_top = common_padding
added_from_bottom = common_padding
self.assertEqual(input.size(), torch.Size([1, 1, 3, 5]))
self.assertEqual(
output.size(),
torch.Size(
[
1,
1,
3 + added_from_top + added_from_bottom,
5 + added_from_left + added_from_right,
]
),
)
self.assertTrue(torch.equal(output, self.symmetric_padding_result))
def test_asymmetric_padding_for_each_side_of_H_and_W(self):
paddings = [1, 2, 3, 4]
input = torch.ones(1, 1, 3, 5, dtype=torch.float)
model = nn.ConstantPad2d(paddings, self.filling_value)
output = model(input)
added_from_left = paddings[0]
added_from_right = paddings[1]
added_from_top = paddings[2]
added_from_bottom = paddings[3]
self.assertEqual(input.size(), torch.Size([1, 1, 3, 5]))
self.assertEqual(
output.size(),
torch.Size(
[
1,
1,
3 + added_from_top + added_from_bottom,
5 + added_from_left + added_from_right,
]
),
)
self.assertTrue(torch.equal(output, self.asymmetric_padding_result))
def test_assymmetric_padding_only_for_W(self):
paddings = [1, 2]
input = torch.ones(1, 1, 3, 5, dtype=torch.float)
model = nn.ConstantPad2d(paddings, self.filling_value)
output = model(input)
added_from_left = paddings[0]
added_from_right = paddings[1]
self.assertEqual(input.size(), torch.Size([1, 1, 3, 5]))
self.assertEqual(
output.size(), torch.Size([1, 1, 3, 5 + added_from_left + added_from_right])
)
self.assertTrue(torch.equal(output, self.asymmetric_padding_for_W_result))
def test_setting_no_paddings_for_each_side_of_H_and_W(self):
no_padding = 0
input = torch.ones(1, 1, 3, 5, dtype=torch.float)
model = nn.ConstantPad2d(no_padding, 0.0)
output = model(input)
self.assertEqual(input.size(), torch.Size([1, 1, 3, 5]))
self.assertEqual(output.size(), input.size())
self.assertTrue(torch.equal(output, input))
no_paddings = [0, 0, 0, 0]
model = nn.ConstantPad2d(no_paddings, 0.0)
output = model(input)
self.assertEqual(input.size(), torch.Size([1, 1, 3, 5]))
self.assertEqual(output.size(), input.size())
self.assertTrue(torch.equal(output, input))
def test_that_cannot_be_set_different_paddings_only_for_left_right_of_W_and_top_of_H(
self,
):
input = torch.ones(1, 1, 3, 5, dtype=torch.float)
model = nn.ConstantPad2d([1, 2, 3], 0.0)
self.assertRaises(AssertionError, model, input)
def test_that_cannot_be_set_different_paddings_only_for_left_of_W(self):
input = torch.ones(1, 1, 3, 5, dtype=torch.float)
model = nn.ConstantPad2d([1], 0.0)
self.assertRaises(AssertionError, model, input)
def test_that_cannot_be_set_more_then_4_paddings(self):
input = torch.ones(1, 1, 3, 5, dtype=torch.float)
model = nn.ConstantPad2d([1, 2, 3, 4, 5], 0.0)
self.assertRaises(AssertionError, model, input)
def test_backward_computations_of_ConstantPad2d(self):
    """Gradient of ConstantPad2d is the delta cropped back to the input window."""
    # since ConstantPad2d is functional layer (without weights)
    # gradient of ConstantPad2d input is result of calculation
    # of function ConstantPad2d_derivative(backward_input)
    # where ConstantPad2d_derivative calculation result is tensor
    # with the same shape as input and
    # grad[n][c][h][w] = backward_input[n][c][h + padding_top][w + padding_left]
    paddings = [4, 3, 2, 1]  # (left, right, top, bottom)
    input = torch.ones(1, 1, 3, 5, dtype=torch.float, requires_grad=True)
    model = nn.ConstantPad2d(paddings, self.filling_value)
    output = model(input)
    # First backward pass: grad should equal the cropped delta1.
    output.backward(self.delta1_for_constant_pad)
    self.assertEqual(input.grad.size(), input.size())
    self.assertTrue(
        torch.equal(input.grad, self.constant_pad_backward_pass_result1)
    )
    # Second backward pass on the same graph: gradients ACCUMULATE into
    # input.grad, so result2 presumably includes the first pass's
    # contribution as well -- confirm against the tensors built in setUp.
    output.backward(self.delta2_for_constant_pad)
    self.assertEqual(input.grad.size(), input.size())
    self.assertTrue(
        torch.equal(input.grad, self.constant_pad_backward_pass_result2)
    )
def test_symmetric_reflection_padding(self):
common_padding = 2
input = torch.arange(12, dtype=torch.float).reshape(1, 1, 3, 4)
model = nn.ReflectionPad2d(common_padding)
output = model(input)
added_from_left = common_padding
added_from_right = common_padding
added_from_top = common_padding
added_from_bottom = common_padding
self.assertEqual(input.size(), torch.Size([1, 1, 3, 4]))
self.assertEqual(
output.size(),
torch.Size(
[
1,
1,
3 + added_from_top + added_from_bottom,
4 + added_from_left + added_from_right,
]
),
)
self.assertTrue(torch.equal(output, self.symmetric_reflection_padding_result))
def test_asymmetric_reflection_padding(self):
paddings = [3, 2, 1, 0]
input = torch.arange(12, dtype=torch.float).reshape(1, 1, 3, 4)
model = nn.ReflectionPad2d(paddings)
output = model(input)
added_from_left = paddings[0]
added_from_right = paddings[1]
added_from_top = paddings[2]
added_from_bottom = paddings[3]
self.assertEqual(input.size(), torch.Size([1, 1, 3, 4]))
self.assertEqual(
output.size(),
torch.Size(
[
1,
1,
3 + added_from_top + added_from_bottom,
4 + added_from_left + added_from_right,
]
),
)
self.assertTrue(torch.equal(output, self.asymmetric_reflection_padding_result))
def test_no_reflection_padding(self):
input = torch.arange(12, dtype=torch.float).reshape(1, 1, 3, 4)
model = nn.ReflectionPad2d(0)
output = model(input)
self.assertEqual(input.size(), torch.Size([1, 1, 3, 4]))
self.assertEqual(output.size(), input.size())
self.assertTrue(torch.equal(output, input))
def test_that_reflection_padding_cannot_be_greater_then_or_equal_to_the_corresponding_dimension(
self,
):
input = torch.arange(12, dtype=torch.float).reshape(1, 1, 3, 4)
model = nn.ReflectionPad2d(3)
self.assertRaises(RuntimeError, model, input)
model = nn.ReflectionPad2d([4, 1, 1, 1])
self.assertRaises(RuntimeError, model, input)
def test_backward_computations_of_ReflectionPad2d(self):
paddings = [4, 3, 2, 1]
input = torch.arange(1 * 1 * 3 * 5, dtype=torch.float).reshape(1, 1, 3, 5)
input.requires_grad_(True)
model = nn.ReflectionPad2d(paddings)
output = model(input)
delta = torch.ones(1, 1, 6, 12, dtype=torch.float)
output.backward(delta)
self.assertEqual(input.grad.size(), input.size())
self.assertTrue(
torch.equal(input.grad, self.reflection_pad_backward_pass_result)
)
def test_symmetric_replecation_padding(self):
common_padding = 2
input = torch.arange(12, dtype=torch.float).reshape(1, 1, 3, 4)
model = nn.ReplicationPad2d(common_padding)
output = model(input)
added_from_left = common_padding
added_from_right = common_padding
added_from_top = common_padding
added_from_bottom = common_padding
self.assertEqual(input.size(), torch.Size([1, 1, 3, 4]))
self.assertEqual(
output.size(),
torch.Size(
[
1,
1,
3 + added_from_top + added_from_bottom,
4 + added_from_left + added_from_right,
]
),
)
self.assertTrue(torch.equal(output, self.symmetric_replication_padding_result))
def test_asymmetric_replication_padding(self):
paddings = [3, 2, 1, 0]
input = torch.arange(12, dtype=torch.float).reshape(1, 1, 3, 4)
model = nn.ReplicationPad2d(paddings)
output = model(input)
added_from_left = paddings[0]
added_from_right = paddings[1]
added_from_top = paddings[2]
added_from_bottom = paddings[3]
self.assertEqual(input.size(), torch.Size([1, 1, 3, 4]))
self.assertEqual(
output.size(),
torch.Size(
[
1,
1,
3 + added_from_top + added_from_bottom,
4 + added_from_left + added_from_right,
]
),
)
self.assertTrue(torch.equal(output, self.asymmetric_replication_padding_result))
def test_no_replication_padding(self):
input = torch.arange(12, dtype=torch.float).reshape(1, 1, 3, 4)
model = nn.ReplicationPad2d(0)
output = model(input)
self.assertEqual(input.size(), torch.Size([1, 1, 3, 4]))
self.assertEqual(output.size(), input.size())
self.assertTrue(torch.equal(output, input))
def test_backward_computations_of_ReplicationPad2d(self):
paddings = [4, 3, 2, 1]
input = torch.arange(1 * 1 * 3 * 5, dtype=torch.float).reshape(1, 1, 3, 5)
input.requires_grad_(True)
model = nn.ReplicationPad2d(paddings)
output = model(input)
delta = torch.ones(1, 1, 6, 12, dtype=torch.float)
output.backward(delta)
self.assertEqual(input.grad.size(), input.size())
self.assertTrue(
torch.equal(input.grad, self.replication_pad_backward_pass_result)
)
# Allow running this test module directly with `python <this file>`.
if __name__ == "__main__":
    unittest.main()
| 38.361224 | 148 | 0.490663 |
1c9131a21fc4206921e43f315b97033dd6893577 | 2,277 | py | Python | firmware/controller.py | pedrohov/rpi-localization-mapping | 82abadc90d49b79e3fb95c36db1ec31c8a0e74ad | [
"MIT"
] | 5 | 2019-05-19T22:36:13.000Z | 2019-09-15T01:43:50.000Z | firmware/controller.py | pedrohov/rpi-localization-mapping | 82abadc90d49b79e3fb95c36db1ec31c8a0e74ad | [
"MIT"
] | null | null | null | firmware/controller.py | pedrohov/rpi-localization-mapping | 82abadc90d49b79e3fb95c36db1ec31c8a0e74ad | [
"MIT"
] | null | null | null | import RPi.GPIO as GPIO;
from motor import *;
import time;
class Controller():
    """Coordinates the four wheel motors of the vehicle.

    A movement command keeps running until another command replaces it,
    or -- when ``timer`` is given -- runs for that many seconds and then
    stops all motors.
    """

    def __init__(self,
                 TL1, TL2, BL1, BL2,
                 TR1, TR2, BR1, BR2):
        """Create one Motor per wheel from the given BCM pin pairs."""
        GPIO.setmode(GPIO.BCM)
        self.top_left = Motor(TL1, TL2)
        self.bottom_left = Motor(BL1, BL2)
        self.top_right = Motor(TR1, TR2)
        self.bottom_right = Motor(BR1, BR2)

    def _drive(self, left_action, right_action, timer):
        """Run both motor pairs, optionally stopping after ``timer`` seconds.

        left_action/right_action are the Motor method names ('forward' or
        'reverse') applied to the left and right side respectively.
        """
        for motor in (self.top_left, self.bottom_left):
            getattr(motor, left_action)()
        for motor in (self.top_right, self.bottom_right):
            getattr(motor, right_action)()
        if timer is not None:
            time.sleep(timer)
            self.stop()

    def forward(self, timer = None):
        """Drive straight ahead."""
        self._drive('forward', 'forward', timer)

    def reverse(self, timer = None):
        """Drive straight back."""
        self._drive('reverse', 'reverse', timer)

    def rotateLeft(self, timer = None):
        """Spin in place to the left: left side back, right side ahead."""
        self._drive('reverse', 'forward', timer)

    def rotateRight(self, timer = None):
        """Spin in place to the right: left side ahead, right side back."""
        self._drive('forward', 'reverse', timer)

    def stop(self):
        """Halt all four motors."""
        for motor in (self.top_left, self.bottom_left,
                      self.top_right, self.bottom_right):
            motor.stop()
if __name__ == "__main__":
    # Manual smoke test: spin the vehicle left for 1.5 s, wait, then
    # release the GPIO pins.
    #
    # The original version duplicated GPIO.cleanup() in a bare `except:`
    # that silently swallowed every error (including KeyboardInterrupt)
    # and discarded the traceback. try/finally guarantees cleanup while
    # letting failures propagate and be seen.
    try:
        # Test the controller:
        controller = Controller(11, 5, 22, 27, 26, 19, 6, 13)
        #controller.forward(1);
        #controller.reverse(3);
        controller.rotateLeft(1.5)
        #controller.rotateRight(1.5);
        #controller.top_right.reverse();
        time.sleep(2)
    finally:
        GPIO.cleanup()
| 28.822785 | 62 | 0.541941 |
2f18e9cc54a7442b893360cddfa8948ac184c039 | 471 | py | Python | homework/models.py | ymlihaa/Rscool | d8f509f9430dec48413bfb1273a1244baca8db19 | [
"MIT"
] | null | null | null | homework/models.py | ymlihaa/Rscool | d8f509f9430dec48413bfb1273a1244baca8db19 | [
"MIT"
] | null | null | null | homework/models.py | ymlihaa/Rscool | d8f509f9430dec48413bfb1273a1244baca8db19 | [
"MIT"
] | null | null | null | from django.db import models
# Create your models here.
class HomeWork(models.Model):
    """A homework assignment with an attached file and hand-out/deadline dates."""

    # Short display name of the assignment.
    title = models.CharField(verbose_name='Title', max_length=200)
    # Assignment text (plain CharField, max 200 chars).
    content = models.CharField(max_length=200)
    # Uploaded attachment, stored under MEDIA_ROOT/homework/.
    # verbose_name is Turkish ("Ödev Dosyası" = homework file).
    upload = models.FileField(
        verbose_name='Ödev Dosyası', upload_to='homework')
    # Stamped automatically when the row is first created.
    created_date = models.DateTimeField(auto_now_add=True)
    # Presumably the due date; auto_now_add=False is the default and means
    # the value must be supplied explicitly -- confirm intended semantics.
    finish_date = models.DateTimeField(auto_now_add=False)

    def __str__(self):
        # Human-readable representation (e.g. in the Django admin).
        return self.title
4d5a4b64c275dad3759cd82834e5d16102193a69 | 13,344 | py | Python | vis_gui.py | Rootie/LEDCoCa | 094e6314adc576bc8633c8ab2d9eb2aaa711a483 | [
"MIT"
] | null | null | null | vis_gui.py | Rootie/LEDCoCa | 094e6314adc576bc8633c8ab2d9eb2aaa711a483 | [
"MIT"
] | null | null | null | vis_gui.py | Rootie/LEDCoCa | 094e6314adc576bc8633c8ab2d9eb2aaa711a483 | [
"MIT"
] | null | null | null | import glob
import numpy as np
import open3d as o3d
import open3d.visualization.gui as gui
import open3d.visualization.rendering as rendering
import os
import platform
import sys
import math
# True when running on macOS; the menubar is laid out differently there
# (see AppWindow.__init__).
isMacOS = (platform.system() == "Darwin")
class Settings:
    """Render/UI state shared between the settings panel and the 3D scene.

    Holds the mouse mode, background colour, axis visibility and the
    material applied to the displayed geometry.
    """

    # Name of the only shader profile this application uses.
    UNLIT = "defaultUnlit"

    def __init__(self):
        self.mouse_model = gui.SceneWidget.Controls.ROTATE_CAMERA
        self.bg_color = gui.Color(1, 1, 1)
        self.show_axes = True
        self.apply_material = True  # clear to False after processing
        self._materials = {
            Settings.UNLIT: rendering.MaterialRecord(),
        }
        self._materials[Settings.UNLIT].base_color = [1, 1, 1, 1.0]
        self._materials[Settings.UNLIT].shader = Settings.UNLIT
        # Conveniently, assigning from self._materials[...] assigns a reference,
        # not a copy, so if we change the property of a material, then switch
        # to another one, then come back, the old setting will still be there.
        self.material = self._materials[Settings.UNLIT]

    def set_material(self, name):
        # Switch the active material and flag it for re-application.
        self.material = self._materials[name]
        self.apply_material = True

    def apply_material_prefab(self, name):
        # NOTE(review): Settings.LIT and Settings.PREFAB are not defined
        # anywhere in this class, so calling this raises AttributeError.
        # Looks like a leftover from the Open3D visualization example --
        # confirm and remove if unused.
        assert (self.material.shader == Settings.LIT)
        prefab = Settings.PREFAB[name]
        for key, val in prefab.items():
            setattr(self.material, "base_" + key, val)

    def apply_lighting_profile(self, name):
        # NOTE(review): Settings.LIGHTING_PROFILES is likewise not defined
        # in this class; this method raises AttributeError if ever called.
        profile = Settings.LIGHTING_PROFILES[name]
        for key, val in profile.items():
            setattr(self, key, val)
class AppWindow:
    """Main application window: 3D point-cloud view plus a settings panel."""

    # Menu item ids; the macOS menubar is global, so ids must be unique.
    MENU_QUIT = 3
    MENU_ABOUT = 21

    def __init__(self, width, height):
        """Build the window: scene widget, settings panel, layout and menu."""
        self.settings = Settings()
        # Current model rotation around each axis, in radians.
        self.rot_x = 0
        self.rot_y = 0
        self.rot_z = 0

        self.window = gui.Application.instance.create_window(
            "LED CoCa", width, height)
        w = self.window  # to make the code more concise

        # 3D widget
        self._scene = gui.SceneWidget()
        self._scene.scene = rendering.Open3DScene(w.renderer)

        # ---- Settings panel ----
        # Rather than specifying sizes in pixels, which may vary in size based
        # on the monitor, especially on macOS which has 220 dpi monitors, use
        # the em-size. This way sizings will be proportional to the font size,
        # which will create a more visually consistent size across platforms.
        em = w.theme.font_size
        separation_height = int(round(0.5 * em))

        self._settings_panel = gui.Vert(
            0, gui.Margins(0.25 * em, 0.25 * em, 0.25 * em, 0.25 * em))

        # Sliders (0-360 degrees) for rotating the model around each axis.
        rotations = gui.CollapsableVert("Model rotation", 0,
                                        gui.Margins(em, 0, 0, 0))
        self._rot_x_slider = gui.Slider(gui.Slider.INT)
        self._rot_x_slider.set_limits(0, 360)
        self._rot_x_slider.set_on_value_changed(self._on_rot_x)
        self._rot_y_slider = gui.Slider(gui.Slider.INT)
        self._rot_y_slider.set_limits(0, 360)
        self._rot_y_slider.set_on_value_changed(self._on_rot_y)
        self._rot_z_slider = gui.Slider(gui.Slider.INT)
        self._rot_z_slider.set_limits(0, 360)
        self._rot_z_slider.set_on_value_changed(self._on_rot_z)
        grid = gui.VGrid(2, 0.25 * em)
        grid.add_child(gui.Label("X"))
        grid.add_child(self._rot_x_slider)
        grid.add_child(gui.Label("Y"))
        grid.add_child(self._rot_y_slider)
        grid.add_child(gui.Label("Z"))
        grid.add_child(self._rot_z_slider)
        rotations.add_child(grid)
        self._settings_panel.add_child(rotations)

        # Camera / view controls.
        view_ctrls = gui.CollapsableVert("View controls", 0.25 * em,
                                         gui.Margins(em, 0, 0, 0))
        self._arcball_button = gui.Button("Arcball")
        self._arcball_button.horizontal_padding_em = 0.5
        self._arcball_button.vertical_padding_em = 0
        self._arcball_button.set_on_clicked(self._set_mouse_mode_rotate)
        self._fly_button = gui.Button("Fly")
        self._fly_button.horizontal_padding_em = 0.5
        self._fly_button.vertical_padding_em = 0
        self._fly_button.set_on_clicked(self._set_mouse_mode_fly)
        view_ctrls.add_child(gui.Label("Mouse controls"))
        h = gui.Horiz(0.25 * em)
        h.add_stretch()
        h.add_child(self._arcball_button)
        h.add_child(self._fly_button)
        h.add_stretch()
        view_ctrls.add_child(h)
        self._bg_color = gui.ColorEdit()
        self._bg_color.set_on_value_changed(self._on_bg_color)
        grid = gui.VGrid(2, 0.25 * em)
        grid.add_child(gui.Label("BG Color"))
        grid.add_child(self._bg_color)
        view_ctrls.add_child(grid)
        self._show_axes = gui.Checkbox("Show axes")
        self._show_axes.set_on_checked(self._on_show_axes)
        view_ctrls.add_fixed(separation_height)
        view_ctrls.add_child(self._show_axes)
        self._settings_panel.add_child(view_ctrls)

        # Material (colour / point size) controls.
        material_settings = gui.CollapsableVert("Material settings", 0,
                                                gui.Margins(em, 0, 0, 0))
        self._material_color = gui.ColorEdit()
        self._material_color.set_on_value_changed(self._on_material_color)
        self._point_size = gui.Slider(gui.Slider.INT)
        self._point_size.set_limits(1, 10)
        self._point_size.set_on_value_changed(self._on_point_size)
        grid = gui.VGrid(2, 0.25 * em)
        grid.add_child(gui.Label("Color"))
        grid.add_child(self._material_color)
        grid.add_child(gui.Label("Point size"))
        grid.add_child(self._point_size)
        material_settings.add_child(grid)
        self._settings_panel.add_fixed(separation_height)
        self._settings_panel.add_child(material_settings)
        # ----

        # Normally our user interface can be children of all one layout (usually
        # a vertical layout), which is then the only child of the window. In our
        # case we want the scene to take up all the space and the settings panel
        # to go above it. We can do this custom layout by providing an on_layout
        # callback. The on_layout callback should set the frame
        # (position + size) of every child correctly. After the callback is
        # done the window will layout the grandchildren.
        w.set_on_layout(self._on_layout)
        w.add_child(self._scene)
        w.add_child(self._settings_panel)

        # ---- Menu ----
        # The menu is global (because the macOS menu is global), so only create
        # it once, no matter how many windows are created
        if gui.Application.instance.menubar is None:
            if isMacOS:
                app_menu = gui.Menu()
                app_menu.add_item("About", AppWindow.MENU_ABOUT)
                app_menu.add_separator()
                app_menu.add_item("Quit", AppWindow.MENU_QUIT)
            file_menu = gui.Menu()
            if not isMacOS:
                #file_menu.add_separator()
                file_menu.add_item("Quit", AppWindow.MENU_QUIT)
            help_menu = gui.Menu()
            help_menu.add_item("About", AppWindow.MENU_ABOUT)
            menu = gui.Menu()
            if isMacOS:
                # macOS will name the first menu item for the running application
                # (in our case, probably "Python"), regardless of what we call
                # it. This is the application menu, and it is where the
                # About..., Preferences..., and Quit menu items typically go.
                menu.add_menu("Example", app_menu)
                menu.add_menu("File", file_menu)
                # Don't include help menu unless it has something more than
                # About...
            else:
                menu.add_menu("File", file_menu)
                menu.add_menu("Help", help_menu)
            gui.Application.instance.menubar = menu

        # The menubar is global, but we need to connect the menu items to the
        # window, so that the window can call the appropriate function when the
        # menu item is activated.
        w.set_on_menu_item_activated(AppWindow.MENU_QUIT, self._on_menu_quit)
        w.set_on_menu_item_activated(AppWindow.MENU_ABOUT, self._on_menu_about)
        # ----

        self._apply_settings()

    def _apply_settings(self):
        """Push the current Settings values into the scene and the widgets."""
        bg_color = [
            self.settings.bg_color.red, self.settings.bg_color.green,
            self.settings.bg_color.blue, self.settings.bg_color.alpha
        ]
        self._scene.scene.set_background(bg_color)
        self._scene.scene.show_skybox(False)
        self._scene.scene.show_axes(self.settings.show_axes)
        # Re-applying the material is expensive, so only do it when flagged.
        if self.settings.apply_material:
            self._scene.scene.update_material(self.settings.material)
            self.settings.apply_material = False
        self._bg_color.color_value = self.settings.bg_color
        self._show_axes.checked = self.settings.show_axes
        c = gui.Color(self.settings.material.base_color[0],
                      self.settings.material.base_color[1],
                      self.settings.material.base_color[2],
                      self.settings.material.base_color[3])
        self._material_color.color_value = c
        self._point_size.double_value = self.settings.material.point_size

    def _on_layout(self, layout_context):
        """Size the scene to the full window, pin the panel to the right edge."""
        # The on_layout callback should set the frame (position + size) of every
        # child correctly. After the callback is done the window will layout
        # the grandchildren.
        r = self.window.content_rect
        self._scene.frame = r
        width = 17 * layout_context.theme.font_size
        height = min(
            r.height,
            self._settings_panel.calc_preferred_size(
                layout_context, gui.Widget.Constraints()).height)
        self._settings_panel.frame = gui.Rect(r.get_right() - width, r.y, width,
                                              height)

    def _set_mouse_mode_rotate(self):
        self._scene.set_view_controls(gui.SceneWidget.Controls.ROTATE_CAMERA)

    def _set_mouse_mode_fly(self):
        self._scene.set_view_controls(gui.SceneWidget.Controls.FLY)

    def _on_bg_color(self, new_color):
        self.settings.bg_color = new_color
        self._apply_settings()

    def _on_show_axes(self, show):
        self.settings.show_axes = show
        self._apply_settings()

    def _on_material_color(self, color):
        self.settings.material.base_color = [
            color.red, color.green, color.blue, color.alpha
        ]
        self.settings.apply_material = True
        self._apply_settings()

    def _update_rotation(self):
        """Rebuild the rotated point cloud and its connecting line strip."""
        # NOTE(review): this combined matrix is not the standard
        # Rz @ Ry @ Rx rotation composition (the off-diagonal terms lack
        # the usual sine/cosine products) -- verify the intended behaviour.
        R = np.matrix([
            [ math.cos(self.rot_y) * math.cos(self.rot_z), -math.sin(self.rot_z), math.sin(self.rot_y)],
            [ math.sin(self.rot_z), math.cos(self.rot_x) * math.cos(self.rot_z), -math.sin(self.rot_x)],
            [ -math.sin(self.rot_y), math.sin(self.rot_x), math.cos(self.rot_x) * math.cos(self.rot_y)]])
        new_points = np.array(self._points_3D * R)
        geometry = o3d.geometry.PointCloud()
        geometry.points = o3d.utility.Vector3dVector(new_points)
        self._geometry = geometry
        self._scene.scene.clear_geometry()
        self._scene.scene.add_geometry("__model__", geometry,
                                       self.settings.material)
        # Connect consecutive points with a colour gradient (green -> red)
        # so the strip order is visible.
        lines = []
        colors = []
        for i in range(len(geometry.points) - 1):
            lines.append([i, i+1])
            colors.append([i / len(geometry.points), 1-(i / len(geometry.points)), 0])
        line_set = o3d.geometry.LineSet()
        line_set.points = o3d.utility.Vector3dVector(geometry.points)
        line_set.lines = o3d.utility.Vector2iVector(lines)
        line_set.colors = o3d.utility.Vector3dVector(colors)
        self._scene.scene.add_geometry("__lines__", line_set,
                                       self.settings.material)

    def _on_rot_x(self, rot_x):
        # Slider reports degrees; internal state is radians.
        self.rot_x = rot_x * math.pi / 180
        self._update_rotation()

    def _on_rot_y(self, rot_y):
        self.rot_y = rot_y * math.pi / 180
        self._update_rotation()

    def _on_rot_z(self, rot_z):
        self.rot_z = rot_z * math.pi / 180
        self._update_rotation()

    def _on_point_size(self, size):
        self.settings.material.point_size = int(size)
        self.settings.apply_material = True
        self._apply_settings()

    def _on_menu_quit(self):
        gui.Application.instance.quit()

    def _on_menu_about(self):
        """Show a minimal modal About dialog."""
        em = self.window.theme.font_size
        dlg = gui.Dialog("About")
        dlg_layout = gui.Vert(em, gui.Margins(em, em, em, em))
        dlg_layout.add_child(gui.Label("LED CoCa 0.1"))
        ok = gui.Button("OK")
        ok.set_on_clicked(self._on_about_ok)
        h = gui.Horiz()
        h.add_stretch()
        h.add_child(ok)
        h.add_stretch()
        dlg_layout.add_child(h)
        dlg.add_child(dlg_layout)
        self.window.show_dialog(dlg)

    def _on_about_ok(self):
        self.window.close_dialog()

    def set_points(self, points_3D):
        """Install a new point set and frame the camera on it.

        Exceptions are printed rather than raised so a bad point set does
        not take down the GUI.
        """
        self._points_3D = points_3D
        try:
            self._update_rotation()
            bounds = self._geometry.get_axis_aligned_bounding_box()
            self._scene.setup_camera(60, bounds, bounds.get_center())
        except Exception as e:
            print(e)
| 38.678261 | 105 | 0.628447 |
8e568de070354c267048fdac6c7a9323a98e4b43 | 2,796 | py | Python | Chapter16/dcn.py | PacktPublishing/Machine-Learning-Algorithms-Second-Edition | 2ddacea1c9f81b4ef9a0a51c4230687350afba6c | [
"MIT"
] | 50 | 2018-08-13T13:11:04.000Z | 2022-02-17T23:00:20.000Z | Chapter16/dcn.py | srikanthlakkoju/Machine-Learning-Algorithms-Second-Edition | b25d3607e9d5cc388bcf5f1a029bae39bb2b837b | [
"MIT"
] | null | null | null | Chapter16/dcn.py | srikanthlakkoju/Machine-Learning-Algorithms-Second-Edition | b25d3607e9d5cc388bcf5f1a029bae39bb2b837b | [
"MIT"
] | 29 | 2018-06-08T10:56:40.000Z | 2022-02-19T06:26:23.000Z | from __future__ import print_function
import matplotlib.pyplot as plt
import numpy as np
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout, Conv2D, AveragePooling2D, Flatten
from keras.optimizers import Adam
from keras.utils import to_categorical
# Set random seed for reproducibility
np.random.seed(1000)

if __name__ == '__main__':
    # Load the dataset (MNIST: 28x28 grayscale digits, 10 classes).
    (X_train, Y_train), (X_test, Y_test) = mnist.load_data()

    width = height = X_train.shape[1]

    # Add a channel axis and scale pixel values to [0, 1].
    X_train = X_train.reshape((X_train.shape[0], width, height, 1)).astype(np.float32) / 255.0
    X_test = X_test.reshape((X_test.shape[0], width, height, 1)).astype(np.float32) / 255.0

    # One-hot encode the labels.
    Y_train = to_categorical(Y_train, num_classes=10)
    Y_test = to_categorical(Y_test, num_classes=10)

    # Create the model: three conv stages with heavy dropout, average
    # pooling between stages, then a dense classifier head.
    model = Sequential()

    model.add(Dropout(0.25, input_shape=(width, height, 1), seed=1000))
    model.add(Conv2D(16, kernel_size=(3, 3), padding='same'))
    model.add(Activation('relu'))
    model.add(Dropout(0.5, seed=1000))
    model.add(Conv2D(16, kernel_size=(3, 3), padding='same'))
    model.add(Activation('relu'))
    model.add(Dropout(0.5, seed=1000))
    model.add(AveragePooling2D(pool_size=(2, 2), padding='same'))
    model.add(Conv2D(32, kernel_size=(3, 3), padding='same'))
    model.add(Activation('relu'))
    model.add(AveragePooling2D(pool_size=(2, 2), padding='same'))
    model.add(Conv2D(64, kernel_size=(3, 3), padding='same'))
    model.add(Activation('relu'))
    model.add(Dropout(0.5, seed=1000))
    model.add(AveragePooling2D(pool_size=(2, 2), padding='same'))
    model.add(Flatten())
    model.add(Dense(512))
    model.add(Activation('relu'))
    model.add(Dropout(0.5, seed=1000))
    model.add(Dense(10))
    model.add(Activation('softmax'))

    # Compile the model.
    # NOTE(review): `lr=`/`decay=` are the legacy Keras spellings
    # (newer releases use `learning_rate=`) -- confirm the pinned version.
    model.compile(optimizer=Adam(lr=0.001, decay=1e-5),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    history = model.fit(X_train, Y_train,
                        epochs=200,
                        batch_size=256,
                        validation_data=(X_test, Y_test))

    # Show the results
    fig, ax = plt.subplots(1, 2, figsize=(18, 6))

    # NOTE(review): the 'acc'/'val_acc' history keys are the old Keras
    # names; newer versions emit 'accuracy'/'val_accuracy' -- confirm.
    ax[0].plot(history.history['acc'], label='Training accuracy')
    ax[0].plot(history.history['val_acc'], label='Validation accuracy')
    ax[0].set_xlabel('Epoch')
    ax[0].set_ylabel('Accuracy')
    ax[0].legend()
    ax[0].grid()

    ax[1].plot(history.history['loss'], label='Training loss')
    ax[1].plot(history.history['val_loss'], label='Validation loss')
    ax[1].set_xlabel('Epoch')
    ax[1].set_ylabel('Loss')
    ax[1].set_yticks(np.linspace(0.0, 1.0, 10))
    ax[1].legend()
    ax[1].grid()

    plt.show()
343b1dafadf170756afb4deeb50cf9cea09ee780 | 9,505 | py | Python | gbpservice/contrib/tests/unit/nfp/configurator/advanced_controller/test_controller.py | ansao-aci/group-based-policy | d80a94dcb51bfce6994cd18339d3c79a7cb54bfe | [
"Apache-2.0"
] | null | null | null | gbpservice/contrib/tests/unit/nfp/configurator/advanced_controller/test_controller.py | ansao-aci/group-based-policy | d80a94dcb51bfce6994cd18339d3c79a7cb54bfe | [
"Apache-2.0"
] | null | null | null | gbpservice/contrib/tests/unit/nfp/configurator/advanced_controller/test_controller.py | ansao-aci/group-based-policy | d80a94dcb51bfce6994cd18339d3c79a7cb54bfe | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import zlib
import mock
import oslo_serialization.jsonutils as jsonutils
import pecan
import pika
# Pecan must be configured BEFORE the application modules are imported,
# which is why these imports are deliberately interleaved with setup
# statements instead of living at the top of the file.
PECAN_CONFIG_FILE = (os.getcwd() +
                     "/gbpservice/nfp/pecan/api/config.py")
pecan.set_config(PECAN_CONFIG_FILE, overwrite=True)

import webtest

from neutron.tests import base
from pecan import rest

from gbpservice.nfp.pecan import constants

# Select the 'advanced' controller variant before importing it.
setattr(pecan, 'mode', constants.advanced)

from gbpservice.contrib.nfp.configurator.advanced_controller import controller
from gbpservice.nfp.pecan.api import root_controller

import six
if six.PY3:
    # reload() is a builtin on PY2 but lives in importlib on PY3.
    from importlib import reload
# Re-import root_controller so it picks up the pecan mode set above.
reload(root_controller)

# Stub out RabbitMQ connections so no broker is needed while testing.
pika.BlockingConnection = mock.MagicMock(return_value=None)
class ControllerTestCase(base.BaseTestCase, rest.RestController):
"""
This class contains all the unittest cases for REST server of configurator.
This class tests success and failure cases for all the HTTP requests which
are implemented in REST server.
"""
@classmethod
def setUpClass(cls):
"""A class method called before tests in an individual class run
"""
rootController = root_controller.RootController()
ControllerTestCase.app = webtest.TestApp(
pecan.make_app(rootController))
ControllerTestCase.data = {'info': {'service_type': 'firewall',
'service_vendor': 'vyos',
'context': {}},
'config': [{'resource': 'firewall',
'resource_data': {}}]
}
def test_get_notifications(self):
"""Tests HTTP get request get_notifications.
Returns: none
"""
with mock.patch.object(
controller.RMQConsumer, 'pull_notifications') as mock_pn:
response = self.app.get('/v1/nfp/get_notifications')
mock_pn.assert_called_with()
self.assertEqual(response.status_code, 200)
def test_post_create_network_function_device_config(self):
"""Tests HTTP post request create_network_function_device_config.
Returns: none
"""
with mock.patch.object(
controller.RPCClient, 'cast') as rpc_mock:
response = self.app.post(
'/v1/nfp/create_network_function_device_config',
zlib.compress(jsonutils.dumps(self.data)),
content_type='application/octet-stream')
rpc_mock.assert_called_with(
'create_network_function_device_config', self.data)
self.assertEqual(response.status_code, 200)
def test_post_create_network_function_config(self):
"""Tests HTTP post request create_network_function_config.
Returns: none
"""
with mock.patch.object(
controller.RPCClient, 'cast') as rpc_mock:
response = self.app.post(
'/v1/nfp/create_network_function_config',
zlib.compress(jsonutils.dumps(self.data)),
content_type='application/octet-stream')
rpc_mock.assert_called_with(
'create_network_function_config', self.data)
self.assertEqual(response.status_code, 200)
def test_post_delete_network_function_device_config(self):
"""Tests HTTP post request delete_network_function_device_config.
Returns: none
"""
with mock.patch.object(
controller.RPCClient, 'cast') as rpc_mock:
response = self.app.post(
'/v1/nfp/delete_network_function_device_config',
zlib.compress(jsonutils.dumps(self.data)),
content_type='application/octet-stream')
rpc_mock.assert_called_with(
'delete_network_function_device_config', self.data)
self.assertEqual(response.status_code, 200)
def test_post_delete_network_function_config(self):
"""Tests HTTP post request delete_network_function_config.
Returns: none
"""
with mock.patch.object(
controller.RPCClient, 'cast') as rpc_mock:
response = self.app.post(
'/v1/nfp/delete_network_function_config',
zlib.compress(jsonutils.dumps(self.data)),
content_type='application/octet-stream')
rpc_mock.assert_called_with(
'delete_network_function_config', self.data)
self.assertEqual(response.status_code, 200)
def test_put_update_network_function_device_config(self):
"""Tests HTTP put request update_network_function_device_config.
Returns: none
"""
with mock.patch.object(
controller.RPCClient, 'cast') as rpc_mock:
response = self.app.put(
'/v1/nfp/update_network_function_device_config',
zlib.compress(jsonutils.dumps(self.data)),
content_type='application/octet-stream')
rpc_mock.assert_called_with(
'update_network_function_device_config', self.data)
self.assertEqual(response.status_code, 200)
def test_put_update_network_function_config(self):
"""Tests HTTP put request update_network_function_config.
Returns: none
"""
with mock.patch.object(
controller.RPCClient, 'cast') as rpc_mock:
response = self.app.put(
'/v1/nfp/update_network_function_config',
zlib.compress(jsonutils.dumps(self.data)),
content_type='application/octet-stream')
rpc_mock.assert_called_with(
'update_network_function_config', self.data)
self.assertEqual(response.status_code, 200)
def test_post_create_network_function_device_config_fail(self):
"""Tests failure case of HTTP post request
create_network_function_device_config
Returns: none
"""
with mock.patch.object(
controller.RPCClient, 'cast') as rpc_mock:
rpc_mock.return_value = Exception
response = self.app.post(
'/v1/nfp/create_network_function_device_config',
expect_errors=True)
self.assertEqual(response.status_code, 400)
def test_post_create_network_function_config_fail(self):
"""Tests failure case of HTTP post request
create_network_function_config
Returns: none
"""
with mock.patch.object(
controller.RPCClient, 'cast') as rpc_mock:
rpc_mock.return_value = Exception
response = self.app.post(
'/v1/nfp/create_network_function_config',
expect_errors=True)
self.assertEqual(response.status_code, 400)
def test_post_delete_network_function_device_config_fail(self):
"""Tests failure case of HTTP post request
delete_network_function_device_config
Returns: none
"""
with mock.patch.object(
controller.RPCClient, 'cast') as rpc_mock:
rpc_mock.return_value = Exception
response = self.app.post(
'/v1/nfp/delete_network_function_device_config',
expect_errors=True)
self.assertEqual(response.status_code, 400)
def test_post_delete_network_function_config_fail(self):
"""Tests failure case of HTTP post request
delete_network_function_config
Returns: none
"""
with mock.patch.object(
controller.RPCClient, 'cast') as rpc_mock:
rpc_mock.return_value = Exception
response = self.app.post(
'/v1/nfp/delete_network_function_config',
expect_errors=True)
self.assertEqual(response.status_code, 400)
def test_put_update_network_function_device_config_fail(self):
    """Tests failure case of HTTP put request
    update_network_function_device_config
    Returns: none
    """
    # NOTE(review): the method name and docstring say "put", but the
    # request below is a POST — presumably intentional to provoke the 400;
    # verify against the controller's route definitions.
    with mock.patch.object(
            controller.RPCClient, 'cast') as rpc_mock:
        rpc_mock.return_value = Exception
        response = self.app.post(
            '/v1/nfp/update_network_function_device_config',
            expect_errors=True)
        self.assertEqual(response.status_code, 400)
def test_put_update_network_function_config_fail(self):
    """Tests failure case of HTTP put request
    update_network_function_config
    Returns: none
    """
    # NOTE(review): name says "put" but the request is a POST — presumably
    # intentional to provoke the 400; confirm against the route definitions.
    with mock.patch.object(
            controller.RPCClient, 'cast') as rpc_mock:
        rpc_mock.return_value = Exception
        response = self.app.post(
            '/v1/nfp/update_network_function_config',
            expect_errors=True)
        self.assertEqual(response.status_code, 400)
| 34.314079 | 79 | 0.634824 |
aa0de8112b5a4c447b4d6f8ba2c457031112a195 | 859 | py | Python | ports/esp32/modules/neopixel.py | sebastien-riou/micropython | 116c15842fd48ddb77b0bc016341d936a0756573 | [
"MIT"
] | 663 | 2018-12-30T00:17:59.000Z | 2022-03-14T05:03:41.000Z | ports/esp32/modules/neopixel.py | sebastien-riou/micropython | 116c15842fd48ddb77b0bc016341d936a0756573 | [
"MIT"
] | 176 | 2020-10-18T14:31:03.000Z | 2022-03-30T23:22:39.000Z | ports/esp32/modules/neopixel.py | sebastien-riou/micropython | 116c15842fd48ddb77b0bc016341d936a0756573 | [
"MIT"
] | 60 | 2019-06-01T04:25:00.000Z | 2022-02-25T01:47:31.000Z | # NeoPixel driver for MicroPython on ESP32
# MIT license; Copyright (c) 2016 Damien P. George
from esp import neopixel_write
class NeoPixel:
    """Driver for NeoPixel (WS2812-style) LED strips on the ESP32.

    Pixel state lives in a flat ``bytearray`` holding ``bpp`` bytes per
    pixel; :meth:`write` pushes the whole buffer to the strip via the
    low-level ``esp.neopixel_write`` routine.
    """

    # Maps tuple position (R, G, B[, W]) to byte position in the buffer —
    # NeoPixels expect green before red on the wire.
    ORDER = (1, 0, 2, 3)

    def __init__(self, pin, n, bpp=3, timing=1):
        self.pin = pin
        self.n = n
        self.bpp = bpp
        self.buf = bytearray(n * bpp)
        self.pin.init(pin.OUT)
        self.timing = timing

    def __setitem__(self, index, val):
        base = index * self.bpp
        for channel in range(self.bpp):
            self.buf[base + self.ORDER[channel]] = val[channel]

    def __getitem__(self, index):
        base = index * self.bpp
        return tuple(self.buf[base + self.ORDER[channel]]
                     for channel in range(self.bpp))

    def fill(self, color):
        # Assign every pixel through __setitem__ so byte order is applied.
        for pixel in range(self.n):
            self[pixel] = color

    def write(self):
        neopixel_write(self.pin, self.buf, self.timing)
c108c72d978d3769eee9ea8864587467b6c27aa6 | 1,393 | py | Python | tests/test_resource_tracker/test_api/test_resource_pool_api_views/test_delete.py | LaudateCorpus1/squest | 98304f20c1d966fb3678d348ffd7c5be438bb6be | [
"Apache-2.0"
] | 112 | 2021-04-21T08:52:55.000Z | 2022-03-01T15:09:19.000Z | tests/test_resource_tracker/test_api/test_resource_pool_api_views/test_delete.py | LaudateCorpus1/squest | 98304f20c1d966fb3678d348ffd7c5be438bb6be | [
"Apache-2.0"
] | 216 | 2021-04-21T09:06:47.000Z | 2022-03-30T14:21:28.000Z | tests/test_resource_tracker/test_api/test_resource_pool_api_views/test_delete.py | LaudateCorpus1/squest | 98304f20c1d966fb3678d348ffd7c5be438bb6be | [
"Apache-2.0"
] | 21 | 2021-04-20T13:53:54.000Z | 2022-03-30T21:43:04.000Z | from rest_framework import status
from rest_framework.reverse import reverse
from resource_tracker.models import ResourcePool
from tests.test_resource_tracker.test_api.base_test_api import BaseTestAPI
class TestResourcePoolDelete(BaseTestAPI):
    """API tests for the resource-pool delete endpoint."""

    def setUp(self):
        super(TestResourcePoolDelete, self).setUp()
        # Target an existing pool (from the base fixtures) for deletion.
        self.resource_to_delete_id = self.rp_vcenter.id
        self.url = reverse('api_resource_pool_details', args=[self.resource_to_delete_id])

    def test_delete_resource_group(self):
        """Deleting an existing pool returns 204 and removes exactly one row."""
        count_before = ResourcePool.objects.all().count()
        response = self.client.delete(self.url, format='json')
        self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
        self.assertEqual(count_before - 1, ResourcePool.objects.all().count())
        # The targeted row itself must be gone, not just any row.
        self.assertFalse(ResourcePool.objects.filter(id=self.resource_to_delete_id).exists())

    def test_delete_non_existing_resource_pool(self):
        """Deleting an unknown id returns 404 and leaves the table untouched."""
        missing_url = reverse('api_resource_pool_details', args=[999999])
        count_before = ResourcePool.objects.all().count()
        response = self.client.delete(missing_url, format='json')
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
        self.assertEqual(count_before, ResourcePool.objects.all().count())
cd423963a968077d069c86d0b904dd059772319c | 11,764 | py | Python | tools/processing_tool_datasets_merger/lib_data_io_nc.py | c-hydro/hmc | 66470234e126e4a727e1faf2fe64bd58547220ed | [
"MIT"
] | null | null | null | tools/processing_tool_datasets_merger/lib_data_io_nc.py | c-hydro/hmc | 66470234e126e4a727e1faf2fe64bd58547220ed | [
"MIT"
] | 2 | 2021-07-08T13:10:33.000Z | 2021-07-08T13:11:33.000Z | tools/processing_tool_datasets_merger/lib_data_io_nc.py | c-hydro/hmc | 66470234e126e4a727e1faf2fe64bd58547220ed | [
"MIT"
] | null | null | null | """
Class Features
Name: lib_data_io_nc
Author(s): Fabio Delogu (fabio.delogu@cimafoundation.org)
Date: '20210408'
Version: '1.0.0'
"""
#######################################################################################
# Libraries
import logging
import os
import time
import numpy as np
import xarray as xr
import pandas as pd
from copy import deepcopy
from netCDF4 import Dataset
from tools.processing_tool_datasets_merger.lib_info_args import logger_name
# Logging
log_stream = logging.getLogger(logger_name)
# Debug
# import matplotlib.pylab as plt
#######################################################################################
# -------------------------------------------------------------------------------------
# Method to write data netcdf
def write_data_nc(file_name, file_obj_data, file_obj_dims_def, file_obj_dims_list,
                  file_obj_attrs, file_format='NETCDF4_CLASSIC'):
    """Write 2D/3D gridded variables to a netCDF file.

    Args:
        file_name: path of the netCDF file to create.
        file_obj_data: dict of variable name -> numpy array (2D or 3D).
        file_obj_dims_def: dict of dimension name -> dimension size.
        file_obj_dims_list: dict of variable name -> list of its dimension
            names, ordered [y, x] or [y, x, time].
        file_obj_attrs: dict of global attributes to set on the file.
        file_format: netCDF4 file format string.
    """
    # Open file handle
    file_handle = Dataset(file_name, 'w', format=file_format)

    # Declare the file dimension(s).
    # BUG FIX: the original checked "dim_key not in list(file_handle.dimensions.items())",
    # which compares a name against (name, Dimension) tuples and is always True;
    # membership must be tested against the dimension names.
    for dim_key, dim_value in file_obj_dims_def.items():
        if dim_key not in file_handle.dimensions:
            file_handle.createDimension(dim_key, dim_value)

    # Set global file attribute(s) plus a creation timestamp.
    for attr_key, attr_value in file_obj_attrs.items():
        file_handle.setncattr(attr_key, attr_value)
    file_handle.filedate = 'Created ' + time.ctime(time.time())

    for data_key, data_values in file_obj_data.items():
        if data_key in list(file_obj_dims_list.keys()):
            dim_values = file_obj_dims_list[data_key]
            if data_values.ndim == 2:
                data_dim_x = dim_values[1]
                data_dim_y = dim_values[0]
                file_var = file_handle.createVariable(data_key, np.float32,
                                                      (data_dim_y, data_dim_x,), zlib=True)
                # Re-orient from the in-memory map layout to the file layout.
                file_var[:, :] = np.transpose(np.rot90(data_values, -1))
            elif data_values.ndim == 3:
                data_dim_x = dim_values[1]
                data_dim_y = dim_values[0]
                data_dim_time = dim_values[2]
                file_var = file_handle.createVariable(data_key, np.float32,
                                                      (data_dim_time, data_dim_y, data_dim_x,), zlib=True)
                # Same re-orientation; time ends up as the leading axis.
                file_var[:, :, :] = np.transpose(np.rot90(data_values, -1))
            else:
                log_stream.warning(' ===> Datasets dimensions for ' +
                                   data_key + ' is not allowed. Only 2D or 3D arrays are implemented.')

    file_handle.close()
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to read data netcdf
def read_data_nc(file_name, geo_ref_x=None, geo_ref_y=None, geo_ref_attrs=None,
                 var_coords=None, var_scale_factor=1, var_name=None, var_time=None, var_no_data=-9999.0,
                 coord_name_time='time', coord_name_geo_x='Longitude', coord_name_geo_y='Latitude',
                 dim_name_time='time', dim_name_geo_x='west_east', dim_name_geo_y='south_north',
                 dims_order=None, decimal_round=4):
    """Read one or more variables from a netCDF file into an xarray Dataset.

    Each variable is rescaled by its scale factor, has its no-data value
    replaced with NaN, is flipped north-up if needed, and is optionally
    checked against a reference grid (geo_ref_x / geo_ref_y).

    Args:
        file_name: path of the netCDF file to read.
        geo_ref_x, geo_ref_y: optional reference 1D coordinate arrays used
            to verify the file grid corners (to ``decimal_round`` digits).
        geo_ref_attrs: optional attrs merged with the file's global attrs.
        var_coords: mapping of logical keys ('x', 'y', 'time') to the
            coordinate names used inside the file.
        var_scale_factor, var_name, var_no_data: scalars or lists (one
            entry per variable); lists must all have the same length.
        var_time: None, a pandas Timestamp, or a DatetimeIndex used to
            attach a time coordinate to the output.
        Remaining arguments name the output coords/dims.

    Returns:
        xarray.Dataset, or None when the file (or all variables) is missing.

    Raises:
        RuntimeError: when the per-variable lists have mismatched lengths.
        NotImplementedError: for unsupported shapes / time configurations.
        IOError: when a required coordinate cannot be found in the file.
    """
    if var_coords is None:
        var_coords = {'x': 'Longitude', 'y': 'Latitude', 'time': 'time'}
    if dims_order is None:
        dims_order = [dim_name_geo_y, dim_name_geo_x, dim_name_time]

    # Normalize per-variable arguments to parallel lists.
    if not isinstance(var_name, list):
        var_name = [var_name]
    if not isinstance(var_scale_factor, list):
        var_scale_factor = [var_scale_factor]
    if not isinstance(var_no_data, list):
        var_no_data = [var_no_data]

    if len(var_name) != len(var_scale_factor):
        raise RuntimeError('Variable name(s) and scale factor(s) must have the same dimension.')
    if len(var_name) != len(var_no_data):
        raise RuntimeError('Variable name(s) and no data value(s) must have the same dimension.')

    data_workspace, file_attrs = None, None
    if os.path.exists(file_name):

        # Open file nc
        file_handle = xr.open_dataset(file_name)
        file_attrs = file_handle.attrs
        file_variables = list(file_handle.variables)
        file_dims = list(file_handle.dims)
        file_coords = list(file_handle.coords)

        # Resolve the index of each requested coordinate (None when absent).
        idx_coords = {}
        for coord_key, coord_name in var_coords.items():
            if coord_name in file_coords:
                coord_idx = file_coords.index(coord_name)
            else:
                coord_idx = None
            idx_coords[coord_key] = coord_idx

        for var_name_step, var_scale_factor_step, var_no_data_step in zip(var_name, var_scale_factor, var_no_data):

            if data_workspace is None:
                data_workspace = {}
            data_workspace[var_name_step] = {}

            if var_name_step in file_variables:

                var_data = file_handle[var_name_step].values
                var_data = np.float32(var_data / var_scale_factor_step)
                var_data[var_data == var_no_data_step] = np.nan

                if 'time' in list(idx_coords.keys()):
                    if idx_coords['time'] is not None:
                        coord_name_time = file_coords[idx_coords['time']]
                        if file_handle[coord_name_time].size == 1:
                            # Single time step: make the data 3D if needed.
                            if len(var_data.shape) < len(file_coords):
                                var_data = var_data[:, :, np.newaxis]
                            elif len(var_data.shape) == len(file_coords):
                                pass
                            else:
                                # BUG FIX: was "raise NotImplemented(...)", which is a
                                # TypeError because NotImplemented is not callable.
                                raise NotImplementedError('File shape is greater than expected coords')
                        else:
                            raise NotImplementedError('Time size is greater than 1')
                    else:
                        if var_data.ndim > 2:
                            raise IOError('Coord name "time" is not available')

                # Locate the X coordinate ('x' or 'X' logical key).
                geo_data_x = None
                for step_coords_x in ['x', 'X']:
                    if step_coords_x in idx_coords:
                        coord_name_x = file_coords[idx_coords[step_coords_x]]
                        geo_data_x = file_handle[coord_name_x].values
                        break
                if geo_data_x is None:
                    raise IOError('Coord name "x" is not available or not defined')

                # Locate the Y coordinate ('y' or 'Y' logical key).
                geo_data_y = None
                for step_coords_y in ['y', 'Y']:
                    if step_coords_y in idx_coords:
                        coord_name_y = file_coords[idx_coords[step_coords_y]]
                        geo_data_y = file_handle[coord_name_y].values
                        break
                if geo_data_y is None:
                    raise IOError('Coord name "y" is not available or not defined')

                # Flip to a north-up orientation when latitudes ascend.
                # NOTE: geo_data_x / geo_data_y are indexed as 2D arrays here,
                # so the file is assumed to store 2D coordinate grids.
                geo_y_upper = geo_data_y[0, 0]
                geo_y_lower = geo_data_y[-1, 0]
                if geo_y_lower > geo_y_upper:
                    geo_data_y = np.flipud(geo_data_y)
                    var_data = np.flipud(var_data)

                if (geo_ref_x is not None) and (geo_ref_y is not None):
                    # Verify grid corners against the reference geography.
                    # NOTE(review): these use assert, which is stripped under
                    # "python -O"; kept for behavioral compatibility.
                    geo_check_x, geo_check_y = np.meshgrid(geo_ref_x, geo_ref_y)
                    geo_check_start_x = np.float32(round(geo_check_x[0, 0], decimal_round))
                    geo_check_start_y = np.float32(round(geo_check_y[0, 0], decimal_round))
                    geo_check_end_x = np.float32(round(geo_check_x[-1, -1], decimal_round))
                    geo_check_end_y = np.float32(round(geo_check_y[-1, -1], decimal_round))
                    geo_data_start_x = np.float32(round(geo_data_x[0, 0], decimal_round))
                    geo_data_start_y = np.float32(round(geo_data_y[0, 0], decimal_round))
                    geo_data_end_x = np.float32(round(geo_data_x[-1, -1], decimal_round))
                    geo_data_end_y = np.float32(round(geo_data_y[-1, -1], decimal_round))

                    assert geo_check_start_x == geo_data_start_x, ' ===> Variable geo x start != Reference geo x start'
                    assert geo_check_start_y == geo_data_start_y, ' ===> Variable geo y start != Reference geo y start'
                    assert geo_check_end_x == geo_data_end_x, ' ===> Variable geo x end != Reference geo x end'
                    assert geo_check_end_y == geo_data_end_y, ' ===> Variable geo y end != Reference geo y end'
                else:
                    log_stream.warning(' ===> GeoX and GeoY variables have not compared with a reference GeoX and GeoY')

                data_workspace[var_name_step] = var_data
            else:
                log_stream.warning(' ===> Variable ' + var_name_step + ' not available in loaded datasets!')
    else:
        log_stream.warning(' ===> File ' + file_name + ' not available in loaded datasets!')

    if data_workspace is not None:
        if var_time is not None:
            # Normalize the time argument to a DatetimeIndex.
            if isinstance(var_time, pd.Timestamp):
                var_data_time = pd.DatetimeIndex([var_time])
            elif isinstance(var_time, pd.DatetimeIndex):
                var_data_time = deepcopy(var_time)
            else:
                log_stream.error(' ===> Time format is not allowed. Expected Timestamp or Datetimeindex')
                # BUG FIX: was "raise NotImplemented(...)" (TypeError at runtime).
                raise NotImplementedError('Case not implemented yet')

            var_dset = xr.Dataset(coords={coord_name_time: ([dim_name_time], var_data_time)})
            var_dset.coords[coord_name_time] = var_dset.coords[coord_name_time].astype('datetime64[ns]')
            for var_name, var_data in data_workspace.items():
                var_da = xr.DataArray(var_data, name=var_name, dims=dims_order,
                                      coords={coord_name_time: ([dim_name_time], var_data_time),
                                              coord_name_geo_x: ([dim_name_geo_x], geo_data_x[0, :]),
                                              coord_name_geo_y: ([dim_name_geo_y], geo_data_y[:, 0])})
                var_dset[var_name] = var_da
        elif var_time is None:
            var_dset = xr.Dataset()
            for var_name, var_data in data_workspace.items():
                var_da = xr.DataArray(var_data, name=var_name, dims=dims_order,
                                      coords={coord_name_geo_x: ([dim_name_geo_x], geo_data_x[0, :]),
                                              coord_name_geo_y: ([dim_name_geo_y], geo_data_y[:, 0])})
                var_dset[var_name] = var_da
        else:
            # Unreachable: the two branches above cover all values of var_time.
            log_stream.error(' ===> Error in creating time information for dataset object')
            raise RuntimeError('Unknown error in creating dataset. Check the procedure.')

        # Merge attributes: file attrs, reference attrs, or both.
        if file_attrs and geo_ref_attrs:
            dset_attrs = {**file_attrs, **geo_ref_attrs}
        elif (not file_attrs) and geo_ref_attrs:
            dset_attrs = deepcopy(geo_ref_attrs)
        elif file_attrs and (not geo_ref_attrs):
            dset_attrs = deepcopy(file_attrs)
        else:
            dset_attrs = None

        if dset_attrs is not None:
            var_dset.attrs = dset_attrs
    else:
        log_stream.warning(' ===> All filenames in the selected period are not available')
        var_dset = None

    return var_dset
| 42.778182 | 120 | 0.554148 |
605012f8fc51998d3b60fc20189b2326d082cd28 | 10,278 | py | Python | tools/intogen/runtime/pyenv/lib/python2.7/site-packages/sphinx/builders/htmlhelp.py | globusgenomics/galaxy | 7caf74d9700057587b3e3434c64e82c5b16540f1 | [
"CC-BY-3.0"
] | 1 | 2021-02-05T13:19:58.000Z | 2021-02-05T13:19:58.000Z | tools/intogen/runtime/pyenv/lib/python2.7/site-packages/sphinx/builders/htmlhelp.py | globusgenomics/galaxy | 7caf74d9700057587b3e3434c64e82c5b16540f1 | [
"CC-BY-3.0"
] | null | null | null | tools/intogen/runtime/pyenv/lib/python2.7/site-packages/sphinx/builders/htmlhelp.py | globusgenomics/galaxy | 7caf74d9700057587b3e3434c64e82c5b16540f1 | [
"CC-BY-3.0"
] | 9 | 2015-08-26T19:59:06.000Z | 2022-03-07T17:10:06.000Z | # -*- coding: utf-8 -*-
"""
sphinx.builders.htmlhelp
~~~~~~~~~~~~~~~~~~~~~~~~
Build HTML help support files.
Parts adapted from Python's Doc/tools/prechm.py.
:copyright: Copyright 2007-2013 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import os
import codecs
from os import path
from docutils import nodes
from sphinx import addnodes
from sphinx.builders.html import StandaloneHTMLBuilder
from sphinx.util.pycompat import htmlescape
# Project file (*.hhp) template. 'outname' is the file basename (like
# the pythlp in pythlp.hhp); 'version' is the doc version number (like
# the 2.2 in Python 2.2).
# The magical numbers in the long line under [WINDOWS] set most of the
# user-visible features (visible buttons, tabs, etc).
# About 0x10384e: This defines the buttons in the help viewer. The
# following defns are taken from htmlhelp.h. Not all possibilities
# actually work, and not all those that work are available from the Help
# Workshop GUI. In particular, the Zoom/Font button works and is not
# available from the GUI. The ones we're using are marked with 'x':
#
# 0x000002 Hide/Show x
# 0x000004 Back x
# 0x000008 Forward x
# 0x000010 Stop
# 0x000020 Refresh
# 0x000040 Home x
# 0x000080 Forward
# 0x000100 Back
# 0x000200 Notes
# 0x000400 Contents
# 0x000800 Locate x
# 0x001000 Options x
# 0x002000 Print x
# 0x004000 Index
# 0x008000 Search
# 0x010000 History
# 0x020000 Favorites
# 0x040000 Jump 1
# 0x080000 Jump 2
# 0x100000 Zoom/Font x
# 0x200000 TOC Next
# 0x400000 TOC Prev
project_template = '''\
[OPTIONS]
Binary TOC=No
Binary Index=No
Compiled file=%(outname)s.chm
Contents file=%(outname)s.hhc
Default Window=%(outname)s
Default topic=index.html
Display compile progress=No
Full text search stop list file=%(outname)s.stp
Full-text search=Yes
Index file=%(outname)s.hhk
Language=%(lcid)#x
Title=%(title)s
[WINDOWS]
%(outname)s="%(title)s","%(outname)s.hhc","%(outname)s.hhk",\
"index.html","index.html",,,,,0x63520,220,0x10384e,[0,0,1024,768],,,,,,,0
[FILES]
'''
contents_header = '''\
<!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML//EN">
<HTML>
<HEAD>
<meta name="GENERATOR" content="Microsoft® HTML Help Workshop 4.1">
<!-- Sitemap 1.0 -->
</HEAD><BODY>
<OBJECT type="text/site properties">
<param name="Window Styles" value="0x801227">
<param name="ImageType" value="Folder">
</OBJECT>
<UL>
'''
contents_footer = '''\
</UL></BODY></HTML>
'''
object_sitemap = '''\
<OBJECT type="text/sitemap">
<param name="Name" value="%s">
<param name="Local" value="%s">
</OBJECT>
'''
# List of words the full text search facility shouldn't index. This
# becomes file outname.stp. Note that this list must be pretty small!
# Different versions of the MS docs claim the file has a maximum size of
# 256 or 512 bytes (including \r\n at the end of each line).
# Note that "and", "or", "not" and "near" are operators in the search
# language, so no point indexing them even if we wanted to.
stopwords = """
a and are as at
be but by
for
if in into is it
near no not
of on or
such
that the their then there these they this to
was will with
""".split()
# The following list includes only languages supported by Sphinx.
# See http://msdn.microsoft.com/en-us/library/ms930130.aspx for more.
chm_locales = {
# lang: LCID, encoding
'ca': (0x403, 'cp1252'),
'cs': (0x405, 'cp1250'),
'da': (0x406, 'cp1252'),
'de': (0x407, 'cp1252'),
'en': (0x409, 'cp1252'),
'es': (0x40a, 'cp1252'),
'et': (0x425, 'cp1257'),
'fa': (0x429, 'cp1256'),
'fi': (0x40b, 'cp1252'),
'fr': (0x40c, 'cp1252'),
'hr': (0x41a, 'cp1250'),
'hu': (0x40e, 'cp1250'),
'it': (0x410, 'cp1252'),
'ja': (0x411, 'cp932'),
'ko': (0x412, 'cp949'),
'lt': (0x427, 'cp1257'),
'lv': (0x426, 'cp1257'),
'nl': (0x413, 'cp1252'),
'no_NB': (0x414, 'cp1252'),
'pl': (0x415, 'cp1250'),
'pt_BR': (0x416, 'cp1252'),
'ru': (0x419, 'cp1251'),
'sk': (0x41b, 'cp1250'),
'sl': (0x424, 'cp1250'),
'sv': (0x41d, 'cp1252'),
'tr': (0x41f, 'cp1254'),
'uk_UA': (0x422, 'cp1251'),
'zh_CN': (0x804, 'cp936'),
'zh_TW': (0x404, 'cp950'),
}
class HTMLHelpBuilder(StandaloneHTMLBuilder):
    """
    Builder that also outputs Windows HTML help project, contents and
    index files.  Adapted from the original Doc/tools/prechm.py.

    NOTE(review): this is Python 2 code (``print >>f`` statements); it will
    not run on Python 3 as-is.
    """
    name = 'htmlhelp'

    # don't copy the reST source
    copysource = False
    supported_image_types = ['image/png', 'image/gif', 'image/jpeg']

    # don't add links
    add_permalinks = False
    # don't add sidebar etc.
    embedded = True

    # LCID / codepage defaults (English); overridden in init() from the
    # configured language via chm_locales.
    lcid = 0x409
    encoding = 'iso8859_1'

    def init(self):
        StandaloneHTMLBuilder.init(self)
        # the output files for HTML help must be .html only
        self.out_suffix = '.html'
        # determine the correct locale setting
        locale = chm_locales.get(self.config.language)
        if locale is not None:
            self.lcid, self.encoding = locale

    def open_file(self, outdir, basename, mode='w'):
        # open a file with the correct encoding for the selected language
        return codecs.open(path.join(outdir, basename), mode,
                           self.encoding, 'xmlcharrefreplace')

    def handle_finish(self):
        self.build_hhx(self.outdir, self.config.htmlhelp_basename)

    def build_hhx(self, outdir, outname):
        """Write the .stp, .hhp, .hhc and .hhk support files for *outname*."""
        self.info('dumping stopword list...')
        f = self.open_file(outdir, outname+'.stp')
        try:
            for word in sorted(stopwords):
                print >>f, word
        finally:
            f.close()

        self.info('writing project file...')
        f = self.open_file(outdir, outname+'.hhp')
        try:
            f.write(project_template % {'outname': outname,
                                        'title': self.config.html_title,
                                        'version': self.config.version,
                                        'project': self.config.project,
                                        'lcid': self.lcid})
            if not outdir.endswith(os.sep):
                outdir += os.sep
            olen = len(outdir)
            # List every generated .html file (and static .js) with paths
            # relative to outdir, using Windows-style separators.
            for root, dirs, files in os.walk(outdir):
                staticdir = root.startswith(path.join(outdir, '_static'))
                for fn in files:
                    if (staticdir and not fn.endswith('.js')) or \
                           fn.endswith('.html'):
                        print >>f, path.join(root, fn)[olen:].replace(os.sep,
                                                                      '\\')
        finally:
            f.close()

        self.info('writing TOC file...')
        f = self.open_file(outdir, outname+'.hhc')
        try:
            f.write(contents_header)
            # special books
            f.write('<LI> ' + object_sitemap % (self.config.html_short_title,
                                                'index.html'))
            for indexname, indexcls, content, collapse in self.domain_indices:
                f.write('<LI> ' + object_sitemap % (indexcls.localname,
                                                    '%s.html' % indexname))
            # the TOC
            tocdoc = self.env.get_and_resolve_doctree(
                self.config.master_doc, self, prune_toctrees=False)

            def write_toc(node, ullevel=0):
                # Recursively translate the docutils TOC tree into nested
                # <UL>/<LI>/sitemap OBJECT markup.
                if isinstance(node, nodes.list_item):
                    f.write('<LI> ')
                    for subnode in node:
                        write_toc(subnode, ullevel)
                elif isinstance(node, nodes.reference):
                    link = node['refuri']
                    title = htmlescape(node.astext()).replace('"','&quot;')
                    f.write(object_sitemap % (title, link))
                elif isinstance(node, nodes.bullet_list):
                    if ullevel != 0:
                        f.write('<UL>\n')
                    for subnode in node:
                        write_toc(subnode, ullevel+1)
                    if ullevel != 0:
                        f.write('</UL>\n')
                elif isinstance(node, addnodes.compact_paragraph):
                    for subnode in node:
                        write_toc(subnode, ullevel)

            def istoctree(node):
                return isinstance(node, addnodes.compact_paragraph) and \
                       node.has_key('toctree')

            for node in tocdoc.traverse(istoctree):
                write_toc(node)
            f.write(contents_footer)
        finally:
            f.close()

        self.info('writing index file...')
        index = self.env.create_index(self)
        f = self.open_file(outdir, outname+'.hhk')
        try:
            f.write('<UL>\n')

            def write_index(title, refs, subitems):
                # Emit one index entry; entries with several targets get
                # numbered "Name" params, entries with none become See-Also.
                def write_param(name, value):
                    item = '    <param name="%s" value="%s">\n' % \
                        (name, value)
                    f.write(item)
                title = htmlescape(title)
                f.write('<LI> <OBJECT type="text/sitemap">\n')
                write_param('Keyword', title)
                if len(refs) == 0:
                    write_param('See Also', title)
                elif len(refs) == 1:
                    write_param('Local', refs[0][1])
                else:
                    for i, ref in enumerate(refs):
                        # XXX: better title?
                        write_param('Name', '[%d] %s' % (i, ref[1]))
                        write_param('Local', ref[1])
                f.write('</OBJECT>\n')
                if subitems:
                    f.write('<UL> ')
                    for subitem in subitems:
                        write_index(subitem[0], subitem[1], [])
                    f.write('</UL>')

            for (key, group) in index:
                for title, (refs, subitems) in group:
                    write_index(title, refs, subitems)
            f.write('</UL>\n')
        finally:
            f.close()
c316b0b4e8af34e56566333f8720e0fec62956dd | 7,173 | py | Python | test/expected/python.asyncio/service_extension_same_file/f_Pinger.py | dustyholmes-wf/frugal | 915ccfc58fcc9baabc4549c522e3acd2975a2e0b | [
"Apache-2.0"
] | null | null | null | test/expected/python.asyncio/service_extension_same_file/f_Pinger.py | dustyholmes-wf/frugal | 915ccfc58fcc9baabc4549c522e3acd2975a2e0b | [
"Apache-2.0"
] | null | null | null | test/expected/python.asyncio/service_extension_same_file/f_Pinger.py | dustyholmes-wf/frugal | 915ccfc58fcc9baabc4549c522e3acd2975a2e0b | [
"Apache-2.0"
] | null | null | null | #
# Autogenerated by Frugal Compiler (3.4.7)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
import asyncio
from datetime import timedelta
import inspect
from frugal.aio.processor import FBaseProcessor
from frugal.aio.processor import FProcessorFunction
from frugal.exceptions import TApplicationExceptionType
from frugal.exceptions import TTransportExceptionType
from frugal.middleware import Method
from frugal.transport import TMemoryOutputBuffer
from frugal.util.deprecate import deprecated
from thrift.Thrift import TApplicationException
from thrift.Thrift import TMessageType
from thrift.transport.TTransport import TTransportException
from . import f_BasePinger
from .ttypes import *
class Iface(f_BasePinger.Iface):
    # Service interface for Pinger; extends the BasePinger contract with
    # a single one-way-style ping call.  Generated code — do not edit.

    async def ping(self, ctx):
        """
        Args:
            ctx: FContext
        """
        pass
class Client(f_BasePinger.Client, Iface):

    def __init__(self, provider, middleware=None):
        """
        Create a new Client with an FServiceProvider containing a transport
        and protocol factory.

        Args:
            provider: FServiceProvider
            middleware: ServiceMiddleware or list of ServiceMiddleware
        """
        middleware = middleware or []
        if middleware and not isinstance(middleware, list):
            middleware = [middleware]
        super(Client, self).__init__(provider, middleware=middleware)
        # Combine caller-supplied middleware with provider middleware and
        # register the wrapped RPC method.
        middleware += provider.get_middleware()
        self._methods.update({
            'ping': Method(self._ping, middleware),
        })

    async def ping(self, ctx):
        """
        Args:
            ctx: FContext
        """
        # Dispatch through the middleware-wrapped method registry.
        return await self._methods['ping']([ctx])

    async def _ping(self, ctx):
        # Serialize the call into an in-memory buffer bounded by the
        # transport's request size limit, then perform the round trip.
        memory_buffer = TMemoryOutputBuffer(self._transport.get_request_size_limit())
        oprot = self._protocol_factory.get_protocol(memory_buffer)
        oprot.write_request_headers(ctx)
        oprot.writeMessageBegin('ping', TMessageType.CALL, 0)
        args = ping_args()
        args.write(oprot)
        oprot.writeMessageEnd()
        response_transport = await self._transport.request(ctx, memory_buffer.getvalue())
        iprot = self._protocol_factory.get_protocol(response_transport)
        iprot.read_response_headers(ctx)
        _, mtype, _ = iprot.readMessageBegin()
        if mtype == TMessageType.EXCEPTION:
            x = TApplicationException()
            x.read(iprot)
            iprot.readMessageEnd()
            # Surface oversized-response errors as transport exceptions.
            if x.type == TApplicationExceptionType.RESPONSE_TOO_LARGE:
                raise TTransportException(type=TTransportExceptionType.RESPONSE_TOO_LARGE, message=x.message)
            raise x
        # ping has a void result; reading it just drains the reply struct.
        result = ping_result()
        result.read(iprot)
        iprot.readMessageEnd()
class Processor(f_BasePinger.Processor):

    def __init__(self, handler, middleware=None):
        """
        Create a new Processor.

        Args:
            handler: Iface
        """
        if middleware and not isinstance(middleware, list):
            middleware = [middleware]
        super(Processor, self).__init__(handler, middleware=middleware)
        # Register the server-side handler for 'ping', sharing the
        # processor's write lock so replies are serialized.
        self.add_to_processor_map('ping', _ping(Method(handler.ping, middleware), self.get_write_lock()))
class _ping(FProcessorFunction):
    # Server-side dispatcher for the 'ping' RPC: reads the args, invokes
    # the handler, and writes either the result or an exception reply.

    def __init__(self, handler, lock):
        super(_ping, self).__init__(handler, lock)

    async def process(self, ctx, iprot, oprot):
        args = ping_args()
        args.read(iprot)
        iprot.readMessageEnd()
        result = ping_result()
        try:
            ret = self._handler([ctx])
            # The handler may be sync or async; await only when needed.
            if inspect.iscoroutine(ret):
                ret = await ret
        except TApplicationException as ex:
            # Application-level errors are serialized back to the caller
            # and swallowed here.
            async with self._lock:
                _write_application_exception(ctx, oprot, "ping", exception=ex)
                return
        except Exception as e:
            # Unexpected errors: report INTERNAL_ERROR to the caller, then
            # re-raise so the server can log/handle it.
            async with self._lock:
                _write_application_exception(ctx, oprot, "ping", ex_code=TApplicationExceptionType.INTERNAL_ERROR, message=str(e))
            raise
        async with self._lock:
            try:
                oprot.write_response_headers(ctx)
                oprot.writeMessageBegin('ping', TMessageType.REPLY, 0)
                result.write(oprot)
                oprot.writeMessageEnd()
                oprot.get_transport().flush()
            except TTransportException as e:
                # catch a request too large error because the TMemoryOutputBuffer always throws that if too much data is written
                if e.type == TTransportExceptionType.REQUEST_TOO_LARGE:
                    raise _write_application_exception(ctx, oprot, "ping", ex_code=TApplicationExceptionType.RESPONSE_TOO_LARGE, message=e.message)
                else:
                    raise e
def _write_application_exception(ctx, oprot, method, ex_code=None, message=None, exception=None):
    """Serialize a TApplicationException reply for *method* and return it.

    Either an existing *exception* is forwarded, or a new one is built
    from *ex_code* and *message*.
    """
    x = exception if exception is not None else TApplicationException(type=ex_code, message=message)
    oprot.write_response_headers(ctx)
    oprot.writeMessageBegin(method, TMessageType.EXCEPTION, 0)
    x.write(oprot)
    oprot.writeMessageEnd()
    oprot.get_transport().flush()
    return x
class ping_args(object):
    """Argument struct for Pinger.ping (carries no fields)."""

    def read(self, iprot):
        # Drain the (empty) struct from the wire, skipping any fields a
        # newer peer might have added.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
        self.validate()

    def write(self, oprot):
        self.validate()
        oprot.writeStructBegin('ping_args')
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # Nothing to validate: the struct has no fields.
        return

    def __hash__(self):
        # Constant seed; all field-less instances hash (and compare) equal.
        return 17

    def __repr__(self):
        rendered = ('%s=%r' % pair for pair in self.__dict__.items())
        return '%s(%s)' % (self.__class__.__name__, ', '.join(rendered))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self == other
class ping_result(object):
    """Result struct for Pinger.ping (void return, no fields)."""

    def read(self, iprot):
        # Drain the (empty) struct from the wire, skipping unknown fields.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
        self.validate()

    def write(self, oprot):
        self.validate()
        oprot.writeStructBegin('ping_result')
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # Nothing to validate: the struct has no fields.
        return

    def __hash__(self):
        # Constant seed; all field-less instances hash (and compare) equal.
        return 17

    def __repr__(self):
        rendered = ('%s=%r' % pair for pair in self.__dict__.items())
        return '%s(%s)' % (self.__class__.__name__, ', '.join(rendered))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self == other
| 31.599119 | 147 | 0.626516 |
88cd909f8de720574cf631ffd3152f5a188a4efe | 8,952 | py | Python | mrcnn/config.py | xericho/mask-rcnn | 8779381d4cb07155449c7b020bc1465e17669aca | [
"MIT"
] | null | null | null | mrcnn/config.py | xericho/mask-rcnn | 8779381d4cb07155449c7b020bc1465e17669aca | [
"MIT"
] | null | null | null | mrcnn/config.py | xericho/mask-rcnn | 8779381d4cb07155449c7b020bc1465e17669aca | [
"MIT"
] | null | null | null | """
Mask R-CNN
Base Configurations class.
Copyright (c) 2017 Matterport, Inc.
Licensed under the MIT License (see LICENSE for details)
Written by Waleed Abdulla
"""
import numpy as np
# Base Configuration Class
# Don't use this class directly. Instead, sub-class it and override
# the configurations you need to change.
class Config(object):
    """Base configuration class. For custom configurations, create a
    sub-class that inherits from this one and override properties
    that need to be changed.

    Instantiating computes the derived attributes BATCH_SIZE, IMAGE_SHAPE
    and IMAGE_META_SIZE from the class-level settings (see ``__init__``).
    """
    # Name the configurations. For example, 'COCO', 'Experiment 3', ...etc.
    # Useful if your code needs to do things differently depending on which
    # experiment is running.
    NAME = 'coco'  # Override in sub-classes

    # NUMBER OF GPUs to use. For CPU training, use 1
    GPU_COUNT = 1

    # Number of images to train with on each GPU. A 12GB GPU can typically
    # handle 2 images of 1024x1024px.
    # Adjust based on your GPU memory and image sizes. Use the highest
    # number that your GPU can handle for best performance.
    IMAGES_PER_GPU = 2

    # Number of training steps per epoch
    # This doesn't need to match the size of the training set. Tensorboard
    # updates are saved at the end of each epoch, so setting this to a
    # smaller number means getting more frequent TensorBoard updates.
    # Validation stats are also calculated at each epoch end and they
    # might take a while, so don't set this too small to avoid spending
    # a lot of time on validation stats.
    STEPS_PER_EPOCH = 1000

    # Number of validation steps to run at the end of every training epoch.
    # A bigger number improves accuracy of validation stats, but slows
    # down the training.
    VALIDATION_STEPS = 50

    # Backbone network architecture
    # Supported values are: resnet50, resnet101.
    # You can also provide a callable that should have the signature
    # of model.resnet_graph. If you do so, you need to supply a callable
    # to COMPUTE_BACKBONE_SHAPE as well
    BACKBONE = "resnet101"

    # Only useful if you supply a callable to BACKBONE. Should compute
    # the shape of each layer of the FPN Pyramid.
    # See model.compute_backbone_shapes
    COMPUTE_BACKBONE_SHAPE = None

    # The strides of each layer of the FPN Pyramid. These values
    # are based on a Resnet101 backbone.
    BACKBONE_STRIDES = [4, 8, 16, 32, 64]

    # Size of the fully-connected layers in the classification graph
    FPN_CLASSIF_FC_LAYERS_SIZE = 1024

    # Size of the top-down layers used to build the feature pyramid
    TOP_DOWN_PYRAMID_SIZE = 256

    # Number of classification classes (including background)
    NUM_CLASSES = 1  # Override in sub-classes

    # Length of square anchor side in pixels
    RPN_ANCHOR_SCALES = (32, 64, 128, 256, 512)

    # Ratios of anchors at each cell (width/height)
    # A value of 1 represents a square anchor, and 0.5 is a wide anchor
    RPN_ANCHOR_RATIOS = [0.5, 1, 2]

    # Anchor stride
    # If 1 then anchors are created for each cell in the backbone feature map.
    # If 2, then anchors are created for every other cell, and so on.
    RPN_ANCHOR_STRIDE = 1

    # Non-max suppression threshold to filter RPN proposals.
    # You can increase this during training to generate more proposals.
    RPN_NMS_THRESHOLD = 0.7

    # How many anchors per image to use for RPN training
    RPN_TRAIN_ANCHORS_PER_IMAGE = 256

    # ROIs kept after non-maximum suppression (training and inference)
    POST_NMS_ROIS_TRAINING = 2000
    POST_NMS_ROIS_INFERENCE = 1000

    # If enabled, resizes instance masks to a smaller size to reduce
    # memory load. Recommended when using high-resolution images.
    USE_MINI_MASK = True
    MINI_MASK_SHAPE = (56, 56)  # (height, width) of the mini-mask

    # Input image resizing
    # Generally, use the "square" resizing mode for training and predicting
    # and it should work well in most cases. In this mode, images are scaled
    # up such that the small side is = IMAGE_MIN_DIM, but ensuring that the
    # scaling doesn't make the long side > IMAGE_MAX_DIM. Then the image is
    # padded with zeros to make it a square so multiple images can be put
    # in one batch.
    # Available resizing modes:
    # none:   No resizing or padding. Return the image unchanged.
    # square: Resize and pad with zeros to get a square image
    #         of size [max_dim, max_dim].
    # pad64:  Pads width and height with zeros to make them multiples of 64.
    #         If IMAGE_MIN_DIM or IMAGE_MIN_SCALE are not None, then it scales
    #         up before padding. IMAGE_MAX_DIM is ignored in this mode.
    #         The multiple of 64 is needed to ensure smooth scaling of feature
    #         maps up and down the 6 levels of the FPN pyramid (2**6=64).
    # crop:   Picks random crops from the image. First, scales the image based
    #         on IMAGE_MIN_DIM and IMAGE_MIN_SCALE, then picks a random crop of
    #         size IMAGE_MIN_DIM x IMAGE_MIN_DIM. Can be used in training only.
    #         IMAGE_MAX_DIM is not used in this mode.
    IMAGE_RESIZE_MODE = "square"
    IMAGE_MIN_DIM = 800
    IMAGE_MAX_DIM = 1024

    # Minimum scaling ratio. Checked after MIN_IMAGE_DIM and can force further
    # up scaling. For example, if set to 2 then images are scaled up to double
    # the width and height, or more, even if MIN_IMAGE_DIM doesn't require it.
    # However, in 'square' mode, it can be overruled by IMAGE_MAX_DIM.
    IMAGE_MIN_SCALE = 0

    # Image mean (RGB)
    MEAN_PIXEL = np.array([123.7, 116.8, 103.9])

    # Number of ROIs per image to feed to classifier/mask heads
    # The Mask RCNN paper uses 512 but often the RPN doesn't generate
    # enough positive proposals to fill this and keep a positive:negative
    # ratio of 1:3. You can increase the number of proposals by adjusting
    # the RPN NMS threshold.
    TRAIN_ROIS_PER_IMAGE = 200

    # Percent of positive ROIs used to train classifier/mask heads
    ROI_POSITIVE_RATIO = 0.33

    # Pooled ROIs
    POOL_SIZE = 7
    MASK_POOL_SIZE = 14

    # Shape of output mask
    # To change this you also need to change the neural network mask branch
    MASK_SHAPE = [28, 28]

    # Maximum number of ground truth instances to use in one image
    MAX_GT_INSTANCES = 100

    # Bounding box refinement standard deviation for RPN and final detections.
    RPN_BBOX_STD_DEV = np.array([0.1, 0.1, 0.2, 0.2])
    BBOX_STD_DEV = np.array([0.1, 0.1, 0.2, 0.2])

    # Max number of final detections
    DETECTION_MAX_INSTANCES = 100

    # Minimum probability value to accept a detected instance
    # ROIs below this threshold are skipped
    DETECTION_MIN_CONFIDENCE = 0.7

    # Non-maximum suppression threshold for detection
    DETECTION_NMS_THRESHOLD = 0.3

    # Learning rate and momentum
    # The Mask RCNN paper uses lr=0.02, but on TensorFlow it causes
    # weights to explode. Likely due to differences in optimizer
    # implementation.
    LEARNING_RATE = 0.001
    LEARNING_MOMENTUM = 0.9

    # Weight decay regularization
    WEIGHT_DECAY = 0.0001

    # Loss weights for more precise optimization.
    # Can be used for R-CNN training setup.
    LOSS_WEIGHTS = {
        "rpn_class_loss": 1.,
        "rpn_bbox_loss": 1.,
        "mrcnn_class_loss": 1.,
        "mrcnn_bbox_loss": 1.,
        "mrcnn_mask_loss": 1.
    }

    # Use RPN ROIs or externally generated ROIs for training
    # Keep this True for most situations. Set to False if you want to train
    # the head branches on ROI generated by code rather than the ROIs from
    # the RPN. For example, to debug the classifier head without having to
    # train the RPN.
    USE_RPN_ROIS = True

    # Train or freeze batch normalization layers
    #     None: Train BN layers. This is the normal mode
    #     False: Freeze BN layers. Good when using a small batch size
    #     True: (don't use). Set layer in training mode even when predicting
    TRAIN_BN = False  # Defaulting to False since batch size is often small

    # Gradient norm clipping
    GRADIENT_CLIP_NORM = 5.0

    def __init__(self):
        """Set values of computed attributes."""
        # Effective batch size
        self.BATCH_SIZE = self.IMAGES_PER_GPU * self.GPU_COUNT
        # Input image size: "crop" mode works with MIN_DIM squares,
        # every other mode pads/scales up to MAX_DIM squares.
        if self.IMAGE_RESIZE_MODE == "crop":
            self.IMAGE_SHAPE = np.array([self.IMAGE_MIN_DIM, self.IMAGE_MIN_DIM, 3])
        else:
            self.IMAGE_SHAPE = np.array([self.IMAGE_MAX_DIM, self.IMAGE_MAX_DIM, 3])
        # Image meta data length
        # See compose_image_meta() for details
        self.IMAGE_META_SIZE = 1 + 3 + 3 + 4 + 1 + self.NUM_CLASSES

    def display(self):
        """Display Configuration values."""
        print("\nConfigurations:")
        # Non-callable, non-dunder attributes are the configuration values.
        for a in dir(self):
            if not a.startswith("__") and not callable(getattr(self, a)):
                print("{:30} {}".format(a, getattr(self, a)))
        print("\n")
| 39.263158 | 84 | 0.689678 |
51164864002332400aa33bbb5439f74bd0463bc1 | 7,040 | py | Python | src/app/auth/auth_policy.py | Thiqah-Lab/aws-serverless-skeleton | d34adddb6613f2eb40e92ed483bdfbfe72332257 | [
"MIT"
] | 46 | 2019-04-08T19:09:51.000Z | 2021-12-09T23:54:35.000Z | src/app/auth/auth_policy.py | Thiqah-Lab/aws-serverless-skeleton | d34adddb6613f2eb40e92ed483bdfbfe72332257 | [
"MIT"
] | 5 | 2019-04-08T17:14:37.000Z | 2019-04-08T17:15:27.000Z | src/app/auth/auth_policy.py | Thiqah-Lab/aws-serverless-skeleton | d34adddb6613f2eb40e92ed483bdfbfe72332257 | [
"MIT"
] | 1 | 2021-07-26T08:19:12.000Z | 2021-07-26T08:19:12.000Z | import re
class HttpVerb:
    """HTTP method name constants accepted by the policy builder."""

    GET = "GET"
    POST = "POST"
    PUT = "PUT"
    PATCH = "PATCH"
    HEAD = "HEAD"
    DELETE = "DELETE"
    OPTIONS = "OPTIONS"
    ALL = "*"  # wildcard matching every verb
class AuthPolicy(object):
aws_account_id = ""
"""
The AWS account id the policy will be generated for.
This is used to create the method ARNs.
"""
principal_id = ""
"""The principal used for the policy, this should be a unique identifier for the end user."""
version = "2012-10-17"
"""The policy version used for the evaluation. This should always be '2012-10-17'"""
path_regex = "^[/.a-zA-Z0-9-*]+$"
"""The regular expression used to validate resource paths for the policy"""
"""these are the internal lists of allowed and denied methods. These are lists
of objects and each object has 2 properties: A resource ARN and a nullable
conditions statement.
the build method processes these lists and generates the appropriate
statements for the final policy"""
allow_methods = []
deny_methods = []
rest_api_id = "*"
"""The API Gateway API id. By default this is set to '*'"""
region = "*"
"""The region where the API is deployed. By default this is set to '*'"""
stage = "*"
"""The name of the stage used in the policy. By default this is set to '*'"""
def __init__(self, principal, aws_account_id):
self.aws_account_id = aws_account_id
self.principal_id = principal
self.allow_methods = []
self.deny_methods = []
def _add_method(self, effect, verb, resource, conditions):
"""Adds a method to the internal lists of allowed or denied methods. Each object in
the internal list contains a resource ARN and a condition statement. The condition
statement can be null."""
if verb != "*" and not hasattr(HttpVerb, verb):
raise NameError("Invalid HTTP verb " + verb +
". Allowed verbs in HttpVerb class")
resource_pattern = re.compile(self.path_regex)
if not resource_pattern.match(resource):
raise NameError("Invalid resource path: "
+ resource + ". Path should match " + self.path_regex)
if resource[:1] == "/":
resource = resource[1:]
resource_arn = ("arn:aws:execute-api:" +
self.region + ":" +
self.aws_account_id + ":" +
self.rest_api_id + "/" +
self.stage + "/" +
verb + "/" +
resource)
if effect.lower() == "allow":
self.allow_methods.append({
'resourceArn': resource_arn,
'conditions': conditions
})
elif effect.lower() == "deny":
self.deny_methods.append({
'resourceArn': resource_arn,
'conditions': conditions
})
@staticmethod
def _get_empty_statement(effect):
"""Returns an empty statement object prepopulated with the correct action and the
desired effect."""
statement = {
'Action': 'execute-api:Invoke',
'Effect': effect[:1].upper() + effect[1:].lower(),
'Resource': []
}
return statement
def _get_statement_for_effect(self, effect, methods):
"""This function loops over an array of objects containing a resourceArn and
conditions statement and generates the array of statements for the policy."""
statements = []
if len(methods) > 0:
statement = self._get_empty_statement(effect)
for cur_method in methods:
if cur_method['conditions'] is None or len(cur_method['conditions']) == 0:
statement['Resource'].append(cur_method['resourceArn'])
else:
conditional_statement = self._get_empty_statement(effect)
conditional_statement['Resource'].append(cur_method['resourceArn'])
conditional_statement['Condition'] = cur_method['conditions']
statements.append(conditional_statement)
statements.append(statement)
return statements
def allow_all_methods(self):
"""Adds a '*' allow to the policy to authorize access to all methods of an API"""
self._add_method("Allow", HttpVerb.ALL, "*", [])
def deny_all_methods(self):
"""Adds a '*' allow to the policy to deny access to all methods of an API"""
self._add_method("Deny", HttpVerb.ALL, "*", [])
def allow_method(self, verb, resource):
"""Adds an API Gateway method (Http verb + Resource path) to the list of allowed
methods for the policy"""
self._add_method("Allow", verb, resource, [])
def deny_method(self, verb, resource):
"""Adds an API Gateway method (Http verb + Resource path) to the list of denied
methods for the policy"""
self._add_method("Deny", verb, resource, [])
def allow_method_with_conditions(self, verb, resource, conditions):
"""
Adds an API Gateway method (Http verb + Resource path) to the list of allowed
methods and includes a condition for the policy statement. More on AWS policy
conditions here:
http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements.html#Condition
"""
self._add_method("Allow", verb, resource, conditions)
def deny_method_with_conditions(self, verb, resource, conditions):
"""
Adds an API Gateway method (Http verb + Resource path) to the list of denied
methods and includes a condition for the policy statement. More on AWS policy
conditions here:
http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements.html#Condition
"""
self._add_method("Deny", verb, resource, conditions)
def build(self):
"""
Generates the policy document based on the internal lists of allowed and denied
conditions. This will generate a policy with two main statements for the effect:
one statement for Allow and one statement for Deny.
Methods that includes conditions will have their own statement in the policy.
"""
if ((self.allow_methods is None or len(self.allow_methods) == 0) and
(self.deny_methods is None or len(self.deny_methods) == 0)):
raise NameError("No statements defined for the policy")
policy = {
'principalId': self.principal_id,
'policyDocument': {
'Version': self.version,
'Statement': []
}
}
policy['policyDocument']['Statement'].extend(
self._get_statement_for_effect("Allow", self.allow_methods))
policy['policyDocument']['Statement'].extend(
self._get_statement_for_effect("Deny", self.deny_methods))
return policy
| 40 | 98 | 0.605256 |
5b6332c3a9d53a62845106f6eabc6f4616e9446e | 2,915 | py | Python | django_logging/logger.py | paredesivan/django-logging | 05f2e83887f26eab635dbe87c8cbe1fa3a96476b | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | django_logging/logger.py | paredesivan/django-logging | 05f2e83887f26eab635dbe87c8cbe1fa3a96476b | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | django_logging/logger.py | paredesivan/django-logging | 05f2e83887f26eab635dbe87c8cbe1fa3a96476b | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | import logging
import logging.config
import os
import sys
from . import settings
# Effective log level comes from project settings, normalized to upper case.
LOG_LEVEL = settings.LOG_LEVEL.upper()

# Handlers are opt-in via settings flags; 'default' is always active.
LOG_HANDLERS = ['default']
if settings.CONSOLE_LOG:
    LOG_HANDLERS.append('console')
if settings.DEBUG:
    LOG_HANDLERS.append('debug')
if settings.SQL_LOG:
    LOG_HANDLERS.append('sql')

# Make sure the log directory exists before the file handlers try to open
# files inside it.
if not os.path.exists(settings.LOG_PATH):
    try:
        os.makedirs(settings.LOG_PATH)
    except Exception as e:
        # BUGFIX: the original discarded the captured OS error entirely;
        # include it so the failure reason is not lost.
        raise Exception(
            'Unable to configure logger. Can\'t create LOG_PATH: {}: {}'.format(
                settings.LOG_PATH, e
            )
        )
# dictConfig schema for the package's 'dl_logger' logger.
# Handler classes live in django_logging.handlers; the rotation keys
# (maxBytes/backupCount/when/interval) are presumably consumed by those
# custom handler classes — confirm against django_logging.handlers.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': settings.DISABLE_EXISTING_LOGGERS,
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse',
        },
    },
    'formatters': {
        # Level, unix timestamp, source location, then the message.
        'verbose': {
            'format': '[%(levelname)s - %(created)s] file:%(module)s.py, func:%(funcName)s, ln:%(lineno)s: %(message)s'
        },
        'simple': {
            'format': '%(message)s'
        },
        # SQL records carry duration/sql/params extras (Django DB logging style).
        'sql': {
            'format': '[%(levelname)s - %(created)s] %(duration)s %(sql)s %(params)s'
        },
    },
    'handlers': {
        'console': {
            'level': 'DEBUG',
            'class': 'django_logging.handlers.ConsoleHandler',
            'formatter': 'verbose',
            'stream': sys.stderr
        },
        'default': {
            'level': 'INFO',
            'class': 'django_logging.handlers.DefaultFileHandler',
            'formatter': 'verbose',
            'maxBytes': settings.ROTATE_MB * 1024 * 1024,
            'backupCount': settings.ROTATE_COUNT,
            'when': settings.WHEN_INTERVAL,
            'interval': settings.INTERVAL,
            'filename': '{}/app.log'.format(settings.LOG_PATH)
        },
        'debug': {
            'level': 'DEBUG',
            'class': 'django_logging.handlers.DebugFileHandler',
            'formatter': 'verbose',
            'maxBytes': settings.ROTATE_MB * 1024 * 1024,
            'backupCount': settings.ROTATE_COUNT,
            'when': settings.WHEN_INTERVAL,
            'interval': settings.INTERVAL,
            'filename': '{}/debug.log'.format(settings.LOG_PATH)
        },
        'sql': {
            'level': 'DEBUG',
            'class': 'django_logging.handlers.SQLFileHandler',
            'formatter': 'sql',
            'maxBytes': settings.ROTATE_MB * 1024 * 1024,
            'backupCount': settings.ROTATE_COUNT,
            'when': settings.WHEN_INTERVAL,
            'interval': settings.INTERVAL,
            'filename': '{}/sql.log'.format(settings.LOG_PATH)
        }
    },
    'loggers': {
        'dl_logger': {
            'handlers': LOG_HANDLERS,
            'level': LOG_LEVEL,
            'propagate': True,
        },
    }
}

# Apply the configuration at import time so get_logger() is ready to use.
logging.config.dictConfig(LOGGING)
def get_logger():
    """Return the shared 'dl_logger' logger, pinned to the configured level."""
    dl_logger = logging.getLogger('dl_logger')
    dl_logger.setLevel(LOG_LEVEL)
    return dl_logger
| 30.051546 | 119 | 0.552316 |
9793dbcafa3631121102c414d62b5478f981fa0a | 1,490 | py | Python | tsp_2opt/solver.py | mehdibnc/TSP2opt | 94607ebd86396d3d53e1533480d3626c82ed94fc | [
"MIT"
] | null | null | null | tsp_2opt/solver.py | mehdibnc/TSP2opt | 94607ebd86396d3d53e1533480d3626c82ed94fc | [
"MIT"
] | null | null | null | tsp_2opt/solver.py | mehdibnc/TSP2opt | 94607ebd86396d3d53e1533480d3626c82ed94fc | [
"MIT"
] | null | null | null | from concurrent.futures import ThreadPoolExecutor
from .tsp_2opt import solver_2opt_parr
from tsp_2opt.utils import get_best_from_batch, get_init_route, is_symmetric, check_type
import numpy as np
def tsp_solver(distances: np.ndarray, workers: int = 5, r: int = 10):
    """ TSP Solver using 2-opt local search.

        The local search is ran r times in parallel, each run starting from
        a different (seeded, reproducible) initial route, using at most
        ``workers`` worker threads.

        Args:
            distances: distance matrix, giving pairwise distances between
                        each node.
            workers: maximum workers for parallel computations
            r: number of times to run 2-opt with different starting points

        Returns:
            best: solution found, list of nodes to visit.
            length: length of the solution found
    """
    #-- Check input type
    distances = check_type(distances)
    #-- Check symmetry
    symmetric = is_symmetric(distances)
    nodes = len(distances)
    # FIX: use the executor as a context manager so worker threads are
    # always shut down and joined, even if a submitted task raises.
    with ThreadPoolExecutor(max_workers=workers) as executor:
        # Seed each run with its index for reproducibility.
        futures = [
            executor.submit(
                solver_2opt_parr, get_init_route(nodes, seed=i), distances, symmetric
            )
            for i in range(r)
        ]
        # .result() re-raises any worker exception here.
        solutions = [future.result() for future in futures]
    #-- Retrieve best solution
    best, length = get_best_from_batch(solutions, distances)
    return best, length
| 33.863636 | 88 | 0.650336 |
cbe2f96bf19dcccb8efc1762831a1cc7e5b72817 | 2,129 | py | Python | bokeh/sampledata/daylight.py | goncaloperes/bokeh | b857d2d17d7c19779bb0a7be2601d8238fb1d5e9 | [
"BSD-3-Clause"
] | 1 | 2021-04-09T02:57:29.000Z | 2021-04-09T02:57:29.000Z | bokeh/sampledata/daylight.py | goncaloperes/bokeh | b857d2d17d7c19779bb0a7be2601d8238fb1d5e9 | [
"BSD-3-Clause"
] | 5 | 2021-05-07T10:31:27.000Z | 2021-05-07T10:33:37.000Z | bokeh/sampledata/daylight.py | goncaloperes/bokeh | b857d2d17d7c19779bb0a7be2601d8238fb1d5e9 | [
"BSD-3-Clause"
] | null | null | null | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2021, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Provide 2013 Warsaw daylight hours from http://www.sunrisesunset.com
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
import logging # isort:skip
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Bokeh imports
from ..util.sampledata import package_csv
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
'daylight_warsaw_2013',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
def _read_data():
    ''' Load the Warsaw 2013 daylight sample and normalize its columns:
    Date becomes ``datetime.date`` values, Sunrise/Sunset become
    ``datetime.time`` values.

    '''
    df = package_csv('daylight', 'daylight_warsaw_2013.csv', parse_dates=["Date", "Sunrise", "Sunset"])
    # Strip the full timestamps down to the parts each column represents.
    for column, extract in (("Date", lambda ts: ts.date()),
                            ("Sunrise", lambda ts: ts.time()),
                            ("Sunset", lambda ts: ts.time())):
        df[column] = df[column].map(extract)
    return df
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
daylight_warsaw_2013 = _read_data()
| 34.901639 | 103 | 0.276656 |
d97abcb1235f987f9d5f92fb719a00dc25e2b0a4 | 14,545 | py | Python | salt/pillar/azureblob.py | fake-name/salt | d8f04936e4407f51946e32e8166159778f6c31a5 | [
"Apache-2.0"
] | 1 | 2020-07-22T18:41:34.000Z | 2020-07-22T18:41:34.000Z | salt/pillar/azureblob.py | fake-name/salt | d8f04936e4407f51946e32e8166159778f6c31a5 | [
"Apache-2.0"
] | null | null | null | salt/pillar/azureblob.py | fake-name/salt | d8f04936e4407f51946e32e8166159778f6c31a5 | [
"Apache-2.0"
] | 1 | 2015-08-26T09:46:04.000Z | 2015-08-26T09:46:04.000Z | # -*- coding: utf-8 -*-
"""
Use Azure Blob as a Pillar source.
.. versionadded:: Sodium
:maintainer: <devops@eitr.tech>
:maturity: new
:depends:
* `azure-storage-blob <https://pypi.org/project/azure-storage-blob/>`_ >= 12.0.0
The Azure Blob ext_pillar can be configured with the following parameters:
.. code-block:: yaml
ext_pillar:
- azureblob:
container: 'test_container'
connection_string: 'connection_string'
multiple_env: False
environment: 'base'
blob_cache_expire: 30
blob_sync_on_update: True
:param container: The name of the target Azure Blob Container.
:param connection_string: The connection string to use to access the specified Azure Blob Container.
:param multiple_env: Specifies whether the pillar should interpret top level folders as pillar environments.
Defaults to false.
:param environment: Specifies which environment the container represents when in single environment mode. Defaults
to 'base' and is ignored if multiple_env is set as True.
:param blob_cache_expire: Specifies expiration time of the Azure Blob metadata cache file. Defaults to 30s.
:param blob_sync_on_update: Specifies if the cache is synced on update. Defaults to True.
"""
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import os
import pickle
import time
from copy import deepcopy
import salt.utils.files
import salt.utils.hashutils
# Import 3rd-party libs
# pylint: disable=import-error,no-name-in-module,redefined-builtin
from salt.ext import six
from salt.ext.six.moves import filter
# Import Salt libs
from salt.pillar import Pillar
# pylint: enable=import-error,no-name-in-module,redefined-builtin
# Import Azure libs
HAS_LIBS = False
try:
from azure.storage.blob import BlobServiceClient
HAS_LIBS = True
except ImportError:
pass
__virtualname__ = "azureblob"
# Set up logging
log = logging.getLogger(__name__)
def __virtual__():
    """Load this pillar only when azure-storage-blob >= 12 is importable."""
    if HAS_LIBS:
        return __virtualname__
    return (
        False,
        "The following dependency is required to use the Azure Blob ext_pillar: "
        "Microsoft Azure Storage Blob >= 12.0.0 ",
    )
def ext_pillar(
    minion_id,
    pillar,  # pylint: disable=W0613
    container,
    connection_string,
    multiple_env=False,
    environment="base",
    blob_cache_expire=30,
    blob_sync_on_update=True,
):
    """
    Compile pillar data from files cached locally from an Azure Blob container.

    :param container: The name of the target Azure Blob Container.

    :param connection_string: The connection string to use to access the specified Azure Blob Container.

    :param multiple_env: Specifies whether the pillar should interpret top level folders as pillar environments.
        Defaults to false.

    :param environment: Specifies which environment the container represents when in single environment mode. Defaults
        to 'base' and is ignored if multiple_env is set as True.

    :param blob_cache_expire: Specifies expiration time of the Azure Blob metadata cache file. Defaults to 30s.

    :param blob_sync_on_update: Specifies if the cache is synced on update. Defaults to True.
    """
    # normpath is needed to remove appended '/' if root is empty string.
    pillar_dir = os.path.normpath(
        os.path.join(_get_cache_dir(), environment, container)
    )
    # If this pillar_dir is already the sole pillar root for the environment,
    # we are being called re-entrantly from our own Pillar.compile_pillar below.
    if __opts__["pillar_roots"].get(environment, []) == [pillar_dir]:
        return {}
    metadata = _init(
        connection_string, container, multiple_env, environment, blob_cache_expire
    )
    log.debug("Blob metadata: %s", metadata)
    if blob_sync_on_update:
        # sync the containers to the local cache
        log.info("Syncing local pillar cache from Azure Blob...")
        for saltenv, env_meta in six.iteritems(metadata):
            for container, files in six.iteritems(_find_files(env_meta)):
                for file_path in files:
                    cached_file_path = _get_cached_file_name(
                        container, saltenv, file_path
                    )
                    log.info("%s - %s : %s", container, saltenv, file_path)
                    # load the file from Azure Blob if not in the cache or too old
                    _get_file_from_blob(
                        connection_string,
                        metadata,
                        saltenv,
                        container,
                        file_path,
                        cached_file_path,
                    )
        log.info("Sync local pillar cache from Azure Blob completed.")
    # Compile the cached files as a regular file-backed pillar tree.
    opts = deepcopy(__opts__)
    opts["pillar_roots"][environment] = (
        [os.path.join(pillar_dir, environment)] if multiple_env else [pillar_dir]
    )
    # Avoid recursively re-adding this same pillar
    opts["ext_pillar"] = [x for x in opts["ext_pillar"] if "azureblob" not in x]
    pil = Pillar(opts, __grains__, minion_id, environment)
    compiled_pillar = pil.compile_pillar(ext=False)
    return compiled_pillar
def _init(connection_string, container, multiple_env, environment, blob_cache_expire):
    """
    .. versionadded:: Sodium

    Return the cached Azure Blob metadata for *container*, refreshing it from
    the remote storage account when the on-disk cache file is missing or older
    than *blob_cache_expire* seconds.

    :param connection_string: The connection string to use to access the specified Azure Blob Container.

    :param container: The name of the target Azure Blob Container.

    :param multiple_env: Specifies whether the pillar should interpret top level folders as pillar environments.

    :param environment: Specifies which environment the container represents when in single environment mode.
        Ignored if multiple_env is True.

    :param blob_cache_expire: Specifies expiration time of the Azure Blob metadata cache file.
    """
    cache_file = _get_containers_cache_filename(container)
    expiry_cutoff = time.time() - blob_cache_expire

    # A missing cache file is treated as infinitely old (mtime at the epoch).
    cache_file_mtime = (
        os.path.getmtime(cache_file) if os.path.isfile(cache_file) else 0
    )
    expired = cache_file_mtime <= expiry_cutoff
    log.debug(
        "Blob storage container cache file %s is %sexpired, mtime_diff=%ss, expiration=%ss",
        cache_file,
        "" if expired else "not ",
        cache_file_mtime - expiry_cutoff,
        blob_cache_expire,
    )

    if expired:
        pillars = _refresh_containers_cache_file(
            connection_string, container, cache_file, multiple_env, environment
        )
    else:
        pillars = _read_containers_cache_file(cache_file)

    log.debug("Blob container retrieved pillars %s", pillars)
    return pillars
def _get_cache_dir():
    """
    .. versionadded:: Sodium

    Return the azureblob pillar cache directory, creating it on first use.
    """
    pillar_cache = os.path.join(__opts__["cachedir"], "pillar_azureblob")
    if not os.path.isdir(pillar_cache):
        log.debug("Initializing Azure Blob Pillar Cache")
        os.makedirs(pillar_cache)
    return pillar_cache
def _get_cached_file_name(container, saltenv, path):
    """
    .. versionadded:: Sodium

    Return the local cache path for a container file, creating the parent
    directories (saltenv/container/...) as needed.

    :param container: The name of the target Azure Blob Container.

    :param saltenv: Specifies which environment the container represents.

    :param path: The path of the file in the container.
    """
    local_path = os.path.join(_get_cache_dir(), saltenv, container, path)
    # make sure container and saltenv directories exist
    parent = os.path.dirname(local_path)
    if not os.path.exists(parent):
        os.makedirs(parent)
    return local_path
def _get_containers_cache_filename(container):
    """
    .. versionadded:: Sodium

    Return the path of the metadata cache file for *container*, ensuring the
    cache directory exists.

    :param container: The name of the target Azure Blob Container.
    """
    cache_dir = _get_cache_dir()
    if not os.path.exists(cache_dir):
        os.makedirs(cache_dir)
    cache_name = "{0}-files.cache".format(container)
    return os.path.join(cache_dir, cache_name)
def _refresh_containers_cache_file(
    connection_string, container, cache_file, multiple_env=False, environment="base"
):
    """
    .. versionadded:: Sodium

    Downloads the entire contents of an Azure storage container to the local filesystem.

    :param connection_string: The connection string to use to access the specified Azure Blob Container.

    :param container: The name of the target Azure Blob Container.

    :param cache_file: The path of where the file will be cached.

    :param multiple_env: Specifies whether the pillar should interpret top level folders as pillar environments.

    :param environment: Specifies which environment the container represents when in single environment mode. This is
        ignored if multiple_env is set as True.
    """
    try:
        # Create the BlobServiceClient object which will be used to create a container client
        blob_service_client = BlobServiceClient.from_connection_string(
            connection_string
        )
        # Create the ContainerClient object
        container_client = blob_service_client.get_container_client(container)
    except Exception as exc:  # pylint: disable=broad-except
        # NOTE(review): connection failures return False instead of a metadata
        # dict — callers presumably treat that as falsy/empty; confirm.
        log.error("Exception: %s", exc)
        return False
    # Mapping of saltenv -> container -> list of blob properties, mutated by
    # the recursive closure below.
    metadata = {}

    def _walk_blobs(saltenv="base", prefix=None):
        # Walk the blobs in the container with a generator
        blob_list = container_client.walk_blobs(name_starts_with=prefix)
        # Iterate over the generator
        while True:
            try:
                blob = next(blob_list)
            except StopIteration:
                break
            log.debug("Raw blob attributes: %s", blob)
            # Directories end with "/".
            if blob.name.endswith("/"):
                # Recurse into the directory
                _walk_blobs(prefix=blob.name)
                continue
            if multiple_env:
                # Top-level folder (minus trailing "/") names the saltenv;
                # files at the container root belong to "base".
                saltenv = "base" if (not prefix or prefix == ".") else prefix[:-1]
            if saltenv not in metadata:
                metadata[saltenv] = {}
            if container not in metadata[saltenv]:
                metadata[saltenv][container] = []
            metadata[saltenv][container].append(blob)

    _walk_blobs(saltenv=environment)
    # write the metadata to disk, replacing any stale cache file
    if os.path.isfile(cache_file):
        os.remove(cache_file)
    log.debug("Writing Azure blobs pillar cache file")
    with salt.utils.files.fopen(cache_file, "wb") as fp_:
        pickle.dump(metadata, fp_)
    return metadata
def _read_containers_cache_file(cache_file):
    """
    .. versionadded:: Sodium

    Load and return the pickled container metadata stored in *cache_file*.

    :param cache_file: The path for where the file will be cached.
    """
    log.debug("Reading containers cache file")
    with salt.utils.files.fopen(cache_file, "rb") as handle:
        return pickle.load(handle)
def _find_files(metadata):
"""
.. versionadded:: Sodium
Looks for all the files in the Azure Blob container cache metadata.
:param metadata: The metadata for the container files.
"""
ret = {}
for container, data in six.iteritems(metadata):
if container not in ret:
ret[container] = []
# grab the paths from the metadata
file_paths = [k["name"] for k in data]
# filter out the dirs
ret[container] += [k for k in file_paths if not k.endswith("/")]
return ret
def _find_file_meta(metadata, container, saltenv, path):
"""
.. versionadded:: Sodium
Looks for a file's metadata in the Azure Blob Container cache file.
:param metadata: The metadata for the container files.
:param container: The name of the target Azure Blob Container.
:param saltenv: Specifies which environment the container represents.
:param path: The path of the file in the container.
"""
env_meta = metadata[saltenv] if saltenv in metadata else {}
container_meta = env_meta[container] if container in env_meta else {}
for item_meta in container_meta:
item_meta = dict(item_meta)
if "name" in item_meta and item_meta["name"] == path:
return item_meta
def _get_file_from_blob(
    connection_string, metadata, saltenv, container, path, cached_file_path
):
    """
    .. versionadded:: Sodium

    Download a single blob from an Azure Blob Container to the local file
    cache, skipping the download when the cached copy is already current.
    (The previous docstring incorrectly said the entire container was
    downloaded.)

    :param connection_string: The connection string to use to access the specified Azure Blob Container.
    :param metadata: The metadata for the container files.
    :param saltenv: Specifies which environment the container represents when in single environment mode. This is
        ignored if multiple_env is set as True.
    :param container: The name of the target Azure Blob Container.
    :param path: The path of the file in the container.
    :param cached_file_path: The path of where the file will be cached.
    :return: None on success or cache hit; False when the Azure client
        could not be created.
    """
    # check the local cache...
    if os.path.isfile(cached_file_path):
        file_meta = _find_file_meta(metadata, container, saltenv, path)
        # Strip the non-alphanumeric characters (e.g. surrounding quotes)
        # from the etag so it can be compared against a plain MD5 hex digest.
        # (No need to materialize the filter into a list before joining.)
        file_md5 = (
            "".join(filter(str.isalnum, file_meta["etag"])) if file_meta else None
        )
        cached_md5 = salt.utils.hashutils.get_hash(cached_file_path, "md5")
        # hashes match we have a cache hit
        log.debug(
            "Cached file: path=%s, md5=%s, etag=%s",
            cached_file_path,
            cached_md5,
            file_md5,
        )
        if cached_md5 == file_md5:
            return
    try:
        # Create the BlobServiceClient object which will be used to create a container client
        blob_service_client = BlobServiceClient.from_connection_string(
            connection_string
        )
        # Create the ContainerClient object
        container_client = blob_service_client.get_container_client(container)
        # Create the BlobClient object
        blob_client = container_client.get_blob_client(path)
    except Exception as exc:  # pylint: disable=broad-except
        log.error("Exception: %s", exc)
        return False
    # Stream the blob's bytes into the local cache file.
    with salt.utils.files.fopen(cached_file_path, "wb") as outfile:
        outfile.write(blob_client.download_blob().readall())
    return
| 30.556723 | 118 | 0.67439 |
724b6a197c587827933c541aeb875f07736ae170 | 66,626 | py | Python | weis/optimization_drivers/tests/test_nlopt_driver.py | ebranlard/WEIS | 59851a0c3b2e801bd413ca4887ab4b78e58f928a | [
"Apache-2.0"
] | null | null | null | weis/optimization_drivers/tests/test_nlopt_driver.py | ebranlard/WEIS | 59851a0c3b2e801bd413ca4887ab4b78e58f928a | [
"Apache-2.0"
] | null | null | null | weis/optimization_drivers/tests/test_nlopt_driver.py | ebranlard/WEIS | 59851a0c3b2e801bd413ca4887ab4b78e58f928a | [
"Apache-2.0"
] | null | null | null | """ Unit tests for the NLOpt Driver."""
import copy
import sys
import unittest
import numpy as np
import openmdao.api as om
from openmdao.test_suite.components.expl_comp_array import TestExplCompArrayDense
from openmdao.test_suite.components.paraboloid import Paraboloid
from openmdao.test_suite.components.sellar import (
SellarDerivativesGrouped,
SellarDerivatives,
)
from openmdao.test_suite.components.simple_comps import NonSquareArrayComp
from openmdao.test_suite.groups.sin_fitter import SineFitter
from openmdao.utils.assert_utils import assert_near_equal
from openmdao.utils.general_utils import run_driver
from weis.optimization_drivers.nlopt_driver import NLoptDriver
try:
import nlopt
except ImportError:
nlopt = None
def rastrigin(x):
    """Evaluate the Rastrigin benchmark function at the point(s) in ``x``."""
    amplitude = 10  # constant
    cosine_terms = amplitude * np.cos(2 * np.pi * x)
    return np.sum(x * x - cosine_terms) + amplitude * np.size(x)
@unittest.skipIf(nlopt is None, "only run if NLopt is installed.")
class TestNLoptDriver(unittest.TestCase):
def test_driver_supports(self):
prob = om.Problem()
model = prob.model
model.add_subsystem("p1", om.IndepVarComp("x", 50.0), promotes=["*"])
prob.set_solver_print(level=0)
prob.driver = NLoptDriver(optimizer="LD_SLSQP", tol=1e-9)
with self.assertRaises(KeyError) as raises_msg:
prob.driver.supports["equality_constraints"] = False
exception = raises_msg.exception
msg = "NLoptDriver: Tried to set read-only option 'equality_constraints'."
self.assertEqual(exception.args[0], msg)
def test_compute_totals_basic_return_array(self):
# Make sure 'array' return_format works.
prob = om.Problem()
model = prob.model
model.add_subsystem("p1", om.IndepVarComp("x", 0.0), promotes=["x"])
model.add_subsystem("p2", om.IndepVarComp("y", 0.0), promotes=["y"])
model.add_subsystem("comp", Paraboloid(), promotes=["x", "y", "f_xy"])
model.add_design_var("x", lower=-50.0, upper=50.0)
model.add_design_var("y", lower=-50.0, upper=50.0)
model.add_objective("f_xy")
prob.setup(check=False, mode="fwd")
prob.set_solver_print(level=0)
failed = prob.run_driver()
of = ["f_xy"]
wrt = ["x", "y"]
derivs = prob.compute_totals(of=of, wrt=wrt, return_format="array")
assert_near_equal(derivs[0, 0], -6.0, 1e-6)
assert_near_equal(derivs[0, 1], 8.0, 1e-6)
prob.setup(check=False, mode="rev")
prob.run_model()
of = ["f_xy"]
wrt = ["x", "y"]
derivs = prob.compute_totals(of=of, wrt=wrt, return_format="array")
assert_near_equal(derivs[0, 0], -6.0, 1e-6)
assert_near_equal(derivs[0, 1], 8.0, 1e-6)
def test_compute_totals_return_array_non_square(self):
prob = om.Problem()
model = prob.model
model.add_subsystem("px", om.IndepVarComp(name="x", val=np.ones((2,))))
comp = model.add_subsystem("comp", NonSquareArrayComp())
model.connect("px.x", "comp.x1")
model.add_design_var("px.x")
model.add_objective("px.x")
model.add_constraint("comp.y1")
model.add_constraint("comp.y2")
prob.setup(check=False, mode="auto")
failed = prob.run_driver()
derivs = prob.compute_totals(
of=["comp.y1"], wrt=["px.x"], return_format="array"
)
J = comp.JJ[0:3, 0:2]
assert_near_equal(J, derivs, 1.0e-3)
# Support for a name to be in 'of' and 'wrt'
derivs = prob.compute_totals(
of=["comp.y2", "px.x", "comp.y1"], wrt=["px.x"], return_format="array"
)
assert_near_equal(J, derivs[3:, :], 1.0e-3)
assert_near_equal(comp.JJ[3:4, 0:2], derivs[0:1, :], 1.0e-3)
assert_near_equal(np.eye(2), derivs[1:3, :], 1.0e-3)
def test_deriv_wrt_self(self):
prob = om.Problem()
model = prob.model
model.add_subsystem("px", om.IndepVarComp(name="x", val=np.ones((2,))))
model.add_design_var("px.x")
model.add_objective("px.x")
prob.setup()
failed = prob.run_driver()
# Support for a name to be in 'of' and 'wrt'
J = prob.driver._compute_totals(
of=["px.x"], wrt=["px.x"], return_format="array"
)
assert_near_equal(J, np.eye(2), 1.0e-3)
def test_optimizer_simple_paraboloid_unconstrained(self):
prob = om.Problem()
model = prob.model
model.add_subsystem("p1", om.IndepVarComp("x", 50.0), promotes=["*"])
model.add_subsystem("p2", om.IndepVarComp("y", 50.0), promotes=["*"])
model.add_subsystem("comp", Paraboloid(), promotes=["*"])
prob.set_solver_print(level=0)
prob.driver = NLoptDriver(optimizer="LD_SLSQP", tol=1e-9)
model.add_design_var("x", lower=-50.0, upper=50.0)
model.add_design_var("y", lower=-50.0, upper=50.0)
model.add_objective("f_xy")
prob.setup()
failed = prob.run_driver()
assert_near_equal(prob["x"], 6.66666667, 1e-6)
assert_near_equal(prob["y"], -7.3333333, 1e-6)
def test_simple_paraboloid_unconstrained(self):
prob = om.Problem()
model = prob.model
model.add_subsystem("p1", om.IndepVarComp("x", 50.0), promotes=["*"])
model.add_subsystem("p2", om.IndepVarComp("y", 50.0), promotes=["*"])
model.add_subsystem("comp", Paraboloid(), promotes=["*"])
prob.set_solver_print(level=0)
prob.driver = NLoptDriver()
prob.driver.options["optimizer"] = "LD_SLSQP"
prob.driver.options["tol"] = 1e-9
model.add_design_var("x", lower=-50.0, upper=50.0)
model.add_design_var("y", lower=-50.0, upper=50.0)
model.add_objective("f_xy")
prob.setup()
failed = prob.run_driver()
assert_near_equal(prob["x"], 6.66666667, 1e-6)
assert_near_equal(prob["y"], -7.3333333, 1e-6)
def test_simple_paraboloid_unconstrained_LN_COBYLA(self):
prob = om.Problem()
model = prob.model
model.add_subsystem("p1", om.IndepVarComp("x", 50.0), promotes=["*"])
model.add_subsystem("p2", om.IndepVarComp("y", 50.0), promotes=["*"])
model.add_subsystem("comp", Paraboloid(), promotes=["*"])
prob.set_solver_print(level=0)
prob.driver = NLoptDriver()
prob.driver.options["optimizer"] = "LN_COBYLA"
prob.driver.options["tol"] = 1e-12
model.add_design_var("x", lower=-50.0, upper=50.0)
model.add_design_var("y", lower=-50.0, upper=50.0)
model.add_objective("f_xy")
prob.setup()
failed = prob.run_driver()
assert_near_equal(prob["x"], 6.66666667, 1e-6)
assert_near_equal(prob["y"], -7.3333333, 1e-6)
def test_simple_paraboloid_upper(self):
prob = om.Problem()
model = prob.model
model.add_subsystem("p1", om.IndepVarComp("x", 50.0), promotes=["*"])
model.add_subsystem("p2", om.IndepVarComp("y", 50.0), promotes=["*"])
model.add_subsystem("comp", Paraboloid(), promotes=["*"])
model.add_subsystem("con", om.ExecComp("c = - x + y"), promotes=["*"])
prob.set_solver_print(level=0)
prob.driver = NLoptDriver()
prob.driver.options["optimizer"] = "LD_SLSQP"
prob.driver.options["tol"] = 1e-9
model.add_design_var("x", lower=-50.0, upper=50.0)
model.add_design_var("y", lower=-50.0, upper=50.0)
model.add_objective("f_xy")
model.add_constraint("c", upper=-15.0)
prob.setup()
failed = prob.run_driver()
# Minimum should be at (7.166667, -7.833334)
assert_near_equal(prob["x"], 7.16667, 1e-6)
assert_near_equal(prob["y"], -7.833334, 1e-6)
def test_simple_paraboloid_lower(self):
prob = om.Problem()
model = prob.model
model.add_subsystem("p1", om.IndepVarComp("x", 50.0), promotes=["*"])
model.add_subsystem("p2", om.IndepVarComp("y", 50.0), promotes=["*"])
model.add_subsystem("comp", Paraboloid(), promotes=["*"])
model.add_subsystem("con", om.ExecComp("c = x - y"), promotes=["*"])
prob.set_solver_print(level=0)
prob.driver = NLoptDriver()
prob.driver.options["optimizer"] = "LD_SLSQP"
prob.driver.options["tol"] = 1e-9
model.add_design_var("x", lower=-50.0, upper=50.0)
model.add_design_var("y", lower=-50.0, upper=50.0)
model.add_objective("f_xy")
model.add_constraint("c", lower=15.0)
prob.setup()
failed = prob.run_driver()
# Minimum should be at (7.166667, -7.833334)
assert_near_equal(prob["x"], 7.16667, 1e-6)
assert_near_equal(prob["y"], -7.833334, 1e-6)
def test_simple_paraboloid_equality(self):
prob = om.Problem()
model = prob.model
model.add_subsystem("p1", om.IndepVarComp("x", 50.0), promotes=["*"])
model.add_subsystem("p2", om.IndepVarComp("y", 50.0), promotes=["*"])
model.add_subsystem("comp", Paraboloid(), promotes=["*"])
model.add_subsystem("con", om.ExecComp("c = - x + y"), promotes=["*"])
prob.set_solver_print(level=0)
prob.driver = NLoptDriver()
prob.driver.options["optimizer"] = "LD_SLSQP"
prob.driver.options["tol"] = 1e-9
model.add_design_var("x", lower=-50.0, upper=50.0)
model.add_design_var("y", lower=-50.0, upper=50.0)
model.add_objective("f_xy")
model.add_constraint("c", equals=-15.0)
prob.setup()
failed = prob.run_driver()
# Minimum should be at (7.166667, -7.833334)
assert_near_equal(prob["x"], 7.16667, 1e-4)
assert_near_equal(prob["y"], -7.833334, 1e-4)
def test_missing_objective(self):
prob = om.Problem()
model = prob.model
model.add_subsystem("x", om.IndepVarComp("x", 2.0), promotes=["*"])
model.add_subsystem("f_x", Paraboloid(), promotes=["*"])
prob.driver = NLoptDriver()
prob.driver.options["optimizer"] = "LD_SLSQP"
prob.model.add_design_var("x", lower=0)
prob.setup()
with self.assertRaises(Exception) as raises_msg:
prob.run_driver()
exception = raises_msg.exception
msg = "Driver requires objective to be declared"
self.assertEqual(exception.args[0], msg)
def test_simple_paraboloid_double_sided_low(self):
prob = om.Problem()
model = prob.model
model.add_subsystem("p1", om.IndepVarComp("x", 50.0), promotes=["*"])
model.add_subsystem("p2", om.IndepVarComp("y", 50.0), promotes=["*"])
model.add_subsystem("comp", Paraboloid(), promotes=["*"])
model.add_subsystem("con", om.ExecComp("c = - x + y"), promotes=["*"])
prob.set_solver_print(level=0)
prob.driver = NLoptDriver()
prob.driver.options["optimizer"] = "LD_SLSQP"
prob.driver.options["tol"] = 1e-9
model.add_design_var("x", lower=-50.0, upper=50.0)
model.add_design_var("y", lower=-50.0, upper=50.0)
model.add_objective("f_xy")
model.add_constraint("c", lower=-11.0, upper=-10.0)
prob.setup()
failed = prob.run_driver()
assert_near_equal(prob["y"] - prob["x"], -11.0, 1e-6)
def test_simple_paraboloid_double_sided_high(self):
prob = om.Problem()
model = prob.model
model.add_subsystem("p1", om.IndepVarComp("x", 50.0), promotes=["*"])
model.add_subsystem("p2", om.IndepVarComp("y", 50.0), promotes=["*"])
model.add_subsystem("comp", Paraboloid(), promotes=["*"])
model.add_subsystem("con", om.ExecComp("c = x - y"), promotes=["*"])
prob.set_solver_print(level=0)
prob.driver = NLoptDriver()
prob.driver.options["optimizer"] = "LD_SLSQP"
prob.driver.options["tol"] = 1e-9
model.add_design_var("x", lower=-50.0, upper=50.0)
model.add_design_var("y", lower=-50.0, upper=50.0)
model.add_objective("f_xy")
model.add_constraint("c", lower=10.0, upper=11.0)
prob.setup()
failed = prob.run_driver()
assert_near_equal(prob["x"] - prob["y"], 11.0, 1e-6)
def test_simple_array_comp2D(self):
prob = om.Problem()
model = prob.model
model.add_subsystem(
"p1", om.IndepVarComp("widths", np.zeros((2, 2))), promotes=["*"]
)
model.add_subsystem("comp", TestExplCompArrayDense(), promotes=["*"])
model.add_subsystem(
"con",
om.ExecComp("c = areas - 20.0", c=np.zeros((2, 2)), areas=np.zeros((2, 2))),
promotes=["*"],
)
model.add_subsystem(
"obj",
om.ExecComp("o = areas[0, 0]", areas=np.zeros((2, 2))),
promotes=["*"],
)
prob.set_solver_print(level=0)
prob.driver = NLoptDriver()
prob.driver.options["optimizer"] = "LD_SLSQP"
prob.driver.options["tol"] = 1e-9
model.add_design_var("widths", lower=-50.0, upper=50.0)
model.add_objective("o")
model.add_constraint("c", equals=0.0)
prob.setup()
failed = prob.run_driver()
obj = prob["o"]
assert_near_equal(obj, 20.0, 1e-6)
def test_simple_array_comp2D_eq_con(self):
prob = om.Problem()
model = prob.model
model.add_subsystem(
"p1", om.IndepVarComp("widths", np.zeros((2, 2))), promotes=["*"]
)
model.add_subsystem("comp", TestExplCompArrayDense(), promotes=["*"])
model.add_subsystem(
"obj",
om.ExecComp("o = areas[0, 0] + areas[1, 1]", areas=np.zeros((2, 2))),
promotes=["*"],
)
prob.set_solver_print(level=0)
prob.driver = NLoptDriver()
prob.driver.options["optimizer"] = "LD_SLSQP"
prob.driver.options["tol"] = 1e-9
model.add_design_var("widths", lower=-50.0, upper=50.0)
model.add_objective("o")
model.add_constraint("areas", equals=np.array([24.0, 21.0, 3.5, 17.5]))
prob.setup()
failed = prob.run_driver()
obj = prob["o"]
assert_near_equal(obj, 41.5, 1e-6)
def test_simple_array_comp2D_dbl_sided_con(self):
prob = om.Problem()
model = prob.model
model.add_subsystem(
"p1", om.IndepVarComp("widths", np.zeros((2, 2))), promotes=["*"]
)
model.add_subsystem("comp", TestExplCompArrayDense(), promotes=["*"])
model.add_subsystem(
"obj",
om.ExecComp("o = areas[0, 0]", areas=np.zeros((2, 2))),
promotes=["*"],
)
prob.set_solver_print(level=0)
prob.driver = NLoptDriver()
prob.driver.options["optimizer"] = "LD_SLSQP"
prob.driver.options["tol"] = 1e-9
model.add_design_var("widths", lower=-50.0, upper=50.0)
model.add_objective("o")
model.add_constraint(
"areas",
lower=np.array([24.0, 21.0, 3.5, 17.5]),
upper=np.array([24.0, 21.0, 3.5, 17.5]),
)
prob.setup()
failed = prob.run_driver()
con = prob["areas"]
assert_near_equal(con, np.array([[24.0, 21.0], [3.5, 17.5]]), 1e-6)
def test_simple_array_comp2D_dbl_sided_con_array(self):
prob = om.Problem()
model = prob.model
model.add_subsystem(
"p1", om.IndepVarComp("widths", np.zeros((2, 2))), promotes=["*"]
)
model.add_subsystem("comp", TestExplCompArrayDense(), promotes=["*"])
model.add_subsystem(
"obj",
om.ExecComp("o = areas[0, 0]", areas=np.zeros((2, 2))),
promotes=["*"],
)
prob.set_solver_print(level=0)
prob.driver = NLoptDriver()
prob.driver.options["optimizer"] = "LD_SLSQP"
prob.driver.options["tol"] = 1e-9
model.add_design_var("widths", lower=-50.0, upper=50.0)
model.add_objective("o")
model.add_constraint("areas", lower=20.0, upper=20.0)
prob.setup()
failed = prob.run_driver()
obj = prob["o"]
assert_near_equal(obj, 20.0, 1e-6)
def test_simple_array_comp2D_array_lo_hi(self):
prob = om.Problem()
model = prob.model
model.add_subsystem(
"p1", om.IndepVarComp("widths", np.zeros((2, 2))), promotes=["*"]
)
model.add_subsystem("comp", TestExplCompArrayDense(), promotes=["*"])
model.add_subsystem(
"con",
om.ExecComp("c = areas - 20.0", c=np.zeros((2, 2)), areas=np.zeros((2, 2))),
promotes=["*"],
)
model.add_subsystem(
"obj",
om.ExecComp("o = areas[0, 0]", areas=np.zeros((2, 2))),
promotes=["*"],
)
prob.set_solver_print(level=0)
prob.driver = NLoptDriver()
prob.driver.options["optimizer"] = "LD_SLSQP"
prob.driver.options["tol"] = 1e-9
model.add_design_var(
"widths", lower=-50.0 * np.ones((2, 2)), upper=50.0 * np.ones((2, 2))
)
model.add_objective("o")
model.add_constraint("c", equals=0.0)
prob.setup()
failed = prob.run_driver()
obj = prob["o"]
assert_near_equal(obj, 20.0, 1e-6)
def test_simple_paraboloid_scaled_desvars_fwd(self):
prob = om.Problem()
model = prob.model
model.add_subsystem("p1", om.IndepVarComp("x", 50.0), promotes=["*"])
model.add_subsystem("p2", om.IndepVarComp("y", 50.0), promotes=["*"])
model.add_subsystem("comp", Paraboloid(), promotes=["*"])
model.add_subsystem("con", om.ExecComp("c = x - y"), promotes=["*"])
prob.set_solver_print(level=0)
prob.driver = NLoptDriver()
prob.driver.options["optimizer"] = "LD_SLSQP"
prob.driver.options["tol"] = 1e-9
model.add_design_var("x", lower=-50.0, upper=50.0, ref=0.02)
model.add_design_var("y", lower=-50.0, upper=50.0, ref=0.02)
model.add_objective("f_xy")
model.add_constraint("c", lower=10.0, upper=11.0)
prob.setup(check=False, mode="fwd")
failed = prob.run_driver()
assert_near_equal(prob["x"] - prob["y"], 11.0, 1e-6)
def test_simple_paraboloid_scaled_desvars_rev(self):
prob = om.Problem()
model = prob.model
model.add_subsystem("p1", om.IndepVarComp("x", 50.0), promotes=["*"])
model.add_subsystem("p2", om.IndepVarComp("y", 50.0), promotes=["*"])
model.add_subsystem("comp", Paraboloid(), promotes=["*"])
model.add_subsystem("con", om.ExecComp("c = x - y"), promotes=["*"])
prob.set_solver_print(level=0)
prob.driver = NLoptDriver()
prob.driver.options["optimizer"] = "LD_SLSQP"
prob.driver.options["tol"] = 1e-9
model.add_design_var("x", lower=-50.0, upper=50.0, ref=0.02)
model.add_design_var("y", lower=-50.0, upper=50.0, ref=0.02)
model.add_objective("f_xy")
model.add_constraint("c", lower=10.0, upper=11.0)
prob.setup(check=False, mode="rev")
failed = prob.run_driver()
assert_near_equal(prob["x"] - prob["y"], 11.0, 1e-6)
def test_simple_paraboloid_scaled_constraint_fwd(self):
prob = om.Problem()
model = prob.model
model.add_subsystem("p1", om.IndepVarComp("x", 50.0), promotes=["*"])
model.add_subsystem("p2", om.IndepVarComp("y", 50.0), promotes=["*"])
model.add_subsystem("comp", Paraboloid(), promotes=["*"])
model.add_subsystem("con", om.ExecComp("c = x - y"), promotes=["*"])
prob.set_solver_print(level=0)
prob.driver = NLoptDriver()
prob.driver.options["optimizer"] = "LD_SLSQP"
prob.driver.options["tol"] = 1e-8
model.add_design_var("x", lower=-50.0, upper=50.0)
model.add_design_var("y", lower=-50.0, upper=50.0)
model.add_objective("f_xy")
model.add_constraint("c", lower=10.0, upper=11.0, ref=10.0)
prob.setup(check=False, mode="fwd")
failed = prob.run_driver()
assert_near_equal(prob["x"] - prob["y"], 11.0, 1e-6)
def test_simple_paraboloid_scaled_objective_fwd(self):
prob = om.Problem()
model = prob.model
prob.set_solver_print(level=0)
model.add_subsystem("p1", om.IndepVarComp("x", 50.0), promotes=["*"])
model.add_subsystem("p2", om.IndepVarComp("y", 50.0), promotes=["*"])
model.add_subsystem("comp", Paraboloid(), promotes=["*"])
model.add_subsystem("con", om.ExecComp("c = x - y"), promotes=["*"])
prob.driver = NLoptDriver()
prob.driver.options["optimizer"] = "LD_SLSQP"
prob.driver.options["tol"] = 1e-9
model.add_design_var("x", lower=-50.0, upper=50.0)
model.add_design_var("y", lower=-50.0, upper=50.0)
model.add_objective("f_xy", ref=10.0)
model.add_constraint("c", lower=10.0, upper=11.0)
prob.setup(check=False, mode="fwd")
failed = prob.run_driver()
assert_near_equal(prob["x"] - prob["y"], 11.0, 1e-6)
def test_simple_paraboloid_scaled_objective_rev(self):
prob = om.Problem()
model = prob.model
prob.set_solver_print(level=0)
model.add_subsystem("p1", om.IndepVarComp("x", 50.0), promotes=["*"])
model.add_subsystem("p2", om.IndepVarComp("y", 50.0), promotes=["*"])
model.add_subsystem("comp", Paraboloid(), promotes=["*"])
model.add_subsystem("con", om.ExecComp("c = x - y"), promotes=["*"])
prob.driver = NLoptDriver()
prob.driver.options["optimizer"] = "LD_SLSQP"
prob.driver.options["tol"] = 1e-9
model.add_design_var("x", lower=-50.0, upper=50.0)
model.add_design_var("y", lower=-50.0, upper=50.0)
model.add_objective("f_xy", ref=10.0)
model.add_constraint("c", lower=10.0, upper=11.0)
prob.setup(check=False, mode="rev")
failed = prob.run_driver()
assert_near_equal(prob["x"] - prob["y"], 11.0, 1e-6)
def test_sellar_mdf(self):
prob = om.Problem()
model = prob.model = SellarDerivativesGrouped()
prob.set_solver_print(level=0)
prob.driver = NLoptDriver()
prob.driver.options["optimizer"] = "LD_SLSQP"
prob.driver.options["tol"] = 1e-9
model.add_design_var(
"z", lower=np.array([-10.0, 0.0]), upper=np.array([10.0, 10.0])
)
model.add_design_var("x", lower=0.0, upper=10.0)
model.add_objective("obj")
model.add_constraint("con1", upper=0.0)
model.add_constraint("con2", upper=0.0)
prob.setup(check=False, mode="rev")
failed = prob.run_driver()
assert_near_equal(prob["z"][0], 1.9776, 1e-3)
assert_near_equal(prob["z"][1], 0.0, 1e-3)
assert_near_equal(prob["x"], 0.0, 1e-3)
def test_bug_in_eq_constraints(self):
# We were getting extra constraints created because lower and upper are maxfloat instead of
# None when unused.
p = om.Problem(model=SineFitter())
p.driver = NLoptDriver()
p.setup()
p.run_driver()
max_defect = np.max(np.abs(p["defect.defect"]))
assert_near_equal(max_defect, 0.0, 1e-10)
    def test_reraise_exception_from_callbacks(self):
        """Exceptions raised inside driver objective/gradient callbacks must
        propagate to the caller instead of being swallowed by NLopt."""
        class ReducedActuatorDisc(om.ExplicitComponent):
            def setup(self):
                # Inputs
                self.add_input("a", 0.5, desc="Induced Velocity Factor")
                self.add_input(
                    "Vu",
                    10.0,
                    units="m/s",
                    desc="Freestream air velocity, upstream of rotor",
                )
                # Outputs
                self.add_output(
                    "Vd",
                    0.0,
                    units="m/s",
                    desc="Slipstream air velocity, downstream of rotor",
                )
            def compute(self, inputs, outputs):
                a = inputs["a"]
                Vu = inputs["Vu"]
                outputs["Vd"] = Vu * (1 - 2 * a)
            def compute_partials(self, inputs, J):
                # NOTE(review): this writes J["Vd", "a"] without a matching
                # declare_partials call in setup(), which is what triggers the
                # KeyError asserted below.
                Vu = inputs["Vu"]
                J["Vd", "a"] = -2.0 * Vu
        prob = om.Problem()
        indeps = prob.model.add_subsystem("indeps", om.IndepVarComp(), promotes=["*"])
        indeps.add_output("a", 0.5)
        indeps.add_output("Vu", 10.0, units="m/s")
        prob.model.add_subsystem(
            "a_disk", ReducedActuatorDisc(), promotes_inputs=["a", "Vu"]
        )
        # setup the optimization
        prob.driver = NLoptDriver()
        prob.driver.options["optimizer"] = "LD_SLSQP"
        prob.model.add_design_var("a", lower=0.0, upper=1.0)
        # negative one so we maximize the objective
        prob.model.add_objective("a_disk.Vd", scaler=-1)
        prob.setup()
        # The undeclared partial above must surface as a KeyError from run_driver.
        with self.assertRaises(KeyError) as context:
            prob.run_driver()
        msg = 'Variable name pair ("Vd", "a") must first be declared.'
        self.assertTrue(msg in str(context.exception))
def test_simple_paraboloid_upper_LN_COBYLA(self):
prob = om.Problem()
model = prob.model
model.add_subsystem("p1", om.IndepVarComp("x", 50.0), promotes=["*"])
model.add_subsystem("p2", om.IndepVarComp("y", 50.0), promotes=["*"])
model.add_subsystem("comp", Paraboloid(), promotes=["*"])
model.add_subsystem("con", om.ExecComp("c = - x + y"), promotes=["*"])
prob.set_solver_print(level=0)
prob.driver = NLoptDriver()
prob.driver.options["optimizer"] = "LN_COBYLA"
prob.driver.options["tol"] = 1e-12
model.add_design_var("x", lower=-50.0, upper=50.0)
model.add_design_var("y", lower=-50.0, upper=50.0)
model.add_objective("f_xy")
model.add_constraint("c", upper=-15.0)
prob.setup()
failed = prob.run_driver()
# Minimum should be at (7.166667, -7.833334)
assert_near_equal(prob["x"], 7.16667, 1e-6)
assert_near_equal(prob["y"], -7.833334, 1e-6)
def test_sellar_mdf_LN_COBYLA(self):
prob = om.Problem()
model = prob.model = SellarDerivativesGrouped()
prob.driver = NLoptDriver()
prob.driver.options["optimizer"] = "LN_COBYLA"
prob.driver.options["tol"] = 1e-12
prob.set_solver_print(level=0)
model.add_design_var(
"z", lower=np.array([-10.0, 0.0]), upper=np.array([10.0, 10.0])
)
model.add_design_var("x", lower=0.0, upper=10.0)
model.add_objective("obj")
model.add_constraint("con1", upper=0.0)
model.add_constraint("con2", upper=0.0)
prob.setup(check=False, mode="rev")
failed = prob.run_driver()
assert_near_equal(prob["z"][0], 1.9776, 1e-3)
assert_near_equal(prob["z"][1], 0.0, 1e-3)
assert_near_equal(prob["x"], 0.0, 1e-3)
def test_simple_paraboloid_lower_linear(self):
prob = om.Problem()
model = prob.model
model.add_subsystem("p1", om.IndepVarComp("x", 50.0), promotes=["*"])
model.add_subsystem("p2", om.IndepVarComp("y", 50.0), promotes=["*"])
model.add_subsystem("comp", Paraboloid(), promotes=["*"])
model.add_subsystem("con", om.ExecComp("c = x - y"), promotes=["*"])
prob.set_solver_print(level=0)
prob.driver = NLoptDriver()
prob.driver.options["optimizer"] = "LD_SLSQP"
prob.driver.options["tol"] = 1e-9
model.add_design_var("x", lower=-50.0, upper=50.0)
model.add_design_var("y", lower=-50.0, upper=50.0)
model.add_objective("f_xy")
model.add_constraint("c", lower=15.0, linear=True)
prob.setup()
failed = prob.run_driver()
# Minimum should be at (7.166667, -7.833334)
assert_near_equal(prob["x"], 7.16667, 1e-6)
assert_near_equal(prob["y"], -7.833334, 1e-6)
self.assertEqual(prob.driver._obj_and_nlcons, ["comp.f_xy"])
def test_simple_paraboloid_equality_linear(self):
prob = om.Problem()
model = prob.model
model.add_subsystem("p1", om.IndepVarComp("x", 50.0), promotes=["*"])
model.add_subsystem("p2", om.IndepVarComp("y", 50.0), promotes=["*"])
model.add_subsystem("comp", Paraboloid(), promotes=["*"])
model.add_subsystem("con", om.ExecComp("c = - x + y"), promotes=["*"])
prob.set_solver_print(level=0)
prob.driver = NLoptDriver()
prob.driver.options["optimizer"] = "LD_SLSQP"
prob.driver.options["tol"] = 1e-9
model.add_design_var("x", lower=-50.0, upper=50.0)
model.add_design_var("y", lower=-50.0, upper=50.0)
model.add_objective("f_xy")
model.add_constraint("c", equals=-15.0, linear=True)
prob.setup()
failed = prob.run_driver()
# Minimum should be at (7.166667, -7.833334)
assert_near_equal(prob["x"], 7.16667, 1e-6)
assert_near_equal(prob["y"], -7.833334, 1e-6)
def test_debug_print_option_totals(self):
prob = om.Problem()
model = prob.model
model.add_subsystem("p1", om.IndepVarComp("x", 50.0), promotes=["*"])
model.add_subsystem("p2", om.IndepVarComp("y", 50.0), promotes=["*"])
model.add_subsystem("comp", Paraboloid(), promotes=["*"])
model.add_subsystem("con", om.ExecComp("c = - x + y"), promotes=["*"])
prob.set_solver_print(level=0)
prob.driver = NLoptDriver()
prob.driver.options["optimizer"] = "LD_SLSQP"
prob.driver.options["tol"] = 1e-9
prob.driver.options["debug_print"] = ["totals"]
model.add_design_var("x", lower=-50.0, upper=50.0)
model.add_design_var("y", lower=-50.0, upper=50.0)
model.add_objective("f_xy")
model.add_constraint("c", upper=-15.0)
prob.setup(check=False, mode="rev")
failed, output = run_driver(prob)
self.assertTrue(
"In mode: rev, Solving variable(s) using simul coloring:" in output
)
self.assertTrue("('comp.f_xy', [0])" in output)
self.assertTrue("Elapsed Time:" in output)
prob = om.Problem()
model = prob.model
model.add_subsystem("p1", om.IndepVarComp("x", 50.0), promotes=["*"])
model.add_subsystem("p2", om.IndepVarComp("y", 50.0), promotes=["*"])
model.add_subsystem("comp", Paraboloid(), promotes=["*"])
model.add_subsystem("con", om.ExecComp("c = - x + y"), promotes=["*"])
prob.set_solver_print(level=0)
prob.driver = NLoptDriver()
prob.driver.options["optimizer"] = "LD_SLSQP"
prob.driver.options["tol"] = 1e-9
prob.driver.options["debug_print"] = ["totals"]
model.add_design_var("x", lower=-50.0, upper=50.0)
model.add_design_var("y", lower=-50.0, upper=50.0)
model.add_objective("f_xy")
model.add_constraint("c", upper=-15.0)
prob.setup(check=False, mode="fwd")
failed, output = run_driver(prob)
self.assertTrue(
"In mode: fwd, Solving variable(s) using simul coloring:" in output
)
self.assertTrue("('p1.x', [0])" in output)
self.assertTrue("Elapsed Time:" in output)
def test_debug_print_option(self):
prob = om.Problem()
model = prob.model
model.add_subsystem("p1", om.IndepVarComp("x", 50.0), promotes=["*"])
model.add_subsystem("p2", om.IndepVarComp("y", 50.0), promotes=["*"])
model.add_subsystem("comp", Paraboloid(), promotes=["*"])
model.add_subsystem("con", om.ExecComp("c = - x + y"), promotes=["*"])
prob.set_solver_print(level=0)
prob.driver = NLoptDriver()
prob.driver.options["optimizer"] = "LD_SLSQP"
prob.driver.options["tol"] = 1e-9
prob.driver.options["debug_print"] = ["desvars", "ln_cons", "nl_cons", "objs"]
model.add_design_var("x", lower=-50.0, upper=50.0)
model.add_design_var("y", lower=-50.0, upper=50.0)
model.add_objective("f_xy")
model.add_constraint("c", upper=-15.0)
prob.setup()
failed, output = run_driver(prob)
output = output.split("\n")
self.assertTrue(
output.count("Design Vars") > 1,
"Should be more than one design vars header printed",
)
self.assertTrue(
output.count("Nonlinear constraints") > 1,
"Should be more than one nonlinear constraint header printed",
)
self.assertTrue(
output.count("Linear constraints") > 1,
"Should be more than one linear constraint header printed",
)
self.assertTrue(
output.count("Objectives") > 1,
"Should be more than one objective header printed",
)
self.assertTrue(
len([s for s in output if s.startswith("{'p1.x")]) > 1,
"Should be more than one p1.x printed",
)
self.assertTrue(
len([s for s in output if "'p2.y'" in s]) > 1,
"Should be more than one p2.y printed",
)
self.assertTrue(
len([s for s in output if s.startswith("{'con.c")]) > 1,
"Should be more than one con.c printed",
)
self.assertTrue(
len([s for s in output if s.startswith("{'comp.f_xy")]) > 1,
"Should be more than one comp.f_xy printed",
)
def test_sellar_mdf_linear_con_directsolver(self):
# This test makes sure that we call solve_nonlinear first if we have any linear constraints
# to cache.
prob = om.Problem()
model = prob.model = SellarDerivatives()
prob.driver = NLoptDriver()
prob.driver.options["optimizer"] = "LD_SLSQP"
prob.driver.options["tol"] = 1e-3
model.add_design_var(
"z", lower=np.array([-10.0, 0.0]), upper=np.array([10.0, 10.0])
)
model.add_design_var("x", lower=0.0, upper=10.0)
model.add_objective("obj")
model.add_constraint("con1", upper=0.0)
model.add_constraint("con2", upper=0.0)
model.add_constraint("x", upper=11.0, linear=True)
prob.setup(check=False, mode="rev")
prob.set_solver_print(level=0)
failed = prob.run_driver()
assert_near_equal(prob["z"][0], 1.9776, 1e-3)
assert_near_equal(prob["z"][1], 0.0, 1e-3)
assert_near_equal(prob["x"], 0.0, 4e-3)
self.assertEqual(len(prob.driver._lincongrad_cache), 1)
# Piggyback test: make sure we can run the driver again as a subdriver without a keyerror.
prob.driver.run()
self.assertEqual(len(prob.driver._lincongrad_cache), 1)
def test_call_final_setup(self):
# Make sure we call final setup if our model hasn't been setup.
prob = om.Problem()
model = prob.model
model.add_subsystem("p1", om.IndepVarComp("x", 50.0), promotes=["*"])
model.add_subsystem("p2", om.IndepVarComp("y", 50.0), promotes=["*"])
model.add_subsystem("comp", Paraboloid(), promotes=["*"])
model.add_subsystem("con", om.ExecComp("c = - x + y"), promotes=["*"])
prob.set_solver_print(level=0)
prob.driver = NLoptDriver()
prob.driver.options["optimizer"] = "LD_SLSQP"
prob.driver.options["tol"] = 1e-9
model.add_design_var("x", lower=-50.0, upper=50.0)
model.add_design_var("y", lower=-50.0, upper=50.0)
model.add_objective("f_xy")
model.add_constraint("c", equals=-15.0)
prob.setup()
with self.assertRaises(RuntimeError) as cm:
totals = prob.check_totals(method="fd", out_stream=False)
expected_msg = (
"Problem: run_model must be called before total derivatives can be checked."
)
self.assertEqual(expected_msg, str(cm.exception))
def test_LN_COBYLA_linear_constraint(self):
    """Regression test: linear desvar-bound constraints with a gradient-free optimizer."""
    # Bug where NLoptDriver tried to compute and cache the constraint derivatives for the
    # lower and upper bounds of the desvars even though we were using a non-gradient optimizer.
    # This caused a KeyError.
    prob = om.Problem()
    indeps = prob.model.add_subsystem("indeps", om.IndepVarComp())
    indeps.add_output("x", 3.0)
    indeps.add_output("y", -4.0)
    prob.model.add_subsystem("parab", Paraboloid())
    prob.model.add_subsystem("const", om.ExecComp("g = x + y"))
    prob.model.connect("indeps.x", ["parab.x", "const.x"])
    prob.model.connect("indeps.y", ["parab.y", "const.y"])
    prob.driver = NLoptDriver()
    prob.driver.options["optimizer"] = "LN_COBYLA"
    prob.model.add_constraint("const.g", lower=0, upper=10.0)
    # Scaled design variables (ref0/ref) exercise the driver's scaling path.
    prob.model.add_design_var(
        "indeps.x", **{"ref0": 0, "ref": 2, "lower": -50, "upper": 50}
    )
    prob.model.add_design_var(
        "indeps.y", **{"ref0": 0, "ref": 2, "lower": -50, "upper": 50}
    )
    prob.model.add_objective("parab.f_xy", scaler=4.0)
    prob.setup()
    prob.run_driver()
    # minimum value
    assert_near_equal(prob["parab.f_xy"], -27, 1e-6)
def test_multiple_objectives_error(self):
    """A second objective must raise, since NLoptDriver supports only one."""
    import openmdao.api as om
    from openmdao.test_suite.components.paraboloid import Paraboloid

    prob = om.Problem()
    prob.model.add_subsystem("p1", om.IndepVarComp("x", 50.0), promotes=["*"])
    prob.model.add_subsystem("p2", om.IndepVarComp("y", 50.0), promotes=["*"])
    prob.model.add_subsystem("comp", Paraboloid(), promotes=["*"])
    prob.model.add_subsystem("con", om.ExecComp("c = - x + y"), promotes=["*"])

    prob.set_solver_print(level=0)

    driver = NLoptDriver()
    driver.options["optimizer"] = "LD_SLSQP"
    driver.options["tol"] = 1e-9
    prob.driver = driver

    # The driver itself advertises that it cannot handle multiple objectives.
    self.assertFalse(driver.supports["multiple_objectives"])
    driver.options["debug_print"] = ["nl_cons", "objs"]

    prob.model.add_design_var("x", lower=-50.0, upper=50.0)
    prob.model.add_design_var("y", lower=-50.0, upper=50.0)
    prob.model.add_objective("f_xy")
    prob.model.add_objective("c")  # Second objective

    prob.setup()

    with self.assertRaises(RuntimeError):
        prob.run_model()
    with self.assertRaises(RuntimeError):
        prob.run_driver()
def test_GN_DIRECT(self):
    """Drive the multimodal Rastrigin function to its global minimum with GN_DIRECT."""
    import openmdao.api as om

    np.random.seed(6)

    num_dv = 2  # size of the design variable

    class Rastrigin(om.ExplicitComponent):
        def setup(self):
            self.add_input("x", 0.5 * np.ones(num_dv))
            self.add_output("f", 0.5)

        def compute(
            self, inputs, outputs, discrete_inputs=None, discrete_outputs=None
        ):
            outputs["f"] = rastrigin(inputs["x"])

    prob = om.Problem()
    prob.model.add_subsystem(
        "indeps", om.IndepVarComp("x", np.ones(num_dv)), promotes=["*"]
    )
    prob.model.add_subsystem("rastrigin", Rastrigin(), promotes=["*"])

    prob.driver = NLoptDriver()
    prob.driver.options["optimizer"] = "GN_DIRECT"
    prob.driver.options["maxiter"] = 1000

    prob.model.add_design_var(
        "x", lower=-5.12 * np.ones(num_dv), upper=5.12 * np.ones(num_dv)
    )
    prob.model.add_objective("f")

    prob.setup()
    prob.run_driver()

    # Global minimum of Rastrigin is f(0) = 0.
    assert_near_equal(prob["x"], np.zeros(num_dv), 1e-5)
    assert_near_equal(prob["f"], 0.0, 1e-5)
def test_GN_DIRECT_L(self):
    """Drive the multimodal Rastrigin function to its global minimum with GN_DIRECT_L."""
    import openmdao.api as om

    np.random.seed(6)

    num_dv = 2  # size of the design variable

    class Rastrigin(om.ExplicitComponent):
        def setup(self):
            self.add_input("x", 0.5 * np.ones(num_dv))
            self.add_output("f", 0.5)

        def compute(
            self, inputs, outputs, discrete_inputs=None, discrete_outputs=None
        ):
            outputs["f"] = rastrigin(inputs["x"])

    prob = om.Problem()
    prob.model.add_subsystem(
        "indeps", om.IndepVarComp("x", np.ones(num_dv)), promotes=["*"]
    )
    prob.model.add_subsystem("rastrigin", Rastrigin(), promotes=["*"])

    prob.driver = NLoptDriver()
    prob.driver.options["optimizer"] = "GN_DIRECT_L"
    prob.driver.options["maxiter"] = 500

    prob.model.add_design_var(
        "x", lower=-5.12 * np.ones(num_dv), upper=5.12 * np.ones(num_dv)
    )
    prob.model.add_objective("f")

    prob.setup()
    prob.run_driver()

    # Global minimum of Rastrigin is f(0) = 0.
    assert_near_equal(prob["x"], np.zeros(num_dv), 1e-6)
    assert_near_equal(prob["f"], 0.0, 1e-6)
def test_GN_DIRECT_L_NOSCAL(self):
    """Drive the multimodal Rastrigin function to its minimum with GN_DIRECT_L_NOSCAL."""
    import openmdao.api as om

    np.random.seed(6)

    num_dv = 2  # size of the design variable

    class Rastrigin(om.ExplicitComponent):
        def setup(self):
            self.add_input("x", 0.5 * np.ones(num_dv))
            self.add_output("f", 0.5)

        def compute(
            self, inputs, outputs, discrete_inputs=None, discrete_outputs=None
        ):
            outputs["f"] = rastrigin(inputs["x"])

    prob = om.Problem()
    prob.model.add_subsystem(
        "indeps", om.IndepVarComp("x", np.ones(num_dv)), promotes=["*"]
    )
    prob.model.add_subsystem("rastrigin", Rastrigin(), promotes=["*"])

    prob.driver = NLoptDriver()
    prob.driver.options["optimizer"] = "GN_DIRECT_L_NOSCAL"
    prob.driver.options["maxiter"] = 500

    prob.model.add_design_var(
        "x", lower=-5.12 * np.ones(num_dv), upper=5.12 * np.ones(num_dv)
    )
    prob.model.add_objective("f")

    prob.setup()
    prob.run_driver()

    # Global minimum of Rastrigin is f(0) = 0.
    assert_near_equal(prob["x"], np.zeros(num_dv), 1e-6)
    assert_near_equal(prob["f"], 0.0, 1e-6)
def test_GN_ORIG_DIRECT(self):
    """Drive the multimodal Rastrigin function to its minimum with GN_ORIG_DIRECT."""
    import openmdao.api as om

    np.random.seed(6)

    num_dv = 2  # size of the design variable

    class Rastrigin(om.ExplicitComponent):
        def setup(self):
            self.add_input("x", 0.5 * np.ones(num_dv))
            self.add_output("f", 0.5)

        def compute(
            self, inputs, outputs, discrete_inputs=None, discrete_outputs=None
        ):
            outputs["f"] = rastrigin(inputs["x"])

    prob = om.Problem()
    prob.model.add_subsystem(
        "indeps", om.IndepVarComp("x", np.ones(num_dv)), promotes=["*"]
    )
    prob.model.add_subsystem("rastrigin", Rastrigin(), promotes=["*"])

    prob.driver = NLoptDriver()
    prob.driver.options["optimizer"] = "GN_ORIG_DIRECT"
    prob.driver.options["maxiter"] = 1000

    prob.model.add_design_var(
        "x", lower=-5.12 * np.ones(num_dv), upper=5.12 * np.ones(num_dv)
    )
    prob.model.add_objective("f")

    prob.setup()
    prob.run_driver()

    # Global minimum of Rastrigin is f(0) = 0.
    assert_near_equal(prob["x"], np.zeros(num_dv), 1e-5)
    assert_near_equal(prob["f"], 0.0, 1e-5)
def test_GN_ORIG_DIRECT_L(self):
    """Drive the multimodal Rastrigin function to its minimum with GN_ORIG_DIRECT_L."""
    import openmdao.api as om

    np.random.seed(6)

    num_dv = 2  # size of the design variable

    class Rastrigin(om.ExplicitComponent):
        def setup(self):
            self.add_input("x", 0.5 * np.ones(num_dv))
            self.add_output("f", 0.5)

        def compute(
            self, inputs, outputs, discrete_inputs=None, discrete_outputs=None
        ):
            outputs["f"] = rastrigin(inputs["x"])

    prob = om.Problem()
    prob.model.add_subsystem(
        "indeps", om.IndepVarComp("x", np.ones(num_dv)), promotes=["*"]
    )
    prob.model.add_subsystem("rastrigin", Rastrigin(), promotes=["*"])

    prob.driver = NLoptDriver()
    prob.driver.options["optimizer"] = "GN_ORIG_DIRECT_L"
    prob.driver.options["maxiter"] = 500

    prob.model.add_design_var(
        "x", lower=-5.12 * np.ones(num_dv), upper=5.12 * np.ones(num_dv)
    )
    prob.model.add_objective("f")

    prob.setup()
    prob.run_driver()

    # Global minimum of Rastrigin is f(0) = 0.
    assert_near_equal(prob["x"], np.zeros(num_dv), 1e-6)
    assert_near_equal(prob["f"], 0.0, 1e-6)
def test_simple_paraboloid_upper_LD_MMA(self):
    """Minimize the Paraboloid with an upper-bound constraint using LD_MMA."""
    prob = om.Problem()
    model = prob.model

    model.add_subsystem("p1", om.IndepVarComp("x", 50.0), promotes=["*"])
    model.add_subsystem("p2", om.IndepVarComp("y", 50.0), promotes=["*"])
    model.add_subsystem("comp", Paraboloid(), promotes=["*"])
    model.add_subsystem("con", om.ExecComp("c = - x + y"), promotes=["*"])

    prob.set_solver_print(level=0)

    prob.driver = NLoptDriver()
    prob.driver.options["optimizer"] = "LD_MMA"
    prob.driver.options["tol"] = 1e-9

    model.add_design_var("x", lower=-50.0, upper=50.0)
    model.add_design_var("y", lower=-50.0, upper=50.0)
    model.add_objective("f_xy")
    model.add_constraint("c", upper=-15.0)

    prob.setup()

    # Return value was previously bound to an unused local; call directly.
    prob.run_driver()

    # Constrained minimum is at (7.166667, -7.833334).
    assert_near_equal(prob["x"], 7.1666666, 1e-6)
    assert_near_equal(prob["y"], -7.83333333, 1e-6)
def test_simple_paraboloid_upper_LD_CCSAQ(self):
    """Minimize the Paraboloid with an upper-bound constraint using LD_CCSAQ."""
    prob = om.Problem()
    model = prob.model

    model.add_subsystem("p1", om.IndepVarComp("x", 50.0), promotes=["*"])
    model.add_subsystem("p2", om.IndepVarComp("y", 50.0), promotes=["*"])
    model.add_subsystem("comp", Paraboloid(), promotes=["*"])
    model.add_subsystem("con", om.ExecComp("c = - x + y"), promotes=["*"])

    prob.set_solver_print(level=0)

    prob.driver = NLoptDriver()
    prob.driver.options["optimizer"] = "LD_CCSAQ"
    prob.driver.options["tol"] = 1e-9

    model.add_design_var("x", lower=-50.0, upper=50.0)
    model.add_design_var("y", lower=-50.0, upper=50.0)
    model.add_objective("f_xy")
    model.add_constraint("c", upper=-15.0)

    prob.setup()

    # Return value was previously bound to an unused local; call directly.
    prob.run_driver()

    # Constrained minimum is at (7.166667, -7.833334).
    assert_near_equal(prob["x"], 7.1666666, 1e-6)
    assert_near_equal(prob["y"], -7.83333333, 1e-6)
def test_simple_paraboloid_upper_GN_ISRES(self):
    """Minimize the constrained Paraboloid with the stochastic GN_ISRES optimizer."""
    prob = om.Problem()
    model = prob.model

    model.add_subsystem("p1", om.IndepVarComp("x", 25.0), promotes=["*"])
    model.add_subsystem("p2", om.IndepVarComp("y", 25.0), promotes=["*"])
    model.add_subsystem("comp", Paraboloid(), promotes=["*"])
    model.add_subsystem("con", om.ExecComp("c = - x + y"), promotes=["*"])

    prob.set_solver_print(level=0)

    prob.driver = NLoptDriver()
    prob.driver.options["optimizer"] = "GN_ISRES"
    prob.driver.options["tol"] = 1e-12
    prob.driver.options["maxiter"] = 10000

    model.add_design_var("x", lower=-50.0, upper=50.0)
    model.add_design_var("y", lower=-50.0, upper=50.0)
    model.add_objective("f_xy")
    model.add_constraint("c", upper=-15.0)

    prob.setup()

    # Return value was previously bound to an unused local; call directly.
    prob.run_driver()

    # Just get pretty close to the optimum
    assert_near_equal(prob["x"], 7.1666666, 1e-2)
    assert_near_equal(prob["y"], -7.83333333, 1e-2)
def test_simple_paraboloid_upper_GN_ORIG_DIRECT(self):
    """Minimize the constrained Paraboloid with GN_ORIG_DIRECT (tight bounds for speed)."""
    prob = om.Problem()
    model = prob.model

    model.add_subsystem("p1", om.IndepVarComp("x", 7.5), promotes=["*"])
    model.add_subsystem("p2", om.IndepVarComp("y", -7.2), promotes=["*"])
    model.add_subsystem("comp", Paraboloid(), promotes=["*"])
    model.add_subsystem("con", om.ExecComp("c = - x + y"), promotes=["*"])

    prob.set_solver_print(level=0)

    prob.driver = NLoptDriver()
    prob.driver.options["optimizer"] = "GN_ORIG_DIRECT"
    prob.driver.options["tol"] = 1e-6
    prob.driver.options["maxiter"] = 500

    model.add_design_var("x", lower=5.0, upper=10.0)
    model.add_design_var("y", lower=-10.0, upper=-5.0)
    model.add_objective("f_xy")
    model.add_constraint("c", upper=-15.0)

    prob.setup()

    # Return value was previously bound to an unused local; call directly.
    prob.run_driver()

    # Just get pretty close to the optimum
    assert_near_equal(prob["x"], 7.1666666, 1e-2)
    assert_near_equal(prob["y"], -7.83333333, 1e-2)
def test_simple_paraboloid_upper_GN_ORIG_DIRECT_L(self):
    """Minimize the constrained Paraboloid with GN_ORIG_DIRECT_L (tight bounds for speed)."""
    prob = om.Problem()
    model = prob.model

    model.add_subsystem("p1", om.IndepVarComp("x", 7.5), promotes=["*"])
    model.add_subsystem("p2", om.IndepVarComp("y", -7.2), promotes=["*"])
    model.add_subsystem("comp", Paraboloid(), promotes=["*"])
    model.add_subsystem("con", om.ExecComp("c = - x + y"), promotes=["*"])

    prob.set_solver_print(level=0)

    prob.driver = NLoptDriver()
    prob.driver.options["optimizer"] = "GN_ORIG_DIRECT_L"
    prob.driver.options["tol"] = 1e-6
    prob.driver.options["maxiter"] = 500

    model.add_design_var("x", lower=5.0, upper=10.0)
    model.add_design_var("y", lower=-10.0, upper=-5.0)
    model.add_objective("f_xy")
    model.add_constraint("c", upper=-15.0)

    prob.setup()

    # Return value was previously bound to an unused local; call directly.
    prob.run_driver()

    # Just get pretty close to the optimum
    assert_near_equal(prob["x"], 7.1666666, 1e-2)
    assert_near_equal(prob["y"], -7.83333333, 1e-2)
def test_simple_paraboloid_equality_COBYLA(self):
    """Minimize the Paraboloid with an equality constraint using LN_COBYLA."""
    prob = om.Problem()
    model = prob.model

    model.add_subsystem("p1", om.IndepVarComp("x", 50.0), promotes=["*"])
    model.add_subsystem("p2", om.IndepVarComp("y", 50.0), promotes=["*"])
    model.add_subsystem("comp", Paraboloid(), promotes=["*"])
    model.add_subsystem("con", om.ExecComp("c = - x + y"), promotes=["*"])

    prob.set_solver_print(level=0)

    prob.driver = NLoptDriver()
    prob.driver.options["optimizer"] = "LN_COBYLA"
    prob.driver.options["tol"] = 1e-9

    model.add_design_var("x", lower=-50.0, upper=50.0)
    model.add_design_var("y", lower=-50.0, upper=50.0)
    model.add_objective("f_xy")
    model.add_constraint("c", equals=-15.0)

    prob.setup()

    # Return value was previously bound to an unused local; call directly.
    prob.run_driver()

    # Minimum should be at (7.166667, -7.833334)
    assert_near_equal(prob["x"], 7.16667, 1e-4)
    assert_near_equal(prob["y"], -7.833334, 1e-4)
def test_simple_paraboloid_equality_ISRES(self):
    """Minimize the Paraboloid with an equality constraint using stochastic GN_ISRES."""
    prob = om.Problem()
    model = prob.model

    # Start very close to the correct answer with tight bounds to
    # reduce test runtime
    model.add_subsystem("p1", om.IndepVarComp("x", 7.5), promotes=["*"])
    model.add_subsystem("p2", om.IndepVarComp("y", -7.6), promotes=["*"])
    model.add_subsystem("comp", Paraboloid(), promotes=["*"])
    model.add_subsystem("con", om.ExecComp("c = - x + y"), promotes=["*"])

    prob.set_solver_print(level=0)

    prob.driver = NLoptDriver()
    prob.driver.options["optimizer"] = "GN_ISRES"
    prob.driver.options["tol"] = 1e-6
    prob.driver.options["maxiter"] = 20000

    model.add_design_var("x", lower=7.0, upper=7.5)
    model.add_design_var("y", lower=-8.0, upper=-7.5)
    model.add_objective("f_xy")
    model.add_constraint("c", equals=-15.0)

    prob.setup()

    # Return value was previously bound to an unused local; call directly.
    prob.run_driver()

    # Minimum should be at (7.166667, -7.833334)
    # Loose tolerance
    assert_near_equal(prob["x"], 7.16667, 1e-2)
    assert_near_equal(prob["y"], -7.833334, 1e-2)
def test_simple_paraboloid_equality_failure_MMA(self):
    """LD_MMA must raise NotImplementedError for equality constraints."""
    prob = om.Problem()
    model = prob.model

    # Start very close to the correct answer with tight bounds to
    # reduce test runtime
    model.add_subsystem("p1", om.IndepVarComp("x", 7.5), promotes=["*"])
    model.add_subsystem("p2", om.IndepVarComp("y", -7.6), promotes=["*"])
    model.add_subsystem("comp", Paraboloid(), promotes=["*"])
    model.add_subsystem("con", om.ExecComp("c = - x + y"), promotes=["*"])

    prob.set_solver_print(level=0)

    prob.driver = NLoptDriver()
    prob.driver.options["optimizer"] = "LD_MMA"
    prob.driver.options["tol"] = 1e-6
    prob.driver.options["maxiter"] = 5000

    model.add_design_var("x", lower=7.0, upper=7.5)
    model.add_design_var("y", lower=-8.0, upper=-7.5)
    model.add_objective("f_xy")
    model.add_constraint("c", equals=-15.0)

    prob.setup()

    with self.assertRaises(NotImplementedError) as raises_msg:
        # Return value was previously bound to an unused local; call directly.
        prob.run_driver()

    exception = raises_msg.exception
    msg = "The selected optimizer, LD_MMA, does not support equality constraints."
    self.assertIn(msg, exception.args[0])
def test_simple_paraboloid_equality_failure_LD_CCSAQ(self):
    """LD_CCSAQ must raise NotImplementedError for equality constraints."""
    prob = om.Problem()
    model = prob.model

    # Start very close to the correct answer with tight bounds to
    # reduce test runtime
    model.add_subsystem("p1", om.IndepVarComp("x", 7.5), promotes=["*"])
    model.add_subsystem("p2", om.IndepVarComp("y", -7.6), promotes=["*"])
    model.add_subsystem("comp", Paraboloid(), promotes=["*"])
    model.add_subsystem("con", om.ExecComp("c = - x + y"), promotes=["*"])

    prob.set_solver_print(level=0)

    prob.driver = NLoptDriver()
    prob.driver.options["optimizer"] = "LD_CCSAQ"
    prob.driver.options["tol"] = 1e-6
    prob.driver.options["maxiter"] = 5000

    model.add_design_var("x", lower=7.0, upper=7.5)
    model.add_design_var("y", lower=-8.0, upper=-7.5)
    model.add_objective("f_xy")
    model.add_constraint("c", equals=-15.0)

    prob.setup()

    with self.assertRaises(NotImplementedError) as raises_msg:
        # Return value was previously bound to an unused local; call directly.
        prob.run_driver()

    exception = raises_msg.exception
    msg = "The selected optimizer, LD_CCSAQ, does not support equality constraints."
    self.assertIn(msg, exception.args[0])
def test_simple_paraboloid_equality_failure_GN_ORIG_DIRECT(self):
    """GN_ORIG_DIRECT must raise NotImplementedError for equality constraints."""
    prob = om.Problem()
    model = prob.model

    # Start very close to the correct answer with tight bounds to
    # reduce test runtime
    model.add_subsystem("p1", om.IndepVarComp("x", 7.5), promotes=["*"])
    model.add_subsystem("p2", om.IndepVarComp("y", -7.6), promotes=["*"])
    model.add_subsystem("comp", Paraboloid(), promotes=["*"])
    model.add_subsystem("con", om.ExecComp("c = - x + y"), promotes=["*"])

    prob.set_solver_print(level=0)

    prob.driver = NLoptDriver()
    prob.driver.options["optimizer"] = "GN_ORIG_DIRECT"
    prob.driver.options["tol"] = 1e-6
    prob.driver.options["maxiter"] = 5000

    model.add_design_var("x", lower=7.0, upper=7.5)
    model.add_design_var("y", lower=-8.0, upper=-7.5)
    model.add_objective("f_xy")
    model.add_constraint("c", equals=-15.0)

    prob.setup()

    with self.assertRaises(NotImplementedError) as raises_msg:
        # Return value was previously bound to an unused local; call directly.
        prob.run_driver()

    exception = raises_msg.exception
    msg = "The selected optimizer, GN_ORIG_DIRECT, does not support equality constraints."
    self.assertIn(msg, exception.args[0])
def test_simple_paraboloid_equality_failure_GN_ORIG_DIRECT_L(self):
    """GN_ORIG_DIRECT_L must raise NotImplementedError for equality constraints."""
    prob = om.Problem()
    model = prob.model

    # Start very close to the correct answer with tight bounds to
    # reduce test runtime
    model.add_subsystem("p1", om.IndepVarComp("x", 7.5), promotes=["*"])
    model.add_subsystem("p2", om.IndepVarComp("y", -7.6), promotes=["*"])
    model.add_subsystem("comp", Paraboloid(), promotes=["*"])
    model.add_subsystem("con", om.ExecComp("c = - x + y"), promotes=["*"])

    prob.set_solver_print(level=0)

    prob.driver = NLoptDriver()
    prob.driver.options["optimizer"] = "GN_ORIG_DIRECT_L"
    prob.driver.options["tol"] = 1e-6
    prob.driver.options["maxiter"] = 5000

    model.add_design_var("x", lower=7.0, upper=7.5)
    model.add_design_var("y", lower=-8.0, upper=-7.5)
    model.add_objective("f_xy")
    model.add_constraint("c", equals=-15.0)

    prob.setup()

    with self.assertRaises(NotImplementedError) as raises_msg:
        # Return value was previously bound to an unused local; call directly.
        prob.run_driver()

    exception = raises_msg.exception
    msg = "The selected optimizer, GN_ORIG_DIRECT_L, does not support equality constraints."
    self.assertIn(msg, exception.args[0])
def test_simple_paraboloid_equality_failure_GN_AGS(self):
    """GN_AGS must raise NotImplementedError for equality constraints."""
    prob = om.Problem()
    model = prob.model

    # Start very close to the correct answer with tight bounds to
    # reduce test runtime
    model.add_subsystem("p1", om.IndepVarComp("x", 7.5), promotes=["*"])
    model.add_subsystem("p2", om.IndepVarComp("y", -7.6), promotes=["*"])
    model.add_subsystem("comp", Paraboloid(), promotes=["*"])
    model.add_subsystem("con", om.ExecComp("c = - x + y"), promotes=["*"])

    prob.set_solver_print(level=0)

    prob.driver = NLoptDriver()
    prob.driver.options["optimizer"] = "GN_AGS"
    prob.driver.options["tol"] = 1e-6
    prob.driver.options["maxiter"] = 5000

    model.add_design_var("x", lower=7.0, upper=7.5)
    model.add_design_var("y", lower=-8.0, upper=-7.5)
    model.add_objective("f_xy")
    model.add_constraint("c", equals=-15.0)

    prob.setup()

    with self.assertRaises(NotImplementedError) as raises_msg:
        # Return value was previously bound to an unused local; call directly.
        prob.run_driver()

    exception = raises_msg.exception
    msg = "The selected optimizer, GN_AGS, does not support equality constraints."
    self.assertIn(msg, exception.args[0])
def test_maxtime(self):
    """A tiny maxtime budget should stop the optimizer before it moves the desvars."""
    prob = om.Problem()
    model = prob.model

    model.add_subsystem("p1", om.IndepVarComp("x", 50.0), promotes=["*"])
    model.add_subsystem("p2", om.IndepVarComp("y", 50.0), promotes=["*"])
    model.add_subsystem("comp", Paraboloid(), promotes=["*"])

    prob.set_solver_print(level=0)

    prob.driver = NLoptDriver(optimizer="LD_SLSQP", tol=1e-9, maxtime=0.0001)

    model.add_design_var("x", lower=-50.0, upper=50.0)
    model.add_design_var("y", lower=-50.0, upper=50.0)
    model.add_objective("f_xy")

    prob.setup()

    # Return value was previously bound to an unused local; call directly.
    prob.run_driver()

    # It shouldn't have time to move from the initial point
    assert_near_equal(prob["x"], 50.0, 1e-6)
    assert_near_equal(prob["y"], 50.0, 1e-6)
def test_simple_paraboloid_inequalities(self):
    """Equal lower/upper bounds emulate an equality constraint for optimizers without one."""
    # This test checks that you can set a constraint with the same value
    # for both the lower and upper bounds to effectively create an equality
    # constraint even if the optimization method doesn't allow equality constraints
    prob = om.Problem()
    model = prob.model

    # Start very close to the correct answer with tight bounds to
    # reduce test runtime
    model.add_subsystem("p1", om.IndepVarComp("x", 7.5), promotes=["*"])
    model.add_subsystem("p2", om.IndepVarComp("y", -7.6), promotes=["*"])
    model.add_subsystem("comp", Paraboloid(), promotes=["*"])
    model.add_subsystem("con", om.ExecComp("c = - x + y"), promotes=["*"])

    prob.set_solver_print(level=0)

    prob.driver = NLoptDriver()
    prob.driver.options["optimizer"] = "GN_ORIG_DIRECT"
    prob.driver.options["tol"] = 1e-6
    prob.driver.options["maxiter"] = 5000

    model.add_design_var("x", lower=7.0, upper=7.5)
    model.add_design_var("y", lower=-8.0, upper=-7.5)
    model.add_objective("f_xy")
    model.add_constraint("c", lower=-15.0, upper=-15.0)

    prob.setup()

    # Return value was previously bound to an unused local; call directly.
    prob.run_driver()

    # Minimum should be at (7.166667, -7.833334)
    # Loose tolerance
    assert_near_equal(prob["x"], 7.16667, 1e-2)
    assert_near_equal(prob["y"], -7.833334, 1e-2)
@unittest.skipIf(nlopt is None, "only run if NLopt is installed.")
class TestNLoptDriverFeatures(unittest.TestCase):
    """Documentation-style examples exercising NLoptDriver options one at a time."""

    def test_feature_basic(self):
        """Unconstrained Paraboloid minimization with explicit optimizer and tol."""
        import openmdao.api as om
        from openmdao.test_suite.components.paraboloid import Paraboloid
        from weis.optimization_drivers.nlopt_driver import NLoptDriver

        prob = om.Problem()
        prob.model.add_subsystem("p1", om.IndepVarComp("x", 50.0), promotes=["*"])
        prob.model.add_subsystem("p2", om.IndepVarComp("y", 50.0), promotes=["*"])
        prob.model.add_subsystem("comp", Paraboloid(), promotes=["*"])

        driver = NLoptDriver()
        driver.options["optimizer"] = "LD_SLSQP"
        driver.options["tol"] = 1e-9
        prob.driver = driver

        prob.model.add_design_var("x", lower=-50.0, upper=50.0)
        prob.model.add_design_var("y", lower=-50.0, upper=50.0)
        prob.model.add_objective("f_xy")

        prob.setup()
        prob.run_driver()

        assert_near_equal(prob["x"], 6.66666667, 1e-6)
        assert_near_equal(prob["y"], -7.3333333, 1e-6)

    def test_feature_optimizer(self):
        """The optimizer can be chosen directly in the driver constructor."""
        import openmdao.api as om
        from openmdao.test_suite.components.paraboloid import Paraboloid
        from weis.optimization_drivers.nlopt_driver import NLoptDriver

        prob = om.Problem()
        prob.model.add_subsystem("p1", om.IndepVarComp("x", 50.0), promotes=["*"])
        prob.model.add_subsystem("p2", om.IndepVarComp("y", 50.0), promotes=["*"])
        prob.model.add_subsystem("comp", Paraboloid(), promotes=["*"])

        prob.driver = NLoptDriver(optimizer="LD_SLSQP")

        prob.model.add_design_var("x", lower=-50.0, upper=50.0)
        prob.model.add_design_var("y", lower=-50.0, upper=50.0)
        prob.model.add_objective("f_xy")

        prob.setup()
        prob.run_driver()

        assert_near_equal(prob["x"], 6.66666667, 1e-6)
        assert_near_equal(prob["y"], -7.3333333, 1e-6)

    def test_feature_maxiter(self):
        """The maxiter option caps the number of optimizer iterations."""
        import openmdao.api as om
        from openmdao.test_suite.components.paraboloid import Paraboloid
        from weis.optimization_drivers.nlopt_driver import NLoptDriver

        prob = om.Problem()
        prob.model.add_subsystem("p1", om.IndepVarComp("x", 50.0), promotes=["*"])
        prob.model.add_subsystem("p2", om.IndepVarComp("y", 50.0), promotes=["*"])
        prob.model.add_subsystem("comp", Paraboloid(), promotes=["*"])

        prob.driver = NLoptDriver()
        prob.driver.options["maxiter"] = 20

        prob.model.add_design_var("x", lower=-50.0, upper=50.0)
        prob.model.add_design_var("y", lower=-50.0, upper=50.0)
        prob.model.add_objective("f_xy")

        prob.setup()
        prob.run_driver()

        assert_near_equal(prob["x"], 6.66666667, 1e-6)
        assert_near_equal(prob["y"], -7.3333333, 1e-6)

    def test_feature_tol(self):
        """The tol option sets the optimizer convergence tolerance."""
        import openmdao.api as om
        from openmdao.test_suite.components.paraboloid import Paraboloid
        from weis.optimization_drivers.nlopt_driver import NLoptDriver

        prob = om.Problem()
        prob.model.add_subsystem("p1", om.IndepVarComp("x", 50.0), promotes=["*"])
        prob.model.add_subsystem("p2", om.IndepVarComp("y", 50.0), promotes=["*"])
        prob.model.add_subsystem("comp", Paraboloid(), promotes=["*"])

        prob.driver = NLoptDriver()
        prob.driver.options["tol"] = 1.0e-9

        prob.model.add_design_var("x", lower=-50.0, upper=50.0)
        prob.model.add_design_var("y", lower=-50.0, upper=50.0)
        prob.model.add_objective("f_xy")

        prob.setup()
        prob.run_driver()

        assert_near_equal(prob["x"], 6.66666667, 1e-6)
        assert_near_equal(prob["y"], -7.3333333, 1e-6)
@unittest.skipUnless(nlopt is None, "only run if NLopt is NOT installed.")
class TestNotInstalled(unittest.TestCase):
    """Behavior of the driver module when the NLopt package is absent."""

    def test_nlopt_not_installed(self):
        """Import must succeed without NLopt; instantiation must raise RuntimeError."""
        # the import should not fail
        from weis.optimization_drivers.nlopt_driver import NLoptDriver

        # but we get a RuntimeError if we try to instantiate
        with self.assertRaises(RuntimeError) as ctx:
            NLoptDriver()
        self.assertEqual(
            'NLoptDriver is not available, NLopt is not installed.',
            str(ctx.exception),
        )
if __name__ == "__main__":
    # Run this test module's suite when executed directly.
    unittest.main()
| 34.290273 | 100 | 0.571233 |
afb17380f319988fe249070100131f7c3d707171 | 193 | py | Python | wristband/apps/models.py | hmrc/wristband | 35648a15b91dea4a927e486bfe0ace5e00b44dcc | [
"Apache-2.0"
] | 1 | 2015-07-14T14:32:17.000Z | 2015-07-14T14:32:17.000Z | wristband/apps/models.py | hmrc/wristband | 35648a15b91dea4a927e486bfe0ace5e00b44dcc | [
"Apache-2.0"
] | 4 | 2015-08-03T11:17:37.000Z | 2015-09-24T10:06:02.000Z | wristband/apps/models.py | hmrc/wristband | 35648a15b91dea4a927e486bfe0ace5e00b44dcc | [
"Apache-2.0"
] | 2 | 2020-05-05T13:56:47.000Z | 2021-04-10T23:51:52.000Z | from mongoengine import Document, StringField
class App(Document):
    """MongoDB document describing an application deployment record."""

    # All fields are free-form strings capped at 50 characters by the schema.
    name = StringField(max_length=50)
    # presumably a deployment stage/environment identifier — confirm with callers
    stage = StringField(max_length=50)
    security_zone = StringField(max_length=50)
| 24.125 | 46 | 0.761658 |
81bcbab9c005698196127f53380e0f4f09d6c8e2 | 11,200 | py | Python | alembic/versions/e9b7657f6be1_skymaps.py | bparazin/skyportal | c160610ca0cc28eef9f36c2d11cc15bd9bcbfe56 | [
"BSD-3-Clause"
] | 52 | 2018-11-02T00:53:21.000Z | 2022-03-08T16:03:52.000Z | alembic/versions/e9b7657f6be1_skymaps.py | bparazin/skyportal | c160610ca0cc28eef9f36c2d11cc15bd9bcbfe56 | [
"BSD-3-Clause"
] | 1,944 | 2017-04-27T18:51:20.000Z | 2022-03-31T20:17:44.000Z | alembic/versions/e9b7657f6be1_skymaps.py | bparazin/skyportal | c160610ca0cc28eef9f36c2d11cc15bd9bcbfe56 | [
"BSD-3-Clause"
] | 63 | 2017-05-13T01:40:47.000Z | 2022-03-12T11:32:11.000Z | """skymaps
Revision ID: e9b7657f6be1
Revises: fb0304aeca6c
Create Date: 2021-06-21 17:56:01.193974
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = 'e9b7657f6be1'  # this migration's id
down_revision = 'fb0304aeca6c'  # parent migration in the chain
branch_labels = None
depends_on = None
def upgrade():
    """Create the GCN event tables: gcnevents, gcnnotices, gcntags, localizations.

    Auto-generated by Alembic; table/index creation order matters because of
    the foreign keys from gcnnotices/gcntags/localizations to gcnevents.dateobs.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    # Parent table: one row per GCN event, keyed by unique observation time.
    op.create_table(
        'gcnevents',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('created_at', sa.DateTime(), nullable=False),
        sa.Column('modified', sa.DateTime(), nullable=False),
        sa.Column('sent_by_id', sa.Integer(), nullable=False),
        sa.Column('dateobs', sa.DateTime(), nullable=False),
        sa.ForeignKeyConstraint(['sent_by_id'], ['users.id'], ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('id'),
        sa.UniqueConstraint('dateobs'),
    )
    op.create_index(
        op.f('ix_gcnevents_created_at'), 'gcnevents', ['created_at'], unique=False
    )
    op.create_index(
        op.f('ix_gcnevents_sent_by_id'), 'gcnevents', ['sent_by_id'], unique=False
    )
    # Raw GCN notices; notice_type enumerates the GCN/TAN packet types.
    op.create_table(
        'gcnnotices',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('created_at', sa.DateTime(), nullable=False),
        sa.Column('modified', sa.DateTime(), nullable=False),
        sa.Column('sent_by_id', sa.Integer(), nullable=False),
        sa.Column('ivorn', sa.String(), nullable=True),
        sa.Column(
            'notice_type',
            sa.Enum(
                'GRB_COORDS',
                'TEST_COORDS',
                'IM_ALIVE',
                'KILL_SOCKET',
                'MAXBC',
                'BRAD_COORDS',
                'GRB_FINAL',
                'HUNTS_SRC',
                'ALEXIS_SRC',
                'XTE_PCA_ALERT',
                'XTE_PCA_SRC',
                'XTE_ASM_ALERT',
                'XTE_ASM_SRC',
                'COMPTEL_SRC',
                'IPN_RAW',
                'IPN_SEG',
                'SAX_WFC_ALERT',
                'SAX_WFC_SRC',
                'SAX_NFI_ALERT',
                'SAX_NFI_SRC',
                'XTE_ASM_TRANS',
                'spare38',
                'IPN_POS',
                'HETE_ALERT_SRC',
                'HETE_UPDATE_SRC',
                'HETE_FINAL_SRC',
                'HETE_GNDANA_SRC',
                'HETE_TEST',
                'GRB_CNTRPART',
                'SWIFT_TOO_FOM',
                'SWIFT_TOO_SC_SLEW',
                'DOW_TOD',
                'spare50',
                'INTEGRAL_POINTDIR',
                'INTEGRAL_SPIACS',
                'INTEGRAL_WAKEUP',
                'INTEGRAL_REFINED',
                'INTEGRAL_OFFLINE',
                'INTEGRAL_WEAK',
                'AAVSO',
                'MILAGRO_POS',
                'KONUS_LC',
                'SWIFT_BAT_GRB_ALERT',
                'SWIFT_BAT_GRB_POS_ACK',
                'SWIFT_BAT_GRB_POS_NACK',
                'SWIFT_BAT_GRB_LC',
                'SWIFT_BAT_SCALEDMAP',
                'SWIFT_FOM_OBS',
                'SWIFT_SC_SLEW',
                'SWIFT_XRT_POSITION',
                'SWIFT_XRT_SPECTRUM',
                'SWIFT_XRT_IMAGE',
                'SWIFT_XRT_LC',
                'SWIFT_XRT_CENTROID',
                'SWIFT_UVOT_DBURST',
                'SWIFT_UVOT_FCHART',
                'SWIFT_BAT_GRB_LC_PROC',
                'SWIFT_XRT_SPECTRUM_PROC',
                'SWIFT_XRT_IMAGE_PROC',
                'SWIFT_UVOT_DBURST_PROC',
                'SWIFT_UVOT_FCHART_PROC',
                'SWIFT_UVOT_POS',
                'SWIFT_BAT_GRB_POS_TEST',
                'SWIFT_POINTDIR',
                'SWIFT_BAT_TRANS',
                'SWIFT_XRT_THRESHPIX',
                'SWIFT_XRT_THRESHPIX_PROC',
                'SWIFT_XRT_SPER',
                'SWIFT_XRT_SPER_PROC',
                'SWIFT_UVOT_POS_NACK',
                'SWIFT_BAT_ALARM_SHORT',
                'SWIFT_BAT_ALARM_LONG',
                'SWIFT_UVOT_EMERGENCY',
                'SWIFT_XRT_EMERGENCY',
                'SWIFT_FOM_PPT_ARG_ERR',
                'SWIFT_FOM_SAFE_POINT',
                'SWIFT_FOM_SLEW_ABORT',
                'SWIFT_BAT_QL_POS',
                'SWIFT_BAT_SUB_THRESHOLD',
                'SWIFT_BAT_SLEW_POS',
                'AGILE_GRB_WAKEUP',
                'AGILE_GRB_GROUND',
                'AGILE_GRB_REFINED',
                'SWIFT_ACTUAL_POINTDIR',
                'AGILE_MCAL_ALERT',
                'AGILE_POINTDIR',
                'AGILE_TRANS',
                'AGILE_GRB_POS_TEST',
                'FERMI_GBM_ALERT',
                'FERMI_GBM_FLT_POS',
                'FERMI_GBM_GND_POS',
                'FERMI_GBM_LC',
                'FERMI_GBM_GND_INTERNAL',
                'FERMI_GBM_FIN_POS',
                'FERMI_GBM_ALERT_INTERNAL',
                'FERMI_GBM_FLT_INTERNAL',
                'FERMI_GBM_TRANS',
                'FERMI_GBM_POS_TEST',
                'FERMI_LAT_POS_INI',
                'FERMI_LAT_POS_UPD',
                'FERMI_LAT_POS_DIAG',
                'FERMI_LAT_TRANS',
                'FERMI_LAT_POS_TEST',
                'FERMI_LAT_MONITOR',
                'FERMI_SC_SLEW',
                'FERMI_LAT_GND',
                'FERMI_LAT_OFFLINE',
                'FERMI_POINTDIR',
                'SIMBADNED',
                'FERMI_GBM_SUBTHRESH',
                'SWIFT_BAT_MONITOR',
                'MAXI_UNKNOWN',
                'MAXI_KNOWN',
                'MAXI_TEST',
                'OGLE',
                'CBAT',
                'MOA',
                'SWIFT_BAT_SUBSUB',
                'SWIFT_BAT_KNOWN_SRC',
                'VOE_11_IM_ALIVE',
                'VOE_20_IM_ALIVE',
                'FERMI_SC_SLEW_INTERNAL',
                'COINCIDENCE',
                'FERMI_GBM_FIN_INTERNAL',
                'SUZAKU_LC',
                'SNEWS',
                'LVC_PRELIMINARY',
                'LVC_INITIAL',
                'LVC_UPDATE',
                'LVC_TEST',
                'LVC_COUNTERPART',
                'AMON_ICECUBE_COINC',
                'AMON_ICECUBE_HESE',
                'CALET_GBM_FLT_LC',
                'CALET_GBM_GND_LC',
                'LVC_EARLY_WARNING',
                'LVC_RETRACTION',
                'GWHEN_COINC',
                'AMON_ICECUBE_EHE',
                'HAWC_BURST_MONITOR',
                'AMON_NU_EM_COINC',
                'ICECUBE_ASTROTRACK_GOLD',
                'ICECUBE_ASTROTRACK_BRONZE',
                'ICECUBE_CASCADE',
                name='noticetype',
            ),
            nullable=False,
        ),
        sa.Column('stream', sa.String(), nullable=False),
        sa.Column('date', sa.DateTime(), nullable=False),
        sa.Column('dateobs', sa.DateTime(), nullable=False),
        sa.Column('content', sa.LargeBinary(), nullable=False),
        sa.ForeignKeyConstraint(['dateobs'], ['gcnevents.dateobs'], ondelete='CASCADE'),
        sa.ForeignKeyConstraint(['sent_by_id'], ['users.id'], ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('id'),
    )
    op.create_index(
        op.f('ix_gcnnotices_created_at'), 'gcnnotices', ['created_at'], unique=False
    )
    # IVORN identifiers are globally unique per notice.
    op.create_index(op.f('ix_gcnnotices_ivorn'), 'gcnnotices', ['ivorn'], unique=True)
    op.create_index(
        op.f('ix_gcnnotices_sent_by_id'), 'gcnnotices', ['sent_by_id'], unique=False
    )
    # Free-form text tags attached to an event.
    op.create_table(
        'gcntags',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('created_at', sa.DateTime(), nullable=False),
        sa.Column('modified', sa.DateTime(), nullable=False),
        sa.Column('sent_by_id', sa.Integer(), nullable=False),
        sa.Column('dateobs', sa.DateTime(), nullable=False),
        sa.Column('text', sa.Unicode(), nullable=False),
        sa.ForeignKeyConstraint(['dateobs'], ['gcnevents.dateobs'], ondelete='CASCADE'),
        sa.ForeignKeyConstraint(['sent_by_id'], ['users.id'], ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('id'),
    )
    op.create_index(
        op.f('ix_gcntags_created_at'), 'gcntags', ['created_at'], unique=False
    )
    op.create_index(op.f('ix_gcntags_dateobs'), 'gcntags', ['dateobs'], unique=False)
    op.create_index(
        op.f('ix_gcntags_sent_by_id'), 'gcntags', ['sent_by_id'], unique=False
    )
    # Sky localizations (multi-order healpix arrays + optional distance moments).
    op.create_table(
        'localizations',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('created_at', sa.DateTime(), nullable=False),
        sa.Column('modified', sa.DateTime(), nullable=False),
        sa.Column('sent_by_id', sa.Integer(), nullable=False),
        sa.Column('dateobs', sa.DateTime(), nullable=False),
        sa.Column('localization_name', sa.String(), nullable=True),
        sa.Column('uniq', sa.ARRAY(sa.BigInteger()), nullable=False),
        sa.Column('probdensity', sa.ARRAY(sa.Float()), nullable=False),
        sa.Column('distmu', sa.ARRAY(sa.Float()), nullable=True),
        sa.Column('distsigma', sa.ARRAY(sa.Float()), nullable=True),
        sa.Column('distnorm', sa.ARRAY(sa.Float()), nullable=True),
        sa.Column('contour', postgresql.JSONB(astext_type=sa.Text()), nullable=True),
        sa.ForeignKeyConstraint(['dateobs'], ['gcnevents.dateobs'], ondelete='CASCADE'),
        sa.ForeignKeyConstraint(['sent_by_id'], ['users.id'], ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('id'),
    )
    op.create_index(
        op.f('ix_localizations_created_at'),
        'localizations',
        ['created_at'],
        unique=False,
    )
    op.create_index(
        op.f('ix_localizations_dateobs'), 'localizations', ['dateobs'], unique=False
    )
    op.create_index(
        op.f('ix_localizations_localization_name'),
        'localizations',
        ['localization_name'],
        unique=False,
    )
    op.create_index(
        op.f('ix_localizations_sent_by_id'),
        'localizations',
        ['sent_by_id'],
        unique=False,
    )
    # ### end Alembic commands ###
def downgrade():
    """Revert this migration.

    Drops the tables created by ``upgrade()`` in reverse creation order
    (localizations, gcntags, gcnnotices, gcnevents); each table's indexes
    are dropped before the table itself.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_index(op.f('ix_localizations_sent_by_id'), table_name='localizations')
    op.drop_index(
        op.f('ix_localizations_localization_name'), table_name='localizations'
    )
    op.drop_index(op.f('ix_localizations_dateobs'), table_name='localizations')
    op.drop_index(op.f('ix_localizations_created_at'), table_name='localizations')
    op.drop_table('localizations')
    op.drop_index(op.f('ix_gcntags_sent_by_id'), table_name='gcntags')
    op.drop_index(op.f('ix_gcntags_dateobs'), table_name='gcntags')
    op.drop_index(op.f('ix_gcntags_created_at'), table_name='gcntags')
    op.drop_table('gcntags')
    op.drop_index(op.f('ix_gcnnotices_sent_by_id'), table_name='gcnnotices')
    op.drop_index(op.f('ix_gcnnotices_ivorn'), table_name='gcnnotices')
    op.drop_index(op.f('ix_gcnnotices_created_at'), table_name='gcnnotices')
    op.drop_table('gcnnotices')
    op.drop_index(op.f('ix_gcnevents_sent_by_id'), table_name='gcnevents')
    op.drop_index(op.f('ix_gcnevents_created_at'), table_name='gcnevents')
    op.drop_table('gcnevents')
    # ### end Alembic commands ###
| 38.225256 | 88 | 0.546429 |
b25a2192ce263ff321253c248c80d7567b93bc12 | 3,837 | py | Python | backend/swagger_server/models/document_info_field_entry_rect.py | Lend88/libresign | 9537f39a696fa5f3433052406329d77d528b6cf9 | [
"MIT"
] | 6 | 2019-01-29T05:58:37.000Z | 2021-11-02T22:47:02.000Z | backend/swagger_server/models/document_info_field_entry_rect.py | Lend88/libresign | 9537f39a696fa5f3433052406329d77d528b6cf9 | [
"MIT"
] | 9 | 2020-09-09T04:53:01.000Z | 2022-03-08T22:52:18.000Z | backend/swagger_server/models/document_info_field_entry_rect.py | Lend88/libresign | 9537f39a696fa5f3433052406329d77d528b6cf9 | [
"MIT"
] | 4 | 2019-01-29T07:38:55.000Z | 2021-10-16T21:06:42.000Z | # coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from .base_model_ import Model
from .. import util
class DocumentInfoFieldEntryRect(Model):
    """Bounding rectangle (x, y, width, height) of a document field entry.

    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    def __init__(self, x: float=None, y: float=None, w: float=None, h: float=None):  # noqa: E501
        """DocumentInfoFieldEntryRect - a model defined in Swagger

        :param x: The x of this DocumentInfoFieldEntryRect.  # noqa: E501
        :type x: float
        :param y: The y of this DocumentInfoFieldEntryRect.  # noqa: E501
        :type y: float
        :param w: The w of this DocumentInfoFieldEntryRect.  # noqa: E501
        :type w: float
        :param h: The h of this DocumentInfoFieldEntryRect.  # noqa: E501
        :type h: float
        """
        self.swagger_types = {
            'x': float,
            'y': float,
            'w': float,
            'h': float
        }
        self.attribute_map = {
            'x': 'x',
            'y': 'y',
            'w': 'w',
            'h': 'h'
        }
        # Assign the backing fields directly (bypassing the validating
        # setters) so a partially-populated model can be constructed and
        # then filled in by deserialization.
        self._x = x
        self._y = y
        self._w = w
        self._h = h

    @staticmethod
    def _require_not_none(name, value):
        """Return *value*, raising ValueError if it is None.

        Shared by the four coordinate setters to avoid repeating the same
        validation; the error message matches the generated code exactly.
        """
        if value is None:
            raise ValueError(
                'Invalid value for `{}`, must not be `None`'.format(name),
            )  # noqa: E501
        return value

    @classmethod
    def from_dict(cls, dikt) -> 'DocumentInfoFieldEntryRect':
        """Returns the dict as a model

        :param dikt: A dict.
        :type: dict
        :return: The DocumentInfoFieldEntry_rect of this DocumentInfoFieldEntryRect.  # noqa: E501
        :rtype: DocumentInfoFieldEntryRect
        """
        return util.deserialize_model(dikt, cls)

    @property
    def x(self) -> float:
        """Gets the x of this DocumentInfoFieldEntryRect.

        :return: The x of this DocumentInfoFieldEntryRect.
        :rtype: float
        """
        return self._x

    @x.setter
    def x(self, x: float):
        """Sets the x of this DocumentInfoFieldEntryRect.

        :param x: The x of this DocumentInfoFieldEntryRect.
        :type x: float
        """
        self._x = self._require_not_none('x', x)

    @property
    def y(self) -> float:
        """Gets the y of this DocumentInfoFieldEntryRect.

        :return: The y of this DocumentInfoFieldEntryRect.
        :rtype: float
        """
        return self._y

    @y.setter
    def y(self, y: float):
        """Sets the y of this DocumentInfoFieldEntryRect.

        :param y: The y of this DocumentInfoFieldEntryRect.
        :type y: float
        """
        self._y = self._require_not_none('y', y)

    @property
    def w(self) -> float:
        """Gets the w of this DocumentInfoFieldEntryRect.

        :return: The w of this DocumentInfoFieldEntryRect.
        :rtype: float
        """
        return self._w

    @w.setter
    def w(self, w: float):
        """Sets the w of this DocumentInfoFieldEntryRect.

        :param w: The w of this DocumentInfoFieldEntryRect.
        :type w: float
        """
        self._w = self._require_not_none('w', w)

    @property
    def h(self) -> float:
        """Gets the h of this DocumentInfoFieldEntryRect.

        :return: The h of this DocumentInfoFieldEntryRect.
        :rtype: float
        """
        return self._h

    @h.setter
    def h(self, h: float):
        """Sets the h of this DocumentInfoFieldEntryRect.

        :param h: The h of this DocumentInfoFieldEntryRect.
        :type h: float
        """
        self._h = self._require_not_none('h', h)
| 25.410596 | 98 | 0.569716 |
35a812b2fe47f6147311308b7bf39ad575aa5f48 | 15,928 | py | Python | detect_secrets/core/usage.py | digjanaik/detect-secrets | 624024ad5fd8a608e09ed719e5edab6ca95ef47e | [
"Apache-2.0"
] | null | null | null | detect_secrets/core/usage.py | digjanaik/detect-secrets | 624024ad5fd8a608e09ed719e5edab6ca95ef47e | [
"Apache-2.0"
] | 1 | 2020-08-12T21:57:16.000Z | 2020-08-12T21:57:16.000Z | detect_secrets/core/usage.py | digjanaik/detect-secrets | 624024ad5fd8a608e09ed719e5edab6ca95ef47e | [
"Apache-2.0"
] | null | null | null | import argparse
import os
from collections import namedtuple
from functools import lru_cache
from detect_secrets import VERSION
from detect_secrets.plugins.common.util import import_plugins
def add_exclude_lines_argument(parser):
    """Register the ``--exclude-lines`` option on the given parser."""
    kwargs = {
        'type': str,
        'help': 'Pass in regex to specify lines to ignore during scan.',
    }
    parser.add_argument('--exclude-lines', **kwargs)
def add_word_list_argument(parser):
    """Register ``--word-list``; the value is stored as ``word_list_file``."""
    help_text = (
        'Text file with a list of words, '
        'if a secret contains a word in the list we ignore it.'
    )
    parser.add_argument(
        '--word-list',
        dest='word_list_file',
        type=str,
        help=help_text,
    )
def _is_valid_path(path): # pragma: no cover
if not os.path.exists(path):
raise argparse.ArgumentTypeError(
'Invalid path: {}'.format(path),
)
return path
class TupleAction(argparse.Action):
    """argparse action that accumulates every occurrence into a tuple.

    The destination must default to a tuple (typically ``()``); each
    parsed value is appended by building a new tuple.
    """

    def __call__(self, parser, namespace, values, options_string=None):
        current = getattr(namespace, self.dest)
        setattr(namespace, self.dest, current + (values,))
def add_custom_plugins_argument(parser):
    """Register ``--custom-plugins`` (stored as ``custom_plugin_paths``).

    We turn custom_plugins_paths into a tuple so that we can
    @lru_cache all the functions that take it as an argument.
    """
    help_text = (
        'Custom plugin Python files, or directories containing them. '
        'Directories are not searched recursively.'
    )
    parser.add_argument(
        '--custom-plugins',
        dest='custom_plugin_paths',
        action=TupleAction,
        default=(),
        type=_is_valid_path,
        help=help_text,
    )
def add_use_all_plugins_argument(parser):
    """Register the ``--use-all-plugins`` boolean flag."""
    parser.add_argument(
        '--use-all-plugins',
        help='Use all available plugins to scan files.',
        action='store_true',
    )
def add_no_verify_flag(parser):
    """Register ``-n`` / ``--no-verify`` to skip network-based verification."""
    parser.add_argument(
        '-n', '--no-verify',
        action='store_true',
        help='Disables additional verification of secrets via network call.',
    )
def add_shared_arguments(parser):
    """Register options common to both console scripts.

    These are arguments that are for both `detect-secrets-hook` and
    `detect-secrets` console scripts.
    """
    registrars = (
        add_exclude_lines_argument,
        add_word_list_argument,
        add_custom_plugins_argument,
        add_use_all_plugins_argument,
        add_no_verify_flag,
    )
    for register in registrars:
        register(parser)
def get_parser_to_add_opt_out_options_to(parser):
    """Locate the parser that should receive plugin opt-out options.

    The pre-commit hook gets e.g. `--no-jwt-scan` type options
    as well as the subparser for `detect-secrets scan`.

    :rtype: argparse.ArgumentParser
    :returns: argparse.ArgumentParser to pass into PluginOptions
    """
    subparser_actions = [
        action
        for action in parser._actions
        if isinstance(action, argparse._SubParsersAction)
    ]
    for action in subparser_actions:  # pragma: no cover (Always returns)
        for candidate in action.choices.values():
            if candidate.prog.endswith('scan'):
                return candidate
    # Assume it is the 'detect-secrets-hook' console script
    # Relying on parser.prog is too brittle
    return parser
class ParserBuilder:
    """Fluent builder for the detect-secrets command line parsers."""

    def __init__(self):
        self.parser = argparse.ArgumentParser()
        self.add_default_arguments()

    def add_default_arguments(self):
        """Register arguments shared by every entry point."""
        self._add_verbosity_argument()\
            ._add_version_argument()

    def add_pre_commit_arguments(self):
        """Configure the parser for the `detect-secrets-hook` script."""
        # NOTE: the original chained call carried a stray trailing `\`
        # line-continuation after `._add_set_baseline_argument()`, which is
        # fragile (a syntax error if the following blank line is removed);
        # it has been dropped.
        self._add_filenames_argument()\
            ._add_set_baseline_argument()

        add_shared_arguments(self.parser)
        PluginOptions(self.parser).add_arguments()
        return self

    def add_console_use_arguments(self):
        """Configure the parser for the `detect-secrets` script."""
        subparser = self.parser.add_subparsers(
            dest='action',
        )
        for action_parser in (ScanOptions, AuditOptions):
            action_parser(subparser).add_arguments()
        return self

    def parse_args(self, argv):
        """Parse argv, loading custom plugins before the final parse.

        :param argv: list of command line arguments (without the program name)
        :returns: fully consolidated argparse.Namespace
        """
        # We temporarily remove '--help' so that we can give the full
        # amount of options (e.g. --no-custom-detector) after loading
        # custom plugins.
        argv_without_help = list(
            filter(
                lambda arg: (
                    arg not in ('-h', '--help')
                ),
                argv,
            ),
        )
        known_args, _ = self.parser.parse_known_args(
            args=argv_without_help,
        )

        # Audit does not use the `--custom-plugins` argument
        # It pulls custom_plugins from the audited baseline
        if hasattr(known_args, 'custom_plugin_paths'):
            # Add e.g. `--no-jwt-scan` type options
            # now that we can use the --custom-plugins argument
            PluginOptions(
                get_parser_to_add_opt_out_options_to(self.parser),
            ).add_opt_out_options(
                known_args.custom_plugin_paths,
            )

        args = self.parser.parse_args(
            args=argv,
        )
        PluginOptions.consolidate_args(args)
        return args

    def _add_version_argument(self):
        self.parser.add_argument(
            '--version',
            action='version',
            version=VERSION,
            help='Display version information.',
        )
        return self

    def _add_verbosity_argument(self):
        self.parser.add_argument(
            '-v',
            '--verbose',
            action='count',
            help='Verbose mode.',
        )
        return self

    def _add_filenames_argument(self):
        self.parser.add_argument(
            'filenames',
            nargs='*',
            help='Filenames to check.',
        )
        return self

    def _add_set_baseline_argument(self):
        self.parser.add_argument(
            '--baseline',
            nargs=1,
            default=[''],
            help='Sets a baseline for explicitly ignored secrets, generated by `--scan`.',
        )
        return self
class ScanOptions:
    """Registers the `scan` subcommand and all of its arguments."""
    def __init__(self, subparser):
        # Attach the `scan` subparser to the main parser's subparsers.
        self.parser = subparser.add_parser(
            'scan',
        )
    def add_arguments(self):
        """Attach scan-specific arguments, then the shared plugin options."""
        self._add_initialize_baseline_argument()\
            ._add_adhoc_scanning_argument()
        PluginOptions(self.parser).add_arguments()
        return self
    def _add_initialize_baseline_argument(self):
        """Arguments used when initializing (or updating) a baseline file."""
        self.parser.add_argument(
            'path',
            nargs='*',
            default='.',
            help=(
                'Scans the entire codebase and outputs a snapshot of '
                'currently identified secrets.'
            ),
        )
        add_shared_arguments(self.parser)
        # Pairing `--exclude-files` with `--scan` because it's only used for the initialization.
        # The pre-commit hook framework already has an `exclude` option that can
        # be used instead.
        self.parser.add_argument(
            '--exclude-files',
            type=str,
            help='Pass in regex to specify ignored paths during initialization scan.',
        )
        # Pairing `--update` with `--scan` because it's only used for
        # initialization.
        self.parser.add_argument(
            '--update',
            nargs=1,
            metavar='OLD_BASELINE_FILE',
            help='Update existing baseline by importing settings from it.',
            dest='import_filename',
        )
        self.parser.add_argument(
            '--all-files',
            action='store_true',
            help='Scan all files recursively (as compared to only scanning git tracked files).',
        )
        return self
    def _add_adhoc_scanning_argument(self):
        """Argument for scanning a single string instead of a repository."""
        self.parser.add_argument(
            '--string',
            nargs='?',
            const=True,
            help=(
                'Scans an individual string, and displays configured '
                'plugins\' verdict.'
            ),
        )
class AuditOptions:
    """Registers the `audit` subcommand and its arguments."""

    def __init__(self, subparser):
        self.parser = subparser.add_parser('audit')

    def add_arguments(self):
        """Attach the audit filename argument and its exclusive mode flags."""
        self.parser.add_argument(
            'filename',
            nargs='+',
            help=(
                'Audit a given baseline file to distinguish the difference '
                'between false and true positives.'
            ),
        )

        # `--diff` and `--display-results` are alternative modes; allowing
        # both at once would be ambiguous.
        mode_group = self.parser.add_mutually_exclusive_group()
        mode_group.add_argument(
            '--diff',
            action='store_true',
            help=(
                'Allows the comparison of two baseline files, in order to '
                'effectively distinguish the difference between various '
                'plugin configurations.'
            ),
        )
        mode_group.add_argument(
            '--display-results',
            action='store_true',
            help=(
                'Displays the results of an interactive auditing session '
                'which have been saved to a baseline file.'
            ),
        )
        return self
class PluginDescriptor(
    namedtuple(
        'PluginDescriptor',
        [
            # Classname of plugin; used for initialization
            'classname',
            # Flag to disable plugin. e.g. `--no-hex-string-scan`
            'disable_flag_text',
            # Description for disable flag.
            'disable_help_text',
            # type: list
            # Allows the bundling of all related command line provided
            # arguments together, under one plugin name.
            # Assumes there is no shared related arg.
            #
            # Furthermore, each related arg can have its own default
            # value (paired together, with a tuple). This allows us to
            # distinguish the difference between a default value, and
            # whether a user has entered the same value as a default value.
            # Therefore, only populate the default value upon consolidation
            # (rather than relying on argparse default).
            'related_args',
        ],
    ),
):
    """Static description of a plugin's command line surface."""

    def __new__(cls, related_args=None, **kwargs):
        # Normalize the default here (rather than in the namedtuple) so
        # every instance gets its own fresh list.
        return super(PluginDescriptor, cls).__new__(
            cls,
            related_args=related_args or [],
            **kwargs
        )

    @classmethod
    def from_plugin_class(cls, plugin, name):
        """Build a descriptor from a plugin class.

        :type plugin: Type[TypeVar('Plugin', bound=BasePlugin)]
        :type name: str
        """
        related_args = None
        if plugin.default_options:
            # e.g. {'hex_limit': 3.0} -> [('--hex-limit', 3.0)]
            related_args = [
                ('--{}'.format(arg_name.replace('_', '-')), value)
                for arg_name, value in plugin.default_options.items()
            ]

        return cls(
            classname=name,
            disable_flag_text='--{}'.format(plugin.disable_flag_text),
            disable_help_text=cls.get_disabled_help_text(plugin),
            related_args=related_args,
        )

    @staticmethod
    def get_disabled_help_text(plugin):
        """Derive the disable-flag help text from the plugin's docstring.

        :raises NotImplementedError: if the docstring has no non-blank line.
        """
        for line in plugin.__doc__.splitlines():
            # strip() already removes leading whitespace; the redundant
            # trailing .lstrip() call has been removed.
            line = line.strip()
            if line:
                break
        else:
            raise NotImplementedError('Plugins must declare a docstring.')

        # Lower-case the first character so it reads as a sentence tail.
        line = line[0].lower() + line[1:]
        return 'Disables {}'.format(line)
@lru_cache(maxsize=1)
def get_all_plugin_descriptors(custom_plugin_paths):
    """Return a PluginDescriptor for every importable plugin.

    Cached because plugin discovery hits the filesystem; the tuple
    argument keys the cache per set of custom plugin paths.
    """
    plugins = import_plugins(custom_plugin_paths)
    return [
        PluginDescriptor.from_plugin_class(plugin, name)
        for name, plugin in plugins.items()
    ]
class PluginOptions:
    """Registers per-plugin command line options and consolidates them."""
    def __init__(self, parser):
        # All plugin options live in a dedicated argparse group.
        self.parser = parser.add_argument_group(
            title='plugins',
            description=(
                'Configure settings for each secret scanning '
                'ruleset. By default, all plugins are enabled '
                'unless explicitly disabled.'
            ),
        )
    def add_arguments(self):
        """Attach the numeric-limit and keyword-exclude options."""
        self._add_custom_limits()
        self._add_keyword_exclude()
        return self
    @staticmethod
    def get_disabled_plugins(args):
        """Return classnames of known plugins absent from args.plugins."""
        return [
            plugin.classname
            for plugin in get_all_plugin_descriptors(args.custom_plugin_paths)
            if plugin.classname not in args.plugins
        ]
    @staticmethod
    def consolidate_args(args):
        """There are many argument fields related to configuring plugins.
        This function consolidates all of them, and saves the consolidated
        information in args.plugins.
        Note that we're deferring initialization of those plugins, because
        plugins may have various initialization values, referenced in
        different places.
        :param args: output of `argparse.ArgumentParser.parse_args`
        """
        # Using `--hex-limit` as a canary to identify whether this
        # consolidation is appropriate.
        if not hasattr(args, 'hex_limit'):
            return
        active_plugins = {}
        is_using_default_value = {}
        for plugin in get_all_plugin_descriptors(args.custom_plugin_paths):
            arg_name = PluginOptions._convert_flag_text_to_argument_name(
                plugin.disable_flag_text,
            )
            # Remove disabled plugins
            is_disabled = getattr(args, arg_name, False)
            delattr(args, arg_name)
            if is_disabled:
                continue
            # Consolidate related args
            related_args = {}
            for related_arg_tuple in plugin.related_args:
                flag_name, default_value = related_arg_tuple
                arg_name = PluginOptions._convert_flag_text_to_argument_name(
                    flag_name,
                )
                related_args[arg_name] = getattr(args, arg_name)
                delattr(args, arg_name)
                # A None here means the user did not supply the flag; fall
                # back to the plugin's declared default and record that.
                if default_value and related_args[arg_name] is None:
                    related_args[arg_name] = default_value
                    is_using_default_value[arg_name] = True
            active_plugins.update({
                plugin.classname: related_args,
            })
        args.plugins = active_plugins
        args.is_using_default_value = is_using_default_value
    def _add_custom_limits(self):
        """Register the entropy-limit options for the high entropy plugins."""
        high_entropy_help_text = (
            'Sets the entropy limit for high entropy strings. '
            'Value must be between 0.0 and 8.0, '
        )
        self.parser.add_argument(
            '--base64-limit',
            type=self._argparse_minmax_type,
            nargs='?',
            help=high_entropy_help_text + 'defaults to 4.5.',
        )
        self.parser.add_argument(
            '--hex-limit',
            type=self._argparse_minmax_type,
            nargs='?',
            help=high_entropy_help_text + 'defaults to 3.0.',
        )
    def add_opt_out_options(self, custom_plugin_paths):
        """Register one `--no-<plugin>-scan` style flag per known plugin."""
        for plugin in get_all_plugin_descriptors(custom_plugin_paths):
            self.parser.add_argument(
                plugin.disable_flag_text,
                action='store_true',
                help=plugin.disable_help_text,
                default=False,
            )
    def _argparse_minmax_type(self, string):
        """Custom type for argparse to enforce value limits"""
        value = float(string)
        if value < 0 or value > 8:
            raise argparse.ArgumentTypeError(
                '%s must be between 0.0 and 8.0' % string,
            )
        return value
    @staticmethod
    def _convert_flag_text_to_argument_name(flag_text):
        """This just emulates argparse's underlying logic.
        :type flag_text: str
        :param flag_text: e.g. `--no-hex-string-scan`
        :return: `no_hex_string_scan`
        """
        return flag_text[2:].replace('-', '_')
    def _add_keyword_exclude(self):
        """Register the false-positive exclusion regex for KeywordDetector."""
        self.parser.add_argument(
            '--keyword-exclude',
            type=str,
            help='Pass in regex to exclude false positives found by keyword detector.',
        )
| 29.496296 | 96 | 0.581617 |
4dab6fa48871471f8c78467c6c19c2f4796c648d | 492 | py | Python | bin/fastaSlice.py | PapenfussLab/Mungo | 02c5b0e48ecd28596cb9481b282753859f47fed6 | [
"Artistic-2.0"
] | 1 | 2015-09-16T07:53:18.000Z | 2015-09-16T07:53:18.000Z | bin/fastaSlice.py | PapenfussLab/Mungo | 02c5b0e48ecd28596cb9481b282753859f47fed6 | [
"Artistic-2.0"
] | null | null | null | bin/fastaSlice.py | PapenfussLab/Mungo | 02c5b0e48ecd28596cb9481b282753859f47fed6 | [
"Artistic-2.0"
] | 3 | 2016-01-02T16:34:59.000Z | 2021-07-21T04:21:55.000Z | #!/usr/bin/env python
"""
fastaSlice.py <fasta file> <first entry> <number>
Author: Tony Papenfuss
Date: Wed Apr 23 14:49:40 EST 2008
"""
import os, sys
from mungo.fasta import FastaFile
# Print the module usage and exit when run without arguments or with -h.
if len(sys.argv)==1 or '-h' in sys.argv:
    sys.exit(__doc__)
# Positional arguments: input fasta filename, index of the first entry
# to emit, and the number of entries to emit.
iFilename = sys.argv[1]
start = int(sys.argv[2])
n = int(sys.argv[3])
# Open the input with an index so we can seek directly to `start`.
f = FastaFile(iFilename, indexed=True)
f.seek(start)
# Stream the next n (header, sequence) entries to stdout.
w = FastaFile(sys.stdout, 'w')
count = 0
for h,s in f:
    w.write(h,s)
    count += 1
    if count==n: break
de1de3ec768c895ea00cea8e734dc6f321c3ddd0 | 3,245 | py | Python | IPython/zmq/session.py | 08saikiranreddy/ipython | 3498382180ad409592f46a9dd0d190ca917bfbff | [
"BSD-3-Clause-Clear"
] | 1 | 2022-03-13T23:26:37.000Z | 2022-03-13T23:26:37.000Z | IPython/zmq/session.py | 08saikiranreddy/ipython | 3498382180ad409592f46a9dd0d190ca917bfbff | [
"BSD-3-Clause-Clear"
] | null | null | null | IPython/zmq/session.py | 08saikiranreddy/ipython | 3498382180ad409592f46a9dd0d190ca917bfbff | [
"BSD-3-Clause-Clear"
] | null | null | null | import os
import uuid
import pprint
import zmq
class Message(object):
    """A simple message object that maps dict keys to attributes.

    A Message can be created from a dict and a dict from a Message instance
    simply by calling dict(msg_obj). Nested dicts are wrapped recursively,
    so attribute access chains (e.g. ``msg.header.msg_id``).
    """
    def __init__(self, msg_dict):
        dct = self.__dict__
        # .items() instead of the Python-2-only .iteritems(): identical
        # iteration semantics, but works on both Python 2 and Python 3.
        for k, v in msg_dict.items():
            if isinstance(v, dict):
                v = Message(v)
            dct[k] = v

    # Having this iterator lets dict(msg_obj) work out of the box.
    def __iter__(self):
        return iter(self.__dict__.items())

    def __repr__(self):
        return repr(self.__dict__)

    def __str__(self):
        return pprint.pformat(self.__dict__)

    def __contains__(self, k):
        return k in self.__dict__

    def __getitem__(self, k):
        return self.__dict__[k]
def msg_header(msg_id, username, session):
    """Build the standard header dict for a message."""
    return dict(
        msg_id=msg_id,
        username=username,
        session=session,
    )
def extract_header(msg_or_header):
    """Given a message or header, return the header.

    Accepts a full message (dict/Message with a 'header' key), a bare
    header (anything carrying 'msg_id'), or a falsy value (returns {}).
    """
    if not msg_or_header:
        return {}
    if 'header' in msg_or_header:
        header = msg_or_header['header']
    elif 'msg_id' in msg_or_header:
        # Already a header; use it directly.
        header = msg_or_header
    else:
        raise KeyError('msg_id')
    if isinstance(header, dict):
        return header
    return dict(header)
class Session(object):
    """Builds, sends and receives messages with monotonically increasing ids.

    NOTE: the default for ``username`` is evaluated once at import time
    from the USER environment variable.
    """
    def __init__(self, username=os.environ.get('USER','username'), session=None):
        self.username = username
        if session is None:
            # Fresh random session id unless one is supplied.
            self.session = str(uuid.uuid4())
        else:
            self.session = session
        self.msg_id = 0

    def msg_header(self):
        """Return the next header, consuming one message id."""
        h = msg_header(self.msg_id, self.username, self.session)
        self.msg_id += 1
        return h

    def msg(self, msg_type, content=None, parent=None):
        """Build a complete message dict of the given type."""
        msg = {}
        msg['header'] = self.msg_header()
        msg['parent_header'] = {} if parent is None else extract_header(parent)
        msg['msg_type'] = msg_type
        msg['content'] = {} if content is None else content
        return msg

    def send(self, socket, msg_type, content=None, parent=None, ident=None):
        """Send a message over *socket*, optionally routed via *ident*."""
        msg = self.msg(msg_type, content, parent)
        if ident is not None:
            socket.send(ident, zmq.SNDMORE)
        socket.send_json(msg)
        omsg = Message(msg)
        return omsg

    def recv(self, socket, mode=zmq.NOBLOCK):
        """Receive a Message from *socket*, or None if nothing is pending."""
        try:
            msg = socket.recv_json(mode)
        # `except ... as e` (instead of the Python-2-only comma form)
        # is valid on Python 2.6+ and required on Python 3.
        except zmq.ZMQError as e:
            if e.errno == zmq.EAGAIN:
                # We can convert EAGAIN to None as we know in this case
                # recv_json won't return None.
                return None
            else:
                raise
        return Message(msg)
def test_msg2obj():
    """Smoke-test Message: attribute access, nesting, and dict round-trip."""
    # Flat dict -> attribute access.
    am = dict(x=1)
    ao = Message(am)
    assert ao.x == am['x']
    # Nested dict -> chained attribute access.
    am['y'] = dict(z=1)
    ao = Message(am)
    assert ao.y.z == am['y']['z']
    # Item access mirrors attribute access.
    k1, k2 = 'y', 'z'
    assert ao[k1][k2] == am[k1][k2]
    # Round-trip back to a dict via __iter__.
    am2 = dict(ao)
    assert am['x'] == am2['x']
    assert am['y']['z'] == am2['y']['z']
02b08fe6e21ba9e7f16c241b533afc1d17992978 | 3,103 | py | Python | nlpfit/explanation/visualisation/vec_to_tflowvis.py | MFajcik/NLP-FIT | 446ee99d5e8237da62bae3548f5de31e2f332d35 | [
"MIT"
] | null | null | null | nlpfit/explanation/visualisation/vec_to_tflowvis.py | MFajcik/NLP-FIT | 446ee99d5e8237da62bae3548f5de31e2f332d35 | [
"MIT"
] | null | null | null | nlpfit/explanation/visualisation/vec_to_tflowvis.py | MFajcik/NLP-FIT | 446ee99d5e8237da62bae3548f5de31e2f332d35 | [
"MIT"
] | null | null | null | import argparse
import os
import re
import sys
import pandas as pd
from gensim.models import KeyedVectors
from nlpfit.other.logging_config import init_logging, logger_stub
from nlpfit.preprocessing import lemmatize_list
def get_words(input):
    """Collect the set of words found in columns 1 and 3 of a CNWA file.

    Column 1 is split on ':' or whitespace after dropping its trailing
    token; column 3 is split the same way in full.
    """
    frame = pd.read_csv(input, delimiter="\t", header=None)
    words = set()
    for _, row in frame.iterrows():
        left = ' '.join(row[0].split()[:-1]).strip()
        words.update(re.split(':|\s', left))
        words.update(re.split(':|\s', row[2].strip()))
    return words
def generate_files_for_tflow_embedding_projector(model, lemmatized_words, o_vec="vecfile.tsv", o_label="labelfile.tsv",
                                                 logging=logger_stub()):
    """Write TSV vector/label files for the TensorFlow embedding projector.

    For each in-vocabulary word, its top similar words plus the word
    itself are written once: labels to *o_label*, tab-separated vector
    components to *o_vec*. Out-of-vocabulary words are logged.
    NOTE(review): the `logging` default is a single logger_stub() shared
    across calls (evaluated at definition time).
    """
    with open(o_vec, mode="w") as ov, open(o_label, mode="w") as ol:
        already_written = []
        for word in lemmatized_words:
            # We want to show similar words and the important word
            if word in model.vocab:
                candidates = list(map(lambda x: x[0], model.wv.most_similar(positive=word))) + [word]
                for candidate in candidates:
                    # Skip duplicates so each label/vector appears once.
                    if candidate not in already_written:
                        ol.write(candidate + "\n")
                        ov.write('\t'.join(model.wv[candidate].astype(str)) + "\n")
                        already_written.append(candidate)
            else:
                logging.critical("Word {} out of vocabulary.".format(word))
# CLI entry point: read a CNWA file, lemmatize its words, load a word2vec
# model, and emit the projector vector/label files.
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", "--input",
                        required=True,
                        help="Input file in cnwa format.")
    parser.add_argument("-o_vec",
                        required=True,
                        help="Output file with vectors.")
    parser.add_argument("-o_labels",
                        required=True,
                        help="Output file with labels.")
    parser.add_argument("-l", "--logpath",
                        default=os.getcwd(),
                        help="Explicit setting of log folder path")
    parser.add_argument("-m", "--model",
                        required=True,
                        help="Vec model to do tsne on.")
    parser.add_argument("-d", "--dictionary",
                        default="/mnt/minerva1/nlp/projects/semantic_relatedness9" + \
                                "/models/cz_morphodita/czech-morfflex-160310.dict",
                        help="Morphological analyzer.")
    args = parser.parse_args()
    # Log file is named after the script (basename without extension).
    logging = init_logging(os.path.basename(sys.argv[0]).split(".")[0], logpath=args.logpath)
    logging.info("Counting words from cnwa...")
    words = get_words(args.input)
    logging.info("Lemmatizing words...")
    lemmatized_words = lemmatize_list(list(words), args.dictionary)
    # Model is expected in text (non-binary) word2vec format.
    model = KeyedVectors.load_word2vec_format(args.model, binary=False)
    logging.info("Generating tensorflow embedding projection outputs")
    generate_files_for_tflow_embedding_projector(model, lemmatized_words, o_vec=args.o_vec, o_label=args.o_labels)
431bd4661fd643b63fb4ddbfe9b8ecbc1da9c3b2 | 1,791 | py | Python | SimVascular-master/Python/site-packages/sv_ml/factories/preprocessor_factory.py | mccsssk2/SimVascularPM3_March2020 | 3cce6cc7be66545bea5dc3915a2db50a3892bf04 | [
"BSD-3-Clause"
] | null | null | null | SimVascular-master/Python/site-packages/sv_ml/factories/preprocessor_factory.py | mccsssk2/SimVascularPM3_March2020 | 3cce6cc7be66545bea5dc3915a2db50a3892bf04 | [
"BSD-3-Clause"
] | null | null | null | SimVascular-master/Python/site-packages/sv_ml/factories/preprocessor_factory.py | mccsssk2/SimVascularPM3_March2020 | 3cce6cc7be66545bea5dc3915a2db50a3892bf04 | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) Stanford University, The Regents of the University of
# California, and others.
#
# All Rights Reserved.
#
# See Copyright-SimVascular.txt for additional details.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject
# to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
# OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from sv_ml.components.common import BasePreProcessor
#from components.seg import SegPreProcessor
def get(config):
    """Return the preprocessor selected by config['PREPROCESSOR'].

    NOTE(review): the SegPreProcessor import at the top of this file is
    commented out, so the 'SEG' branch would raise NameError as written —
    confirm whether SEG support was intentionally disabled.
    """
    if config.get('PREPROCESSOR') == 'SEG':
        return SegPreProcessor(config)
    return BasePreProcessor(config)
| 45.923077 | 74 | 0.771636 |
1e6bc469f7194e8f6eaffc018fcb1caaac7e2131 | 628 | py | Python | topic/migrations/0003_auto_20190306_1228.py | reBiocoder/my_hubu | 19dbfc12d9e5ee509b3cc70826eafa6a5014f21a | [
"MIT"
] | 6 | 2020-05-02T11:08:39.000Z | 2021-07-20T02:55:47.000Z | topic/migrations/0003_auto_20190306_1228.py | reBiocoder/my_hubu | 19dbfc12d9e5ee509b3cc70826eafa6a5014f21a | [
"MIT"
] | 8 | 2020-06-06T01:45:02.000Z | 2022-03-12T00:24:54.000Z | topic/migrations/0003_auto_20190306_1228.py | reBiocoder/my_hubu | 19dbfc12d9e5ee509b3cc70826eafa6a5014f21a | [
"MIT"
] | 3 | 2020-05-04T00:36:46.000Z | 2021-02-15T09:49:44.000Z | # Generated by Django 2.1.4 on 2019-03-06 12:28
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('topic', '0002_auto_20190213_1229'),
]
operations = [
migrations.AddField(
model_name='create_topic_model',
name='zhihu_id',
field=models.CharField(blank=True, max_length=500, null=True, verbose_name='是否为知乎文章'),
),
migrations.AlterField(
model_name='create_topic_model',
name='title',
field=models.CharField(max_length=200, verbose_name='标题'),
),
]
| 26.166667 | 98 | 0.60828 |
6324798a4bb3d8a89e1635017d4869adb5d001d3 | 2,040 | py | Python | third_party/tvcm/tvcm/style_sheet_unittest.py | Acidburn0zzz/trace-viewer | c4d7cee712b0306afc564787085cff76fd5bb5d9 | [
"BSD-3-Clause"
] | 2 | 2015-02-07T05:19:08.000Z | 2016-12-12T21:17:50.000Z | third_party/tvcm/tvcm/style_sheet_unittest.py | Acidburn0zzz/trace-viewer | c4d7cee712b0306afc564787085cff76fd5bb5d9 | [
"BSD-3-Clause"
] | null | null | null | third_party/tvcm/tvcm/style_sheet_unittest.py | Acidburn0zzz/trace-viewer | c4d7cee712b0306afc564787085cff76fd5bb5d9 | [
"BSD-3-Clause"
] | null | null | null | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import base64
import unittest
from tvcm import style_sheet
from tvcm import project as project_module
from tvcm import resource_loader
from tvcm import fake_fs
from tvcm import module
class StyleSheetUnittest(unittest.TestCase):
  """Tests for style sheet loading, image inlining, and error paths."""
  def testImages(self):
    """A url() reference is resolved and inlined as a base64 data URI."""
    fs = fake_fs.FakeFS()
    fs.AddFile('/src/foo/x.css', """
      .x .y {
      background-image: url(../images/bar.jpeg);
      }
      """)
    fs.AddFile('/src/images/bar.jpeg', 'hello world')
    with fs:
      project = project_module.Project(['/src/'],
                                       include_tvcm_paths=False)
      loader = resource_loader.ResourceLoader(project)
      foo_x = loader.LoadStyleSheet('foo.x')
      self.assertEquals(1, len(foo_x.images))
      r0 = foo_x.images[0]
      self.assertEquals('/src/images/bar.jpeg', r0.absolute_path)
      inlined = foo_x.contents_with_inlined_images
      self.assertEquals("""
      .x .y {
      background-image: url(data:image/jpeg;base64,%s);
      }
      """ % base64.standard_b64encode('hello world'), inlined)
  def testURLResolveFails(self):
    """A url() pointing at a missing file raises DepsException."""
    fs = fake_fs.FakeFS()
    fs.AddFile('/src/foo/x.css', """
      .x .y {
      background-image: url(../images/missing.jpeg);
      }
      """)
    with fs:
      project = project_module.Project(['/src/'],
                                       include_tvcm_paths=False)
      loader = resource_loader.ResourceLoader(project)
      self.assertRaises(module.DepsException,
                        lambda: loader.LoadStyleSheet('foo.x'))
  def testImportsCauseFailure(self):
    """@import statements are unsupported and must raise."""
    fs = fake_fs.FakeFS()
    fs.AddFile('/src/foo/x.css', """
      @import url(awesome.css);
      """)
    with fs:
      project = project_module.Project(['/src/'],
                                       include_tvcm_paths=False)
      loader = resource_loader.ResourceLoader(project)
      self.assertRaises(Exception,
                        lambda: loader.LoadStyleSheet('foo.x'))
| 29.565217 | 72 | 0.639706 |
f482f0d491562fd44756da047d0505afe07e0c86 | 3,346 | py | Python | tests/acceptance/features/glancesync/steps/conflicts.py | telefonicaid/fiware-glancesync | 5ad0c80e12b9384473f31bf336015c75cf02a2a2 | [
"Apache-2.0"
] | null | null | null | tests/acceptance/features/glancesync/steps/conflicts.py | telefonicaid/fiware-glancesync | 5ad0c80e12b9384473f31bf336015c75cf02a2a2 | [
"Apache-2.0"
] | 88 | 2015-07-21T22:13:23.000Z | 2016-11-15T21:28:56.000Z | tests/acceptance/features/glancesync/steps/conflicts.py | telefonicaid/fiware-glancesync | 5ad0c80e12b9384473f31bf336015c75cf02a2a2 | [
"Apache-2.0"
] | 2 | 2015-08-12T11:19:55.000Z | 2018-05-25T19:04:43.000Z | # -*- coding: utf-8 -*-
# Copyright 2015-2016 Telefónica Investigación y Desarrollo, S.A.U
#
# This file is part of FIWARE project.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
#
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For those usages not covered by the Apache version 2.0 License please
# contact with opensource@tid.es
from behave import step
from hamcrest import assert_that, is_not
from qautils.dataset.dataset_utils import DatasetUtils
import commons.glancesync_output_assertions as glancesync_assertions
# Module metadata.
__copyright__ = "Copyright 2015-2016"
__license__ = " Apache License, Version 2.0"
# Regex matching `name(value)` config references. Not referenced by the
# steps visible in this module — presumably used elsewhere (TODO confirm).
REPLACE_CONFIG_VALUE_PATTER = "(\w*)\((\w*)\)"
# Shared helper for behave dataset manipulation.
__dataset_utils__ = DatasetUtils()
@step(u'the image "(?P<image_name>\w*)" is replaced')
def image_is_replaced(context, image_name):
    """Assert that *image_name* was replaced in every non-master region."""
    sync_result = context.glancesync_result
    assert_that(sync_result, is_not(None),
                "Problem when executing Sync command")
    slave_regions = (r for r in context.glance_manager_list
                     if r != context.master_region_name)
    for slave in slave_regions:
        glancesync_assertions.image_is_replaced_assertion(sync_result, slave, image_name)
@step(u'the image "(?P<image_name>\w*)" is not replaced')
def image_is_not_replaced(context, image_name):
    """Assert that *image_name* was NOT replaced in any non-master region."""
    sync_result = context.glancesync_result
    assert_that(sync_result, is_not(None),
                "Problem when executing Sync command")
    slave_regions = (r for r in context.glance_manager_list
                     if r != context.master_region_name)
    for slave in slave_regions:
        glancesync_assertions.image_is_not_replaced_assertion(sync_result, slave, image_name)
@step(u'all images are replaced')
def images_are_replaced(context):
    """Assert that every previously created image was replaced."""
    for created_image in context.created_images_list:
        image_is_replaced(context, created_image)
@step(u'the image "(?P<image_name>\w*)" is renamed and replaced')
def image_is_renamed_replaced(context, image_name):
    """Assert that *image_name* was renamed and replaced in every non-master region."""
    sync_result = context.glancesync_result
    assert_that(sync_result, is_not(None),
                "Problem when executing Sync command")
    slave_regions = (r for r in context.glance_manager_list
                     if r != context.master_region_name)
    for slave in slave_regions:
        glancesync_assertions.image_is_renamed_replaced_assertion(sync_result, slave, image_name)
@step(u'the image "(?P<image_name>\w*)" is neither renamed nor replaced')
def image_is_not_renamed_replaced(context, image_name):
    """Assert that *image_name* was neither renamed nor replaced in any non-master region."""
    sync_result = context.glancesync_result
    assert_that(sync_result, is_not(None),
                "Problem when executing Sync command")
    slave_regions = (r for r in context.glance_manager_list
                     if r != context.master_region_name)
    for slave in slave_regions:
        glancesync_assertions.image_is_not_renamed_replaced_assertion(sync_result,
                                                                      slave, image_name)
@step(u'all images are renamed and replaced')
def images_are_renamed_replaced(context):
    """Assert that every previously created image was renamed and replaced."""
    for created_image in context.created_images_list:
        image_is_renamed_replaced(context, created_image)
| 35.221053 | 116 | 0.737298 |
0ce85902719c9de2850a5df41757eb30ef0d52c6 | 242 | py | Python | mwxml/iteration/util.py | framawiki/python-mwxml | 6a8c18be99cd0bcee9c496e607f08bf4dfe5b510 | [
"MIT"
] | 23 | 2015-09-13T04:42:24.000Z | 2021-05-28T23:28:57.000Z | mwxml/iteration/util.py | framawiki/python-mwxml | 6a8c18be99cd0bcee9c496e607f08bf4dfe5b510 | [
"MIT"
] | 23 | 2015-01-14T04:48:59.000Z | 2015-08-25T19:25:43.000Z | mwxml/iteration/util.py | framawiki/python-mwxml | 6a8c18be99cd0bcee9c496e607f08bf4dfe5b510 | [
"MIT"
] | 14 | 2015-09-15T16:04:50.000Z | 2022-01-09T19:18:39.000Z | def consume_tags(tag_map, element):
    """Convert the children of *element* using the converters in *tag_map*.

    *tag_map* maps a child tag name to a callable applied to that child;
    children whose tag has no entry are ignored.  Each child is expected to
    expose a ``.tag`` attribute (e.g. an ElementTree element).
    Returns a dict ``{tag_name: converted_value}``.
    """
    value_map = {}
    for sub_element in element:
        tag_name = sub_element.tag
        if tag_name in tag_map:
            # If a tag occurs more than once, the last occurrence wins.
            value_map[tag_name] = tag_map[tag_name](sub_element)
    return value_map
| 24.2 | 64 | 0.661157 |
b11dde9f963e126a69f253509fba773b960b9faf | 2,133 | py | Python | vulnerabilities/migrations/0008_ruby_importer.py | edoardolanzini/vulnerablecode | d33bd5215a94ea4301763baaf29f3cf22d86a9b4 | [
"Apache-2.0"
] | null | null | null | vulnerabilities/migrations/0008_ruby_importer.py | edoardolanzini/vulnerablecode | d33bd5215a94ea4301763baaf29f3cf22d86a9b4 | [
"Apache-2.0"
] | null | null | null | vulnerabilities/migrations/0008_ruby_importer.py | edoardolanzini/vulnerablecode | d33bd5215a94ea4301763baaf29f3cf22d86a9b4 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2017 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/vulnerablecode/
# The VulnerableCode software is licensed under the Apache License version 2.0.
# Data generated with VulnerableCode require an acknowledgment.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with VulnerableCode or any VulnerableCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with VulnerableCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# VulnerableCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# VulnerableCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/vulnerablecode/ for support and download.
from django.db import migrations
def add_ruby_importer(apps, _):
    """Forward migration: register the Ruby advisory-DB importer."""
    importer_model = apps.get_model('vulnerabilities', 'Importer')
    importer_config = {
        'repository_url': 'https://github.com/rubysec/ruby-advisory-db.git',
    }
    importer_model.objects.create(
        name='ruby',
        license='',
        last_run=None,
        data_source='RubyDataSource',
        data_source_cfg=importer_config,
    )
def remove_ruby_importer(apps, _):
    """Backward migration: remove every 'ruby' Importer row.

    The previous implementation deleted only the first match
    (``qs[0].delete()``); if duplicate rows existed, the reverse migration
    left stale entries behind.  Deleting every matching row makes the
    migration fully reversible.
    """
    Importer = apps.get_model('vulnerabilities', 'Importer')
    for importer in Importer.objects.filter(name='ruby'):
        importer.delete()
class Migration(migrations.Migration):
    """Reversible data migration: forward registers the Ruby advisory-DB
    importer, backward removes it again."""
    # Must run after the npm importer has been created.
    dependencies = [
        ('vulnerabilities', '0007_npm_importer'),
    ]
    operations = [
        migrations.RunPython(add_ruby_importer, remove_ruby_importer),
    ]
| 38.089286 | 93 | 0.732771 |
619e20a12326be7e7f4215aadd2281770a6cb2ab | 63,451 | py | Python | livius/video/processing/speakerTracking.py | papar22/livius | a28929ef27f9737a598bbae36360ebe7b55a3f41 | [
"Unlicense"
] | 1 | 2018-05-08T20:04:08.000Z | 2018-05-08T20:04:08.000Z | livius/video/processing/speakerTracking.py | raffienficiaud/livius | a28929ef27f9737a598bbae36360ebe7b55a3f41 | [
"Unlicense"
] | null | null | null | livius/video/processing/speakerTracking.py | raffienficiaud/livius | a28929ef27f9737a598bbae36360ebe7b55a3f41 | [
"Unlicense"
] | null | null | null |
"""
This module implements various algorithms based on CMT for speaker tracking.
The implementations are based on http://www.gnebehay.com/cmt/ and for more details,
the readers will be referred to the following paper.
http://www.gnebehay.com/publications/wacv_2014/wacv_2014.pdf
This algorithm has been combined with Kalman filter, and
different approaches have been used to ease the computation time.
For every algorithm, one class has been defined in which there is a method named "speakerTracker" to call.
Different classes have different attributes, but in order to call the corresponding
function "speakerTracker", all classes are the same.
classes:
- CMT_algorithm
--> to run the CMT only
- CMT_algorithm_kalman_filter
--> to run the CMT, predict, update and smooth the results by kalman filter
- CMT_algorithm_kalman_filter_stripe
--> to run the CMT, predict, update and smooth the results by kalman filter (in a stripe line including the speaker)
- CMT_algorithm_kalman_filter_downsample
--> to run the CMT, predict, update and smooth the results by kalman filter (downsampling the frames to ease the computational time)
- CMT_algorithm_kalman_filter_vertical_mean
--> to run the CMT, predict, update and smooth the results by kalman filter (no vertical coordinate estimation --
we assume the speaker won't jump during the lecture)
- CMT_algorithm_kalman_filter_neighboring
--> to run the CMT, predict, update and smooth the results by kalman filter (keypoint calculation is only for neighboring window)
- FOV_specification
--> to get a certain amount of field of view including the speaker
"""
import os
import cv2
import exceptions
import json
import glob
import time
import logging
from numpy import empty, nan
import sys
import math
import CMT.CMT
import CMT.util as cmtutil
import numpy as np
from moviepy.editor import *
from pykalman import KalmanFilter
from functools import wraps
from moviepy.video.fx.crop import crop as moviepycrop
import matplotlib.pyplot as plt
from pylab import *
from ...util.histogram import get_histogram_min_max_with_percentile
# logging facility
FORMAT = '[%(asctime)-15s] %(message)s'
logging.basicConfig(format=FORMAT)
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
# temporary path
_tmp_path = os.path.join('/home/livius/Code/livius/livius/Example Data/tmp')
if not os.path.exists(_tmp_path):
os.makedirs(_tmp_path)
debug = True
def counterFunction(func):
    """Decorator that counts how many times *func* has been invoked.

    The wrapper exposes the running total through its ``count`` attribute
    (starting at 0); the counter is bumped *before* the wrapped call runs.
    """
    @wraps(func)
    def counted(*args, **kwargs):
        counted.count = counted.count + 1
        return func(*args, **kwargs)
    counted.count = 0
    return counted
class CMT_algorithm():
    """Speaker tracking with plain CMT: every output frame is a fixed-size
    window cropped around the raw (unfiltered) per-frame CMT center."""
    def __init__(self, inputPath, bBox=None , skip=None):
        self.inputPath = inputPath # Input video path, or an image-sequence format string.
        self.bBox = bBox # Optional initial bounding box as an 'x1,y1,x2,y2' string.
        self.skip = skip # Skip the first n frames.
        self.CMT = CMT.CMT.CMT() # Underlying CMT tracker instance.
    def speakerTracker(self):
        """Initialise CMT on the first frame and return a moviepy clip whose
        frames are cropped around the tracked speaker (see :meth:`crop`).

        Raises Exception when a user-supplied bounding box cannot be parsed;
        exits the process when no input path was given.
        """
        # Clean up
        cv2.destroyAllWindows()
        if self.inputPath is not None:
            # If a path to a file was given, assume it is a single video file
            if os.path.isfile(self.inputPath):
                cap = cv2.VideoCapture(self.inputPath)
                clip = VideoFileClip(self.inputPath, audio=False)
                fps = cap.get(cv2.cv.CV_CAP_PROP_FPS)
                self.numFrames = cap.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT)
                print "[speakerTrackering] Number of frames" , self.numFrames
                # baseName is computed but unused here (kept for parity with
                # the other tracker classes).
                pathBase = os.path.basename(self.inputPath)
                pathDirectory = os.path.dirname(self.inputPath)
                baseName = pathDirectory + '/' + os.path.splitext(pathBase)[0] + '_' + 'speakerCoordinates.txt'
                # Skip first frames if required
                if self.skip is not None:
                    cap.set(cv2.cv.CV_CAP_PROP_POS_FRAMES, self.skip)
            # Otherwise assume it is a format string for reading images
            else:
                cap = cmtutil.FileVideoCapture(self.inputPath)
                # Skip first frames if required
                if self.skip is not None:
                    cap.frame = 1 + self.skip
        else:
            # If no input path was specified, abort (no camera fallback implemented).
            sys.exit("[speakerTrackering] Error: no input path was specified")
        # Read first frame
        status, im0 = cap.read()
        imGray0 = cv2.cvtColor(im0, cv2.COLOR_BGR2GRAY)
        imDraw = np.copy(im0)
        if self.bBox is not None:
            # Try to disassemble user specified bounding box
            values = self.bBox.split(',')
            try:
                values = [int(v) for v in values]
            except:
                raise Exception('[speakerTrackering] Unable to parse bounding box')
            if len(values) != 4:
                raise Exception('[speakerTrackering] Bounding box must have exactly 4 elements')
            bbox = np.array(values)
            # Convert to point representation, adding singleton dimension
            bbox = cmtutil.bb2pts(bbox[None, :])
            # Squeeze
            bbox = bbox[0, :]
            tl = bbox[:2]
            br = bbox[2:4]
        else:
            # Get rectangle input from user (interactive selection).
            (tl, br) = cmtutil.get_rect(imDraw)
        print '[speakerTrackering] Using', tl, br, 'as initial bounding box for the speaker'
        self.CMT.initialise(imGray0, tl, br)
        newClip = clip.fl_image(self.crop)
        return newClip
    def crop (self, frame):
        """Run CMT on *frame* and return a fixed-size window around the
        current center; when no valid center is found, the zero-initialised
        buffer is returned instead."""
        windowSize = (2 * 640, 2 * 360) # (width, height) of the output window
        newFrames = np.zeros((windowSize[0], windowSize[1], 3))
        imGray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        self.CMT.process_frame(imGray)
        if not (math.isnan(self.CMT.center[0]) or math.isnan(self.CMT.center[1])
            or (self.CMT.center[0] <= 0) or (self.CMT.center[1] <= 0)):
            # NOTE(review): np.floor returns floats which are used as slice
            # indices below; this only works on older numpy (< 1.12) /
            # Python 2 stacks -- confirm the target environment.
            x1 = np.floor(self.CMT.center[1] - windowSize[1] / 2)
            y1 = np.floor(self.CMT.center[0] - windowSize[0] / 2)
            x2 = np.floor(x1 + windowSize[1])
            y2 = np.floor(y1 + windowSize[0])
            # Corner correction (Height): clamp the window inside the frame.
            if (x1 <= 0):
                x1 = 0
                x2 = np.floor(x1 + windowSize[1])
            if (x2 >= imGray.shape[0]):
                x2 = np.floor(imGray.shape[0])
                x1 = np.floor(x2 - windowSize[1])
            # Corner correction (Width)
            if (y1 <= 0):
                y1 = 0
                y2 = np.floor(y1 + windowSize[0])
            if (y2 >= imGray.shape[1]):
                y2 = np.floor(imGray.shape[1])
                y1 = np.floor(y2 - windowSize[0])
            newFrames = frame[x1:x2, y1:y2, :]
        # print 'Center: {0:.2f},{1:.2f}'.format(CMT.center[0], CMT.center[1])
        return newFrames
class CMT_algorithm_kalman_filter():
    """Speaker tracking with CMT whose per-frame center measurements are
    filtered and smoothed by a constant-velocity Kalman filter before the
    video is cropped around the smoothed trajectory."""
    def __init__(self, inputPath, skip=None):
        self.inputPath = inputPath # Input video path, or an image-sequence format string.
        self.skip = skip # Skip the first n frames.
        self.frameCounter = 0 # Index of the frame currently being cropped.
        self.numFrames = None # Total frame count, filled in by speakerTracker().
        self.CMT = CMT.CMT.CMT() # Underlying CMT tracker instance.
    def speakerTracker(self):
        """Run CMT over the whole video, Kalman-filter/smooth the measured
        centers, then return a moviepy clip cropped around the smoothed
        track.  Relies on the module-level ``debug`` flag for the hardcoded
        initial bounding box and for dumping annotated debug frames."""
        # Clean up
        cv2.destroyAllWindows()
        if self.inputPath is not None:
            # If a path to a file was given, assume it is a single video file
            if os.path.isfile(self.inputPath):
                cap = cv2.VideoCapture(self.inputPath)
                clip = VideoFileClip(self.inputPath, audio=False)
                fps = cap.get(cv2.cv.CV_CAP_PROP_FPS)
                self.numFrames = cap.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT)
                print "[speakerTracker] Number of frames" , self.numFrames
                pathBase = os.path.basename(self.inputPath)
                pathDirectory = os.path.dirname(self.inputPath)
                baseName = pathDirectory + '/' + os.path.splitext(pathBase)[0] + '_'
                # Skip first frames if required
                if self.skip is not None:
                    cap.set(cv2.cv.CV_CAP_PROP_POS_FRAMES, self.skip)
            # Otherwise assume it is a format string for reading images
            else:
                cap = cmtutil.FileVideoCapture(self.inputPath)
                # Skip first frames if required
                if self.skip is not None:
                    cap.frame = 1 + self.skip
        else:
            # If no input path was specified, abort.
            sys.exit("[speakerTracker] Error: no input path was specified")
        # Read first frame
        status, im0 = cap.read()
        imGray0 = cv2.cvtColor(im0, cv2.COLOR_BGR2GRAY)
        if debug:
            # speaker bounding box used for debugging (hardcoded coordinates)
            (tl, br) = (1848, 840), (2136, 1116)
        else:
            imDraw = np.copy(im0)
            (tl, br) = cmtutil.get_rect(imDraw)
        print '[speakerTrackering] Using', tl, br, 'as initial bounding box for the speaker'
        self.CMT.initialise(imGray0, tl, br)
        # Measurements initialised to -1 so that missing entries can later be
        # masked out with masked_less(..., 0).
        measuredTrack = np.zeros((self.numFrames + 10, 2)) - 1
        count = 0
        while count <= self.numFrames:
            status, im = cap.read()
            if not status:
                break
            im_gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
            logging.debug('[tracker] processing frame %d', count)
            self.CMT.process_frame(im_gray)
            # debug: dump keypoint visualisations to the tmp directory
            if debug:
                im_debug = np.copy(im)
                cmtutil.draw_keypoints(self.CMT.active_keypoints, im_debug, (0, 0, 255))
                cmtutil.draw_keypoints(self.CMT.tracked_keypoints, im_debug, (0, 255, 0))
            print 'frame: {2:4d}, Center: {0:.2f},{1:.2f}'.format(self.CMT.center[0], self.CMT.center[1] , count)
            if not (math.isnan(self.CMT.center[0])
                    or math.isnan(self.CMT.center[1])
                    or (self.CMT.center[0] <= 0)
                    or (self.CMT.center[1] <= 0)):
                measuredTrack[count, 0] = self.CMT.center[0]
                measuredTrack[count, 1] = self.CMT.center[1]
            else:
                # take the previous estimate if none is found in the current frame
                measuredTrack[count, 0] = measuredTrack[count - 1, 0]
                measuredTrack[count, 1] = measuredTrack[count - 1, 1]
            if debug:
                cmtutil.draw_bounding_box((int(measuredTrack[count, 0] - 50), int(measuredTrack[count, 1] - 50)),
                                          (int(measuredTrack[count, 0] + 50), int(measuredTrack[count, 1] + 50)),
                                          im_debug)
                cv2.imwrite(os.path.join(_tmp_path, 'debug_file_%.6d.png' % count), im_debug)
                im_debug = np.copy(im)
                cmtutil.draw_keypoints([kp.pt for kp in self.CMT.keypoints_cv], im_debug, (0, 0, 255))
                cv2.imwrite(os.path.join(_tmp_path, 'all_keypoints_%.6d.png' % count), im_debug)
            count += 1
        numMeas = measuredTrack.shape[0]
        # Mask out entries that stayed at -1 (no measurement).
        markedMeasure = np.ma.masked_less(measuredTrack, 0)
        # Kalman Filter Parameters (constant-velocity model; state is
        # [x, y, vx, vy], observations are the measured [x, y] centers).
        deltaT = 1.0 / clip.fps
        transitionMatrix = [[1, 0, deltaT, 0], [0, 1, 0, deltaT], [0, 0, 1, 0], [0, 0, 0, 1]] # A
        observationMatrix = [[1, 0, 0, 0], [0, 1, 0, 0]] # C
        xinit = markedMeasure[0, 0]
        yinit = markedMeasure[0, 1]
        vxinit = markedMeasure[1, 0] - markedMeasure[0, 0]
        vyinit = markedMeasure[1, 1] - markedMeasure[0, 1]
        initState = [xinit, yinit, vxinit, vyinit] # mu0
        initCovariance = 1.0e-3 * np.eye(4) # sigma0
        transistionCov = 1.0e-4 * np.eye(4) # Q
        observationCov = 1.0e-1 * np.eye(2) # R
        kf = KalmanFilter(transition_matrices=transitionMatrix,
                          observation_matrices=observationMatrix,
                          initial_state_mean=initState,
                          initial_state_covariance=initCovariance,
                          transition_covariance=transistionCov,
                          observation_covariance=observationCov)
        self.measuredTrack = measuredTrack
        (self.filteredStateMeans, self.filteredStateCovariances) = kf.filter(markedMeasure)
        (self.filterStateMeanSmooth, self.filterStateCovariancesSmooth) = kf.smooth(markedMeasure)
        # np.savetxt((baseName + 'speakerCoordinates_CMT_Kalman.txt'),
        # np.hstack((np.asarray(self.filteredStateMeans), np.asarray(self.filteredStateCovariances) ,
        # np.asarray(self.filterStateMeanSmooth), np.asarray(self.filterStateCovariancesSmooth) )))
        # np.savetxt((baseName + 'speakerCoordinates_CMT.txt'), np.asarray(measuredTrack))
        newClip = clip.fl_image(self.crop)
        return newClip
    @counterFunction
    def crop (self, frame):
        """Crop a fixed-size window around the smoothed center for the
        current frame; ``crop.count`` (from @counterFunction) indexes into
        the smoothed track computed by :meth:`speakerTracker`."""
        self.frameCounter = self.crop.count
        # print self.frameCounter
        windowSize = (2 * 640, 2 * 360) # (width, height) of the output window
        newFrames = np.zeros((windowSize[0], windowSize[1], 3))
        if self.frameCounter <= self.numFrames:
            imGray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            # NOTE(review): np.floor keeps float type; float slice indices
            # only work on older numpy (< 1.12) -- confirm the environment.
            x1 = np.floor(self.filterStateMeanSmooth[(self.frameCounter) - 1][1] - windowSize[1] / 2)
            y1 = np.floor(self.filterStateMeanSmooth[(self.frameCounter) - 1][0] - windowSize[0] / 2)
            x2 = np.floor(x1 + windowSize[1])
            y2 = np.floor(y1 + windowSize[0])
            # Corner correction (Height): clamp the window inside the frame.
            if (x1 <= 0):
                x1 = 0
                x2 = np.floor(x1 + windowSize[1])
            if (x2 >= imGray.shape[0]):
                x2 = np.floor(imGray.shape[0])
                x1 = np.floor(x2 - windowSize[1])
            # Corner correction (Width)
            if (y1 <= 0):
                y1 = 0
                y2 = np.floor(y1 + windowSize[0])
            if (y2 >= imGray.shape[1]):
                y2 = np.floor(imGray.shape[1])
                y1 = np.floor(y2 - windowSize[0])
            # print x1, y1 , x2, y2
            newFrames = frame[x1:x2, y1:y2, :]
        return newFrames
class FOV_specification():
    """No tracking at all: crop a fixed, user-selected 4:3 field of view
    that contains the speaker for the whole video."""
    def __init__(self, inputPath, skip=None):
        self.inputPath = inputPath # Input video path, or an image-sequence format string.
        self.skip = skip # Skip the first n frames.
        self.CMT = CMT.CMT.CMT() # Kept for interface parity; not used for tracking here.
    def speakerTracker(self):
        """Ask the user for a rectangle on the first frame and return the
        clip statically cropped to a 4:3 region derived from it."""
        # Clean up
        cv2.destroyAllWindows()
        if self.inputPath is not None:
            # If a path to a file was given, assume it is a single video file
            if os.path.isfile(self.inputPath):
                cap = cv2.VideoCapture(self.inputPath)
                clip = VideoFileClip(self.inputPath, audio=False)
                fps = cap.get(cv2.cv.CV_CAP_PROP_FPS)
                self.numFrames = cap.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT)
                print "[speakerTrackering] Number of frames" , self.numFrames
                pathBase = os.path.basename(self.inputPath)
                pathDirectory = os.path.dirname(self.inputPath)
                baseName = pathDirectory + '/' + os.path.splitext(pathBase)[0] + '_' + 'speakerCoordinates.txt'
                # Skip first frames if required
                if self.skip is not None:
                    cap.set(cv2.cv.CV_CAP_PROP_POS_FRAMES, self.skip)
            # Otherwise assume it is a format string for reading images
            else:
                cap = cmtutil.FileVideoCapture(self.inputPath)
                # Skip first frames if required
                if self.skip is not None:
                    cap.frame = 1 + self.skip
        else:
            # If no input path was specified, abort.
            sys.exit("[speakerTrackering] Error: no input path was specified")
        # Read first frame
        status, im0 = cap.read()
        imGray0 = cv2.cvtColor(im0, cv2.COLOR_BGR2GRAY)
        imDraw = np.copy(im0)
        (tl, br) = cmtutil.get_rect(imDraw)
        print '[speakerTrackering] Using', tl, br, 'as initial bounding box for the speaker'
        # Get the points to crop the video with 4:3 aspect ratio:
        # the height is derived from the selected width (width * 3/4), so
        # only tl and the selected width determine the final region.
        x1 = np.floor(tl[0])
        y1 = np.floor(tl[1])
        x2 = np.floor(br[0])
        y2 = np.floor(tl[1] + np.abs((br[0] - tl[0]) * (3.0 / 4.0)))
        print x1, x2, y1, y2
        croppedClip = moviepycrop(clip, x1, y1, x2, y2)
        return croppedClip
class CMT_algorithm_kalman_filter_stripe():
    """CMT + Kalman variant that restricts keypoint processing to a
    horizontal stripe around the speaker (selected rectangle +/- 100 px
    vertically) to reduce computation time."""
    def __init__(self, inputPath, skip=None):
        self.inputPath = inputPath # Input video path, or an image-sequence format string.
        # NOTE(review): the *skip* argument is accepted but never stored/used here.
        self.frameCounter = 0 # Index of the frame currently being cropped.
        self.numFrames = None # Total frame count, filled in by speakerTracker().
        self.CMT = CMT.CMT.CMT() # Underlying CMT tracker instance.
    def speakerTracker(self):
        """Track within the stripe, Kalman-filter/smooth the centers and
        return a clip cropped around the smoothed track."""
        # Clean up
        cv2.destroyAllWindows()
        if self.inputPath is not None:
            # If a path to a file was given, assume it is a single video file
            if os.path.isfile(self.inputPath):
                cap = cv2.VideoCapture(self.inputPath)
                clip = VideoFileClip(self.inputPath, audio=False)
                W, H = clip.size
                fps = cap.get(cv2.cv.CV_CAP_PROP_FPS)
                self.numFrames = cap.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT)
                print "[speakerTracker] Number of frames" , self.numFrames
                pathBase = os.path.basename(self.inputPath)
                pathDirectory = os.path.dirname(self.inputPath)
                baseName = pathDirectory + '/' + os.path.splitext(pathBase)[0] + '_'
            # Otherwise assume it is a format string for reading images
            else:
                cap = cmtutil.FileVideoCapture(self.inputPath)
        else:
            # If no input path was specified, abort.
            sys.exit("[speakerTracker] Error: no input path was specified")
        # Read first frame
        status, im0 = cap.read()
        imDraw = np.copy(im0)
        (tl, br) = cmtutil.get_rect(imDraw)
        print '[speakerTrackering] Using', tl, br, 'as initial bounding box for the speaker'
        # Stripe bounds: full width, selection height +/- 100 px.
        x1 = 1
        x2 = W
        y1 = tl[1] - 100
        y2 = br[1] + 100
        imGray0 = cv2.cvtColor(im0, cv2.COLOR_BGR2GRAY)
        croppedGray0 = imGray0[y1:y2, x1:x2]
        # NOTE(review): tl/br are in full-image coordinates but the tracker is
        # initialised on the cropped stripe -- presumably they should be
        # translated by (x1, y1); verify against CMT.initialise semantics.
        self.CMT.initialise(croppedGray0, tl, br)
        # Measurements initialised to -1 so missing entries can be masked out.
        measuredTrack = np.zeros((self.numFrames + 10, 2)) - 1
        count = 0
        for frame in clip.iter_frames():
            grayFrame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            im_gray = grayFrame[y1:y2, x1:x2]
            # NOTE(review): plt.show() blocks on every frame -- looks like
            # leftover debugging; confirm before running unattended.
            plt.imshow(im_gray, cmap=cm.Greys_r)
            plt.show()
            self.CMT.process_frame(im_gray)
            print 'frame: {2:4d}, Center: {0:.2f},{1:.2f}'.format(self.CMT.center[0], self.CMT.center[1] , count)
            if not (math.isnan(self.CMT.center[0]) or math.isnan(self.CMT.center[1])
                or (self.CMT.center[0] <= 0) or (self.CMT.center[1] <= 0)):
                measuredTrack[count, 0] = self.CMT.center[0]
                measuredTrack[count, 1] = self.CMT.center[1]
            count += 1
        numMeas = measuredTrack.shape[0]
        markedMeasure = np.ma.masked_less(measuredTrack, 0)
        # Kalman Filter Parameters (constant-velocity model).
        deltaT = 1.0 / clip.fps
        transitionMatrix = [[1, 0, deltaT, 0], [0, 1, 0, deltaT], [0, 0, 1, 0], [0, 0, 0, 1]] # A
        observationMatrix = [[1, 0, 0, 0], [0, 1, 0, 0]] # C
        xinit = markedMeasure[0, 0]
        yinit = markedMeasure[0, 1]
        vxinit = markedMeasure[1, 0] - markedMeasure[0, 0]
        vyinit = markedMeasure[1, 1] - markedMeasure[0, 1]
        initState = [xinit, yinit, vxinit, vyinit] # mu0
        initCovariance = 1.0e-3 * np.eye(4) # sigma0
        transistionCov = 1.0e-4 * np.eye(4) # Q
        observationCov = 1.0e-1 * np.eye(2) # R
        kf = KalmanFilter(transition_matrices=transitionMatrix,
                          observation_matrices=observationMatrix,
                          initial_state_mean=initState,
                          initial_state_covariance=initCovariance,
                          transition_covariance=transistionCov,
                          observation_covariance=observationCov)
        self.measuredTrack = measuredTrack
        (self.filteredStateMeans, self.filteredStateCovariances) = kf.filter(markedMeasure)
        (self.filterStateMeanSmooth, self.filterStateCovariancesSmooth) = kf.smooth(markedMeasure)
        newClip = clip.fl_image(self.crop)
        return newClip
    @counterFunction
    def crop (self, frame):
        """Crop a fixed-size window around the smoothed center for the
        current frame (indexed by ``crop.count`` from @counterFunction)."""
        self.frameCounter = self.crop.count
        # print self.frameCounter
        windowSize = (2 * 640, 2 * 360) # (width, height) of the output window
        newFrames = np.zeros((windowSize[0], windowSize[1], 3))
        if self.frameCounter <= self.numFrames:
            imGray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            x1 = np.floor(self.filterStateMeanSmooth[(self.frameCounter) - 1][1] - windowSize[1] / 2)
            y1 = np.floor(self.filterStateMeanSmooth[(self.frameCounter) - 1][0] - windowSize[0] / 2)
            x2 = np.floor(x1 + windowSize[1])
            y2 = np.floor(y1 + windowSize[0])
            # Corner correction (Height): clamp the window inside the frame.
            if (x1 <= 0):
                x1 = 0
                x2 = np.floor(x1 + windowSize[1])
            if (x2 >= imGray.shape[0]):
                x2 = np.floor(imGray.shape[0])
                x1 = np.floor(x2 - windowSize[1])
            # Corner correction (Width)
            if (y1 <= 0):
                y1 = 0
                y2 = np.floor(y1 + windowSize[0])
            if (y2 >= imGray.shape[1]):
                y2 = np.floor(imGray.shape[1])
                y1 = np.floor(y2 - windowSize[0])
            # print x1, y1 , x2, y2
            newFrames = frame[x1:x2, y1:y2, :]
        return newFrames
class CMT_algorithm_kalman_filter_downsample():
    """CMT + Kalman variant that runs the tracker on frames downsampled by
    *resizeFactor* to reduce computation time; the smoothed coordinates are
    scaled back to full resolution before cropping."""
    def __init__(self, inputPath, resizeFactor=0.5, skip=None):
        self.inputPath = inputPath # Input video path, or an image-sequence format string.
        # NOTE(review): the *skip* argument is accepted but never stored/used here.
        self.frameCounter = 0 # Index of the frame currently being cropped.
        self.numFrames = None # Total frame count, filled in by speakerTracker().
        self.CMT = CMT.CMT.CMT() # Underlying CMT tracker instance.
        self.resizeFactor = resizeFactor # Downsampling factor applied before tracking.
    def speakerTracker(self):
        """Track on downsampled frames, Kalman-filter/smooth the centers and
        return a clip cropped (at full resolution) around the smoothed track."""
        # Clean up
        cv2.destroyAllWindows()
        if self.inputPath is not None:
            # If a path to a file was given, assume it is a single video file
            if os.path.isfile(self.inputPath):
                cap = cv2.VideoCapture(self.inputPath)
                clip = VideoFileClip(self.inputPath, audio=False)
                W, H = clip.size
                fps = cap.get(cv2.cv.CV_CAP_PROP_FPS)
                self.numFrames = cap.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT)
                print "[speakerTracker] Number of frames" , self.numFrames
                pathBase = os.path.basename(self.inputPath)
                pathDirectory = os.path.dirname(self.inputPath)
                baseName = pathDirectory + '/' + os.path.splitext(pathBase)[0] + '_'
            # Otherwise assume it is a format string for reading images
            else:
                cap = cmtutil.FileVideoCapture(self.inputPath)
        else:
            # If no input path was specified, abort.
            sys.exit("[speakerTracker] Error: no input path was specified")
        # Read first frame
        status, im0 = cap.read()
        imGray0 = cv2.cvtColor(im0, cv2.COLOR_BGR2GRAY)
        imResized = cv2.resize(imGray0, (0, 0), fx=self.resizeFactor, fy=self.resizeFactor)
        imDraw = np.copy(imResized)
        # The rectangle is selected on the *downsampled* frame, so tl/br are
        # already in downsampled coordinates.
        (tl, br) = cmtutil.get_rect(imDraw)
        print '[speakerTrackering] Using', tl, br, 'as initial bounding box for the speaker'
        self.CMT.initialise(imResized, tl, br)
        # Measurements initialised to -1 so missing entries can be masked out.
        measuredTrack = np.zeros((self.numFrames + 10, 2)) - 1
        count = 0
        while count <= self.numFrames:
            status, im = cap.read()
            if not status:
                break
            im_gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
            im_resized = cv2.resize(im_gray, (0, 0), fx=self.resizeFactor, fy=self.resizeFactor)
            tic = time.time()
            self.CMT.process_frame(im_resized)
            toc = time.time()
            print 'frame: {2:4d}, Center: {0:.2f},{1:.2f}'.format(self.CMT.center[0], self.CMT.center[1] , count)
            # print 1000*(toc-tic)
            if not (math.isnan(self.CMT.center[0]) or math.isnan(self.CMT.center[1])
                or (self.CMT.center[0] <= 0) or (self.CMT.center[1] <= 0)):
                measuredTrack[count, 0] = self.CMT.center[0]
                measuredTrack[count, 1] = self.CMT.center[1]
            count += 1
        numMeas = measuredTrack.shape[0]
        markedMeasure = np.ma.masked_less(measuredTrack, 0)
        # Kalman Filter Parameters (constant-velocity model).
        deltaT = 1.0 / clip.fps
        transitionMatrix = [[1, 0, deltaT, 0], [0, 1, 0, deltaT], [0, 0, 1, 0], [0, 0, 0, 1]] # A
        observationMatrix = [[1, 0, 0, 0], [0, 1, 0, 0]] # C
        xinit = markedMeasure[0, 0]
        yinit = markedMeasure[0, 1]
        vxinit = markedMeasure[1, 0] - markedMeasure[0, 0]
        vyinit = markedMeasure[1, 1] - markedMeasure[0, 1]
        initState = [xinit, yinit, vxinit, vyinit] # mu0
        initCovariance = 1.0e-3 * np.eye(4) # sigma0
        transistionCov = 1.0e-4 * np.eye(4) # Q
        observationCov = 1.0e-1 * np.eye(2) # R
        kf = KalmanFilter(transition_matrices=transitionMatrix,
                          observation_matrices=observationMatrix,
                          initial_state_mean=initState,
                          initial_state_covariance=initCovariance,
                          transition_covariance=transistionCov,
                          observation_covariance=observationCov)
        self.measuredTrack = measuredTrack
        (self.filteredStateMeans, self.filteredStateCovariances) = kf.filter(markedMeasure)
        (self.filterStateMeanSmooth, self.filterStateCovariancesSmooth) = kf.smooth(markedMeasure)
        # np.savetxt((baseName + 'speakerCoordinates_CMT_Kalman.txt'),
        # np.hstack((np.asarray(self.filteredStateMeans), np.asarray(self.filteredStateCovariances) ,
        # np.asarray(self.filterStateMeanSmooth), np.asarray(self.filterStateCovariancesSmooth) )))
        # np.savetxt((baseName + 'speakerCoordinates_CMT.txt'), np.asarray(measuredTrack))
        newClip = clip.fl_image(self.crop)
        return newClip
    @counterFunction
    def crop (self, frame):
        """Crop a fixed-size full-resolution window around the smoothed
        center, rescaling the downsampled coordinates by 1/resizeFactor."""
        self.frameCounter = self.crop.count
        # print self.frameCounter
        windowSize = (2 * 640, 2 * 360) # (width, height) of the output window
        newFrames = np.zeros((windowSize[0], windowSize[1], 3))
        if self.frameCounter <= self.numFrames:
            imGray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            # Scale the smoothed (downsampled) coordinates back to full resolution.
            x1 = np.floor((self.filterStateMeanSmooth[(self.frameCounter) - 1][1]) * (1.0 / self.resizeFactor) - windowSize[1] / 2)
            y1 = np.floor((self.filterStateMeanSmooth[(self.frameCounter) - 1][0]) * (1.0 / self.resizeFactor) - windowSize[0] / 2)
            x2 = np.floor(x1 + windowSize[1])
            y2 = np.floor(y1 + windowSize[0])
            # Corner correction (Height): clamp the window inside the frame.
            if (x1 <= 0):
                x1 = 0
                x2 = np.floor(x1 + windowSize[1])
            if (x2 >= imGray.shape[0]):
                x2 = np.floor(imGray.shape[0])
                x1 = np.floor(x2 - windowSize[1])
            # Corner correction (Width)
            if (y1 <= 0):
                y1 = 0
                y2 = np.floor(y1 + windowSize[0])
            if (y2 >= imGray.shape[1]):
                y2 = np.floor(imGray.shape[1])
                y1 = np.floor(y2 - windowSize[0])
            # print x1, y1 , x2, y2
            newFrames = frame[x1:x2, y1:y2, :]
        return newFrames
class CMT_algorithm_kalman_filter_vertical_mean():
    """CMT + Kalman variant that fixes the vertical crop position to a mean
    value (the speaker is assumed not to jump), tracking only horizontally."""
    def __init__(self, inputPath, skip=None):
        self.inputPath = inputPath # Input video path, or an image-sequence format string.
        # NOTE(review): the *skip* argument is accepted but never stored/used here.
        self.frameCounter = 0 # Index of the frame currently being cropped.
        self.numFrames = None # Total frame count, filled in by speakerTracker().
        self.CMT = CMT.CMT.CMT() # Underlying CMT tracker instance.
    def speakerTracker(self):
        """Track, Kalman-filter/smooth the centers, derive a fixed vertical
        position (self.inity) and return a clip cropped around the track."""
        # Clean up
        cv2.destroyAllWindows()
        if self.inputPath is not None:
            # If a path to a file was given, assume it is a single video file
            if os.path.isfile(self.inputPath):
                cap = cv2.VideoCapture(self.inputPath)
                clip = VideoFileClip(self.inputPath, audio=False)
                fps = cap.get(cv2.cv.CV_CAP_PROP_FPS)
                self.numFrames = cap.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT)
                print "[speakerTracker] Number of frames" , self.numFrames
                pathBase = os.path.basename(self.inputPath)
                pathDirectory = os.path.dirname(self.inputPath)
                baseName = pathDirectory + '/' + os.path.splitext(pathBase)[0] + '_'
            # Otherwise assume it is a format string for reading images
            else:
                cap = cmtutil.FileVideoCapture(self.inputPath)
        else:
            # If no input path was specified, abort.
            sys.exit("[speakerTracker] Error: no input path was specified")
        # Read first frame
        status, im0 = cap.read()
        imGray0 = cv2.cvtColor(im0, cv2.COLOR_BGR2GRAY)
        imDraw = np.copy(im0)
        (tl, br) = cmtutil.get_rect(imDraw)
        print '[speakerTrackering] Using', tl, br, 'as initial bounding box for the speaker'
        self.CMT.initialise(imGray0, tl, br)
        # self.inity = tl[1] - self.CMT.center_to_tl[1]
        # Measurements initialised to -1 so missing entries can be masked out.
        measuredTrack = np.zeros((self.numFrames + 10, 2)) - 1
        count = 0
        for frame in clip.iter_frames():
            im_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            self.CMT.process_frame(im_gray)
            print 'frame: {2:4d}, Center: {0:.2f},{1:.2f}'.format(self.CMT.center[0], self.CMT.center[1] , count)
            # Only the horizontal coordinate is validated here: the vertical
            # position is later replaced by a mean value anyway.
            if not (math.isnan(self.CMT.center[0]) or (self.CMT.center[0] <= 0)):
                measuredTrack[count, 0] = self.CMT.center[0]
                measuredTrack[count, 1] = self.CMT.center[1]
            count += 1
        numMeas = measuredTrack.shape[0]
        markedMeasure = np.ma.masked_less(measuredTrack, 0)
        # Kalman Filter Parameters (constant-velocity model).
        deltaT = 1.0 / clip.fps
        transitionMatrix = [[1, 0, deltaT, 0], [0, 1, 0, deltaT], [0, 0, 1, 0], [0, 0, 0, 1]] # A
        observationMatrix = [[1, 0, 0, 0], [0, 1, 0, 0]] # C
        xinit = markedMeasure[0, 0]
        yinit = markedMeasure[0, 1]
        vxinit = markedMeasure[1, 0] - markedMeasure[0, 0]
        vyinit = markedMeasure[1, 1] - markedMeasure[0, 1]
        initState = [xinit, yinit, vxinit, vyinit] # mu0
        initCovariance = 1.0e-3 * np.eye(4) # sigma0
        transistionCov = 1.0e-4 * np.eye(4) # Q
        observationCov = 1.0e-1 * np.eye(2) # R
        kf = KalmanFilter(transition_matrices=transitionMatrix,
                          observation_matrices=observationMatrix,
                          initial_state_mean=initState,
                          initial_state_covariance=initCovariance,
                          transition_covariance=transistionCov,
                          observation_covariance=observationCov)
        self.measuredTrack = measuredTrack
        (self.filteredStateMeans, self.filteredStateCovariances) = kf.filter(markedMeasure)
        (self.filterStateMeanSmooth, self.filterStateCovariancesSmooth) = kf.smooth(markedMeasure)
        # NOTE(review): filterStateMeanSmooth[:][1] selects *row* 1 (the second
        # time step), not column 1 -- probably filterStateMeanSmooth[:, 1]
        # was intended; verify before relying on self.inity.
        self.inity = np.mean(self.filterStateMeanSmooth[:][1], axis=0)
        newClip = clip.fl_image(self.crop)
        return newClip
    @counterFunction
    def crop (self, frame):
        """Crop a fixed-size window: horizontal position follows the smoothed
        track, vertical position is the fixed self.inity mean."""
        self.frameCounter = self.crop.count
        # print self.frameCounter
        windowSize = (2 * 640, 2 * 360) # (width, height) of the output window
        newFrames = np.zeros((windowSize[0], windowSize[1], 3))
        if self.frameCounter <= self.numFrames:
            imGray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            y1 = np.floor(self.filterStateMeanSmooth[(self.frameCounter) - 1][0] - windowSize[1] / 2)
            x1 = np.floor(self.inity - windowSize[0] / 2)
            x2 = np.floor(x1 + windowSize[1])
            y2 = np.floor(y1 + windowSize[0])
            # Corner correction (Height): clamp the window inside the frame.
            if (x1 <= 0):
                x1 = 0
                x2 = np.floor(x1 + windowSize[1])
            if (x2 >= imGray.shape[0]):
                x2 = np.floor(imGray.shape[0])
                x1 = np.floor(x2 - windowSize[1])
            # Corner correction (Width)
            if (y1 <= 0):
                y1 = 0
                y2 = np.floor(y1 + windowSize[0])
            if (y2 >= imGray.shape[1]):
                y2 = np.floor(imGray.shape[1])
                y1 = np.floor(y2 - windowSize[0])
            # print x1, y1 , x2, y2
            newFrames = frame[x1:x2, y1:y2, :]
        return newFrames
class CMT_algorithm_kalman_filter_neighboring():
    """Speaker tracker that runs the CMT keypoint tracker on a small window
    centred on the previous detection (rather than the full frame), records
    the per-frame centres, Kalman-smooths the trajectory and finally returns
    a clip whose frames are cropped around the smoothed track.

    NOTE(review): uses the Python 2 print statement and the legacy
    ``cv2.cv.*`` constants, so this requires Python 2 + OpenCV 2.x.
    """

    def __init__(self, inputPath, skip=None):
        self.inputPath = inputPath # 'The input path.'
        self.skip = skip # 'Skip the first n frames.'
        # Frame index, updated from the @counterFunction call counter in crop()
        self.frameCounter = 0
        # Total frame count of the input; filled in by speakerTracker()
        self.numFrames = None
        # CMT keypoint tracker instance
        self.CMT = CMT.CMT.CMT()

    def speakerTracker(self):
        """Track the speaker over the whole clip.

        The user selects the initial bounding box interactively
        (cmtutil.get_rect) on the first frame; tracking then proceeds on a
        (2*marginPixels)^2 neighbourhood window around the last position.

        :returns: a moviepy clip whose frames are cropped via self.crop.
        """
        # Clean up
        cv2.destroyAllWindows()
        if self.inputPath is not None:
            # If a path to a file was given, assume it is a single video file
            if os.path.isfile(self.inputPath):
                cap = cv2.VideoCapture(self.inputPath)
                clip = VideoFileClip(self.inputPath, audio=False)
                fps = cap.get(cv2.cv.CV_CAP_PROP_FPS)
                self.numFrames = cap.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT)
                print "[speakerTracker] Number of frames" , self.numFrames
                pathBase = os.path.basename(self.inputPath)
                pathDirectory = os.path.dirname(self.inputPath)
                baseName = pathDirectory + '/' + os.path.splitext(pathBase)[0] + '_'
                # Skip first frames if required
                if self.skip is not None:
                    cap.set(cv2.cv.CV_CAP_PROP_POS_FRAMES, self.skip)
            # Otherwise assume it is a format string for reading images
            else:
                cap = cmtutil.FileVideoCapture(self.inputPath)
                # Skip first frames if required
                if self.skip is not None:
                    cap.frame = 1 + self.skip
        else:
            # If no input path was specified, open camera device
            sys.exit("[speakerTracker] Error: no input path was specified")
        # The number of pixels from the center of object till the border of cropped image
        marginPixels = 300
        # Read first frame
        status, im0 = cap.read()
        imGray0 = cv2.cvtColor(im0, cv2.COLOR_BGR2GRAY)
        imDraw = np.copy(im0)
        (tl0, br0) = cmtutil.get_rect(imDraw)
        print '[speakerTracker] Using', tl0, br0, 'as initial bounding box for the speaker'
        # First initialization to get the center
        self.CMT.initialise(imGray0, tl0, br0)
        # The first x and y coordinates of the object of interest
        self.inity = tl0[1] - self.CMT.center_to_tl[1]
        self.initx = tl0[0] - self.CMT.center_to_tl[0]
        # Crop the first frame
        imGray0_initial = imGray0[self.inity - marginPixels : self.inity + marginPixels,
                                  self.initx - marginPixels : self.initx + marginPixels]
        # Calculate the translation vector from main image to the cropped frame
        self.originFromMainImageY = self.inity - marginPixels
        self.originFromMainImageX = self.initx - marginPixels
        # Calculate the position of the selected rectangle in the cropped frame
        tl = (tl0[0] - self.originFromMainImageX , tl0[1] - self.originFromMainImageY)
        br = (br0[0] - self.originFromMainImageX , br0[1] - self.originFromMainImageY)
        # print '[speakerTracker] Using', tl, br, 'as initial bounding box for the speaker'
        # initialization and keypoint calculation
        self.CMT.initialise(imGray0_initial, tl, br)
        # Center of object in cropped frame
        self.currentY = tl[1] - self.CMT.center_to_tl[1]
        self.currentX = tl[0] - self.CMT.center_to_tl[0]
        # Center of object in main frame
        self.currentYMainImage = self.currentY + self.originFromMainImageY
        self.currentXMainImage = self.currentX + self.originFromMainImageX
        # -1 marks "no measurement"; these entries are masked out below
        # before the Kalman filter runs
        measuredTrack = np.zeros((self.numFrames + 10, 2)) - 1
        count = 0
        # loop to read all frames,
        # crop them with the center of last frame,
        # calculate keypoints and center of the object
        for frame in clip.iter_frames():
            # Read the frame and convert it to gray scale
            im_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            # Corner correction (Height)
            if (self.currentYMainImage + marginPixels >= im_gray.shape[0]):
                self.currentYMainImage = im_gray.shape[0] - marginPixels - 1
            else:
                self.currentYMainImage = self.currentYMainImage
            if (self.currentXMainImage + marginPixels >= im_gray.shape[1]):
                self.currentXMainImage = im_gray.shape[1] - marginPixels - 1
            else:
                self.currentXMainImage = self.currentXMainImage
            if (self.currentYMainImage - marginPixels <= 0):
                self.currentYMainImage = 0 + marginPixels + 1
            else:
                self.currentYMainImage = self.currentYMainImage
            if (self.currentXMainImage - marginPixels <= 0):
                self.currentXMainImage = 0 + marginPixels + 1
            else:
                self.currentXMainImage = self.currentXMainImage
            # Crop it by previous coordinates
            im_gray_crop = im_gray[self.currentYMainImage - marginPixels : self.currentYMainImage + marginPixels,
                                   self.currentXMainImage - marginPixels : self.currentXMainImage + marginPixels]
            # plt.imshow(im_gray_crop, cmap = cm.Greys_r)
            # plt.show()
            # print "self.currentYMainImage:", self.currentYMainImage
            # print "self.currentXMainImage:", self.currentXMainImage
            # print im_gray_crop.shape
            # Compute all keypoints in the cropped frame
            self.CMT.process_frame(im_gray_crop)
            # print 'frame: {2:4d}, Center: {0:.2f},{1:.2f}'.format(self.CMT.center[0], self.CMT.center[1] , count)
            if not (math.isnan(self.CMT.center[0]) or math.isnan(self.CMT.center[1])
                    or (self.CMT.center[0] <= 0) or (self.CMT.center[1] <= 0)):
                # Compute the center of the object with respect to the main image
                self.diffY = self.CMT.center[0] - self.currentY
                self.diffX = self.CMT.center[1] - self.currentX
                self.currentYMainImage = self.diffY + self.currentYMainImage
                self.currentXMainImage = self.diffX + self.currentXMainImage
                self.currentY = self.CMT.center[0]
                self.currentX = self.CMT.center[1]
                # Save the center of frames in an array for further process
                measuredTrack[count, 0] = self.currentYMainImage
                measuredTrack[count, 1] = self.currentXMainImage
            else:
                # Detection failed on this frame: keep the previous position
                # (measuredTrack keeps its -1 sentinel for this index)
                self.currentYMainImage = self.currentYMainImage
                self.currentXMainImage = self.currentXMainImage
            print 'frame: {2:4d}, Center: {0:.2f},{1:.2f}'.format(self.currentYMainImage, self.currentXMainImage , count)
            count += 1
        numMeas = measuredTrack.shape[0]
        # Mask the -1 entries (frames without a valid detection)
        markedMeasure = np.ma.masked_less(measuredTrack, 0)
        # Kalman Filter Parameters
        deltaT = 1.0 / clip.fps
        # Constant-velocity model: state is [pos0, pos1, vel0, vel1]
        transitionMatrix = [[1, 0, deltaT, 0], [0, 1, 0, deltaT], [0, 0, 1, 0], [0, 0, 0, 1]] # A
        observationMatrix = [[1, 0, 0, 0], [0, 1, 0, 0]] # C
        xinit = markedMeasure[0, 0]
        yinit = markedMeasure[0, 1]
        # Initial velocity estimated from the first two measurements
        vxinit = markedMeasure[1, 0] - markedMeasure[0, 0]
        vyinit = markedMeasure[1, 1] - markedMeasure[0, 1]
        initState = [xinit, yinit, vxinit, vyinit] # mu0
        initCovariance = 1.0e-3 * np.eye(4) # sigma0
        transistionCov = 1.0e-4 * np.eye(4) # Q
        observationCov = 1.0e-1 * np.eye(2) # R
        # Kalman Filter bias
        kf = KalmanFilter(transition_matrices=transitionMatrix,
                          observation_matrices=observationMatrix,
                          initial_state_mean=initState,
                          initial_state_covariance=initCovariance,
                          transition_covariance=transistionCov,
                          observation_covariance=observationCov)
        self.measuredTrack = measuredTrack
        # Kalman Filter
        (self.filteredStateMeans, self.filteredStateCovariances) = kf.filter(markedMeasure)
        # Kalman Smoother
        (self.filterStateMeanSmooth, self.filterStateCovariancesSmooth) = kf.smooth(markedMeasure)
        newClip = clip.fl_image(self.crop)
        return newClip

    @counterFunction
    def crop (self, frame):
        """Moviepy frame filter: crop a 1280x720 window centred on the
        Kalman-smoothed track entry for the current frame (the frame index
        comes from the call counter added by @counterFunction).

        Rows span windowSize[1] pixels and columns windowSize[0]; the window
        is clamped to the image borders before slicing.
        """
        self.frameCounter = self.crop.count
        # print self.frameCounter
        # (width, height) of the output window
        windowSize = (2 * 640, 2 * 360)
        newFrames = np.zeros((windowSize[0], windowSize[1], 3))
        if self.frameCounter <= self.numFrames:
            imGray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            # Use Kalman Filter Smoother results to crop the frames with corresponding window size
            x1 = np.floor(self.filterStateMeanSmooth[(self.frameCounter) - 1][0] - windowSize[1] / 2)
            y1 = np.floor(self.filterStateMeanSmooth[(self.frameCounter) - 1][1] - windowSize[0] / 2)
            x2 = np.floor(x1 + windowSize[1])
            y2 = np.floor(y1 + windowSize[0])
            # Corner correction (Height)
            if (x1 <= 0):
                x1 = 0
                x2 = np.floor(x1 + windowSize[1])
            if (x2 >= imGray.shape[0]):
                x2 = np.floor(imGray.shape[0])
                x1 = np.floor(x2 - windowSize[1])
            # Corner correction (Width)
            if (y1 <= 0):
                y1 = 0
                y2 = np.floor(y1 + windowSize[0])
            if (y2 >= imGray.shape[1]):
                y2 = np.floor(imGray.shape[1])
                y1 = np.floor(y2 - windowSize[0])
            # print x1, y1 , x2, y2
            # NOTE(review): x1..y2 are floats here (np.floor); float slice
            # indices only work on old numpy versions -- TODO confirm.
            newFrames = frame[x1:x2, y1:y2, :]
        return newFrames
class BBoxTracker(object):
    """Chronological accumulator of speaker bounding-box observations.

    Each observation is stored as a ``(timestamp, center, width)`` tuple in
    ``self.bboxes``, in the order it was added.
    """

    def __init__(self):
        # Ordered list of (timestamp, center, width) tuples.
        self.bboxes = list()

    def set_size(self, width, height):
        """Record the pixel dimensions of the source video frames."""
        self.width = width
        self.height = height

    def add_bounding_box(self, timestamp, center, width):
        """Append one bounding-box observation to the history."""
        # TODO resize according to the original size
        record = (timestamp, center, width)
        self.bboxes.append(record)
class DummyTracker(object):
    """A simple implementation of the speaker tracker.

    Iterates over the video computing colour-difference histograms between
    consecutive sampled frames (whole-frame horizontal stripes, plus vertical
    stripes at the speaker's height) and dumps them to JSON in debug mode.
    """

    def __init__(self,
                 inputPath,
                 slide_coordinates,
                 resize_max=None,
                 fps=None,
                 skip=None,
                 speaker_bb_height_location=None):
        """
        :param inputPath: input video file or path containing images
        :param slide_coordinates: the coordinates where the slides are located (in 0-1 space)
        :param resize_max: max size
        :param fps: frame per second in use if the video is a sequence of image files
        :param speaker_bb_height_location: if given, this will be used as the possible heights at which the speaker should be tracked.
        """
        # NOTE(review): the `fps` argument is not stored here; self.fps is
        # assigned later in speakerTracker() from the video file -- confirm
        # whether the image-sequence use case is still supported.
        if inputPath is None:
            raise exceptions.RuntimeError("no input specified")
        self.inputPath = inputPath # 'The input path.'
        self.skip = skip # 'Skip the first n frames.'
        self.slide_crop_coordinates = self._inner_rectangle(slide_coordinates)
        print self.slide_crop_coordinates
        self.resize_max = resize_max
        self.tracker = BBoxTracker()
        # 3x3 elliptic structuring element used to close the foreground mask
        self.kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
        # MOG background subtractor (OpenCV 2.x API)
        self.fgbg = cv2.BackgroundSubtractorMOG()
        # TODO this location should be in the full frame, or indicated in the range [0,1]
        self.speaker_bb_height_location = speaker_bb_height_location

    def _inner_rectangle(self, coordinates):
        """Get the inner rectangle of the slide coordinates for cropping the image.

        Returns a 4x1 numpy array in the order:
        [min_y, max_y, min_x, max_x]
        """
        # This is specified by the rectify_coordinates() function in slideDetection.py
        top_left = 0
        top_right = 1
        bottom_right = 2
        bottom_left = 3
        x = 0
        y = 1
        min_x = max(coordinates[top_left, x], coordinates[bottom_left, x])
        max_x = min(coordinates[top_right, x], coordinates[bottom_right, x])
        # y is flipped, so top and bottom are as well
        min_y = max(coordinates[top_left, y], coordinates[top_right, y])
        max_y = min(coordinates[bottom_left, y], coordinates[bottom_right, y])
        return np.array([min_y, max_y, min_x, max_x])

    def _resize(self, im):
        """Resizes the input image according to the initial parameters"""
        if self.resize_max is None:
            return im
        # assuming landscape orientation
        dest_size = self.resize_max, int(im.shape[0] * (float(self.resize_max) / im.shape[1]))
        return cv2.resize(im, dest_size)

    def speakerTracker(self):
        """Sample roughly one frame per second and compute per-frame
        histogram statistics, storing them in ``distances_histogram`` (and
        to JSON files under ``_tmp_path`` when ``debug`` is set).

        NOTE(review): always returns None; the CMT/measuredTrack code at the
        end of the loop is unreachable (see the `continue` below).
        """
        # Clean up
        cv2.destroyAllWindows()
        # TODO move this in a function
        # If a path to a file was given, assume it is a single video file
        if os.path.isfile(self.inputPath):
            cap = cv2.VideoCapture(self.inputPath)
            clip = VideoFileClip(self.inputPath, audio=False)
            self.fps = cap.get(cv2.cv.CV_CAP_PROP_FPS)
            self.numFrames = cap.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT)
            self.width = cap.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH)
            self.height = cap.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT)
            self.tracker.set_size(self.width, self.height)
            logger.info("[VIDEO] #frames %d, frames size (%d x %d)", self.numFrames, self.width, self.height)
            pathBase = os.path.basename(self.inputPath)
            pathDirectory = os.path.dirname(self.inputPath)
            baseName = pathDirectory + '/' + os.path.splitext(pathBase)[0] + '_' + 'speakerCoordinates.txt'
            # Skip first frames if required
            if self.skip is not None:
                cap.set(cv2.cv.CV_CAP_PROP_POS_FRAMES, self.skip)
        # Otherwise assume it is a format string for reading images
        else:
            cap = cmtutil.FileVideoCapture(self.inputPath)
            # Skip first frames if required
            if self.skip is not None:
                cap.frame = 1 + self.skip
        # NOTE(review): in the image-sequence branch self.fps / self.numFrames
        # are never set, so the loop below would fail -- presumably only the
        # video-file path is exercised; confirm.
        # Read first frame
        status, im0_not_resized = cap.read()
        im0 = self._resize(im0_not_resized)
        im0_lab = cv2.cvtColor(im0, cv2.COLOR_BGR2LAB)
        im0_gray = cv2.cvtColor(im0_not_resized, cv2.COLOR_BGR2GRAY)
        if debug:
            # speaker bounding box used for debugging
            (tl, br) = (2052, 948), (2376, 1608)
        else:
            imDraw = np.copy(im0)
            (tl, br) = cmtutil.get_rect(imDraw)
        logger.info('[TRACKER] Using %s, %s as initial bounding box for the speaker', tl, br)
        # -1 marks "no measurement" (only consumed by the dead code below)
        measuredTrack = np.zeros((self.numFrames + 10, 2)) - 1
        frame_count = -1
        # previous histogram
        previous_hist_plane = None
        previous_hist_vertical_stripes = None # previous histograms computed vertically for "activity" recognition on the area where the speaker is
        distances_histogram = {}
        while frame_count <= self.numFrames:
            status = cap.grab()
            if not status:
                break
            frame_count += 1
            time = float(frame_count) / float(self.fps)
            current_time_stamp = datetime.timedelta(seconds=int(time))
            # Only decode/process roughly one frame per second of video
            if (self.fps is not None) and (frame_count % self.fps) != 0:
                continue
            logging.info('[VIDEO] processing frame %.6d / %d - time %s / %s - %3.3f %%',
                         frame_count,
                         self.numFrames,
                         current_time_stamp,
                         datetime.timedelta(seconds=self.numFrames / self.fps),
                         100 * float(frame_count) / self.numFrames)
            status, im = cap.retrieve()
            if not status:
                logger.error('[VIDEO] error reading frame %d', frame_count)
            # resize and color conversion
            im = self._resize(im)
            im_lab = cv2.cvtColor(im, cv2.COLOR_BGR2LAB)
            im_gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
            # color diff
            im_diff = (im_lab - im0_lab) ** 2
            im_diff_lab = np.sqrt(np.sum(im_diff, axis=2))
            # background
            fgmask = self.fgbg.apply(im0)
            fgmask = cv2.morphologyEx(fgmask, cv2.MORPH_CLOSE, self.kernel)
            # threshold the diff
            # histogram
            hist = []
            for i in range(im_diff.shape[2]):
                hist.append(cv2.calcHist([im_diff], [i], None, [256], [0, 256]))
            hist_plane = []
            slide_hist_plane = []
            # Compute the histogram for the slide image
            resized_x = im_diff_lab.shape[1]
            resized_y = im_diff_lab.shape[0]
            min_y = self.slide_crop_coordinates[0] * resized_y
            max_y = self.slide_crop_coordinates[1] * resized_y
            min_x = self.slide_crop_coordinates[2] * resized_x
            max_x = self.slide_crop_coordinates[3] * resized_x
            slide = im_gray[min_y : max_y, min_x : max_x]
            slidehist = cv2.calcHist([slide], [0], None, [256], [0, 256])
            plt.subplot(2, 1, 1)
            plt.imshow(slide, cmap=cm.Greys_r)
            plt.subplot(2, 1, 2)
            plt.plot(slidehist)
            plt.xlim([0, 256])
            histogram_boundaries = get_histogram_min_max_with_percentile(slidehist, False)
            # this is part of a pre-processing
            # dividing the plane vertically by N=3 and computing histograms on that. The purpose of this is to detect the environment changes
            N_stripes = 3
            for i in range(N_stripes):
                location = int(i * im_diff_lab.shape[0] / float(N_stripes)), min(im_diff_lab.shape[0], int((i + 1) * im_diff_lab.shape[0] / float(N_stripes)))
                current_plane = im_diff_lab[location[0]:location[1], :]
                # print current_plane.min(), current_plane.max()
                hist_plane.append(cv2.calcHist([current_plane.astype(np.uint8)], [0], None, [256], [0, 256]))
                # slide_hist_plane.append(cv2.calcHist(current_slide_plane))
            # dividing the location of the speaker by N=10 vertical stripes. The purpose of this is to detect the x location of activity/motion
            hist_vertical_stripes = []
            energy_vertical_stripes = []
            N_vertical_stripes = 10
            if self.speaker_bb_height_location is not None:
                for i in range(N_vertical_stripes):
                    location = int(i * im_diff_lab.shape[1] / float(N_vertical_stripes)), min(im_diff_lab.shape[1], int((i + 1) * im_diff_lab.shape[1] / float(N_vertical_stripes)))
                    current_vertical_stripe = im_diff_lab[self.speaker_bb_height_location[0]:self.speaker_bb_height_location[1], location[0]:location[1]]
                    hist_vertical_stripes.append(cv2.calcHist([current_vertical_stripe.astype(np.uint8)], [0], None, [256], [0, 256]))
                    energy_vertical_stripes.append(current_vertical_stripe.sum())
                    pass
                pass
            # histogram distance
            # location of all the connected components
            if previous_hist_plane is not None:
                distances_histogram[frame_count] = {}
                element = distances_histogram[frame_count]
                # element['timestamp'] = current_time_stamp
                element['dist_stripes'] = {}
                for e, h1, h2 in zip(range(N_stripes), previous_hist_plane, hist_plane):
                    element['dist_stripes'][e] = cv2.compareHist(h1, h2, cv2.cv.CV_COMP_CORREL)
            if previous_hist_vertical_stripes is not None:
                element = distances_histogram.get(frame_count, {})
                distances_histogram[frame_count] = element
                element['vert_stripes'] = {}
                for e, h1, h2 in zip(range(N_vertical_stripes), previous_hist_vertical_stripes, hist_vertical_stripes):
                    element['vert_stripes'][e] = cv2.compareHist(h1, h2, cv2.cv.CV_COMP_CORREL)
            # "activity" which is the enery in each stripe
            element = distances_histogram.get(frame_count, {})
            distances_histogram[frame_count] = element
            element['energy_stripes'] = {}
            element['peak_stripes'] = {}
            element['histogram_boundaries'] = {}
            for e, energy, h1 in zip(range(N_vertical_stripes), energy_vertical_stripes, hist_vertical_stripes):
                element['energy_stripes'][e] = int(energy)
                element['peak_stripes'][e] = max([i for i, j in enumerate(h1) if j > 0])
            # Store histogram boundaries
            element['histogram_boundaries']['min'] = histogram_boundaries[0]
            element['histogram_boundaries']['max'] = histogram_boundaries[1]
            # debug
            if debug:
                cv2.imwrite(os.path.join(_tmp_path, 'background_%.6d.png' % frame_count), fgmask)
                cv2.imwrite(os.path.join(_tmp_path, 'diff_%.6d.png' % frame_count), im_diff)
                cv2.imwrite(os.path.join(_tmp_path, 'diff_lab_%.6d.png' % frame_count), im_diff_lab)
                with open(os.path.join(_tmp_path, 'info_%.6d.json' % frame_count), 'w') as f:
                    f.write(json.dumps(distances_histogram))
            # cv2.imwrite(os.path.join(_tmp_path, 'diff_thres_%.6d.png' % frame_count), color_mask)
            # roll the "previous frame" state before the next iteration
            im0 = im
            im0_lab = im_lab
            im0_gray = im_gray
            previous_hist_plane = hist_plane
            previous_hist_vertical_stripes = hist_vertical_stripes
            continue
            # NOTE(review): everything below is dead code -- the `continue`
            # above ends every iteration, and DummyTracker never assigns
            # self.CMT, so this would raise AttributeError if it ever ran.
            print 'frame: {2:4d}, Center: {0:.2f},{1:.2f}'.format(self.CMT.center[0], self.CMT.center[1] , frame_count)
            if not (math.isnan(self.CMT.center[0])
                    or math.isnan(self.CMT.center[1])
                    or (self.CMT.center[0] <= 0)
                    or (self.CMT.center[1] <= 0)):
                measuredTrack[frame_count, 0] = self.CMT.center[0]
                measuredTrack[frame_count, 1] = self.CMT.center[1]
            else:
                # take the previous estimate if none is found in the current frame
                measuredTrack[frame_count, 0] = measuredTrack[frame_count - 1, 0]
                measuredTrack[frame_count, 1] = measuredTrack[frame_count - 1, 1]
            if debug:
                cmtutil.draw_bounding_box((int(measuredTrack[frame_count, 0] - 50), int(measuredTrack[frame_count, 1] - 50)),
                                          (int(measuredTrack[frame_count, 0] + 50), int(measuredTrack[frame_count, 1] + 50)),
                                          im_debug)
                cv2.imwrite(os.path.join(_tmp_path, 'debug_file_%.6d.png' % frame_count), im_debug)
            im_debug = np.copy(im)
            cmtutil.draw_keypoints([kp.pt for kp in self.CMT.keypoints_cv], im_debug, (0, 0, 255))
            cv2.imwrite(os.path.join(_tmp_path, 'all_keypoints_%.6d.png' % frame_count), im_debug)
        return

    def crop (self, frame):
        """Crop a 1280x720 window around the current CMT center.

        NOTE(review): references self.CMT, which DummyTracker never creates;
        this method looks copied from the CMT tracker classes and is not
        called from this class -- confirm before relying on it.
        """
        windowSize = (2 * 640, 2 * 360)
        newFrames = np.zeros((windowSize[0], windowSize[1], 3))
        imGray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        self.CMT.process_frame(imGray)
        if not (math.isnan(self.CMT.center[0]) or math.isnan(self.CMT.center[1])
                or (self.CMT.center[0] <= 0) or (self.CMT.center[1] <= 0)):
            x1 = np.floor(self.CMT.center[1] - windowSize[1] / 2)
            y1 = np.floor(self.CMT.center[0] - windowSize[0] / 2)
            x2 = np.floor(x1 + windowSize[1])
            y2 = np.floor(y1 + windowSize[0])
            # Corner correction (Height)
            if (x1 <= 0):
                x1 = 0
                x2 = np.floor(x1 + windowSize[1])
            if (x2 >= imGray.shape[0]):
                x2 = np.floor(imGray.shape[0])
                x1 = np.floor(x2 - windowSize[1])
            # Corner correction (Width)
            if (y1 <= 0):
                y1 = 0
                y2 = np.floor(y1 + windowSize[0])
            if (y2 >= imGray.shape[1]):
                y2 = np.floor(imGray.shape[1])
                y1 = np.floor(y2 - windowSize[0])
            newFrames = frame[x1:x2, y1:y2, :]
        # print 'Center: {0:.2f},{1:.2f}'.format(CMT.center[0], CMT.center[1])
        return newFrames
def plot_histogram_distances():
    """Reads back the sequence of histograms and plots the distance between two consecutive histograms over time.

    Consumes the ``info_*.json`` files written by DummyTracker.speakerTracker
    in debug mode and writes three PNG figures into ``_tmp_path``.
    NOTE(review): relies on matplotlib's implicit current-figure state; the
    call order of subplot/plot/savefig is significant.
    """
    list_files = glob.glob(os.path.join(_tmp_path, 'info_*.json'))
    list_files.sort()
    # the last one contains all the necessary information, -2 is used for tests while the script is running
    last_file = list_files[-2]
    with open(last_file) as f:
        distances_histogram = json.load(f)
    # JSON keys are strings: keep (string_key, int_key) pairs sorted numerically
    frame_indices = [(i, int(i)) for i in distances_histogram.keys()]
    frame_indices.sort(key=lambda x: x[1])
    # stripe index -> list of per-frame histogram distances
    plots_dict = {}
    plots_dict_index = []
    for count, count_integer in frame_indices:
        if 'dist_stripes' not in distances_histogram[count]:
            continue
        plots_dict_index.append(count_integer)
        current_sample = distances_histogram[count]['dist_stripes']
        for i in current_sample.keys():
            if int(i) not in plots_dict:
                plots_dict[int(i)] = []
            plots_dict[int(i)].append(float(current_sample[i]))
    N_stripes = max(plots_dict.keys())
    # vertical stripes are the location of the speaker
    plots_dict_vert_stripes = {}
    plots_dict_vert_stripes_index = []
    for count, count_integer in frame_indices:
        if 'vert_stripes' not in distances_histogram[count]:
            continue
        plots_dict_vert_stripes_index.append(count_integer)
        current_sample = distances_histogram[count]['vert_stripes']
        for i in current_sample.keys():
            if int(i) not in plots_dict_vert_stripes:
                plots_dict_vert_stripes[int(i)] = []
            plots_dict_vert_stripes[int(i)].append(float(current_sample[i]))
    # per-stripe activity/energy series ('energy_stripes' exists on every entry)
    plots_dict_vert_stripes_energy = {}
    plots_dict_vert_stripes_energy_index = []
    for count, count_integer in frame_indices:
        current_sample = distances_histogram[count]['energy_stripes']
        plots_dict_vert_stripes_energy_index.append(count_integer)
        for i in current_sample.keys():
            if int(i) not in plots_dict_vert_stripes_energy:
                plots_dict_vert_stripes_energy[int(i)] = []
            plots_dict_vert_stripes_energy[int(i)].append(float(current_sample[i]))
    N_stripes_vert = max(plots_dict_vert_stripes.keys())
    from matplotlib import pyplot as plt
    # one subplot per horizontal stripe
    for i in sorted(plots_dict.keys()):
        plt.subplot(N_stripes + 1, 1, i + 1) # , sharex=True)
        plt.plot(plots_dict_index, plots_dict[i], aa=False, linewidth=1)
        if i == 0:
            plt.title('Histogram distance for each stripe')
        # lines.set_linewidth(1)
        plt.ylabel('Stripe %d' % i)
    plt.xlabel('frame #')
    plt.savefig(os.path.join(_tmp_path, 'histogram_distance.png'))
    # plotting the vertical stripes content
    for i in sorted(plots_dict_vert_stripes.keys()):
        plt.subplot(N_stripes_vert + 1, 1, i + 1) # , sharex=True)
        plt.plot(plots_dict_vert_stripes_index, plots_dict_vert_stripes[i], aa=False, linewidth=1)
        if i == 0:
            plt.title('Histogram distance for each vertical stripe')
        # lines.set_linewidth(1)
        plt.ylabel('%d' % i)
        plt.tick_params(axis='x', # changes apply to the x-axis
                        which='both', # both major and minor ticks are affected
                        bottom='off', # ticks along the bottom edge are off
                        top='off', # ticks along the top edge are off
                        labelbottom='off') # labels along the bottom edge are off
        plt.tick_params(axis='y',
                        which='both',
                        left='off', # ticks along the bottom edge are off
                        right='off', # ticks along the top edge are off
                        top='off',
                        bottom='off',
                        labelleft='on')
    # re-enable x ticks/labels on the last (bottom-most) subplot only
    plt.xlabel('frame #')
    plt.tick_params(axis='x',
                    which='both',
                    bottom='on',
                    top='off',
                    labelbottom='on')
    plt.savefig(os.path.join(_tmp_path, 'histogram_vert_distance.png'), dpi=(200))
    # plotting the vertical stripes content: energy
    for i in sorted(plots_dict_vert_stripes_energy.keys()):
        plt.subplot(N_stripes_vert + 1, 1, i + 1) # , sharex=True)
        plt.plot(plots_dict_vert_stripes_energy_index, plots_dict_vert_stripes_energy[i], aa=False, linewidth=1)
        if i == 0:
            plt.title('Energy for each vertical stripe')
        plt.tick_params(axis='x', # changes apply to the x-axis
                        which='both', # both major and minor ticks are affected
                        bottom='off', # ticks along the bottom edge are off
                        top='off', # ticks along the top edge are off
                        labelbottom='off') # labels along the bottom edge are off
        plt.tick_params(axis='y',
                        which='both',
                        left='off', # ticks along the bottom edge are off
                        right='off', # ticks along the top edge are off
                        top='off',
                        bottom='off',
                        labelleft='on')
        # lines.set_linewidth(1)
        plt.ylabel('%d' % i, fontsize=3)
        # plt.axis([0, max(plots_dict_vert_stripes_energy[i])])
    plt.xlabel('frame #')
    plt.tick_params(axis='x',
                    which='both',
                    bottom='on',
                    top='off',
                    labelbottom='on')
    plt.savefig(os.path.join(_tmp_path, 'histogram_vert_energy.png'), dpi=(200))
if __name__ == '__main__':
    # Ad-hoc development driver: runs DummyTracker on one hard-coded video.
    storage = '/home/livius/Code/livius/livius/Example Data'
    filename = 'video_7.mp4'
    # plot_histogram_distances()
    # sys.exit(0)
    if True:
        obj = DummyTracker(os.path.join(storage, filename),
                           slide_coordinates=np.array([[ 0.36004776, 0.01330207],
                                                       [ 0.68053395, 0.03251761],
                                                       [ 0.67519468, 0.42169076],
                                                       [ 0.3592881, 0.41536275]]),
                           resize_max=640,
                           speaker_bb_height_location=(155, 260))
        new_clip = obj.speakerTracker()
    # plot_histogram_distances()
    sys.exit(0)
    # NOTE(review): unreachable -- sys.exit(0) above always terminates first
    # (and DummyTracker.speakerTracker() currently returns None, so this
    # call would fail anyway); presumably leftover debugging scaffolding.
    new_clip.write_videofile("video_CMT_algorithm_kalman_filter.mp4")
| 37.65638 | 180 | 0.58371 |
5d474eb937aff799b65cbb2292034575c5d5bafe | 26,294 | py | Python | bulbs/element.py | rpedigoni/bulbs | 0c0f4e5883ba796dff1afae5413b0388491eb55d | [
"BSD-3-Clause"
] | 1 | 2017-06-14T13:57:23.000Z | 2017-06-14T13:57:23.000Z | bulbs/element.py | rpedigoni/bulbs | 0c0f4e5883ba796dff1afae5413b0388491eb55d | [
"BSD-3-Clause"
] | null | null | null | bulbs/element.py | rpedigoni/bulbs | 0c0f4e5883ba796dff1afae5413b0388491eb55d | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright 2012 James Thornton (http://jamesthornton.com)
# BSD License (see LICENSE for details)
#
"""
Vertex and Edge container classes and associated proxy classes.
"""
from .utils import u # Python 3 unicode
from .utils import initialize_element, initialize_elements, coerce_id, get_logger
log = get_logger(__name__)
class Element(object):
"""An abstract base class for Vertex and Edge containers."""
def __init__(self, client):
# NOTE: moved all private prop defs here so they are declared and
# pre-defined in _properties so that setattr works in model NORMAL mode
# Client object
self._client = client
# Property data
self._data = {}
# Result object.
self._result = None
# Vertex Proxy Object
self._vertices = None
# Edge Proxy Object
self._edges = None
# Initialized Flag
# Initialize all non-database properties here because when _initialized
# is set to True, __setattr__ will assume all non-defined properties
# are database properties and will set them in self._data.
self._initialized = True
def _initialize(self, result):
"""
Initialize the element with the result that was returned by the DB.
:param result: The Result object returned by the the Client request.
:type result: Result
:rtype: None
"""
self._result = result
# TODO: Do we really need to make a copy?
self._data = result.get_data().copy()
# Sets the element ID to the var defined in Config. Defaults to eid.
self._set_pretty_id(self._client)
# These vertex and edge proxies are primarily used for gets;
# all mutable methods that use these are overloaded in Model.
self._vertices = VertexProxy(Vertex,self._client)
self._edges = EdgeProxy(Edge,self._client)
@classmethod
def get_base_type(cls):
"""
Returns this element class's base type.
:rtype: str
"""
raise NotImplementedError
@classmethod
def get_element_key(cls, config):
"""
Returns the element key.
:param config: Config object.
:type config: bulbs.config.Config
:rtype: str
"""
raise NotImplementedError
@classmethod
def get_index_name(cls, config):
"""
Returns the index name.
:param config: Config object.
:type config: bulbs.config.Config
:rtype: str
"""
raise NotImplementedError
@classmethod
def get_proxy_class(cls):
"""
Returns the proxy class.
:param config: Config object.
:type config: bulbs.config.Config
:rtype: class
"""
raise NotImplementedError
@property
def _id(self):
"""
Returns the element ID.
:rtype: int or str
.. note:: This is the element's "primary key"; however, some DBs (such
as neo4j) reuse IDs if they are deleted so be careful with
how you use them.
If you want to guarantee they are unique across the DB's
lifetime either don't physically delete elements and just set
a deleted flag, or use some other mechanism for the primary
key, such as an external sequence or a hash.
"""
return self._result.get_id()
@property
def _type(self):
"""
Returns the result's base type, either vertex or edge.
:rtype: str
"""
return self._result.get_type()
def _set_pretty_id(self, client):
"""
Sets the ID var defined in Config as a Python property. Defaults to eid.
:param client: Client object.
:type client: Client
:rtype: None
.. note:: The user-configured element_type and label vars are not set
as Python properties because they are class vars so you set
those when you define the Models.
"""
pretty_var = client.config.id_var
fget = lambda self: self._result.get_id()
setattr(Element, pretty_var, property(fget))
def __setattr__(self, key, value):
"""
Overloaded to set the object attributes or the property data.
If you explicitly set/change the values of an element's properties,
make sure you call save() to updated the values in the DB.
:param key: Database property key.
:type key: str
:param value: Database property value.
:type value: str, int, long, float, list, dict
:rtype: None
"""
# caching __dict__ to avoid the dots and boost performance
dict_ = self.__dict__
# dict_.get() is faster than getattr()
_initialized = dict_.get("_initialized", False)
if key in dict_ or _initialized is False:
# set the attribute normally
object.__setattr__(self, key, value)
else:
# set the attribute as a data property
self._data[key] = value
def __getattr__(self, name):
"""
Returns the value of the database property for the given name.
:param name: The name of the data property.
:type name: str
:raises: AttributeError
:rtype: str, int, long, float, list, dict, or None
"""
try:
return self._data[name]
except:
raise AttributeError(name)
def __len__(self):
"""
Returns the number of items stored in the DB results
:rtype: int
"""
return len(self._data)
def __contains__(self, key):
"""
Returns True if the key in the database property data.
:param key: Property key.
:type key: str
:rtype: bool
"""
return key in self._data
def __eq__(self, element):
"""
Returns True if the elements are equal
:param element: Element object.
:type element: Element
:rtype bool
"""
return (isinstance(element, Element) and
element.__class__ == self.__class__ and
element._id == self._id and
element._data == self._data)
def __ne__(self, element):
"""
Returns True if the elements are not equal.
:param element: Element object.
:type element: Element
:rtype bool
"""
return not self.__eq__(element)
def __repr__(self):
"""
Returns the string representation of the attribute.
:rtype: unicode
"""
return self.__unicode__()
def __str__(self):
"""
Returns the string representation of the attribute.
:rtype: unicode
"""
return self.__unicode__()
def __unicode__(self):
"""
Returns the unicode representation of the attribute.
:rtype: unicode
"""
class_name = self.__class__.__name__
element_uri = self._result.get_uri()
representation = "<%s: %s>" % (class_name, element_uri)
return u(representation) # Python 3
def __setstate__(self, state):
config = state['_config']
client_class = state['_client_class']
client = client_class(config)
state['_client'] = client
state['_vertices'] = VertexProxy(Vertex, client)
state['_edges'] = EdgeProxy(Edge, client)
del state['_client_class']
del state['_config']
self.__dict__ = state
def __getstate__(self):
state = self.__dict__.copy()
state['_config'] = self._client.config
state['_client_class'] = self._client.__class__
del state['_client']
del state['_vertices']
del state['_edges']
return state
def get(self, name, default_value=None):
"""
Returns the value of a Python attribute or the default value.
:param name: Python attribute name.
:type name: str
:param default_value: Default value. Defaults to None.
:type default_value: object
:rtype: object or None
"""
# TODO: Why do we need this?
return getattr(self, name, default_value)
def data(self):
"""
Returns the element's property data.
:rtype: dict
"""
return self._data
def map(self):
"""
Deprecated. Returns the element's property data.
:rtype: dict
"""
log.debug("This is deprecated; use data() instead.")
return self.data()
#
# Vertices
#
class Vertex(Element):
    """
    A container for a Vertex returned by a client proxy.

    :param client: The Client object for the database.
    :type client: Client

    :ivar eid: Element ID. This varname is configurable in Config.
    :ivar _client: Client object.
    :ivar _data: Property data dict returned in Result.
    :ivar _vertices: Vertex proxy object.
    :ivar _edges: Edge proxy object.
    :ivar _initialized: Boolean set to True upon initialization.

    Example::

        >>> from bulbs.neo4jserver import Graph
        >>> g = Graph()                       # Create a Neo4j Graph object
        >>> james = g.vertices.get(3)         # Get a vertex from the database
        >>> james.age = 34                    # Set a database property
        >>> james.save()                      # Save the vertex in the database
        >>> james.data()                      # Get the database property map
        >>> friends = james.outV("knows")     # Vertex generator of friends

    """

    @classmethod
    def get_base_type(cls):
        """
        Returns this element class's base type, which is "vertex".

        :rtype: str

        .. admonition:: WARNING

           Don't override this.

        """
        return "vertex"

    @classmethod
    def get_element_key(cls, config):
        """
        Returns the element key. Defaults to "vertex". Override this in Model.

        :param config: Config object.
        :type config: Config

        :rtype: str

        """
        return "vertex"

    @classmethod
    def get_index_name(cls, config):
        """
        Returns the index name, taken from Config.vertex_index.

        :param config: Config object.
        :type config: Config

        :rtype: str

        """
        return config.vertex_index

    @classmethod
    def get_proxy_class(cls):
        """
        Returns the proxy class used to manage vertices (VertexProxy).

        :rtype: class

        """
        return VertexProxy

    def outE(self, label=None, start=None, limit=None):
        """
        Returns the outgoing edges, optionally restricted to a label.

        :param label: Optional edge label.
        :type label: str or None

        :rtype: Edge generator

        """
        return initialize_elements(
            self._client,
            self._client.outE(self._id, label, start, limit),
        )

    def inE(self, label=None, start=None, limit=None):
        """
        Returns the incoming edges, optionally restricted to a label.

        :param label: Optional edge label.
        :type label: str or None

        :rtype: Edge generator

        """
        return initialize_elements(
            self._client,
            self._client.inE(self._id, label, start, limit),
        )

    def bothE(self, label=None, start=None, limit=None):
        """
        Returns the incoming and outgoing edges.

        :param label: Optional edge label.
        :type label: str or None

        :rtype: Edge generator

        """
        return initialize_elements(
            self._client,
            self._client.bothE(self._id, label, start, limit),
        )

    def outV(self, label=None, start=None, limit=None):
        """
        Returns the out-adjacent vertices.

        :param label: Optional edge label.
        :type label: str or None

        :rtype: Vertex generator

        """
        return initialize_elements(
            self._client,
            self._client.outV(self._id, label, start, limit),
        )

    def inV(self, label=None, start=None, limit=None):
        """
        Returns the in-adjacent vertices.

        :param label: Optional edge label.
        :type label: str or None

        :rtype: Vertex generator

        """
        return initialize_elements(
            self._client,
            self._client.inV(self._id, label, start, limit),
        )

    def bothV(self, label=None, start=None, limit=None):
        """
        Returns all incoming- and outgoing-adjacent vertices.

        :param label: Optional edge label.
        :type label: str or None

        :rtype: Vertex generator

        """
        return initialize_elements(
            self._client,
            self._client.bothV(self._id, label, start, limit),
        )

    def save(self):
        """
        Saves the vertex's current property data in the database.

        :rtype: Response

        """
        return self._vertices.update(self._id, self._data)
class VertexProxy(object):
    """
    A proxy for interacting with vertices on the graph database.

    :param element_class: The element class managed by this proxy instance.
    :type element_class: Vertex class

    :param client: The Client object for the database.
    :type client: Client

    :ivar element_class: Element class.
    :ivar client: Client object.
    :ivar index: The primary index object or None.

    .. note:: The Graph object contains a VertexProxy instance named "vertices".

    Example::

        >>> from bulbs.neo4jserver import Graph
        >>> g = Graph()                                   # Create Neo4j Graph
        >>> james = g.vertices.create(name="James")       # Create vertex in DB
        >>> g.vertices.update(james.eid, name="James T")  # Update properties
        >>> james = g.vertices.get(james.eid)             # Get vertex (again)
        >>> g.vertices.delete(james.eid)                  # Delete vertex

    """

    def __init__(self, element_class, client):
        assert issubclass(element_class, Vertex)
        self.element_class = element_class
        self.client = client
        self.index = None
        # Add element class to Registry so we can initialize query results.
        self.client.registry.add_class(element_class)

    def create(self, _data=None, **kwds):
        """
        Adds a vertex to the database and returns it.

        :param _data: Optional property data dict.
        :type _data: dict

        :param kwds: Optional property data keyword pairs.
        :type kwds: dict

        :rtype: Vertex

        """
        data = build_data(_data, kwds)
        resp = self.client.create_vertex(data)
        return initialize_element(self.client, resp.results)

    def get(self, _id):
        """
        Returns the vertex for the given ID, or None if it doesn't exist.

        :param _id: The vertex ID.
        :type _id: int or str

        :rtype: Vertex or None

        """
        try:
            resp = self.client.get_vertex(_id)
            return initialize_element(self.client, resp.results)
        except LookupError:
            return None

    def get_or_create(self, key, value, _data=None, **kwds):
        """
        Lookup a vertex in the index and create it if it doesn't exist.

        :param key: Index key.
        :type key: str

        :param value: Index value.
        :type value: str, int, long

        :param _data: Optional property data dict.
        :type _data: dict

        :param kwds: Optional property data keyword pairs.
        :type kwds: dict

        :rtype: Vertex

        """
        # TODO: Make this an atomic Gremlin method
        # TODO: This will only index for non-models if autoindex is True.
        # Relationship Models are set to index by default, but
        # EdgeProxy doesn't have this method anyway.
        vertex = self.index.get_unique(key, value)
        if vertex is None:
            vertex = self.create(_data, **kwds)
        return vertex

    def get_all(self):
        """
        Returns all the vertices in the graph.

        :rtype: Vertex generator

        """
        resp = self.client.get_all_vertices()
        return initialize_elements(self.client, resp)

    def update(self, _id, _data=None, **kwds):
        """
        Updates a vertex in the graph DB and returns the response.

        :param _id: The vertex ID.
        :type _id: int or str

        :param _data: Optional property data dict.
        :type _data: dict

        :param kwds: Optional property data keyword pairs.
        :type kwds: dict

        :rtype: Response

        """
        # NOTE: this no longer returns an initialized element because not all
        # Clients return element data, e.g. Neo4jServer returns nothing.
        data = build_data(_data, kwds)
        # BUG FIX: previously the raw ``_data`` arg was sent to the client
        # (silently dropping keyword properties, and sending None when only
        # keywords were given) and the client's response was discarded.
        # Send the merged dict and return the response, mirroring
        # EdgeProxy.update.
        return self.client.update_vertex(_id, data)

    def remove_properties(self, _id):
        """
        Removes all properties from a vertex and returns the response.

        :param _id: The vertex ID.
        :type _id: int or str

        :rtype: Response

        """
        return self.client.remove_vertex_properties(_id)

    def delete(self, _id):
        """
        Deletes a vertex from the graph database and returns the response.

        :param _id: The vertex ID.
        :type _id: int or str

        :rtype: Response

        """
        return self.client.delete_vertex(_id)
#
# Edges
#
class Edge(Element):
    """
    A container for an Edge returned by a client proxy.

    :param client: The Client object for the database.
    :type client: Client

    :ivar eid: Element ID. This varname is configurable in Config.
    :ivar _client: Client object.
    :ivar _data: Property data dict returned in Result.
    :ivar _vertices: Vertex proxy object.
    :ivar _edges: Edge proxy object.
    :ivar _initialized: Boolean set to True upon initialization.

    Example:

    >>> from bulbs.neo4jserver import Graph
    >>> g = Graph()               # Create a Neo4j Graph
    >>> edge = g.edges.get(8)     # Get an edge from DB
    >>> label = edge.label()      # Return edge label
    >>> outV = edge.outV()        # Return outgoing vertex
    >>> inV = edge.inV()          # Return incoming vertex
    >>> edge._outV                # Return the outgoing vertex ID
    >>> edge._inV                 # Return the incoming vertex ID
    >>> edge.weight = 0.5         # Set a property
    >>> edge.save()               # Save properties in DB
    >>> data = edge.data()        # Return property data

    """

    @classmethod
    def get_base_type(cls):
        """
        Returns this element class's base type, which is "edge".

        :rtype: str

        .. admonition:: WARNING

           Don't override this.

        """
        return "edge"

    @classmethod
    def get_element_key(cls, config):
        """
        Returns the element key. Defaults to "edge". Override this in Model.

        :rtype: str

        """
        return "edge"

    @classmethod
    def get_index_name(cls, config):
        """
        Returns the index name, taken from Config.edge_index.

        :rtype: str

        """
        return config.edge_index

    @classmethod
    def get_proxy_class(cls):
        """
        Returns the proxy class used to manage edges (EdgeProxy).

        :rtype: class

        """
        return EdgeProxy

    @property
    def _outV(self):
        """
        Returns the ID of the edge's outgoing (start) vertex.

        :rtype: int

        """
        return self._result.get_outV()

    @property
    def _inV(self):
        """
        Returns the ID of the edge's incoming (end) vertex.

        :rtype: int

        """
        return self._result.get_inV()

    @property
    def _label(self):
        """
        Returns the edge's label.

        :rtype: str

        """
        return self._result.get_label()

    def outV(self):
        """
        Returns the outgoing (start) Vertex of the edge.

        :rtype: Vertex

        """
        return self._vertices.get(self._outV)

    def inV(self):
        """
        Returns the incoming (end) Vertex of the edge.

        :rtype: Vertex

        """
        return self._vertices.get(self._inV)

    def label(self):
        """
        Returns the edge's label.

        :rtype: str

        """
        # Same value as the private _label property.
        return self._label

    def save(self):
        """
        Saves the edge's property data in the database.

        :rtype: Response

        """
        return self._edges.update(self._id, self._data)
class EdgeProxy(object):
    """
    A proxy for interacting with edges on the graph database.

    :param element_class: The element class managed by this proxy instance.
    :type element_class: Edge class

    :param client: The Client object for the database.
    :type client: Client

    :ivar element_class: Element class
    :ivar client: Client object.
    :ivar index: The primary index object or None.

    .. note:: The Graph object contains an EdgeProxy instance named "edges".

    Example::

        >>> from bulbs.neo4jserver import Graph
        >>> g = Graph()                                     # Create Neo4j Graph
        >>> james = g.vertices.create(name="James")         # Create vertex
        >>> julie = g.vertices.create(name="Julie")         # Create vertex
        >>> knows = g.edges.create(james, "knows", julie)   # Create edge
        >>> knows = g.edges.get(knows.eid)                  # Get edge (again)
        >>> g.edges.update(knows.eid, weight=0.5)           # Update properties
        >>> g.edges.delete(knows.eid)                       # Delete edge

    """

    def __init__(self, element_class, client):
        assert issubclass(element_class, Edge)
        self.element_class = element_class
        self.client = client
        self.index = None
        # Register the class so query results can be initialized to it.
        self.client.registry.add_class(element_class)

    def create(self, outV, label, inV, _data=None, **kwds):
        """
        Creates an edge in the database and returns it.

        :param outV: The outgoing vertex.
        :type outV: Vertex or int

        :param label: The edge's label.
        :type label: str

        :param inV: The incoming vertex.
        :type inV: Vertex or int

        :param _data: Optional property data dict.
        :type _data: dict

        :param kwds: Optional property data keyword pairs.
        :type kwds: dict

        :rtype: Edge

        """
        assert label is not None
        src, dst = coerce_vertices(outV, inV)
        properties = build_data(_data, kwds)
        response = self.client.create_edge(src, label, dst, properties)
        return initialize_element(self.client, response.results)

    def get(self, _id):
        """
        Retrieves an edge from the database, or None if it doesn't exist.

        :param _id: The edge ID.
        :type _id: int or str

        :rtype: Edge or None

        """
        try:
            response = self.client.get_edge(_id)
            return initialize_element(self.client, response.results)
        except LookupError:
            return None

    def get_all(self):
        """
        Returns all the edges in the graph.

        :rtype: Edge generator

        """
        response = self.client.get_all_edges()
        return initialize_elements(self.client, response)

    def update(self, _id, _data=None, **kwds):
        """
        Updates an edge in the database and returns the response.

        :param _id: The edge ID.
        :type _id: int or str

        :param _data: Optional property data dict.
        :type _data: dict

        :param kwds: Optional property data keyword pairs.
        :type kwds: dict

        :rtype: Response

        """
        # NOTE: no initialized element is returned because not all Clients
        # return element data, e.g. Neo4jServer returns nothing.
        properties = build_data(_data, kwds)
        return self.client.update_edge(_id, properties)

    def remove_properties(self, _id):
        """
        Removes all properties from an edge and returns the response.

        :param _id: The edge ID.
        :type _id: int or str

        :rtype: Response

        """
        return self.client.remove_edge_properties(_id)

    def delete(self, _id):
        """
        Deletes an edge from the graph database and returns the response.

        :param _id: The edge ID.
        :type _id: int or str

        :rtype: Response

        """
        return self.client.delete_edge(_id)
#
# Element Utils
#
def build_data(_data, kwds):
    """
    Returns a property data dict, regardless of how it was entered.

    The positional dict and keyword pairs are merged into a NEW dict so the
    caller's ``_data`` argument is never mutated (previously ``kwds`` were
    written straight into the caller's dict).

    :param _data: Optional property data dict.
    :type _data: dict

    :param kwds: Optional property data keyword pairs.
    :type kwds: dict

    :rtype: dict

    """
    # Doing this rather than defaulting the _data arg to a mutable value;
    # copy so the caller's dict is left untouched.
    data = {} if _data is None else dict(_data)
    data.update(kwds)
    return data
def coerce_vertices(outV, inV):
    """
    Coerces the outgoing and incoming vertices to their IDs.

    :param outV: The outgoing vertex.
    :type outV: Vertex or int

    :param inV: The incoming vertex.
    :type inV: Vertex or int

    :rtype: tuple

    """
    return coerce_vertex(outV), coerce_vertex(inV)
def coerce_vertex(vertex):
    """
    Coerces an object into a vertex ID and returns it.

    :param vertex: The object we want to coerce into a vertex ID.
    :type vertex: Vertex object or vertex ID.

    :rtype: int or str

    """
    if isinstance(vertex, Vertex):
        return vertex._id
    # The vertex ID may have been passed in as a string; coerce_id supports
    # OrientDB and linked-data URI (non-integer) IDs.
    return coerce_id(vertex)
| 25.930966 | 83 | 0.578345 |
64037c3f41a95e6e644632da51e808c5b3d1897e | 1,704 | py | Python | server.py | qfuggett/people-manager-flask | 97511e14c26a90df5b3dc2117c504c7572532761 | [
"Unlicense"
] | null | null | null | server.py | qfuggett/people-manager-flask | 97511e14c26a90df5b3dc2117c504c7572532761 | [
"Unlicense"
] | null | null | null | server.py | qfuggett/people-manager-flask | 97511e14c26a90df5b3dc2117c504c7572532761 | [
"Unlicense"
] | null | null | null | from flask import (Flask, render_template, request, redirect)
from jinja2 import StrictUndefined
from model import connect_to_db
import crud
app = Flask(__name__)
# Raise errors for undefined template variables instead of failing silently.
app.jinja_env.undefined = StrictUndefined
# Enable {% break %} / {% continue %} inside template loops.
app.jinja_env.add_extension('jinja2.ext.loopcontrols')
@app.route('/', methods=["GET", "POST"])
def homepage():
    """List all users; on POST, create a new user from the submitted form."""
    users = crud.get_users()
    if request.method == 'POST':
        name = request.form.get('name')
        email = request.form.get('email')
        birthday = request.form.get('birthday')
        zip_code = request.form.get('zip_code')
        user = crud.create_user(name, email, birthday, zip_code)
        print("*********************", "USER:", user, "********************")
        # Redirect after POST so a browser refresh does not resubmit the form.
        return redirect('/')
    return render_template('homepage.html', users=users)
@app.route('/update/<user_id>', methods=["GET", "POST"])
def update_user(user_id):
    """Show the edit form for a user; on POST, save the submitted changes."""
    user = crud.get_user_by_id(user_id)
    if request.method == 'POST':
        name = request.form.get('name')
        email = request.form.get('email')
        birthday = request.form.get('birthday')
        zip_code = request.form.get('zip_code')
        new_user = crud.update_user(user_id, name, email, birthday, zip_code)
        print("****************", "USER UPDATED", new_user, "****************")
        # Redirect after POST so a browser refresh does not resubmit the form.
        return redirect('/')
    return render_template('update.html', user=user)
@app.route('/delete/<user_id>', methods=["GET", "POST"])
def delete_user(user_id):
    """Delete the given user and return to the homepage."""
    crud.delete_user(user_id)
    print("****************", "USER DELETED", "****************")
    return redirect('/')
if __name__ == '__main__':
    # Flask is already imported at module scope; the redundant in-guard
    # `from flask import Flask` has been removed (it was never used here).
    connect_to_db(app)
    app.run(host='0.0.0.0', debug=True)
| 27.047619 | 79 | 0.606808 |
d424c1c5ec98f58dcc1191c755acacf0c68d1cd3 | 923 | py | Python | modules/exploitation/exploit-db.py | decidedlygray/ptf | f17f50606fac5ef30f42c0b5e0fa57b58f696b99 | [
"FTL"
] | 4,391 | 2015-05-12T19:30:45.000Z | 2022-03-30T13:39:27.000Z | modules/exploitation/exploit-db.py | decidedlygray/ptf | f17f50606fac5ef30f42c0b5e0fa57b58f696b99 | [
"FTL"
] | 340 | 2015-05-14T13:50:44.000Z | 2022-01-13T14:40:14.000Z | modules/exploitation/exploit-db.py | decidedlygray/ptf | f17f50606fac5ef30f42c0b5e0fa57b58f696b99 | [
"FTL"
] | 1,290 | 2015-05-13T00:24:58.000Z | 2022-03-30T08:20:22.000Z | #!/usr/bin/env python
#####################################
# Installation module for exploit-db
#####################################
# AUTHOR OF MODULE NAME
AUTHOR="Mauro Risonho de Paula Assumpcao (firebits)"
# DESCRIPTION OF THE MODULE
DESCRIPTION="This module will install/update exploit-db - official exploit repo from OFFSEC"
# INSTALL TYPE GIT, SVN, FILE DOWNLOAD
# OPTIONS = GIT, SVN, FILE
INSTALL_TYPE="GIT"
# LOCATION OF THE FILE OR GIT/SVN REPOSITORY
REPOSITORY_LOCATION="https://github.com/offensive-security/exploit-database.git"
# WHERE DO YOU WANT TO INSTALL IT
INSTALL_LOCATION="exploit-db"
# DEPENDS FOR DEBIAN INSTALLS
DEBIAN="git"
# DEPENDS FOR FEDORA INSTALLS
FEDORA="git"
# COMMANDS TO RUN AFTER
AFTER_COMMANDS="sed -i 's/gitpath=\"\/opt\/exploit-database\"/gitpath=\"\/pentest\/exploitation\/exploit-db\"/g' /{INSTALL_LOCATION}/searchsploit "
# STUFF TO COPY TO PATH
LAUNCHER="searchsploit"
| 27.969697 | 147 | 0.699892 |
cf99b6eb624a659eb08fe61ff8373271785b8e21 | 1,441 | py | Python | cheritest/trunk/tests/cp2/test_cp2_ldl_unalign.py | tupipa/beri | cef1b41d52592cfa7454ddf59f9f2994e447cd66 | [
"Apache-2.0"
] | 36 | 2015-05-29T16:47:19.000Z | 2022-02-08T21:16:26.000Z | cheritest/trunk/tests/cp2/test_cp2_ldl_unalign.py | tupipa/beri | cef1b41d52592cfa7454ddf59f9f2994e447cd66 | [
"Apache-2.0"
] | 1 | 2015-10-14T13:05:21.000Z | 2015-10-19T20:34:03.000Z | cheritest/trunk/tests/cp2/test_cp2_ldl_unalign.py | tupipa/beri | cef1b41d52592cfa7454ddf59f9f2994e447cd66 | [
"Apache-2.0"
] | 15 | 2015-06-11T07:10:58.000Z | 2021-06-18T05:14:54.000Z | #-
# Copyright (c) 2016 Michael Roe
# All rights reserved.
#
# This software was developed by the University of Cambridge Computer
# Laboratory as part of the Rigorous Engineering of Mainstream Systems (REMS)
# project, funded by EPSRC grant EP/K008528/1.
#
# @BERI_LICENSE_HEADER_START@
#
# Licensed to BERI Open Systems C.I.C. (BERI) under one or more contributor
# license agreements. See the NOTICE file distributed with this work for
# additional information regarding copyright ownership. BERI licenses this
# file to you under the BERI Hardware-Software License, Version 1.0 (the
# "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at:
#
# http://www.beri-open-systems.org/legal/license-1-0.txt
#
# Unless required by applicable law or agreed to in writing, Work distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# @BERI_LICENSE_HEADER_END@
#
from beritest_tools import BaseBERITestCase
from nose.plugins.attrib import attr
class test_cp2_ldl_unalign(BaseBERITestCase):
    """Checks LDL behaviour when the capability register's base is unaligned."""

    @attr('capabilities')
    def test_cp2_ldl_unalign_1(self):
        # a0 holds the value the companion assembly loaded via LDL through an
        # unaligned C0.base; compare against the expected byte pattern.
        self.assertRegisterEqual(self.MIPS.a0, 0x0102030405060700, "LDL with unaligned C0.base did not read the expected value")
c78e642828e459f5d8adcb6ca3bdece4b26217b4 | 123 | py | Python | Exercicios/ex021.py | LuccasAls/Python-Exercicos | 6f51eec061cd1d6d5bc827a46670d2377162afee | [
"MIT"
] | null | null | null | Exercicios/ex021.py | LuccasAls/Python-Exercicos | 6f51eec061cd1d6d5bc827a46670d2377162afee | [
"MIT"
] | null | null | null | Exercicios/ex021.py | LuccasAls/Python-Exercicos | 6f51eec061cd1d6d5bc827a46670d2377162afee | [
"MIT"
] | null | null | null | from pygame import mixer
mixer.init()
mixer.music.load('arquivo da musica')
mixer.music.play()
input('Agora você escuta?')
| 20.5 | 37 | 0.756098 |
d763fec1741acdf79cac1abc802e4ddb0818d788 | 126 | py | Python | formbuilder/widgets.py | frueringborgerforening/fruering | cb482268272698d34482dbb956b79f0d114e1834 | [
"MIT"
] | 1 | 2018-05-14T20:19:42.000Z | 2018-05-14T20:19:42.000Z | formbuilder/widgets.py | andreasbrakhagecarstensen/fruering | cb482268272698d34482dbb956b79f0d114e1834 | [
"MIT"
] | 78 | 2018-03-18T09:36:26.000Z | 2019-12-16T21:06:09.000Z | formbuilder/widgets.py | andreasbrakhagecarstensen/fruering | cb482268272698d34482dbb956b79f0d114e1834 | [
"MIT"
] | null | null | null | from django.forms import RadioSelect
class FrueringRadio(RadioSelect):
template_name = 'formbuilder/widgets/radio.html'
| 21 | 52 | 0.801587 |
396770164381ec5dadc58b51d16894d9b242cb20 | 96 | py | Python | app/src/domain/entity/__init__.py | hagifoo/gae-pomodoro | 6babdfc8d4ac8483b59b4da1d2c9b13fddcc4383 | [
"MIT"
] | null | null | null | app/src/domain/entity/__init__.py | hagifoo/gae-pomodoro | 6babdfc8d4ac8483b59b4da1d2c9b13fddcc4383 | [
"MIT"
] | null | null | null | app/src/domain/entity/__init__.py | hagifoo/gae-pomodoro | 6babdfc8d4ac8483b59b4da1d2c9b13fddcc4383 | [
"MIT"
] | null | null | null | from .user import User
from .timer import Timer
from .slack import Slack
from .team import Team
| 19.2 | 24 | 0.791667 |
7f5e94f9f52510ba3a9e930331c761552234bd10 | 48,756 | py | Python | eth_tester/utils/backend_testing.py | PabloLefort/eth-tester | 9a795cff7da3916062884e9c1e690545741e60c5 | [
"MIT"
] | null | null | null | eth_tester/utils/backend_testing.py | PabloLefort/eth-tester | 9a795cff7da3916062884e9c1e690545741e60c5 | [
"MIT"
] | null | null | null | eth_tester/utils/backend_testing.py | PabloLefort/eth-tester | 9a795cff7da3916062884e9c1e690545741e60c5 | [
"MIT"
] | null | null | null | from __future__ import unicode_literals
import pytest
from cytoolz.dicttoolz import (
merge,
assoc,
dissoc,
)
import rlp
from eth_utils import (
is_address,
is_integer,
is_same_address,
is_dict,
is_hex,
denoms,
)
from eth_keys import (
keys,
)
from eth_tester.constants import (
UINT256_MIN,
UINT256_MAX,
BURN_ADDRESS,
FORK_HOMESTEAD,
FORK_DAO,
FORK_SPURIOUS_DRAGON,
FORK_TANGERINE_WHISTLE,
FORK_BYZANTIUM,
)
from eth_tester.exceptions import (
AccountLocked,
BlockNotFound,
FilterNotFound,
ValidationError,
TransactionFailed,
TransactionNotFound,
UnknownFork,
)
from .emitter_contract import (
_deploy_emitter,
_call_emitter,
EMITTER_ENUM,
)
from .math_contract import (
_deploy_math,
_make_call_math_transaction,
_decode_math_result,
)
from .throws_contract import (
_deploy_throws,
_make_call_throws_transaction,
_decode_throws_result,
)
# Private key (and its derived address) used by the add-account tests.
PK_A = '0x58d23b55bc9cdce1f18c2500f40ff4ab7245df9a89505e9b1fa4851f623d241d'
PK_A_ADDRESS = '0xdc544d1aa88ff8bbd2f2aec754b1f1e99e1812fd'
# Deliberately non-default so tests can verify gas_price round-trips.
NON_DEFAULT_GAS_PRICE = 504
# Minimal value-transfer transaction; tests fill in the 'from' field.
SIMPLE_TRANSACTION = {
    "to": BURN_ADDRESS,
    "gas_price": NON_DEFAULT_GAS_PRICE,
    "value": 0,
    "gas": 21000,
}
# Same transaction with an explicit nonce.
TRANSACTION_WTH_NONCE = assoc(SIMPLE_TRANSACTION, 'nonce', 0)
# Contract-creation transactions: 'to' is empty or absent entirely.
CONTRACT_TRANSACTION_EMPTY_TO = {
    "to": '',
    "gas_price": NON_DEFAULT_GAS_PRICE,
    "value": 0,
    "gas": 100000,
}
CONTRACT_TRANSACTION_MISSING_TO = dissoc(CONTRACT_TRANSACTION_EMPTY_TO, 'to')
# Keys every serialized block is expected to contain.
BLOCK_KEYS = {
    "number",
    "hash",
    "parent_hash",
    "nonce",
    "sha3_uncles",
    "logs_bloom",
    "transactions_root",
    "receipts_root",
    "state_root",
    "miner",
    "difficulty",
    "total_difficulty",
    "size",
    "extra_data",
    "gas_limit",
    "gas_used",
    "timestamp",
    "transactions",
    "uncles",
}
def _validate_serialized_block(block):
    """Raise AssertionError if *block* is missing any expected block key."""
    missing_keys = BLOCK_KEYS.difference(block.keys())
    if not missing_keys:
        return
    raise AssertionError(
        "Serialized block is missing the following keys: {0}".format(
            "|".join(sorted(missing_keys)),
        )
    )
class BaseTestBackendDirect(object):
#
# Utils
#
def _send_and_check_transaction(self, eth_tester, test_transaction, _from):
    """Send *test_transaction* from *_from* and verify the stored copy."""
    full_transaction = assoc(test_transaction, 'from', _from)
    transaction_hash = eth_tester.send_transaction(full_transaction)
    sent_transaction = eth_tester.get_transaction_by_hash(transaction_hash)
    self._check_transactions(full_transaction, sent_transaction)
def _check_transactions(self, expected_transaction, actual_transaction):
    """Assert the retrieved transaction matches the one that was sent."""
    assert is_same_address(actual_transaction['from'], expected_transaction['from'])
    expected_to = expected_transaction.get('to', '')
    if expected_to == '':
        # Contract creations are normalized to an empty 'to' field.
        assert actual_transaction['to'] == ''
    else:
        assert is_same_address(actual_transaction['to'], expected_to)
    for field in ('gas_price', 'gas', 'value'):
        assert actual_transaction[field] == expected_transaction[field]
#
# Testing Flags
#
# Backends that cannot execute EVM bytecode override this to False.
supports_evm_execution = True

def skip_if_no_evm_execution(self):
    """Skip the running test when the backend cannot execute EVM code."""
    if not self.supports_evm_execution:
        pytest.skip('EVM Execution is not supported.')
#
# Accounts
#
def test_get_accounts(self, eth_tester):
    """The backend lists at least one account and all are valid addresses."""
    accounts = eth_tester.get_accounts()
    assert accounts
    for account in accounts:
        assert is_address(account)
def test_add_account_no_password(self, eth_tester):
    """An account added without a password can be funded and used at once."""
    account = eth_tester.add_account(PK_A)
    assert is_address(account)
    # The new account must show up in the account listing.
    assert any((
        is_same_address(account, value)
        for value
        in eth_tester.get_accounts()
    ))
    # Fund it
    eth_tester.send_transaction({
        'from': eth_tester.get_accounts()[0],
        'to': account,
        'value': 1 * denoms.ether,
        'gas': 21000,
        'gas_price': NON_DEFAULT_GAS_PRICE,
    })
    # No password was supplied, so sending works without unlocking.
    self._send_and_check_transaction(eth_tester, SIMPLE_TRANSACTION, account)
def test_add_account_with_password(self, eth_tester):
    """A password-protected account only sends while it is unlocked."""
    account = eth_tester.add_account(PK_A, 'test-password')
    assert is_address(account)
    # The new account must show up in the account listing.
    assert any((
        is_same_address(account, value)
        for value
        in eth_tester.get_accounts()
    ))
    # Fund it
    eth_tester.send_transaction({
        'from': eth_tester.get_accounts()[0],
        'to': account,
        'value': 1 * denoms.ether,
        'gas': 21000,
        'gas_price': NON_DEFAULT_GAS_PRICE,
    })
    # Locked by default: sending must fail before unlocking.
    with pytest.raises(AccountLocked):
        self._send_and_check_transaction(eth_tester, SIMPLE_TRANSACTION, account)
    eth_tester.unlock_account(account, 'test-password')
    self._send_and_check_transaction(eth_tester, SIMPLE_TRANSACTION, account)
    # Re-locking restores the failure mode.
    eth_tester.lock_account(account)
    with pytest.raises(AccountLocked):
        self._send_and_check_transaction(eth_tester, SIMPLE_TRANSACTION, account)
def test_get_balance_of_listed_accounts(self, eth_tester):
    """Every account balance is an integer within the uint256 range."""
    for account in eth_tester.get_accounts():
        balance = eth_tester.get_balance(account)
        assert is_integer(balance)
        assert UINT256_MIN <= balance <= UINT256_MAX
def test_get_code_account_with_code(self, eth_tester):
self.skip_if_no_evm_execution()
emitter_address = _deploy_emitter(eth_tester)
code = eth_tester.get_code(emitter_address)
assert code == "0x606060405236156100615760e060020a60003504630bb563d6811461006357806317c0c1801461013657806320f0256e1461017057806390b41d8b146101ca5780639c37705314610215578063aa6fd82214610267578063e17bf956146102a9575b005b60206004803580820135601f810184900490930260809081016040526060848152610061946024939192918401918190838280828437509496505050505050507fa95e6e2a182411e7a6f9ed114a85c3761d87f9b8f453d842c71235aa64fff99f8160405180806020018281038252838181518152602001915080519060200190808383829060006004602084601f0104600f02600301f150905090810190601f1680156101255780820380516001836020036101000a031916815260200191505b509250505060405180910390a15b50565b610061600435600181141561037a577f1e86022f78f8d04f8e3dfd13a2bdb280403e6632877c0dbee5e4eeb259908a5c60006060a1610133565b6100616004356024356044356064356084356005851415610392576060848152608084815260a084905260c08390527ff039d147f23fe975a4254bdf6b1502b8c79132ae1833986b7ccef2638e73fdf991a15b5050505050565b61006160043560243560443560038314156103d457606082815260808290527fdf0cb1dea99afceb3ea698d62e705b736f1345a7eee9eb07e63d1f8f556c1bc590604090a15b505050565b6100616004356024356044356064356004841415610428576060838152608083905260a08290527f4a25b279c7c585f25eda9788ac9420ebadae78ca6b206a0e6ab488fd81f550629080a15b50505050565b61006160043560243560028214156104655760608181527f56d2ef3c5228bf5d88573621e325a4672ab50e033749a601e4f4a5e1dce905d490602090a15b5050565b60206004803580820135601f810184900490930260809081016040526060848152610061946024939192918401918190838280828437509496505050505050507f532fd6ea96cfb78bb46e09279a26828b8b493de1a2b8b1ee1face527978a15a58160405180806020018281038252838181518152602001915080519060200190808383829060006004602084601f0104600f02600301f150905090810190601f1680156101255780820380516001836020036101000a03191681526020019150509250505060405180910390a150565b600081141561038d5760006060a0610133565b610002565b600b85141561038d5760608481526080849052819083907fa30ece802b64cd2b7e57dabf4010aabf5df26d1556977affb07b98a77ad955b590604090a36101c3565b600983141561040f5760
6082815281907f057bc32826fbe161da1c110afcdcae7c109a8b69149f727fc37a603c60ef94ca90602090a2610210565b600883141561038d5760608281528190602090a1610210565b600a84141561038d576060838152819083907ff16c999b533366ca5138d78e85da51611089cd05749f098d6c225d4cd42ee6ec90602090a3610261565b600782141561049a57807ff70fe689e290d8ce2b2a388ac28db36fbb0e16a6d89c6804c461f65a1b40bb1560006060a26102a5565b600682141561038d578060006060a16102a556" # noqa: E501
def test_get_code_account_without_code(self, eth_tester):
    """An address with no deployed contract reports empty code."""
    assert eth_tester.get_code(BURN_ADDRESS) == '0x'
def test_get_nonce(self, eth_tester):
    """Every account nonce is an integer within the uint256 range."""
    for account in eth_tester.get_accounts():
        nonce = eth_tester.get_nonce(account)
        assert is_integer(nonce)
        assert UINT256_MIN <= nonce <= UINT256_MAX
#
# Mining
#
def test_mine_block_single(self, eth_tester):
    """Mining one block advances the latest block number by exactly one."""
    eth_tester.mine_blocks()
    number_before = eth_tester.get_block_by_number('latest')['number']
    eth_tester.mine_blocks()
    number_after = eth_tester.get_block_by_number('latest')['number']
    assert is_integer(number_before)
    assert is_integer(number_after)
    assert number_after == number_before + 1
def test_mine_multiple_blocks(self, eth_tester):
    """Mining ten blocks advances the latest block number by ten."""
    eth_tester.mine_blocks()
    number_before = eth_tester.get_block_by_number('latest')['number']
    eth_tester.mine_blocks(10)
    number_after = eth_tester.get_block_by_number('latest')['number']
    assert is_integer(number_before)
    assert is_integer(number_after)
    assert number_after == number_before + 10
#
# Transaction Sending
#
@pytest.mark.parametrize('is_pending', [True, False])
def test_send_raw_transaction_valid_raw_transaction(self, eth_tester, is_pending):
    """A presigned raw transaction is accepted and eventually mined."""
    if is_pending:
        # Local import keeps the module importable when these optional
        # backends are not installed.
        from eth_tester.backends import PyEthereum16Backend, PyEthereum21Backend
        if isinstance(eth_tester.backend, (PyEthereum16Backend, PyEthereum21Backend)):
            pytest.xfail("backend does not support presigned pending transactions")
    # send funds to our sender
    raw_privkey = b'\x11' * 32
    test_key = keys.PrivateKey(raw_privkey)
    eth_tester.send_transaction({
        "from": eth_tester.get_accounts()[0],
        "to": test_key.public_key.to_checksum_address(),
        "gas": 21000,
        "value": 1 * denoms.ether,
    })
    # transaction: nonce=0, gas_price=1, gas=21000, to=BURN_ADDRESS, value=50000, data=b'',
    # and signed with `test_key`
    transaction_hex = "0xf861800182520894dead00000000000000000000000000000000000082c350801ba073128146b850e2d38a4742d1afa48544e0ac6bc4b4dcb562583cd2224ad9a082a0680086a2801d02b12431cc3c79ec6c6a0cb846a0b3a8ec970f6e1b76d55ee7e2"  # noqa: E501
    if is_pending:
        eth_tester.disable_auto_mine_transactions()
    transaction_hash = eth_tester.send_raw_transaction(transaction_hex)
    if is_pending:
        # While auto-mining is off, the transaction must not have a receipt yet.
        with pytest.raises(TransactionNotFound):
            eth_tester.get_transaction_receipt(transaction_hash)
        eth_tester.enable_auto_mine_transactions()
    receipt = eth_tester.get_transaction_receipt(transaction_hash)
    # assert that the raw transaction is confirmed and successful
    assert receipt['transaction_hash'] == transaction_hash
def test_send_raw_transaction_invalid_raw_transaction(self, eth_tester):
    """Garbage raw-transaction bytes fail RLP decoding."""
    self.skip_if_no_evm_execution()
    with pytest.raises(rlp.exceptions.DecodingError):
        eth_tester.send_raw_transaction('0x1234')
@pytest.mark.parametrize(
    'test_transaction',
    (
        SIMPLE_TRANSACTION,
        TRANSACTION_WTH_NONCE,
        CONTRACT_TRANSACTION_EMPTY_TO,
        CONTRACT_TRANSACTION_MISSING_TO,
    ),
    ids=[
        'Simple transaction',
        'Transaction with nonce',
        'Create Contract - empty to',
        'Create Contract - missing to',
    ],
)
def test_send_transaction(self, eth_tester, test_transaction):
    """Each supported transaction shape can be sent from the first account."""
    accounts = eth_tester.get_accounts()
    assert accounts, "No accounts available for transaction sending"
    self._send_and_check_transaction(eth_tester, test_transaction, accounts[0])
def test_block_number_auto_mine_transactions_enabled(self, eth_tester):
    """With auto-mining on, sending a transaction mines a new block."""
    eth_tester.mine_blocks()
    eth_tester.enable_auto_mine_transactions()
    number_before = eth_tester.get_block_by_number('latest')['number']
    eth_tester.send_transaction({
        "from": eth_tester.get_accounts()[0],
        "to": BURN_ADDRESS,
        "gas": 21000,
    })
    number_after = eth_tester.get_block_by_number('latest')['number']
    assert number_after == number_before + 1
def test_auto_mine_transactions_disabled_block_number(self, eth_tester):
    """With auto-mining off, sending a transaction mines no block."""
    eth_tester.mine_blocks()
    eth_tester.disable_auto_mine_transactions()
    number_before = eth_tester.get_block_by_number('latest')['number']
    eth_tester.send_transaction({
        "from": eth_tester.get_accounts()[0],
        "to": BURN_ADDRESS,
        "gas": 21000,
    })
    number_after = eth_tester.get_block_by_number('latest')['number']
    assert number_after == number_before
    def test_auto_mine_transactions_disabled_replace_transaction(self, eth_tester):
        """Re-sending a pending transaction (same sender + nonce) must not raise."""
        eth_tester.mine_blocks()
        eth_tester.disable_auto_mine_transactions()
        transaction = {
            "from": eth_tester.get_accounts()[0],
            "to": BURN_ADDRESS,
            "value": 1,
            "gas": 21000,
            "nonce": 0,
        }
        try:
            eth_tester.send_transaction(transaction)
            # same sender and nonce with a new value: a replacement transaction
            transaction["value"] = 2
            eth_tester.send_transaction(transaction)
        except Exception:
            pytest.fail("Sending replacement transaction caused exception")
    def test_auto_mine_transactions_disabled_multiple_accounts(self, eth_tester):
        """A replacement transaction only evicts the pending transaction from the
        same sender, even when another account's transaction shares the nonce."""
        eth_tester.mine_blocks()
        eth_tester.disable_auto_mine_transactions()
        tx1 = eth_tester.send_transaction({
            "from": eth_tester.get_accounts()[0],
            "to": BURN_ADDRESS,
            "value": 1,
            "gas": 21000,
            "nonce": 0,
        })
        tx2 = eth_tester.send_transaction({
            "from": eth_tester.get_accounts()[1],
            "to": BURN_ADDRESS,
            "value": 1,
            "gas": 21000,
            "nonce": 0,
        })
        # both pending transactions are visible before any replacement
        assert tx1 == eth_tester.get_transaction_by_hash(tx1)['hash']
        assert tx2 == eth_tester.get_transaction_by_hash(tx2)['hash']
        tx2_replacement = eth_tester.send_transaction({
            "from": eth_tester.get_accounts()[1],
            "to": BURN_ADDRESS,
            "value": 2,
            "gas": 21000,
            "nonce": 0,
        })
        # Replaces the correct transaction
        assert tx1 == eth_tester.get_transaction_by_hash(tx1)['hash']
        assert tx2_replacement == eth_tester.get_transaction_by_hash(tx2_replacement)['hash']
        with pytest.raises(TransactionNotFound):
            eth_tester.get_transaction_by_hash(tx2)
    def test_auto_mine_transactions_disabled_returns_hashes_when_enabled(self, eth_tester):
        """Re-enabling auto-mining mines the pending transactions and returns their
        hashes, omitting any transaction that was replaced while pending."""
        self.skip_if_no_evm_execution()
        eth_tester.mine_blocks()
        eth_tester.disable_auto_mine_transactions()
        tx1 = eth_tester.send_transaction({
            "from": eth_tester.get_accounts()[0],
            "to": BURN_ADDRESS,
            "value": 1,
            "gas": 21000,
            "nonce": 0,
        })
        tx2 = eth_tester.send_transaction({  # noqa: F841
            "from": eth_tester.get_accounts()[1],
            "to": BURN_ADDRESS,
            "value": 1,
            "gas": 21000,
            "nonce": 0,
        })
        # same sender + nonce as tx2, so tx2 is evicted from the pending pool
        tx2_replacement = eth_tester.send_transaction({
            "from": eth_tester.get_accounts()[1],
            "to": BURN_ADDRESS,
            "value": 2,
            "gas": 21000,
            "nonce": 0,
        })
        sent_transactions = eth_tester.enable_auto_mine_transactions()
        assert sent_transactions == [tx1, tx2_replacement]
    @pytest.mark.parametrize(
        'test_transaction',
        (
            SIMPLE_TRANSACTION,
            CONTRACT_TRANSACTION_EMPTY_TO,
            CONTRACT_TRANSACTION_MISSING_TO,
        ),
        ids=[
            'Simple transaction',
            'Create Contract - empty to',
            'Create Contract - missing to',
        ],
    )
    def test_manual_mine_pending_transactions(self, eth_tester, test_transaction):
        """With auto-mining disabled a sent transaction stays pending (no receipt)
        until mine_block() is called, after which it has a receipt."""
        accounts = eth_tester.get_accounts()
        assert accounts, "No accounts available for transaction sending"
        complete_transaction = assoc(test_transaction, 'from', accounts[0])
        self.skip_if_no_evm_execution()
        eth_tester.mine_blocks()
        eth_tester.disable_auto_mine_transactions()
        txn_hash = eth_tester.send_transaction(complete_transaction)
        # no receipt exists while the transaction is still pending
        with pytest.raises(TransactionNotFound):
            eth_tester.get_transaction_receipt(txn_hash)
        pending_transaction = eth_tester.get_transaction_by_hash(txn_hash)
        self._check_transactions(complete_transaction, pending_transaction)
        eth_tester.mine_block()
        receipt = eth_tester.get_transaction_receipt(txn_hash)
        assert receipt['transaction_hash'] == txn_hash
        assert receipt['block_number']
        mined_transaction = eth_tester.get_transaction_by_hash(txn_hash)
        self._check_transactions(complete_transaction, mined_transaction)
#
# Blocks
#
def test_get_genesis_block_by_number(self, eth_tester):
block = eth_tester.get_block_by_number(0)
assert block['number'] == 0
_validate_serialized_block(block)
def test_get_genesis_block_by_hash(self, eth_tester):
genesis_hash = eth_tester.get_block_by_number(0)['hash']
block = eth_tester.get_block_by_hash(genesis_hash)
assert block['number'] == 0
_validate_serialized_block(block)
    def test_get_block_by_number(self, eth_tester):
        """Each mined block is retrievable by number, with the matching hash."""
        origin_block_number = eth_tester.get_block_by_number('pending')['number']
        mined_block_hashes = eth_tester.mine_blocks(10)
        for offset, block_hash in enumerate(mined_block_hashes):
            block_number = origin_block_number + offset
            block = eth_tester.get_block_by_number(block_number)
            assert block['number'] == block_number
            assert block['hash'] == block_hash
            _validate_serialized_block(block)
    def test_get_block_by_number_full_transactions(self, eth_tester):
        """With full_transactions=True, block transactions are full dicts."""
        eth_tester.mine_blocks(2)
        transaction_hash = eth_tester.send_transaction({
            "from": eth_tester.get_accounts()[0],
            "to": BURN_ADDRESS,
            "gas": 21000,
        })
        transaction = eth_tester.get_transaction_by_hash(transaction_hash)
        block = eth_tester.get_block_by_number(
            transaction['block_number'],
            full_transactions=True,
        )
        assert is_dict(block['transactions'][0])
    def test_get_block_by_number_only_transaction_hashes(self, eth_tester):
        """With full_transactions=False, block transactions are hex hashes only."""
        eth_tester.mine_blocks(2)
        transaction_hash = eth_tester.send_transaction({
            "from": eth_tester.get_accounts()[0],
            "to": BURN_ADDRESS,
            "gas": 21000,
        })
        transaction = eth_tester.get_transaction_by_hash(transaction_hash)
        block = eth_tester.get_block_by_number(
            transaction['block_number'],
            full_transactions=False,
        )
        assert is_hex(block['transactions'][0])
    def test_get_block_by_hash(self, eth_tester):
        """Each mined block is retrievable by its hash, with the matching number."""
        origin_block_number = eth_tester.get_block_by_number('pending')['number']
        mined_block_hashes = eth_tester.mine_blocks(10)
        for offset, block_hash in enumerate(mined_block_hashes):
            block_number = origin_block_number + offset
            block = eth_tester.get_block_by_hash(block_hash)
            assert block['number'] == block_number
            assert block['hash'] == block_hash
    def test_get_block_by_hash_full_transactions(self, eth_tester):
        """With full_transactions=True, block-by-hash transactions are full dicts."""
        eth_tester.mine_blocks(2)
        transaction_hash = eth_tester.send_transaction({
            "from": eth_tester.get_accounts()[0],
            "to": BURN_ADDRESS,
            "gas": 21000,
        })
        transaction = eth_tester.get_transaction_by_hash(transaction_hash)
        block = eth_tester.get_block_by_hash(
            transaction['block_hash'],
            full_transactions=True,
        )
        assert is_dict(block['transactions'][0])
    def test_get_block_by_hash_only_transaction_hashes(self, eth_tester):
        """With full_transactions=False, block-by-hash transactions are hex hashes."""
        eth_tester.mine_blocks(2)
        transaction_hash = eth_tester.send_transaction({
            "from": eth_tester.get_accounts()[0],
            "to": BURN_ADDRESS,
            "gas": 21000,
        })
        transaction = eth_tester.get_transaction_by_hash(transaction_hash)
        block = eth_tester.get_block_by_hash(
            transaction['block_hash'],
            full_transactions=False,
        )
        assert is_hex(block['transactions'][0])
def test_get_block_by_earliest(self, eth_tester):
eth_tester.mine_blocks(10)
block = eth_tester.get_block_by_number('earliest')
assert block['number'] == 0
def test_get_block_by_latest_unmined_genesis(self, eth_tester):
block = eth_tester.get_block_by_number('latest')
assert block['number'] == 0
def test_get_block_by_latest_only_genesis(self, eth_tester):
block = eth_tester.get_block_by_number('latest')
assert block['number'] == 0
def test_get_block_by_latest(self, eth_tester):
origin_block_number = eth_tester.get_block_by_number('pending')['number']
eth_tester.mine_blocks(10)
block = eth_tester.get_block_by_number('latest')
assert block['number'] == 9 + origin_block_number
def test_get_block_by_pending(self, eth_tester):
origin_block_number = eth_tester.get_block_by_number('pending')['number']
eth_tester.mine_blocks(10)
block = eth_tester.get_block_by_number('pending')
assert block['number'] == 10 + origin_block_number
def test_get_block_missing(self, eth_tester):
with pytest.raises(BlockNotFound):
eth_tester.get_block_by_hash('0x' + '00' * 32)
# Transactions
def test_get_transaction_by_hash(self, eth_tester):
transaction_hash = eth_tester.send_transaction({
"from": eth_tester.get_accounts()[0],
"to": BURN_ADDRESS,
"gas": 21000,
})
transaction = eth_tester.get_transaction_by_hash(transaction_hash)
assert transaction['hash'] == transaction_hash
    def test_get_transaction_by_hash_for_unmined_transaction(self, eth_tester):
        """A pending transaction is retrievable by hash but has no block hash yet."""
        eth_tester.disable_auto_mine_transactions()
        transaction_hash = eth_tester.send_transaction({
            "from": eth_tester.get_accounts()[0],
            "to": BURN_ADDRESS,
            "gas": 21000,
        })
        transaction = eth_tester.get_transaction_by_hash(transaction_hash)
        assert transaction['hash'] == transaction_hash
        assert transaction['block_hash'] is None
def test_get_transaction_receipt_for_mined_transaction(self, eth_tester):
transaction_hash = eth_tester.send_transaction({
"from": eth_tester.get_accounts()[0],
"to": BURN_ADDRESS,
"gas": 21000,
})
receipt = eth_tester.get_transaction_receipt(transaction_hash)
assert receipt['transaction_hash'] == transaction_hash
    def test_get_transaction_receipt_for_unmined_transaction_raises(self, eth_tester):
        """Requesting the receipt of a still-pending transaction raises
        TransactionNotFound."""
        eth_tester.disable_auto_mine_transactions()
        transaction_hash = eth_tester.send_transaction({
            "from": eth_tester.get_accounts()[0],
            "to": BURN_ADDRESS,
            "gas": 21000,
        })
        with pytest.raises(TransactionNotFound):
            eth_tester.get_transaction_receipt(transaction_hash)
def test_call_return13(self, eth_tester):
self.skip_if_no_evm_execution()
math_address = _deploy_math(eth_tester)
call_math_transaction = _make_call_math_transaction(
eth_tester,
math_address,
'return13',
)
raw_result = eth_tester.call(call_math_transaction)
result = _decode_math_result('return13', raw_result)
assert result == (13,)
def test_call_add(self, eth_tester):
self.skip_if_no_evm_execution()
math_address = _deploy_math(eth_tester)
call_math_transaction = _make_call_math_transaction(
eth_tester,
math_address,
'add',
fn_args=(7, 13),
)
raw_result = eth_tester.call(call_math_transaction)
result = _decode_math_result('add', raw_result)
assert result == (20,)
    def test_call_query_previous_state(self, eth_tester):
        """call() with a block number queries state as of that historical block."""
        self.skip_if_no_evm_execution()
        math_address = _deploy_math(eth_tester)
        call_math_transaction = _make_call_math_transaction(
            eth_tester,
            math_address,
            'counter'
        )
        call_math_transaction_inc = _make_call_math_transaction(
            eth_tester,
            math_address,
            'increment',
        )
        eth_tester.mine_blocks(2)
        eth_tester.send_transaction(call_math_transaction_inc)
        # query the counter at block 1, i.e. before the increment was mined
        raw_result = eth_tester.call(call_math_transaction, 1)
        result = _decode_math_result('counter', raw_result)
        raw_result_new = eth_tester.call(call_math_transaction)
        result_new = _decode_math_result('counter', raw_result_new)
        assert result == (0,)
        assert result_new == (1,)
    def test_estimate_gas(self, eth_tester):
        """estimate_gas gives an upper bound that is within tolerance of actual use."""
        self.skip_if_no_evm_execution()
        math_address = _deploy_math(eth_tester)
        estimate_call_math_transaction = _make_call_math_transaction(
            eth_tester,
            math_address,
            'increment',
        )
        gas_estimation = eth_tester.estimate_gas(estimate_call_math_transaction)
        call_math_transaction = assoc(estimate_call_math_transaction, 'gas', gas_estimation)
        transaction_hash = eth_tester.send_transaction(call_math_transaction)
        receipt = eth_tester.get_transaction_receipt(transaction_hash)
        assert receipt['gas_used'] <= gas_estimation
        # Tolerance set to the default py-evm tolerance:
        # https://github.com/ethereum/py-evm/blob/f0276e684edebd7cd9e84cd04b3229ab9dd958b9/evm/estimators/gas.py#L77
        # https://github.com/ethereum/py-evm/blob/f0276e684edebd7cd9e84cd04b3229ab9dd958b9/evm/estimators/__init__.py#L11
        assert receipt['gas_used'] >= gas_estimation - 21000
    def test_can_call_after_exception_raised_calling(self, eth_tester):
        """A failed call() must not poison state: subsequent calls still succeed."""
        self.skip_if_no_evm_execution()
        throws_address = _deploy_throws(eth_tester)
        call_will_throw_transaction = _make_call_throws_transaction(
            eth_tester,
            throws_address,
            'willThrow',
        )
        with pytest.raises(TransactionFailed):
            eth_tester.call(call_will_throw_transaction)
        call_value_transaction = _make_call_throws_transaction(
            eth_tester,
            throws_address,
            'value',
        )
        raw_result = eth_tester.call(call_value_transaction)
        result = _decode_throws_result('value', raw_result)
        assert result == (1,)
    def test_can_estimate_gas_after_exception_raised_estimating_gas(self, eth_tester):
        """A failed estimate_gas() must not poison state: later estimates succeed."""
        self.skip_if_no_evm_execution()
        throws_address = _deploy_throws(eth_tester)
        call_will_throw_transaction = _make_call_throws_transaction(
            eth_tester,
            throws_address,
            'willThrow',
        )
        # drop the explicit 'gas' key so the estimator has to supply one
        with pytest.raises(TransactionFailed):
            eth_tester.estimate_gas(dissoc(call_will_throw_transaction, 'gas'))
        call_set_value_transaction = _make_call_throws_transaction(
            eth_tester,
            throws_address,
            'setValue',
            fn_args=(2,),
        )
        gas_estimation = eth_tester.estimate_gas(dissoc(call_set_value_transaction, 'gas'))
        assert gas_estimation
#
# Snapshot and Revert
#
    def test_genesis_snapshot_and_revert(self, eth_tester):
        """A snapshot taken at genesis can be restored after mining blocks."""
        origin_latest = eth_tester.get_block_by_number('latest')['number']
        origin_pending = eth_tester.get_block_by_number('pending')['number']
        snapshot_id = eth_tester.take_snapshot()
        # now mine 10 blocks in
        eth_tester.mine_blocks(10)
        assert eth_tester.get_block_by_number('latest')['number'] == origin_latest + 10
        assert eth_tester.get_block_by_number('pending')['number'] == origin_pending + 10
        eth_tester.revert_to_snapshot(snapshot_id)
        assert eth_tester.get_block_by_number('latest')['number'] == origin_latest
        assert eth_tester.get_block_by_number('pending')['number'] == origin_pending
    def test_snapshot_and_revert_post_genesis(self, eth_tester):
        """A snapshot taken after some blocks can be restored, undoing later mining."""
        eth_tester.mine_blocks(5)
        origin_latest = eth_tester.get_block_by_number('latest')['number']
        origin_pending = eth_tester.get_block_by_number('pending')['number']
        snapshot_id = eth_tester.take_snapshot()
        # now mine 10 blocks in
        eth_tester.mine_blocks(10)
        assert eth_tester.get_block_by_number('latest')['number'] == origin_latest + 10
        assert eth_tester.get_block_by_number('pending')['number'] == origin_pending + 10
        eth_tester.revert_to_snapshot(snapshot_id)
        assert eth_tester.get_block_by_number('latest')['number'] == origin_latest
        assert eth_tester.get_block_by_number('pending')['number'] == origin_pending
def test_revert_cleans_up_invalidated_pending_block_filters(self, eth_tester):
# first mine 10 blocks in
eth_tester.mine_blocks(2)
# setup a filter
filter_a_id = eth_tester.create_block_filter()
filter_b_id = eth_tester.create_block_filter()
# mine 5 blocks before the snapshot
common_blocks = set(eth_tester.mine_blocks(2))
snapshot_id = eth_tester.take_snapshot()
# mine another 5 blocks
fork_a_transaction_hash = eth_tester.send_transaction({
"from": eth_tester.get_accounts()[0],
"to": BURN_ADDRESS,
"gas": 21000,
"value": 1,
})
fork_a_transaction_block_hash = eth_tester.get_transaction_by_hash(
fork_a_transaction_hash,
)['block_hash']
fork_a_blocks = eth_tester.mine_blocks(2)
before_revert_changes_logs_a = eth_tester.get_only_filter_changes(filter_a_id)
before_revert_all_logs_a = eth_tester.get_all_filter_logs(filter_a_id)
before_revert_all_logs_b = eth_tester.get_all_filter_logs(filter_b_id)
assert common_blocks.intersection(before_revert_changes_logs_a) == common_blocks
assert common_blocks.intersection(before_revert_all_logs_a) == common_blocks
assert common_blocks.intersection(before_revert_all_logs_b) == common_blocks
expected_before_block_hashes = common_blocks.union([
fork_a_transaction_block_hash,
]).union(fork_a_blocks)
# sanity check that the filters picked up on the log changes.
assert set(before_revert_changes_logs_a) == expected_before_block_hashes
assert set(before_revert_changes_logs_a) == expected_before_block_hashes
assert set(before_revert_all_logs_a) == expected_before_block_hashes
assert set(before_revert_all_logs_b) == expected_before_block_hashes
# now revert to snapshot
eth_tester.revert_to_snapshot(snapshot_id)
# send a different transaction to ensure our new blocks are different
fork_b_transaction_hash = eth_tester.send_transaction({
"from": eth_tester.get_accounts()[0],
"to": BURN_ADDRESS,
"gas": 21000,
"value": 2,
})
fork_b_transaction_block_hash = eth_tester.get_transaction_by_hash(
fork_b_transaction_hash,
)['block_hash']
fork_b_blocks = eth_tester.mine_blocks(2)
# check that are blocks don't intersect
assert not set(fork_a_blocks).intersection(fork_b_blocks)
after_revert_changes_logs_a = eth_tester.get_only_filter_changes(filter_a_id)
after_revert_changes_logs_b = eth_tester.get_only_filter_changes(filter_b_id)
after_revert_all_logs_a = eth_tester.get_all_filter_logs(filter_a_id)
after_revert_all_logs_b = eth_tester.get_all_filter_logs(filter_b_id)
expected_all_after_blocks = common_blocks.union([
fork_b_transaction_block_hash,
]).union(fork_b_blocks)
expected_new_after_blocks = set(fork_b_blocks).union([
fork_b_transaction_block_hash,
])
assert set(after_revert_changes_logs_a) == expected_new_after_blocks
assert set(after_revert_changes_logs_b) == expected_all_after_blocks
assert set(after_revert_all_logs_a) == expected_all_after_blocks
assert set(after_revert_all_logs_b) == expected_all_after_blocks
    def test_revert_cleans_up_invalidated_pending_transaction_filters(self, eth_tester):
        """Reverting to a snapshot drops pending-transaction filter entries from
        the abandoned fork; only pre-snapshot and new-fork transactions remain."""
        def _transaction(**kwargs):
            # base burn transaction; kwargs override/extend the defaults
            return merge(
                {"from": eth_tester.get_accounts()[0], "to": BURN_ADDRESS, "gas": 21000},
                kwargs,
            )
        # send a few initial transactions
        for _ in range(5):
            eth_tester.send_transaction(_transaction())
        # setup a filter
        filter_id = eth_tester.create_pending_transaction_filter()
        # send 2 transactions
        common_transactions = set([
            eth_tester.send_transaction(_transaction(value=1)),
            eth_tester.send_transaction(_transaction(value=2)),
        ])
        # take a snapshot
        snapshot_id = eth_tester.take_snapshot()
        # send 3 transactions
        before_transactions = [
            eth_tester.send_transaction(_transaction(value=3)),
            eth_tester.send_transaction(_transaction(value=4)),
            eth_tester.send_transaction(_transaction(value=5)),
        ]
        # pull and sanity check the filter changes
        before_filter_changes = eth_tester.get_only_filter_changes(filter_id)
        before_filter_logs = eth_tester.get_all_filter_logs(filter_id)
        assert set(before_filter_changes) == common_transactions.union(before_transactions)
        assert set(before_filter_logs) == common_transactions.union(before_transactions)
        # revert the chain
        eth_tester.revert_to_snapshot(snapshot_id)
        # send 3 transactions on the new fork
        after_transactions = [
            eth_tester.send_transaction(_transaction(value=6)),
            eth_tester.send_transaction(_transaction(value=7)),
            eth_tester.send_transaction(_transaction(value=8)),
        ]
        # pull and sanity check the filter changes
        after_filter_changes = eth_tester.get_only_filter_changes(filter_id)
        after_filter_logs = eth_tester.get_all_filter_logs(filter_id)
        assert set(after_filter_changes) == set(after_transactions)
        assert set(after_filter_logs) == common_transactions.union(after_transactions)
    def test_revert_cleans_up_invalidated_log_entries(self, eth_tester):
        """Reverting to a snapshot drops log-filter entries from the abandoned fork."""
        self.skip_if_no_evm_execution()
        # setup the emitter
        emitter_address = _deploy_emitter(eth_tester)
        def _emit(v):
            # emit one LogSingleWithIndex event carrying the value ``v``
            return _call_emitter(
                eth_tester,
                emitter_address,
                'logSingle',
                [EMITTER_ENUM['LogSingleWithIndex'], v],
            )
        # emit 2 logs pre-filtering
        _emit(1)
        _emit(2)
        # setup a filter
        filter_id = eth_tester.create_log_filter()
        # emit 2 logs pre-snapshot
        _emit(1)
        _emit(2)
        # take a snapshot
        snapshot_id = eth_tester.take_snapshot()
        # emit 3 logs after-snapshot
        _emit(3)
        _emit(4)
        _emit(5)
        before_changes = eth_tester.get_only_filter_changes(filter_id)
        before_all = eth_tester.get_all_filter_logs(filter_id)
        assert len(before_changes) == 5
        assert len(before_all) == 5
        # revert the chain
        eth_tester.revert_to_snapshot(snapshot_id)
        # emit 4 logs after-reverting
        _emit(6)
        _emit(7)
        _emit(8)
        _emit(9)
        after_changes = eth_tester.get_only_filter_changes(filter_id)
        after_all = eth_tester.get_all_filter_logs(filter_id)
        # changes: only the 4 post-revert logs; all-logs: those 4 plus the
        # 2 pre-snapshot logs (the 3 reverted logs are gone)
        assert len(after_changes) == 4
        assert len(after_all) == 6
def test_reset_to_genesis(self, eth_tester):
origin_latest = eth_tester.get_block_by_number('latest')['number']
origin_pending = eth_tester.get_block_by_number('pending')['number']
eth_tester.mine_blocks(5)
assert eth_tester.get_block_by_number('latest')['number'] == origin_latest + 5
assert eth_tester.get_block_by_number('pending')['number'] == origin_pending + 5
eth_tester.reset_to_genesis()
assert eth_tester.get_block_by_number('latest')['number'] == origin_latest
assert eth_tester.get_block_by_number('pending')['number'] == origin_pending
#
# Filters
#
    def test_block_filter(self, eth_tester):
        """Block filters report newly mined block hashes since the last poll
        (changes) and all blocks since creation (logs), per filter."""
        # first mine 10 blocks in
        eth_tester.mine_blocks(10)
        # setup a filter
        filter_a_id = eth_tester.create_block_filter()
        # mine another 5 blocks
        blocks_10_to_14 = eth_tester.mine_blocks(5)
        # setup another filter
        filter_b_id = eth_tester.create_block_filter()
        # mine another 8 blocks
        blocks_15_to_22 = eth_tester.mine_blocks(8)
        filter_a_changes_part_1 = eth_tester.get_only_filter_changes(filter_a_id)
        filter_a_logs_part_1 = eth_tester.get_all_filter_logs(filter_a_id)
        filter_b_logs_part_1 = eth_tester.get_all_filter_logs(filter_b_id)
        assert len(filter_a_changes_part_1) == 13
        assert len(filter_a_logs_part_1) == 13
        assert len(filter_b_logs_part_1) == 8
        assert set(filter_a_changes_part_1) == set(filter_a_logs_part_1)
        assert set(filter_a_changes_part_1) == set(blocks_10_to_14).union(blocks_15_to_22)
        assert set(filter_b_logs_part_1) == set(blocks_15_to_22)
        # mine another 7 blocks
        blocks_23_to_29 = eth_tester.mine_blocks(7)
        filter_a_changes_part_2 = eth_tester.get_only_filter_changes(filter_a_id)
        filter_b_changes = eth_tester.get_only_filter_changes(filter_b_id)
        filter_a_logs_part_2 = eth_tester.get_all_filter_logs(filter_a_id)
        filter_b_logs_part_2 = eth_tester.get_all_filter_logs(filter_b_id)
        assert len(filter_a_changes_part_2) == 7
        assert len(filter_b_changes) == 15
        assert len(filter_a_logs_part_2) == 20
        assert len(filter_b_logs_part_2) == 15
        assert set(filter_a_changes_part_2) == set(blocks_23_to_29)
        assert set(filter_b_changes) == set(blocks_15_to_22).union(blocks_23_to_29)
        assert set(filter_b_changes) == set(filter_b_logs_part_2)
        assert set(filter_a_logs_part_2) == set(blocks_10_to_14).union(
            blocks_15_to_22,
        ).union(blocks_23_to_29)
        assert set(filter_b_logs_part_2) == set(blocks_15_to_22).union(blocks_23_to_29)
    def test_pending_transaction_filter(self, eth_tester):
        """Pending-transaction filters report new transaction hashes since the
        last poll (changes) and all since creation (logs), per filter."""
        transaction = {
            "from": eth_tester.get_accounts()[0],
            "to": BURN_ADDRESS,
            "gas": 21000,
        }
        # send a few initial transactions
        for _ in range(5):
            eth_tester.send_transaction(transaction)
        # setup a filter
        filter_a_id = eth_tester.create_pending_transaction_filter()
        # send 8 transactions
        transactions_0_to_7 = [
            eth_tester.send_transaction(transaction)
            for _ in range(8)
        ]
        # setup another filter
        filter_b_id = eth_tester.create_pending_transaction_filter()
        # send 5 transactions
        transactions_8_to_12 = [
            eth_tester.send_transaction(transaction)
            for _ in range(5)
        ]
        filter_a_changes_part_1 = eth_tester.get_only_filter_changes(filter_a_id)
        filter_a_logs_part_1 = eth_tester.get_all_filter_logs(filter_a_id)
        filter_b_logs_part_1 = eth_tester.get_all_filter_logs(filter_b_id)
        assert set(filter_a_changes_part_1) == set(filter_a_logs_part_1)
        assert set(filter_a_changes_part_1) == set(transactions_0_to_7).union(transactions_8_to_12)
        assert set(filter_b_logs_part_1) == set(transactions_8_to_12)
        # send 7 transactions
        transactions_13_to_20 = [
            eth_tester.send_transaction(transaction)
            for _ in range(7)
        ]
        filter_a_changes_part_2 = eth_tester.get_only_filter_changes(filter_a_id)
        filter_b_changes = eth_tester.get_only_filter_changes(filter_b_id)
        filter_a_logs_part_2 = eth_tester.get_all_filter_logs(filter_a_id)
        filter_b_logs_part_2 = eth_tester.get_all_filter_logs(filter_b_id)
        assert len(filter_a_changes_part_2) == 7
        assert len(filter_b_changes) == 12
        assert len(filter_a_logs_part_2) == 20
        assert len(filter_b_logs_part_2) == 12
        assert set(filter_a_changes_part_2) == set(transactions_13_to_20)
        assert set(filter_b_changes) == set(filter_b_logs_part_2)
        assert set(filter_b_changes) == set(transactions_8_to_12).union(transactions_13_to_20)
        assert set(filter_a_logs_part_2) == set(transactions_0_to_7).union(
            transactions_8_to_12,
        ).union(transactions_13_to_20)
        assert set(filter_b_logs_part_2) == set(transactions_8_to_12).union(transactions_13_to_20)
    @pytest.mark.parametrize(
        'filter_topics,expected',
        (
            [None, 1],
            [[], 1],
            [['0xf70fe689e290d8ce2b2a388ac28db36fbb0e16a6d89c6804c461f65a1b40bb15'], 1],
            [['0xf70fe689e290d8ce2b2a388ac28db36fbb0e16a6d89c6804c461f65a1b40bb15', None], 1],
            [
                [
                    '0xf70fe689e290d8ce2b2a388ac28db36fbb0e16a6d89c6804c461f65a1b40bb15',
                    '0x' + '00' * 31 + '02',
                ],
                1,
            ],
            [
                [
                    '0xf70fe689e290d8ce2b2a388ac28db36fbb0e16a6d89c6804c461f65a1b40bb15',
                    '0x' + '00' * 31 + '99',
                ],
                0,
            ],
        ),
        ids=[
            'filter None',
            'filter []',
            'filter Event only',
            'filter Event and None',
            'filter Event and argument',
            'filter Event and wrong argument',
        ],
    )
    def test_log_filter_picks_up_new_logs(self, eth_tester, filter_topics, expected):
        """
        Cases to test:
        - filter multiple transactions in one block.
        - filter mined.
        - filter against topics.
        - filter against blocks numbers that are already mined.
        """
        self.skip_if_no_evm_execution()
        emitter_address = _deploy_emitter(eth_tester)
        emit_a_hash = _call_emitter(
            eth_tester,
            emitter_address,
            'logSingle',
            [EMITTER_ENUM['LogSingleWithIndex'], 1],
        )
        eth_tester.get_transaction_receipt(emit_a_hash)
        # only the event emitted *after* filter creation should be matched
        filter_event = eth_tester.create_log_filter(topics=filter_topics)
        _call_emitter(
            eth_tester,
            emitter_address,
            'logSingle',
            [EMITTER_ENUM['LogSingleWithIndex'], 2],
        )
        specific_logs_changes = eth_tester.get_only_filter_changes(filter_event)
        specific_logs_all = eth_tester.get_all_filter_logs(filter_event)
        specific_direct_logs_all = eth_tester.get_logs(topics=filter_topics)
        assert len(specific_logs_changes) == expected
        assert len(specific_logs_all) == expected
        assert len(specific_direct_logs_all) == expected
    def test_log_filter_includes_old_logs(self, eth_tester):
        """
        Cases to test:
        - filter multiple transactions in one block.
        - filter mined.
        - filter against topics.
        - filter against blocks numbers that are already mined.
        """
        self.skip_if_no_evm_execution()
        emitter_address = _deploy_emitter(eth_tester)
        _call_emitter(
            eth_tester,
            emitter_address,
            'logSingle',
            [EMITTER_ENUM['LogSingleWithIndex'], 1],
        )
        # from_block=0 makes the filter include the log emitted above
        filter_any_id = eth_tester.create_log_filter(from_block=0)
        _call_emitter(
            eth_tester,
            emitter_address,
            'logSingle',
            [EMITTER_ENUM['LogSingleWithIndex'], 2],
        )
        logs_changes = eth_tester.get_only_filter_changes(filter_any_id)
        logs_all = eth_tester.get_all_filter_logs(filter_any_id)
        direct_logs_all = eth_tester.get_logs(from_block=0)
        assert len(logs_changes) == len(logs_all) == len(direct_logs_all) == 2
def test_delete_filter(self, eth_tester):
self.skip_if_no_evm_execution()
filter_id = eth_tester.create_block_filter()
eth_tester.get_all_filter_logs(filter_id)
eth_tester.get_only_filter_changes(filter_id)
eth_tester.delete_filter(filter_id)
with pytest.raises(FilterNotFound):
eth_tester.get_all_filter_logs(filter_id)
with pytest.raises(FilterNotFound):
eth_tester.get_only_filter_changes(filter_id)
with pytest.raises(FilterNotFound):
eth_tester.delete_filter(filter_id)
with pytest.raises(FilterNotFound):
eth_tester.delete_filter(12345)
#
# Time Travel
#
def test_time_traveling(self, eth_tester):
# first mine a few blocks
eth_tester.mine_blocks(3)
# check the time
before_timestamp = eth_tester.get_block_by_number('pending')['timestamp']
# now travel forward 2 minutes
eth_tester.time_travel(before_timestamp + 120)
# now check the time
after_timestamp = eth_tester.get_block_by_number('pending')['timestamp']
assert before_timestamp + 120 == after_timestamp
def test_time_traveling_backwards_not_allowed(self, eth_tester):
# first mine a few blocks
eth_tester.mine_blocks(3)
# check the time
before_timestamp = eth_tester.get_block_by_number('pending')['timestamp']
# now travel forward 2 minutes
with pytest.raises(ValidationError):
eth_tester.time_travel(before_timestamp - 10)
#
# Fork Configuration
#
    @pytest.mark.parametrize(
        'fork_name,expected_init_block,set_to_block',
        (
            (FORK_HOMESTEAD, None, 12345),
            (FORK_DAO, None, 12345),
            (FORK_SPURIOUS_DRAGON, None, 12345),
            (FORK_TANGERINE_WHISTLE, None, 12345),
            (FORK_BYZANTIUM, None, 12345),
        )
    )
    def test_getting_and_setting_fork_blocks(self,
                                             eth_tester,
                                             fork_name,
                                             expected_init_block,
                                             set_to_block):
        """Fork activation blocks can be read and reassigned per fork name."""
        if eth_tester.backend.__class__.__name__ == 'PyEthereum16Backend':
            # PyEthereum 1.6 does not know about Byzantium at all
            if fork_name == FORK_BYZANTIUM:
                with pytest.raises(UnknownFork):
                    eth_tester.get_fork_block(fork_name)
                with pytest.raises(UnknownFork):
                    eth_tester.set_fork_block(fork_name, set_to_block)
                return
        # TODO: this should really test something about the EVM actually using
        # the *right* rules but for now this should suffice.
        init_fork_block = eth_tester.get_fork_block(fork_name)
        if fork_name == FORK_DAO:
            # support pyethereum2.0
            assert init_fork_block in {expected_init_block, 999999999999999}
        else:
            assert init_fork_block == expected_init_block
        eth_tester.set_fork_block(fork_name, set_to_block)
        after_set_fork_block = eth_tester.get_fork_block(fork_name)
        assert after_set_fork_block == set_to_block
| 38.120407 | 2,437 | 0.676307 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.