hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f724061c6d8dbec2acb290ec23c12a1f23882924 | 2,260 | py | Python | scripts/quickstart_tooling_dpg/template/setup.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | [
"MIT"
] | 1 | 2022-02-01T18:50:12.000Z | 2022-02-01T18:50:12.000Z | scripts/quickstart_tooling_dpg/template/setup.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | [
"MIT"
] | null | null | null | scripts/quickstart_tooling_dpg/template/setup.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import os
import re
from setuptools import setup, find_packages
# Change the PACKAGE_NAME only to change folder and different name
PACKAGE_NAME = "{{ package_name }}"
PACKAGE_PPRINT_NAME = "{{ package_pprint_name }}"
# a-b-c => a/b/c
package_folder_path = PACKAGE_NAME.replace("-", "/")
# Version extraction inspired from 'requests'
version_file = os.path.join(package_folder_path, "_version.py")
with open(version_file, "r") as fd:
    file_contents = fd.read()
# Search the whole file once; raises if the VERSION assignment is malformed.
version = re.search(
    r'^VERSION\s*=\s*[\'"]([^\'"]*)[\'"]', file_contents, re.MULTILINE
).group(1)
if not version:
    raise RuntimeError("Cannot find version information")
# Read the long description up front inside a context manager: the original
# bare open("README.md", "r").read() never closed the file handle, which
# leaks the descriptor and emits ResourceWarning on CPython with warnings
# enabled.
with open("README.md", "r") as readme_file:
    long_description = readme_file.read()

setup(
    name=PACKAGE_NAME,
    version=version,
    description="Microsoft {} Client Library for Python".format(PACKAGE_PPRINT_NAME),
    long_description=long_description,
    long_description_content_type="text/markdown",
    license="MIT License",
    author="Microsoft Corporation",
    author_email="azpysdkhelp@microsoft.com",
    url="https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/{{ folder_parent }}/{{ package_name }}",
    classifiers=[
        "Development Status :: 4 - Beta",
        "Programming Language :: Python",
        "Programming Language :: Python :: 3 :: Only",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Programming Language :: Python :: 3.10",
        "License :: OSI Approved :: MIT License",
    ],
    zip_safe=False,
    packages=find_packages(
        exclude=[
            # Exclude packages that will be covered by PEP420 or nspkg
            "{{ folder_first }}",
            "{{ folder_first }}.{{ folder_second }}",
        ]
    ),
    install_requires=[
        "azure-core<2.0.0,>=1.23.0",
        "msrest>=0.6.21",
    ],
    python_requires=">=3.6",
)
| 35.3125 | 109 | 0.592035 |
import os
import re
from setuptools import setup, find_packages
PACKAGE_NAME = "{{ package_name }}"
PACKAGE_PPRINT_NAME = "{{ package_pprint_name }}"
package_folder_path = PACKAGE_NAME.replace("-", "/")
with open(os.path.join(package_folder_path, "_version.py"), "r") as fd:
version = re.search(
r'^VERSION\s*=\s*[\'"]([^\'"]*)[\'"]', fd.read(), re.MULTILINE
).group(1)
if not version:
raise RuntimeError("Cannot find version information")
setup(
name=PACKAGE_NAME,
version=version,
description="Microsoft {} Client Library for Python".format(PACKAGE_PPRINT_NAME),
long_description=open("README.md", "r").read(),
long_description_content_type="text/markdown",
license="MIT License",
author="Microsoft Corporation",
author_email="azpysdkhelp@microsoft.com",
url="https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/{{ folder_parent }}/{{ package_name }}",
classifiers=[
"Development Status :: 4 - Beta",
"Programming Language :: Python",
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"License :: OSI Approved :: MIT License",
],
zip_safe=False,
packages=find_packages(
exclude=[
# Exclude packages that will be covered by PEP420 or nspkg
"{{ folder_first }}",
"{{ folder_first }}.{{ folder_second }}",
]
),
install_requires=[
"azure-core<2.0.0,>=1.23.0",
"msrest>=0.6.21",
],
python_requires=">=3.6",
)
| true | true |
f72406b04989bee31f7efd503e2851a5ff5375e6 | 3,344 | py | Python | recommendation_system_demos/Basic-CMN-Demo/util/gmf.py | sweetpand/tensorflow_mri | 7a483cbbbe515ad395928311759505707bd72503 | [
"MIT"
] | 2 | 2021-02-06T11:27:17.000Z | 2021-05-29T10:45:22.000Z | recommendation_system_demos/Basic-CMN-Demo/util/gmf.py | sweetpand/tensorflow_mri | 7a483cbbbe515ad395928311759505707bd72503 | [
"MIT"
] | null | null | null | recommendation_system_demos/Basic-CMN-Demo/util/gmf.py | sweetpand/tensorflow_mri | 7a483cbbbe515ad395928311759505707bd72503 | [
"MIT"
] | 1 | 2020-10-23T20:26:36.000Z | 2020-10-23T20:26:36.000Z | import sonnet as snt
import tensorflow as tf
from util.helper import GraphKeys, add_to_collection
from util.layers import DenseLayer, LossLayer, OptimizerLayer, ModelBase
class PairwiseGMF(ModelBase):
    """Generalized Matrix Factorization model trained with a pairwise loss.

    Users and items are embedded, a (user, item) pair is scored by a
    single-output dense layer applied to the element-wise product of the two
    embeddings, and training ranks each observed item above a sampled
    negative item via LossLayer.
    """

    def __init__(self, config):
        """Build the full TF graph: placeholders, embeddings, score/loss ops.

        :param config: configuration object; this code reads
            ``user_count``, ``item_count``, ``embed_size``, ``l2`` and
            ``optimizer`` from it.
        """
        # NOTE(review): the base-class __init__ is deliberately skipped here;
        # confirm ModelBase.__init__ has no required side effects.
        # super(PairwiseGMF, self).__init__(config)
        self.config = config
        self._activation_fn = tf.nn.relu
        self._embedding_initializers = {
            'embeddings': tf.truncated_normal_initializer(stddev=0.01),
        }

        self._embedding_regularizers = {}

        self._initializers = {
            "w": tf.contrib.layers.xavier_initializer(),
        }

        self._regularizers = {
            'w': tf.contrib.layers.l2_regularizer(config.l2)
        }

        # Construction order matters: placeholders feed the embedding
        # lookups, which in turn feed the score/loss ops.
        self._construct_placeholders()
        self._construct_weights()
        self._construct()
        tf.summary.scalar('Model/Loss', tf.get_collection(GraphKeys.LOSSES)[0])
        self.summary = tf.summary.merge_all()

    def _construct(self):
        """
        Construct the model; main part of it goes here
        """
        # Single-unit dense layer shared by positive and negative scoring:
        # maps the element-wise user*item product to one scalar score.
        self.v = DenseLayer(1, False, tf.nn.relu, initializers=self._initializers,
                            regularizers=self._regularizers, name='OutputVector')
        self.score = tf.squeeze(self.v(self._cur_user * self._cur_item))
        negative_output = tf.squeeze(self.v(self._cur_user * self._cur_item_negative))
        tf.add_to_collection(GraphKeys.PREDICTION, self.score)
        # Pairwise loss over (positive score, negative score).
        self.loss = LossLayer()(self.score, negative_output)
        self._optimizer = OptimizerLayer(self.config.optimizer, clip=5.0,
                                         params={})
        self.train = self._optimizer(self.loss)

    def _construct_weights(self):
        """
        Constructs the user/item memories and user/item external memory/outputs

        Also add the embedding lookups
        """
        self.user_memory = snt.Embed(self.config.user_count, self.config.embed_size,
                                     initializers=self._embedding_initializers,
                                     regularizers=self._embedding_regularizers,
                                     name='MemoryEmbed')

        self.item_memory = snt.Embed(self.config.item_count,
                                     self.config.embed_size,
                                     initializers=self._embedding_initializers,
                                     regularizers=self._embedding_regularizers,
                                     name="ItemMemory")

        # [batch, embedding size]
        self._cur_user = self.user_memory(self.input_users)

        # Item memories a query
        self._cur_item = self.item_memory(self.input_items)
        self._cur_item_negative = self.item_memory(self.input_items_negative)

    def _construct_placeholders(self):
        """Create int32 id placeholders for user, item and negative item."""
        self.input_users = tf.placeholder(tf.int32, [None], 'UserID')
        self.input_items = tf.placeholder(tf.int32, [None], 'ItemID')
        self.input_items_negative = tf.placeholder(tf.int32, [None], 'NegativeItemID')

        # Add our placeholders
        add_to_collection(GraphKeys.PLACEHOLDER, [self.input_users,
                                                  self.input_items,
                                                  self.input_items_negative])
| 39.809524 | 97 | 0.592105 | import sonnet as snt
import tensorflow as tf
from util.helper import GraphKeys, add_to_collection
from util.layers import DenseLayer, LossLayer, OptimizerLayer, ModelBase
class PairwiseGMF(ModelBase):
def __init__(self, config):
self.config = config
self._activation_fn = tf.nn.relu
self._embedding_initializers = {
'embeddings': tf.truncated_normal_initializer(stddev=0.01),
}
self._embedding_regularizers = {}
self._initializers = {
"w": tf.contrib.layers.xavier_initializer(),
}
self._regularizers = {
'w': tf.contrib.layers.l2_regularizer(config.l2)
}
self._construct_placeholders()
self._construct_weights()
self._construct()
tf.summary.scalar('Model/Loss', tf.get_collection(GraphKeys.LOSSES)[0])
self.summary = tf.summary.merge_all()
def _construct(self):
self.v = DenseLayer(1, False, tf.nn.relu, initializers=self._initializers,
regularizers=self._regularizers, name='OutputVector')
self.score = tf.squeeze(self.v(self._cur_user * self._cur_item))
negative_output = tf.squeeze(self.v(self._cur_user * self._cur_item_negative))
tf.add_to_collection(GraphKeys.PREDICTION, self.score)
self.loss = LossLayer()(self.score, negative_output)
self._optimizer = OptimizerLayer(self.config.optimizer, clip=5.0,
params={})
self.train = self._optimizer(self.loss)
def _construct_weights(self):
self.user_memory = snt.Embed(self.config.user_count, self.config.embed_size,
initializers=self._embedding_initializers,
regularizers=self._embedding_regularizers,
name='MemoryEmbed')
self.item_memory = snt.Embed(self.config.item_count,
self.config.embed_size,
initializers=self._embedding_initializers,
regularizers=self._embedding_regularizers,
name="ItemMemory")
self._cur_user = self.user_memory(self.input_users)
self._cur_item = self.item_memory(self.input_items)
self._cur_item_negative = self.item_memory(self.input_items_negative)
def _construct_placeholders(self):
self.input_users = tf.placeholder(tf.int32, [None], 'UserID')
self.input_items = tf.placeholder(tf.int32, [None], 'ItemID')
self.input_items_negative = tf.placeholder(tf.int32, [None], 'NegativeItemID')
add_to_collection(GraphKeys.PLACEHOLDER, [self.input_users,
self.input_items,
self.input_items_negative])
| true | true |
f724082117396a7bfcfccb74e93f50fbf7372564 | 1,382 | py | Python | venv/lib/python3.8/site-packages/vsts/build/v4_1/models/task_definition_reference.py | amcclead7336/Enterprise_Data_Science_Final | ccdc0aa08d4726bf82d71c11a1cc0c63eb301a28 | [
"Unlicense",
"MIT"
] | null | null | null | venv/lib/python3.8/site-packages/vsts/build/v4_1/models/task_definition_reference.py | amcclead7336/Enterprise_Data_Science_Final | ccdc0aa08d4726bf82d71c11a1cc0c63eb301a28 | [
"Unlicense",
"MIT"
] | null | null | null | venv/lib/python3.8/site-packages/vsts/build/v4_1/models/task_definition_reference.py | amcclead7336/Enterprise_Data_Science_Final | ccdc0aa08d4726bf82d71c11a1cc0c63eb301a28 | [
"Unlicense",
"MIT"
] | 2 | 2021-05-23T16:46:31.000Z | 2021-05-26T23:51:09.000Z | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class TaskDefinitionReference(Model):
    """TaskDefinitionReference.

    msrest serialization model referencing a build task definition.
    (This file is generated; ``_attribute_map`` drives the JSON wire format.)

    :param definition_type: The type of task (task or task group).
    :type definition_type: str
    :param id: The ID of the task.
    :type id: str
    :param version_spec: The version of the task.
    :type version_spec: str
    """

    # Maps Python attribute names to their JSON keys and types for msrest.
    _attribute_map = {
        'definition_type': {'key': 'definitionType', 'type': 'str'},
        'id': {'key': 'id', 'type': 'str'},
        'version_spec': {'key': 'versionSpec', 'type': 'str'}
    }

    def __init__(self, definition_type=None, id=None, version_spec=None):
        super(TaskDefinitionReference, self).__init__()
        self.definition_type = definition_type
        self.id = id
        self.version_spec = version_spec
| 40.647059 | 95 | 0.523155 |
from msrest.serialization import Model
class TaskDefinitionReference(Model):
_attribute_map = {
'definition_type': {'key': 'definitionType', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'version_spec': {'key': 'versionSpec', 'type': 'str'}
}
def __init__(self, definition_type=None, id=None, version_spec=None):
super(TaskDefinitionReference, self).__init__()
self.definition_type = definition_type
self.id = id
self.version_spec = version_spec
| true | true |
f72409de32069ee9ca6e49c33f64c6ae4fe101a4 | 1,177 | py | Python | google/cloud/ids/v1/ids-v1-py/google/cloud/ids_v1/__init__.py | googleapis/googleapis-gen | d84824c78563d59b0e58d5664bfaa430e9ad7e7a | [
"Apache-2.0"
] | 7 | 2021-02-21T10:39:41.000Z | 2021-12-07T07:31:28.000Z | google/cloud/ids/v1/ids-v1-py/google/cloud/ids_v1/__init__.py | googleapis/googleapis-gen | d84824c78563d59b0e58d5664bfaa430e9ad7e7a | [
"Apache-2.0"
] | 6 | 2021-02-02T23:46:11.000Z | 2021-11-15T01:46:02.000Z | google/cloud/ids/v1/ids-v1-py/google/cloud/ids_v1/__init__.py | googleapis/googleapis-gen | d84824c78563d59b0e58d5664bfaa430e9ad7e7a | [
"Apache-2.0"
] | 4 | 2021-01-28T23:25:45.000Z | 2021-08-30T01:55:16.000Z | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .services.ids import IDSClient
from .services.ids import IDSAsyncClient
from .types.ids import CreateEndpointRequest
from .types.ids import DeleteEndpointRequest
from .types.ids import Endpoint
from .types.ids import GetEndpointRequest
from .types.ids import ListEndpointsRequest
from .types.ids import ListEndpointsResponse
from .types.ids import OperationMetadata
# Public re-export surface of the google.cloud.ids_v1 package: the sync and
# async clients plus the request/response/message types imported above.
__all__ = (
    'IDSAsyncClient',
    'CreateEndpointRequest',
    'DeleteEndpointRequest',
    'Endpoint',
    'GetEndpointRequest',
    'IDSClient',
    'ListEndpointsRequest',
    'ListEndpointsResponse',
    'OperationMetadata',
)
| 30.179487 | 74 | 0.782498 |
from .services.ids import IDSClient
from .services.ids import IDSAsyncClient
from .types.ids import CreateEndpointRequest
from .types.ids import DeleteEndpointRequest
from .types.ids import Endpoint
from .types.ids import GetEndpointRequest
from .types.ids import ListEndpointsRequest
from .types.ids import ListEndpointsResponse
from .types.ids import OperationMetadata
__all__ = (
'IDSAsyncClient',
'CreateEndpointRequest',
'DeleteEndpointRequest',
'Endpoint',
'GetEndpointRequest',
'IDSClient',
'ListEndpointsRequest',
'ListEndpointsResponse',
'OperationMetadata',
)
| true | true |
f7240a4dd4cae9e22522251b2018d0cd567af902 | 5,982 | py | Python | test/integration/ggrc_basic_permissions/test_permissions_loading.py | sfarbotka/ggrc-core | ef7aae6bc09ad2f53a2414f643572e07d689784a | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | test/integration/ggrc_basic_permissions/test_permissions_loading.py | sfarbotka/ggrc-core | ef7aae6bc09ad2f53a2414f643572e07d689784a | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | test/integration/ggrc_basic_permissions/test_permissions_loading.py | sfarbotka/ggrc-core | ef7aae6bc09ad2f53a2414f643572e07d689784a | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # Copyright (C) 2019 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Test user permissions loading and caching."""
import mock
from appengine import base
from ggrc.models import all_models
from ggrc.cache import utils as cache_utils
from integration.ggrc import TestCase, generator
from integration.ggrc.api_helper import Api
from integration.ggrc.models import factories
# stub for module, which cannot be imported in global scope bacause of
# import error
ggrc_basic_permissions = None # pylint: invalid-name
def _lazy_load_module():
    """Load required module in runtime to prevent import error.

    Rebinds the module-level ``ggrc_basic_permissions`` placeholder to the
    real module once the application is far enough along for the import to
    succeed.
    """
    global ggrc_basic_permissions  # pylint: disable=global-statement,invalid-name
    import ggrc_basic_permissions  # pylint: disable=redefined-outer-name
@base.with_memcache
class TestMemcacheBase(TestCase):
    """Base class for permissions tests"""

    def setUp(self):
        """Prepare the test case and resolve the lazily imported module."""
        super(TestMemcacheBase, self).setUp()
        _lazy_load_module()
class TestPermissionsLoading(TestMemcacheBase):
    """Test user permissions loading."""

    def setUp(self):
        """Create a Creator user and one Control to issue GET requests for."""
        super(TestPermissionsLoading, self).setUp()
        self.api = Api()
        self.generator = generator.ObjectGenerator()
        self.control_id = factories.ControlFactory().id
        _, user = self.generator.generate_person(user_role="Creator")
        self.api.set_user(user)

    def test_permissions_loading(self):
        """Test if permissions created only once for GET requests."""
        # Wrap the real store function so we can count how often permissions
        # are written to memcache without changing behavior.
        with mock.patch(
            "ggrc_basic_permissions.store_results_into_memcache",
            side_effect=ggrc_basic_permissions.store_results_into_memcache
        ) as store_perm:
            self.api.get(all_models.Control, self.control_id)
            store_perm.assert_called_once()
            store_perm.call_count = 0
            # On second GET permissions should be loaded from memcache
            # but not created from scratch.
            self.api.get(all_models.Control, self.control_id)
            store_perm.assert_not_called()
class TestPermissionsCacheFlushing(TestMemcacheBase):
    """Test user permissions loading."""

    def setUp(self):
        super(TestPermissionsCacheFlushing, self).setUp()
        self.api = Api()

    @staticmethod
    def load_perms(user_id, new_perms):
        """Emulate procedure to load permissions.

        Patches the DB loader to return ``new_perms`` and loads permissions
        for a fake user with id ``user_id``, returning whatever
        ``load_permissions_for`` produced (DB result or cached copy).
        """
        with mock.patch(
            'ggrc_basic_permissions._load_permissions_from_database',
            return_value=new_perms
        ):
            mock_user = mock.Mock()
            mock_user.id = user_id

            return ggrc_basic_permissions.load_permissions_for(mock_user)

    def test_memcache_flushing(self):
        """Test if memcache is properly cleaned on object creation

        Procedure to test functionality:
        1) load and permissions for specific user and store them in memcahe
        2) emulate new object creation, which cleans permissions in memcache
        3) make request which tries to get cache for permissions from memcache

        Also, it's assumed that 2 or more GGRC workers are running
        """
        client = cache_utils.get_cache_manager().cache_object.memcache_client
        client.flush_all()

        # load perms and store them in memcache
        self.load_perms(11, {"11": "a"})

        # emulate situation when a new object is created
        # this procedure cleans memcache in the end
        cache_utils.clear_permission_cache()

        # emulate work of worker #1 - get permissions for our user
        # the first step - check permissions in memcache
        ggrc_basic_permissions.query_memcache(client, "permissions:11")
        # step 2 - load permissions from DB and save then into memcahe
        # this step is omitted

        # load permission on behalf of worker #2, before step 2 of worker #1
        result = self.load_perms(11, {"11": "b"})

        # ensure that new permissions were returned instead of old ones
        self.assertEquals(result, {"11": "b"})

    def test_permissions_flush_on_post(self):
        """Test that permissions in memcache are cleaned after POST request."""
        user = self.create_user_with_role("Creator")
        self.api.set_user(user)
        # Warm the cache: GET /permissions stores this user's permissions.
        self.api.client.get("/permissions")
        perm_ids = self.memcache_client.get('permissions:list')
        self.assertEqual(perm_ids, {'permissions:{}'.format(user.id)})
        response = self.api.post(
            all_models.Objective,
            {"objective": {"title": "Test Objective", "context": None}}
        )
        self.assert_status(response, 201)
        # Object creation must have invalidated the cached permission list.
        perm_ids = self.memcache_client.get('permissions:list')
        self.assertIsNone(perm_ids)

    def test_permissions_flush_on_put(self):
        """Test that permissions in memcache are cleaned after PUT request."""
        with factories.single_commit():
            user = self.create_user_with_role("Creator")
            objective = factories.ObjectiveFactory()
            objective_id = objective.id
            objective.add_person_with_role_name(user, "Admin")
        self.api.set_user(user)
        # Warm the cache before mutating the object.
        self.api.client.get("/permissions")
        perm_ids = self.memcache_client.get('permissions:list')
        self.assertEqual(perm_ids, {'permissions:{}'.format(user.id)})
        objective = all_models.Objective.query.get(objective_id)
        response = self.api.put(objective, {"title": "new title"})
        self.assert200(response)
        # The update must have invalidated the cached permission list.
        perm_ids = self.memcache_client.get('permissions:list')
        self.assertIsNone(perm_ids)

    def test_permissions_flush_on_delete(self):
        """Test that permissions in memcache are cleaned after DELETE request."""
        with factories.single_commit():
            user = self.create_user_with_role("Creator")
            objective = factories.ObjectiveFactory()
            objective.add_person_with_role_name(user, "Admin")
            objective_id = objective.id
        self.api.set_user(user)
        # Warm the cache before deleting the object.
        self.api.client.get("/permissions")
        perm_ids = self.memcache_client.get('permissions:list')
        self.assertEqual(perm_ids, {'permissions:{}'.format(user.id)})
        objective = all_models.Objective.query.get(objective_id)
        response = self.api.delete(objective)
        self.assert200(response)
        # The deletion must have invalidated the cached permission list.
        perm_ids = self.memcache_client.get('permissions:list')
        self.assertIsNone(perm_ids)
| 33.049724 | 78 | 0.726847 |
import mock
from appengine import base
from ggrc.models import all_models
from ggrc.cache import utils as cache_utils
from integration.ggrc import TestCase, generator
from integration.ggrc.api_helper import Api
from integration.ggrc.models import factories
ggrc_basic_permissions = None
def _lazy_load_module():
global ggrc_basic_permissions
import ggrc_basic_permissions
@base.with_memcache
class TestMemcacheBase(TestCase):
def setUp(self):
super(TestMemcacheBase, self).setUp()
_lazy_load_module()
class TestPermissionsLoading(TestMemcacheBase):
def setUp(self):
super(TestPermissionsLoading, self).setUp()
self.api = Api()
self.generator = generator.ObjectGenerator()
self.control_id = factories.ControlFactory().id
_, user = self.generator.generate_person(user_role="Creator")
self.api.set_user(user)
def test_permissions_loading(self):
with mock.patch(
"ggrc_basic_permissions.store_results_into_memcache",
side_effect=ggrc_basic_permissions.store_results_into_memcache
) as store_perm:
self.api.get(all_models.Control, self.control_id)
store_perm.assert_called_once()
store_perm.call_count = 0
self.api.get(all_models.Control, self.control_id)
store_perm.assert_not_called()
class TestPermissionsCacheFlushing(TestMemcacheBase):
def setUp(self):
super(TestPermissionsCacheFlushing, self).setUp()
self.api = Api()
@staticmethod
def load_perms(user_id, new_perms):
with mock.patch(
'ggrc_basic_permissions._load_permissions_from_database',
return_value=new_perms
):
mock_user = mock.Mock()
mock_user.id = user_id
return ggrc_basic_permissions.load_permissions_for(mock_user)
def test_memcache_flushing(self):
client = cache_utils.get_cache_manager().cache_object.memcache_client
client.flush_all()
self.load_perms(11, {"11": "a"})
cache_utils.clear_permission_cache()
query_memcache(client, "permissions:11")
, {"11": "b"})
self.assertEquals(result, {"11": "b"})
def test_permissions_flush_on_post(self):
user = self.create_user_with_role("Creator")
self.api.set_user(user)
self.api.client.get("/permissions")
perm_ids = self.memcache_client.get('permissions:list')
self.assertEqual(perm_ids, {'permissions:{}'.format(user.id)})
response = self.api.post(
all_models.Objective,
{"objective": {"title": "Test Objective", "context": None}}
)
self.assert_status(response, 201)
perm_ids = self.memcache_client.get('permissions:list')
self.assertIsNone(perm_ids)
def test_permissions_flush_on_put(self):
with factories.single_commit():
user = self.create_user_with_role("Creator")
objective = factories.ObjectiveFactory()
objective_id = objective.id
objective.add_person_with_role_name(user, "Admin")
self.api.set_user(user)
self.api.client.get("/permissions")
perm_ids = self.memcache_client.get('permissions:list')
self.assertEqual(perm_ids, {'permissions:{}'.format(user.id)})
objective = all_models.Objective.query.get(objective_id)
response = self.api.put(objective, {"title": "new title"})
self.assert200(response)
perm_ids = self.memcache_client.get('permissions:list')
self.assertIsNone(perm_ids)
def test_permissions_flush_on_delete(self):
with factories.single_commit():
user = self.create_user_with_role("Creator")
objective = factories.ObjectiveFactory()
objective.add_person_with_role_name(user, "Admin")
objective_id = objective.id
self.api.set_user(user)
self.api.client.get("/permissions")
perm_ids = self.memcache_client.get('permissions:list')
self.assertEqual(perm_ids, {'permissions:{}'.format(user.id)})
objective = all_models.Objective.query.get(objective_id)
response = self.api.delete(objective)
self.assert200(response)
perm_ids = self.memcache_client.get('permissions:list')
self.assertIsNone(perm_ids)
| true | true |
f7240b0b0e4e09e2288ea3093f1886aef31b452b | 2,852 | py | Python | xlsxwriter/test/worksheet/test_write_sheet_views9.py | DeltaEpsilon7787/XlsxWriter | 550b9c5bd678c861dcc9f6f4072b33a69566e065 | [
"BSD-2-Clause-FreeBSD"
] | 2,766 | 2015-01-02T17:36:42.000Z | 2022-03-31T09:23:30.000Z | xlsxwriter/test/worksheet/test_write_sheet_views9.py | DeltaEpsilon7787/XlsxWriter | 550b9c5bd678c861dcc9f6f4072b33a69566e065 | [
"BSD-2-Clause-FreeBSD"
] | 683 | 2015-01-03T09:55:02.000Z | 2022-03-31T07:18:15.000Z | xlsxwriter/test/worksheet/test_write_sheet_views9.py | jmcnamara/test_py_github_actions | d445d5d98b038b63453dd70c9c1a9ca1b325cb47 | [
"BSD-2-Clause-FreeBSD"
] | 636 | 2015-01-05T01:57:08.000Z | 2022-03-25T18:42:41.000Z | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2021, John McNamara, jmcnamara@cpan.org
#
import unittest
from io import StringIO
from ...worksheet import Worksheet
class TestWriteSheetViews(unittest.TestCase):
    """
    Test the Worksheet _write_sheet_views() method.

    With explicit top/left cells.

    """

    def setUp(self):
        self.fh = StringIO()
        self.worksheet = Worksheet()
        self.worksheet._set_filehandle(self.fh)

    def _check_sheet_views(self, selection, panes, exp):
        """Apply *selection* and split *panes* to the worksheet, write the
        sheetViews element and assert the generated XML equals *exp*.

        All four tests below followed the same 7-line sequence; it is
        factored out here so each test states only its inputs and expected
        output.
        """
        self.worksheet.select()
        self.worksheet.set_selection(selection)
        self.worksheet.split_panes(*panes)

        self.worksheet._write_sheet_views()

        got = self.fh.getvalue()
        self.assertEqual(got, exp)

    def test_write_sheet_views1(self):
        """Test the _write_sheet_views() method with split panes + selection"""
        self._check_sheet_views(
            'A2', (15, 0, 20, 0),
            '<sheetViews><sheetView tabSelected="1" workbookViewId="0"><pane ySplit="600" topLeftCell="A21" activePane="bottomLeft"/><selection pane="bottomLeft" activeCell="A2" sqref="A2"/></sheetView></sheetViews>')

    def test_write_sheet_views2(self):
        """Test the _write_sheet_views() method with split panes + selection"""
        self._check_sheet_views(
            'A21', (15, 0, 20, 0),
            '<sheetViews><sheetView tabSelected="1" workbookViewId="0"><pane ySplit="600" topLeftCell="A21" activePane="bottomLeft"/><selection pane="bottomLeft" activeCell="A21" sqref="A21"/></sheetView></sheetViews>')

    def test_write_sheet_views3(self):
        """Test the _write_sheet_views() method with split panes + selection"""
        self._check_sheet_views(
            'B1', (0, 8.43, 0, 4),
            '<sheetViews><sheetView tabSelected="1" workbookViewId="0"><pane xSplit="1350" topLeftCell="E1" activePane="topRight"/><selection pane="topRight" activeCell="B1" sqref="B1"/></sheetView></sheetViews>')

    def test_write_sheet_views4(self):
        """Test the _write_sheet_views() method with split panes + selection"""
        self._check_sheet_views(
            'E1', (0, 8.43, 0, 4),
            '<sheetViews><sheetView tabSelected="1" workbookViewId="0"><pane xSplit="1350" topLeftCell="E1" activePane="topRight"/><selection pane="topRight" activeCell="E1" sqref="E1"/></sheetView></sheetViews>')
| 33.952381 | 220 | 0.650421 | true | true | |
f7240b10674288a6a9ebc3b398c4ef8d48a1ea1a | 1,044 | py | Python | psppi/users/migrations/0002_auto_20160901_0144.py | the-fool/psppi | a280cfc6f294c85119b0b5a326d2930300d6ab93 | [
"MIT"
] | null | null | null | psppi/users/migrations/0002_auto_20160901_0144.py | the-fool/psppi | a280cfc6f294c85119b0b5a326d2930300d6ab93 | [
"MIT"
] | null | null | null | psppi/users/migrations/0002_auto_20160901_0144.py | the-fool/psppi | a280cfc6f294c85119b0b5a326d2930300d6ab93 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.9.9 on 2016-09-01 01:44
from __future__ import unicode_literals
import django.contrib.auth.models
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration (Django 1.9.9, 2016-09-01).

    Attaches the stock ``UserManager`` to the custom user model and
    re-declares the ``username`` field's validators and error messages.
    """

    dependencies = [
        ('users', '0001_initial'),
    ]

    operations = [
        # Use Django's built-in UserManager as the default `objects` manager.
        migrations.AlterModelManagers(
            name='user',
            managers=[
                ('objects', django.contrib.auth.models.UserManager()),
            ],
        ),
        # username: unique, max 30 chars, restricted to letters/digits/@/./+/-/_
        # via RegexValidator, with an explicit duplicate-username error message.
        migrations.AlterField(
            model_name='user',
            name='username',
            field=models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 30 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=30, unique=True, validators=[django.core.validators.RegexValidator('^[\\w.@+-]+$', 'Enter a valid username. This value may contain only letters, numbers and @/./+/-/_ characters.')], verbose_name='username'),
        ),
    ]
| 36 | 409 | 0.632184 |
from __future__ import unicode_literals
import django.contrib.auth.models
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0001_initial'),
]
operations = [
migrations.AlterModelManagers(
name='user',
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
migrations.AlterField(
model_name='user',
name='username',
field=models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 30 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=30, unique=True, validators=[django.core.validators.RegexValidator('^[\\w.@+-]+$', 'Enter a valid username. This value may contain only letters, numbers and @/./+/-/_ characters.')], verbose_name='username'),
),
]
| true | true |
f7240b979131fa5354ce144e34961d7ca82c0818 | 575 | py | Python | sagas/tests/langs/jieba_procs.py | samlet/stack | 47db17fd4fdab264032f224dca31a4bb1d19b754 | [
"Apache-2.0"
] | 3 | 2020-01-11T13:55:38.000Z | 2020-08-25T22:34:15.000Z | sagas/tests/langs/jieba_procs.py | samlet/stack | 47db17fd4fdab264032f224dca31a4bb1d19b754 | [
"Apache-2.0"
] | null | null | null | sagas/tests/langs/jieba_procs.py | samlet/stack | 47db17fd4fdab264032f224dca31a4bb1d19b754 | [
"Apache-2.0"
] | 1 | 2021-01-01T05:21:44.000Z | 2021-01-01T05:21:44.000Z | import jieba
import jieba.posseg as pseg
def user_dict():
    """Load a user dictionary from the project resources and segment a
    sample sentence with it."""
    from sagas.conf import resource_path
    dictf = resource_path('dict_zh.txt')
    jieba.load_userdict(dictf)
    seg_list = jieba.cut("列出所有的采购订单")  # default is accurate (precise) mode
    print(", ".join(seg_list))
def user_words():
jieba.add_word('寄账单地址', tag='typ')
jieba.add_word('新建', tag='srv')
seg_list = jieba.cut("列出所有的寄账单地址") # 默认是精确模式
print(", ".join(seg_list))
words = pseg.cut("新建所有的寄账单地址")
for word, flag in words:
print('%s %s' % (word, flag))
if __name__ == '__main__':
user_dict()
| 23.958333 | 49 | 0.64 | import jieba
import jieba.posseg as pseg
def user_dict():
from sagas.conf import resource_path
dictf = resource_path('dict_zh.txt')
jieba.load_userdict(dictf)
seg_list = jieba.cut("列出所有的采购订单")
print(", ".join(seg_list))
def user_words():
jieba.add_word('寄账单地址', tag='typ')
jieba.add_word('新建', tag='srv')
seg_list = jieba.cut("列出所有的寄账单地址")
print(", ".join(seg_list))
words = pseg.cut("新建所有的寄账单地址")
for word, flag in words:
print('%s %s' % (word, flag))
if __name__ == '__main__':
user_dict()
| true | true |
f7240cd9df0ce707381f54c94daa94f450f5129e | 2,240 | py | Python | DBUIScripts/db_update.py | chaoannricardo/NTU_CARDO_Database | 5fbfa1383f2e65a04fabd863c68373f45bbf05fd | [
"Apache-2.0"
] | 1 | 2020-07-04T22:30:41.000Z | 2020-07-04T22:30:41.000Z | DBUIScripts/db_update.py | chaoannricardo/NTU_CARDO_Database | 5fbfa1383f2e65a04fabd863c68373f45bbf05fd | [
"Apache-2.0"
] | null | null | null | DBUIScripts/db_update.py | chaoannricardo/NTU_CARDO_Database | 5fbfa1383f2e65a04fabd863c68373f45bbf05fd | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf8 -*-
import pandas as pd
import pymysql
# import configuration in parent dir
import os, sys, inspect
current_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parent_dir = os.path.dirname(current_dir)
sys.path.insert(0, parent_dir)
import configuration as conf
# import packages in models
from models import data_processing, database_management, file_management
if __name__ == '__main__':
# change working directory to project location
abspath = os.path.abspath(__file__)
dname = os.path.dirname(os.path.dirname(abspath))
os.chdir(dname)
# script parameters
name_to_update = sys.argv[1]
update_type = sys.argv[2]
update_method = sys.argv[3]
update_point = sys.argv[4]
# start updating database content
# log in database
config = conf.auto_log_in("cardo_main")
conn = pymysql.connect(**config)
conn_cursor = conn.cursor()
# set up parameters for update
column_to_update = ""
if update_type == "0":
column_to_update = "是否計算黑名單"
elif update_type == "1":
column_to_update = "CARDO點數"
command_text = ""
# update table with different method
if update_method == "0":
command_text = "UPDATE cardo_main.點數記錄表_目前 SET " + column_to_update + " = 0 WHERE 姓名 = " + name_to_update + ";"
elif update_method == "1":
command_text = "UPDATE cardo_main.點數記錄表_目前 SET " + column_to_update + " = " + update_point + \
" WHERE `姓名` = \'" + name_to_update + "\';"
# command_text = "UPDATE cardo_main.點數記錄表_目前 SET " + column_to_update + " = 0 WHERE 姓名 = " + name_to_update + ";"
elif update_method == "2":
select_command = "SELECT " + column_to_update + " FROM cardo_main.點數記錄表_目前 WHERE 姓名 = \'" + name_to_update + "\';"
data_return = pd.read_sql(select_command, conn)
update_point = str(int(data_return.iloc[0, 0]) - int(update_point))
command_text = "UPDATE cardo_main.點數記錄表_目前 SET " + column_to_update + " = " + update_point + " WHERE 姓名 = \'" + name_to_update + "\';"
# execute command
conn_cursor.execute("SET SQL_SAFE_UPDATES = 0;")
conn.commit()
conn_cursor.execute(command_text)
conn.commit()
| 36.129032 | 142 | 0.663839 |
import pandas as pd
import pymysql
import os, sys, inspect
current_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parent_dir = os.path.dirname(current_dir)
sys.path.insert(0, parent_dir)
import configuration as conf
from models import data_processing, database_management, file_management
if __name__ == '__main__':
abspath = os.path.abspath(__file__)
dname = os.path.dirname(os.path.dirname(abspath))
os.chdir(dname)
name_to_update = sys.argv[1]
update_type = sys.argv[2]
update_method = sys.argv[3]
update_point = sys.argv[4]
config = conf.auto_log_in("cardo_main")
conn = pymysql.connect(**config)
conn_cursor = conn.cursor()
column_to_update = ""
if update_type == "0":
column_to_update = "是否計算黑名單"
elif update_type == "1":
column_to_update = "CARDO點數"
command_text = ""
if update_method == "0":
command_text = "UPDATE cardo_main.點數記錄表_目前 SET " + column_to_update + " = 0 WHERE 姓名 = " + name_to_update + ";"
elif update_method == "1":
command_text = "UPDATE cardo_main.點數記錄表_目前 SET " + column_to_update + " = " + update_point + \
" WHERE `姓名` = \'" + name_to_update + "\';"
elif update_method == "2":
select_command = "SELECT " + column_to_update + " FROM cardo_main.點數記錄表_目前 WHERE 姓名 = \'" + name_to_update + "\';"
data_return = pd.read_sql(select_command, conn)
update_point = str(int(data_return.iloc[0, 0]) - int(update_point))
command_text = "UPDATE cardo_main.點數記錄表_目前 SET " + column_to_update + " = " + update_point + " WHERE 姓名 = \'" + name_to_update + "\';"
conn_cursor.execute("SET SQL_SAFE_UPDATES = 0;")
conn.commit()
conn_cursor.execute(command_text)
conn.commit()
| true | true |
f7240ced4c53690a554d2dbce71a2e069cae8b3f | 39,598 | py | Python | charmhelpers/core/host.py | mhilton/juju-charm-helpers | 1156b9dc5023adbe5571cc57038e648d5ce20d0e | [
"Apache-2.0"
] | null | null | null | charmhelpers/core/host.py | mhilton/juju-charm-helpers | 1156b9dc5023adbe5571cc57038e648d5ce20d0e | [
"Apache-2.0"
] | null | null | null | charmhelpers/core/host.py | mhilton/juju-charm-helpers | 1156b9dc5023adbe5571cc57038e648d5ce20d0e | [
"Apache-2.0"
] | null | null | null | # Copyright 2014-2015 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tools for working with the host system"""
# Copyright 2012 Canonical Ltd.
#
# Authors:
# Nick Moffitt <nick.moffitt@canonical.com>
# Matthew Wedgwood <matthew.wedgwood@canonical.com>
import os
import re
import pwd
import glob
import grp
import random
import string
import subprocess
import hashlib
import functools
import itertools
import six
from contextlib import contextmanager
from collections import OrderedDict
from .hookenv import log, INFO, DEBUG, local_unit, charm_name
from .fstab import Fstab
from charmhelpers.osplatform import get_platform
__platform__ = get_platform()
if __platform__ == "ubuntu":
from charmhelpers.core.host_factory.ubuntu import (
service_available,
add_new_group,
lsb_release,
cmp_pkgrevno,
CompareHostReleases,
) # flake8: noqa -- ignore F401 for this import
elif __platform__ == "centos":
from charmhelpers.core.host_factory.centos import (
service_available,
add_new_group,
lsb_release,
cmp_pkgrevno,
CompareHostReleases,
) # flake8: noqa -- ignore F401 for this import
UPDATEDB_PATH = '/etc/updatedb.conf'
def service_start(service_name, **kwargs):
"""Start a system service.
The specified service name is managed via the system level init system.
Some init systems (e.g. upstart) require that additional arguments be
provided in order to directly control service instances whereas other init
systems allow for addressing instances of a service directly by name (e.g.
systemd).
The kwargs allow for the additional parameters to be passed to underlying
init systems for those systems which require/allow for them. For example,
the ceph-osd upstart script requires the id parameter to be passed along
in order to identify which running daemon should be reloaded. The follow-
ing example stops the ceph-osd service for instance id=4:
service_stop('ceph-osd', id=4)
:param service_name: the name of the service to stop
:param **kwargs: additional parameters to pass to the init system when
managing services. These will be passed as key=value
parameters to the init system's commandline. kwargs
are ignored for systemd enabled systems.
"""
return service('start', service_name, **kwargs)
def service_stop(service_name, **kwargs):
"""Stop a system service.
The specified service name is managed via the system level init system.
Some init systems (e.g. upstart) require that additional arguments be
provided in order to directly control service instances whereas other init
systems allow for addressing instances of a service directly by name (e.g.
systemd).
The kwargs allow for the additional parameters to be passed to underlying
init systems for those systems which require/allow for them. For example,
the ceph-osd upstart script requires the id parameter to be passed along
in order to identify which running daemon should be reloaded. The follow-
ing example stops the ceph-osd service for instance id=4:
service_stop('ceph-osd', id=4)
:param service_name: the name of the service to stop
:param **kwargs: additional parameters to pass to the init system when
managing services. These will be passed as key=value
parameters to the init system's commandline. kwargs
are ignored for systemd enabled systems.
"""
return service('stop', service_name, **kwargs)
def service_restart(service_name, **kwargs):
    """Restart a system service.

    The specified service name is managed via the system level init system.
    Some init systems (e.g. upstart) require that additional arguments be
    provided in order to directly control service instances whereas other init
    systems allow for addressing instances of a service directly by name (e.g.
    systemd).

    The kwargs allow for the additional parameters to be passed to underlying
    init systems for those systems which require/allow for them. For example,
    the ceph-osd upstart script requires the id parameter to be passed along
    in order to identify which running daemon should be restarted. The follow-
    ing example restarts the ceph-osd service for instance id=4:

    service_restart('ceph-osd', id=4)

    :param service_name: the name of the service to restart
    :param **kwargs: additional parameters to pass to the init system when
                     managing services. These will be passed as key=value
                     parameters to the init system's commandline. kwargs
                     are ignored for init systems not allowing additional
                     parameters via the commandline (systemd).
    """
    # Fixed: kwargs were previously dropped here, contradicting the docstring
    # and the sibling service_start/service_stop/service_reload helpers.
    return service('restart', service_name, **kwargs)
def service_reload(service_name, restart_on_failure=False, **kwargs):
    """Reload a system service, optionally falling back to restart if
    reload fails.

    The specified service name is managed via the system level init system.
    Some init systems (e.g. upstart) require that additional arguments be
    provided in order to directly control service instances whereas other init
    systems allow for addressing instances of a service directly by name (e.g.
    systemd).

    The kwargs allow additional parameters to be passed to the underlying
    init system for those systems which require/allow them. For example,
    the ceph-osd upstart script requires the id parameter to identify which
    running daemon should be reloaded:

    service_reload('ceph-osd', id=4)

    :param service_name: the name of the service to reload
    :param restart_on_failure: boolean indicating whether to fallback to a
                               restart if the reload fails.
    :param **kwargs: additional parameters to pass to the init system when
                     managing services. These will be passed as key=value
                     parameters to the init system's commandline. kwargs
                     are ignored for init systems not allowing additional
                     parameters via the commandline (systemd).
    """
    # Try a plain reload first; only escalate to a restart when asked to.
    if service('reload', service_name, **kwargs):
        return True
    if restart_on_failure:
        return service('restart', service_name, **kwargs)
    return False
def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d",
**kwargs):
"""Pause a system service.
Stop it, and prevent it from starting again at boot.
:param service_name: the name of the service to pause
:param init_dir: path to the upstart init directory
:param initd_dir: path to the sysv init directory
:param **kwargs: additional parameters to pass to the init system when
managing services. These will be passed as key=value
parameters to the init system's commandline. kwargs
are ignored for init systems which do not support
key=value arguments via the commandline.
"""
stopped = True
if service_running(service_name, **kwargs):
stopped = service_stop(service_name, **kwargs)
upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
sysv_file = os.path.join(initd_dir, service_name)
if init_is_systemd():
service('disable', service_name)
service('mask', service_name)
elif os.path.exists(upstart_file):
override_path = os.path.join(
init_dir, '{}.override'.format(service_name))
with open(override_path, 'w') as fh:
fh.write("manual\n")
elif os.path.exists(sysv_file):
subprocess.check_call(["update-rc.d", service_name, "disable"])
else:
raise ValueError(
"Unable to detect {0} as SystemD, Upstart {1} or"
" SysV {2}".format(
service_name, upstart_file, sysv_file))
return stopped
def service_resume(service_name, init_dir="/etc/init",
initd_dir="/etc/init.d", **kwargs):
"""Resume a system service.
Reenable starting again at boot. Start the service.
:param service_name: the name of the service to resume
:param init_dir: the path to the init dir
:param initd dir: the path to the initd dir
:param **kwargs: additional parameters to pass to the init system when
managing services. These will be passed as key=value
parameters to the init system's commandline. kwargs
are ignored for systemd enabled systems.
"""
upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
sysv_file = os.path.join(initd_dir, service_name)
if init_is_systemd():
service('unmask', service_name)
service('enable', service_name)
elif os.path.exists(upstart_file):
override_path = os.path.join(
init_dir, '{}.override'.format(service_name))
if os.path.exists(override_path):
os.unlink(override_path)
elif os.path.exists(sysv_file):
subprocess.check_call(["update-rc.d", service_name, "enable"])
else:
raise ValueError(
"Unable to detect {0} as SystemD, Upstart {1} or"
" SysV {2}".format(
service_name, upstart_file, sysv_file))
started = service_running(service_name, **kwargs)
if not started:
started = service_start(service_name, **kwargs)
return started
def service(action, service_name, **kwargs):
    """Control a system service.

    :param action: the action to take on the service
    :param service_name: the name of the service to perform the action on
    :param **kwargs: additional params to be passed to the service command in
                    the form of key=value.
    :returns: True if the underlying command exited with status 0.
    """
    if init_is_systemd():
        # systemctl takes no extra key=value parameters, so kwargs are
        # intentionally ignored on systemd hosts.
        cmd = ['systemctl', action, service_name]
    else:
        cmd = ['service', service_name, action]
        for key, value in six.iteritems(kwargs):
            parameter = '%s=%s' % (key, value)
            cmd.append(parameter)
    return subprocess.call(cmd) == 0
_UPSTART_CONF = "/etc/init/{}.conf"
_INIT_D_CONF = "/etc/init.d/{}"
def service_running(service_name, **kwargs):
    """Determine whether a system service is running.

    :param service_name: the name of the service
    :param **kwargs: additional args to pass to the service command. This is
                     used to pass additional key=value arguments to the
                     service command line for managing specific instance
                     units (e.g. service ceph-osd status id=2). The kwargs
                     are ignored in systemd services.
    """
    if init_is_systemd():
        return service('is-active', service_name)
    else:
        # Probe upstart first (a .conf job file exists), then SysV.
        if os.path.exists(_UPSTART_CONF.format(service_name)):
            try:
                cmd = ['status', service_name]
                for key, value in six.iteritems(kwargs):
                    parameter = '%s=%s' % (key, value)
                    cmd.append(parameter)
                output = subprocess.check_output(cmd,
                                                 stderr=subprocess.STDOUT).decode('UTF-8')
            except subprocess.CalledProcessError:
                # Non-zero exit from 'status' is treated as not running.
                return False
            else:
                # This works for upstart scripts where the 'service' command
                # returns a consistent string to represent running
                # 'start/running'
                if ("start/running" in output or
                        "is running" in output or
                        "up and running" in output):
                    return True
        elif os.path.exists(_INIT_D_CONF.format(service_name)):
            # Check System V scripts init script return codes
            return service('status', service_name)
        return False
SYSTEMD_SYSTEM = '/run/systemd/system'
def init_is_systemd():
    """Return True if the host system uses systemd, False otherwise."""
    if lsb_release()['DISTRIB_CODENAME'] == 'trusty':
        # Trusty is explicitly treated as non-systemd (it uses upstart),
        # regardless of whether the systemd runtime directory exists.
        return False
    # systemd creates this directory at boot; its presence is the standard
    # way to detect a systemd-managed host.
    return os.path.isdir(SYSTEMD_SYSTEM)
def adduser(username, password=None, shell='/bin/bash',
system_user=False, primary_group=None,
secondary_groups=None, uid=None, home_dir=None):
"""Add a user to the system.
Will log but otherwise succeed if the user already exists.
:param str username: Username to create
:param str password: Password for user; if ``None``, create a system user
:param str shell: The default shell for the user
:param bool system_user: Whether to create a login or system user
:param str primary_group: Primary group for user; defaults to username
:param list secondary_groups: Optional list of additional groups
:param int uid: UID for user being created
:param str home_dir: Home directory for user
:returns: The password database entry struct, as returned by `pwd.getpwnam`
"""
try:
user_info = pwd.getpwnam(username)
log('user {0} already exists!'.format(username))
if uid:
user_info = pwd.getpwuid(int(uid))
log('user with uid {0} already exists!'.format(uid))
except KeyError:
log('creating user {0}'.format(username))
cmd = ['useradd']
if uid:
cmd.extend(['--uid', str(uid)])
if home_dir:
cmd.extend(['--home', str(home_dir)])
if system_user or password is None:
cmd.append('--system')
else:
cmd.extend([
'--create-home',
'--shell', shell,
'--password', password,
])
if not primary_group:
try:
grp.getgrnam(username)
primary_group = username # avoid "group exists" error
except KeyError:
pass
if primary_group:
cmd.extend(['-g', primary_group])
if secondary_groups:
cmd.extend(['-G', ','.join(secondary_groups)])
cmd.append(username)
subprocess.check_call(cmd)
user_info = pwd.getpwnam(username)
return user_info
def user_exists(username):
    """Return True if *username* is present in the password database."""
    try:
        pwd.getpwnam(username)
    except KeyError:
        return False
    return True
def uid_exists(uid):
    """Return True if a user with the given numeric *uid* exists."""
    try:
        pwd.getpwuid(uid)
    except KeyError:
        return False
    return True
def group_exists(groupname):
    """Return True if *groupname* is present in the group database."""
    try:
        grp.getgrnam(groupname)
    except KeyError:
        return False
    return True
def gid_exists(gid):
    """Return True if a group with the given numeric *gid* exists."""
    try:
        grp.getgrgid(gid)
    except KeyError:
        return False
    return True
def add_group(group_name, system_group=False, gid=None):
"""Add a group to the system
Will log but otherwise succeed if the group already exists.
:param str group_name: group to create
:param bool system_group: Create system group
:param int gid: GID for user being created
:returns: The password database entry struct, as returned by `grp.getgrnam`
"""
try:
group_info = grp.getgrnam(group_name)
log('group {0} already exists!'.format(group_name))
if gid:
group_info = grp.getgrgid(gid)
log('group with gid {0} already exists!'.format(gid))
except KeyError:
log('creating group {0}'.format(group_name))
add_new_group(group_name, system_group, gid)
group_info = grp.getgrnam(group_name)
return group_info
def add_user_to_group(username, group):
"""Add a user to a group"""
cmd = ['gpasswd', '-a', username, group]
log("Adding user {} to group {}".format(username, group))
subprocess.check_call(cmd)
def chage(username, lastday=None, expiredate=None, inactive=None,
mindays=None, maxdays=None, root=None, warndays=None):
"""Change user password expiry information
:param str username: User to update
:param str lastday: Set when password was changed in YYYY-MM-DD format
:param str expiredate: Set when user's account will no longer be
accessible in YYYY-MM-DD format.
-1 will remove an account expiration date.
:param str inactive: Set the number of days of inactivity after a password
has expired before the account is locked.
-1 will remove an account's inactivity.
:param str mindays: Set the minimum number of days between password
changes to MIN_DAYS.
0 indicates the password can be changed anytime.
:param str maxdays: Set the maximum number of days during which a
password is valid.
-1 as MAX_DAYS will remove checking maxdays
:param str root: Apply changes in the CHROOT_DIR directory
:param str warndays: Set the number of days of warning before a password
change is required
:raises subprocess.CalledProcessError: if call to chage fails
"""
cmd = ['chage']
if root:
cmd.extend(['--root', root])
if lastday:
cmd.extend(['--lastday', lastday])
if expiredate:
cmd.extend(['--expiredate', expiredate])
if inactive:
cmd.extend(['--inactive', inactive])
if mindays:
cmd.extend(['--mindays', mindays])
if maxdays:
cmd.extend(['--maxdays', maxdays])
if warndays:
cmd.extend(['--warndays', warndays])
cmd.append(username)
subprocess.check_call(cmd)
remove_password_expiry = functools.partial(chage, expiredate='-1', inactive='-1', mindays='0', maxdays='-1')
def rsync(from_path, to_path, flags='-r', options=None, timeout=None):
"""Replicate the contents of a path"""
options = options or ['--delete', '--executability']
cmd = ['/usr/bin/rsync', flags]
if timeout:
cmd = ['timeout', str(timeout)] + cmd
cmd.extend(options)
cmd.append(from_path)
cmd.append(to_path)
log(" ".join(cmd))
return subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode('UTF-8').strip()
def symlink(source, destination):
"""Create a symbolic link"""
log("Symlinking {} as {}".format(source, destination))
cmd = [
'ln',
'-sf',
source,
destination,
]
subprocess.check_call(cmd)
def mkdir(path, owner='root', group='root', perms=0o555, force=False):
"""Create a directory"""
log("Making dir {} {}:{} {:o}".format(path, owner, group,
perms))
uid = pwd.getpwnam(owner).pw_uid
gid = grp.getgrnam(group).gr_gid
realpath = os.path.abspath(path)
path_exists = os.path.exists(realpath)
if path_exists and force:
if not os.path.isdir(realpath):
log("Removing non-directory file {} prior to mkdir()".format(path))
os.unlink(realpath)
os.makedirs(realpath, perms)
elif not path_exists:
os.makedirs(realpath, perms)
os.chown(realpath, uid, gid)
os.chmod(realpath, perms)
def write_file(path, content, owner='root', group='root', perms=0o444):
    """Create or overwrite a file with the contents of a byte string.

    The write is skipped when the on-disk content already matches; in that
    case only ownership/permissions are corrected if they differ.

    :param str path: target file path
    :param content: bytes to write (a str is UTF-8 encoded on Python 3)
    :param str owner: user name that should own the file
    :param str group: group name that should own the file
    :param int perms: permission bits for the file (e.g. 0o444)
    """
    uid = pwd.getpwnam(owner).pw_uid
    gid = grp.getgrnam(group).gr_gid
    # Read the current state first so an unchanged file is not rewritten
    # (avoids needless mtime churn that would trigger restart_on_change).
    existing_content = None
    existing_uid, existing_gid, existing_perms = None, None, None
    try:
        with open(path, 'rb') as target:
            existing_content = target.read()
        file_stat = os.stat(path)
        existing_uid, existing_gid = file_stat.st_uid, file_stat.st_gid
        # Mask off the file-type bits so the value is comparable with the
        # `perms` argument; raw st_mode (e.g. 0o100644) never equals 0o644.
        existing_perms = file_stat.st_mode & 0o7777
    except (OSError, IOError):
        # File is missing or unreadable; treat it as "needs writing".
        pass
    if content != existing_content:
        log("Writing file {} {}:{} {:o}".format(path, owner, group, perms),
            level=DEBUG)
        with open(path, 'wb') as target:
            os.fchown(target.fileno(), uid, gid)
            os.fchmod(target.fileno(), perms)
            if six.PY3 and isinstance(content, six.string_types):
                content = content.encode('UTF-8')
            target.write(content)
        return
    # The contents were the same, but ownership or permissions may still
    # need to be corrected.
    if existing_uid != uid:
        log("Changing uid on already existing content: {} -> {}"
            .format(existing_uid, uid), level=DEBUG)
        os.chown(path, uid, -1)
    if existing_gid != gid:
        log("Changing gid on already existing content: {} -> {}"
            .format(existing_gid, gid), level=DEBUG)
        os.chown(path, -1, gid)
    if existing_perms != perms:
        log("Changing permissions on existing content: {} -> {}"
            .format(existing_perms, perms), level=DEBUG)
        # Fixed: this previously called os.chown(path, perms), which raises
        # TypeError (chown requires uid and gid); chmod is what is intended.
        os.chmod(path, perms)
def fstab_remove(mp):
    """Remove the given mountpoint entry from /etc/fstab

    :param str mp: the mountpoint whose fstab entry should be removed
    """
    # Thin wrapper around the charmhelpers Fstab helper.
    return Fstab.remove_by_mountpoint(mp)
def fstab_add(dev, mp, fs, options=None):
    """Adds the given device entry to the /etc/fstab file

    :param str dev: device path
    :param str mp: mountpoint
    :param str fs: filesystem type
    :param options: mount options string, or None for defaults
    """
    # Thin wrapper around the charmhelpers Fstab helper.
    return Fstab.add(dev, mp, fs, options=options)
def mount(device, mountpoint, options=None, persist=False, filesystem="ext3"):
"""Mount a filesystem at a particular mountpoint"""
cmd_args = ['mount']
if options is not None:
cmd_args.extend(['-o', options])
cmd_args.extend([device, mountpoint])
try:
subprocess.check_output(cmd_args)
except subprocess.CalledProcessError as e:
log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output))
return False
if persist:
return fstab_add(device, mountpoint, filesystem, options=options)
return True
def umount(mountpoint, persist=False):
"""Unmount a filesystem"""
cmd_args = ['umount', mountpoint]
try:
subprocess.check_output(cmd_args)
except subprocess.CalledProcessError as e:
log('Error unmounting {}\n{}'.format(mountpoint, e.output))
return False
if persist:
return fstab_remove(mountpoint)
return True
def mounts():
    """Get a list of all mounted volumes as [[mountpoint,device],[...]]"""
    entries = []
    with open('/proc/mounts') as proc_mounts:
        for raw_line in proc_mounts:
            fields = raw_line.strip().split()
            # /proc/mounts rows are "<device> <mountpoint> <fstype> ...";
            # emit the first two fields swapped: [mountpoint, device].
            entries.append(fields[1::-1])
    return entries
def fstab_mount(mountpoint):
"""Mount filesystem using fstab"""
cmd_args = ['mount', mountpoint]
try:
subprocess.check_output(cmd_args)
except subprocess.CalledProcessError as e:
log('Error unmounting {}\n{}'.format(mountpoint, e.output))
return False
return True
def file_hash(path, hash_type='md5'):
    """Return the hex digest of the file at *path*, or None if it is absent.

    :param str hash_type: Any hash algorithm supported by :mod:`hashlib`,
                          such as md5, sha1, sha256, sha512, etc.
    """
    if not os.path.exists(path):
        return None
    hasher = getattr(hashlib, hash_type)()
    with open(path, 'rb') as source:
        hasher.update(source.read())
    return hasher.hexdigest()
def path_hash(path):
    """Generate a hash checksum of all files matching 'path'. Standard
    wildcards like '*' and '?' are supported, see documentation for the 'glob'
    module for more information.

    :return: dict: A { filename: hash } dictionary for all matched files.
        Empty if none found.
    """
    matches = glob.iglob(path)
    return dict((match, file_hash(match)) for match in matches)
def check_hash(path, checksum, hash_type='md5'):
    """Validate a file using a cryptographic checksum.

    :param str checksum: Value of the checksum used to validate the file.
    :param str hash_type: Hash algorithm used to generate `checksum`.
        Can be any hash algorithm supported by :mod:`hashlib`,
        such as md5, sha1, sha256, sha512, etc.
    :raises ChecksumError: If the file fails the checksum
    """
    computed = file_hash(path, hash_type)
    if computed != checksum:
        raise ChecksumError("'%s' != '%s'" % (checksum, computed))
class ChecksumError(ValueError):
    """Raised when a file fails its cryptographic checksum validation."""
def restart_on_change(restart_map, stopstart=False, restart_functions=None):
"""Restart services based on configuration files changing
This function is used a decorator, for example::
@restart_on_change({
'/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ]
'/etc/apache/sites-enabled/*': [ 'apache2' ]
})
def config_changed():
pass # your code here
In this example, the cinder-api and cinder-volume services
would be restarted if /etc/ceph/ceph.conf is changed by the
ceph_client_changed function. The apache2 service would be
restarted if any file matching the pattern got changed, created
or removed. Standard wildcards are supported, see documentation
for the 'glob' module for more information.
@param restart_map: {path_file_name: [service_name, ...]
@param stopstart: DEFAULT false; whether to stop, start OR restart
@param restart_functions: nonstandard functions to use to restart services
{svc: func, ...}
@returns result from decorated function
"""
def wrap(f):
@functools.wraps(f)
def wrapped_f(*args, **kwargs):
return restart_on_change_helper(
(lambda: f(*args, **kwargs)), restart_map, stopstart,
restart_functions)
return wrapped_f
return wrap
def restart_on_change_helper(lambda_f, restart_map, stopstart=False,
                             restart_functions=None):
    """Helper function to perform the restart_on_change function.

    This is provided for decorators to restart services if files described
    in the restart_map have changed after an invocation of lambda_f().

    @param lambda_f: function to call.
    @param restart_map: {file: [service, ...]}
    @param stopstart: whether to stop, start or restart a service
    @param restart_functions: nonstandard functions to use to restart services
                              {svc: func, ...}
    @returns result of lambda_f()
    """
    if restart_functions is None:
        restart_functions = {}
    # Snapshot the file hashes *before* invoking the wrapped function so any
    # changes it makes can be detected afterwards.
    checksums = {path: path_hash(path) for path in restart_map}
    r = lambda_f()
    # create a list of lists of the services to restart
    restarts = [restart_map[path]
                for path in restart_map
                if path_hash(path) != checksums[path]]
    # create a flat list of ordered services without duplicates from lists
    services_list = list(OrderedDict.fromkeys(itertools.chain(*restarts)))
    if services_list:
        actions = ('stop', 'start') if stopstart else ('restart',)
        for service_name in services_list:
            if service_name in restart_functions:
                # A custom restart function takes precedence over the
                # generic init-system action(s).
                restart_functions[service_name](service_name)
            else:
                for action in actions:
                    service(action, service_name)
    return r
def pwgen(length=None):
    """Generate a random password.

    :param length: number of characters; when omitted a random length
                   between 35 and 44 is chosen.
    """
    if length is None:
        # Picking the *length* with a weak PRNG is fine; only the characters
        # themselves need a crypto-quality source.
        length = random.choice(range(35, 45))
    # Exclude easily-confused characters and vowels.
    excluded = 'l0QD1vAEIOUaeiou'
    alphabet = [char for char in string.ascii_letters + string.digits
                if char not in excluded]
    # Use a crypto-friendly PRNG (e.g. /dev/urandom) for the actual password.
    rng = random.SystemRandom()
    return ''.join(rng.choice(alphabet) for _ in range(length))
def is_phy_iface(interface):
    """Returns True if interface is not virtual, otherwise False."""
    if not interface:
        return False
    sys_net = '/sys/class/net'
    if not os.path.isdir(sys_net):
        return False
    for entry in glob.glob(os.path.join(sys_net, '*')):
        # Virtual devices resolve under .../devices/virtual/...; skip them.
        if '/virtual/' in os.path.realpath(entry):
            continue
        if os.path.basename(entry) == interface:
            return True
    return False
def get_bond_master(interface):
    """Return the bond master for *interface*, or None if it is not a
    bond slave.

    NOTE: the provided interface is expected to be physical
    """
    if not interface:
        return None
    iface_path = '/sys/class/net/%s' % (interface)
    if not os.path.exists(iface_path):
        return None
    if '/virtual/' in os.path.realpath(iface_path):
        return None
    master_link = os.path.join(iface_path, 'master')
    if not os.path.exists(master_link):
        return None
    master = os.path.realpath(master_link)
    # Only report it when the link target really is a bonding master.
    if os.path.exists(os.path.join(master, 'bonding')):
        return os.path.basename(master)
    return None
def list_nics(nic_type=None):
    """Return a list of nics of given type(s)

    :param nic_type: a single interface-name prefix (e.g. 'eth') or a list
                     of such prefixes; all interfaces are listed when None.
    :returns: list of interface name strings, without duplicates.
    """
    if isinstance(nic_type, six.string_types):
        int_types = [nic_type]
    else:
        int_types = nic_type

    interfaces = []
    if nic_type:
        for int_type in int_types:
            cmd = ['ip', 'addr', 'show', 'label', int_type + '*']
            ip_output = subprocess.check_output(cmd).decode('UTF-8')
            ip_output = ip_output.split('\n')
            ip_output = (line for line in ip_output if line)
            for line in ip_output:
                if line.split()[1].startswith(int_type):
                    # VLAN sub-interfaces show up as e.g. 'eth0.10@eth0';
                    # capture only the name before the '@'.
                    matched = re.search(r'.*: (' + int_type +
                                        r'[0-9]+\.[0-9]+)@.*', line)
                    if matched:
                        iface = matched.groups()[0]
                    else:
                        iface = line.split()[1].replace(":", "")
                    if iface not in interfaces:
                        interfaces.append(iface)
    else:
        cmd = ['ip', 'a']
        ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
        ip_output = (line.strip() for line in ip_output if line)
        # Fixed: raw string; '\s' in a plain string literal is an invalid
        # escape sequence (DeprecationWarning, an error in future Pythons).
        key = re.compile(r'^[0-9]+:\s+(.+):')
        for line in ip_output:
            matched = re.search(key, line)
            if matched:
                iface = matched.group(1)
                iface = iface.partition("@")[0]
                if iface not in interfaces:
                    interfaces.append(iface)
    return interfaces
def set_nic_mtu(nic, mtu):
"""Set the Maximum Transmission Unit (MTU) on a network interface."""
cmd = ['ip', 'link', 'set', nic, 'mtu', mtu]
subprocess.check_call(cmd)
def get_nic_mtu(nic):
    """Return the Maximum Transmission Unit (MTU) for a network interface.

    Returns the empty string if no 'mtu' token appears in the output.
    """
    output = subprocess.check_output(
        ['ip', 'addr', 'show', nic]).decode('UTF-8')
    mtu = ""
    for line in output.split('\n'):
        words = line.split()
        if 'mtu' in words:
            # Keep scanning: the last occurrence wins, as before.
            mtu = words[words.index("mtu") + 1]
    return mtu
def get_nic_hwaddr(nic):
    """Return the Media Access Control (MAC) for a network interface.

    Returns the empty string if no 'link/ether' token is present.
    """
    output = subprocess.check_output(
        ['ip', '-o', '-0', 'addr', 'show', nic]).decode('UTF-8')
    words = output.split()
    if 'link/ether' in words:
        return words[words.index('link/ether') + 1]
    return ""
@contextmanager
def chdir(directory):
    """Temporarily change the working directory for a ``with`` block.

    The previous working directory is restored when the block exits,
    even if an exception is raised.  Useful to run commands from a
    specific directory.

    :param str directory: The directory path to change to for this context.
    """
    previous = os.getcwd()
    try:
        yield os.chdir(directory)
    finally:
        os.chdir(previous)
def chownr(path, owner, group, follow_links=True, chowntopdir=False):
    """Recursively change user and group ownership of files and directories
    in given path. Doesn't chown path itself by default, only its children.

    :param str path: The string path to start changing ownership.
    :param str owner: The owner string to use when looking up the uid.
    :param str group: The group string to use when looking up the gid.
    :param bool follow_links: Also follow and chown links if True
    :param bool chowntopdir: Also chown path itself if True
    """
    uid = pwd.getpwnam(owner).pw_uid
    gid = grp.getgrnam(group).gr_gid
    chown = os.chown if follow_links else os.lchown

    def _chown_if_not_dangling(target):
        # A dangling symlink (lexists but not exists) would make chown fail.
        if not (os.path.lexists(target) and not os.path.exists(target)):
            chown(target, uid, gid)

    if chowntopdir:
        _chown_if_not_dangling(path)
    for root, dirs, files in os.walk(path, followlinks=follow_links):
        for entry in dirs + files:
            _chown_if_not_dangling(os.path.join(root, entry))
def lchownr(path, owner, group):
    """Recursively change user and group ownership of files and directories
    under a path without following symbolic links. See the documentation of
    'os.lchown' for more information.

    :param str path: The string path to start changing ownership.
    :param str owner: The owner string to use when looking up the uid.
    :param str group: The group string to use when looking up the gid.
    """
    # Thin wrapper: identical to chownr() with link-following disabled.
    chownr(path=path, owner=owner, group=group, follow_links=False)
def owner(path):
    """Returns a tuple containing the username & groupname owning the path.

    :param str path: the string path to retrieve the ownership
    :return tuple(str, str): A (username, groupname) tuple containing the
                             name of the user and group owning the path.
    :raises OSError: if the specified path does not exist
    """
    info = os.stat(path)
    return (pwd.getpwuid(info.st_uid)[0], grp.getgrgid(info.st_gid)[0])
def get_total_ram():
    """The total amount of system RAM in bytes.

    This is what is reported by the OS, and may be overcommitted when
    there are multiple containers hosted on the same machine.
    """
    with open('/proc/meminfo', 'r') as meminfo:
        for line in meminfo:
            if not line:
                continue
            # MemTotal is the first line, so we return before hitting
            # lines that would not unpack into three tokens.
            key, value, unit = line.split()
            if key == 'MemTotal:':
                assert unit == 'kB', 'Unknown unit'
                return int(value) * 1024  # Classic kilobytes, not KiB.
    raise NotImplementedError()
UPSTART_CONTAINER_TYPE = '/run/container_type'


def is_container():
    """Determine whether unit is running in a container

    @return: boolean indicating if unit is in a container
    """
    if init_is_systemd():
        # systemd hosts: ask systemd-detect-virt about container virt.
        return subprocess.call(['systemd-detect-virt', '--container']) == 0
    # upstart hosts: the marker file exists only inside containers.
    return os.path.exists(UPSTART_CONTAINER_TYPE)
def add_to_updatedb_prunepath(path, updatedb_path=UPDATEDB_PATH):
    """Adds the specified path to the mlocate's udpatedb.conf PRUNEPATH list.

    This method has no effect if the path specified by updatedb_path does not
    exist or is not a file.

    @param path: string the path to add to the updatedb.conf PRUNEPATHS value
    @param updatedb_path: the path the updatedb.conf file
    """
    # No-op when mlocate (and hence updatedb.conf) is not installed, or when
    # the target is unexpectedly a directory rather than the config file.
    if not os.path.exists(updatedb_path) or os.path.isdir(updatedb_path):
        return
    with open(updatedb_path, 'r+') as conf:
        rewritten = updatedb(conf.read(), path)
        conf.seek(0)
        conf.write(rewritten)
        conf.truncate()
def updatedb(updatedb_text, new_path):
    """Return *updatedb_text* with *new_path* appended to PRUNEPATHS.

    The text is returned unchanged if *new_path* is already listed or if
    no PRUNEPATHS= line is present.

    :param str updatedb_text: full contents of an updatedb.conf file
    :param str new_path: path to add to the PRUNEPATHS value
    :return str: the (possibly rewritten) configuration text
    """
    lines = updatedb_text.split("\n")
    for i, line in enumerate(lines):
        if not line.startswith("PRUNEPATHS="):
            continue
        paths = line.split("=")[1].replace('"', '').split(" ")
        if new_path not in paths:
            paths.append(new_path)
            lines[i] = 'PRUNEPATHS="{}"'.format(' '.join(paths))
    return "\n".join(lines)
def modulo_distribution(modulo=3, wait=30, non_zero_wait=False):
    """Spread an expensive operation across units using modulo arithmetic.

    The unit number, a modulo value and a constant wait time are combined
    into a per-unit wait time, which distributes load in large deployments
    during expensive operations such as service restarts.

    To restart 1000 nodes 100 at a time, one minute apart:

        time.wait(modulo_distribution(modulo=100, wait=60))
        restart()

    For strictly serial restarts, set modulo to the exact number of nodes
    and use a high constant wait time:

        time.wait(modulo_distribution(modulo=10, wait=120))
        restart()

    @param modulo: int The modulo number creates the group distribution
    @param wait: int The constant time wait value
    @param non_zero_wait: boolean Override unit % modulo == 0,
                          return modulo * wait. Used to avoid collisions
                          with leader nodes which are often given priority.
    @return: int Calculated time to wait for unit operation
    """
    group = int(local_unit().split('/')[1]) % modulo
    if non_zero_wait and group * wait == 0:
        # Push the zero-wait group to the back of the queue instead.
        return modulo * wait
    return group * wait
def install_ca_cert(ca_cert, name=None):
    """
    Install the given cert as a trusted CA.

    The ``name`` is the stem of the filename where the cert is written, and if
    not provided, it will default to ``juju-{charm_name}``.

    If the cert is empty or None, or is unchanged, nothing is done.
    """
    if not ca_cert:
        return
    if not isinstance(ca_cert, bytes):
        ca_cert = ca_cert.encode('utf8')
    if not name:
        name = 'juju-{}'.format(charm_name())
    cert_file = '/usr/local/share/ca-certificates/{}.crt'.format(name)
    # md5 is used only as a cheap change detector here, not for security.
    if file_hash(cert_file) == hashlib.md5(ca_cert).hexdigest():
        return
    log("Installing new CA cert at: {}".format(cert_file), level=INFO)
    write_file(cert_file, ca_cert)
    subprocess.check_call(['update-ca-certificates', '--fresh'])
| 36.904007 | 108 | 0.638593 |
import os
import re
import pwd
import glob
import grp
import random
import string
import subprocess
import hashlib
import functools
import itertools
import six
from contextlib import contextmanager
from collections import OrderedDict
from .hookenv import log, INFO, DEBUG, local_unit, charm_name
from .fstab import Fstab
from charmhelpers.osplatform import get_platform
__platform__ = get_platform()
if __platform__ == "ubuntu":
from charmhelpers.core.host_factory.ubuntu import (
service_available,
add_new_group,
lsb_release,
cmp_pkgrevno,
CompareHostReleases,
)
elif __platform__ == "centos":
from charmhelpers.core.host_factory.centos import (
service_available,
add_new_group,
lsb_release,
cmp_pkgrevno,
CompareHostReleases,
)
UPDATEDB_PATH = '/etc/updatedb.conf'
def service_start(service_name, **kwargs):
return service('start', service_name, **kwargs)
def service_stop(service_name, **kwargs):
return service('stop', service_name, **kwargs)
def service_restart(service_name, **kwargs):
return service('restart', service_name)
def service_reload(service_name, restart_on_failure=False, **kwargs):
service_result = service('reload', service_name, **kwargs)
if not service_result and restart_on_failure:
service_result = service('restart', service_name, **kwargs)
return service_result
def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d",
**kwargs):
stopped = True
if service_running(service_name, **kwargs):
stopped = service_stop(service_name, **kwargs)
upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
sysv_file = os.path.join(initd_dir, service_name)
if init_is_systemd():
service('disable', service_name)
service('mask', service_name)
elif os.path.exists(upstart_file):
override_path = os.path.join(
init_dir, '{}.override'.format(service_name))
with open(override_path, 'w') as fh:
fh.write("manual\n")
elif os.path.exists(sysv_file):
subprocess.check_call(["update-rc.d", service_name, "disable"])
else:
raise ValueError(
"Unable to detect {0} as SystemD, Upstart {1} or"
" SysV {2}".format(
service_name, upstart_file, sysv_file))
return stopped
def service_resume(service_name, init_dir="/etc/init",
initd_dir="/etc/init.d", **kwargs):
upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
sysv_file = os.path.join(initd_dir, service_name)
if init_is_systemd():
service('unmask', service_name)
service('enable', service_name)
elif os.path.exists(upstart_file):
override_path = os.path.join(
init_dir, '{}.override'.format(service_name))
if os.path.exists(override_path):
os.unlink(override_path)
elif os.path.exists(sysv_file):
subprocess.check_call(["update-rc.d", service_name, "enable"])
else:
raise ValueError(
"Unable to detect {0} as SystemD, Upstart {1} or"
" SysV {2}".format(
service_name, upstart_file, sysv_file))
started = service_running(service_name, **kwargs)
if not started:
started = service_start(service_name, **kwargs)
return started
def service(action, service_name, **kwargs):
if init_is_systemd():
cmd = ['systemctl', action, service_name]
else:
cmd = ['service', service_name, action]
for key, value in six.iteritems(kwargs):
parameter = '%s=%s' % (key, value)
cmd.append(parameter)
return subprocess.call(cmd) == 0
_UPSTART_CONF = "/etc/init/{}.conf"
_INIT_D_CONF = "/etc/init.d/{}"
def service_running(service_name, **kwargs):
if init_is_systemd():
return service('is-active', service_name)
else:
if os.path.exists(_UPSTART_CONF.format(service_name)):
try:
cmd = ['status', service_name]
for key, value in six.iteritems(kwargs):
parameter = '%s=%s' % (key, value)
cmd.append(parameter)
output = subprocess.check_output(cmd,
stderr=subprocess.STDOUT).decode('UTF-8')
except subprocess.CalledProcessError:
return False
else:
if ("start/running" in output or
"is running" in output or
"up and running" in output):
return True
elif os.path.exists(_INIT_D_CONF.format(service_name)):
return service('status', service_name)
return False
SYSTEMD_SYSTEM = '/run/systemd/system'
def init_is_systemd():
if lsb_release()['DISTRIB_CODENAME'] == 'trusty':
return False
return os.path.isdir(SYSTEMD_SYSTEM)
def adduser(username, password=None, shell='/bin/bash',
system_user=False, primary_group=None,
secondary_groups=None, uid=None, home_dir=None):
try:
user_info = pwd.getpwnam(username)
log('user {0} already exists!'.format(username))
if uid:
user_info = pwd.getpwuid(int(uid))
log('user with uid {0} already exists!'.format(uid))
except KeyError:
log('creating user {0}'.format(username))
cmd = ['useradd']
if uid:
cmd.extend(['--uid', str(uid)])
if home_dir:
cmd.extend(['--home', str(home_dir)])
if system_user or password is None:
cmd.append('--system')
else:
cmd.extend([
'--create-home',
'--shell', shell,
'--password', password,
])
if not primary_group:
try:
grp.getgrnam(username)
primary_group = username
except KeyError:
pass
if primary_group:
cmd.extend(['-g', primary_group])
if secondary_groups:
cmd.extend(['-G', ','.join(secondary_groups)])
cmd.append(username)
subprocess.check_call(cmd)
user_info = pwd.getpwnam(username)
return user_info
def user_exists(username):
try:
pwd.getpwnam(username)
user_exists = True
except KeyError:
user_exists = False
return user_exists
def uid_exists(uid):
try:
pwd.getpwuid(uid)
uid_exists = True
except KeyError:
uid_exists = False
return uid_exists
def group_exists(groupname):
try:
grp.getgrnam(groupname)
group_exists = True
except KeyError:
group_exists = False
return group_exists
def gid_exists(gid):
try:
grp.getgrgid(gid)
gid_exists = True
except KeyError:
gid_exists = False
return gid_exists
def add_group(group_name, system_group=False, gid=None):
try:
group_info = grp.getgrnam(group_name)
log('group {0} already exists!'.format(group_name))
if gid:
group_info = grp.getgrgid(gid)
log('group with gid {0} already exists!'.format(gid))
except KeyError:
log('creating group {0}'.format(group_name))
add_new_group(group_name, system_group, gid)
group_info = grp.getgrnam(group_name)
return group_info
def add_user_to_group(username, group):
cmd = ['gpasswd', '-a', username, group]
log("Adding user {} to group {}".format(username, group))
subprocess.check_call(cmd)
def chage(username, lastday=None, expiredate=None, inactive=None,
mindays=None, maxdays=None, root=None, warndays=None):
cmd = ['chage']
if root:
cmd.extend(['--root', root])
if lastday:
cmd.extend(['--lastday', lastday])
if expiredate:
cmd.extend(['--expiredate', expiredate])
if inactive:
cmd.extend(['--inactive', inactive])
if mindays:
cmd.extend(['--mindays', mindays])
if maxdays:
cmd.extend(['--maxdays', maxdays])
if warndays:
cmd.extend(['--warndays', warndays])
cmd.append(username)
subprocess.check_call(cmd)
remove_password_expiry = functools.partial(chage, expiredate='-1', inactive='-1', mindays='0', maxdays='-1')
def rsync(from_path, to_path, flags='-r', options=None, timeout=None):
options = options or ['--delete', '--executability']
cmd = ['/usr/bin/rsync', flags]
if timeout:
cmd = ['timeout', str(timeout)] + cmd
cmd.extend(options)
cmd.append(from_path)
cmd.append(to_path)
log(" ".join(cmd))
return subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode('UTF-8').strip()
def symlink(source, destination):
log("Symlinking {} as {}".format(source, destination))
cmd = [
'ln',
'-sf',
source,
destination,
]
subprocess.check_call(cmd)
def mkdir(path, owner='root', group='root', perms=0o555, force=False):
log("Making dir {} {}:{} {:o}".format(path, owner, group,
perms))
uid = pwd.getpwnam(owner).pw_uid
gid = grp.getgrnam(group).gr_gid
realpath = os.path.abspath(path)
path_exists = os.path.exists(realpath)
if path_exists and force:
if not os.path.isdir(realpath):
log("Removing non-directory file {} prior to mkdir()".format(path))
os.unlink(realpath)
os.makedirs(realpath, perms)
elif not path_exists:
os.makedirs(realpath, perms)
os.chown(realpath, uid, gid)
os.chmod(realpath, perms)
def write_file(path, content, owner='root', group='root', perms=0o444):
uid = pwd.getpwnam(owner).pw_uid
gid = grp.getgrnam(group).gr_gid
existing_content = None
existing_uid, existing_gid, existing_perms = None, None, None
try:
with open(path, 'rb') as target:
existing_content = target.read()
stat = os.stat(path)
existing_uid, existing_gid, existing_perms = (
stat.st_uid, stat.st_gid, stat.st_mode
)
except:
pass
if content != existing_content:
log("Writing file {} {}:{} {:o}".format(path, owner, group, perms),
level=DEBUG)
with open(path, 'wb') as target:
os.fchown(target.fileno(), uid, gid)
os.fchmod(target.fileno(), perms)
if six.PY3 and isinstance(content, six.string_types):
content = content.encode('UTF-8')
target.write(content)
return
if existing_uid != uid:
log("Changing uid on already existing content: {} -> {}"
.format(existing_uid, uid), level=DEBUG)
os.chown(path, uid, -1)
if existing_gid != gid:
log("Changing gid on already existing content: {} -> {}"
.format(existing_gid, gid), level=DEBUG)
os.chown(path, -1, gid)
if existing_perms != perms:
log("Changing permissions on existing content: {} -> {}"
.format(existing_perms, perms), level=DEBUG)
os.chown(path, perms)
def fstab_remove(mp):
return Fstab.remove_by_mountpoint(mp)
def fstab_add(dev, mp, fs, options=None):
return Fstab.add(dev, mp, fs, options=options)
def mount(device, mountpoint, options=None, persist=False, filesystem="ext3"):
cmd_args = ['mount']
if options is not None:
cmd_args.extend(['-o', options])
cmd_args.extend([device, mountpoint])
try:
subprocess.check_output(cmd_args)
except subprocess.CalledProcessError as e:
log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output))
return False
if persist:
return fstab_add(device, mountpoint, filesystem, options=options)
return True
def umount(mountpoint, persist=False):
cmd_args = ['umount', mountpoint]
try:
subprocess.check_output(cmd_args)
except subprocess.CalledProcessError as e:
log('Error unmounting {}\n{}'.format(mountpoint, e.output))
return False
if persist:
return fstab_remove(mountpoint)
return True
def mounts():
with open('/proc/mounts') as f:
system_mounts = [m[1::-1] for m in [l.strip().split()
for l in f.readlines()]]
return system_mounts
def fstab_mount(mountpoint):
cmd_args = ['mount', mountpoint]
try:
subprocess.check_output(cmd_args)
except subprocess.CalledProcessError as e:
log('Error unmounting {}\n{}'.format(mountpoint, e.output))
return False
return True
def file_hash(path, hash_type='md5'):
if os.path.exists(path):
h = getattr(hashlib, hash_type)()
with open(path, 'rb') as source:
h.update(source.read())
return h.hexdigest()
else:
return None
def path_hash(path):
return {
filename: file_hash(filename)
for filename in glob.iglob(path)
}
def check_hash(path, checksum, hash_type='md5'):
actual_checksum = file_hash(path, hash_type)
if checksum != actual_checksum:
raise ChecksumError("'%s' != '%s'" % (checksum, actual_checksum))
class ChecksumError(ValueError):
pass
def restart_on_change(restart_map, stopstart=False, restart_functions=None):
def wrap(f):
@functools.wraps(f)
def wrapped_f(*args, **kwargs):
return restart_on_change_helper(
(lambda: f(*args, **kwargs)), restart_map, stopstart,
restart_functions)
return wrapped_f
return wrap
def restart_on_change_helper(lambda_f, restart_map, stopstart=False,
restart_functions=None):
if restart_functions is None:
restart_functions = {}
checksums = {path: path_hash(path) for path in restart_map}
r = lambda_f()
restarts = [restart_map[path]
for path in restart_map
if path_hash(path) != checksums[path]]
services_list = list(OrderedDict.fromkeys(itertools.chain(*restarts)))
if services_list:
actions = ('stop', 'start') if stopstart else ('restart',)
for service_name in services_list:
if service_name in restart_functions:
restart_functions[service_name](service_name)
else:
for action in actions:
service(action, service_name)
return r
def pwgen(length=None):
if length is None:
length = random.choice(range(35, 45))
alphanumeric_chars = [
l for l in (string.ascii_letters + string.digits)
if l not in 'l0QD1vAEIOUaeiou']
random_generator = random.SystemRandom()
random_chars = [
random_generator.choice(alphanumeric_chars) for _ in range(length)]
return(''.join(random_chars))
def is_phy_iface(interface):
if interface:
sys_net = '/sys/class/net'
if os.path.isdir(sys_net):
for iface in glob.glob(os.path.join(sys_net, '*')):
if '/virtual/' in os.path.realpath(iface):
continue
if interface == os.path.basename(iface):
return True
return False
def get_bond_master(interface):
if interface:
iface_path = '/sys/class/net/%s' % (interface)
if os.path.exists(iface_path):
if '/virtual/' in os.path.realpath(iface_path):
return None
master = os.path.join(iface_path, 'master')
if os.path.exists(master):
master = os.path.realpath(master)
if os.path.exists(os.path.join(master, 'bonding')):
return os.path.basename(master)
return None
def list_nics(nic_type=None):
if isinstance(nic_type, six.string_types):
int_types = [nic_type]
else:
int_types = nic_type
interfaces = []
if nic_type:
for int_type in int_types:
cmd = ['ip', 'addr', 'show', 'label', int_type + '*']
ip_output = subprocess.check_output(cmd).decode('UTF-8')
ip_output = ip_output.split('\n')
ip_output = (line for line in ip_output if line)
for line in ip_output:
if line.split()[1].startswith(int_type):
matched = re.search('.*: (' + int_type +
r'[0-9]+\.[0-9]+)@.*', line)
if matched:
iface = matched.groups()[0]
else:
iface = line.split()[1].replace(":", "")
if iface not in interfaces:
interfaces.append(iface)
else:
cmd = ['ip', 'a']
ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
ip_output = (line.strip() for line in ip_output if line)
key = re.compile('^[0-9]+:\s+(.+):')
for line in ip_output:
matched = re.search(key, line)
if matched:
iface = matched.group(1)
iface = iface.partition("@")[0]
if iface not in interfaces:
interfaces.append(iface)
return interfaces
def set_nic_mtu(nic, mtu):
cmd = ['ip', 'link', 'set', nic, 'mtu', mtu]
subprocess.check_call(cmd)
def get_nic_mtu(nic):
cmd = ['ip', 'addr', 'show', nic]
ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
mtu = ""
for line in ip_output:
words = line.split()
if 'mtu' in words:
mtu = words[words.index("mtu") + 1]
return mtu
def get_nic_hwaddr(nic):
cmd = ['ip', '-o', '-0', 'addr', 'show', nic]
ip_output = subprocess.check_output(cmd).decode('UTF-8')
hwaddr = ""
words = ip_output.split()
if 'link/ether' in words:
hwaddr = words[words.index('link/ether') + 1]
return hwaddr
@contextmanager
def chdir(directory):
cur = os.getcwd()
try:
yield os.chdir(directory)
finally:
os.chdir(cur)
def chownr(path, owner, group, follow_links=True, chowntopdir=False):
uid = pwd.getpwnam(owner).pw_uid
gid = grp.getgrnam(group).gr_gid
if follow_links:
chown = os.chown
else:
chown = os.lchown
if chowntopdir:
broken_symlink = os.path.lexists(path) and not os.path.exists(path)
if not broken_symlink:
chown(path, uid, gid)
for root, dirs, files in os.walk(path, followlinks=follow_links):
for name in dirs + files:
full = os.path.join(root, name)
broken_symlink = os.path.lexists(full) and not os.path.exists(full)
if not broken_symlink:
chown(full, uid, gid)
def lchownr(path, owner, group):
chownr(path, owner, group, follow_links=False)
def owner(path):
stat = os.stat(path)
username = pwd.getpwuid(stat.st_uid)[0]
groupname = grp.getgrgid(stat.st_gid)[0]
return username, groupname
def get_total_ram():
with open('/proc/meminfo', 'r') as f:
for line in f.readlines():
if line:
key, value, unit = line.split()
if key == 'MemTotal:':
assert unit == 'kB', 'Unknown unit'
return int(value) * 1024
raise NotImplementedError()
UPSTART_CONTAINER_TYPE = '/run/container_type'
def is_container():
if init_is_systemd():
return subprocess.call(['systemd-detect-virt',
'--container']) == 0
else:
return os.path.exists(UPSTART_CONTAINER_TYPE)
def add_to_updatedb_prunepath(path, updatedb_path=UPDATEDB_PATH):
if not os.path.exists(updatedb_path) or os.path.isdir(updatedb_path):
return
with open(updatedb_path, 'r+') as f_id:
updatedb_text = f_id.read()
output = updatedb(updatedb_text, path)
f_id.seek(0)
f_id.write(output)
f_id.truncate()
def updatedb(updatedb_text, new_path):
lines = [line for line in updatedb_text.split("\n")]
for i, line in enumerate(lines):
if line.startswith("PRUNEPATHS="):
paths_line = line.split("=")[1].replace('"', '')
paths = paths_line.split(" ")
if new_path not in paths:
paths.append(new_path)
lines[i] = 'PRUNEPATHS="{}"'.format(' '.join(paths))
output = "\n".join(lines)
return output
def modulo_distribution(modulo=3, wait=30, non_zero_wait=False):
unit_number = int(local_unit().split('/')[1])
calculated_wait_time = (unit_number % modulo) * wait
if non_zero_wait and calculated_wait_time == 0:
return modulo * wait
else:
return calculated_wait_time
def install_ca_cert(ca_cert, name=None):
if not ca_cert:
return
if not isinstance(ca_cert, bytes):
ca_cert = ca_cert.encode('utf8')
if not name:
name = 'juju-{}'.format(charm_name())
cert_file = '/usr/local/share/ca-certificates/{}.crt'.format(name)
new_hash = hashlib.md5(ca_cert).hexdigest()
if file_hash(cert_file) == new_hash:
return
log("Installing new CA cert at: {}".format(cert_file), level=INFO)
write_file(cert_file, ca_cert)
subprocess.check_call(['update-ca-certificates', '--fresh'])
| true | true |
f724118e7121e1f8fe2af50681d24ff9abbe8300 | 1,883 | py | Python | tests/lib/bes/git/test_git_head_info.py | reconstruir/bes | 82ff54b2dadcaef6849d7de424787f1dedace85c | [
"Apache-2.0"
] | null | null | null | tests/lib/bes/git/test_git_head_info.py | reconstruir/bes | 82ff54b2dadcaef6849d7de424787f1dedace85c | [
"Apache-2.0"
] | null | null | null | tests/lib/bes/git/test_git_head_info.py | reconstruir/bes | 82ff54b2dadcaef6849d7de424787f1dedace85c | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
#-*- coding:utf-8; mode:python; indent-tabs-mode: nil; c-basic-offset: 2; tab-width: 2 -*-
from bes.testing.unit_test import unit_test
from bes.git.git_head_info import git_head_info
class test_git_head_info(unit_test):
  'Tests for git_head_info head parsing and ref-branch matching.'

  def test_parse_head_info(self):
    # Ordinary `git branch -v` lines parse as a branch-type head.
    parse = git_head_info.parse_head_info
    expected = ( 'branch', 'release-beta-14-studio-fixes', None, '9038154f',
                 'track branch release-beta-14-studio-fixes [skip ci]', None )
    self.assertEqual(
      expected,
      parse(None, '* release-beta-14-studio-fixes 9038154f track branch release-beta-14-studio-fixes [skip ci]') )
    self.assertEqual(
      ( 'branch', 'b1', None, 'b59bc43', 'message 1', None ),
      parse(None, '* b1 b59bc43 message 1') )

  def test_parse_head_info_master(self):
    # The master branch is not special-cased.
    self.assertEqual(
      ( 'branch', 'master', None, 'deadbeef', 'fix foo.', None ),
      git_head_info.parse_head_info(None, '* master deadbeef fix foo.') )

  def test_match_ref_branches(self):
    # Glob patterns select the matching subset of the known branches.
    info = git_head_info('tag', None, 'builds/foo/1.2.3', 'deadbeef', 'foo',
                         [ 'master', 'release-beta-26', 'release-beta-27' ])
    self.assertEqual( [ 'release-beta-26', 'release-beta-27' ],
                      info.match_ref_branches([ 'release-beta-*' ]) )

  def test_parse_head_info_detached_tag(self):
    # A head detached at a tag name parses as a tag-type head.
    output = '''\
* (HEAD detached at 1.2.3) deadbeef fixed stuff
  foo
  master
  zoo
'''
    self.assertEqual( ( 'tag', None, '1.2.3', 'deadbeef', 'fixed stuff', None ),
                      git_head_info.parse_head_info(None, output) )

  def test_parse_head_info_detached_commit(self):
    # A head detached at a bare sha parses as a detached-commit head.
    output = '''\
* (HEAD detached at deadbeef) deadbeef fixed stuff
  foo
  master
  zoo
'''
    self.assertEqual( ( 'detached_commit', None, None, 'deadbeef', 'fixed stuff', None ),
                      git_head_info.parse_head_info(None, output) )
if __name__ == '__main__':
unit_test.main()
| 40.06383 | 146 | 0.643654 |
from bes.testing.unit_test import unit_test
from bes.git.git_head_info import git_head_info
class test_git_head_info(unit_test):
def test_parse_head_info(self):
f = git_head_info.parse_head_info
self.assertEqual( ( 'branch', 'release-beta-14-studio-fixes', None, '9038154f', 'track branch release-beta-14-studio-fixes [skip ci]', None ),
f(None, '* release-beta-14-studio-fixes 9038154f track branch release-beta-14-studio-fixes [skip ci]') )
self.assertEqual( ( 'branch', 'b1', None, 'b59bc43', 'message 1', None ),
f(None, '* b1 b59bc43 message 1') )
def test_parse_head_info_master(self):
f = git_head_info.parse_head_info
self.assertEqual( ( 'branch', 'master', None, 'deadbeef', 'fix foo.', None ),
f(None, '* master deadbeef fix foo.') )
def test_match_ref_branches(self):
h = git_head_info('tag', None, 'builds/foo/1.2.3', 'deadbeef', 'foo', [ 'master', 'release-beta-26', 'release-beta-27' ])
self.assertEqual( [ 'release-beta-26', 'release-beta-27' ], h.match_ref_branches([ 'release-beta-*' ]) )
def test_parse_head_info_detached_tag(self):
output = '''\
* (HEAD detached at 1.2.3) deadbeef fixed stuff
foo
master
zoo
'''
self.assertEqual( ( 'tag', None, '1.2.3', 'deadbeef', 'fixed stuff', None ),
git_head_info.parse_head_info(None, output) )
def test_parse_head_info_detached_commit(self):
output = '''\
* (HEAD detached at deadbeef) deadbeef fixed stuff
foo
master
zoo
'''
self.assertEqual( ( 'detached_commit', None, None, 'deadbeef', 'fixed stuff', None ),
git_head_info.parse_head_info(None, output) )
if __name__ == '__main__':
unit_test.main()
| true | true |
f72411bd9a0c9ff6c92d514d19b08bf1e792e642 | 6,001 | py | Python | PointCNN/pointcnn_geom+i+ms.py | hellwue/TreeSpeciesClassification | 8fd8dc6496d8317923c6112d3da46844d419e49f | [
"MIT"
] | null | null | null | PointCNN/pointcnn_geom+i+ms.py | hellwue/TreeSpeciesClassification | 8fd8dc6496d8317923c6112d3da46844d419e49f | [
"MIT"
] | null | null | null | PointCNN/pointcnn_geom+i+ms.py | hellwue/TreeSpeciesClassification | 8fd8dc6496d8317923c6112d3da46844d419e49f | [
"MIT"
] | null | null | null | import pickle
from myutils import load_dataset, call_home, CMDisplay
from itertools import chain
import torch
import torch.nn.functional as F
from torch.nn import Linear as Lin
from torch.optim import Adam
from torch_geometric.nn import XConv, fps, global_mean_pool
import pytorch_lightning as pl
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning.callbacks.early_stopping import EarlyStopping
from sklearn.metrics import classification_report as ClRp
import sklearn.metrics as metrics
import matplotlib.pyplot as plt
class PointCNN(pl.LightningModule):
    """PointCNN classifier for single-tree point clouds (4 classes).

    Consumes per-point coordinates (``data.pos``), ``numfeatures`` per-point
    feature channels (``data.x``) and per-tree features (``data.feat``) that
    are concatenated to the pooled point features before the final MLPs.
    Predicts log-probabilities over the classes coniferous / decidious /
    snag / dead tree (class names per the reporting code in this file).
    """
    def __init__(self, numfeatures=1):
        # numfeatures: number of per-point input feature channels fed to
        # the first XConv (here 1 -- presumably LiDAR intensity, per the
        # 'geom+i+ms' model name; confirm against the dataset loader).
        super().__init__()
        self.learning_rate = 1e-3
        # Separate Accuracy metric objects per phase so state doesn't mix.
        self.train_acc = pl.metrics.Accuracy()
        self.val_acc = pl.metrics.Accuracy()
        self.test_acc = pl.metrics.Accuracy()
        self.numfeatures = numfeatures
        # First XConv layer.
        # Lifts the points (with their numfeatures channels) into a
        # 48-dimensional feature space.
        self.conv1 = XConv(self.numfeatures, 48, dim=3,
                           kernel_size=8, hidden_channels=32)
        # Further XConv layers to further enrich the features
        self.conv2 = XConv(48, 96, dim=3, kernel_size=12,
                           hidden_channels=64, dilation=2)
        self.conv3 = XConv(96, 192, dim=3, kernel_size=16,
                           hidden_channels=128, dilation=2)
        self.conv4 = XConv(192, 384, dim=3, kernel_size=16,
                           hidden_channels=256, dilation=2)
        # MLPs at the end of the PointCNN.
        # Input width 389 = 384 pooled XConv features + width of data.feat
        # (apparently 5 multispectral features -- inferred from the cat()
        # in forward(); TODO confirm).
        self.lin1 = Lin(389, 256)
        self.lin2 = Lin(256, 128)
        self.lin3 = Lin(128, 4)
    def forward(self, data):
        """Return per-tree class log-probabilities, shape (batch, 4)."""
        pos, batch = data.pos, data.batch
        # Use per-point features only when the model was built with them.
        x = data.x if self.numfeatures else None
        ms_feat = data.feat
        # First XConv on the full point set
        x = F.relu(self.conv1(x, pos, batch))
        # Farthest point sampling, keeping only 37.5%
        idx = fps(pos, batch, ratio=0.375)
        x, pos, batch = x[idx], pos[idx], batch[idx]
        # Second XConv
        x = F.relu(self.conv2(x, pos, batch))
        # Farthest point sampling, keeping only 33.4%
        idx = fps(pos, batch, ratio=0.334)
        x, pos, batch = x[idx], pos[idx], batch[idx]
        # Two more XConvs
        x = F.relu(self.conv3(x, pos, batch))
        x = F.relu(self.conv4(x, pos, batch))
        # Pool the batch-elements together:
        # each tree is described in one single point with 384 features
        x = global_mean_pool(x, batch)
        # Append the per-tree multispectral features to the pooled vector.
        x = torch.cat((x, ms_feat), dim=1)
        # MLPs at the end with ReLU
        x = F.relu(self.lin1(x))
        x = F.relu(self.lin2(x))
        # Dropout: randomly zero features, active only while training
        x = F.dropout(x, p=0.5, training=self.training)
        # Last MLP predicting labels
        x = self.lin3(x)
        # log-SoftMax Activation function to then calculate NLL-Loss (Negative Log Likelihood)
        return F.log_softmax(x, dim=-1)
    def training_step(self, data, batch_idx):
        """One optimization step: NLL loss plus logged accuracy."""
        y = data.y
        out = self(data)
        loss = F.nll_loss(out, y)
        self.train_acc(out, y)
        self.log('train_acc', self.train_acc, on_step=True, on_epoch=True)
        self.log('train_loss', loss)  # , on_step=True, on_epoch=True)
        return loss
    def validation_step(self, data, batch_idx):
        """Validation loss/accuracy; monitored by checkpoint/early-stop."""
        y = data.y
        out = self(data)
        val_loss = F.nll_loss(out, y)
        self.val_acc(out, y)
        self.log('val_acc', self.val_acc, on_step=True, on_epoch=True)
        self.log('val_loss', val_loss)  # , on_step=True, on_epoch=True)
        return val_loss
    def test_step(self, data, batch_idx):
        """Test loss/accuracy; returns the raw log-probabilities."""
        y = data.y
        out = self(data)
        test_loss = F.nll_loss(out, y)
        self.test_acc(out, y)
        self.log('test_loss', test_loss)
        return out
    def test_step_end(self, outs):
        # Pass-through; outputs are collected by test_epoch_end.
        return outs
    def test_epoch_end(self, outs):
        # HACK: stashes predictions in the module-level `res` so the
        # reporting code below can read them after trainer.test().
        global res
        res = outs
        return outs
    def configure_optimizers(self):
        """Adam over all parameters at self.learning_rate."""
        optimizer = Adam(self.parameters(), lr=self.learning_rate)
        return optimizer
MODEL_NAME = 'geom+i+ms'
train_dataset = load_dataset(
'../../0_data/hdf/train.h5', batch_size=16, shuffle=True, load_ms=True)
val_dataset = load_dataset(
'../../0_data/hdf/val.h5', batch_size=16, load_ms=True)
test_dataset = load_dataset(
'../../0_data/hdf/test.h5', batch_size=16, load_ms=True)
checkpoint_callback = ModelCheckpoint(monitor='val_loss', save_top_k=1)
trainer = pl.Trainer(gpus=1,
progress_bar_refresh_rate=1,
callbacks=[EarlyStopping(
monitor='val_loss', patience=20)],
checkpoint_callback=checkpoint_callback)
# pl.seed_everything(420)
model = PointCNN()
trainer.fit(model, train_dataset, val_dataset)
best_model = checkpoint_callback.best_model_path
print(best_model)
call_home(f'Done learning {MODEL_NAME}: ' + best_model)
res = []
model = PointCNN.load_from_checkpoint(checkpoint_path=best_model)
# pl.seed_everything(420)
trainer.test(model, test_dataloaders=test_dataset)
with open(f'./results/{MODEL_NAME}_results.pickle', 'wb') as file:
pickle.dump(res, file)
logits = list(chain(*(r.exp().argmax(axis=1).tolist() for r in res)))
ground = list(chain(*(tmp.y.tolist() for tmp in test_dataset)))
classification_report = ClRp(ground,
logits,
target_names=['coniferous',
'decidious',
'snag',
'dead tree'],
digits=3)
print(classification_report)
with open(f'./results/{MODEL_NAME}_results.txt', 'w') as file:
file.writelines(classification_report)
file.writelines(best_model)
CMDisplay(metrics.confusion_matrix(ground, logits)).plot()
plt.savefig(f'./results/{MODEL_NAME}_results.eps', bbox_inches='tight')
| 34.096591 | 94 | 0.622396 | import pickle
from myutils import load_dataset, call_home, CMDisplay
from itertools import chain
import torch
import torch.nn.functional as F
from torch.nn import Linear as Lin
from torch.optim import Adam
from torch_geometric.nn import XConv, fps, global_mean_pool
import pytorch_lightning as pl
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning.callbacks.early_stopping import EarlyStopping
from sklearn.metrics import classification_report as ClRp
import sklearn.metrics as metrics
import matplotlib.pyplot as plt
class PointCNN(pl.LightningModule):
def __init__(self, numfeatures=1):
super().__init__()
self.learning_rate = 1e-3
self.train_acc = pl.metrics.Accuracy()
self.val_acc = pl.metrics.Accuracy()
self.test_acc = pl.metrics.Accuracy()
self.numfeatures = numfeatures
self.conv1 = XConv(self.numfeatures, 48, dim=3,
kernel_size=8, hidden_channels=32)
self.conv2 = XConv(48, 96, dim=3, kernel_size=12,
hidden_channels=64, dilation=2)
self.conv3 = XConv(96, 192, dim=3, kernel_size=16,
hidden_channels=128, dilation=2)
self.conv4 = XConv(192, 384, dim=3, kernel_size=16,
hidden_channels=256, dilation=2)
self.lin1 = Lin(389, 256)
self.lin2 = Lin(256, 128)
self.lin3 = Lin(128, 4)
def forward(self, data):
pos, batch = data.pos, data.batch
x = data.x if self.numfeatures else None
ms_feat = data.feat
x = F.relu(self.conv1(x, pos, batch))
idx = fps(pos, batch, ratio=0.375)
x, pos, batch = x[idx], pos[idx], batch[idx]
x = F.relu(self.conv2(x, pos, batch))
idx = fps(pos, batch, ratio=0.334)
x, pos, batch = x[idx], pos[idx], batch[idx]
x = F.relu(self.conv3(x, pos, batch))
x = F.relu(self.conv4(x, pos, batch))
x = global_mean_pool(x, batch)
x = torch.cat((x, ms_feat), dim=1)
x = F.relu(self.lin1(x))
x = F.relu(self.lin2(x))
x = F.dropout(x, p=0.5, training=self.training)
x = self.lin3(x)
return F.log_softmax(x, dim=-1)
def training_step(self, data, batch_idx):
y = data.y
out = self(data)
loss = F.nll_loss(out, y)
self.train_acc(out, y)
self.log('train_acc', self.train_acc, on_step=True, on_epoch=True)
self.log('train_loss', loss)
return loss
def validation_step(self, data, batch_idx):
y = data.y
out = self(data)
val_loss = F.nll_loss(out, y)
self.val_acc(out, y)
self.log('val_acc', self.val_acc, on_step=True, on_epoch=True)
self.log('val_loss', val_loss)
return val_loss
def test_step(self, data, batch_idx):
y = data.y
out = self(data)
test_loss = F.nll_loss(out, y)
self.test_acc(out, y)
self.log('test_loss', test_loss)
return out
def test_step_end(self, outs):
return outs
def test_epoch_end(self, outs):
global res
res = outs
return outs
def configure_optimizers(self):
optimizer = Adam(self.parameters(), lr=self.learning_rate)
return optimizer
MODEL_NAME = 'geom+i+ms'
train_dataset = load_dataset(
'../../0_data/hdf/train.h5', batch_size=16, shuffle=True, load_ms=True)
val_dataset = load_dataset(
'../../0_data/hdf/val.h5', batch_size=16, load_ms=True)
test_dataset = load_dataset(
'../../0_data/hdf/test.h5', batch_size=16, load_ms=True)
checkpoint_callback = ModelCheckpoint(monitor='val_loss', save_top_k=1)
trainer = pl.Trainer(gpus=1,
progress_bar_refresh_rate=1,
callbacks=[EarlyStopping(
monitor='val_loss', patience=20)],
checkpoint_callback=checkpoint_callback)
model = PointCNN()
trainer.fit(model, train_dataset, val_dataset)
best_model = checkpoint_callback.best_model_path
print(best_model)
call_home(f'Done learning {MODEL_NAME}: ' + best_model)
res = []
model = PointCNN.load_from_checkpoint(checkpoint_path=best_model)
trainer.test(model, test_dataloaders=test_dataset)
with open(f'./results/{MODEL_NAME}_results.pickle', 'wb') as file:
pickle.dump(res, file)
logits = list(chain(*(r.exp().argmax(axis=1).tolist() for r in res)))
ground = list(chain(*(tmp.y.tolist() for tmp in test_dataset)))
classification_report = ClRp(ground,
logits,
target_names=['coniferous',
'decidious',
'snag',
'dead tree'],
digits=3)
print(classification_report)
with open(f'./results/{MODEL_NAME}_results.txt', 'w') as file:
file.writelines(classification_report)
file.writelines(best_model)
CMDisplay(metrics.confusion_matrix(ground, logits)).plot()
plt.savefig(f'./results/{MODEL_NAME}_results.eps', bbox_inches='tight')
| true | true |
f72412097759e980fabb5c2844782e9a1daa33f2 | 3,307 | py | Python | example_distributed.py | EricSteinberger/Neural-Fictitous-Self-Play | 4462845f159554b9a9812e3af43d07c2b5cd2059 | [
"MIT"
] | 52 | 2019-06-22T10:21:17.000Z | 2022-03-29T04:57:12.000Z | example_distributed.py | jsanderink/tue | 4462845f159554b9a9812e3af43d07c2b5cd2059 | [
"MIT"
] | 1 | 2020-04-24T22:45:22.000Z | 2020-04-30T02:20:37.000Z | example_distributed.py | jsanderink/tue | 4462845f159554b9a9812e3af43d07c2b5cd2059 | [
"MIT"
] | 14 | 2019-06-27T02:02:31.000Z | 2022-01-05T13:14:12.000Z | from NFSP.TrainingProfile import TrainingProfile
from NFSP.workers.driver.Driver import Driver
from PokerRL import DiscretizedNLHoldem, Poker
from PokerRL.eval.lbr import LBRArgs
from PokerRL.game import bet_sets
if __name__ == '__main__':
# Agent processes: 1 Chief, 2 Parameter-servers, 11 LAs
# Eval processes: 1 Master, 8 Workers
# Leave 1 Docker etc.
# ==> 24 cores needed.
# You can run this on e.g. a m5.12xlarge machine with hyper-threading disabled (effectively 24 cores and threads).
# You can also parallelized further since only 5% of the execution time was spent syncing.
N_WORKERS = 16
N_LBR_WORKERS = 3
ctrl = Driver(t_prof=TrainingProfile(name="NFSP_DISTRIBUTED_LH_RNN",
DISTRIBUTED=True,
n_learner_actor_workers=N_WORKERS,
nn_type="recurrent",
game_cls=DiscretizedNLHoldem,
agent_bet_set=bet_sets.B_5,
use_pre_layers_br=True,
use_pre_layers_avg=True,
n_units_final_br=64,
n_units_final_avg=64,
n_merge_and_table_layer_units_br=64,
n_merge_and_table_layer_units_avg=64,
rnn_units_br=64,
rnn_units_avg=64,
n_cards_state_units_br=128,
n_cards_state_units_avg=128,
cir_buf_size_each_la=6e5 / N_WORKERS,
res_buf_size_each_la=2e6,
n_envs=128,
n_steps_per_iter_per_la=128,
lr_br=0.1,
lr_avg=0.01,
mini_batch_size_br_per_la=64,
mini_batch_size_avg_per_la=64,
n_br_updates_per_iter=1,
n_avg_updates_per_iter=1,
eps_start=0.08,
eps_const=0.007,
eps_exponent=0.5,
eps_min=0.0,
lbr_args=LBRArgs(
lbr_bet_set=bet_sets.B_5,
n_lbr_hands_per_seat=15000,
lbr_check_to_round=Poker.TURN,
n_parallel_lbr_workers=N_LBR_WORKERS,
use_gpu_for_batch_eval=False,
DISTRIBUTED=True,
)
),
eval_methods={"lbr": 25000},
n_iterations=None)
ctrl.run()
| 47.242857 | 118 | 0.408225 | from NFSP.TrainingProfile import TrainingProfile
from NFSP.workers.driver.Driver import Driver
from PokerRL import DiscretizedNLHoldem, Poker
from PokerRL.eval.lbr import LBRArgs
from PokerRL.game import bet_sets
if __name__ == '__main__':
N_WORKERS = 16
N_LBR_WORKERS = 3
ctrl = Driver(t_prof=TrainingProfile(name="NFSP_DISTRIBUTED_LH_RNN",
DISTRIBUTED=True,
n_learner_actor_workers=N_WORKERS,
nn_type="recurrent",
game_cls=DiscretizedNLHoldem,
agent_bet_set=bet_sets.B_5,
use_pre_layers_br=True,
use_pre_layers_avg=True,
n_units_final_br=64,
n_units_final_avg=64,
n_merge_and_table_layer_units_br=64,
n_merge_and_table_layer_units_avg=64,
rnn_units_br=64,
rnn_units_avg=64,
n_cards_state_units_br=128,
n_cards_state_units_avg=128,
cir_buf_size_each_la=6e5 / N_WORKERS,
res_buf_size_each_la=2e6,
n_envs=128,
n_steps_per_iter_per_la=128,
lr_br=0.1,
lr_avg=0.01,
mini_batch_size_br_per_la=64,
mini_batch_size_avg_per_la=64,
n_br_updates_per_iter=1,
n_avg_updates_per_iter=1,
eps_start=0.08,
eps_const=0.007,
eps_exponent=0.5,
eps_min=0.0,
lbr_args=LBRArgs(
lbr_bet_set=bet_sets.B_5,
n_lbr_hands_per_seat=15000,
lbr_check_to_round=Poker.TURN,
n_parallel_lbr_workers=N_LBR_WORKERS,
use_gpu_for_batch_eval=False,
DISTRIBUTED=True,
)
),
eval_methods={"lbr": 25000},
n_iterations=None)
ctrl.run()
| true | true |
f724126c40a1dd924cae490241b2e28d84ddf8e2 | 408 | py | Python | signalrcore/messages/base_message.py | smiddle/signalrcore | 7b5f0fb6913ba2dacbe514724b62f1eab7097d91 | [
"MIT"
] | null | null | null | signalrcore/messages/base_message.py | smiddle/signalrcore | 7b5f0fb6913ba2dacbe514724b62f1eab7097d91 | [
"MIT"
] | null | null | null | signalrcore/messages/base_message.py | smiddle/signalrcore | 7b5f0fb6913ba2dacbe514724b62f1eab7097d91 | [
"MIT"
] | null | null | null | from .message_type import MessageType
class BaseMessage(object):
def __init__(self, message_type):
self.type = MessageType(message_type)
class BaseHeadersMessage(BaseMessage):
"""
All messages expct ping can carry aditional headers
"""
def __init__(self, message_type, headers):
super(BaseHeadersMessage, self).__init__(message_type)
self.headers = headers
| 27.2 | 62 | 0.710784 | from .message_type import MessageType
class BaseMessage(object):
def __init__(self, message_type):
self.type = MessageType(message_type)
class BaseHeadersMessage(BaseMessage):
def __init__(self, message_type, headers):
super(BaseHeadersMessage, self).__init__(message_type)
self.headers = headers
| true | true |
f7241292437ffed3ee5268d11c2657d9fb0bc4ff | 27,712 | py | Python | ow_lander/scripts/arm_action_servers.py | thewindsofwinter/ow_simulator | a92698a1cc7e80ac6aec17d74bcbac81edf726f7 | [
"NASA-1.3"
] | null | null | null | ow_lander/scripts/arm_action_servers.py | thewindsofwinter/ow_simulator | a92698a1cc7e80ac6aec17d74bcbac81edf726f7 | [
"NASA-1.3"
] | null | null | null | ow_lander/scripts/arm_action_servers.py | thewindsofwinter/ow_simulator | a92698a1cc7e80ac6aec17d74bcbac81edf726f7 | [
"NASA-1.3"
] | null | null | null | #!/usr/bin/env python3
# The Notices and Disclaimers for Ocean Worlds Autonomy Testbed for Exploration
# Research and Simulation can be found in README.md in the root directory of
# this repository.
import rospy
import actionlib
from ow_lander.msg import *
from LanderInterface import MoveItInterface
from LanderInterface import LinkStateSubscriber
from trajectory_async_execution import TrajectoryAsyncExecuter
import all_action_trajectories
from moveit_msgs.msg import RobotTrajectory
from controller_manager_msgs.srv import SwitchController
from ground_detection import GroundDetector
from actionlib_msgs.msg import GoalStatus
from geometry_msgs.msg import Point
class UnstowActionServer(object):
def __init__(self, name):
self._action_name = name
self._server = actionlib.SimpleActionServer(self._action_name,
ow_lander.msg.UnstowAction,
execute_cb=self.on_unstow_action,
auto_start=False)
self._server.start()
# Action Feedback/Result
self._fdbk = ow_lander.msg.UnstowFeedback()
self._result = ow_lander.msg.UnstowResult()
self._current_link_state = LinkStateSubscriber()
self._interface = MoveItInterface()
self._timeout = 0.0
self.trajectory_async_executer = TrajectoryAsyncExecuter()
self.trajectory_async_executer.connect("arm_controller")
def _update_feedback(self):
self._ls = self._current_link_state._link_value
self._fdbk.current.x = self._ls.x
self._fdbk.current.y = self._ls.y
self._fdbk.current.z = self._ls.z
self._server.publish_feedback(self._fdbk)
def _update_motion(self):
rospy.loginfo("Unstow arm activity started")
goal = self._interface.move_arm.get_current_pose().pose
goal = self._interface.move_arm.get_named_target_values("arm_unstowed")
self._interface.move_arm.set_joint_value_target(goal)
_, plan, _, _ = self._interface.move_arm.plan()
if len(plan.joint_trajectory.points) < 1:
return
else:
n_points = len(plan.joint_trajectory.points)
start_time = plan.joint_trajectory.points[0].time_from_start
end_time = plan.joint_trajectory.points[n_points-1].time_from_start
self._timeout = end_time - start_time
return plan
def on_unstow_action(self, goal):
plan = self._update_motion()
if plan is None:
self._server.set_aborted(self._result)
return
success = False
self.trajectory_async_executer.execute(plan.joint_trajectory,
done_cb=None,
active_cb=None,
feedback_cb=self.trajectory_async_executer.stop_arm_if_fault)
# Record start time
start_time = rospy.get_time()
def now_from_start(start):
# return rospy.get_time() - start
return rospy.Duration(secs=rospy.get_time() - start)
while ((now_from_start(start_time) < self._timeout)):
self._update_feedback()
success = self.trajectory_async_executer.success(
) and self.trajectory_async_executer.wait()
if success:
self._result.final.x = self._fdbk.current.x
self._result.final.y = self._fdbk.current.y
self._result.final.z = self._fdbk.current.z
rospy.loginfo('%s: Succeeded' % self._action_name)
self._server.set_succeeded(self._result)
else:
rospy.loginfo('%s: Failed' % self._action_name)
self._server.set_aborted(self._result)
class StowActionServer(object):
    """Action server that moves the arm to the "arm_stowed" named pose.

    Publishes the lander end-effector position as feedback while the
    trajectory executes and reports the final position in the result.
    """

    def __init__(self, name):
        self._action_name = name
        self._server = actionlib.SimpleActionServer(self._action_name,
                                                    ow_lander.msg.StowAction,
                                                    execute_cb=self.on_stow_action,
                                                    auto_start=False)
        self._server.start()
        # Action Feedback/Result
        self._fdbk = ow_lander.msg.StowFeedback()
        self._result = ow_lander.msg.StowResult()
        self._current_link_state = LinkStateSubscriber()
        self._interface = MoveItInterface()
        self._timeout = 0.0  # duration of the currently planned trajectory
        self.trajectory_async_executer = TrajectoryAsyncExecuter()
        self.trajectory_async_executer.connect("arm_controller")

    def _update_feedback(self):
        """Publish the current end-effector position as action feedback."""
        self._ls = self._current_link_state._link_value
        self._fdbk.current.x = self._ls.x
        self._fdbk.current.y = self._ls.y
        self._fdbk.current.z = self._ls.z
        self._server.publish_feedback(self._fdbk)

    def _update_motion(self):
        """Plan a joint trajectory to the "arm_stowed" named target.

        Returns the MoveIt plan, or None when planning produced no points.
        Records the plan's duration in self._timeout as a side effect.
        """
        rospy.loginfo("Stow arm activity started")
        # Bug fix: a get_current_pose() result was previously assigned here
        # and immediately overwritten — dead call removed.
        goal = self._interface.move_arm.get_named_target_values("arm_stowed")
        self._interface.move_arm.set_joint_value_target(goal)
        _, plan, _, _ = self._interface.move_arm.plan()
        if len(plan.joint_trajectory.points) < 1:
            return None
        points = plan.joint_trajectory.points
        # Trajectory duration = span between the first and last waypoints.
        self._timeout = points[-1].time_from_start - points[0].time_from_start
        return plan

    def on_stow_action(self, goal):
        """Execute the stow goal: plan, run asynchronously, report result."""
        plan = self._update_motion()
        if plan is None:
            self._server.set_aborted(self._result)
            return
        self.trajectory_async_executer.execute(
            plan.joint_trajectory,
            done_cb=None,
            active_cb=None,
            feedback_cb=self.trajectory_async_executer.stop_arm_if_fault)

        # Publish feedback at a fixed rate (instead of busy-waiting, which
        # floods the feedback topic) until the planned duration has elapsed.
        start_time = rospy.get_time()

        def now_from_start(start):
            return rospy.Duration(secs=rospy.get_time() - start)

        rate = rospy.Rate(100)
        while now_from_start(start_time) < self._timeout:
            self._update_feedback()
            rate.sleep()

        success = (self.trajectory_async_executer.success()
                   and self.trajectory_async_executer.wait())

        if success:
            self._result.final.x = self._fdbk.current.x
            self._result.final.y = self._fdbk.current.y
            self._result.final.z = self._fdbk.current.z
            rospy.loginfo('%s: Succeeded' % self._action_name)
            self._server.set_succeeded(self._result)
        else:
            rospy.loginfo('%s: Failed' % self._action_name)
            self._server.set_aborted(self._result)
class GrindActionServer(object):
    """Action server that executes a grinding trajectory with the grinder.

    Switches from the arm controller to the grinder controller before
    execution and restores the arm controller afterwards, whether or not
    the trajectory succeeded.
    """

    def __init__(self, name):
        self._action_name = name
        self._server = actionlib.SimpleActionServer(self._action_name,
                                                    ow_lander.msg.GrindAction,
                                                    execute_cb=self.on_Grind_action,
                                                    auto_start=False)
        self._server.start()
        # Action Feedback/Result
        self._fdbk = ow_lander.msg.GrindFeedback()
        self._result = ow_lander.msg.GrindResult()
        self._current_link_state = LinkStateSubscriber()
        self._interface = MoveItInterface()
        self._timeout = 0.0  # duration of the currently planned trajectory
        self.trajectory_async_executer = TrajectoryAsyncExecuter()
        self.trajectory_async_executer.connect("grinder_controller")
        self.current_traj = RobotTrajectory()

    def _update_feedback(self):
        """Publish the current end-effector position as action feedback."""
        self._ls = self._current_link_state._link_value
        self._fdbk.current.x = self._ls.x
        self._fdbk.current.y = self._ls.y
        self._fdbk.current.z = self._ls.z
        self._server.publish_feedback(self._fdbk)

    def switch_controllers(self, start_controller, stop_controller):
        """Start one ros_control controller and stop another.

        Returns the SwitchController service response (truthy on success),
        or False when the service call raised.
        """
        rospy.wait_for_service('/controller_manager/switch_controller')
        success = False
        try:
            switch_controller = rospy.ServiceProxy(
                '/controller_manager/switch_controller', SwitchController)
            success = switch_controller(
                [start_controller], [stop_controller], 2, False, 1.0)
        except rospy.ServiceException as e:
            rospy.loginfo("switch_controllers error: %s" % e)
        finally:
            # This sleep is a workaround for "start point deviates from current robot
            # state" error on dig_circular trajectory execution.
            rospy.sleep(0.2)
        return success

    def _update_motion(self, goal):
        """Plan the grind trajectory for *goal*.

        Leaves self.current_traj == False when planning failed; otherwise
        records the trajectory duration in self._timeout.
        """
        rospy.loginfo("Grind activity started")
        self.current_traj = all_action_trajectories.grind(self._interface.move_grinder,
                                                          self._interface.robot,
                                                          self._interface.moveit_fk, goal)
        if self.current_traj is not False:
            points = self.current_traj.joint_trajectory.points
            self._timeout = points[-1].time_from_start - points[0].time_from_start

    def on_Grind_action(self, goal):
        """Execute the grind goal: switch controllers, run, switch back."""
        self._update_motion(goal)
        if self.current_traj is False:
            self._server.set_aborted(self._result)
            return

        if not self.switch_controllers('grinder_controller', 'arm_controller'):
            # Bug fix: this path previously returned a (False, msg) tuple,
            # which actionlib ignores, leaving the goal unresolved forever.
            rospy.loginfo('%s: Failed switching controllers' % self._action_name)
            self._server.set_aborted(self._result)
            return

        self.trajectory_async_executer.execute(
            self.current_traj.joint_trajectory,
            done_cb=None,
            active_cb=None,
            feedback_cb=self.trajectory_async_executer.stop_arm_if_fault)

        # Publish feedback at a fixed rate (instead of busy-waiting, which
        # floods the feedback topic) until the planned duration has elapsed.
        start_time = rospy.get_time()

        def now_from_start(start):
            return rospy.Duration(secs=rospy.get_time() - start)

        rate = rospy.Rate(100)
        while now_from_start(start_time) < self._timeout:
            self._update_feedback()
            rate.sleep()

        success = (self.trajectory_async_executer.success()
                   and self.trajectory_async_executer.wait())

        # Always restore the arm controller, whether or not execution succeeded.
        switched_back = self.switch_controllers(
            'arm_controller', 'grinder_controller')

        if success and switched_back:
            self._result.final.x = self._fdbk.current.x
            self._result.final.y = self._fdbk.current.y
            self._result.final.z = self._fdbk.current.z
            rospy.loginfo('%s: Succeeded' % self._action_name)
            self._server.set_succeeded(self._result)
        else:
            # Bug fix: the failure path previously logged 'Succeeded' after
            # aborting; now it reports failure consistently.
            rospy.loginfo('%s: Failed' % self._action_name)
            self._server.set_aborted(self._result)
class GuardedMoveActionServer(object):
    """Action server that lowers the arm until ground contact is detected.

    Executes a pre-planned guarded-move trajectory while monitoring a
    GroundDetector from the trajectory feedback callback; execution is
    preempted as soon as contact is detected after the pre-guarded-move
    portion of the plan.  The detection outcome is published on
    /guarded_move_result.
    """

    def __init__(self, name):
        self._action_name = name
        self._server = actionlib.SimpleActionServer(self._action_name,
                                                    ow_lander.msg.GuardedMoveAction,
                                                    execute_cb=self.on_guarded_move_action,
                                                    auto_start=False)
        self._server.start()
        # Action Feedback/Result
        self._fdbk = ow_lander.msg.GuardedMoveFeedback()
        self._result = ow_lander.msg.GuardedMoveResult()
        self._current_link_state = LinkStateSubscriber()
        self._interface = MoveItInterface()
        # Duration of the currently planned trajectory.
        self._timeout = 0.0
        # Fraction of the planned duration elapsed so far (0..1), updated in
        # the execution loop and read by handle_guarded_move_feedback.
        self._estimated_plan_fraction_completed = 0.0
        # ratio between guarded pre-guarded move trajectory and the whole trajectory
        self._guarded_move_plan_ratio = 0.0
        self.trajectory_async_executer = TrajectoryAsyncExecuter()
        self.trajectory_async_executer.connect("arm_controller")
        self.guarded_move_traj = RobotTrajectory()
        self.ground_detector = GroundDetector()
        self.pos = Point()
        self.guarded_move_pub = rospy.Publisher('/guarded_move_result', GuardedMoveFinalResult, queue_size=10)

    def handle_guarded_move_done(self, state, result):
        """Done-callback for trajectory execution.

        A PREEMPTED goal state means execution was stopped early by the
        feedback callback, i.e. ground contact was detected; publish the
        outcome (and ground position, if any) on /guarded_move_result.

        :type state: int
        :type result: FollowJointTrajectoryResult
        """
        ground_detected = state == GoalStatus.PREEMPTED
        ground_position = self.ground_detector.ground_position if ground_detected else Point()
        rospy.loginfo("Ground Detected ? {}".format(ground_detected))
        self.guarded_move_pub.publish(
            ground_detected, 'base_link', ground_position)

    def handle_guarded_move_feedback(self, feedback):
        """Feedback-callback: stop the arm once ground contact is detected.

        Detections that occur before the pre-guarded-move portion of the
        plan (plus a small timing tolerance) has completed are treated as
        spurious and reset.

        :type feedback: FollowJointTrajectoryFeedback
        """
        self.trajectory_async_executer.stop_arm_if_fault(feedback)

        # added to compensate for slower than arm movement tan planned
        execution_time_tollerance = 0.1
        if self.ground_detector.detect():
            if (self._estimated_plan_fraction_completed < self._guarded_move_plan_ratio
                    + execution_time_tollerance):
                self.ground_detector.reset()
            else:
                self.trajectory_async_executer.stop()

    def _update_feedback(self):
        """Publish the current end-effector position as action feedback."""
        self._ls = self._current_link_state._link_value
        self._fdbk.current.x = self._ls.x
        self._fdbk.current.y = self._ls.y
        self._fdbk.current.z = self._ls.z
        self._server.publish_feedback(self._fdbk)

    def _update_motion(self, goal):
        """Plan the guarded-move trajectory for *goal*.

        Leaves self.guarded_move_traj == False when planning failed;
        otherwise records the trajectory duration in self._timeout and the
        pre-guarded-move/whole-plan ratio in self._guarded_move_plan_ratio.
        """
        rospy.loginfo("Guarded move activity started")
        self.guarded_move_traj, self._guarded_move_plan_ratio = all_action_trajectories.guarded_move_plan(
            self._interface.move_arm,
            self._interface.robot,
            self._interface.moveit_fk, goal)
        if self.guarded_move_traj == False:
            return
        else:
            n_points = len(self.guarded_move_traj.joint_trajectory.points)
            start_time = self.guarded_move_traj.joint_trajectory.points[0].time_from_start
            end_time = self.guarded_move_traj.joint_trajectory.points[n_points-1].time_from_start
            self._timeout = end_time - start_time

    def on_guarded_move_action(self, goal):
        """Execute the guarded-move goal and report the contact position."""
        self._update_motion(goal)
        if self.guarded_move_traj == False:
            self._server.set_aborted(self._result)
            return

        success = False

        # detection
        self.ground_detector.reset()

        self.trajectory_async_executer.execute(self.guarded_move_traj.joint_trajectory,
                                               done_cb=self.handle_guarded_move_done,
                                               active_cb=None,
                                               feedback_cb=self.handle_guarded_move_feedback)
        # Record start time
        start_time = rospy.get_time()

        def now_from_start(start):
            return rospy.Duration(secs=rospy.get_time() - start)

        # Keep the elapsed-plan-fraction estimate fresh for the feedback
        # callback while the trajectory runs.
        while ((now_from_start(start_time) < self._timeout)):
            self._update_feedback()
            self._estimated_plan_fraction_completed = now_from_start(
                start_time)/self._timeout

        success = self.trajectory_async_executer.success(
        ) and self.trajectory_async_executer.wait()

        if success:
            # Report the detected ground position as the final position.
            self._result.final.x = self.ground_detector.ground_position.x
            self._result.final.y = self.ground_detector.ground_position.y
            self._result.final.z = self.ground_detector.ground_position.z
            self._result.success = True
            rospy.loginfo('%s: Succeeded' % self._action_name)
            self._server.set_succeeded(self._result)
        else:
            rospy.loginfo('%s: Failed' % self._action_name)
            self._server.set_aborted(self._result)
class DigCircularActionServer(object):
    """Action server that digs a circular (scoop) trench with the arm."""

    def __init__(self, name):
        self._action_name = name
        self._server = actionlib.SimpleActionServer(self._action_name,
                                                    ow_lander.msg.DigCircularAction,
                                                    execute_cb=self.on_DigCircular_action,
                                                    auto_start=False)
        self._server.start()
        # Action Feedback/Result
        self._fdbk = ow_lander.msg.DigCircularFeedback()
        self._result = ow_lander.msg.DigCircularResult()
        self._current_link_state = LinkStateSubscriber()
        self._interface = MoveItInterface()
        self._timeout = 0.0  # duration of the currently planned trajectory
        self.trajectory_async_executer = TrajectoryAsyncExecuter()
        self.trajectory_async_executer.connect("arm_controller")
        self.current_traj = RobotTrajectory()

    def switch_controllers(self, start_controller, stop_controller):
        """Start one ros_control controller and stop another.

        NOTE(review): currently unused by this server (the dig-circular
        trajectory runs on the arm controller); kept for API parity with
        GrindActionServer.
        """
        rospy.wait_for_service('/controller_manager/switch_controller')
        success = False
        try:
            switch_controller = rospy.ServiceProxy(
                '/controller_manager/switch_controller', SwitchController)
            success = switch_controller(
                [start_controller], [stop_controller], 2, False, 1.0)
        except rospy.ServiceException as e:
            rospy.loginfo("switch_controllers error: %s" % e)
        finally:
            # This sleep is a workaround for "start point deviates from current robot
            # state" error on dig_circular trajectory execution.
            rospy.sleep(0.2)
        return success

    def _update_feedback(self):
        """Publish the current end-effector position as action feedback."""
        self._ls = self._current_link_state._link_value
        self._fdbk.current.x = self._ls.x
        self._fdbk.current.y = self._ls.y
        self._fdbk.current.z = self._ls.z
        self._server.publish_feedback(self._fdbk)

    def _update_motion(self, goal):
        """Plan the dig-circular trajectory for *goal*.

        Leaves self.current_traj == False when planning failed; otherwise
        records the trajectory duration in self._timeout.
        """
        rospy.loginfo("DigCircular activity started")
        self.current_traj = all_action_trajectories.dig_circular(self._interface.move_arm,
                                                                 self._interface.move_limbs,
                                                                 self._interface.robot, self._interface.moveit_fk, goal)
        if self.current_traj is not False:
            points = self.current_traj.joint_trajectory.points
            self._timeout = points[-1].time_from_start - points[0].time_from_start

    def on_DigCircular_action(self, goal):
        """Execute the dig-circular goal and report the final position."""
        self._update_motion(goal)
        if self.current_traj is False:
            self._server.set_aborted(self._result)
            return

        self.trajectory_async_executer.execute(
            self.current_traj.joint_trajectory,
            done_cb=None,
            active_cb=None,
            feedback_cb=self.trajectory_async_executer.stop_arm_if_fault)

        # Publish feedback at a fixed rate (instead of busy-waiting, which
        # floods the feedback topic) until the planned duration has elapsed.
        start_time = rospy.get_time()

        def now_from_start(start):
            return rospy.Duration(secs=rospy.get_time() - start)

        rate = rospy.Rate(100)
        while now_from_start(start_time) < self._timeout:
            self._update_feedback()
            rate.sleep()

        success = (self.trajectory_async_executer.success()
                   and self.trajectory_async_executer.wait())

        if success:
            self._result.final.x = self._fdbk.current.x
            self._result.final.y = self._fdbk.current.y
            self._result.final.z = self._fdbk.current.z
            rospy.loginfo('%s: Succeeded' % self._action_name)
            self._server.set_succeeded(self._result)
        else:
            rospy.loginfo('%s: Failed' % self._action_name)
            self._server.set_aborted(self._result)
class DigLinearActionServer(object):
    """Action server that digs a linear trench with the arm."""

    def __init__(self, name):
        self._action_name = name
        self._server = actionlib.SimpleActionServer(self._action_name,
                                                    ow_lander.msg.DigLinearAction,
                                                    execute_cb=self.on_DigLinear_action,
                                                    auto_start=False)
        self._server.start()
        # Action Feedback/Result
        self._fdbk = ow_lander.msg.DigLinearFeedback()
        self._result = ow_lander.msg.DigLinearResult()
        self._current_link_state = LinkStateSubscriber()
        self._interface = MoveItInterface()
        self._timeout = 0.0  # duration of the currently planned trajectory
        self.trajectory_async_executer = TrajectoryAsyncExecuter()
        self.trajectory_async_executer.connect("arm_controller")
        self.current_traj = RobotTrajectory()

    def _update_feedback(self):
        """Publish the current end-effector position as action feedback."""
        self._ls = self._current_link_state._link_value
        self._fdbk.current.x = self._ls.x
        self._fdbk.current.y = self._ls.y
        self._fdbk.current.z = self._ls.z
        self._server.publish_feedback(self._fdbk)

    def _update_motion(self, goal):
        """Plan the dig-linear trajectory for *goal*.

        Leaves self.current_traj == False when planning failed; otherwise
        records the trajectory duration in self._timeout.
        """
        rospy.loginfo("DigLinear activity started")
        self.current_traj = all_action_trajectories.dig_linear(self._interface.move_arm,
                                                               self._interface.robot,
                                                               self._interface.moveit_fk, goal)
        if self.current_traj is not False:
            points = self.current_traj.joint_trajectory.points
            self._timeout = points[-1].time_from_start - points[0].time_from_start

    def on_DigLinear_action(self, goal):
        """Execute the dig-linear goal and report the final position."""
        self._update_motion(goal)
        if self.current_traj is False:
            self._server.set_aborted(self._result)
            return

        self.trajectory_async_executer.execute(
            self.current_traj.joint_trajectory,
            done_cb=None,
            active_cb=None,
            feedback_cb=self.trajectory_async_executer.stop_arm_if_fault)

        # Publish feedback at a fixed rate (instead of busy-waiting, which
        # floods the feedback topic) until the planned duration has elapsed.
        start_time = rospy.get_time()

        def now_from_start(start):
            return rospy.Duration(secs=rospy.get_time() - start)

        rate = rospy.Rate(100)
        while now_from_start(start_time) < self._timeout:
            self._update_feedback()
            rate.sleep()

        success = (self.trajectory_async_executer.success()
                   and self.trajectory_async_executer.wait())

        if success:
            self._result.final.x = self._fdbk.current.x
            self._result.final.y = self._fdbk.current.y
            self._result.final.z = self._fdbk.current.z
            rospy.loginfo('%s: Succeeded' % self._action_name)
            self._server.set_succeeded(self._result)
        else:
            rospy.loginfo('%s: Failed' % self._action_name)
            self._server.set_aborted(self._result)
class DeliverActionServer(object):
    """Action server that delivers the collected sample to the receptacle."""

    def __init__(self, name):
        self._action_name = name
        self._server = actionlib.SimpleActionServer(
            self._action_name, ow_lander.msg.DeliverAction, execute_cb=self.on_deliver_action, auto_start=False)
        self._server.start()
        # Action Feedback/Result
        # Bug fix: these were UnstowFeedback/UnstowResult (copy-paste from
        # UnstowActionServer); use the Deliver action's own message types.
        self._fdbk = ow_lander.msg.DeliverFeedback()
        self._result = ow_lander.msg.DeliverResult()
        self._current_link_state = LinkStateSubscriber()
        self._interface = MoveItInterface()
        self._timeout = 0.0  # duration of the currently planned trajectory
        self.trajectory_async_executer = TrajectoryAsyncExecuter()
        self.trajectory_async_executer.connect("arm_controller")
        self.deliver_sample_traj = RobotTrajectory()

    def _update_feedback(self):
        """Publish the current end-effector position as action feedback."""
        self._ls = self._current_link_state._link_value
        self._fdbk.current.x = self._ls.x
        self._fdbk.current.y = self._ls.y
        self._fdbk.current.z = self._ls.z
        self._server.publish_feedback(self._fdbk)

    def _update_motion(self, goal):
        """Plan the sample-delivery trajectory for *goal*.

        Leaves self.deliver_sample_traj == False when planning failed;
        otherwise records the trajectory duration in self._timeout.
        """
        rospy.loginfo("Deliver sample activity started")
        self.deliver_sample_traj = all_action_trajectories.deliver_sample(self._interface.move_arm,
                                                                          self._interface.robot,
                                                                          self._interface.moveit_fk, goal)
        if self.deliver_sample_traj is not False:
            points = self.deliver_sample_traj.joint_trajectory.points
            self._timeout = points[-1].time_from_start - points[0].time_from_start

    def on_deliver_action(self, goal):
        """Execute the deliver goal and report the final position."""
        self._update_motion(goal)
        if self.deliver_sample_traj is False:
            self._server.set_aborted(self._result)
            return

        self.trajectory_async_executer.execute(
            self.deliver_sample_traj.joint_trajectory,
            done_cb=None,
            active_cb=None,
            feedback_cb=self.trajectory_async_executer.stop_arm_if_fault)

        # Publish feedback at a fixed rate (instead of busy-waiting, which
        # floods the feedback topic) until the planned duration has elapsed.
        start_time = rospy.get_time()

        def now_from_start(start):
            return rospy.Duration(secs=rospy.get_time() - start)

        rate = rospy.Rate(100)
        while now_from_start(start_time) < self._timeout:
            self._update_feedback()
            rate.sleep()

        success = (self.trajectory_async_executer.success()
                   and self.trajectory_async_executer.wait())

        if success:
            self._result.final.x = self._fdbk.current.x
            self._result.final.y = self._fdbk.current.y
            self._result.final.z = self._fdbk.current.z
            rospy.loginfo('%s: Succeeded' % self._action_name)
            self._server.set_succeeded(self._result)
        else:
            rospy.loginfo('%s: Failed' % self._action_name)
            self._server.set_aborted(self._result)
if __name__ == '__main__':
    rospy.init_node('arm_action_servers')
    # Instantiate one action server per arm activity; each registers itself
    # with actionlib under the given name.  Kept in a list so the instances
    # stay alive for the lifetime of the node.
    _servers = [
        UnstowActionServer("Unstow"),
        StowActionServer("Stow"),
        GrindActionServer("Grind"),
        GuardedMoveActionServer("GuardedMove"),
        DigCircularActionServer("DigCircular"),
        DigLinearActionServer("DigLinear"),
        DeliverActionServer("Deliver"),
    ]
    rospy.spin()
| 42.243902 | 120 | 0.612081 |
import rospy
import actionlib
from ow_lander.msg import *
from LanderInterface import MoveItInterface
from LanderInterface import LinkStateSubscriber
from trajectory_async_execution import TrajectoryAsyncExecuter
import all_action_trajectories
from moveit_msgs.msg import RobotTrajectory
from controller_manager_msgs.srv import SwitchController
from ground_detection import GroundDetector
from actionlib_msgs.msg import GoalStatus
from geometry_msgs.msg import Point
class UnstowActionServer(object):
    """Action server that moves the lander arm to the "arm_unstowed" pose.

    A trajectory to the named MoveIt target is planned and executed
    asynchronously on the arm_controller; the end-effector position is
    published as feedback while the trajectory runs.
    """

    def __init__(self, name):
        self._action_name = name
        # auto_start=False: start() is called explicitly after wiring up.
        self._server = actionlib.SimpleActionServer(self._action_name,
                                                    ow_lander.msg.UnstowAction,
                                                    execute_cb=self.on_unstow_action,
                                                    auto_start=False)
        self._server.start()
        self._fdbk = ow_lander.msg.UnstowFeedback()
        self._result = ow_lander.msg.UnstowResult()
        self._current_link_state = LinkStateSubscriber()
        self._interface = MoveItInterface()
        self._timeout = 0.0  # planned trajectory duration; set by _update_motion
        self.trajectory_async_executer = TrajectoryAsyncExecuter()
        self.trajectory_async_executer.connect("arm_controller")

    def _update_feedback(self):
        # Publish the current end-effector position as action feedback.
        self._ls = self._current_link_state._link_value
        self._fdbk.current.x = self._ls.x
        self._fdbk.current.y = self._ls.y
        self._fdbk.current.z = self._ls.z
        self._server.publish_feedback(self._fdbk)

    def _update_motion(self):
        """Plan a trajectory to "arm_unstowed".

        Returns the MoveIt plan, or None when planning yielded an empty
        trajectory. Side effect: sets self._timeout to the plan duration.
        """
        rospy.loginfo("Unstow arm activity started")
        goal = self._interface.move_arm.get_current_pose().pose
        # NOTE(review): the pose fetched above is immediately overwritten
        # by the named-target lookup below — looks like dead code; confirm.
        goal = self._interface.move_arm.get_named_target_values("arm_unstowed")
        self._interface.move_arm.set_joint_value_target(goal)
        _, plan, _, _ = self._interface.move_arm.plan()
        if len(plan.joint_trajectory.points) < 1:
            return
        else:
            n_points = len(plan.joint_trajectory.points)
            start_time = plan.joint_trajectory.points[0].time_from_start
            end_time = plan.joint_trajectory.points[n_points-1].time_from_start
            self._timeout = end_time - start_time
            return plan

    def on_unstow_action(self, goal):
        """Execute callback: plan, execute and report the unstow motion."""
        plan = self._update_motion()
        if plan is None:
            self._server.set_aborted(self._result)
            return
        success = False
        self.trajectory_async_executer.execute(plan.joint_trajectory,
                                               done_cb=None,
                                               active_cb=None,
                                               feedback_cb=self.trajectory_async_executer.stop_arm_if_fault)
        start_time = rospy.get_time()

        def now_from_start(start):
            # Elapsed time as a Duration, comparable with self._timeout.
            return rospy.Duration(secs=rospy.get_time() - start)

        # Publish feedback for the planned trajectory duration.
        while ((now_from_start(start_time) < self._timeout)):
            self._update_feedback()

        success = self.trajectory_async_executer.success(
        ) and self.trajectory_async_executer.wait()
        if success:
            self._result.final.x = self._fdbk.current.x
            self._result.final.y = self._fdbk.current.y
            self._result.final.z = self._fdbk.current.z
            rospy.loginfo('%s: Succeeded' % self._action_name)
            self._server.set_succeeded(self._result)
        else:
            rospy.loginfo('%s: Failed' % self._action_name)
            self._server.set_aborted(self._result)
class StowActionServer(object):
    """Action server that moves the lander arm to the "arm_stowed" pose.

    Mirrors UnstowActionServer: plans to the named MoveIt target and
    executes asynchronously on the arm_controller while publishing the
    end-effector position as feedback.
    """

    def __init__(self, name):
        self._action_name = name
        self._server = actionlib.SimpleActionServer(self._action_name,
                                                    ow_lander.msg.StowAction,
                                                    execute_cb=self.on_stow_action,
                                                    auto_start=False)
        self._server.start()
        self._fdbk = ow_lander.msg.StowFeedback()
        self._result = ow_lander.msg.StowResult()
        self._current_link_state = LinkStateSubscriber()
        self._interface = MoveItInterface()
        self._timeout = 0.0  # planned trajectory duration
        self.trajectory_async_executer = TrajectoryAsyncExecuter()
        self.trajectory_async_executer.connect("arm_controller")

    def _update_feedback(self):
        # Publish the current end-effector position as action feedback.
        self._ls = self._current_link_state._link_value
        self._fdbk.current.x = self._ls.x
        self._fdbk.current.y = self._ls.y
        self._fdbk.current.z = self._ls.z
        self._server.publish_feedback(self._fdbk)

    def _update_motion(self):
        """Plan a trajectory to "arm_stowed"; returns the plan or None.

        Side effect: sets self._timeout to the plan duration.
        """
        rospy.loginfo("Stow arm activity started")
        goal = self._interface.move_arm.get_current_pose().pose
        # NOTE(review): pose fetched above is immediately overwritten —
        # apparent dead code; confirm.
        goal = self._interface.move_arm.get_named_target_values("arm_stowed")
        self._interface.move_arm.set_joint_value_target(goal)
        _, plan, _, _ = self._interface.move_arm.plan()
        if len(plan.joint_trajectory.points) < 1:
            return
        else:
            n_points = len(plan.joint_trajectory.points)
            start_time = plan.joint_trajectory.points[0].time_from_start
            end_time = plan.joint_trajectory.points[n_points-1].time_from_start
            self._timeout = end_time - start_time
            return plan

    def on_stow_action(self, goal):
        """Execute callback: plan, execute and report the stow motion."""
        plan = self._update_motion()
        if plan is None:
            self._server.set_aborted(self._result)
            return
        success = False
        self.trajectory_async_executer.execute(plan.joint_trajectory,
                                               done_cb=None,
                                               active_cb=None,
                                               feedback_cb=self.trajectory_async_executer.stop_arm_if_fault)
        start_time = rospy.get_time()

        def now_from_start(start):
            # Elapsed time as a Duration, comparable with self._timeout.
            return rospy.Duration(secs=rospy.get_time() - start)

        # Publish feedback for the planned trajectory duration.
        while ((now_from_start(start_time) < self._timeout)):
            self._update_feedback()

        success = self.trajectory_async_executer.success(
        ) and self.trajectory_async_executer.wait()
        if success:
            self._result.final.x = self._fdbk.current.x
            self._result.final.y = self._fdbk.current.y
            self._result.final.z = self._fdbk.current.z
            rospy.loginfo('%s: Succeeded' % self._action_name)
            self._server.set_succeeded(self._result)
        else:
            rospy.loginfo('%s: Failed' % self._action_name)
            self._server.set_aborted(self._result)
class GrindActionServer(object):
    """Action server that runs the grinder tool over the terrain.

    Unlike the other arm activities this one temporarily switches the
    hardware from arm_controller to grinder_controller for the motion and
    switches back afterwards.
    """

    def __init__(self, name):
        self._action_name = name
        self._server = actionlib.SimpleActionServer(self._action_name,
                                                    ow_lander.msg.GrindAction,
                                                    execute_cb=self.on_Grind_action,
                                                    auto_start=False)
        self._server.start()
        self._fdbk = ow_lander.msg.GrindFeedback()
        self._result = ow_lander.msg.GrindResult()
        self._current_link_state = LinkStateSubscriber()
        self._interface = MoveItInterface()
        self._timeout = 0.0  # planned trajectory duration
        self.trajectory_async_executer = TrajectoryAsyncExecuter()
        self.trajectory_async_executer.connect("grinder_controller")
        self.current_traj = RobotTrajectory()

    def _update_feedback(self):
        # Publish the current end-effector position as action feedback.
        self._ls = self._current_link_state._link_value
        self._fdbk.current.x = self._ls.x
        self._fdbk.current.y = self._ls.y
        self._fdbk.current.z = self._ls.z
        self._server.publish_feedback(self._fdbk)

    def switch_controllers(self, start_controller, stop_controller):
        """Start one ros_control controller and stop another.

        Returns the service response on success, or False when the call
        raised a ServiceException.
        """
        rospy.wait_for_service('/controller_manager/switch_controller')
        success = False
        try:
            switch_controller = rospy.ServiceProxy(
                '/controller_manager/switch_controller', SwitchController)
            success = switch_controller(
                [start_controller], [stop_controller], 2, False, 1.0)
        except rospy.ServiceException as e:
            rospy.loginfo("switch_controllers error: %s" % e)
        finally:
            # Settle delay after switching; the original (truncated) comment
            # says it avoids a controller "not in running
            # state" error on dig_circular trajectory execution.
            rospy.sleep(0.2)
        return success

    def _update_motion(self, goal):
        """Generate the grind trajectory for `goal`; sets self._timeout."""
        rospy.loginfo("Grind activity started")
        self.current_traj = all_action_trajectories.grind(self._interface.move_grinder,
                                                          self._interface.robot,
                                                          self._interface.moveit_fk, goal)
        if self.current_traj == False:
            return
        else:
            n_points = len(self.current_traj.joint_trajectory.points)
            start_time = self.current_traj.joint_trajectory.points[0].time_from_start
            end_time = self.current_traj.joint_trajectory.points[n_points -
                                                                 1].time_from_start
            self._timeout = (end_time - start_time)

    def on_Grind_action(self, goal):
        """Execute callback: switch to grinder control, run the grind
        trajectory, then restore arm control and report the result."""
        self._update_motion(goal)
        if self.current_traj == False:
            self._server.set_aborted(self._result)
            return
        success = False
        switch_success = self.switch_controllers(
            'grinder_controller', 'arm_controller')
        if not switch_success:
            return False, "Failed switching controllers"
        self.trajectory_async_executer.execute(self.current_traj.joint_trajectory,
                                               done_cb=None,
                                               active_cb=None,
                                               feedback_cb=self.trajectory_async_executer.stop_arm_if_fault)
        start_time = rospy.get_time()

        def now_from_start(start):
            # Elapsed time as a Duration, comparable with self._timeout.
            return rospy.Duration(secs=rospy.get_time() - start)

        # Publish feedback for the planned trajectory duration.
        while ((now_from_start(start_time) < self._timeout)):
            self._update_feedback()

        success = self.trajectory_async_executer.success(
        ) and self.trajectory_async_executer.wait()
        if success:
            self._result.final.x = self._fdbk.current.x
            self._result.final.y = self._fdbk.current.y
            self._result.final.z = self._fdbk.current.z
            switch_success = self.switch_controllers(
                'arm_controller', 'grinder_controller')
            if not switch_success:
                return False, "Failed Switching Controllers"
            rospy.loginfo('%s: Succeeded' % self._action_name)
            self._server.set_succeeded(self._result)
        else:
            rospy.loginfo('%s: Failed' % self._action_name)
            self._server.set_aborted(self._result)
            switch_success = self.switch_controllers(
                'arm_controller', 'grinder_controller')
            if not switch_success:
                return False, "Failed Switching Controllers"
            # NOTE(review): this logs 'Succeeded' on the failure path —
            # looks like a copy-paste slip; confirm the intended message.
            rospy.loginfo('%s: Succeeded' % self._action_name)
class GuardedMoveActionServer(object):
    """Action server for a guarded move: the arm descends until ground
    contact is detected (or the trajectory completes) and the detected
    ground position is published and returned as the result."""

    def __init__(self, name):
        self._action_name = name
        self._server = actionlib.SimpleActionServer(self._action_name,
                                                    ow_lander.msg.GuardedMoveAction,
                                                    execute_cb=self.on_guarded_move_action,
                                                    auto_start=False)
        self._server.start()
        self._fdbk = ow_lander.msg.GuardedMoveFeedback()
        self._result = ow_lander.msg.GuardedMoveResult()
        self._current_link_state = LinkStateSubscriber()
        self._interface = MoveItInterface()
        self._timeout = 0.0  # planned trajectory duration
        # Fraction of the planned trajectory estimated complete (0..1).
        self._estimated_plan_fraction_completed = 0.0
        # Fraction of the plan occupied by the pre-contact approach segment.
        self._guarded_move_plan_ratio = 0.0
        self.trajectory_async_executer = TrajectoryAsyncExecuter()
        self.trajectory_async_executer.connect("arm_controller")
        self.guarded_move_traj = RobotTrajectory()
        self.ground_detector = GroundDetector()
        self.pos = Point()
        self.guarded_move_pub = rospy.Publisher('/guarded_move_result', GuardedMoveFinalResult, queue_size=10)

    def handle_guarded_move_done(self, state, result):
        """Done callback: PREEMPTED means execution stopped on contact."""
        ground_detected = state == GoalStatus.PREEMPTED
        ground_position = self.ground_detector.ground_position if ground_detected else Point()
        rospy.loginfo("Ground Detected ? {}".format(ground_detected))
        self.guarded_move_pub.publish(
            ground_detected, 'base_link', ground_position)

    def handle_guarded_move_feedback(self, feedback):
        """Feedback callback: stop the trajectory when contact is seen,
        ignoring detections during the initial approach segment."""
        self.trajectory_async_executer.stop_arm_if_fault(feedback)
        execution_time_tollerance = 0.1
        if self.ground_detector.detect():
            if (self._estimated_plan_fraction_completed < self._guarded_move_plan_ratio
                    + execution_time_tollerance):
                # Too early in the plan to be real contact; discard.
                self.ground_detector.reset()
            else:
                self.trajectory_async_executer.stop()

    def _update_feedback(self):
        # Publish the current end-effector position as action feedback.
        self._ls = self._current_link_state._link_value
        self._fdbk.current.x = self._ls.x
        self._fdbk.current.y = self._ls.y
        self._fdbk.current.z = self._ls.z
        self._server.publish_feedback(self._fdbk)

    def _update_motion(self, goal):
        """Plan the guarded-move trajectory and the approach ratio;
        sets self._timeout to the plan duration."""
        rospy.loginfo("Guarded move activity started")
        self.guarded_move_traj, self._guarded_move_plan_ratio = all_action_trajectories.guarded_move_plan(
            self._interface.move_arm,
            self._interface.robot,
            self._interface.moveit_fk, goal)
        if self.guarded_move_traj == False:
            return
        else:
            n_points = len(self.guarded_move_traj.joint_trajectory.points)
            start_time = self.guarded_move_traj.joint_trajectory.points[0].time_from_start
            end_time = self.guarded_move_traj.joint_trajectory.points[n_points-1].time_from_start
            self._timeout = end_time - start_time

    def on_guarded_move_action(self, goal):
        """Execute callback: run the guarded move and report the ground
        position detected by the GroundDetector."""
        self._update_motion(goal)
        if self.guarded_move_traj == False:
            self._server.set_aborted(self._result)
            return
        success = False
        self.ground_detector.reset()
        self.trajectory_async_executer.execute(self.guarded_move_traj.joint_trajectory,
                                               done_cb=self.handle_guarded_move_done,
                                               active_cb=None,
                                               feedback_cb=self.handle_guarded_move_feedback)
        start_time = rospy.get_time()

        def now_from_start(start):
            # Elapsed time as a Duration, comparable with self._timeout.
            return rospy.Duration(secs=rospy.get_time() - start)

        # Publish feedback and track estimated completion during execution.
        while ((now_from_start(start_time) < self._timeout)):
            self._update_feedback()
            self._estimated_plan_fraction_completed = now_from_start(
                start_time)/self._timeout

        success = self.trajectory_async_executer.success(
        ) and self.trajectory_async_executer.wait()
        if success:
            self._result.final.x = self.ground_detector.ground_position.x
            self._result.final.y = self.ground_detector.ground_position.y
            self._result.final.z = self.ground_detector.ground_position.z
            self._result.success = True
            rospy.loginfo('%s: Succeeded' % self._action_name)
            self._server.set_succeeded(self._result)
        else:
            rospy.loginfo('%s: Failed' % self._action_name)
            self._server.set_aborted(self._result)
class DigCircularActionServer(object):
    """Action server that digs a circular trench with the scoop."""

    def __init__(self, name):
        self._action_name = name
        self._server = actionlib.SimpleActionServer(self._action_name,
                                                    ow_lander.msg.DigCircularAction,
                                                    execute_cb=self.on_DigCircular_action,
                                                    auto_start=False)
        self._server.start()
        self._fdbk = ow_lander.msg.DigCircularFeedback()
        self._result = ow_lander.msg.DigCircularResult()
        self._current_link_state = LinkStateSubscriber()
        self._interface = MoveItInterface()
        self._timeout = 0.0  # planned trajectory duration
        self.trajectory_async_executer = TrajectoryAsyncExecuter()
        self.trajectory_async_executer.connect("arm_controller")
        self.current_traj = RobotTrajectory()

    def switch_controllers(self, start_controller, stop_controller):
        """Start one ros_control controller and stop another.

        NOTE(review): defined here but never called by this server's
        execute callback — possibly vestigial; confirm.
        """
        rospy.wait_for_service('/controller_manager/switch_controller')
        success = False
        try:
            switch_controller = rospy.ServiceProxy(
                '/controller_manager/switch_controller', SwitchController)
            success = switch_controller(
                [start_controller], [stop_controller], 2, False, 1.0)
        except rospy.ServiceException as e:
            rospy.loginfo("switch_controllers error: %s" % e)
        finally:
            # Settle delay; original (truncated) comment says it avoids a
            # controller "not in running
            # state" error on dig_circular trajectory execution.
            rospy.sleep(0.2)
        return success

    def _update_feedback(self):
        # Publish the current end-effector position as action feedback.
        self._ls = self._current_link_state._link_value
        self._fdbk.current.x = self._ls.x
        self._fdbk.current.y = self._ls.y
        self._fdbk.current.z = self._ls.z
        self._server.publish_feedback(self._fdbk)

    def _update_motion(self, goal):
        """Generate the circular-dig trajectory; sets self._timeout."""
        rospy.loginfo("DigCircular activity started")
        self.current_traj = None
        self.current_traj = all_action_trajectories.dig_circular(self._interface.move_arm,
                                                                 self._interface.move_limbs,
                                                                 self._interface.robot, self._interface.moveit_fk, goal)
        if self.current_traj == False:
            return
        else:
            n_points = len(self.current_traj.joint_trajectory.points)
            start_time = self.current_traj.joint_trajectory.points[0].time_from_start
            end_time = self.current_traj.joint_trajectory.points[n_points -
                                                                 1].time_from_start
            self._timeout = (end_time - start_time)

    def on_DigCircular_action(self, goal):
        """Execute callback: plan, execute and report the circular dig."""
        self._update_motion(goal)
        if self.current_traj == False:
            self._server.set_aborted(self._result)
            return
        success = False
        self.trajectory_async_executer.execute(self.current_traj.joint_trajectory,
                                               done_cb=None,
                                               active_cb=None,
                                               feedback_cb=self.trajectory_async_executer.stop_arm_if_fault)
        start_time = rospy.get_time()

        def now_from_start(start):
            # Elapsed time as a Duration, comparable with self._timeout.
            return rospy.Duration(secs=rospy.get_time() - start)

        # Publish feedback for the planned trajectory duration.
        while ((now_from_start(start_time) < self._timeout)):
            self._update_feedback()

        success = self.trajectory_async_executer.success(
        ) and self.trajectory_async_executer.wait()
        if success:
            self._result.final.x = self._fdbk.current.x
            self._result.final.y = self._fdbk.current.y
            self._result.final.z = self._fdbk.current.z
            rospy.loginfo('%s: Succeeded' % self._action_name)
            self._server.set_succeeded(self._result)
        else:
            rospy.loginfo('%s: Failed' % self._action_name)
            self._server.set_aborted(self._result)
class DigLinearActionServer(object):
    """Action server that digs a linear trench with the scoop."""

    def __init__(self, name):
        self._action_name = name
        self._server = actionlib.SimpleActionServer(self._action_name,
                                                    ow_lander.msg.DigLinearAction,
                                                    execute_cb=self.on_DigLinear_action,
                                                    auto_start=False)
        self._server.start()
        self._fdbk = ow_lander.msg.DigLinearFeedback()
        self._result = ow_lander.msg.DigLinearResult()
        self._current_link_state = LinkStateSubscriber()
        self._interface = MoveItInterface()
        self._timeout = 0.0  # planned trajectory duration
        self.trajectory_async_executer = TrajectoryAsyncExecuter()
        self.trajectory_async_executer.connect("arm_controller")
        self.current_traj = RobotTrajectory()

    def _update_feedback(self):
        # Publish the current end-effector position as action feedback.
        self._ls = self._current_link_state._link_value
        self._fdbk.current.x = self._ls.x
        self._fdbk.current.y = self._ls.y
        self._fdbk.current.z = self._ls.z
        self._server.publish_feedback(self._fdbk)

    def _update_motion(self, goal):
        """Generate the linear-dig trajectory; sets self._timeout."""
        rospy.loginfo("DigLinear activity started")
        self.current_traj = all_action_trajectories.dig_linear(self._interface.move_arm,
                                                               self._interface.robot,
                                                               self._interface.moveit_fk, goal)
        if self.current_traj == False:
            return
        else:
            n_points = len(self.current_traj.joint_trajectory.points)
            start_time = self.current_traj.joint_trajectory.points[0].time_from_start
            end_time = self.current_traj.joint_trajectory.points[n_points -
                                                                 1].time_from_start
            self._timeout = (end_time - start_time)

    def on_DigLinear_action(self, goal):
        """Execute callback: plan, execute and report the linear dig."""
        self._update_motion(goal)
        if self.current_traj == False:
            self._server.set_aborted(self._result)
            return
        success = False
        self.trajectory_async_executer.execute(self.current_traj.joint_trajectory,
                                               done_cb=None,
                                               active_cb=None,
                                               feedback_cb=self.trajectory_async_executer.stop_arm_if_fault)
        start_time = rospy.get_time()

        def now_from_start(start):
            # Elapsed time as a Duration, comparable with self._timeout.
            return rospy.Duration(secs=rospy.get_time() - start)

        # Publish feedback for the planned trajectory duration.
        while ((now_from_start(start_time) < self._timeout)):
            self._update_feedback()

        success = self.trajectory_async_executer.success(
        ) and self.trajectory_async_executer.wait()
        if success:
            self._result.final.x = self._fdbk.current.x
            self._result.final.y = self._fdbk.current.y
            self._result.final.z = self._fdbk.current.z
            rospy.loginfo('%s: Succeeded' % self._action_name)
            self._server.set_succeeded(self._result)
        else:
            rospy.loginfo('%s: Failed' % self._action_name)
            self._server.set_aborted(self._result)
class DeliverActionServer(object):
    """Action server that moves the scoop to deliver a collected sample.

    Plans the delivery trajectory via all_action_trajectories and runs it
    asynchronously on the arm_controller, publishing the end-effector
    position as feedback.
    """

    def __init__(self, name):
        self._action_name = name
        self._server = actionlib.SimpleActionServer(
            self._action_name, ow_lander.msg.DeliverAction, execute_cb=self.on_deliver_action, auto_start=False)
        self._server.start()
        # Bug fix: feedback/result messages must belong to the Deliver
        # action; previously UnstowFeedback/UnstowResult were used
        # (copy-paste from UnstowActionServer), which do not match the
        # DeliverAction type registered with the server above.
        self._fdbk = ow_lander.msg.DeliverFeedback()
        self._result = ow_lander.msg.DeliverResult()
        self._current_link_state = LinkStateSubscriber()
        self._interface = MoveItInterface()
        self._timeout = 0.0  # planned trajectory duration
        self.trajectory_async_executer = TrajectoryAsyncExecuter()
        self.trajectory_async_executer.connect("arm_controller")
        self.deliver_sample_traj = RobotTrajectory()

    def _update_feedback(self):
        # Publish the current end-effector position as action feedback.
        self._ls = self._current_link_state._link_value
        self._fdbk.current.x = self._ls.x
        self._fdbk.current.y = self._ls.y
        self._fdbk.current.z = self._ls.z
        self._server.publish_feedback(self._fdbk)

    def _update_motion(self, goal):
        """Plan the sample-delivery trajectory; sets self._timeout."""
        rospy.loginfo("Deliver sample activity started")
        self.deliver_sample_traj = all_action_trajectories.deliver_sample(self._interface.move_arm,
                                                                          self._interface.robot,
                                                                          self._interface.moveit_fk, goal)
        if self.deliver_sample_traj == False:
            return
        else:
            n_points = len(self.deliver_sample_traj.joint_trajectory.points)
            start_time = self.deliver_sample_traj.joint_trajectory.points[0].time_from_start
            end_time = self.deliver_sample_traj.joint_trajectory.points[n_points-1].time_from_start
            self._timeout = end_time - start_time

    def on_deliver_action(self, goal):
        """Execute callback: plan, execute and report the delivery."""
        self._update_motion(goal)
        if self.deliver_sample_traj == False:
            self._server.set_aborted(self._result)
            return
        success = False
        self.trajectory_async_executer.execute(self.deliver_sample_traj.joint_trajectory,
                                               done_cb=None,
                                               active_cb=None,
                                               feedback_cb=self.trajectory_async_executer.stop_arm_if_fault)
        start_time = rospy.get_time()

        def now_from_start(start):
            # Elapsed time as a Duration, comparable with self._timeout.
            return rospy.Duration(secs=rospy.get_time() - start)

        # Publish feedback for the planned trajectory duration.
        while ((now_from_start(start_time) < self._timeout)):
            self._update_feedback()

        success = self.trajectory_async_executer.success(
        ) and self.trajectory_async_executer.wait()
        if success:
            self._result.final.x = self._fdbk.current.x
            self._result.final.y = self._fdbk.current.y
            self._result.final.z = self._fdbk.current.z
            rospy.loginfo('%s: Succeeded' % self._action_name)
            self._server.set_succeeded(self._result)
        else:
            rospy.loginfo('%s: Failed' % self._action_name)
            self._server.set_aborted(self._result)
if __name__ == '__main__':
    # Bring up one SimpleActionServer per arm activity; rospy.spin()
    # keeps the process alive servicing incoming goals.
    rospy.init_node('arm_action_servers')
    server_unstow = UnstowActionServer("Unstow")
    server_stow = StowActionServer("Stow")
    server_grind = GrindActionServer("Grind")
    server_guarded_move = GuardedMoveActionServer("GuardedMove")
    server_dig_circular = DigCircularActionServer("DigCircular")
    server_dig_linear = DigLinearActionServer("DigLinear")
    server_deliver = DeliverActionServer("Deliver")
    rospy.spin()
| true | true |
f724145bd3a999c57b36d90294098b99246dd929 | 1,012 | py | Python | identipy_app/migrations/0004_auto_20171222_0206.py | levitsky/identipy_server | 6dd04d929fadce01eec1ffd800cd3cd17da614b9 | [
"Apache-2.0"
] | null | null | null | identipy_app/migrations/0004_auto_20171222_0206.py | levitsky/identipy_server | 6dd04d929fadce01eec1ffd800cd3cd17da614b9 | [
"Apache-2.0"
] | null | null | null | identipy_app/migrations/0004_auto_20171222_0206.py | levitsky/identipy_server | 6dd04d929fadce01eec1ffd800cd3cd17da614b9 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-12-21 23:06
from __future__ import unicode_literals
from django.db import migrations, models
from identipy_app.models import SearchRun
def define_runs(apps, schema_editor):
    """Backfill PepXMLFile.run from the SearchRun.pepxmlfiles relation.

    Data-migration helper: for every SearchRun, each attached pepxml file
    is pointed back at that run via the new ForeignKey.
    """
    pepxml_model = apps.get_model('identipy_app', 'PepXMLFile')
    searchrun_model = apps.get_model('identipy_app', 'SearchRun')
    # Map each pepxml file to the run that owns it.
    file_to_run = {
        pepxml: run
        for run in searchrun_model.objects.all()
        for pepxml in run.pepxmlfiles.all()
    }
    for pepxml in pepxml_model.objects.all():
        pepxml.run = file_to_run[pepxml]
        pepxml.save()
class Migration(migrations.Migration):
    """Add PepXMLFile.run (FK to SearchRun), backfill it from existing
    SearchRun.pepxmlfiles data, then tighten it to non-nullable."""

    dependencies = [
        ('identipy_app', '0003_pepxmlfile_filtered'),
    ]
    operations = [
        # Step 1: add the FK nullable so existing rows remain valid.
        migrations.AddField('PepXMLFile', 'run', models.ForeignKey('SearchRun', null=True, on_delete=models.CASCADE), False),
        # Step 2: populate the new column from existing relations.
        migrations.RunPython(define_runs),
        # Step 3: make the column non-nullable now every row has a value.
        migrations.AlterField('PepXMLFile', 'run', models.ForeignKey('SearchRun', on_delete=models.CASCADE), True),
    ]
| 33.733333 | 129 | 0.66996 |
from __future__ import unicode_literals
from django.db import migrations, models
from identipy_app.models import SearchRun
def define_runs(apps, schema_editor):
    # Data-migration helper: point each PepXMLFile at the SearchRun that
    # owns it, using the existing SearchRun.pepxmlfiles relation.
    PepXMLFile = apps.get_model('identipy_app', 'PepXMLFile')
    SearchRun = apps.get_model('identipy_app', 'SearchRun')
    f2r = {}
    for run in SearchRun.objects.all():
        for f in run.pepxmlfiles.all():
            f2r[f] = run
    for pepf in PepXMLFile.objects.all():
        # KeyError here would mean a pepxml file not attached to any run;
        # presumably impossible in this schema — TODO confirm.
        pepf.run = f2r[pepf]
        pepf.save()
class Migration(migrations.Migration):
    # Adds PepXMLFile.run as a nullable FK, backfills it via define_runs,
    # then alters it to non-nullable.
    dependencies = [
        ('identipy_app', '0003_pepxmlfile_filtered'),
    ]
    operations = [
        migrations.AddField('PepXMLFile', 'run', models.ForeignKey('SearchRun', null=True, on_delete=models.CASCADE), False),
        migrations.RunPython(define_runs),
        migrations.AlterField('PepXMLFile', 'run', models.ForeignKey('SearchRun', on_delete=models.CASCADE), True),
    ]
| true | true |
f72414b5e3cce6d71f11dcd8001bbbd3832c1183 | 803 | py | Python | osc-sender.py | zarquin/ASCII-Simple-Video-Synth | ea755a20141880affecb779fdb94eed1ead15245 | [
"MIT"
] | 11 | 2020-05-22T02:51:30.000Z | 2021-02-21T20:57:41.000Z | osc-sender.py | zarquin/ASCII-Simple-Video-Synth | ea755a20141880affecb779fdb94eed1ead15245 | [
"MIT"
] | 1 | 2020-05-23T17:37:41.000Z | 2020-05-23T17:37:41.000Z | osc-sender.py | zarquin/ASCII-Simple-Video-Synth | ea755a20141880affecb779fdb94eed1ead15245 | [
"MIT"
] | 3 | 2019-08-11T15:14:48.000Z | 2020-08-08T09:46:29.000Z | import random
import time
from pythonosc import osc_message_builder
from pythonosc import udp_client
# OSC client targeting the synth listening on localhost:8000.
client=udp_client.SimpleUDPClient("127.0.0.1",8000)

# Every controllable OSC address exposed by the video synth.
dest = [
    "/red/scale",
    "/red/offset",
    "/red/speed",
    "/green/scale",
    "/green/offset",
    "/green/speed",
    "/blue/scale",
    "/blue/offset",
    "/blue/speed",
    "/shape/size",
    "/shape/sides",
    "/shape/xinc",
    "/shape/yinc",
    "/shape/xcenter",
    "/shape/ycenter",
    "/shape/shapecount",
    "/shape/shapeskip",
    "/global/strobe",
    "/global/invert"
]
def random_test():
    """Send one random value (0-254) to one randomly chosen destination."""
    value = random.randrange(0, 255)
    address = dest[random.randrange(0, len(dest))]
    client.send_message(address, value)
    print("sent {} to {}".format(value, address))
# Fire one random control change per second, forever (Ctrl-C to stop).
while True:
    random_test()
    time.sleep(1)
| 19.119048 | 51 | 0.617684 | import random
import time
from pythonosc import osc_message_builder
from pythonosc import udp_client
# OSC client targeting the synth listening on localhost:8000.
client=udp_client.SimpleUDPClient("127.0.0.1",8000)

# Every controllable OSC address exposed by the video synth.
dest = [
    "/red/scale",
    "/red/offset",
    "/red/speed",
    "/green/scale",
    "/green/offset",
    "/green/speed",
    "/blue/scale",
    "/blue/offset",
    "/blue/speed",
    "/shape/size",
    "/shape/sides",
    "/shape/xinc",
    "/shape/yinc",
    "/shape/xcenter",
    "/shape/ycenter",
    "/shape/shapecount",
    "/shape/shapeskip",
    "/global/strobe",
    "/global/invert"
]
def random_test():
    # Send one random value (0-254) to one randomly chosen OSC address.
    val_set = random.randrange(0,255)
    dest_s = dest[random.randrange(0, len(dest))]
    client.send_message(dest_s,val_set)
    print("sent {} to {}".format(val_set,dest_s))
# Fire one random control change per second, forever (Ctrl-C to stop).
while True:
    random_test()
    time.sleep(1)
| true | true |
f72416537976a13d7e99ccfa536d47805df1b886 | 12,699 | py | Python | intel_extension_for_pytorch/optim/_functional.py | Manny27nyc/intel-extension-for-pytorch | b40faedf6b00d520f6483d519d2e82bce0a6c0d1 | [
"Apache-2.0"
] | null | null | null | intel_extension_for_pytorch/optim/_functional.py | Manny27nyc/intel-extension-for-pytorch | b40faedf6b00d520f6483d519d2e82bce0a6c0d1 | [
"Apache-2.0"
] | null | null | null | intel_extension_for_pytorch/optim/_functional.py | Manny27nyc/intel-extension-for-pytorch | b40faedf6b00d520f6483d519d2e82bce0a6c0d1 | [
"Apache-2.0"
] | null | null | null | r"""Functional interface, port from torch/optim/_function.py"""
import torch
from torch import Tensor
from typing import List, Optional
def is_master_weight(param, params_attr):
    """Return True when `param` is an fp32 master weight that has a
    registered bf16 shadow copy in `params_attr`."""
    if param.dtype != torch.float:
        return False
    if param not in params_attr:
        return False
    return 'bf16_param' in params_attr[param]
def get_bf16_grad(param, params_attr):
    """Return the gradient held by the bf16 shadow copy of `param`.

    Only valid for fp32 master weights (asserted via is_master_weight).
    """
    assert is_master_weight(param, params_attr)
    shadow = params_attr[param]['bf16_param']
    return shadow.grad
def _make_sparse(grad, grad_indices, values):
size = grad.size()
if grad_indices.numel() == 0 or values.numel() == 0:
return torch.empty_like(grad)
return torch.sparse_coo_tensor(grad_indices, values, size)
def _adagrad_impl(
params: List[Tensor],
grads: List[Tensor],
state_sums: List[Tensor],
state_steps: List[int],
attr: dict,
lr: float,
weight_decay: float,
lr_decay: float,
eps: float,
fused: bool,
):
r"""Functional API that performs Adagrad algorithm computation.
See :class:`~torch.optim.Adagrad` for details.
"""
for (param, grad, state_sum, step) in zip(params, grads, state_sums, state_steps):
param2 = torch.Tensor()
if param in attr:
if 'trail' in attr[param]:
assert param.dtype is torch.bfloat16
param2 = attr[param]['trail']
if 'bf16_param' in attr[param]:
assert param.dtype is torch.float
param2 = attr[param]['bf16_param']
if fused and not grad.is_sparse:
torch.ops.torch_ipex.adagrad_fused_step(
param,
grad,
state_sum,
param2,
step,
lr,
weight_decay,
lr_decay,
eps)
continue
if weight_decay != 0:
if grad.is_sparse:
raise RuntimeError("weight_decay option is not compatible with sparse gradients")
grad = grad.add(param, alpha=weight_decay)
clr = lr / (1 + (step - 1) * lr_decay)
if grad.is_sparse:
grad = grad.coalesce() # the update is non-linear so indices must be unique
grad_indices = grad._indices()
grad_values = grad._values()
size = grad.size()
state_sum.add_(_make_sparse(grad, grad_indices, grad_values.pow(2)))
std = state_sum.sparse_mask(grad)
std_values = std._values().sqrt_().add_(eps)
param.add_(_make_sparse(grad, grad_indices, grad_values / std_values), alpha=-clr)
else:
state_sum.addcmul_(grad, grad, value=1)
std = state_sum.sqrt().add_(eps)
param.addcdiv_(grad, std, value=-clr)
@torch.no_grad()
def adagrad_step(self, closure=None):
    """Performs a single optimization step.

    Patched in as the ``step`` method of an IPEX-optimized Adagrad
    optimizer; ``self`` carries ``param_groups``, ``state``,
    ``params_attr`` and ``fused``.

    Args:
        closure (callable, optional): A closure that reevaluates the model
            and returns the loss.
    """
    loss = None
    if closure is not None:
        with torch.enable_grad():
            loss = closure()

    for group in self.param_groups:
        params_with_grad = []
        grads = []
        state_sums = []
        state_steps = []

        for p in group['params']:
            # For fp32 master weights the real gradient lives on the
            # bf16 shadow parameter.
            grad = get_bf16_grad(p, self.params_attr) if is_master_weight(p, self.params_attr) else p.grad
            if grad is not None:
                params_with_grad.append(p)
                grads.append(grad)
                state = self.state[p]
                state_sums.append(state['sum'])
                # update the steps for each param group update
                state['step'] += 1
                # record the step after step update
                state_steps.append(state['step'])

        _adagrad_impl(
            params_with_grad,
            grads,
            state_sums,
            state_steps,
            self.params_attr,
            group['lr'],
            group['weight_decay'],
            group['lr_decay'],
            group['eps'],
            self.fused)
    return loss
def _sgd_non_fused_micro_step(
params: Tensor,
d_p_list: Tensor,
momentum_buffer_list: Optional[Tensor],
weight_decay: float,
momentum: float,
lr: float,
dampening: float,
nesterov: bool,
):
if weight_decay != 0:
d_p = d_p.add(param, alpha=weight_decay)
if momentum != 0:
buf = momentum_buffer_list[i]
if buf is None:
buf = torch.clone(d_p).detach()
momentum_buffer_list[i] = buf
else:
buf.mul_(momentum).add_(d_p, alpha=1 - dampening)
if nesterov:
d_p = d_p.add(buf, alpha=momentum)
else:
d_p = buf
param.add_(d_p, alpha=alpha)
def _sgd_impl(
params: List[Tensor],
d_p_list: List[Tensor],
attr: dict,
momentum_buffer_list: List[Optional[Tensor]],
*,
weight_decay: float,
momentum: float,
lr: float,
dampening: float,
nesterov: bool,
fused: bool
):
r"""Functional API that performs SGD algorithm computation.
See :class:`~torch.optim.SGD` for details.
"""
for i, param in enumerate(params):
d_p = d_p_list[i]
param2 = torch.Tensor()
if param in attr:
if 'trail' in attr[param]:
assert param.dtype is torch.bfloat16
param2 = attr[param]['trail']
if 'bf16_param' in attr[param]:
assert param.dtype is torch.float
param2 = attr[param]['bf16_param']
if fused and not d_p.is_sparse:
momentum_buffer_list[i] = torch.ops.torch_ipex.sgd_fused_step(
param,
d_p,
momentum_buffer_list[i],
param2,
momentum,
lr,
weight_decay,
dampening,
nesterov)
continue
if (
d_p.is_sparse and
d_p.dtype == torch.bfloat16 and
weight_decay == 0 and
momentum == 0
):
# packed_add can support sparse tensor
torch.ops.torch_ipex.packed_add(param, param2, d_p, alpha=-lr)
else:
# no special optimize for other non fused case, fall back to naive implementation
d_p = d_p.to(param.dtype)
_sgd_non_fused_micro_step(
param,
d_p,
momentum_buffer_list[i],
momentum,
lr,
weight_decay,
dampening,
nesterov
)
@torch.no_grad()
def sgd_step(self, closure=None):
    """Performs a single optimization step.

    Patched in as the ``step`` method of an IPEX-optimized SGD optimizer;
    ``self`` carries ``param_groups``, ``state``, ``params_attr`` and
    ``fused``.

    Args:
        closure (callable, optional): A closure that reevaluates the model
            and returns the loss.
    """
    loss = None
    if closure is not None:
        with torch.enable_grad():
            loss = closure()

    for group in self.param_groups:
        params_with_grad = []
        d_p_list = []
        momentum_buffer_list = []
        weight_decay = group['weight_decay']
        momentum = group['momentum']
        dampening = group['dampening']
        nesterov = group['nesterov']
        lr = group['lr']

        for p in group['params']:
            # For fp32 master weights the real gradient lives on the
            # bf16 shadow parameter.
            grad = get_bf16_grad(p, self.params_attr) if is_master_weight(p, self.params_attr) else p.grad
            if grad is not None:
                params_with_grad.append(p)
                d_p_list.append(grad)
                state = self.state[p]
                if 'momentum_buffer' not in state:
                    momentum_buffer_list.append(None)
                else:
                    momentum_buffer_list.append(state['momentum_buffer'])

        _sgd_impl(
            params_with_grad,
            d_p_list,
            self.params_attr,
            momentum_buffer_list,
            weight_decay=weight_decay,
            momentum=momentum,
            lr=lr,
            dampening=dampening,
            nesterov=nesterov,
            fused=self.fused)

        # update momentum_buffers in state
        for p, momentum_buffer in zip(params_with_grad, momentum_buffer_list):
            state = self.state[p]
            state['momentum_buffer'] = momentum_buffer
    return loss
def _lamb_fused_impl(
    params: List[Tensor],
    grads: List[Tensor],
    exp_avgs: List[Tensor],
    exp_avg_sqs: List[Tensor],
    attr: dict,
    state_steps: List[int],
    beta1: float,
    beta2: float,
    lr: float,
    weight_decay: float,
    eps: float,
):
    r"""Functional API that performs Lamb algorithm computation.

    Delegates the whole per-parameter update to the fused
    ``torch_ipex.lamb_fused_step`` kernel; ``attr`` supplies the bf16
    trail half or bf16 shadow companion tensor where registered.

    See :class:`~torch.optim.Lamb` for details.
    """
    for i, param in enumerate(params):
        grad = grads[i]
        exp_avg = exp_avgs[i]
        exp_avg_sq = exp_avg_sqs[i]
        step = state_steps[i]
        # Companion tensor: bf16 trail half, or bf16 shadow of fp32 master.
        param2 = torch.Tensor()
        if param in attr:
            if 'trail' in attr[param]:
                assert param.dtype is torch.bfloat16
                param2 = attr[param]['trail']
            if 'bf16_param' in attr[param]:
                assert param.dtype is torch.float
                param2 = attr[param]['bf16_param']
        torch.ops.torch_ipex.lamb_fused_step(
            param,
            exp_avg,
            exp_avg_sq,
            grad,
            param2,
            step,
            beta1,
            beta2,
            lr,
            weight_decay,
            eps)
def _lamb_impl(
params: List[Tensor],
grads: List[Tensor],
exp_avgs: List[Tensor],
exp_avg_sqs: List[Tensor],
state_steps: List[int],
beta1: float,
beta2: float,
lr: float,
weight_decay: float,
eps: float,
):
r"""Functional API that performs Lamb algorithm computation.
"""
for i, param in enumerate(params):
grad = grads[i]
exp_avg = exp_avgs[i]
exp_avg_sq = exp_avg_sqs[i]
step = state_steps[i]
bias_correction1 = 1 - beta1 ** step
bias_correction2 = 1 - beta2 ** step
grad = grad.to(exp_avg.dtype)
# Decay the first and second moment running average coefficient
exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
adam_step = (exp_avg / bias_correction1) / ((exp_avg_sq / bias_correction2).sqrt() + eps)
if weight_decay != 0:
adam_step.add_(param, alpha=weight_decay)
weight_norm = param.norm(p=2)
rtw_norm = adam_step.norm(p=2)
true_ratio = weight_norm / rtw_norm
param.add_(adam_step, alpha=-lr * true_ratio)
@torch.no_grad()
def lamb_step(self, closure=None):
    """Performs a single optimization step.

    Intended to be bound to an optimizer object: reads ``self.param_groups``
    / ``self.state`` and dispatches to the fused Lamb kernel.

    Args:
        closure (callable, optional): A closure that reevaluates the model
            and returns the loss.

    Returns:
        The loss from the closure, or ``None`` if no closure was given.
    """
    loss = None
    if closure is not None:
        with torch.enable_grad():
            loss = closure()
    for group in self.param_groups:
        params_with_grad = []
        grads = []
        exp_avgs = []
        exp_avg_sqs = []
        # NOTE(review): 'trails' is never populated or read in this
        # function — looks like dead code; confirm before removing.
        trails = []
        state_steps = []
        for p in group['params']:
            # For fp32 master weights, the gradient lives on the bf16
            # shadow parameter rather than on ``p`` itself.
            grad = get_bf16_grad(p, self.params_attr) if is_master_weight(p, self.params_attr) else p.grad
            if grad is not None:
                params_with_grad.append(p)
                if grad.is_sparse:
                    raise RuntimeError('Lamb does not support sparse gradients')
                if grad.device != torch.device('cpu'):
                    raise RuntimeError('Lamb supports only CPU device')
                grads.append(grad)
                state = self.state[p]
                # Lazy state initialization: moments are kept in fp32
                # (or fp64 for fp64 params) regardless of param dtype.
                if len(state) == 0:
                    state['step'] = 0
                    buffer_dtype = p.dtype if p.dtype is torch.float64 else torch.float
                    state['exp_avg'] = torch.zeros(p.shape, dtype=buffer_dtype)
                    state['exp_avg_sq'] = torch.zeros(p.shape, dtype=buffer_dtype)
                exp_avgs.append(state['exp_avg'])
                exp_avg_sqs.append(state['exp_avg_sq'])
                # update the steps for each param group update
                state['step'] += 1
                # record the step after step update
                state_steps.append(state['step'])
        beta1, beta2 = group['betas']
        _lamb_fused_impl(
            params_with_grad,
            grads,
            exp_avgs,
            exp_avg_sqs,
            self.params_attr,
            state_steps,
            beta1,
            beta2,
            group['lr'],
            group['weight_decay'],
            group['eps'])
    return loss
from torch import Tensor
from typing import List, Optional
def is_master_weight(param, params_attr):
return (
param.dtype == torch.float and
param in params_attr and
'bf16_param' in params_attr[param]
)
def get_bf16_grad(param, params_attr):
assert is_master_weight(param, params_attr)
return params_attr[param]['bf16_param'].grad
def _make_sparse(grad, grad_indices, values):
size = grad.size()
if grad_indices.numel() == 0 or values.numel() == 0:
return torch.empty_like(grad)
return torch.sparse_coo_tensor(grad_indices, values, size)
def _adagrad_impl(
    params: List[Tensor],
    grads: List[Tensor],
    state_sums: List[Tensor],
    state_steps: List[int],
    attr: dict,
    lr: float,
    weight_decay: float,
    lr_decay: float,
    eps: float,
    fused: bool,
):
    """Functional Adagrad update over lists of parameters.

    Dense gradients go through the fused ``torch_ipex.adagrad_fused_step``
    kernel when ``fused`` is set; sparse gradients (and the non-fused
    path) fall back to a pure-PyTorch implementation that mirrors
    ``torch.optim.Adagrad``.  ``state_sums`` (the accumulated squared
    gradients) and ``params`` are updated in place.
    """
    for (param, grad, state_sum, step) in zip(params, grads, state_sums, state_steps):
        # Companion tensor: bf16 'trail' half for split bf16 params, or
        # the bf16 shadow of an fp32 master weight; empty otherwise.
        param2 = torch.Tensor()
        if param in attr:
            if 'trail' in attr[param]:
                assert param.dtype is torch.bfloat16
                param2 = attr[param]['trail']
            if 'bf16_param' in attr[param]:
                assert param.dtype is torch.float
                param2 = attr[param]['bf16_param']
        if fused and not grad.is_sparse:
            torch.ops.torch_ipex.adagrad_fused_step(
                param,
                grad,
                state_sum,
                param2,
                step,
                lr,
                weight_decay,
                lr_decay,
                eps)
            continue
        if weight_decay != 0:
            if grad.is_sparse:
                raise RuntimeError("weight_decay option is not compatible with sparse gradients")
            grad = grad.add(param, alpha=weight_decay)
        # Learning rate decayed by the number of completed steps.
        clr = lr / (1 + (step - 1) * lr_decay)
        if grad.is_sparse:
            # Sparse path: accumulate grad**2 only at the nonzero indices
            # and scale the update by the masked running std.
            grad = grad.coalesce()
            grad_indices = grad._indices()
            grad_values = grad._values()
            size = grad.size()
            state_sum.add_(_make_sparse(grad, grad_indices, grad_values.pow(2)))
            std = state_sum.sparse_mask(grad)
            std_values = std._values().sqrt_().add_(eps)
            param.add_(_make_sparse(grad, grad_indices, grad_values / std_values), alpha=-clr)
        else:
            state_sum.addcmul_(grad, grad, value=1)
            std = state_sum.sqrt().add_(eps)
            param.addcdiv_(grad, std, value=-clr)
@torch.no_grad()
def adagrad_step(self, closure=None):
    """Performs a single Adagrad optimization step.

    Intended to be bound to an optimizer object: reads
    ``self.param_groups`` / ``self.state`` and calls :func:`_adagrad_impl`.

    Args:
        closure (callable, optional): A closure that reevaluates the model
            and returns the loss.

    Returns:
        The loss from the closure, or ``None`` if no closure was given.
    """
    loss = None
    if closure is not None:
        with torch.enable_grad():
            loss = closure()
    for group in self.param_groups:
        params_with_grad = []
        grads = []
        state_sums = []
        state_steps = []
        for p in group['params']:
            # For fp32 master weights, the gradient lives on the bf16
            # shadow parameter rather than on ``p`` itself.
            grad = get_bf16_grad(p, self.params_attr) if is_master_weight(p, self.params_attr) else p.grad
            if grad is not None:
                params_with_grad.append(p)
                grads.append(grad)
                state = self.state[p]
                # NOTE(review): unlike lamb_step, there is no lazy init
                # here — state['sum'] / state['step'] are assumed to be
                # pre-populated elsewhere; confirm against the optimizer
                # constructor.
                state_sums.append(state['sum'])
                state['step'] += 1
                state_steps.append(state['step'])
        _adagrad_impl(
            params_with_grad,
            grads,
            state_sums,
            state_steps,
            self.params_attr,
            group['lr'],
            group['weight_decay'],
            group['lr_decay'],
            group['eps'],
            self.fused)
    return loss
def _sgd_non_fused_micro_step(
params: Tensor,
d_p_list: Tensor,
momentum_buffer_list: Optional[Tensor],
weight_decay: float,
momentum: float,
lr: float,
dampening: float,
nesterov: bool,
):
if weight_decay != 0:
d_p = d_p.add(param, alpha=weight_decay)
if momentum != 0:
buf = momentum_buffer_list[i]
if buf is None:
buf = torch.clone(d_p).detach()
momentum_buffer_list[i] = buf
else:
buf.mul_(momentum).add_(d_p, alpha=1 - dampening)
if nesterov:
d_p = d_p.add(buf, alpha=momentum)
else:
d_p = buf
param.add_(d_p, alpha=alpha)
def _sgd_impl(
    params: List[Tensor],
    d_p_list: List[Tensor],
    attr: dict,
    momentum_buffer_list: List[Optional[Tensor]],
    *,
    weight_decay: float,
    momentum: float,
    lr: float,
    dampening: float,
    nesterov: bool,
    fused: bool
):
    """Functional SGD update over lists of parameters.

    Dense gradients use the fused ``torch_ipex.sgd_fused_step`` kernel
    when ``fused`` is set (which also returns the updated momentum
    buffer); sparse bf16 gradients without decay/momentum use the
    ``packed_add`` top/trail-half kernel; everything else falls back to
    :func:`_sgd_non_fused_micro_step`.  ``params`` and
    ``momentum_buffer_list`` are updated in place.
    """
    for i, param in enumerate(params):
        d_p = d_p_list[i]
        # Companion tensor: bf16 'trail' half for split bf16 params, or
        # the bf16 shadow of an fp32 master weight; empty otherwise.
        param2 = torch.Tensor()
        if param in attr:
            if 'trail' in attr[param]:
                assert param.dtype is torch.bfloat16
                param2 = attr[param]['trail']
            if 'bf16_param' in attr[param]:
                assert param.dtype is torch.float
                param2 = attr[param]['bf16_param']
        if fused and not d_p.is_sparse:
            momentum_buffer_list[i] = torch.ops.torch_ipex.sgd_fused_step(
                param,
                d_p,
                momentum_buffer_list[i],
                param2,
                momentum,
                lr,
                weight_decay,
                dampening,
                nesterov)
            continue
        if (
            d_p.is_sparse and
            d_p.dtype == torch.bfloat16 and
            weight_decay == 0 and
            momentum == 0
        ):
            # Plain decayless/momentumless step on a split bf16 param:
            # add -lr * grad into the (top, trail) packed representation.
            torch.ops.torch_ipex.packed_add(param, param2, d_p, alpha=-lr)
        else:
            d_p = d_p.to(param.dtype)
            # BUG FIX: these arguments were previously passed positionally
            # as (momentum, lr, weight_decay, ...), which landed in the
            # callee's (weight_decay, momentum, lr, ...) slots. Pass by
            # keyword so each hyperparameter reaches the right place.
            _sgd_non_fused_micro_step(
                param,
                d_p,
                momentum_buffer_list[i],
                weight_decay=weight_decay,
                momentum=momentum,
                lr=lr,
                dampening=dampening,
                nesterov=nesterov,
            )
@torch.no_grad()
def sgd_step(self, closure=None):
    """Performs a single SGD optimization step.

    Intended to be bound to an optimizer object: reads
    ``self.param_groups`` / ``self.state`` and calls :func:`_sgd_impl`.

    Args:
        closure (callable, optional): A closure that reevaluates the model
            and returns the loss.

    Returns:
        The loss from the closure, or ``None`` if no closure was given.
    """
    loss = None
    if closure is not None:
        with torch.enable_grad():
            loss = closure()
    for group in self.param_groups:
        params_with_grad = []
        d_p_list = []
        momentum_buffer_list = []
        weight_decay = group['weight_decay']
        momentum = group['momentum']
        dampening = group['dampening']
        nesterov = group['nesterov']
        lr = group['lr']
        for p in group['params']:
            # For fp32 master weights, the gradient lives on the bf16
            # shadow parameter rather than on ``p`` itself.
            grad = get_bf16_grad(p, self.params_attr) if is_master_weight(p, self.params_attr) else p.grad
            if grad is not None:
                params_with_grad.append(p)
                d_p_list.append(grad)
                state = self.state[p]
                if 'momentum_buffer' not in state:
                    momentum_buffer_list.append(None)
                else:
                    momentum_buffer_list.append(state['momentum_buffer'])
        _sgd_impl(
            params_with_grad,
            d_p_list,
            self.params_attr,
            momentum_buffer_list,
            weight_decay=weight_decay,
            momentum=momentum,
            lr=lr,
            dampening=dampening,
            nesterov=nesterov,
            fused=self.fused)
        # Persist momentum buffers (possibly replaced by _sgd_impl's
        # fused path) back into the optimizer state.
        for p, momentum_buffer in zip(params_with_grad, momentum_buffer_list):
            state = self.state[p]
            state['momentum_buffer'] = momentum_buffer
    return loss
def _lamb_fused_impl(
params: List[Tensor],
grads: List[Tensor],
exp_avgs: List[Tensor],
exp_avg_sqs: List[Tensor],
attr: dict,
state_steps: List[int],
beta1: float,
beta2: float,
lr: float,
weight_decay: float,
eps: float,
):
for i, param in enumerate(params):
grad = grads[i]
exp_avg = exp_avgs[i]
exp_avg_sq = exp_avg_sqs[i]
step = state_steps[i]
param2 = torch.Tensor()
if param in attr:
if 'trail' in attr[param]:
assert param.dtype is torch.bfloat16
param2 = attr[param]['trail']
if 'bf16_param' in attr[param]:
assert param.dtype is torch.float
param2 = attr[param]['bf16_param']
torch.ops.torch_ipex.lamb_fused_step(
param,
exp_avg,
exp_avg_sq,
grad,
param2,
step,
beta1,
beta2,
lr,
weight_decay,
eps)
def _lamb_impl(
params: List[Tensor],
grads: List[Tensor],
exp_avgs: List[Tensor],
exp_avg_sqs: List[Tensor],
state_steps: List[int],
beta1: float,
beta2: float,
lr: float,
weight_decay: float,
eps: float,
):
for i, param in enumerate(params):
grad = grads[i]
exp_avg = exp_avgs[i]
exp_avg_sq = exp_avg_sqs[i]
step = state_steps[i]
bias_correction1 = 1 - beta1 ** step
bias_correction2 = 1 - beta2 ** step
grad = grad.to(exp_avg.dtype)
exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
adam_step = (exp_avg / bias_correction1) / ((exp_avg_sq / bias_correction2).sqrt() + eps)
if weight_decay != 0:
adam_step.add_(param, alpha=weight_decay)
weight_norm = param.norm(p=2)
rtw_norm = adam_step.norm(p=2)
true_ratio = weight_norm / rtw_norm
param.add_(adam_step, alpha=-lr * true_ratio)
@torch.no_grad()
def lamb_step(self, closure=None):
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
params_with_grad = []
grads = []
exp_avgs = []
exp_avg_sqs = []
trails = []
state_steps = []
for p in group['params']:
grad = get_bf16_grad(p, self.params_attr) if is_master_weight(p, self.params_attr) else p.grad
if grad is not None:
params_with_grad.append(p)
if grad.is_sparse:
raise RuntimeError('Lamb does not support sparse gradients')
if grad.device != torch.device('cpu'):
raise RuntimeError('Lamb supports only CPU device')
grads.append(grad)
state = self.state[p]
if len(state) == 0:
state['step'] = 0
buffer_dtype = p.dtype if p.dtype is torch.float64 else torch.float
state['exp_avg'] = torch.zeros(p.shape, dtype=buffer_dtype)
state['exp_avg_sq'] = torch.zeros(p.shape, dtype=buffer_dtype)
exp_avgs.append(state['exp_avg'])
exp_avg_sqs.append(state['exp_avg_sq'])
state['step'] += 1
state_steps.append(state['step'])
beta1, beta2 = group['betas']
_lamb_fused_impl(
params_with_grad,
grads,
exp_avgs,
exp_avg_sqs,
self.params_attr,
state_steps,
beta1,
beta2,
group['lr'],
group['weight_decay'],
group['eps'])
return loss | true | true |
f72416f7c7f270568985bd86ba257f577de01834 | 6,721 | py | Python | pydeps/cli.py | AvenzaOleg/pydeps | 1e6715b7bea47a40e8042821b57937deaaa0fdc3 | [
"BSD-2-Clause"
] | null | null | null | pydeps/cli.py | AvenzaOleg/pydeps | 1e6715b7bea47a40e8042821b57937deaaa0fdc3 | [
"BSD-2-Clause"
] | null | null | null | pydeps/cli.py | AvenzaOleg/pydeps | 1e6715b7bea47a40e8042821b57937deaaa0fdc3 | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
command line interface (cli) code.
"""
# pylint: disable=line-too-long
from __future__ import print_function
import argparse
from .arguments import Arguments
import json
from .pycompat import configparser
import logging
import os
import sys
import textwrap
from . import __version__
def error(*args, **kwargs):  # pragma: nocover
    """Write an error message to stderr and abort with exit status 1."""
    kwargs['file'] = sys.stderr
    message = ("\n\tERROR:",) + args
    print(*message, **kwargs)
    sys.exit(1)
#: the (will become) verbose function
verbose = None
def _not_verbose(*args, **kwargs): # pragma: nocover
    """No-op stand-in used until a real verbose function is installed."""
    pass
def _mkverbose(level):
def _verbose(n, *args, **kwargs):
if not isinstance(n, int): # we're only interested in small integers
# this allows the simpler usage cli.verbose(msg)
args = (n,) + args
n = 1
if 0 < level <= n:
print(*args, **kwargs)
return _verbose
def base_argparser(argv=()):
    """Initial parser that can set values for the rest of the parsing process.

    Handles the options that must be processed before config files are
    read (``--debug``, ``--config``, ``--no-config``, ``--version``,
    ``-L/--log``) and returns ``(parser, parsed_args, remaining_argv)``.
    """
    global verbose
    verbose = _not_verbose

    _p = argparse.ArgumentParser(add_help=False)
    _p.add_argument('--debug', action='store_true', help="turn on all the show and verbose options (mainly for debugging pydeps itself)")
    _p.add_argument('--config', help="specify config file", metavar="FILE")
    _p.add_argument('--no-config', help="disable processing of config files", action='store_true')
    _p.add_argument('--version', action='store_true', help='print pydeps version')
    _p.add_argument('-L', '--log', help=textwrap.dedent('''
        set log-level to one of CRITICAL, ERROR, WARNING, INFO, DEBUG, NOTSET.
    '''))
    _args, argv = _p.parse_known_args(argv)

    if _args.log:
        # BUG FIX: validation previously used substring membership in the
        # string "CRITICAL DEBUG ERROR FATAL INFO WARN", which rejected
        # the documented WARNING/NOTSET values and accepted arbitrary
        # substrings like "RIT".  Use exact membership in the real names.
        loglevels = ("CRITICAL", "FATAL", "ERROR", "WARNING", "WARN",
                     "INFO", "DEBUG", "NOTSET")
        if _args.log not in loglevels:  # pragma: nocover
            error('legal values for the -L parameter are:',
                  ' '.join(loglevels))
        loglevel = getattr(logging, _args.log)
    else:
        loglevel = None
    logging.basicConfig(
        level=loglevel,
        format='%(filename)s:%(lineno)d: %(levelname)s: %(message)s'
    )

    if _args.version:  # pragma: nocover
        print("pydeps v" + __version__)
        sys.exit(0)

    return _p, _args, argv  # return parsed and remaining args
def parse_args(argv=()):
    """Parse command line arguments, and return a dict.

    Config files (``--config``, ``./.pydeps``, ``~/.pydeps``) are layered
    under the command line unless ``--no-config`` is given.  Also installs
    the module-level ``verbose`` function according to ``-v``/``--debug``.
    """
    # BUG FIX: 'verbose' was previously assigned without a global
    # declaration, so the module-level verbose (initialized to
    # _not_verbose by base_argparser) was never replaced and stayed a
    # no-op for the rest of the program.
    global verbose
    _p, _args, argv = base_argparser(argv)

    config_files = []
    if not _args.no_config:  # process config files
        # extra config file specified with --config <fname> has highest precedence
        if _args.config:
            config_files.append(_args.config)
        # .pydeps file specified in current directory is next
        local_pydeps = os.path.join(os.getcwd(), '.pydeps')
        if os.path.exists(local_pydeps):
            config_files.append(local_pydeps)
        # finally the .pydeps file in the user's homedir
        home = os.environ['USERPROFILE' if sys.platform == 'win32' else 'HOME']
        home_pydeps = os.path.join(home, '.pydeps')
        if os.path.exists(home_pydeps):
            config_files.append(home_pydeps)

    args = Arguments(config_files, debug=True, parents=[_p])
    args.add('fname', kind="FNAME:input", help='filename')
    args.add('-v', '--verbose', default=0, action='count', help="be more verbose (-vv, -vvv for more verbosity)")
    args.add('-o', default=None, kind="FNAME:output", dest='output', metavar="file", help="write output to 'file'")
    args.add('-T', default='svg', dest='format', help="output format (svg|png)")
    args.add('--display', kind="FNAME:exe", default=None, help="program to use to display the graph (png or svg file depending on the T parameter)", metavar="PROGRAM")
    args.add('--noshow', action='store_true', help="don't call external program to display graph")
    args.add('--show-deps', action='store_true', help="show output of dependency analysis")
    args.add('--show-raw-deps', action='store_true', help="show output of dependency analysis before removing skips")
    args.add('--show-dot', action='store_true', help="show output of dot conversion")
    args.add('--nodot', action='store_true', help="skip dot conversion")
    args.add('--no-output', action='store_true', help="don't create .svg/.png file, implies --no-show (-t/-o will be ignored)")
    args.add('--show-cycles', action='store_true', help="show only import cycles")
    args.add('--debug-mf', default=0, type=int, metavar="INT", help="set the ModuleFinder.debug flag to this value")
    args.add('--noise-level', default=200, type=int, metavar="INT", help="exclude sources or sinks with degree greater than noise-level")
    args.add('--max-bacon', default=2, type=int, metavar="INT", help="exclude nodes that are more than n hops away (default=2, 0 -> infinite)")
    args.add('--pylib', action='store_true', help="include python std lib modules")
    args.add('--pylib-all', action='store_true', help="include python all std lib modules (incl. C modules)")
    args.add('--include-missing', action='store_true', help="include modules that are not installed (or can't be found on sys.path)")
    args.add('-x', '--exclude', default=[], nargs="+", metavar="FNAME", help="input files to skip")
    args.add('--externals', action='store_true', help='create list of direct external dependencies')
    args.add('--reverse', action='store_true', help="draw arrows to (instead of from) imported modules")

    _args = args.parse_args(argv)

    if _args.externals:
        # --externals short-circuits everything else with a fixed profile.
        return dict(
            T='svg', config=None, debug=False, display=None, exclude=[], externals=True,
            fname=_args.fname, format='svg', max_bacon=10, no_config=False, nodot=False,
            noise_level=200, noshow=True, output=None, pylib=False, pylib_all=False,
            show=False, show_cycles=False, show_deps=False, show_dot=False,
            show_raw_deps=False, verbose=0, include_missing=True, reverse=False,
        )

    # Reconcile the various show/no-show flags.
    _args.show = True
    if _args.no_output:
        _args.noshow = True
    if _args.noshow:
        _args.show = False
    if _args.nodot and _args.show_cycles:
        error("Can't use --nodot and --show-cycles together")  # pragma: nocover
    if _args.nodot:
        _args.show_dot = False
    if _args.max_bacon == 0:
        # 0 means "no hop limit".
        _args.max_bacon = sys.maxsize
    _args.format = getattr(_args, 'T', getattr(_args, 'format', None))

    verbose = _mkverbose(max(_args.verbose, int(_args.debug)))
    verbose(2, _args, '\n')

    if _args.debug:  # pragma: nocover
        _args.verbose = 1
        _args.show = True
        _args.show_deps = True
        _args.show_dot = True

    return vars(_args)
| 41.487654 | 167 | 0.654813 |
from __future__ import print_function
import argparse
from .arguments import Arguments
import json
from .pycompat import configparser
import logging
import os
import sys
import textwrap
from . import __version__
def error(*args, **kwargs):
kwargs['file'] = sys.stderr
print("\n\tERROR:", *args, **kwargs)
sys.exit(1)
verbose = None
def _not_verbose(*args, **kwargs):
pass
def _mkverbose(level):
def _verbose(n, *args, **kwargs):
if not isinstance(n, int):
# this allows the simpler usage cli.verbose(msg)
args = (n,) + args
n = 1
if 0 < level <= n:
print(*args, **kwargs)
return _verbose
def base_argparser(argv=()):
global verbose
verbose = _not_verbose
_p = argparse.ArgumentParser(add_help=False)
_p.add_argument('--debug', action='store_true', help="turn on all the show and verbose options (mainly for debugging pydeps itself)")
_p.add_argument('--config', help="specify config file", metavar="FILE")
_p.add_argument('--no-config', help="disable processing of config files", action='store_true')
_p.add_argument('--version', action='store_true', help='print pydeps version')
_p.add_argument('-L', '--log', help=textwrap.dedent('''
set log-level to one of CRITICAL, ERROR, WARNING, INFO, DEBUG, NOTSET.
'''))
_args, argv = _p.parse_known_args(argv)
if _args.log:
loglevels = "CRITICAL DEBUG ERROR FATAL INFO WARN"
if _args.log not in loglevels: # pragma: nocover
error('legal values for the -L parameter are:', loglevels)
loglevel = getattr(logging, _args.log)
else:
loglevel = None
logging.basicConfig(
level=loglevel,
format='%(filename)s:%(lineno)d: %(levelname)s: %(message)s'
)
if _args.version: # pragma: nocover
print("pydeps v" + __version__)
sys.exit(0)
return _p, _args, argv # return parsed and remaining args
def parse_args(argv=()):
_p, _args, argv = base_argparser(argv)
config_files = []
if not _args.no_config: # process config files
# extra config file specified with --config <fname> has highest precedence
if _args.config:
config_files.append(_args.config)
# .pydeps file specified in current directory is next
local_pydeps = os.path.join(os.getcwd(), '.pydeps')
if os.path.exists(local_pydeps):
config_files.append(local_pydeps)
# finally the .pydeps file in the the user's homedir
home = os.environ['USERPROFILE' if sys.platform == 'win32' else 'HOME']
home_pydeps = os.path.join(home, '.pydeps')
if os.path.exists(home_pydeps):
config_files.append(home_pydeps)
args = Arguments(config_files, debug=True, parents=[_p])
args.add('fname', kind="FNAME:input", help='filename')
args.add('-v', '--verbose', default=0, action='count', help="be more verbose (-vv, -vvv for more verbosity)")
args.add('-o', default=None, kind="FNAME:output", dest='output', metavar="file", help="write output to 'file'")
args.add('-T', default='svg', dest='format', help="output format (svg|png)")
args.add('--display', kind="FNAME:exe", default=None, help="program to use to display the graph (png or svg file depending on the T parameter)", metavar="PROGRAM")
args.add('--noshow', action='store_true', help="don't call external program to display graph")
args.add('--show-deps', action='store_true', help="show output of dependency analysis")
args.add('--show-raw-deps', action='store_true', help="show output of dependency analysis before removing skips")
args.add('--show-dot', action='store_true', help="show output of dot conversion")
args.add('--nodot', action='store_true', help="skip dot conversion")
args.add('--no-output', action='store_true', help="don't create .svg/.png file, implies --no-show (-t/-o will be ignored)")
args.add('--show-cycles', action='store_true', help="show only import cycles")
args.add('--debug-mf', default=0, type=int, metavar="INT", help="set the ModuleFinder.debug flag to this value")
args.add('--noise-level', default=200, type=int, metavar="INT", help="exclude sources or sinks with degree greater than noise-level")
args.add('--max-bacon', default=2, type=int, metavar="INT", help="exclude nodes that are more than n hops away (default=2, 0 -> infinite)")
args.add('--pylib', action='store_true', help="include python std lib modules")
args.add('--pylib-all', action='store_true', help="include python all std lib modules (incl. C modules)")
args.add('--include-missing', action='store_true', help="include modules that are not installed (or can't be found on sys.path)")
args.add('-x', '--exclude', default=[], nargs="+", metavar="FNAME", help="input files to skip")
args.add('--externals', action='store_true', help='create list of direct external dependencies')
args.add('--reverse', action='store_true', help="draw arrows to (instead of from) imported modules")
_args = args.parse_args(argv)
if _args.externals:
return dict(
T='svg', config=None, debug=False, display=None, exclude=[], externals=True,
fname=_args.fname, format='svg', max_bacon=10, no_config=False, nodot=False,
noise_level=200, noshow=True, output=None, pylib=False, pylib_all=False,
show=False, show_cycles=False, show_deps=False, show_dot=False,
show_raw_deps=False, verbose=0, include_missing=True, reverse=False,
)
_args.show = True
if _args.no_output:
_args.noshow = True
if _args.noshow:
_args.show = False
if _args.nodot and _args.show_cycles:
error("Can't use --nodot and --show-cycles together")
if _args.nodot:
_args.show_dot = False
if _args.max_bacon == 0:
_args.max_bacon = sys.maxsize
_args.format = getattr(_args, 'T', getattr(_args, 'format', None))
verbose = _mkverbose(max(_args.verbose, int(_args.debug)))
verbose(2, _args, '\n')
if _args.debug:
_args.verbose = 1
_args.show = True
_args.show_deps = True
_args.show_dot = True
return vars(_args)
| true | true |
f72418161d72c045ee191193724c0fad0d98ece9 | 17,698 | py | Python | AFQ/api/bundle_dict.py | arokem/pyAFQ | 3aa15db49d5e50cbe9df0a5a9bc24f15f44bd758 | [
"BSD-2-Clause"
] | 28 | 2016-09-10T01:25:41.000Z | 2022-03-22T21:05:11.000Z | AFQ/api/bundle_dict.py | arokem/pyAFQ | 3aa15db49d5e50cbe9df0a5a9bc24f15f44bd758 | [
"BSD-2-Clause"
] | 688 | 2016-02-03T19:56:37.000Z | 2022-03-16T21:21:18.000Z | AFQ/api/bundle_dict.py | arokem/pyAFQ | 3aa15db49d5e50cbe9df0a5a9bc24f15f44bd758 | [
"BSD-2-Clause"
] | 21 | 2016-02-03T18:47:22.000Z | 2022-01-27T15:38:05.000Z | import logging
from collections.abc import MutableMapping
import AFQ.data as afd
logging.basicConfig(level=logging.INFO)
__all__ = ["PediatricBundleDict", "BundleDict"]
def do_preprocessing():
    """Placeholder for a preprocessing entry point; not implemented."""
    raise NotImplementedError
BUNDLES = ["ATR", "CGC", "CST", "IFO", "ILF", "SLF", "ARC", "UNC",
"FA", "FP"]
CALLOSUM_BUNDLES = ["AntFrontal", "Motor", "Occipital", "Orbital",
"PostParietal", "SupFrontal", "SupParietal",
"Temporal"]
# See: https://www.cmu.edu/dietrich/psychology/cognitiveaxon/documents/yeh_etal_2018.pdf # noqa
RECO_BUNDLES_16 = [
'CST', 'C', 'F', 'UF', 'MCP', 'AF', 'CCMid',
'CC_ForcepsMajor', 'CC_ForcepsMinor', 'IFOF']
RECO_BUNDLES_80 = ["AC", "AF", "AR", "AST", "C", "CB", "CC_ForcepsMajor",
"CC_ForcepsMinor", "CC", "CCMid", "CNII", "CNIII",
"CNIV", "CNV", "CNVII", "CNVIII", "CS", "CST", "CT",
"CTT", "DLF", "EMC", "F_L_R", "FPT", "ICP", "IFOF", "ILF",
"LL", "MCP", "MdLF", "ML", "MLF", "OPT", "OR", "PC", "PPT",
"RST", "SCP", "SLF", "STT", "TPT", "UF", "V", "VOF"]
RECO_UNIQUE = [
'CCMid', 'CC_ForcepsMajor', 'CC_ForcepsMinor', 'MCP', 'AC', 'PC', 'SCP',
'V', 'CC', 'F_L_R']
PEDIATRIC_BUNDLES = [
"ARC", "ATR", "CGC", "CST", "FA", "FP", "IFO", "ILF", "MdLF", "SLF", "UNC"]
DIPY_GH = "https://github.com/dipy/dipy/blob/master/dipy/"
class BundleDict(MutableMapping):
    """Mapping of bundle names to their segmentation definitions.

    Bundle definitions (waypoint ROIs, inclusion/exclusion rules,
    probability maps, uids) are generated lazily from the AFQ /
    Recobundles templates on first access (see :meth:`gen_all`), unless a
    ready-made dict is passed to the constructor.
    """

    def __init__(self,
                 bundle_info=BUNDLES,
                 seg_algo="afq",
                 resample_to=None):
        """
        Create a bundle dictionary, needed for the segmentation

        Parameters
        ----------
        bundle_info : list or dict, optional
            A list of the bundles to be used in this case, or a
            pre-built mapping of bundle name to definition.
            Default: all of them
        seg_algo: One of {"afq", "reco", "reco16", "reco80"}
            The bundle segmentation algorithm to use.
            "afq" : Use waypoint ROIs + probability maps, as described
            in [Yeatman2012]_
            "reco" / "reco16" : Use Recobundles [Garyfallidis2017]_
            with a 16-bundle set.
            "reco80": Use Recobundles with an 80-bundle set.
        resample_to : Nifti1Image or bool, optional
            If set, templates will be resampled to the affine and shape of this
            image. If None, the MNI template will be used.
            If False, no resampling will be done.
            Default: afd.read_mni_template()
        """
        if not (isinstance(bundle_info, dict)
                or isinstance(bundle_info, list)):
            raise TypeError((
                f"bundle_info must be a dict or a list,"
                f" currently a {type(bundle_info)}"))
        self.seg_algo = seg_algo.lower()
        if resample_to is None:
            resample_to = afd.read_mni_template()
        self.resample_to = resample_to
        if isinstance(bundle_info, dict):
            # Pre-built definitions: use as-is, no lazy generation needed.
            self.bundle_names = list(bundle_info.keys())
            self._dict = bundle_info.copy()
            self.resample_all_roi()
            self.all_gen = True
        else:
            # Expand short names into per-hemisphere names, except for
            # bundles that cross the midline / are unique.
            expanded_bundle_names = []
            for bundle_name in bundle_info:
                if self.seg_algo == "afq":
                    if bundle_name in ["FA", "FP"]\
                            or bundle_name in CALLOSUM_BUNDLES:
                        expanded_bundle_names.append(bundle_name)
                    else:
                        expanded_bundle_names.append(bundle_name + "_R")
                        expanded_bundle_names.append(bundle_name + "_L")
                elif self.seg_algo.startswith("reco"):
                    if bundle_name in RECO_UNIQUE\
                            or bundle_name == "whole_brain":
                        expanded_bundle_names.append(bundle_name)
                    else:
                        expanded_bundle_names.append(bundle_name + "_R")
                        expanded_bundle_names.append(bundle_name + "_L")
                else:
                    raise ValueError(
                        "Input: %s is not a valid input`seg_algo`"
                        % self.seg_algo)
            self.bundle_names = expanded_bundle_names
            self._dict = {}
            self.all_gen = False
        # Each bundle gets a digit identifier
        # (to be stored in the tractogram);
        # we keep track of the next free uid for bundles added later
        # with __setitem__.
        # NOTE(review): if bundle_names is empty, self._c_uid is never
        # assigned and __setitem__ would raise AttributeError — confirm
        # whether empty bundle lists are supported.
        self._uid_dict = {}
        for ii, b_name in enumerate(self.bundle_names):
            self._uid_dict[b_name] = ii + 1
            self._c_uid = ii + 2
        self.logger = logging.getLogger('AFQ.api')
        if self.seg_algo == "afq":
            # Drop bundles whose ROIs overlap a requested callosal bundle,
            # since each streamline can belong to only one bundle.
            if "FP" in self.bundle_names\
                    and "Occipital" in self.bundle_names:
                self.logger.warning((
                    "FP and Occipital bundles are co-located, and AFQ"
                    " assigns each streamline to only one bundle."
                    " Only Occipital will be used."))
                self.bundle_names.remove("FP")
            if "FA" in self.bundle_names\
                    and "Orbital" in self.bundle_names:
                self.logger.warning((
                    "FA and Orbital bundles are co-located, and AFQ"
                    " assigns each streamline to only one bundle."
                    " Only Orbital will be used."))
                self.bundle_names.remove("FA")
            if "FA" in self.bundle_names\
                    and "AntFrontal" in self.bundle_names:
                self.logger.warning((
                    "FA and AntFrontal bundles are co-located, and AFQ"
                    " assigns each streamline to only one bundle."
                    " Only AntFrontal will be used."))
                self.bundle_names.remove("FA")

    def gen_all(self):
        """Populate self._dict with a definition for every bundle name.

        Idempotent: does nothing once ``self.all_gen`` is set.  Template
        images are fetched via ``AFQ.data`` readers.
        """
        if self.all_gen:
            return
        if self.seg_algo == "afq":
            templates =\
                afd.read_templates(resample_to=self.resample_to)
            # For the arcuate, we need to rename a few of these
            # and duplicate the SLF ROI:
            templates['ARC_roi1_L'] = templates['SLF_roi1_L']
            templates['ARC_roi1_R'] = templates['SLF_roi1_R']
            templates['ARC_roi2_L'] = templates['SLFt_roi2_L']
            templates['ARC_roi2_R'] = templates['SLFt_roi2_R']
            callosal_templates =\
                afd.read_callosum_templates(resample_to=self.resample_to)
            for key in self.bundle_names:
                # Consider hard coding since we might have different rules
                # for some tracts
                if key in ["FA", "FP"]:
                    bundle = {
                        'ROIs': [
                            templates[key + "_L"],
                            templates[key + "_R"],
                            callosal_templates["Callosum_midsag"]],
                        'rules': [True, True, True],
                        'prob_map': templates[key + "_prob_map"],
                        'cross_midline': True,
                        'uid': self._uid_dict[key]}
                elif key in CALLOSUM_BUNDLES:
                    bundle = {
                        'ROIs': [
                            callosal_templates["L_" + key],
                            callosal_templates["R_" + key],
                            callosal_templates["Callosum_midsag"]],
                        'rules': [True, True, True],
                        'cross_midline': True,
                        'uid': self._uid_dict[key]}
                # SLF is a special case, because it has an exclusion ROI:
                elif key in ["SLF_L", "SLF_R"]:
                    name = key[:-2]
                    hemi = key[-2:]
                    bundle = {
                        'ROIs': [
                            templates[name + '_roi1' + hemi],
                            templates[name + '_roi2' + hemi],
                            templates["SLFt_roi2" + hemi]],
                        'rules': [True, True, False],
                        'prob_map': templates[name + hemi + '_prob_map'],
                        'cross_midline': False,
                        'uid': self._uid_dict[key]}
                else:
                    # Generic two-waypoint bundle: "<name>_<hemi>".
                    name = key[:-2]
                    hemi = key[-2:]
                    if (templates.get(name + '_roi1' + hemi)
                            and templates.get(name + '_roi2' + hemi)
                            and templates.get(name + hemi + '_prob_map')):
                        bundle = {
                            'ROIs': [
                                templates[name + '_roi1' + hemi],
                                templates[name + '_roi2' + hemi]],
                            'rules': [True, True],
                            'prob_map': templates[
                                name + hemi + '_prob_map'],
                            'cross_midline': False,
                            'uid': self._uid_dict[key]}
                    else:
                        raise ValueError(f"{key} is not in AFQ templates")
                self._dict[key] = bundle
            self.resample_all_roi()
        elif self.seg_algo.startswith("reco"):
            if self.seg_algo.endswith("80"):
                reco_bundle_dict = afd.read_hcp_atlas(80)
            else:
                reco_bundle_dict = afd.read_hcp_atlas(16)
            for key in self.bundle_names:
                bundle = reco_bundle_dict[key]
                bundle['uid'] = self._uid_dict[key]
                self._dict[key] = bundle
        else:
            raise ValueError(
                "Input: %s is not a valid input`seg_algo`" % self.seg_algo)
        self.all_gen = True

    def __setitem__(self, key, item):
        # New bundles must be full definition dicts; they receive the next
        # free uid.
        if not isinstance(item, dict):
            raise ValueError((
                "After BundleDict initialization, additional"
                " bundles can only be added as dictionaries "
                "(see BundleDict.gen_all for examples)"))
        self.gen_all()
        self._dict[key] = item
        self._uid_dict[key] = self._c_uid
        self._dict[key]["uid"] = self._c_uid
        self._c_uid += 1
        self.bundle_names.append(key)

    def __getitem__(self, key):
        self.gen_all()
        return self._dict[key]

    def __len__(self):
        return len(self.bundle_names)

    def __delitem__(self, key):
        # NOTE(review): unlike __getitem__/__iter__, this does not call
        # self.gen_all() first, so deleting a known-but-not-yet-generated
        # bundle hits the RuntimeError below instead of being removed —
        # confirm whether gen_all() should be called here.
        if key not in self._dict and key not in self.bundle_names:
            raise KeyError(f"{key} not found")
        if key in self._dict:
            del self._dict[key]
        else:
            raise RuntimeError((
                f"{key} not found in internal dictionary, "
                f"but found in bundle_names"))
        if key in self.bundle_names:
            self.bundle_names.remove(key)
        else:
            raise RuntimeError((
                f"{key} not found in bundle_names, "
                f"but found in internal dictionary"))

    def __iter__(self):
        self.gen_all()
        return iter(self._dict)

    def copy(self):
        # Shallow copy of the (fully generated) internal mapping.
        self.gen_all()
        return self._dict.copy()

    def resample_all_roi(self):
        # Resample every ROI image to self.resample_to (no-op when it is
        # falsy, i.e. resampling was disabled with False).
        if self.resample_to:
            for key in self._dict.keys():
                for ii, roi in enumerate(self._dict[key]['ROIs']):
                    self._dict[key]['ROIs'][ii] =\
                        afd.read_resample_roi(
                            roi, resample_to=self.resample_to)
class PediatricBundleDict(BundleDict):
    """A ``BundleDict`` specialized for pediatric (baby) bundles.

    Definitions are built from the UNCNeo/babyAFQ templates and
    probability maps rather than the adult AFQ templates.
    """
    def __init__(self,
                 bundle_info=PEDIATRIC_BUNDLES,
                 seg_algo="afq",
                 resample_to=False):
        """
        Create a pediatric bundle dictionary, needed for the segmentation

        Parameters
        ----------
        bundle_info : list, optional
            A list of the pediatric bundles to be used in this case.
            Default: all of them
        seg_algo: only "afq" is supported
            The bundle segmentation algorithm to use.
            "afq" : Use waypoint ROIs + probability maps, as described
            in [Yeatman2012]_
        resample_to : Nifti1Image or bool, optional
            If set, templates will be resampled to the affine and shape of this
            image. If False, no resampling will be done.
            Default: False
        """
        BundleDict.__init__(self, bundle_info, seg_algo, resample_to)
    def gen_all(self):
        """Lazily build all pediatric bundle definitions (runs at most once).

        Only the "afq" segmentation algorithm is supported; any other
        ``seg_algo`` raises ``ValueError``.
        """
        if self.all_gen:
            return
        if self.seg_algo == "afq":
            # Pediatric bundles differ from adult bundles:
            # - A third ROI has been introduced for curvy tracts:
            #   ARC, ATR, CGC, IFO, and UCI
            # - ILF posterior ROI has been split into two
            #   to separate ILF and mdLF
            # - Addition of pAF and VOF ROIs
            # - SLF ROIs are restricted to parietal cortex
            pediatric_templates = afd.read_pediatric_templates()
            # pediatric probability maps
            prob_map_order = [
                "ATR_L", "ATR_R", "CST_L", "CST_R", "CGC_L", "CGC_R",
                "HCC_L", "HCC_R", "FP", "FA", "IFO_L", "IFO_R", "ILF_L",
                "ILF_R", "SLF_L", "SLF_R", "UNC_L", "UNC_R",
                "ARC_L", "ARC_R", "MdLF_L", "MdLF_R"]
            prob_maps = pediatric_templates[
                'UNCNeo_JHU_tracts_prob-for-babyAFQ']
            prob_map_data = prob_maps.get_fdata()
            # pediatric bundle dict
            pediatric_bundles = {}
            # each bundles gets a digit identifier
            # (to be stored in the tractogram)
            # NOTE(review): uids restart at 1 here and do not use the
            # parent class's self._uid_dict -- confirm this is intended.
            uid = 1
            for name in PEDIATRIC_BUNDLES:
                # ROIs that cross the mid-line
                if name in ["FA", "FP"]:
                    pediatric_bundles[name] = {
                        'ROIs': [
                            pediatric_templates[name + "_L"],
                            pediatric_templates[name + "_R"],
                            pediatric_templates["mid-saggital"]],
                        'rules': [True, True, True],
                        'cross_midline': True,
                        'prob_map': prob_map_data[
                            ...,
                            prob_map_order.index(name)],
                        'uid': uid}
                    uid += 1
                # SLF is a special case, because it has an exclusion ROI:
                elif name == "SLF":
                    for hemi in ['_R', '_L']:
                        pediatric_bundles[name + hemi] = {
                            'ROIs': [
                                pediatric_templates[name + '_roi1' + hemi],
                                pediatric_templates[name + '_roi2' + hemi],
                                pediatric_templates["SLFt_roi2" + hemi]],
                            'rules': [True, True, False],
                            'cross_midline': False,
                            'prob_map': prob_map_data[
                                ...,
                                prob_map_order.index(name + hemi)],
                            'uid': uid}
                        uid += 1
                # Third ROI for curvy tracts
                elif name in ["ARC", "ATR", "CGC", "IFO", "UNC"]:
                    for hemi in ['_R', '_L']:
                        pediatric_bundles[name + hemi] = {
                            'ROIs': [
                                pediatric_templates[name + '_roi1' + hemi],
                                pediatric_templates[name + '_roi2' + hemi],
                                pediatric_templates[name + '_roi3' + hemi]],
                            'rules': [True, True, True],
                            'cross_midline': False,
                            'prob_map': prob_map_data[
                                ...,
                                prob_map_order.index(name + hemi)],
                            'uid': uid}
                        uid += 1
                elif name == "MdLF":
                    for hemi in ['_R', '_L']:
                        pediatric_bundles[name + hemi] = {
                            'ROIs': [
                                pediatric_templates[name + '_roi1' + hemi],
                                pediatric_templates[name + '_roi2' + hemi]],
                            'rules': [True, True],
                            'cross_midline': False,
                            # reuse probability map from ILF
                            'prob_map': prob_map_data[
                                ...,
                                prob_map_order.index("ILF" + hemi)],
                            'uid': uid}
                        uid += 1
                # Default: two ROIs within hemisphere
                else:
                    for hemi in ['_R', '_L']:
                        pediatric_bundles[name + hemi] = {
                            'ROIs': [
                                pediatric_templates[name + '_roi1' + hemi],
                                pediatric_templates[name + '_roi2' + hemi]],
                            'rules': [True, True],
                            'cross_midline': False,
                            'prob_map': prob_map_data[
                                ...,
                                prob_map_order.index(name + hemi)],
                            'uid': uid}
                        uid += 1
            self._dict = pediatric_bundles
            # NOTE(review): unlike BundleDict.gen_all, ROIs are not
            # resampled here (resample_to defaults to False) -- confirm.
        else:
            raise ValueError(
                "Input: %s is not a valid input`seg_algo`" % self.seg_algo)
        self.all_gen = True
| 42.238663 | 96 | 0.469262 | import logging
from collections.abc import MutableMapping
import AFQ.data as afd
logging.basicConfig(level=logging.INFO)
__all__ = ["PediatricBundleDict", "BundleDict"]
def do_preprocessing():
    """Placeholder for a preprocessing entry point; always raises
    ``NotImplementedError``."""
    raise NotImplementedError()
BUNDLES = ["ATR", "CGC", "CST", "IFO", "ILF", "SLF", "ARC", "UNC",
"FA", "FP"]
CALLOSUM_BUNDLES = ["AntFrontal", "Motor", "Occipital", "Orbital",
"PostParietal", "SupFrontal", "SupParietal",
"Temporal"]
_BUNDLES_16 = [
'CST', 'C', 'F', 'UF', 'MCP', 'AF', 'CCMid',
'CC_ForcepsMajor', 'CC_ForcepsMinor', 'IFOF']
RECO_BUNDLES_80 = ["AC", "AF", "AR", "AST", "C", "CB", "CC_ForcepsMajor",
"CC_ForcepsMinor", "CC", "CCMid", "CNII", "CNIII",
"CNIV", "CNV", "CNVII", "CNVIII", "CS", "CST", "CT",
"CTT", "DLF", "EMC", "F_L_R", "FPT", "ICP", "IFOF", "ILF",
"LL", "MCP", "MdLF", "ML", "MLF", "OPT", "OR", "PC", "PPT",
"RST", "SCP", "SLF", "STT", "TPT", "UF", "V", "VOF"]
RECO_UNIQUE = [
'CCMid', 'CC_ForcepsMajor', 'CC_ForcepsMinor', 'MCP', 'AC', 'PC', 'SCP',
'V', 'CC', 'F_L_R']
PEDIATRIC_BUNDLES = [
"ARC", "ATR", "CGC", "CST", "FA", "FP", "IFO", "ILF", "MdLF", "SLF", "UNC"]
DIPY_GH = "https://github.com/dipy/dipy/blob/master/dipy/"
class BundleDict(MutableMapping):
def __init__(self,
bundle_info=BUNDLES,
seg_algo="afq",
resample_to=None):
if not (isinstance(bundle_info, dict)
or isinstance(bundle_info, list)):
raise TypeError((
f"bundle_info must be a dict or a list,"
f" currently a {type(bundle_info)}"))
self.seg_algo = seg_algo.lower()
if resample_to is None:
resample_to = afd.read_mni_template()
self.resample_to = resample_to
if isinstance(bundle_info, dict):
self.bundle_names = list(bundle_info.keys())
self._dict = bundle_info.copy()
self.resample_all_roi()
self.all_gen = True
else:
expanded_bundle_names = []
for bundle_name in bundle_info:
if self.seg_algo == "afq":
if bundle_name in ["FA", "FP"]\
or bundle_name in CALLOSUM_BUNDLES:
expanded_bundle_names.append(bundle_name)
else:
expanded_bundle_names.append(bundle_name + "_R")
expanded_bundle_names.append(bundle_name + "_L")
elif self.seg_algo.startswith("reco"):
if bundle_name in RECO_UNIQUE\
or bundle_name == "whole_brain":
expanded_bundle_names.append(bundle_name)
else:
expanded_bundle_names.append(bundle_name + "_R")
expanded_bundle_names.append(bundle_name + "_L")
else:
raise ValueError(
"Input: %s is not a valid input`seg_algo`"
% self.seg_algo)
self.bundle_names = expanded_bundle_names
self._dict = {}
self.all_gen = False
self._uid_dict = {}
for ii, b_name in enumerate(self.bundle_names):
self._uid_dict[b_name] = ii + 1
self._c_uid = ii + 2
self.logger = logging.getLogger('AFQ.api')
if self.seg_algo == "afq":
if "FP" in self.bundle_names\
and "Occipital" in self.bundle_names:
self.logger.warning((
"FP and Occipital bundles are co-located, and AFQ"
" assigns each streamline to only one bundle."
" Only Occipital will be used."))
self.bundle_names.remove("FP")
if "FA" in self.bundle_names\
and "Orbital" in self.bundle_names:
self.logger.warning((
"FA and Orbital bundles are co-located, and AFQ"
" assigns each streamline to only one bundle."
" Only Orbital will be used."))
self.bundle_names.remove("FA")
if "FA" in self.bundle_names\
and "AntFrontal" in self.bundle_names:
self.logger.warning((
"FA and AntFrontal bundles are co-located, and AFQ"
" assigns each streamline to only one bundle."
" Only AntFrontal will be used."))
self.bundle_names.remove("FA")
def gen_all(self):
if self.all_gen:
return
if self.seg_algo == "afq":
templates =\
afd.read_templates(resample_to=self.resample_to)
templates['ARC_roi1_L'] = templates['SLF_roi1_L']
templates['ARC_roi1_R'] = templates['SLF_roi1_R']
templates['ARC_roi2_L'] = templates['SLFt_roi2_L']
templates['ARC_roi2_R'] = templates['SLFt_roi2_R']
callosal_templates =\
afd.read_callosum_templates(resample_to=self.resample_to)
for key in self.bundle_names:
if key in ["FA", "FP"]:
bundle = {
'ROIs': [
templates[key + "_L"],
templates[key + "_R"],
callosal_templates["Callosum_midsag"]],
'rules': [True, True, True],
'prob_map': templates[key + "_prob_map"],
'cross_midline': True,
'uid': self._uid_dict[key]}
elif key in CALLOSUM_BUNDLES:
bundle = {
'ROIs': [
callosal_templates["L_" + key],
callosal_templates["R_" + key],
callosal_templates["Callosum_midsag"]],
'rules': [True, True, True],
'cross_midline': True,
'uid': self._uid_dict[key]}
elif key in ["SLF_L", "SLF_R"]:
name = key[:-2]
hemi = key[-2:]
bundle = {
'ROIs': [
templates[name + '_roi1' + hemi],
templates[name + '_roi2' + hemi],
templates["SLFt_roi2" + hemi]],
'rules': [True, True, False],
'prob_map': templates[name + hemi + '_prob_map'],
'cross_midline': False,
'uid': self._uid_dict[key]}
else:
name = key[:-2]
hemi = key[-2:]
if (templates.get(name + '_roi1' + hemi)
and templates.get(name + '_roi2' + hemi)
and templates.get(name + hemi + '_prob_map')):
bundle = {
'ROIs': [
templates[name + '_roi1' + hemi],
templates[name + '_roi2' + hemi]],
'rules': [True, True],
'prob_map': templates[
name + hemi + '_prob_map'],
'cross_midline': False,
'uid': self._uid_dict[key]}
else:
raise ValueError(f"{key} is not in AFQ templates")
self._dict[key] = bundle
self.resample_all_roi()
elif self.seg_algo.startswith("reco"):
if self.seg_algo.endswith("80"):
reco_bundle_dict = afd.read_hcp_atlas(80)
else:
reco_bundle_dict = afd.read_hcp_atlas(16)
for key in self.bundle_names:
bundle = reco_bundle_dict[key]
bundle['uid'] = self._uid_dict[key]
self._dict[key] = bundle
else:
raise ValueError(
"Input: %s is not a valid input`seg_algo`" % self.seg_algo)
self.all_gen = True
def __setitem__(self, key, item):
if not isinstance(item, dict):
raise ValueError((
"After BundleDict initialization, additional"
" bundles can only be added as dictionaries "
"(see BundleDict.gen_all for examples)"))
self.gen_all()
self._dict[key] = item
self._uid_dict[key] = self._c_uid
self._dict[key]["uid"] = self._c_uid
self._c_uid += 1
self.bundle_names.append(key)
def __getitem__(self, key):
self.gen_all()
return self._dict[key]
def __len__(self):
return len(self.bundle_names)
def __delitem__(self, key):
if key not in self._dict and key not in self.bundle_names:
raise KeyError(f"{key} not found")
if key in self._dict:
del self._dict[key]
else:
raise RuntimeError((
f"{key} not found in internal dictionary, "
f"but found in bundle_names"))
if key in self.bundle_names:
self.bundle_names.remove(key)
else:
raise RuntimeError((
f"{key} not found in bundle_names, "
f"but found in internal dictionary"))
def __iter__(self):
self.gen_all()
return iter(self._dict)
def copy(self):
self.gen_all()
return self._dict.copy()
def resample_all_roi(self):
if self.resample_to:
for key in self._dict.keys():
for ii, roi in enumerate(self._dict[key]['ROIs']):
self._dict[key]['ROIs'][ii] =\
afd.read_resample_roi(
roi, resample_to=self.resample_to)
class PediatricBundleDict(BundleDict):
def __init__(self,
bundle_info=PEDIATRIC_BUNDLES,
seg_algo="afq",
resample_to=False):
BundleDict.__init__(self, bundle_info, seg_algo, resample_to)
def gen_all(self):
if self.all_gen:
return
if self.seg_algo == "afq":
pediatric_templates = afd.read_pediatric_templates()
prob_map_order = [
"ATR_L", "ATR_R", "CST_L", "CST_R", "CGC_L", "CGC_R",
"HCC_L", "HCC_R", "FP", "FA", "IFO_L", "IFO_R", "ILF_L",
"ILF_R", "SLF_L", "SLF_R", "UNC_L", "UNC_R",
"ARC_L", "ARC_R", "MdLF_L", "MdLF_R"]
prob_maps = pediatric_templates[
'UNCNeo_JHU_tracts_prob-for-babyAFQ']
prob_map_data = prob_maps.get_fdata()
pediatric_bundles = {}
uid = 1
for name in PEDIATRIC_BUNDLES:
if name in ["FA", "FP"]:
pediatric_bundles[name] = {
'ROIs': [
pediatric_templates[name + "_L"],
pediatric_templates[name + "_R"],
pediatric_templates["mid-saggital"]],
'rules': [True, True, True],
'cross_midline': True,
'prob_map': prob_map_data[
...,
prob_map_order.index(name)],
'uid': uid}
uid += 1
elif name == "SLF":
for hemi in ['_R', '_L']:
pediatric_bundles[name + hemi] = {
'ROIs': [
pediatric_templates[name + '_roi1' + hemi],
pediatric_templates[name + '_roi2' + hemi],
pediatric_templates["SLFt_roi2" + hemi]],
'rules': [True, True, False],
'cross_midline': False,
'prob_map': prob_map_data[
...,
prob_map_order.index(name + hemi)],
'uid': uid}
uid += 1
elif name in ["ARC", "ATR", "CGC", "IFO", "UNC"]:
for hemi in ['_R', '_L']:
pediatric_bundles[name + hemi] = {
'ROIs': [
pediatric_templates[name + '_roi1' + hemi],
pediatric_templates[name + '_roi2' + hemi],
pediatric_templates[name + '_roi3' + hemi]],
'rules': [True, True, True],
'cross_midline': False,
'prob_map': prob_map_data[
...,
prob_map_order.index(name + hemi)],
'uid': uid}
uid += 1
elif name == "MdLF":
for hemi in ['_R', '_L']:
pediatric_bundles[name + hemi] = {
'ROIs': [
pediatric_templates[name + '_roi1' + hemi],
pediatric_templates[name + '_roi2' + hemi]],
'rules': [True, True],
'cross_midline': False,
'prob_map': prob_map_data[
...,
prob_map_order.index("ILF" + hemi)],
'uid': uid}
uid += 1
else:
for hemi in ['_R', '_L']:
pediatric_bundles[name + hemi] = {
'ROIs': [
pediatric_templates[name + '_roi1' + hemi],
pediatric_templates[name + '_roi2' + hemi]],
'rules': [True, True],
'cross_midline': False,
'prob_map': prob_map_data[
...,
prob_map_order.index(name + hemi)],
'uid': uid}
uid += 1
self._dict = pediatric_bundles
else:
raise ValueError(
"Input: %s is not a valid input`seg_algo`" % self.seg_algo)
self.all_gen = True
| true | true |
f72418223eb83a65ed2fe842913ad03107f2ab2b | 105 | py | Python | routes.py | joejcollins/david-kano | 73bc09494ed3fc1fb1707fcc6c9fa37f19221cec | [
"MIT"
] | null | null | null | routes.py | joejcollins/david-kano | 73bc09494ed3fc1fb1707fcc6c9fa37f19221cec | [
"MIT"
] | null | null | null | routes.py | joejcollins/david-kano | 73bc09494ed3fc1fb1707fcc6c9fa37f19221cec | [
"MIT"
] | null | null | null | from controllers import pages
wsgi_routes = [
(r'/', pages.home),
(r'/(\w+)', pages.template),
] | 17.5 | 32 | 0.590476 | from controllers import pages
wsgi_routes = [
(r'/', pages.home),
(r'/(\w+)', pages.template),
] | true | true |
f724182d4906b622c0647bd68f97712a4a05e149 | 10,622 | py | Python | tests/flow/test_path_filter.py | jeffreylovitz/RedisGraph | 257cdccd0575a940004a23c7795b2bd139ff11b7 | [
"Ruby",
"ISC",
"MIT"
] | null | null | null | tests/flow/test_path_filter.py | jeffreylovitz/RedisGraph | 257cdccd0575a940004a23c7795b2bd139ff11b7 | [
"Ruby",
"ISC",
"MIT"
] | null | null | null | tests/flow/test_path_filter.py | jeffreylovitz/RedisGraph | 257cdccd0575a940004a23c7795b2bd139ff11b7 | [
"Ruby",
"ISC",
"MIT"
] | null | null | null | import os
import sys
from RLTest import Env
from redisgraph import Graph, Node, Edge
from collections import Counter
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from base import FlowTestsBase
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/../../')
from demo import QueryInfo
GRAPH_ID = "G"  # key under which the test graph is stored
redis_con = None  # Redis connection; populated in testPathFilter.__init__
redis_graph = None  # Graph handle; recreated in setUp before every test
class testPathFilter(FlowTestsBase):
    """Flow tests for Cypher path-pattern filters, e.g. WHERE (n)-[:R]->(:L)."""
    def __init__(self):
        self.env = Env()
        # One shared connection for all tests in this class.
        global redis_con
        redis_con = self.env.getConnection()
    def setUp(self):
        # Every test starts from an empty graph.
        global redis_graph
        redis_graph = Graph(GRAPH_ID, redis_con)
        self.env.flush()
    def test00_simple_path_filter(self):
        # node0 -[:R]-> node1; only node0 satisfies the path filter.
        node0 = Node(node_id=0, label="L")
        node1 = Node(node_id=1, label="L", properties={'x':1})
        edge01 = Edge(src_node=node0, dest_node=node1, relation="R")
        redis_graph.add_node(node0)
        redis_graph.add_node(node1)
        redis_graph.add_edge(edge01)
        redis_graph.flush()
        query = "MATCH (n:L) WHERE (n)-[:R]->(:L) RETURN n"
        result_set = redis_graph.query(query)
        expected_results = [[node0]]
        query_info = QueryInfo(query = query, description="Tests simple path filter", expected_result = expected_results)
        self._assert_resultset_equals_expected(result_set, query_info)
    def test01_negated_simple_path_filter(self):
        # Negated form of test00: only node1 (no outgoing :R edge) matches.
        node0 = Node(node_id=0, label="L")
        node1 = Node(node_id=1, label="L", properties={'x':1})
        edge01 = Edge(src_node=node0, dest_node=node1, relation="R")
        redis_graph.add_node(node0)
        redis_graph.add_node(node1)
        redis_graph.add_edge(edge01)
        redis_graph.flush()
        query = "MATCH (n:L) WHERE NOT (n)-[:R]->(:L) RETURN n"
        result_set = redis_graph.query(query)
        expected_results = [[node1]]
        query_info = QueryInfo(query = query, description="Tests simple negated path filter", expected_result = expected_results)
        self._assert_resultset_equals_expected(result_set, query_info)
    def test02_test_path_filter_or_property_filter(self):
        # Path filter matches node0; property filter matches node1.
        node0 = Node(node_id=0, label="L")
        node1 = Node(node_id=1, label="L", properties={'x':1})
        edge01 = Edge(src_node=node0, dest_node=node1, relation="R")
        redis_graph.add_node(node0)
        redis_graph.add_node(node1)
        redis_graph.add_edge(edge01)
        redis_graph.flush()
        query = "MATCH (n:L) WHERE (n)-[:R]->(:L) OR n.x=1 RETURN n"
        result_set = redis_graph.query(query)
        expected_results = [[node0],[node1]]
        query_info = QueryInfo(query = query, description="Tests OR condition with simple filter and path filter", expected_result = expected_results)
        self._assert_resultset_and_expected_mutually_included(result_set, query_info)
    def test03_path_filter_or_negated_path_filter(self):
        # Tautological filter: every node matches the path or its negation.
        node0 = Node(node_id=0, label="L")
        node1 = Node(node_id=1, label="L", properties={'x':1})
        edge01 = Edge(src_node=node0, dest_node=node1, relation="R")
        redis_graph.add_node(node0)
        redis_graph.add_node(node1)
        redis_graph.add_edge(edge01)
        redis_graph.flush()
        query = "MATCH (n:L) WHERE (n)-[:R]->(:L) OR NOT (n)-[:R]->(:L) RETURN n"
        result_set = redis_graph.query(query)
        expected_results = [[node0],[node1]]
        query_info = QueryInfo(query = query, description="Tests OR condition with path and negated path filters", expected_result = expected_results)
        self._assert_resultset_and_expected_mutually_included(result_set, query_info)
    def test04_test_level_1_nesting_logical_operators_over_path_and_property_filters(self):
        # One nesting level: path OR (property AND negated path).
        node0 = Node(node_id=0, label="L")
        node1 = Node(node_id=1, label="L", properties={'x':1})
        edge01 = Edge(src_node=node0, dest_node=node1, relation="R")
        redis_graph.add_node(node0)
        redis_graph.add_node(node1)
        redis_graph.add_edge(edge01)
        redis_graph.flush()
        query = "MATCH (n:L) WHERE (n)-[:R]->(:L) OR (n.x=1 AND NOT (n)-[:R]->(:L)) RETURN n"
        result_set = redis_graph.query(query)
        expected_results = [[node0],[node1]]
        query_info = QueryInfo(query = query, description="Tests AND condition with simple filter and negated path filter", expected_result = expected_results)
        self._assert_resultset_and_expected_mutually_included(result_set, query_info)
    def test05_test_level_2_nesting_logical_operators_over_path_and_property_filters(self):
        # Two nesting levels mixing property and path predicates.
        node0 = Node(node_id=0, label="L")
        node1 = Node(node_id=1, label="L", properties={'x':1})
        edge01 = Edge(src_node=node0, dest_node=node1, relation="R")
        redis_graph.add_node(node0)
        redis_graph.add_node(node1)
        redis_graph.add_edge(edge01)
        redis_graph.flush()
        query = "MATCH (n:L) WHERE (n)-[:R]->(:L) OR (n.x=1 AND (n.x = 2 OR NOT (n)-[:R]->(:L))) RETURN n"
        result_set = redis_graph.query(query)
        expected_results = [[node0],[node1]]
        query_info = QueryInfo(query = query, description="Tests AND condition with simple filter and nested OR", expected_result = expected_results)
        self._assert_resultset_and_expected_mutually_included(result_set, query_info)
    def test06_test_level_2_nesting_logical_operators_over_path_filters(self):
        # Two nesting levels where the nested predicates are both paths.
        node0 = Node(node_id=0, label="L")
        node1 = Node(node_id=1, label="L", properties={'x':1})
        node2 = Node(node_id=2, label="L2")
        edge01 = Edge(src_node=node0, dest_node=node1, relation="R")
        edge12 = Edge(src_node=node1, dest_node=node2, relation="R2")
        redis_graph.add_node(node0)
        redis_graph.add_node(node1)
        redis_graph.add_node(node2)
        redis_graph.add_edge(edge01)
        redis_graph.add_edge(edge12)
        redis_graph.flush()
        query = "MATCH (n:L) WHERE (n)-[:R]->(:L) OR (n.x=1 AND ((n)-[:R2]->(:L2) OR (n)-[:R]->(:L))) RETURN n"
        result_set = redis_graph.query(query)
        expected_results = [[node0],[node1]]
        query_info = QueryInfo(query = query, description="Tests AND condition with simple filter and nested OR", expected_result = expected_results)
        self._assert_resultset_and_expected_mutually_included(result_set, query_info)
    def test07_test_edge_filters(self):
        # Only edge01 carries {x:1}, so only its source node 'a' matches.
        node0 = Node(node_id=0, label="L", properties={'x': 'a'})
        node1 = Node(node_id=1, label="L", properties={'x': 'b'})
        node2 = Node(node_id=2, label="L", properties={'x': 'c'})
        edge01 = Edge(src_node=node0, dest_node=node1, relation="R", properties={'x': 1})
        edge12 = Edge(src_node=node1, dest_node=node2, relation="R")
        redis_graph.add_node(node0)
        redis_graph.add_node(node1)
        redis_graph.add_node(node2)
        redis_graph.add_edge(edge01)
        redis_graph.add_edge(edge12)
        redis_graph.flush()
        query = "MATCH (n:L) WHERE (n)-[:R {x:1}]->() RETURN n.x"
        result_set = redis_graph.query(query)
        expected_results = [['a']]
        query_info = QueryInfo(query = query, description="Tests pattern filter edge conditions", expected_result = expected_results)
        self._assert_resultset_and_expected_mutually_included(result_set, query_info)
    def test08_indexed_child_stream_resolution(self):
        # Chain a -> b -> c; the bound stream should be resolvable by index.
        node0 = Node(node_id=0, label="L", properties={'x': 'a'})
        node1 = Node(node_id=1, label="L", properties={'x': 'b'})
        node2 = Node(node_id=2, label="L", properties={'x': 'c'})
        edge01 = Edge(src_node=node0, dest_node=node1, relation="R")
        edge12 = Edge(src_node=node1, dest_node=node2, relation="R")
        redis_graph.add_node(node0)
        redis_graph.add_node(node1)
        redis_graph.add_node(node2)
        redis_graph.add_edge(edge01)
        redis_graph.add_edge(edge12)
        redis_graph.flush()
        # Create index.
        query = "CREATE INDEX ON :L(x)"
        result_set = redis_graph.query(query)
        self.env.assertEquals(result_set.indices_created, 1)
        # Issue a query in which the bound variable stream of the SemiApply op is an Index Scan.
        query = "MATCH (n:L) WHERE (:L)<-[]-(n)<-[]-(:L {x: 'a'}) AND n.x = 'b' RETURN n.x"
        result_set = redis_graph.query(query)
        expected_results = [['b']]
        self.env.assertEquals(result_set.result_set, expected_results)
    def test09_no_invalid_expand_into(self):
        # Plan-shape test: the pattern filter must not produce Expand Into.
        node0 = Node(node_id=0, label="L", properties={'x': 'a'})
        node1 = Node(node_id=1, label="L", properties={'x': 'b'})
        node2 = Node(node_id=2, label="L", properties={'x': 'c'})
        edge01 = Edge(src_node=node0, dest_node=node1, relation="R")
        edge12 = Edge(src_node=node1, dest_node=node2, relation="R")
        redis_graph.add_node(node0)
        redis_graph.add_node(node1)
        redis_graph.add_node(node2)
        redis_graph.add_edge(edge01)
        redis_graph.add_edge(edge12)
        redis_graph.flush()
        # Issue a query in which the match stream and the bound stream must both perform traversal.
        query = "MATCH (n:L)-[]->(:L) WHERE ({x: 'a'})-[]->(n) RETURN n.x"
        plan = redis_graph.execution_plan(query)
        # Verify that the execution plan has no Expand Into and two traversals.
        self.env.assertNotIn("Expand Into", plan)
        self.env.assertEquals(2, plan.count("Conditional Traverse"))
        result_set = redis_graph.query(query)
        expected_results = [['b']]
        self.env.assertEquals(result_set.result_set, expected_results)
    def test10_verify_apply_results(self):
        # Build a graph with 3 nodes and 3 edges, 2 of which have the same source.
        node0 = Node(node_id=0, label="L", properties={'x': 'a'})
        node1 = Node(node_id=1, label="L", properties={'x': 'b'})
        node2 = Node(node_id=2, label="L", properties={'x': 'c'})
        edge01 = Edge(src_node=node0, dest_node=node1, relation="R")
        edge02 = Edge(src_node=node0, dest_node=node2, relation="R")
        edge12 = Edge(src_node=node1, dest_node=node2, relation="R")
        redis_graph.add_node(node0)
        redis_graph.add_node(node1)
        redis_graph.add_node(node2)
        redis_graph.add_edge(edge01)
        redis_graph.add_edge(edge02)
        redis_graph.add_edge(edge12)
        redis_graph.flush()
        query = "MATCH (n:L) WHERE (n)-[]->() RETURN n.x ORDER BY n.x"
        result_set = redis_graph.query(query)
        # Each source node should be returned exactly once.
        expected_results = [['a'], ['b']]
        self.env.assertEquals(result_set.result_set, expected_results)
| 47.208889 | 159 | 0.656091 | import os
import sys
from RLTest import Env
from redisgraph import Graph, Node, Edge
from collections import Counter
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from base import FlowTestsBase
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/../../')
from demo import QueryInfo
GRAPH_ID = "G"
redis_con = None
redis_graph = None
class testPathFilter(FlowTestsBase):
def __init__(self):
self.env = Env()
global redis_con
redis_con = self.env.getConnection()
def setUp(self):
global redis_graph
redis_graph = Graph(GRAPH_ID, redis_con)
self.env.flush()
def test00_simple_path_filter(self):
node0 = Node(node_id=0, label="L")
node1 = Node(node_id=1, label="L", properties={'x':1})
edge01 = Edge(src_node=node0, dest_node=node1, relation="R")
redis_graph.add_node(node0)
redis_graph.add_node(node1)
redis_graph.add_edge(edge01)
redis_graph.flush()
query = "MATCH (n:L) WHERE (n)-[:R]->(:L) RETURN n"
result_set = redis_graph.query(query)
expected_results = [[node0]]
query_info = QueryInfo(query = query, description="Tests simple path filter", expected_result = expected_results)
self._assert_resultset_equals_expected(result_set, query_info)
def test01_negated_simple_path_filter(self):
node0 = Node(node_id=0, label="L")
node1 = Node(node_id=1, label="L", properties={'x':1})
edge01 = Edge(src_node=node0, dest_node=node1, relation="R")
redis_graph.add_node(node0)
redis_graph.add_node(node1)
redis_graph.add_edge(edge01)
redis_graph.flush()
query = "MATCH (n:L) WHERE NOT (n)-[:R]->(:L) RETURN n"
result_set = redis_graph.query(query)
expected_results = [[node1]]
query_info = QueryInfo(query = query, description="Tests simple negated path filter", expected_result = expected_results)
self._assert_resultset_equals_expected(result_set, query_info)
def test02_test_path_filter_or_property_filter(self):
node0 = Node(node_id=0, label="L")
node1 = Node(node_id=1, label="L", properties={'x':1})
edge01 = Edge(src_node=node0, dest_node=node1, relation="R")
redis_graph.add_node(node0)
redis_graph.add_node(node1)
redis_graph.add_edge(edge01)
redis_graph.flush()
query = "MATCH (n:L) WHERE (n)-[:R]->(:L) OR n.x=1 RETURN n"
result_set = redis_graph.query(query)
expected_results = [[node0],[node1]]
query_info = QueryInfo(query = query, description="Tests OR condition with simple filter and path filter", expected_result = expected_results)
self._assert_resultset_and_expected_mutually_included(result_set, query_info)
def test03_path_filter_or_negated_path_filter(self):
node0 = Node(node_id=0, label="L")
node1 = Node(node_id=1, label="L", properties={'x':1})
edge01 = Edge(src_node=node0, dest_node=node1, relation="R")
redis_graph.add_node(node0)
redis_graph.add_node(node1)
redis_graph.add_edge(edge01)
redis_graph.flush()
query = "MATCH (n:L) WHERE (n)-[:R]->(:L) OR NOT (n)-[:R]->(:L) RETURN n"
result_set = redis_graph.query(query)
expected_results = [[node0],[node1]]
query_info = QueryInfo(query = query, description="Tests OR condition with path and negated path filters", expected_result = expected_results)
self._assert_resultset_and_expected_mutually_included(result_set, query_info)
def test04_test_level_1_nesting_logical_operators_over_path_and_property_filters(self):
node0 = Node(node_id=0, label="L")
node1 = Node(node_id=1, label="L", properties={'x':1})
edge01 = Edge(src_node=node0, dest_node=node1, relation="R")
redis_graph.add_node(node0)
redis_graph.add_node(node1)
redis_graph.add_edge(edge01)
redis_graph.flush()
query = "MATCH (n:L) WHERE (n)-[:R]->(:L) OR (n.x=1 AND NOT (n)-[:R]->(:L)) RETURN n"
result_set = redis_graph.query(query)
expected_results = [[node0],[node1]]
query_info = QueryInfo(query = query, description="Tests AND condition with simple filter and negated path filter", expected_result = expected_results)
self._assert_resultset_and_expected_mutually_included(result_set, query_info)
def test05_test_level_2_nesting_logical_operators_over_path_and_property_filters(self):
node0 = Node(node_id=0, label="L")
node1 = Node(node_id=1, label="L", properties={'x':1})
edge01 = Edge(src_node=node0, dest_node=node1, relation="R")
redis_graph.add_node(node0)
redis_graph.add_node(node1)
redis_graph.add_edge(edge01)
redis_graph.flush()
query = "MATCH (n:L) WHERE (n)-[:R]->(:L) OR (n.x=1 AND (n.x = 2 OR NOT (n)-[:R]->(:L))) RETURN n"
result_set = redis_graph.query(query)
expected_results = [[node0],[node1]]
query_info = QueryInfo(query = query, description="Tests AND condition with simple filter and nested OR", expected_result = expected_results)
self._assert_resultset_and_expected_mutually_included(result_set, query_info)
def test06_test_level_2_nesting_logical_operators_over_path_filters(self):
node0 = Node(node_id=0, label="L")
node1 = Node(node_id=1, label="L", properties={'x':1})
node2 = Node(node_id=2, label="L2")
edge01 = Edge(src_node=node0, dest_node=node1, relation="R")
edge12 = Edge(src_node=node1, dest_node=node2, relation="R2")
redis_graph.add_node(node0)
redis_graph.add_node(node1)
redis_graph.add_node(node2)
redis_graph.add_edge(edge01)
redis_graph.add_edge(edge12)
redis_graph.flush()
query = "MATCH (n:L) WHERE (n)-[:R]->(:L) OR (n.x=1 AND ((n)-[:R2]->(:L2) OR (n)-[:R]->(:L))) RETURN n"
result_set = redis_graph.query(query)
expected_results = [[node0],[node1]]
query_info = QueryInfo(query = query, description="Tests AND condition with simple filter and nested OR", expected_result = expected_results)
self._assert_resultset_and_expected_mutually_included(result_set, query_info)
def test07_test_edge_filters(self):
node0 = Node(node_id=0, label="L", properties={'x': 'a'})
node1 = Node(node_id=1, label="L", properties={'x': 'b'})
node2 = Node(node_id=2, label="L", properties={'x': 'c'})
edge01 = Edge(src_node=node0, dest_node=node1, relation="R", properties={'x': 1})
edge12 = Edge(src_node=node1, dest_node=node2, relation="R")
redis_graph.add_node(node0)
redis_graph.add_node(node1)
redis_graph.add_node(node2)
redis_graph.add_edge(edge01)
redis_graph.add_edge(edge12)
redis_graph.flush()
query = "MATCH (n:L) WHERE (n)-[:R {x:1}]->() RETURN n.x"
result_set = redis_graph.query(query)
expected_results = [['a']]
query_info = QueryInfo(query = query, description="Tests pattern filter edge conditions", expected_result = expected_results)
self._assert_resultset_and_expected_mutually_included(result_set, query_info)
def test08_indexed_child_stream_resolution(self):
node0 = Node(node_id=0, label="L", properties={'x': 'a'})
node1 = Node(node_id=1, label="L", properties={'x': 'b'})
node2 = Node(node_id=2, label="L", properties={'x': 'c'})
edge01 = Edge(src_node=node0, dest_node=node1, relation="R")
edge12 = Edge(src_node=node1, dest_node=node2, relation="R")
redis_graph.add_node(node0)
redis_graph.add_node(node1)
redis_graph.add_node(node2)
redis_graph.add_edge(edge01)
redis_graph.add_edge(edge12)
redis_graph.flush()
query = "CREATE INDEX ON :L(x)"
result_set = redis_graph.query(query)
self.env.assertEquals(result_set.indices_created, 1)
query = "MATCH (n:L) WHERE (:L)<-[]-(n)<-[]-(:L {x: 'a'}) AND n.x = 'b' RETURN n.x"
result_set = redis_graph.query(query)
expected_results = [['b']]
self.env.assertEquals(result_set.result_set, expected_results)
def test09_no_invalid_expand_into(self):
node0 = Node(node_id=0, label="L", properties={'x': 'a'})
node1 = Node(node_id=1, label="L", properties={'x': 'b'})
node2 = Node(node_id=2, label="L", properties={'x': 'c'})
edge01 = Edge(src_node=node0, dest_node=node1, relation="R")
edge12 = Edge(src_node=node1, dest_node=node2, relation="R")
redis_graph.add_node(node0)
redis_graph.add_node(node1)
redis_graph.add_node(node2)
redis_graph.add_edge(edge01)
redis_graph.add_edge(edge12)
redis_graph.flush()
query = "MATCH (n:L)-[]->(:L) WHERE ({x: 'a'})-[]->(n) RETURN n.x"
plan = redis_graph.execution_plan(query)
self.env.assertNotIn("Expand Into", plan)
self.env.assertEquals(2, plan.count("Conditional Traverse"))
result_set = redis_graph.query(query)
expected_results = [['b']]
self.env.assertEquals(result_set.result_set, expected_results)
def test10_verify_apply_results(self):
    """Only nodes with at least one outgoing edge should pass the Apply filter."""
    vertices = [Node(node_id=idx, label="L", properties={'x': val})
                for idx, val in enumerate(['a', 'b', 'c'])]
    for vertex in vertices:
        redis_graph.add_node(vertex)
    # Edges a->b, a->c, b->c: 'c' is the only node without an outgoing edge.
    for src_idx, dest_idx in ((0, 1), (0, 2), (1, 2)):
        redis_graph.add_edge(Edge(src_node=vertices[src_idx],
                                  dest_node=vertices[dest_idx],
                                  relation="R"))
    redis_graph.flush()

    query = "MATCH (n:L) WHERE (n)-[]->() RETURN n.x ORDER BY n.x"
    result_set = redis_graph.query(query)
    self.env.assertEquals(result_set.result_set, [['a'], ['b']])
| true | true |
f72418cebb9a25071d43c6dff652515ba3da8366 | 3,382 | py | Python | python/GafferDispatchUI/PythonCommandUI.py | sebaDesmet/gaffer | 47b2d093c40452bd77947e3b5bd0722a366c8d59 | [
"BSD-3-Clause"
] | 1 | 2019-08-02T16:49:59.000Z | 2019-08-02T16:49:59.000Z | python/GafferDispatchUI/PythonCommandUI.py | sebaDesmet/gaffer | 47b2d093c40452bd77947e3b5bd0722a366c8d59 | [
"BSD-3-Clause"
] | 2 | 2017-08-23T21:35:45.000Z | 2018-01-29T08:59:33.000Z | python/GafferDispatchUI/PythonCommandUI.py | sebaDesmet/gaffer | 47b2d093c40452bd77947e3b5bd0722a366c8d59 | [
"BSD-3-Clause"
] | 1 | 2020-02-15T16:15:54.000Z | 2020-02-15T16:15:54.000Z | ##########################################################################
#
# Copyright (c) 2015, John Haddon. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import Gaffer
import GafferUI
import GafferDispatch
Gaffer.Metadata.registerNode(
GafferDispatch.PythonCommand,
"description",
"""
Runs python code.
""",
plugs = {
"command" : (
"description",
"""
The command to run. This may reference any of the
variables by name, and also the node itself as `self`
and the current context as `context`.
""",
"plugValueWidget:type", "GafferUI.MultiLineStringPlugValueWidget",
"multiLineStringPlugValueWidget:role", "code",
"layout:label", "",
),
"variables" : (
"description",
"""
An arbitrary set of variables which can be accessed via
the `variables` dictionary within the python command.
""",
"layout:section", "Variables",
),
"sequence" : (
"description",
"""
Calls the command once for each sequence, instead of once
per frame. In this mode, an additional variable called `frames`
is available to the command, containing a list of all frame
numbers for which execution should be performed. The context may
be updated to reference any frame from this list, and accessing
a variable returns the value for the current frame.
A typical structure for the command might look something like this :
```
# Do some one-time initialization
...
# Process all frames
for frame in frames :
context.setFrame( frame )
# Read variables after setting the frame to get
# the right values for that frame.
v = variables["v"]
...
# Do some one-time finalization
...
```
""",
"layout:section", "Advanced",
),
}
)
| 29.666667 | 77 | 0.674157 | true | true | |
f724190ad19bba7a7fd84b44f59bcfd9044949ad | 12,110 | py | Python | seld.py | sedurCode/seld-dcase2021 | f8e09dbbbb5ac7d6ae0b82083f1a11b013c5dd51 | [
"MIT"
] | 23 | 2021-02-28T21:58:06.000Z | 2022-03-29T03:19:59.000Z | seld.py | sedurCode/seld-dcase2021 | f8e09dbbbb5ac7d6ae0b82083f1a11b013c5dd51 | [
"MIT"
] | 10 | 2021-04-06T17:20:41.000Z | 2022-03-23T13:19:42.000Z | seld.py | sedurCode/seld-dcase2021 | f8e09dbbbb5ac7d6ae0b82083f1a11b013c5dd51 | [
"MIT"
] | 13 | 2021-04-01T02:23:46.000Z | 2022-01-16T03:29:54.000Z | #
# A wrapper script that trains the SELDnet. The training stops when the early stopping metric - SELD error stops improving.
#
import os
import sys
import numpy as np
import cls_feature_class
import cls_data_generator
from cls_compute_seld_results import ComputeSELDResults, reshape_3Dto2D
import keras_model
import parameter
import time
def dump_DCASE2021_results(_data_gen, _feat_cls, _dcase_output_folder, _sed_pred, _doa_pred):
    '''
    Write the frame-wise SED/DOA predictions as one csv file per recording.

    The prediction arrays hold all recordings concatenated in fixed-size
    chunks of `frames_per_file` rows; only the first `get_nb_frames()` rows
    of each chunk contain real content (the rest is zero padding).
    '''
    # Frames with actual content per recording (e.g. 600 for 60s at 100ms hop).
    content_frames = _data_gen.get_nb_frames()
    filenames = _data_gen.get_filelist()
    frames_per_file = _data_gen.get_frame_per_file()

    nb_files = _sed_pred.shape[0] // frames_per_file
    for file_cnt in range(nb_files):
        chunk_start = file_cnt * frames_per_file
        chunk_stop = chunk_start + content_frames
        # Convert the regression-format labels of this recording to the
        # DCASE output format and write them next to the source file name.
        output_dict = _feat_cls.regression_label_format_to_output_format(
            _sed_pred[chunk_start:chunk_stop, :],
            _doa_pred[chunk_start:chunk_stop, :]
        )
        output_file = os.path.join(
            _dcase_output_folder, filenames[file_cnt].replace('.npy', '.csv'))
        _data_gen.write_output_format_file(output_file, output_dict)
def get_accdoa_labels(accdoa_in, nb_classes):
    """Derive SED activity flags from ACCDOA output vector magnitudes.

    The last axis of ``accdoa_in`` packs per-class Cartesian DOA components
    as [x_0..x_C, y_0..y_C, z_0..z_C]. A class is considered active when the
    magnitude of its (x, y, z) vector exceeds 0.5.

    Returns a (sed_mask, accdoa_in) pair; the DOA output is the input itself.
    """
    # Split the last axis into the three per-class component groups.
    components = [accdoa_in[:, :, i * nb_classes:(i + 1) * nb_classes]
                  for i in range(3)]
    # Vector magnitude per class, thresholded at 0.5 for activity detection.
    sed = np.sqrt(sum(c ** 2 for c in components)) > 0.5
    return sed, accdoa_in
def _split_sed_doa(pred, params, nb_classes):
    """Convert raw network output into flattened 2D (sed, doa) prediction arrays.

    Handles both the ACCDOA single-output format and the classic two-output
    (SED, DOA) format; in the latter case the DOA branch may carry SED masks
    in its first ``nb_classes`` channels when the objective is not plain mse.
    """
    if params['is_accdoa']:
        sed_pred, doa_pred = get_accdoa_labels(pred, nb_classes)
        return reshape_3Dto2D(sed_pred), reshape_3Dto2D(doa_pred)
    sed_pred = reshape_3Dto2D(pred[0]) > 0.5
    # BUGFIX: string comparison must use '==', not 'is' (identity).
    doa_pred = reshape_3Dto2D(pred[1] if params['doa_objective'] == 'mse' else pred[1][:, :, nb_classes:])
    return sed_pred, doa_pred


def main(argv):
    """
    Main wrapper for training sound event localization and detection network.

    :param argv: expects two optional inputs.
        first input: task_id - (optional) To choose the system configuration in parameters.py.
                              (default) 1 - uses default parameters
        second input: job_id - (optional) all the output files will be uniquely represented with this.
                              (default) 1
    """
    print(argv)
    if len(argv) != 3:
        print('\n\n')
        print('-------------------------------------------------------------------------------------------------------')
        print('The code expected two optional inputs')
        print('\t>> python seld.py <task-id> <job-id>')
        print('\t\t<task-id> is used to choose the user-defined parameter set from parameter.py')
        print('Using default inputs for now')
        print('\t\t<job-id> is a unique identifier which is used for output filenames (models, training plots). '
              'You can use any number or string for this.')
        print('-------------------------------------------------------------------------------------------------------')
        print('\n\n')

    # use parameter set defined by user
    task_id = '1' if len(argv) < 2 else argv[1]
    params = parameter.get_params(task_id)

    job_id = 1 if len(argv) < 3 else argv[-1]

    feat_cls = cls_feature_class.FeatureClass(params)
    train_splits, val_splits, test_splits = None, None, None

    # Fixed cross-validation splits for the DCASE 2021 dataset.
    # BUGFIX: compare strings with '==' rather than 'is' throughout; 'is'
    # relies on CPython interning and emits SyntaxWarning on 3.8+.
    if params['mode'] == 'dev':
        test_splits = [6]
        val_splits = [5]
        train_splits = [[1, 2, 3, 4]]
    elif params['mode'] == 'eval':
        test_splits = [[7, 8]]
        val_splits = [[6]]
        train_splits = [[1, 2, 3, 4, 5]]

    for split_cnt, split in enumerate(test_splits):
        print('\n\n---------------------------------------------------------------------------------------------------')
        print('------------------------------------ SPLIT {} -----------------------------------------------'.format(split))
        print('---------------------------------------------------------------------------------------------------')

        # Unique name for the run; all outputs (model, results) are keyed on it.
        cls_feature_class.create_folder(params['model_dir'])
        unique_name = '{}_{}_{}_{}_split{}'.format(
            task_id, job_id, params['dataset'], params['mode'], split
        )
        unique_name = os.path.join(params['model_dir'], unique_name)
        model_name = '{}_model.h5'.format(unique_name)
        print("unique_name: {}\n".format(unique_name))

        # Load train and validation data
        print('Loading training dataset:')
        data_gen_train = cls_data_generator.DataGenerator(
            params=params, split=train_splits[split_cnt]
        )

        print('Loading validation dataset:')
        data_gen_val = cls_data_generator.DataGenerator(
            params=params, split=val_splits[split_cnt], shuffle=False, per_file=True, is_eval=False
        )

        # Collect i/o shapes and build the SELD network.
        data_in, data_out = data_gen_train.get_data_sizes()
        print('FEATURES:\n\tdata_in: {}\n\tdata_out: {}\n'.format(data_in, data_out))
        nb_classes = data_gen_train.get_nb_classes()

        print('MODEL:\n\tdropout_rate: {}\n\tCNN: nb_cnn_filt: {}, f_pool_size{}, t_pool_size{}\n\trnn_size: {}, fnn_size: {}\n\tdoa_objective: {}\n'.format(
            params['dropout_rate'], params['nb_cnn2d_filt'], params['f_pool_size'], params['t_pool_size'], params['rnn_size'],
            params['fnn_size'], params['doa_objective']))
        print('Using loss weights : {}'.format(params['loss_weights']))
        model = keras_model.get_model(data_in=data_in, data_out=data_out, dropout_rate=params['dropout_rate'],
                                      nb_cnn2d_filt=params['nb_cnn2d_filt'], f_pool_size=params['f_pool_size'], t_pool_size=params['t_pool_size'],
                                      rnn_size=params['rnn_size'], fnn_size=params['fnn_size'],
                                      weights=params['loss_weights'], doa_objective=params['doa_objective'], is_accdoa=params['is_accdoa'])

        # Dump results in DCASE output format for calculating final scores
        dcase_output_val_folder = os.path.join(params['dcase_output_dir'], '{}_{}_{}_val'.format(task_id, params['dataset'], params['mode']))
        cls_feature_class.delete_and_create_folder(dcase_output_val_folder)
        print('Dumping recording-wise val results in: {}'.format(dcase_output_val_folder))

        # Initialize evaluation metric class
        score_obj = ComputeSELDResults(params)

        best_seld_metric = 99999
        best_epoch = -1
        patience_cnt = 0
        nb_epoch = 2 if params['quick_test'] else params['nb_epochs']
        tr_loss = np.zeros(nb_epoch)
        seld_metric = np.zeros((nb_epoch, 5))

        # Train until the early-stopping SELD score stops improving.
        for epoch_cnt in range(nb_epoch):
            start = time.time()

            # train once per epoch
            hist = model.fit_generator(
                generator=data_gen_train.generate(),
                steps_per_epoch=2 if params['quick_test'] else data_gen_train.get_total_batches_in_data(),
                epochs=params['epochs_per_fit'],
                verbose=2,
            )
            tr_loss[epoch_cnt] = hist.history.get('loss')[-1]

            # predict once per epoch
            pred = model.predict_generator(
                generator=data_gen_val.generate(),
                steps=2 if params['quick_test'] else data_gen_val.get_total_batches_in_data(),
                verbose=2
            )

            sed_pred, doa_pred = _split_sed_doa(pred, params, nb_classes)

            # Calculate the DCASE 2021 metrics - Location-aware detection and Class-aware localization scores
            dump_DCASE2021_results(data_gen_val, feat_cls, dcase_output_val_folder, sed_pred, doa_pred)
            seld_metric[epoch_cnt, :] = score_obj.get_SELD_Results(dcase_output_val_folder)

            # Early stopping bookkeeping: save the model whenever the combined
            # SELD score (last column) improves, and reset the patience counter.
            patience_cnt += 1
            if seld_metric[epoch_cnt, -1] < best_seld_metric:
                best_seld_metric = seld_metric[epoch_cnt, -1]
                best_epoch = epoch_cnt
                model.save(model_name)
                patience_cnt = 0

            print(
                'epoch_cnt: {}, time: {:0.2f}s, tr_loss: {:0.2f}, '
                '\n\t\t DCASE2021 SCORES: ER: {:0.2f}, F: {:0.1f}, LE: {:0.1f}, LR:{:0.1f}, seld_score (early stopping score): {:0.2f}, '
                'best_seld_score: {:0.2f}, best_epoch : {}\n'.format(
                    epoch_cnt, time.time() - start, tr_loss[epoch_cnt],
                    seld_metric[epoch_cnt, 0], seld_metric[epoch_cnt, 1]*100,
                    seld_metric[epoch_cnt, 2], seld_metric[epoch_cnt, 3]*100,
                    seld_metric[epoch_cnt, -1], best_seld_metric, best_epoch
                )
            )
            if patience_cnt > params['patience']:
                break

        print('\nResults on validation split:')
        print('\tUnique_name: {} '.format(unique_name))
        print('\tSaved model for the best_epoch: {}'.format(best_epoch))
        print('\tSELD_score (early stopping score) : {}'.format(best_seld_metric))

        print('\n\tDCASE2021 scores')
        print('\tClass-aware localization scores: Localization Error: {:0.1f}, Localization Recall: {:0.1f}'.format(seld_metric[best_epoch, 2], seld_metric[best_epoch, 3]*100))
        print('\tLocation-aware detection scores: Error rate: {:0.2f}, F-score: {:0.1f}'.format(seld_metric[best_epoch, 0], seld_metric[best_epoch, 1]*100))

        # ------------------  Calculate metric scores for unseen test split ---------------------------------
        print('\nLoading the best model and predicting results on the testing split')
        print('\tLoading testing dataset:')
        data_gen_test = cls_data_generator.DataGenerator(
            params=params, split=split, shuffle=False, per_file=True, is_eval=(params['mode'] == 'eval')
        )

        model = keras_model.load_seld_model('{}_model.h5'.format(unique_name), params['doa_objective'])
        pred_test = model.predict_generator(
            generator=data_gen_test.generate(),
            steps=2 if params['quick_test'] else data_gen_test.get_total_batches_in_data(),
            verbose=2
        )

        test_sed_pred, test_doa_pred = _split_sed_doa(pred_test, params, nb_classes)

        # Dump results in DCASE output format for calculating final scores
        dcase_output_test_folder = os.path.join(params['dcase_output_dir'], '{}_{}_{}_test'.format(task_id, params['dataset'], params['mode']))
        cls_feature_class.delete_and_create_folder(dcase_output_test_folder)
        print('Dumping recording-wise test results in: {}'.format(dcase_output_test_folder))
        dump_DCASE2021_results(data_gen_test, feat_cls, dcase_output_test_folder, test_sed_pred, test_doa_pred)

        if params['mode'] == 'dev':
            # Calculate DCASE2021 scores (only possible in dev mode, where
            # reference labels for the test split are available).
            test_seld_metric = score_obj.get_SELD_Results(dcase_output_test_folder)
            print('Results on test split:')
            print('\tDCASE2021 Scores')
            print('\tClass-aware localization scores: Localization Error: {:0.1f}, Localization Recall: {:0.1f}'.format(test_seld_metric[2], test_seld_metric[3]*100))
            print('\tLocation-aware detection scores: Error rate: {:0.2f}, F-score: {:0.1f}'.format(test_seld_metric[0], test_seld_metric[1]*100))
            print('\tSELD (early stopping metric): {:0.2f}'.format(test_seld_metric[-1]))
if __name__ == "__main__":
    try:
        # Propagate main()'s return value as the process exit status.
        sys.exit(main(sys.argv))
    except (ValueError, IOError) as e:
        # Exit non-zero with the error message printed to stderr.
        sys.exit(e)
| 48.830645 | 176 | 0.606689 |
import os
import sys
import numpy as np
import cls_feature_class
import cls_data_generator
from cls_compute_seld_results import ComputeSELDResults, reshape_3Dto2D
import keras_model
import parameter
import time
def dump_DCASE2021_results(_data_gen, _feat_cls, _dcase_output_folder, _sed_pred, _doa_pred):
max_frames_with_content = _data_gen.get_nb_frames()
test_filelist = _data_gen.get_filelist()
frames_per_file = _data_gen.get_frame_per_file()
for file_cnt in range(_sed_pred.shape[0] // frames_per_file):
output_file = os.path.join(_dcase_output_folder, test_filelist[file_cnt].replace('.npy', '.csv'))
dc = file_cnt * frames_per_file
output_dict = _feat_cls.regression_label_format_to_output_format(
_sed_pred[dc:dc + max_frames_with_content, :],
_doa_pred[dc:dc + max_frames_with_content, :]
)
_data_gen.write_output_format_file(output_file, output_dict)
return
def get_accdoa_labels(accdoa_in, nb_classes):
x, y, z = accdoa_in[:, :, :nb_classes], accdoa_in[:, :, nb_classes:2*nb_classes], accdoa_in[:, :, 2*nb_classes:]
sed = np.sqrt(x**2 + y**2 + z**2) > 0.5
return sed, accdoa_in
def main(argv):
print(argv)
if len(argv) != 3:
print('\n\n')
print('-------------------------------------------------------------------------------------------------------')
print('The code expected two optional inputs')
print('\t>> python seld.py <task-id> <job-id>')
print('\t\t<task-id> is used to choose the user-defined parameter set from parameter.py')
print('Using default inputs for now')
print('\t\t<job-id> is a unique identifier which is used for output filenames (models, training plots). '
'You can use any number or string for this.')
print('-------------------------------------------------------------------------------------------------------')
print('\n\n')
task_id = '1' if len(argv) < 2 else argv[1]
params = parameter.get_params(task_id)
job_id = 1 if len(argv) < 3 else argv[-1]
feat_cls = cls_feature_class.FeatureClass(params)
train_splits, val_splits, test_splits = None, None, None
if params['mode'] == 'dev':
test_splits = [6]
val_splits = [5]
train_splits = [[1, 2, 3, 4]]
elif params['mode'] == 'eval':
test_splits = [[7, 8]]
val_splits = [[6]]
train_splits = [[1, 2, 3, 4, 5]]
for split_cnt, split in enumerate(test_splits):
print('\n\n---------------------------------------------------------------------------------------------------')
print('------------------------------------ SPLIT {} -----------------------------------------------'.format(split))
print('---------------------------------------------------------------------------------------------------')
cls_feature_class.create_folder(params['model_dir'])
unique_name = '{}_{}_{}_{}_split{}'.format(
task_id, job_id, params['dataset'], params['mode'], split
)
unique_name = os.path.join(params['model_dir'], unique_name)
model_name = '{}_model.h5'.format(unique_name)
print("unique_name: {}\n".format(unique_name))
print('Loading training dataset:')
data_gen_train = cls_data_generator.DataGenerator(
params=params, split=train_splits[split_cnt]
)
print('Loading validation dataset:')
data_gen_val = cls_data_generator.DataGenerator(
params=params, split=val_splits[split_cnt], shuffle=False, per_file=True, is_eval=False
)
data_in, data_out = data_gen_train.get_data_sizes()
print('FEATURES:\n\tdata_in: {}\n\tdata_out: {}\n'.format(data_in, data_out))
nb_classes = data_gen_train.get_nb_classes()
print('MODEL:\n\tdropout_rate: {}\n\tCNN: nb_cnn_filt: {}, f_pool_size{}, t_pool_size{}\n\trnn_size: {}, fnn_size: {}\n\tdoa_objective: {}\n'.format(
params['dropout_rate'], params['nb_cnn2d_filt'], params['f_pool_size'], params['t_pool_size'], params['rnn_size'],
params['fnn_size'], params['doa_objective']))
print('Using loss weights : {}'.format(params['loss_weights']))
model = keras_model.get_model(data_in=data_in, data_out=data_out, dropout_rate=params['dropout_rate'],
nb_cnn2d_filt=params['nb_cnn2d_filt'], f_pool_size=params['f_pool_size'], t_pool_size=params['t_pool_size'],
rnn_size=params['rnn_size'], fnn_size=params['fnn_size'],
weights=params['loss_weights'], doa_objective=params['doa_objective'], is_accdoa=params['is_accdoa'])
dcase_output_val_folder = os.path.join(params['dcase_output_dir'], '{}_{}_{}_val'.format(task_id, params['dataset'], params['mode']))
cls_feature_class.delete_and_create_folder(dcase_output_val_folder)
print('Dumping recording-wise val results in: {}'.format(dcase_output_val_folder))
score_obj = ComputeSELDResults(params)
best_seld_metric = 99999
best_epoch = -1
patience_cnt = 0
nb_epoch = 2 if params['quick_test'] else params['nb_epochs']
tr_loss = np.zeros(nb_epoch)
seld_metric = np.zeros((nb_epoch, 5))
for epoch_cnt in range(nb_epoch):
start = time.time()
hist = model.fit_generator(
generator=data_gen_train.generate(),
steps_per_epoch=2 if params['quick_test'] else data_gen_train.get_total_batches_in_data(),
epochs=params['epochs_per_fit'],
verbose=2,
)
tr_loss[epoch_cnt] = hist.history.get('loss')[-1]
pred = model.predict_generator(
generator=data_gen_val.generate(),
steps=2 if params['quick_test'] else data_gen_val.get_total_batches_in_data(),
verbose=2
)
if params['is_accdoa']:
sed_pred, doa_pred = get_accdoa_labels(pred, nb_classes)
sed_pred = reshape_3Dto2D(sed_pred)
doa_pred = reshape_3Dto2D(doa_pred)
else:
sed_pred = reshape_3Dto2D(pred[0]) > 0.5
doa_pred = reshape_3Dto2D(pred[1] if params['doa_objective'] is 'mse' else pred[1][:, :, nb_classes:])
dump_DCASE2021_results(data_gen_val, feat_cls, dcase_output_val_folder, sed_pred, doa_pred)
seld_metric[epoch_cnt, :] = score_obj.get_SELD_Results(dcase_output_val_folder)
patience_cnt += 1
if seld_metric[epoch_cnt, -1] < best_seld_metric:
best_seld_metric = seld_metric[epoch_cnt, -1]
best_epoch = epoch_cnt
model.save(model_name)
patience_cnt = 0
print(
'epoch_cnt: {}, time: {:0.2f}s, tr_loss: {:0.2f}, '
'\n\t\t DCASE2021 SCORES: ER: {:0.2f}, F: {:0.1f}, LE: {:0.1f}, LR:{:0.1f}, seld_score (early stopping score): {:0.2f}, '
'best_seld_score: {:0.2f}, best_epoch : {}\n'.format(
epoch_cnt, time.time() - start, tr_loss[epoch_cnt],
seld_metric[epoch_cnt, 0], seld_metric[epoch_cnt, 1]*100,
seld_metric[epoch_cnt, 2], seld_metric[epoch_cnt, 3]*100,
seld_metric[epoch_cnt, -1], best_seld_metric, best_epoch
)
)
if patience_cnt > params['patience']:
break
print('\nResults on validation split:')
print('\tUnique_name: {} '.format(unique_name))
print('\tSaved model for the best_epoch: {}'.format(best_epoch))
print('\tSELD_score (early stopping score) : {}'.format(best_seld_metric))
print('\n\tDCASE2021 scores')
print('\tClass-aware localization scores: Localization Error: {:0.1f}, Localization Recall: {:0.1f}'.format(seld_metric[best_epoch, 2], seld_metric[best_epoch, 3]*100))
print('\tLocation-aware detection scores: Error rate: {:0.2f}, F-score: {:0.1f}'.format(seld_metric[best_epoch, 0], seld_metric[best_epoch, 1]*100))
print('\nLoading the best model and predicting results on the testing split')
print('\tLoading testing dataset:')
data_gen_test = cls_data_generator.DataGenerator(
params=params, split=split, shuffle=False, per_file=True, is_eval=True if params['mode'] is 'eval' else False
)
model = keras_model.load_seld_model('{}_model.h5'.format(unique_name), params['doa_objective'])
pred_test = model.predict_generator(
generator=data_gen_test.generate(),
steps=2 if params['quick_test'] else data_gen_test.get_total_batches_in_data(),
verbose=2
)
if params['is_accdoa']:
test_sed_pred, test_doa_pred = get_accdoa_labels(pred_test, nb_classes)
test_sed_pred = reshape_3Dto2D(test_sed_pred)
test_doa_pred = reshape_3Dto2D(test_doa_pred)
else:
test_sed_pred = reshape_3Dto2D(pred_test[0]) > 0.5
test_doa_pred = reshape_3Dto2D(pred_test[1] if params['doa_objective'] is 'mse' else pred_test[1][:, :, nb_classes:])
dcase_output_test_folder = os.path.join(params['dcase_output_dir'], '{}_{}_{}_test'.format(task_id, params['dataset'], params['mode']))
cls_feature_class.delete_and_create_folder(dcase_output_test_folder)
print('Dumping recording-wise test results in: {}'.format(dcase_output_test_folder))
dump_DCASE2021_results(data_gen_test, feat_cls, dcase_output_test_folder, test_sed_pred, test_doa_pred)
if params['mode'] is 'dev':
test_seld_metric = score_obj.get_SELD_Results(dcase_output_test_folder)
print('Results on test split:')
print('\tDCASE2021 Scores')
print('\tClass-aware localization scores: Localization Error: {:0.1f}, Localization Recall: {:0.1f}'.format(test_seld_metric[2], test_seld_metric[3]*100))
print('\tLocation-aware detection scores: Error rate: {:0.2f}, F-score: {:0.1f}'.format(test_seld_metric[0], test_seld_metric[1]*100))
print('\tSELD (early stopping metric): {:0.2f}'.format(test_seld_metric[-1]))
if __name__ == "__main__":
try:
sys.exit(main(sys.argv))
except (ValueError, IOError) as e:
sys.exit(e)
| true | true |
f7241af30cfe709d69f072a6a9b64ee91c4729f2 | 6,083 | py | Python | tools/InterfaceGenerator/generator/generators/SmartFactoryJSONRPC.py | Sohei-Suzuki-Nexty/sdl_core | 68f082169e0a40fccd9eb0db3c83911c28870f07 | [
"BSD-3-Clause"
] | 249 | 2015-01-15T16:50:53.000Z | 2022-03-24T13:23:34.000Z | tools/InterfaceGenerator/generator/generators/SmartFactoryJSONRPC.py | Sohei-Suzuki-Nexty/sdl_core | 68f082169e0a40fccd9eb0db3c83911c28870f07 | [
"BSD-3-Clause"
] | 2,917 | 2015-01-12T16:17:49.000Z | 2022-03-31T11:57:47.000Z | tools/InterfaceGenerator/generator/generators/SmartFactoryJSONRPC.py | Sohei-Suzuki-Nexty/sdl_core | 68f082169e0a40fccd9eb0db3c83911c28870f07 | [
"BSD-3-Clause"
] | 306 | 2015-01-12T09:23:20.000Z | 2022-01-28T18:06:30.000Z | """SmartFactory code generator for JSONRPC format.
Defines JSONRPC format specific code generation rules.
"""
import string
from generator.generators import SmartFactoryBase
from model.enum_element import EnumElement
class CodeGenerator(SmartFactoryBase.CodeGenerator):

    """JSONRPC SmartFactory generator.

    Defines special cases that affect base code generation to make JSONRPC
    format-friendly code.

    """

    def __init__(self):
        """Construct new object."""
        SmartFactoryBase.CodeGenerator.__init__(self)

    def _gen_pre_function_schemas(self, functions):
        """Generate specific code that goes before schema initialization.

        JSON RPC generator generates code that adds a specific schema for the
        error_response and adds this schema for every available response.

        Keyword arguments:
        functions -- list of functions to generate code for.

        Returns:
        Source code with error_response schema initialization and adding to the
        base SmartFactory.

        """
        code = u""
        # Emit one insert statement per response function so the shared
        # error_response schema is registered for every response FunctionID.
        for function in functions:
            if function.message_type.primary_name == u"response":
                code = u"".join(
                    [code, self._error_response_insert_template.substitute(
                        function_id=function.function_id.primary_name)])

        # Only construct the error_response schema at all if at least one
        # response message type exists in the interface.
        if code:
            return self._indent_code(
                u"".join([self._error_response_schema_template, code]), 1)

        return u""

    def _preprocess_message_type(self, message_type):
        """Preprocess message_type enum.

        JSON RPC generator needs to add new message_type "error_response" in
        case if at least one response available.

        Keyword arguments:
        message_type -- message_type enum to preprocess (mutated in place).

        Returns:
        Preprocessed message_type enum.

        """
        if "response" in message_type.elements:
            message_type.elements[u"error_response"] = EnumElement(
                name=u"error_response")
        return message_type

    def _gen_schema_params_fill(self, message_type_name):
        """Generate schema params fill code.

        Provides constant set of params for the function in accordance to the
        JSONRPC format.

        Keyword arguments:
        message_type_name -- Name of the messageType enum element.

        Returns:
        String with function schema params fill code.

        """
        # Responses additionally carry a correlation id and result code;
        # notifications carry neither a correlation id nor result params.
        return u"".join(
            [self._base_params,
             self._correlation_id_param
             if message_type_name != u"notification" else u"",
             self._additional_response_params
             if message_type_name == u"response" else u""])

    # C++ snippet: registers the shared error_response schema for one
    # response FunctionID (substituted via ${function_id}).
    _error_response_insert_template = string.Template(
        u'''functions_schemes_.insert(std::make_pair('''
        u'''ns_smart_device_link::ns_json_handler::'''
        u'''SmartSchemaKey<FunctionID::eType, messageType::eType>('''
        u'''FunctionID::${function_id}, messageType::error_response), '''
        u'''error_response_schema));\n''')

    # C++ snippet: builds the error_response schema itself (params members
    # plus the root object wrapping them).
    _error_response_schema_template = (
        u'''Members '''
        u'''params_members;\n'''
        u'''params_members[ns_smart_device_link::ns_json_handler::'''
        u'''strings::S_FUNCTION_ID] = SMember('''
        u'''TEnumSchemaItem<FunctionID::eType>::create('''
        u'''function_id_items), true);\n'''
        u'''params_members[ns_smart_device_link::ns_json_handler::'''
        u'''strings::S_MESSAGE_TYPE] = SMember('''
        u'''TEnumSchemaItem<messageType::eType>::create('''
        u'''message_type_items), true);\n'''
        u'''params_members[ns_smart_device_link::ns_json_handler::'''
        u'''strings::S_PROTOCOL_VERSION] = SMember('''
        u'''TNumberSchemaItem<int>::create(), true);\n'''
        u'''params_members[ns_smart_device_link::ns_json_handler::'''
        u'''strings::S_PROTOCOL_TYPE] = SMember('''
        u'''TNumberSchemaItem<int>::create(), true);\n'''
        u'''params_members[ns_smart_device_link::ns_json_handler::'''
        u'''strings::S_CORRELATION_ID] = SMember('''
        u'''TNumberSchemaItem<int>::create(), true);\n'''
        u'''params_members[ns_smart_device_link::ns_json_handler::'''
        u'''strings::kCode] = SMember('''
        u'''TNumberSchemaItem<int>::create(), true);\n'''
        u'''params_members[ns_smart_device_link::ns_json_handler::'''
        u'''strings::kMessage] = SMember('''
        u'''CStringSchemaItem::create(), true);\n'''
        u'''\n'''
        u'''Members root_members_map;\n'''
        u'''root_members_map[ns_smart_device_link::ns_json_handler::'''
        u'''strings::S_PARAMS] = SMember('''
        u'''CObjectSchemaItem::create(params_members), true);\n'''
        u'''\n'''
        u'''CSmartSchema error_response_schema('''
        u'''CObjectSchemaItem::create(root_members_map));\n'''
        u'''\n''')

    # C++ snippet: params common to every message type (function id,
    # message type, protocol version/type).
    _base_params = (
        u'''params_members[ns_smart_device_link::ns_json_handler::'''
        u'''strings::S_FUNCTION_ID] = SMember(TEnumSchemaItem<FunctionID::eType>::'''
        u'''create(function_id_items), true);\n'''
        u'''params_members[ns_smart_device_link::ns_json_handler::'''
        u'''strings::S_MESSAGE_TYPE] = SMember(TEnumSchemaItem<messageType::eType>::'''
        u'''create(message_type_items), true);\n'''
        u'''params_members[ns_smart_device_link::ns_json_handler::'''
        u'''strings::S_PROTOCOL_VERSION] = SMember(TNumberSchemaItem<int>::create(), true);\n'''
        u'''params_members[ns_smart_device_link::ns_json_handler::'''
        u'''strings::S_PROTOCOL_TYPE] = SMember(TNumberSchemaItem<int>::create(), true);\n'''
    )

    # C++ snippet: correlation id param, added for requests and responses
    # (everything except notifications).
    _correlation_id_param = (
        u'''params_members[ns_smart_device_link::ns_json_handler::'''
        u'''strings::S_CORRELATION_ID] = SMember(TNumberSchemaItem<int>::create(), true);\n'''
    )

    # C++ snippet: result-code param, added for responses only.
    _additional_response_params = (
        u'''params_members[ns_smart_device_link::ns_json_handler::'''
        u'''strings::kCode] = SMember(TNumberSchemaItem<int>::create(), true);\n'''
    )
from generator.generators import SmartFactoryBase
from model.enum_element import EnumElement
class CodeGenerator(SmartFactoryBase.CodeGenerator):
    """SmartFactory code generator emitting C++ schema-initialization code.

    Extends ``SmartFactoryBase.CodeGenerator``; the emitted fragments target
    the SmartDeviceLink C++ codebase (``ns_smart_device_link`` namespaces).
    Adds a synthetic ``error_response`` message type and the C++ schema used
    to validate error responses.
    """
    def __init__(self):
        """Delegate all setup to the base generator."""
        SmartFactoryBase.CodeGenerator.__init__(self)
    def _gen_pre_function_schemas(self, functions):
        """Return C++ code registering the error-response schema.

        For every function whose message type is ``response``, emits an
        insert of the shared ``error_response_schema`` keyed by that
        function's id. Returns an empty string when there are no responses.
        """
        code = u""
        for function in functions:
            if function.message_type.primary_name == u"response":
                code = u"".join(
                    [code, self._error_response_insert_template.substitute(
                        function_id=function.function_id.primary_name)])
        if code:
            # Prepend the one-time schema construction before the inserts.
            return self._indent_code(
                u"".join([self._error_response_schema_template, code]), 1)
        return u""
    def _preprocess_message_type(self, message_type):
        """Add an ``error_response`` element to the messageType enum.

        Only done when the enum already contains ``response``; the model is
        mutated in place and returned.
        """
        if "response" in message_type.elements:
            message_type.elements[u"error_response"] = EnumElement(
                name=u"error_response")
        return message_type
    def _gen_schema_params_fill(self, message_type_name):
        """Return the C++ param-fill code for the given message type name.

        All types get the base params; everything except notifications also
        gets a correlation id; responses additionally get a result code.
        """
        return u"".join(
            [self._base_params,
             self._correlation_id_param
             if message_type_name != u"notification" else u"",
             self._additional_response_params
             if message_type_name == u"response" else u""])
    # Template inserting the error-response schema into functions_schemes_
    # for a single ${function_id}.
    _error_response_insert_template = string.Template(
        u'''functions_schemes_.insert(std::make_pair('''
        u'''ns_smart_device_link::ns_json_handler::'''
        u'''SmartSchemaKey<FunctionID::eType, messageType::eType>('''
        u'''FunctionID::${function_id}, messageType::error_response), '''
        u'''error_response_schema));\n''')
    # C++ code constructing the shared error_response_schema object used by
    # _gen_pre_function_schemas above.
    _error_response_schema_template = (
        u'''Members '''
        u'''params_members;\n'''
        u'''params_members[ns_smart_device_link::ns_json_handler::'''
        u'''strings::S_FUNCTION_ID] = SMember('''
        u'''TEnumSchemaItem<FunctionID::eType>::create('''
        u'''function_id_items), true);\n'''
        u'''params_members[ns_smart_device_link::ns_json_handler::'''
        u'''strings::S_MESSAGE_TYPE] = SMember('''
        u'''TEnumSchemaItem<messageType::eType>::create('''
        u'''message_type_items), true);\n'''
        u'''params_members[ns_smart_device_link::ns_json_handler::'''
        u'''strings::S_PROTOCOL_VERSION] = SMember('''
        u'''TNumberSchemaItem<int>::create(), true);\n'''
        u'''params_members[ns_smart_device_link::ns_json_handler::'''
        u'''strings::S_PROTOCOL_TYPE] = SMember('''
        u'''TNumberSchemaItem<int>::create(), true);\n'''
        u'''params_members[ns_smart_device_link::ns_json_handler::'''
        u'''strings::S_CORRELATION_ID] = SMember('''
        u'''TNumberSchemaItem<int>::create(), true);\n'''
        u'''params_members[ns_smart_device_link::ns_json_handler::'''
        u'''strings::kCode] = SMember('''
        u'''TNumberSchemaItem<int>::create(), true);\n'''
        u'''params_members[ns_smart_device_link::ns_json_handler::'''
        u'''strings::kMessage] = SMember('''
        u'''CStringSchemaItem::create(), true);\n'''
        u'''\n'''
        u'''Members root_members_map;\n'''
        u'''root_members_map[ns_smart_device_link::ns_json_handler::'''
        u'''strings::S_PARAMS] = SMember('''
        u'''CObjectSchemaItem::create(params_members), true);\n'''
        u'''\n'''
        u'''CSmartSchema error_response_schema('''
        u'''CObjectSchemaItem::create(root_members_map));\n'''
        u'''\n''')
    # Param-fill snippets composed by _gen_schema_params_fill.
    _base_params = (
        u'''params_members[ns_smart_device_link::ns_json_handler::'''
        u'''strings::S_FUNCTION_ID] = SMember(TEnumSchemaItem<FunctionID::eType>::'''
        u'''create(function_id_items), true);\n'''
        u'''params_members[ns_smart_device_link::ns_json_handler::'''
        u'''strings::S_MESSAGE_TYPE] = SMember(TEnumSchemaItem<messageType::eType>::'''
        u'''create(message_type_items), true);\n'''
        u'''params_members[ns_smart_device_link::ns_json_handler::'''
        u'''strings::S_PROTOCOL_VERSION] = SMember(TNumberSchemaItem<int>::create(), true);\n'''
        u'''params_members[ns_smart_device_link::ns_json_handler::'''
        u'''strings::S_PROTOCOL_TYPE] = SMember(TNumberSchemaItem<int>::create(), true);\n'''
    )
    _correlation_id_param = (
        u'''params_members[ns_smart_device_link::ns_json_handler::'''
        u'''strings::S_CORRELATION_ID] = SMember(TNumberSchemaItem<int>::create(), true);\n'''
    )
    _additional_response_params = (
        u'''params_members[ns_smart_device_link::ns_json_handler::'''
        u'''strings::kCode] = SMember(TNumberSchemaItem<int>::create(), true);\n'''
    )
| true | true |
f7241b0aa473034d8cb366bc0b2c1baf700088b7 | 2,724 | py | Python | tests/test_exceptions.py | wgarlock/prelaunch | 827b4520441fce537c4618421a3e815c933ab907 | [
"BSD-2-Clause"
] | 13 | 2017-06-15T07:35:39.000Z | 2020-08-06T13:25:47.000Z | tests/test_exceptions.py | suutari-ai/pip-requ | 93adbd63691cff1003cdc88868b20b2ad0c766cc | [
"BSD-2-Clause"
] | 28 | 2017-06-18T12:02:50.000Z | 2020-02-19T08:26:36.000Z | tests/test_exceptions.py | suutari-ai/pip-requ | 93adbd63691cff1003cdc88868b20b2ad0c766cc | [
"BSD-2-Clause"
] | 4 | 2018-04-28T11:14:51.000Z | 2020-12-27T16:01:16.000Z | import os
from prequ._pip_compat import (
create_package_finder, install_req_from_editable, install_req_from_line)
from prequ.exceptions import (
IncompatibleRequirements, NoCandidateFound, UnsupportedConstraint)
from .dirs import FAKE_PYPI_WHEELS_DIR
from .test_repositories import get_pypi_repository
# InstallationCandidate moved under pip._internal in newer pip releases;
# support both import locations.
try:
    from pip.index import InstallationCandidate
except ImportError:
    from pip._internal.index import InstallationCandidate
def get_finder():
    """Build a pip package finder backed by the test PyPI repository."""
    pypi_repo = get_pypi_repository()
    return create_package_finder(
        find_links=[],
        index_urls=['pypi.localhost'],
        allow_all_prereleases=False,
        session=pypi_repo.session)
def test_no_candidate_found_with_versions():
    """NoCandidateFound lists each tried version in its message."""
    requirement = install_req_from_line('some-package==12.3.4')
    candidates = []
    for version in ('1.2.3', '12.3.0', '12.3.5'):
        candidates.append(InstallationCandidate('some-package', version, None))
    exc = NoCandidateFound(requirement, candidates, get_finder())
    expected = (
        "Could not find a version that matches some-package==12.3.4\n"
        "Tried: 1.2.3, 12.3.0, 12.3.5\n"
        "There are incompatible versions in the resolved dependencies.")
    assert '{}'.format(exc) == expected
def test_no_candidate_found_no_versions():
    """With nothing tried, the message questions index reachability."""
    requirement = install_req_from_line('some-package==12.3.4')
    exc = NoCandidateFound(requirement, [], get_finder())
    expected = (
        "Could not find a version that matches some-package==12.3.4\n"
        "No versions found\n"
        "Was pypi.localhost reachable?")
    assert '{}'.format(exc) == expected
def test_unsupported_constraint_simple():
    """UnsupportedConstraint appends the requirement to the message."""
    requirement = install_req_from_line('foo-bar')
    exc = UnsupportedConstraint(
        "Foo bar distribution is not supported", requirement)
    assert '{}'.format(exc) == (
        "Foo bar distribution is not supported (constraint was: foo-bar)")
def test_unsupported_constraint_editable_wheel():
    """The constraint shown for an editable wheel is the editable ireq."""
    wheel_path = os.path.join(
        FAKE_PYPI_WHEELS_DIR, 'small_fake_a-0.1-py2.py3-none-any.whl')
    wheel_ireq = install_req_from_line(wheel_path)
    editable_ireq = install_req_from_editable(str(wheel_ireq.link))
    exc = UnsupportedConstraint(
        "Editable wheel is too square", editable_ireq)
    expected = "Editable wheel is too square (constraint was: {})".format(
        editable_ireq)
    assert '{}'.format(exc) == expected
def test_incompatible_requirements():
    """IncompatibleRequirements names both conflicting requirements."""
    first = install_req_from_line('dummy==1.5')
    second = install_req_from_line('dummy==2.6')
    exc = IncompatibleRequirements(first, second)
    assert '{}'.format(exc) == (
        "Incompatible requirements found: dummy==1.5 and dummy==2.6")
| 36.810811 | 76 | 0.715859 | import os
from prequ._pip_compat import (
create_package_finder, install_req_from_editable, install_req_from_line)
from prequ.exceptions import (
IncompatibleRequirements, NoCandidateFound, UnsupportedConstraint)
from .dirs import FAKE_PYPI_WHEELS_DIR
from .test_repositories import get_pypi_repository
try:
from pip.index import InstallationCandidate
except ImportError:
from pip._internal.index import InstallationCandidate
def get_finder():
    """Return a pip package finder backed by the test PyPI repository."""
    repo = get_pypi_repository()
    finder = create_package_finder(
        find_links=[],
        index_urls=['pypi.localhost'],
        allow_all_prereleases=False,
        session=repo.session)
    return finder
def test_no_candidate_found_with_versions():
    """NoCandidateFound lists each tried version in its message."""
    ireq = install_req_from_line('some-package==12.3.4')
    tried = [
        InstallationCandidate('some-package', ver, None)
        for ver in ['1.2.3', '12.3.0', '12.3.5']]
    no_candidate_found = NoCandidateFound(ireq, tried, get_finder())
    assert '{}'.format(no_candidate_found) == (
        "Could not find a version that matches some-package==12.3.4\n"
        "Tried: 1.2.3, 12.3.0, 12.3.5\n"
        "There are incompatible versions in the resolved dependencies.")
def test_no_candidate_found_no_versions():
    """With nothing tried, the message questions index reachability."""
    ireq = install_req_from_line('some-package==12.3.4')
    tried = []
    no_candidate_found = NoCandidateFound(ireq, tried, get_finder())
    assert '{}'.format(no_candidate_found) == (
        "Could not find a version that matches some-package==12.3.4\n"
        "No versions found\n"
        "Was pypi.localhost reachable?")
def test_unsupported_constraint_simple():
    """UnsupportedConstraint appends the requirement to the message."""
    msg = "Foo bar distribution is not supported"
    ireq = install_req_from_line('foo-bar')
    unsupported_constraint = UnsupportedConstraint(msg, ireq)
    assert '{}'.format(unsupported_constraint) == (
        "Foo bar distribution is not supported (constraint was: foo-bar)")
def test_unsupported_constraint_editable_wheel():
    """The constraint shown for an editable wheel is the editable ireq."""
    wheel_path = os.path.join(
        FAKE_PYPI_WHEELS_DIR, 'small_fake_a-0.1-py2.py3-none-any.whl')
    msg = "Editable wheel is too square"
    ireq_wheel = install_req_from_line(wheel_path)
    ireq = install_req_from_editable(str(ireq_wheel.link))
    unsupported_constraint = UnsupportedConstraint(msg, ireq)
    assert '{}'.format(unsupported_constraint) == (
        "Editable wheel is too square (constraint was: {})".format(ireq))
def test_incompatible_requirements():
    """IncompatibleRequirements names both conflicting requirements."""
    ireq_a = install_req_from_line('dummy==1.5')
    ireq_b = install_req_from_line('dummy==2.6')
    incompatible_reqs = IncompatibleRequirements(ireq_a, ireq_b)
    assert '{}'.format(incompatible_reqs) == (
        "Incompatible requirements found: dummy==1.5 and dummy==2.6")
| true | true |
f7241bb68ff3065f7846b4e98897be67ff1b21a7 | 8,733 | py | Python | core/dbt/linker.py | pushpay/dbt | 8b96de893af692bd77fe9eb9a8104317be7b5413 | [
"Apache-2.0"
] | null | null | null | core/dbt/linker.py | pushpay/dbt | 8b96de893af692bd77fe9eb9a8104317be7b5413 | [
"Apache-2.0"
] | null | null | null | core/dbt/linker.py | pushpay/dbt | 8b96de893af692bd77fe9eb9a8104317be7b5413 | [
"Apache-2.0"
] | null | null | null | import networkx as nx
import threading
from dbt.compat import PriorityQueue
from dbt.node_types import NodeType
# Keys stripped from each node's serialized data before the graph is written
# with write_gpickle (see _updated_graph below).
GRAPH_SERIALIZE_BLACKLIST = [
    'agate_table'
]
def from_file(graph_file):
    """Return a Linker whose graph is loaded from ``graph_file``."""
    loaded = Linker()
    loaded.read_graph(graph_file)
    return loaded
def is_blocking_dependency(node):
    """Return True when ``node`` is a Model resource -- the only resource
    type counted as a blocking dependency here."""
    return NodeType.Model == node.resource_type
class GraphQueue(object):
    """A fancy queue that is backed by the dependency graph.

    Note: this will mutate input!

    This queue is thread-safe for `mark_done` calls, though you must ensure
    that separate threads do not call `.empty()` or `__len__()` and `.get()`
    at the same time, as there is an unlocked race!
    """
    def __init__(self, graph, manifest):
        self.graph = graph
        self.manifest = manifest
        # store the queue as a priority queue.
        self.inner = PriorityQueue()
        # things that have been popped off the queue but not finished
        # and worker thread reservations
        self.in_progress = set()
        # things that are in the queue
        self.queued = set()
        # this lock controls most things
        self.lock = threading.Lock()
        # store the 'score' of each node as a number. Lower is higher priority.
        self._scores = self._calculate_scores()
        # populate the initial queue
        self._find_new_additions()
    def get_node(self, node_id):
        """Return the manifest node for the given unique ID."""
        return self.manifest.nodes[node_id]
    def _include_in_cost(self, node_id):
        """True if ``node_id`` should count toward scheduling scores:
        blocking (model) nodes that are not ephemeral."""
        node = self.get_node(node_id)
        if not is_blocking_dependency(node):
            return False
        if node.get_materialization() == 'ephemeral':
            return False
        return True
    def _calculate_scores(self):
        """Calculate the 'value' of each node in the graph based on how many
        blocking descendants it has. We use this score for the internal
        priority queue's ordering, so the quality of this metric is important.

        The score is stored as a negative number because the internal
        PriorityQueue picks lowest values first.

        We could do this in one pass over the graph instead of len(self.graph)
        passes but this is easy. For large graphs this may hurt performance.

        This operates on the graph, so it would require a lock if called from
        outside __init__.

        :return Dict[str, int]: The score dict, mapping unique IDs to integer
            scores. Lower scores are higher priority.
        """
        scores = {}
        for node in self.graph.nodes():
            score = -1 * len([
                d for d in nx.descendants(self.graph, node)
                if self._include_in_cost(d)
            ])
            scores[node] = score
        return scores
    def get(self, block=True, timeout=None):
        """Get a node off the inner priority queue. By default, this blocks.

        This takes the lock, but only for part of it.

        :param bool block: If True, block until the inner queue has data
        :param Optional[float] timeout: If set, block for timeout seconds
            waiting for data.
        :return ParsedNode: The node as present in the manifest.

        See `queue.PriorityQueue` for more information on `get()` behavior
        and exceptions.
        """
        _, node_id = self.inner.get(block=block, timeout=timeout)
        with self.lock:
            self._mark_in_progress(node_id)
        return self.get_node(node_id)
    def __len__(self):
        """The length of the queue is the number of tasks left for the queue
        to give out, regardless of where they are. Incomplete tasks are not
        part of the length.

        This takes the lock.
        """
        with self.lock:
            return len(self.graph) - len(self.in_progress)
    def empty(self):
        """The graph queue is 'empty' if all remaining nodes in the graph
        are in progress.

        This takes the lock (via __len__).
        """
        return len(self) == 0
    def _already_known(self, node):
        """Decide if a node is already known (either handed out as a task, or
        in the queue).

        Callers must hold the lock.

        :param str node: The node ID to check
        :returns bool: If the node is in progress/queued.
        """
        return node in self.in_progress or node in self.queued
    def _find_new_additions(self):
        """Find any nodes in the graph that need to be added to the internal
        queue and add them.

        Callers must hold the lock.
        """
        # in-degree 0 means all dependencies have completed (completed nodes
        # are removed from the graph in mark_done).
        for node, in_degree in dict(self.graph.in_degree()).items():
            if not self._already_known(node) and in_degree == 0:
                self.inner.put((self._scores[node], node))
                self.queued.add(node)
    def mark_done(self, node_id):
        """Given a node's unique ID, mark it as done.

        This method takes the lock.

        :param str node_id: The node ID to mark as complete.
        """
        with self.lock:
            self.in_progress.remove(node_id)
            self.graph.remove_node(node_id)
            self._find_new_additions()
        self.inner.task_done()
    def _mark_in_progress(self, node_id):
        """Mark the node as 'in progress'.

        Callers must hold the lock.

        :param str node_id: The node ID to mark as in progress.
        """
        self.queued.remove(node_id)
        self.in_progress.add(node_id)
    def join(self):
        """Join the queue. Blocks until all tasks are marked as done.

        Make sure not to call this before the queue reports that it is empty.
        """
        self.inner.join()
def _subset_graph(graph, include_nodes):
    """Return a shallow copy of *graph* restricted to *include_nodes*.

    Transitive edges across removed nodes are preserved as explicit new
    edges (via transitive closure). Raises RuntimeError if any requested
    node is absent from the graph.
    """
    keep = set(include_nodes)
    subgraph = nx.algorithms.transitive_closure(graph)
    for candidate in graph.nodes():
        if candidate not in keep:
            subgraph.remove_node(candidate)
    for required in keep:
        if required not in subgraph:
            raise RuntimeError(
                "Couldn't find model '{}' -- does it exist or is "
                "it disabled?".format(required)
            )
    return subgraph
class Linker(object):
    """Thin wrapper around a networkx DiGraph of node dependencies."""
    def __init__(self, data=None):
        # ``data`` is forwarded as keyword graph attributes to nx.DiGraph.
        if data is None:
            data = {}
        self.graph = nx.DiGraph(**data)
    def edges(self):
        """Return the graph's edge view."""
        return self.graph.edges()
    def nodes(self):
        """Return the graph's node view."""
        return self.graph.nodes()
    def find_cycles(self):
        """Return a ' --> '-joined description of one cycle, or None."""
        # There's a networkx find_cycle function, but there's a bug in the
        # nx 1.11 release that prevents us from using it. We should use that
        # function when we upgrade to 2.X. More info:
        # https://github.com/networkx/networkx/pull/2473
        cycles = list(nx.simple_cycles(self.graph))
        if len(cycles) > 0:
            cycle_nodes = cycles[0]
            # repeat the first node so the rendered path closes the loop
            cycle_nodes.append(cycle_nodes[0])
            return " --> ".join(cycle_nodes)
        return None
    def as_graph_queue(self, manifest, limit_to=None):
        """Returns a queue over nodes in the graph that tracks progress of
        dependencies.
        """
        if limit_to is None:
            graph_nodes = self.graph.nodes()
        else:
            graph_nodes = limit_to
        new_graph = _subset_graph(self.graph, graph_nodes)
        return GraphQueue(new_graph, manifest)
    def get_dependent_nodes(self, node):
        """Return all nodes that (transitively) depend on ``node``."""
        return nx.descendants(self.graph, node)
    def dependency(self, node1, node2):
        "indicate that node1 depends on node2"
        self.graph.add_node(node1)
        self.graph.add_node(node2)
        # edge direction: dependency -> dependent
        self.graph.add_edge(node2, node1)
    def add_node(self, node):
        """Add ``node`` to the graph (no-op if already present)."""
        self.graph.add_node(node)
    def remove_node(self, node):
        """Remove ``node`` and return its (former) descendants."""
        children = nx.descendants(self.graph, node)
        self.graph.remove_node(node)
        return children
    def write_graph(self, outfile, manifest):
        """Write the graph to a gpickle file. Before doing so, serialize and
        include all nodes in their corresponding graph entries.
        """
        out_graph = _updated_graph(self.graph, manifest)
        nx.write_gpickle(out_graph, outfile)
    def read_graph(self, infile):
        """Load the graph from a gpickle file.

        NOTE(review): gpickle is pickle-based -- only read files this
        process wrote; do not load untrusted input.
        """
        self.graph = nx.read_gpickle(infile)
def _updated_graph(graph, manifest):
    """Return a copy of *graph* whose nodes carry their serialized manifest
    data, minus any keys in GRAPH_SERIALIZE_BLACKLIST.
    """
    annotated = graph.copy()
    for unique_id in annotated.nodes():
        # serialize() removes the agate table
        node_data = manifest.nodes[unique_id].serialize()
        for blacklisted_key in GRAPH_SERIALIZE_BLACKLIST:
            node_data.pop(blacklisted_key, None)
        annotated.add_node(unique_id, **node_data)
    return annotated
| 31.989011 | 79 | 0.624986 | import networkx as nx
import threading
from dbt.compat import PriorityQueue
from dbt.node_types import NodeType
GRAPH_SERIALIZE_BLACKLIST = [
'agate_table'
]
def from_file(graph_file):
    """Return a Linker whose graph is read from ``graph_file`` (gpickle)."""
    linker = Linker()
    linker.read_graph(graph_file)
    return linker
def is_blocking_dependency(node):
    """Return True when ``node`` is a Model resource."""
    return node.resource_type == NodeType.Model
class GraphQueue(object):
    """A priority queue of runnable nodes, backed by the dependency graph.

    Mutates the input graph: completed nodes are removed in ``mark_done``.
    ``mark_done`` is thread-safe, but ``empty()``/``__len__()`` must not be
    called concurrently with ``get()`` (unlocked race between them).
    """
    def __init__(self, graph, manifest):
        self.graph = graph
        self.manifest = manifest
        # priority queue of (score, node_id); lower score = higher priority
        self.inner = PriorityQueue()
        # handed out to workers but not yet marked done
        self.in_progress = set()
        # currently sitting in self.inner
        self.queued = set()
        # guards graph / in_progress / queued mutations
        self.lock = threading.Lock()
        self._scores = self._calculate_scores()
        # seed the queue with nodes that have no dependencies
        self._find_new_additions()
    def get_node(self, node_id):
        """Return the manifest node for ``node_id``."""
        return self.manifest.nodes[node_id]
    def _include_in_cost(self, node_id):
        """True if ``node_id`` counts toward scores: a non-ephemeral model."""
        node = self.get_node(node_id)
        if not is_blocking_dependency(node):
            return False
        if node.get_materialization() == 'ephemeral':
            return False
        return True
    def _calculate_scores(self):
        """Map each node to minus its count of blocking descendants.

        Negative so that nodes with more blocking descendants come out of
        the PriorityQueue first. O(n) descendant walks; fine for modest
        graphs. Requires the lock if called outside __init__.
        """
        scores = {}
        for node in self.graph.nodes():
            score = -1 * len([
                d for d in nx.descendants(self.graph, node)
                if self._include_in_cost(d)
            ])
            scores[node] = score
        return scores
    def get(self, block=True, timeout=None):
        """Pop the highest-priority node, mark it in progress, return it.

        ``block``/``timeout`` are forwarded to PriorityQueue.get().
        """
        _, node_id = self.inner.get(block=block, timeout=timeout)
        with self.lock:
            self._mark_in_progress(node_id)
        return self.get_node(node_id)
    def __len__(self):
        """Number of tasks not yet handed out; takes the lock."""
        with self.lock:
            return len(self.graph) - len(self.in_progress)
    def empty(self):
        """True when every remaining node is in progress."""
        return len(self) == 0
    def _already_known(self, node):
        """True if ``node`` is in progress or queued. Callers hold the lock."""
        return node in self.in_progress or node in self.queued
    def _find_new_additions(self):
        """Queue nodes whose dependencies are all done (in-degree 0).

        Callers must hold the lock.
        """
        for node, in_degree in dict(self.graph.in_degree()).items():
            if not self._already_known(node) and in_degree == 0:
                self.inner.put((self._scores[node], node))
                self.queued.add(node)
    def mark_done(self, node_id):
        """Mark ``node_id`` complete, unlocking its dependents. Thread-safe."""
        with self.lock:
            self.in_progress.remove(node_id)
            self.graph.remove_node(node_id)
            self._find_new_additions()
        self.inner.task_done()
    def _mark_in_progress(self, node_id):
        """Move ``node_id`` from queued to in-progress. Callers hold the lock."""
        self.queued.remove(node_id)
        self.in_progress.add(node_id)
    def join(self):
        """Block until every task has been marked done."""
        self.inner.join()
def _subset_graph(graph, include_nodes):
    """Return a copy of *graph* restricted to *include_nodes*.

    Transitive edges across removed nodes are preserved as explicit edges
    (via transitive closure). Raises RuntimeError if a requested node is
    missing from the graph.
    """
    new_graph = nx.algorithms.transitive_closure(graph)
    include_nodes = set(include_nodes)
    for node in graph.nodes():
        if node not in include_nodes:
            new_graph.remove_node(node)
    for node in include_nodes:
        if node not in new_graph:
            raise RuntimeError(
                "Couldn't find model '{}' -- does it exist or is "
                "it disabled?".format(node)
            )
    return new_graph
class Linker(object):
    """Thin wrapper around a networkx DiGraph of node dependencies."""
    def __init__(self, data=None):
        # ``data`` is forwarded as keyword graph attributes to nx.DiGraph.
        if data is None:
            data = {}
        self.graph = nx.DiGraph(**data)
    def edges(self):
        """Return the graph's edge view."""
        return self.graph.edges()
    def nodes(self):
        """Return the graph's node view."""
        return self.graph.nodes()
    def find_cycles(self):
        """Return a ' --> '-joined description of one cycle, or None."""
        # There's a networkx find_cycle function, but there's a bug in the
        # nx 1.11 release that prevents us from using it. We should use that
        # function when we upgrade to 2.X. More info:
        # https://github.com/networkx/networkx/pull/2473
        cycles = list(nx.simple_cycles(self.graph))
        if len(cycles) > 0:
            cycle_nodes = cycles[0]
            cycle_nodes.append(cycle_nodes[0])
            return " --> ".join(cycle_nodes)
        return None
    def as_graph_queue(self, manifest, limit_to=None):
        """Return a GraphQueue over the graph (optionally limited to
        ``limit_to`` plus preserved transitive edges)."""
        if limit_to is None:
            graph_nodes = self.graph.nodes()
        else:
            graph_nodes = limit_to
        new_graph = _subset_graph(self.graph, graph_nodes)
        return GraphQueue(new_graph, manifest)
    def get_dependent_nodes(self, node):
        """Return all nodes that (transitively) depend on ``node``."""
        return nx.descendants(self.graph, node)
    def dependency(self, node1, node2):
        """Record that node1 depends on node2 (edge node2 -> node1)."""
        self.graph.add_node(node1)
        self.graph.add_node(node2)
        self.graph.add_edge(node2, node1)
    def add_node(self, node):
        """Add ``node`` to the graph (no-op if already present)."""
        self.graph.add_node(node)
    def remove_node(self, node):
        """Remove ``node`` and return its (former) descendants."""
        children = nx.descendants(self.graph, node)
        self.graph.remove_node(node)
        return children
    def write_graph(self, outfile, manifest):
        """Write the graph to a gpickle file with serialized node data
        attached (blacklisted keys stripped)."""
        out_graph = _updated_graph(self.graph, manifest)
        nx.write_gpickle(out_graph, outfile)
    def read_graph(self, infile):
        """Load the graph from a gpickle file (pickle-based: trusted
        input only)."""
        self.graph = nx.read_gpickle(infile)
def _updated_graph(graph, manifest):
    """Return a copy of *graph* with serialized manifest data attached to
    each node, minus keys in GRAPH_SERIALIZE_BLACKLIST.
    """
    graph = graph.copy()
    for node_id in graph.nodes():
        # serialize() removes the agate table
        data = manifest.nodes[node_id].serialize()
        for key in GRAPH_SERIALIZE_BLACKLIST:
            if key in data:
                del data[key]
        graph.add_node(node_id, **data)
    return graph
| true | true |
f7241bfc54141abb811285ad6654b872e3b97148 | 18,771 | bzl | Python | third_party/js.bzl | wdirons/tensorboard | 605842dbe7857ae435fab6ff21e285ef77cb1323 | [
"Apache-2.0"
] | null | null | null | third_party/js.bzl | wdirons/tensorboard | 605842dbe7857ae435fab6ff21e285ef77cb1323 | [
"Apache-2.0"
] | null | null | null | third_party/js.bzl | wdirons/tensorboard | 605842dbe7857ae435fab6ff21e285ef77cb1323 | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TensorBoard external JS dependencies (both infrastructure and frontend libs)
load("@io_bazel_rules_closure//closure:defs.bzl", "filegroup_external")
load("@io_bazel_rules_closure//closure:defs.bzl", "web_library_external")
def tensorboard_js_workspace():
##############################################################################
# TensorBoard Build Tools
filegroup_external(
name = "org_nodejs",
# MIT with portions licensed:
# - MIT
# - Old MIT
# - 2-Clause-BSD
# - 3-Clause-BSD
# - ISC
# - Unicode
# - zlib
# - Artistic 2.0
licenses = ["notice"],
sha256_urls_extract_macos = {
"910395e1e98fb351c62b5702a9deef22aaecf05d6df1d7edc283337542207f3f": [
"https://mirror.bazel.build/nodejs.org/dist/v6.9.1/node-v6.9.1-darwin-x64.tar.xz",
"http://nodejs.org/dist/v6.9.1/node-v6.9.1-darwin-x64.tar.xz",
],
},
sha256_urls_windows = {
"1914bfb950be8d576ce9e49c8a0e51c9f2402560fe3c19093e69bc1306a56e9e": [
"https://mirror.bazel.build/raw.githubusercontent.com/nodejs/node/v6.9.1/LICENSE",
"https://raw.githubusercontent.com/nodejs/node/v6.9.1/LICENSE",
],
"513923b0490ebb7466a56483a62595814ed9d036d6f35476debb0cd606bec526": [
"https://mirror.bazel.build/nodejs.org/dist/v6.9.1/win-x64/node.exe",
"http://nodejs.org/dist/v6.9.1/win-x64/node.exe",
],
"3951aefa4afd6fb836ab06468b1fc2a69fa75bd66ec2f5a0e08c4e32547681e3": [
"https://mirror.bazel.build/nodejs.org/dist/v6.9.1/win-x64/node.lib",
"http://nodejs.org/dist/v6.9.1/win-x64/node.lib",
],
},
sha256_urls_extract = {
"d4eb161e4715e11bbef816a6c577974271e2bddae9cf008744627676ff00036a": [
"https://mirror.bazel.build/nodejs.org/dist/v6.9.1/node-v6.9.1-linux-x64.tar.xz",
"http://nodejs.org/dist/v6.9.1/node-v6.9.1-linux-x64.tar.xz",
],
},
strip_prefix = {
"node-v6.9.1-darwin-x64.tar.xz": "node-v6.9.1-darwin-x64",
"node-v6.9.1-linux-x64.tar.xz": "node-v6.9.1-linux-x64",
},
executable = [
"node",
"node.exe",
],
)
filegroup_external(
name = "com_microsoft_typescript",
licenses = ["notice"], # Apache 2.0
sha256_urls = {
"a7d00bfd54525bc694b6e32f64c7ebcf5e6b7ae3657be5cc12767bce74654a47": [
"https://mirror.bazel.build/raw.githubusercontent.com/Microsoft/TypeScript/v2.7.2/LICENSE.txt",
"https://raw.githubusercontent.com/Microsoft/TypeScript/v2.9.2/LICENSE.txt",
],
"9632bfccde117a8c82690a324bc5c18c3869e9b89ac536fc134ba655d7ec1e98": [
"https://mirror.bazel.build/raw.githubusercontent.com/Microsoft/TypeScript/v2.9.2/lib/tsc.js",
"https://raw.githubusercontent.com/Microsoft/TypeScript/v2.9.2/lib/tsc.js",
],
"529c9f8b45939e0fa80950208bf80452ccb982b460cc25433813c919b67a3b2f": [
"https://mirror.bazel.build/raw.githubusercontent.com/Microsoft/TypeScript/v2.9.2/lib/lib.es6.d.ts",
"https://raw.githubusercontent.com/Microsoft/TypeScript/v2.9.2/lib/lib.es6.d.ts",
],
"f6e6efe57fb9fcf72eed013e2755d04505300f32b78577118ca5dacc85ec852d": [
"https://mirror.bazel.build/raw.githubusercontent.com/Microsoft/TypeScript/v2.9.2/lib/lib.dom.d.ts",
"https://raw.githubusercontent.com/Microsoft/TypeScript/v2.9.2/lib/lib.dom.d.ts",
],
},
extra_build_file_content = "\n".join([
"sh_binary(",
" name = \"tsc\",",
" srcs = [\"tsc.sh\"],",
" data = [",
" \"tsc.js\",",
" \"@org_nodejs\",",
" ],",
")",
"",
"genrule(",
" name = \"tsc_sh\",",
" outs = [\"tsc.sh\"],",
" cmd = \"cat >$@ <<'EOF'\\n\" +",
" \"#!/bin/bash\\n\" +",
" \"NODE=external/org_nodejs/bin/node\\n\" +",
" \"if [[ -e external/org_nodejs/node.exe ]]; then\\n\" +",
" \" NODE=external/org_nodejs/node.exe\\n\" +",
" \"fi\\n\" +",
" \"exec $${NODE} external/com_microsoft_typescript/tsc.js \\\"$$@\\\"\\n\" +",
" \"EOF\",",
" executable = True,",
")",
]),
)
native.new_http_archive(
name = "io_angular_clutz",
build_file = str(Label("//third_party:clutz.BUILD")),
sha256 = "7a5c785dbcc3ae0daa1fcf4507de6a23bbecdb2bf80460651e4c2b88c1ad7582",
strip_prefix = "clutz-7f1a3ee9ad9f85a9056084dc039496bbd35e11f6",
urls = [
"https://mirror.bazel.build/github.com/angular/clutz/archive/7f1a3ee9ad9f85a9056084dc039496bbd35e11f6.tar.gz", # 2017-11-02
"https://github.com/angular/clutz/archive/7f1a3ee9ad9f85a9056084dc039496bbd35e11f6.tar.gz",
],
)
filegroup_external(
name = "com_google_javascript_closure_compiler_externs",
licenses = ["notice"], # Apache 2.0
sha256_urls_extract = {
"55bdf8dc5d74534b63edbce5f510557a18a2b7aa578938ba300eb65f2da48092": [
"https://mirror.bazel.build/github.com/google/closure-compiler/archive/v20180402.tar.gz",
"https://github.com/google/closure-compiler/archive/v20180402.tar.gz",
],
},
strip_prefix = {"v20180402.tar.gz": "closure-compiler-20180402/externs"},
)
filegroup_external(
name = "com_google_javascript_closure_compiler_externs_polymer",
licenses = ["notice"], # Apache 2.0
sha256_urls = {
"737af73d7b02226e6e1516044a8eb8283376d44f64839979936ca163c00900f4": [
"https://mirror.bazel.build/raw.githubusercontent.com/google/closure-compiler/v20180402/contrib/externs/polymer-1.0.js",
"https://raw.githubusercontent.com/google/closure-compiler/v20180402/contrib/externs/polymer-1.0.js",
],
},
)
filegroup_external(
name = "org_threejs",
# no @license header
licenses = ["notice"], # MIT
sha256_urls = {
"5eb9be209f84c4588f573b9abd8e13c04ce187ad6f40e8b12993d00b1428de54": [
"https://mirror.bazel.build/raw.githubusercontent.com/mrdoob/three.js/r77/LICENSE",
"https://raw.githubusercontent.com/mrdoob/three.js/r77/LICENSE",
],
"881cc79c84c34a1f61f8c8af0ee3f237d83a2eda3868720fdcb47bcacf8da44a": [
"https://mirror.bazel.build/raw.githubusercontent.com/mrdoob/three.js/r77/build/three.js",
"https://raw.githubusercontent.com/mrdoob/three.js/r77/build/three.js",
],
"98b8b5954901025a98033c8bdd65969be1f30b59e11f823ec864253bb72f768d": [
"https://mirror.bazel.build/raw.githubusercontent.com/mrdoob/three.js/r77/examples/js/controls/OrbitControls.js",
"https://raw.githubusercontent.com/mrdoob/three.js/r77/examples/js/controls/OrbitControls.js",
],
},
)
##############################################################################
# TensorBoard JavaScript Production Dependencies
web_library_external(
name = "com_lodash",
licenses = ["notice"], # MIT
sha256 = "6c5fa80d0fa9dc4eba634ab042404ff7c162dcb4cfe3473338801aeca0042285",
urls = [
"https://mirror.bazel.build/github.com/lodash/lodash/archive/4.17.5.tar.gz",
"https://github.com/lodash/lodash/archive/4.17.5.tar.gz",
],
strip_prefix = "lodash-4.17.5",
path = "/lodash",
srcs = ["lodash.js"],
extra_build_file_content = "exports_files([\"LICENSE\"])",
)
filegroup_external(
name = "com_numericjs",
# no @license header
licenses = ["notice"], # MIT
sha256_urls = {
"0e94aada97f12dee6118064add9170484c55022f5d53206ee4407143cd36ddcd": [
"https://mirror.bazel.build/raw.githubusercontent.com/sloisel/numeric/v1.2.6/license.txt",
"https://raw.githubusercontent.com/sloisel/numeric/v1.2.6/license.txt",
],
"5dcaba2016fd237091e3a17b0dc272fb21f0e2b15d7628f95a0ad0cd4cdf4020": [
"https://mirror.bazel.build/www.numericjs.com/lib/numeric-1.2.6.js",
"http://www.numericjs.com/lib/numeric-1.2.6.js",
],
},
rename = {"numeric-1.2.6.js": "numeric.js"},
)
filegroup_external(
name = "com_palantir_plottable",
# no @license header
licenses = ["notice"], # MIT
sha256_urls_extract = {
# Plottable doesn't have a release tarball on GitHub. Using the
# sources directly from git also requires running Node tooling
# beforehand to generate files. NPM is the only place to get it.
"08df639782baf9b8cfeeb5fcdfbe3a1ce25b5a916903fc580e201a0a1142a6c4": [
"https://mirror.bazel.build/registry.npmjs.org/plottable/-/plottable-3.7.0.tgz",
"https://registry.npmjs.org/plottable/-/plottable-3.7.0.tgz",
],
},
)
filegroup_external(
name = "io_github_cpettitt_dagre",
# no @license header
licenses = ["notice"], # MIT
sha256_urls = {
"6a349742a6cb219d5a2fc8d0844f6d89a6efc62e20c664450d884fc7ff2d6015": [
"https://mirror.bazel.build/raw.githubusercontent.com/cpettitt/dagre/v0.8.2/LICENSE",
"https://raw.githubusercontent.com/cpettitt/dagre/v0.8.2/LICENSE",
],
"43cb4e919196c177c149b63880d262074670af99db6a1e174b25e266da4935a9": [
"https://mirror.bazel.build/raw.githubusercontent.com/cpettitt/dagre/v0.8.2/dist/dagre.core.js",
"https://raw.githubusercontent.com/cpettitt/dagre/v0.8.2/dist/dagre.core.js",
],
},
)
filegroup_external(
name = "io_github_cpettitt_graphlib",
licenses = ["notice"], # MIT
sha256_urls = {
"6a349742a6cb219d5a2fc8d0844f6d89a6efc62e20c664450d884fc7ff2d6015": [
"https://mirror.bazel.build/raw.githubusercontent.com/cpettitt/graphlib/v2.1.5/LICENSE",
"https://raw.githubusercontent.com/cpettitt/graphlib/v2.1.5/LICENSE",
],
"ddc33a6aaf955ee24b0e0d30110adf350c65eedc5c0f2c424ca85bc128199a66": [
"https://mirror.bazel.build/raw.githubusercontent.com/cpettitt/graphlib/v2.1.5/dist/graphlib.core.js",
"https://raw.githubusercontent.com/cpettitt/graphlib/v2.1.5/dist/graphlib.core.js",
],
},
)
filegroup_external(
name = "io_github_waylonflinn_weblas",
# no @license header
licenses = ["notice"], # MIT
sha256_urls = {
"633f2861a9a862b9cd7967e841e14dd3527912f209d6563595774fa31e3d84cb": [
"https://mirror.bazel.build/raw.githubusercontent.com/waylonflinn/weblas/v0.9.0/LICENSE",
"https://raw.githubusercontent.com/waylonflinn/weblas/v0.9.0/LICENSE",
],
"f138fce57f673ca8a633f4aee5ae5b6fcb6ad0de59069a42a74e996fd04d8fcc": [
"https://mirror.bazel.build/raw.githubusercontent.com/waylonflinn/weblas/v0.9.0/dist/weblas.js",
"https://raw.githubusercontent.com/waylonflinn/weblas/v0.9.0/dist/weblas.js",
],
},
)
filegroup_external(
name = "org_d3js",
# no @license header
licenses = ["notice"], # BSD-3-Clause
sha256_urls_extract = {
"05a9c2b9c206447be0e26b3a705e7f8df4943df2d063ddc5bf0274f50ec44727": [
"https://mirror.bazel.build/github.com/d3/d3/releases/download/v5.7.0/d3.zip",
"https://github.com/d3/d3/releases/download/v5.7.0/d3.zip",
],
},
# TODO(jart): Use srcs=["d3.js"] instead of this once supported.
generated_rule_name = "all_files",
extra_build_file_content = "\n".join([
"filegroup(",
" name = \"org_d3js\",",
" srcs = [\"d3.js\"],",
")",
]),
)
filegroup_external(
name = "org_chromium_catapult_vulcanized_trace_viewer",
licenses = ["notice"], # BSD-3-Clause
sha256_urls = {
"f0df289ba9d03d857ad1c2f5918861376b1510b71588ffc60eff5c7a7bfedb09": [
"https://mirror.bazel.build/raw.githubusercontent.com/catapult-project/catapult/2f7ee994984f3ebd3dd3dc3e05777bf180ec2ee8/LICENSE",
"https://raw.githubusercontent.com/catapult-project/catapult/2f7ee994984f3ebd3dd3dc3e05777bf180ec2ee8/LICENSE",
],
"b1f0195f305ca66fdb7dae264771f162ae03f04aa642848f15cd871c043e04d1": [
"https://mirror.bazel.build/raw.githubusercontent.com/catapult-project/catapult/237aea8b58a37a2991318b6a0db60d84078e5f7e/trace_viewer_full.html",
"https://raw.githubusercontent.com/catapult-project/catapult/237aea8b58a37a2991318b6a0db60d84078e5f7e/trace_viewer_full.html" # 2017-06-19
],
},
)
##############################################################################
# TensorBoard Testing Dependencies
web_library_external(
name = "org_npmjs_registry_accessibility_developer_tools",
licenses = ["notice"], # Apache License 2.0
sha256 = "1d6a72f401c9d53f68238c617dd43a05cd85ca5aa2e676a5b3c352711448e093",
urls = [
"https://mirror.bazel.build/registry.npmjs.org/accessibility-developer-tools/-/accessibility-developer-tools-2.10.0.tgz",
"https://registry.npmjs.org/accessibility-developer-tools/-/accessibility-developer-tools-2.10.0.tgz",
],
strip_prefix = "package",
path = "/accessibility-developer-tools",
suppress = ["strictDependencies"],
)
web_library_external(
name = "org_npmjs_registry_async",
licenses = ["notice"], # MIT
sha256 = "08655255ae810bf4d1cb1642df57658fcce823776d3ba8f4b46f4bbff6c87ece",
urls = [
"https://mirror.bazel.build/registry.npmjs.org/async/-/async-1.5.0.tgz",
"https://registry.npmjs.org/async/-/async-1.5.0.tgz",
],
strip_prefix = "package",
path = "/async",
)
web_library_external(
name = "org_npmjs_registry_chai",
licenses = ["notice"], # MIT
sha256 = "aca8137bed5bb295bd7173325b7ad604cd2aeb341d739232b4f9f0b26745be90",
urls = [
"https://mirror.bazel.build/registry.npmjs.org/chai/-/chai-3.5.0.tgz",
"https://registry.npmjs.org/chai/-/chai-3.5.0.tgz",
],
strip_prefix = "package",
path = "/chai",
)
web_library_external(
name = "org_npmjs_registry_mocha",
licenses = ["notice"], # MIT
sha256 = "13ef37a071196a2fba680799b906555d3f0ab61e80a7e8f73f93e77914590dd4",
urls = [
"https://mirror.bazel.build/registry.npmjs.org/mocha/-/mocha-2.5.3.tgz",
"https://registry.npmjs.org/mocha/-/mocha-2.5.3.tgz",
],
suppress = ["strictDependencies"],
strip_prefix = "package",
path = "/mocha",
)
web_library_external(
name = "org_npmjs_registry_sinon",
licenses = ["notice"], # BSD-3-Clause
sha256 = "49edb057695fc9019aae992bf7e677a07de7c6ce2bf9f9facde4a245045d1532",
urls = [
"https://mirror.bazel.build/registry.npmjs.org/sinon/-/sinon-1.17.4.tgz",
"https://registry.npmjs.org/sinon/-/sinon-1.17.4.tgz",
],
strip_prefix = "package/lib",
path = "/sinonjs",
)
web_library_external(
name = "org_npmjs_registry_sinon_chai",
licenses = ["notice"], # BSD-3-Clause
sha256 = "b85fc56f713832960b56fe9269ee4bb2cd41edd2ceb130b0936e5bdbed5dea63",
urls = [
"https://mirror.bazel.build/registry.npmjs.org/sinon-chai/-/sinon-chai-2.8.0.tgz",
"https://registry.npmjs.org/sinon-chai/-/sinon-chai-2.8.0.tgz",
],
strip_prefix = "package",
path = "/sinon-chai",
)
web_library_external(
name = "org_npmjs_registry_stacky",
licenses = ["notice"], # BSD-3-Clause
sha256 = "c659e60f7957d9d80c23a7aacc4d71b19c6421a08f91174c0062de369595acae",
urls = [
"https://mirror.bazel.build/registry.npmjs.org/stacky/-/stacky-1.3.1.tgz",
"https://registry.npmjs.org/stacky/-/stacky-1.3.1.tgz",
],
strip_prefix = "package",
path = "/stacky",
)
web_library_external(
name = "org_npmjs_registry_web_component_tester",
licenses = ["notice"], # BSD-3-Clause
sha256 = "9d4ebd4945df8a936916d4d32b7f280f2a3afa35f79e7ca8ad3ed0a42770c537",
urls = [
"https://mirror.bazel.build/registry.npmjs.org/web-component-tester/-/web-component-tester-4.3.6.tgz",
"https://registry.npmjs.org/web-component-tester/-/web-component-tester-4.3.6.tgz",
],
strip_prefix = "package",
path = "/web-component-tester",
suppress = [
"absolutePaths",
"strictDependencies",
],
deps = [
"@com_lodash",
"@org_npmjs_registry_accessibility_developer_tools",
"@org_npmjs_registry_async",
"@org_npmjs_registry_chai",
"@org_npmjs_registry_mocha",
"@org_npmjs_registry_sinon",
"@org_npmjs_registry_sinon_chai",
"@org_npmjs_registry_stacky",
"@org_polymer_test_fixture",
],
)
web_library_external(
name = "org_polymer_test_fixture",
licenses = ["notice"], # BSD-3-Clause
sha256 = "59d6cfb1187733b71275becfea181fe0aa1f734df5ff77f5850c806bbbf9a0d9",
strip_prefix = "test-fixture-2.0.1",
urls = [
"https://mirror.bazel.build/github.com/PolymerElements/test-fixture/archive/v2.0.1.tar.gz",
"https://github.com/PolymerElements/test-fixture/archive/v2.0.1.tar.gz",
],
path = "/test-fixture",
exclude = ["test/**"],
)
filegroup_external(
name = "org_tensorflow_tfjs",
licenses = ["notice"], # Apache 2.0
sha256_urls = {
"fccd26db2da462ec48e2d90fbdff1ee9a9d740f2c7efbd9789ba46eb98ecd1ae": [
"https://cdn.jsdelivr.net/npm/@tensorflow/tfjs@0.11.5/dist/tf.min.js",
],
},
)
| 42.37246 | 159 | 0.623408 |
load("@io_bazel_rules_closure//closure:defs.bzl", "filegroup_external")
load("@io_bazel_rules_closure//closure:defs.bzl", "web_library_external")
def tensorboard_js_workspace():
"https://raw.githubusercontent.com/Microsoft/TypeScript/v2.9.2/lib/lib.dom.d.ts",
],
},
extra_build_file_content = "\n".join([
"sh_binary(",
" name = \"tsc\",",
" srcs = [\"tsc.sh\"],",
" data = [",
" \"tsc.js\",",
" \"@org_nodejs\",",
" ],",
")",
"",
"genrule(",
" name = \"tsc_sh\",",
" outs = [\"tsc.sh\"],",
" cmd = \"cat >$@ <<'EOF'\\n\" +",
" \"
" \"NODE=external/org_nodejs/bin/node\\n\" +",
" \"if [[ -e external/org_nodejs/node.exe ]]; then\\n\" +",
" \" NODE=external/org_nodejs/node.exe\\n\" +",
" \"fi\\n\" +",
" \"exec $${NODE} external/com_microsoft_typescript/tsc.js \\\"$$@\\\"\\n\" +",
" \"EOF\",",
" executable = True,",
")",
]),
)
native.new_http_archive(
name = "io_angular_clutz",
build_file = str(Label("//third_party:clutz.BUILD")),
sha256 = "7a5c785dbcc3ae0daa1fcf4507de6a23bbecdb2bf80460651e4c2b88c1ad7582",
strip_prefix = "clutz-7f1a3ee9ad9f85a9056084dc039496bbd35e11f6",
urls = [
"https://mirror.bazel.build/github.com/angular/clutz/archive/7f1a3ee9ad9f85a9056084dc039496bbd35e11f6.tar.gz",
"https://github.com/angular/clutz/archive/7f1a3ee9ad9f85a9056084dc039496bbd35e11f6.tar.gz",
],
)
filegroup_external(
name = "com_google_javascript_closure_compiler_externs",
licenses = ["notice"],
sha256_urls_extract = {
"55bdf8dc5d74534b63edbce5f510557a18a2b7aa578938ba300eb65f2da48092": [
"https://mirror.bazel.build/github.com/google/closure-compiler/archive/v20180402.tar.gz",
"https://github.com/google/closure-compiler/archive/v20180402.tar.gz",
],
},
strip_prefix = {"v20180402.tar.gz": "closure-compiler-20180402/externs"},
)
filegroup_external(
name = "com_google_javascript_closure_compiler_externs_polymer",
licenses = ["notice"],
sha256_urls = {
"737af73d7b02226e6e1516044a8eb8283376d44f64839979936ca163c00900f4": [
"https://mirror.bazel.build/raw.githubusercontent.com/google/closure-compiler/v20180402/contrib/externs/polymer-1.0.js",
"https://raw.githubusercontent.com/google/closure-compiler/v20180402/contrib/externs/polymer-1.0.js",
],
},
)
filegroup_external(
name = "org_threejs",
licenses = ["notice"],
sha256_urls = {
"5eb9be209f84c4588f573b9abd8e13c04ce187ad6f40e8b12993d00b1428de54": [
"https://mirror.bazel.build/raw.githubusercontent.com/mrdoob/three.js/r77/LICENSE",
"https://raw.githubusercontent.com/mrdoob/three.js/r77/LICENSE",
],
"881cc79c84c34a1f61f8c8af0ee3f237d83a2eda3868720fdcb47bcacf8da44a": [
"https://mirror.bazel.build/raw.githubusercontent.com/mrdoob/three.js/r77/build/three.js",
"https://raw.githubusercontent.com/mrdoob/three.js/r77/build/three.js",
],
"98b8b5954901025a98033c8bdd65969be1f30b59e11f823ec864253bb72f768d": [
"https://mirror.bazel.build/raw.githubusercontent.com/mrdoob/three.js/r77/examples/js/controls/OrbitControls.js",
"https://raw.githubusercontent.com/mrdoob/three.js/r77/examples/js/controls/OrbitControls.js",
],
},
)
"https://mirror.bazel.build/raw.githubusercontent.com/cpettitt/graphlib/v2.1.5/dist/graphlib.core.js",
"https://raw.githubusercontent.com/cpettitt/graphlib/v2.1.5/dist/graphlib.core.js",
],
},
)
filegroup_external(
name = "io_github_waylonflinn_weblas",
# no @license header
licenses = ["notice"], # MIT
sha256_urls = {
"633f2861a9a862b9cd7967e841e14dd3527912f209d6563595774fa31e3d84cb": [
"https://mirror.bazel.build/raw.githubusercontent.com/waylonflinn/weblas/v0.9.0/LICENSE",
"https://raw.githubusercontent.com/waylonflinn/weblas/v0.9.0/LICENSE",
],
"f138fce57f673ca8a633f4aee5ae5b6fcb6ad0de59069a42a74e996fd04d8fcc": [
"https://mirror.bazel.build/raw.githubusercontent.com/waylonflinn/weblas/v0.9.0/dist/weblas.js",
"https://raw.githubusercontent.com/waylonflinn/weblas/v0.9.0/dist/weblas.js",
],
},
)
filegroup_external(
name = "org_d3js",
# no @license header
licenses = ["notice"], # BSD-3-Clause
sha256_urls_extract = {
"05a9c2b9c206447be0e26b3a705e7f8df4943df2d063ddc5bf0274f50ec44727": [
"https://mirror.bazel.build/github.com/d3/d3/releases/download/v5.7.0/d3.zip",
"https://github.com/d3/d3/releases/download/v5.7.0/d3.zip",
],
},
# TODO(jart): Use srcs=["d3.js"] instead of this once supported.
generated_rule_name = "all_files",
extra_build_file_content = "\n".join([
"filegroup(",
" name = \"org_d3js\",",
" srcs = [\"d3.js\"],",
")",
]),
)
filegroup_external(
name = "org_chromium_catapult_vulcanized_trace_viewer",
licenses = ["notice"], # BSD-3-Clause
sha256_urls = {
"f0df289ba9d03d857ad1c2f5918861376b1510b71588ffc60eff5c7a7bfedb09": [
"https://mirror.bazel.build/raw.githubusercontent.com/catapult-project/catapult/2f7ee994984f3ebd3dd3dc3e05777bf180ec2ee8/LICENSE",
"https://raw.githubusercontent.com/catapult-project/catapult/2f7ee994984f3ebd3dd3dc3e05777bf180ec2ee8/LICENSE",
],
"b1f0195f305ca66fdb7dae264771f162ae03f04aa642848f15cd871c043e04d1": [
"https://mirror.bazel.build/raw.githubusercontent.com/catapult-project/catapult/237aea8b58a37a2991318b6a0db60d84078e5f7e/trace_viewer_full.html",
"https://raw.githubusercontent.com/catapult-project/catapult/237aea8b58a37a2991318b6a0db60d84078e5f7e/trace_viewer_full.html" # 2017-06-19
],
},
)
##############################################################################
# TensorBoard Testing Dependencies
web_library_external(
name = "org_npmjs_registry_accessibility_developer_tools",
licenses = ["notice"], # Apache License 2.0
sha256 = "1d6a72f401c9d53f68238c617dd43a05cd85ca5aa2e676a5b3c352711448e093",
urls = [
"https://mirror.bazel.build/registry.npmjs.org/accessibility-developer-tools/-/accessibility-developer-tools-2.10.0.tgz",
"https://registry.npmjs.org/accessibility-developer-tools/-/accessibility-developer-tools-2.10.0.tgz",
],
strip_prefix = "package",
path = "/accessibility-developer-tools",
suppress = ["strictDependencies"],
)
web_library_external(
name = "org_npmjs_registry_async",
licenses = ["notice"], # MIT
sha256 = "08655255ae810bf4d1cb1642df57658fcce823776d3ba8f4b46f4bbff6c87ece",
urls = [
"https://mirror.bazel.build/registry.npmjs.org/async/-/async-1.5.0.tgz",
"https://registry.npmjs.org/async/-/async-1.5.0.tgz",
],
strip_prefix = "package",
path = "/async",
)
web_library_external(
name = "org_npmjs_registry_chai",
licenses = ["notice"], # MIT
sha256 = "aca8137bed5bb295bd7173325b7ad604cd2aeb341d739232b4f9f0b26745be90",
urls = [
"https://mirror.bazel.build/registry.npmjs.org/chai/-/chai-3.5.0.tgz",
"https://registry.npmjs.org/chai/-/chai-3.5.0.tgz",
],
strip_prefix = "package",
path = "/chai",
)
web_library_external(
name = "org_npmjs_registry_mocha",
licenses = ["notice"], # MIT
sha256 = "13ef37a071196a2fba680799b906555d3f0ab61e80a7e8f73f93e77914590dd4",
urls = [
"https://mirror.bazel.build/registry.npmjs.org/mocha/-/mocha-2.5.3.tgz",
"https://registry.npmjs.org/mocha/-/mocha-2.5.3.tgz",
],
suppress = ["strictDependencies"],
strip_prefix = "package",
path = "/mocha",
)
web_library_external(
name = "org_npmjs_registry_sinon",
licenses = ["notice"], # BSD-3-Clause
sha256 = "49edb057695fc9019aae992bf7e677a07de7c6ce2bf9f9facde4a245045d1532",
urls = [
"https://mirror.bazel.build/registry.npmjs.org/sinon/-/sinon-1.17.4.tgz",
"https://registry.npmjs.org/sinon/-/sinon-1.17.4.tgz",
],
strip_prefix = "package/lib",
path = "/sinonjs",
)
web_library_external(
name = "org_npmjs_registry_sinon_chai",
licenses = ["notice"], # BSD-3-Clause
sha256 = "b85fc56f713832960b56fe9269ee4bb2cd41edd2ceb130b0936e5bdbed5dea63",
urls = [
"https://mirror.bazel.build/registry.npmjs.org/sinon-chai/-/sinon-chai-2.8.0.tgz",
"https://registry.npmjs.org/sinon-chai/-/sinon-chai-2.8.0.tgz",
],
strip_prefix = "package",
path = "/sinon-chai",
)
web_library_external(
name = "org_npmjs_registry_stacky",
licenses = ["notice"], # BSD-3-Clause
sha256 = "c659e60f7957d9d80c23a7aacc4d71b19c6421a08f91174c0062de369595acae",
urls = [
"https://mirror.bazel.build/registry.npmjs.org/stacky/-/stacky-1.3.1.tgz",
"https://registry.npmjs.org/stacky/-/stacky-1.3.1.tgz",
],
strip_prefix = "package",
path = "/stacky",
)
web_library_external(
name = "org_npmjs_registry_web_component_tester",
licenses = ["notice"], # BSD-3-Clause
sha256 = "9d4ebd4945df8a936916d4d32b7f280f2a3afa35f79e7ca8ad3ed0a42770c537",
urls = [
"https://mirror.bazel.build/registry.npmjs.org/web-component-tester/-/web-component-tester-4.3.6.tgz",
"https://registry.npmjs.org/web-component-tester/-/web-component-tester-4.3.6.tgz",
],
strip_prefix = "package",
path = "/web-component-tester",
suppress = [
"absolutePaths",
"strictDependencies",
],
deps = [
"@com_lodash",
"@org_npmjs_registry_accessibility_developer_tools",
"@org_npmjs_registry_async",
"@org_npmjs_registry_chai",
"@org_npmjs_registry_mocha",
"@org_npmjs_registry_sinon",
"@org_npmjs_registry_sinon_chai",
"@org_npmjs_registry_stacky",
"@org_polymer_test_fixture",
],
)
web_library_external(
name = "org_polymer_test_fixture",
licenses = ["notice"], # BSD-3-Clause
sha256 = "59d6cfb1187733b71275becfea181fe0aa1f734df5ff77f5850c806bbbf9a0d9",
strip_prefix = "test-fixture-2.0.1",
urls = [
"https://mirror.bazel.build/github.com/PolymerElements/test-fixture/archive/v2.0.1.tar.gz",
"https://github.com/PolymerElements/test-fixture/archive/v2.0.1.tar.gz",
],
path = "/test-fixture",
exclude = ["test/**"],
)
filegroup_external(
name = "org_tensorflow_tfjs",
licenses = ["notice"], # Apache 2.0
sha256_urls = {
"fccd26db2da462ec48e2d90fbdff1ee9a9d740f2c7efbd9789ba46eb98ecd1ae": [
"https://cdn.jsdelivr.net/npm/@tensorflow/tfjs@0.11.5/dist/tf.min.js",
],
},
)
| true | true |
f7241cb443d374973211d2521ea063e08ac1cacc | 9,374 | py | Python | model/librarian.py | jbrew/wikiwords | 269803b7f4b5e18caf2a1d4544f066200d04d982 | [
"MIT"
] | null | null | null | model/librarian.py | jbrew/wikiwords | 269803b7f4b5e18caf2a1d4544f066200d04d982 | [
"MIT"
] | null | null | null | model/librarian.py | jbrew/wikiwords | 269803b7f4b5e18caf2a1d4544f066200d04d982 | [
"MIT"
] | null | null | null | import csv
import os
import math
from . import dictionary
from . import texthandler
from itertools import islice
import string
# Ad-hoc punctuation list; includes '•' and '|', which string.punctuation lacks.
# NOTE(review): appears unused within this module — remove_punctuation() below
# uses string.punctuation instead; confirm no external caller before removing.
punctuation = ['.', ',', '!', '?', '(', ')', '$', ':', ';', '{', '}', '[', ']', '•', '|']
def text_from_path(path):
    """Return the full text content of the file at *path*."""
    with open(path) as handle:
        return handle.read()

def text_from_directory(dirpath):
    """Concatenate the text of every non-hidden file in *dirpath*.

    Bug fix: the original sliced ``os.listdir(dirpath)[1:]``, dropping one
    arbitrary entry (listdir order is unspecified).  Hidden files such as
    ``.DS_Store`` are now skipped explicitly instead, and entries are sorted
    so the result is deterministic.
    """
    text = ''
    for fname in sorted(os.listdir(dirpath)):
        if fname.startswith('.'):
            continue  # skip .DS_Store and other hidden entries
        text += text_from_path('%s/%s' % (dirpath, fname))
    return text
def clean_all(lines):
    """Strip surrounding whitespace and punctuation from every line."""
    cleaned = []
    for line in lines:
        cleaned.append(remove_punctuation(line.strip()))
    return cleaned

def filter_characters(lines, characters):
    """Keep only the lines containing none of *characters*."""
    forbidden = set(characters)
    kept = []
    for line in lines:
        if none_in(line, forbidden):
            kept.append(line)
    return kept

def in_size_range(lines, min_length=20, max_length=20):
    """Return the lines whose length is strictly between the two bounds.

    NOTE(review): with the defaults (20, 20) no line can ever qualify; the
    default max_length looks like a typo — confirm the intended value.
    """
    return [line for line in lines if min_length < len(line) < max_length]

def none_in(line, character_set):
    """Return True when no character of *line* appears in *character_set*."""
    return not any(ch in character_set for ch in line)
def term_frequency_dict_by_name(dirpath, name):
    """Return the term-frequency dict for the file called *name* in *dirpath*.

    Returns None when no file with that name exists.
    """
    all_files = get_data_from_directory(dirpath, headers=['name', 'text'])
    # all_files[0] is the header row — skip it so 'name'/'text' never match.
    for rowname, rowtext in all_files[1:]:
        if rowname == name:
            return term_dict(rowtext)
    return None

def term_frequency_dicts_from_directory(dirpath):
    """Return one term-frequency dict per file in *dirpath*.

    Bug fixes: the header row is no longer turned into a bogus term dict,
    and the unused concatenation of all texts was removed.
    """
    all_files = get_data_from_directory(dirpath, headers=['name', 'text'])
    return [term_dict(text) for _name, text in all_files[1:]]

def document_frequency_dict_from_tf_dicts(tf_dicts):
    """Map each term appearing in any tf dict to its document frequency."""
    big_td = dictionary.union(tf_dicts)  # one term dictionary for all documents
    return df_dict(big_td, tf_dicts, threshold=1)

def lines_from_lyrics_directory(dirpath):
    """Return the sanitized, lower-cased, punctuation-free lyric lines.

    Bug fix: the header row used to contribute a literal 'text' line; it is
    now excluded before the lines are collected.
    """
    text_files = get_data_from_directory(dirpath, headers=['name', 'text'])
    lines = [remove_punctuation(line.strip().lower())
             for line in get_all_lines(text_files[1:])
             if len(line.strip()) > 0]
    return sanitize(lines)
def sanitize(lines):
    """Drop every line that contains a banned word.

    Word list: https://github.com/dariusk/wordfilter/blob/master/lib/badwords.json

    Bug fix: the original called ``f.readlines()`` twice; the second call
    returned an empty list (the file was already exhausted), so the
    pluralized ('…s') forms were never actually banned.  The file is now
    read once and both forms are included.
    """
    with open('resources/badwords.txt') as f:
        words = [line.strip() for line in f]
    badwords = set(words) | set(w + 's' for w in words)
    return [line for line in lines if is_clean(line, badwords)]
def get_all_lines(text_files):
    """Flatten a list of (name, text) pairs into a single list of lines."""
    return [line for _name, text in text_files for line in text.split('\n')]

def is_clean(line, badwords):
    """Return True when no whitespace-separated word of *line* is banned."""
    return not any(word in badwords for word in line.split())
### LINES AND SENTENCES ###

def lines_from_file(fpath):
    """Read *fpath* and return its lines, filtered through sanitize()."""
    with open(fpath) as f:
        return sanitize(f.readlines())

def lines_from_directory(dirpath):
    """Collect the sanitized lines of every non-hidden file in *dirpath*.

    Bug fix: the original sliced ``os.listdir(dirpath)[1:]``, silently
    dropping one arbitrary entry (listdir order is unspecified).  Hidden
    files are now skipped explicitly and the traversal is deterministic.
    """
    lines = []
    for fname in sorted(os.listdir(dirpath)):
        if fname.startswith('.'):
            continue
        lines.extend(lines_from_file('%s/%s' % (dirpath, fname)))
    return lines
def sentences_from_file(filepath):
    """Split the text of *filepath* into sentences."""
    with open(filepath) as f:
        contents = f.read()
    return texthandler.split_into_sentences(contents)

def fragments_from_file(filepath):
    """Split the text of *filepath* into fragments (commas act as breaks)."""
    with open(filepath) as f:
        contents = f.read()
    return texthandler.split_into_sentences(contents.replace(',', '.'))
def sentences_from_directory(dirpath):
    """Collect the sentences of every non-hidden file in *dirpath*.

    Bug fixes: the original sliced ``os.listdir(dirpath)[1:]``, dropping one
    arbitrary entry; hidden files are now skipped explicitly instead.  A
    stray debug ``print`` of each filename was also removed.
    """
    sentences = []
    for fname in sorted(os.listdir(dirpath)):
        if fname.startswith('.'):
            continue
        with open('%s/%s' % (dirpath, fname)) as f:
            sentences.extend(texthandler.split_into_sentences(f.read()))
    return sentences
def sentences_from_TED_file(fpath):
    """Return the sentences of a TED transcript.

    Sentences are assumed to end at '.\\n'; internal newlines are folded
    into spaces.
    """
    with open(fpath) as f:
        text = f.read()
    return [s.replace('\n', ' ') for s in text.split('.\n')]

def sentences_from_TED_directory(dirpath):
    """Collect TED sentences from every non-hidden file in *dirpath*.

    Bug fix: the original sliced ``os.listdir(dirpath)[1:]``, dropping one
    arbitrary entry; hidden files are now skipped explicitly instead and
    the traversal order is deterministic.
    """
    sentences = []
    for fname in sorted(os.listdir(dirpath)):
        if fname.startswith('.'):
            continue
        sentences.extend(sentences_from_TED_file('%s/%s' % (dirpath, fname)))
    return sentences
def get_data(path, n=100000):
    """Read at most the first *n* rows of the CSV file at *path*."""
    with open(path) as csv_file:
        reader = csv.reader(csv_file, delimiter=',')
        return list(islice(reader, n))
def get_data_from_tab_delimited(path, n=100000, headers=None):
    """Read at most *n* rows of a TSV file, skipping its first line.

    If *headers* is given it is prepended to the returned rows.
    """
    with open(path) as f:
        next(f)  # discard the file's own header line
        rows = list(islice(csv.reader(f, delimiter='\t'), n))
    if headers:
        rows.insert(0, headers)
    return rows
# directory of only text files (NOTE: In future, consider scraping to CSV instead)
def get_data_from_directory(dirpath, headers=['', '']):
    """Return ``[headers] + [[filename, file_text], ...]`` for *dirpath*.

    Files that cannot be read as text are silently skipped, mirroring the
    original best-effort behaviour.  Returns an error string (not a list)
    if *dirpath* contains a subdirectory, as before.

    Bug fix: the subdirectory check used to test the bare filename against
    the current working directory; it now tests the real path inside
    *dirpath*.  Entries are sorted for a deterministic result.
    """
    if not dirpath.endswith('/'):
        dirpath += '/'
    rows = [headers]
    for filename in sorted(os.listdir(dirpath)):
        filepath = dirpath + filename
        if os.path.isdir(filepath):
            return "directory must only contain text files"
        with open(filepath) as f:
            try:
                rows.append([filename, f.read()])
            except (OSError, UnicodeDecodeError):
                pass  # unreadable/binary file: skip, as before
    return rows
# remove any dictionaries with suspiciously similar profiles
def eliminate_duplicates(tds, threshold=.9):
    """Keep only the dicts with no near-duplicate later in the list.

    When several dictionaries are mutually similar the last one wins —
    the same behaviour as the original recursive formulation.
    """
    return [td for i, td in enumerate(tds)
            if not list_contains_duplicate(td, tds[i + 1:], threshold)]

# given a dictionary and a list of dictionaries, returns whether any is a duplicate
def list_contains_duplicate(d, dlist, threshold=.9):
    """Return True when some dict in *dlist* is similar enough to *d*."""
    return any(dictionary.similarity(d, other) > threshold for other in dlist)
def all_text_from_column(rows, col_name):
    """Join the *col_name* column of all data rows with newlines.

    ``rows[0]`` is the header row.  Returns '' when the column is missing.
    """
    header = rows[0]
    if col_name not in header:
        return ''
    idx = header.index(col_name)
    return '\n'.join(row[idx] for row in rows[1:])
def remove_punctuation(text):
    """Strip every ASCII punctuation character from *text*."""
    return text.translate(str.maketrans('', '', string.punctuation))
# a dictionary of all terms in the document of length 1
def term_dict(doc):
    """Count term frequencies in *doc* (lower-cased, punctuation removed)."""
    counts = {}
    for raw in doc.split():
        word = remove_punctuation(raw.strip().lower())
        counts[word] = counts.get(word, 0) + 1
    return counts

# a list of dictionaries of terms in the document of length n
def term_dicts(corpus):
    """Return one term-frequency dict per document in *corpus*."""
    return [term_dict(doc) for doc in corpus]
# returns lower and upper bounds containing 95 percent of occurrence rates of the term
def tf_bounds(term, tds, n=2):
    """Return (mean - n*stdev, mean + n*stdev) of *term*'s counts across *tds*.

    Bug fix: the original called an undefined ``frequency_distribution``
    helper (a NameError at runtime).  The per-document count distribution
    is now computed inline — 0 for documents lacking the term, which is the
    presumed intent of the missing helper; confirm against its original
    definition if one is recovered.
    """
    distribution = [td.get(term, 0) for td in tds]
    m = mean(distribution)
    sd = stdev(distribution)
    return m - n * sd, m + n * sd
# returns terms in a dictionary that occur in at least two (or n) dictionaries from a list of dictionaries
def non_unique_terms(term_dict, dict_list, n=1):
    """Keep the entries of *term_dict* whose term appears in >= n dicts."""
    kept = {}
    for term, count in term_dict.items():
        if doc_frequency(term, dict_list) >= n:
            kept[term] = count
    return kept

# how many documents in the corpus include the term
def doc_frequency(term, term_dicts):
    """Return the number of term dicts that contain *term*."""
    return sum(1 for td in term_dicts if term in td)

# returns a dictionary of document frequencies (df) for all terms in the dictionary list
# only includes terms with a df of at least n
def df_dict(term_dict, dict_list, threshold=1):
    """Map each term of *term_dict* to its document frequency, keeping only
    terms whose frequency reaches *threshold*."""
    frequencies = ((term, doc_frequency(term, dict_list)) for term in term_dict)
    return {term: df for term, df in frequencies if df >= threshold}
#### SEARCH ####

# takes a list of docs and a corresponding (equal length) list of term dicts
def docs_containing_term(term, docs, term_dicts):
    """Return the docs whose paired term dict contains *term*."""
    matches = []
    for i, td in enumerate(term_dicts):
        if term in td:
            matches.append(docs[i])
    return matches
### LOAD RESOURCES ###

def stopwords():
    """Load the stopword list from resources/stopwords.txt as a set."""
    with open('resources/stopwords.txt') as f:
        contents = f.read()
    return {word for word in contents.split('\n')}
### STATISTICAL METHODS ####

# standard deviation
def stdev(values):
    """Sample standard deviation (Bessel-corrected; needs len(values) >= 2)."""
    mu = sum(values) / len(values)
    variance = sum((x - mu) ** 2 for x in values) / (len(values) - 1)
    return math.sqrt(variance)

def mean(values):
    """Arithmetic mean of *values*."""
    return sum(values) / len(values)
#### SAVING DICTIONARIES ####
def save_ngrams_from_field(docs, field, n, dirname='NO DIR', dup_threshold=.8):
	"""Compute per-document n-gram document frequencies for one column of
	*docs* and write them to stats/<dirname>/<field>_<n>g_df.txt.

	NOTE(review): ``term_dicts`` is defined above with a single parameter
	and builds unigram counts only, so ``term_dicts(text, n)`` raises a
	TypeError as written — and *text* is one joined string, not a list of
	documents.  This looks like it predates a refactor of term_dicts;
	confirm the intended n-gram implementation before relying on this.
	"""
	text = all_text_from_column(docs,field)
	tds = term_dicts(text,n)	# one term dictionary per document
	unique_tds = eliminate_duplicates(tds, threshold=dup_threshold)
	big_td = dictionary.union(unique_tds)	# one term dictionary for all documents
	to_save = df_dict(big_td, unique_tds, threshold=2)
	print(len(to_save))	# debug: number of n-grams kept
	dirpath = 'stats/%s/' % dirname
	if not os.path.exists(dirpath):
		os.mkdir(dirpath)
	filename = '%s_%sg_df.txt' % (field.lower(), n)
	savepath = dirpath + filename
	dictionary.to_tab_delimited(to_save, savepath)
def process_directory(dirname, dup_threshold=.8):
    """Ensure the data/stats directories for *dirname* exist, then save
    1..5-gram document-frequency tables for the 'Songtext' field."""
    dirpath = 'data/%s' % dirname
    savepath = 'stats/%s/' % dirname
    for needed in (dirpath, savepath):
        if not os.path.exists(needed):
            os.mkdir(needed)
    rows = get_data_from_directory(dirpath, headers=['Filename', 'Songtext'])
    for n in range(1, 6):
        save_ngrams_from_field(rows, 'Songtext', n,
                               dirname=dirname, dup_threshold=dup_threshold)
# arranges documents into groups of a given size
def group_into_documents(small_docs, group_size):
    """Concatenate consecutive runs of *group_size* docs, newline-terminated.

    Trailing docs that do not fill a complete group are dropped, exactly as
    in the original implementation.
    """
    num_groups = len(small_docs) // group_size
    grouped = []
    for g in range(num_groups):
        chunk = small_docs[g * group_size:(g + 1) * group_size]
        grouped.append(''.join(doc + '\n' for doc in chunk))
    return grouped
# returns a list of keywords for the doc in the context of the corpus
def keywords(tf_dict, df_dict, tf_dicts):
    """Rank the terms of *tf_dict* by tf-idf, highest first."""
    total_docs = len(tf_dicts)
    scores = {}
    for term in tf_dict:
        scores[term] = tfidf(term, tf_dict, df_dict, total_docs)
    return dictionary.sort_descending(scores)

def tfidf(k, tf_dict, df_dict, total_docs):
    """tf-idf score of term *k*: tf * -log(df / total_docs)."""
    idf = -1 * math.log(df_dict[k] / total_docs)
    return tf_dict[k] * idf
| 26.555241 | 114 | 0.717943 | import csv
import os
import math
from . import dictionary
from . import texthandler
from itertools import islice
import string
punctuation = ['.', ',', '!', '?', '(', ')', '$', ':', ';', '{', '}', '[', ']', '•', '|']
def text_from_path(path):
with open(path) as f:
return f.read()
def text_from_directory(dirpath):
text = ''
for fname in os.listdir(dirpath)[1:]:
fpath = '%s/%s' % (dirpath, fname)
text += text_from_path(fpath)
return text
def clean_all(lines):
return [remove_punctuation(line.strip()) for line in lines]
def filter_characters(lines, characters):
char_set = set(characters)
return [line for line in lines if none_in(line, char_set)]
def in_size_range(lines, min_length=20, max_length=20):
return [line for line in lines if len(line) > min_length and len(line) < max_length]
def none_in(line, character_set):
for c in line:
if c in character_set:
return False
return True
def term_frequency_dict_by_name(dirpath, name):
all_files = get_data_from_directory(dirpath, headers=['name','text'])
for rowname, rowtext in all_files:
if rowname == name:
return term_dict(rowtext)
return None
def term_frequency_dicts_from_directory(dirpath):
all_files = get_data_from_directory(dirpath, headers=['name','text'])
all_text = all_text_from_column(all_files, 'text')
tds = [term_dict(text) for name, text in all_files]
return tds
def document_frequency_dict_from_tf_dicts(tf_dicts):
big_td = dictionary.union(tf_dicts)
doc_frequency_dict = df_dict(big_td, tf_dicts, threshold=1)
return doc_frequency_dict
def lines_from_lyrics_directory(dirpath):
text_files = get_data_from_directory(dirpath, headers=['name','text'])
lines = [remove_punctuation(line.strip().lower()) for line in get_all_lines(text_files) if len(line.strip()) > 0]
lines = sanitize(lines)
return lines
def sanitize(lines):
with open('resources/badwords.txt') as f:
badwords = set([line.strip() for line in f.readlines()]) | set([line.strip() + 's' for line in f.readlines()])
lines = [line for line in lines if is_clean(line, badwords)]
return lines
def get_all_lines(text_files):
lines = []
for name, text in text_files:
lines.extend(text.split('\n'))
return lines
def is_clean(line, badwords):
words = line.split()
for w in words:
if w in badwords:
return False
return True
lines = f.readlines()
return sanitize(lines)
def lines_from_directory(dirpath):
lines = []
for fname in os.listdir(dirpath)[1:]:
fpath = '%s/%s' % (dirpath, fname)
lines.extend(lines_from_file(fpath))
return lines
def sentences_from_file(filepath):
with open(filepath) as f:
text = f.read()
return texthandler.split_into_sentences(text)
def fragments_from_file(filepath):
with open(filepath) as f:
text = f.read()
return texthandler.split_into_sentences(text.replace(',','.'))
def sentences_from_directory(dirpath):
sentences = []
for fname in os.listdir(dirpath)[1:]:
print(fname)
fpath = '%s/%s' % (dirpath, fname)
with open(fpath) as f:
text = f.read()
sentences.extend(texthandler.split_into_sentences(text))
return sentences
def sentences_from_TED_file(fpath):
with open(fpath) as f:
text = f.read()
sentences = [s.replace('\n',' ') for s in text.split('.\n')]
return sentences
def sentences_from_TED_directory(dirpath):
sentences = []
for fname in os.listdir(dirpath)[1:]:
fpath = '%s/%s' % (dirpath, fname)
sentences.extend(sentences_from_TED_file(fpath))
return sentences
def get_data(path, n=100000):
with open(path) as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
rows = [row for row in islice(csv_reader,0,n)]
return rows
def get_data_from_tab_delimited(path, n=100000, headers=None):
with open(path) as f:
next(f)
reader=csv.reader(f,delimiter='\t')
rows = [row for row in islice(reader,0,n)]
if headers:
rows.insert(0, headers)
return rows
def get_data_from_directory(dirpath, headers=['','']):
rows = [headers]
for filename in os.listdir(dirpath):
if os.path.isdir(filename):
return "directory must only contain text files"
if not dirpath[-1] == '/':
dirpath += '/'
filepath = dirpath+filename
with open(filepath) as f:
try:
text = f.read()
rows.append([filename, text])
except:
pass
return rows
def eliminate_duplicates(tds, threshold=.9):
if len(tds) == 0:
return []
else:
head = tds[0]
remainder = tds[1:]
if list_contains_duplicate(head, remainder, threshold):
return eliminate_duplicates(remainder, threshold)
else:
return [head] + eliminate_duplicates(remainder, threshold)
def list_contains_duplicate(d, dlist, threshold=.9):
for head in dlist:
if dictionary.similarity(d, head) > threshold:
return True
return False
def all_text_from_column(rows, col_name):
if col_name in rows[0]:
n = rows[0].index(col_name)
return '\n'.join([row[n] for row in rows[1:]])
else:
return ''
def remove_punctuation(text):
exclude = set(string.punctuation)
return ''.join(ch for ch in text if ch not in exclude)
def term_dict(doc):
term_dict = {}
words = [remove_punctuation(w.strip().lower()) for w in doc.split()]
for word in words:
if word in term_dict:
term_dict[word] += 1
else:
term_dict[word] = 1
return term_dict
def term_dicts(corpus):
return [term_dict(d) for d in corpus]
def tf_bounds(term, tds, n=2):
distribution = frequency_distribution(term, tds)
m = mean(distribution)
sd = stdev(distribution)
return m - n*sd, m + n*sd
def non_unique_terms(term_dict, dict_list, n=1):
return {k: v for k, v in term_dict.items() if doc_frequency(k, dict_list) >= n}
def doc_frequency(term, term_dicts):
return len([1 for td in term_dicts if term in td])
def df_dict(term_dict, dict_list, threshold=1):
to_return = {}
for k, v in term_dict.items():
df = doc_frequency(k, dict_list)
if df >= threshold:
to_return[k] = df
return to_return
return [docs[i] for i, td in enumerate(term_dicts) if term in td]
rds.txt') as f:
return set(f.read().split('\n'))
sum_squared_differences = sum([(x-mean)**2 for x in values])
return math.sqrt(sum_squared_differences / (N-1))
def mean(values):
return sum(values) / len(values)
ll_text_from_column(docs,field)
tds = term_dicts(text,n)
unique_tds = eliminate_duplicates(tds, threshold=dup_threshold)
big_td = dictionary.union(unique_tds)
to_save = df_dict(big_td, unique_tds, threshold=2)
print(len(to_save))
dirpath = 'stats/%s/' % dirname
if not os.path.exists(dirpath):
os.mkdir(dirpath)
filename = '%s_%sg_df.txt' % (field.lower(), n)
savepath = dirpath + filename
dictionary.to_tab_delimited(to_save, savepath)
def process_directory(dirname, dup_threshold=.8):
dirpath = 'data/%s' % dirname
savepath = 'stats/%s/' % dirname
if not os.path.exists(dirpath):
os.mkdir(dirpath)
if not os.path.exists(savepath):
os.mkdir(savepath)
rows = get_data_from_directory(dirpath, headers=['Filename','Songtext'])
for n in range(1, 6):
save_ngrams_from_field(rows, 'Songtext', n, dirname=dirname, dup_threshold=dup_threshold)
def group_into_documents(small_docs, group_size):
num_docs = len(small_docs)
new_docs = ['' for x in range(num_docs//group_size)]
for doc_index in range(len(new_docs)):
for i in range(group_size):
full_index = doc_index*group_size+i
new_docs[doc_index] += small_docs[full_index] + '\n'
return new_docs
def keywords(tf_dict, df_dict, tf_dicts):
total_docs = len(tf_dicts)
tfidfs = {k: tfidf(k, tf_dict, df_dict, total_docs) for k in tf_dict.keys()}
return dictionary.sort_descending(tfidfs)
def tfidf(k, tf_dict, df_dict, total_docs):
tf = tf_dict[k]
df = df_dict[k]
idf = -1 * math.log(df/total_docs)
return tf*idf
| true | true |
f7241cd61a0bf8e14920dbf630f13d68b111a34c | 30,616 | py | Python | flow/scenarios/base_scenario.py | nathanlct/flow | 5ca98bbf095293c54d2840770cc3f93608c0680e | [
"MIT"
] | null | null | null | flow/scenarios/base_scenario.py | nathanlct/flow | 5ca98bbf095293c54d2840770cc3f93608c0680e | [
"MIT"
] | null | null | null | flow/scenarios/base_scenario.py | nathanlct/flow | 5ca98bbf095293c54d2840770cc3f93608c0680e | [
"MIT"
] | null | null | null | """Contains the base scenario class."""
from flow.core.params import InitialConfig
from flow.core.params import TrafficLightParams
from flow.core.params import SumoCarFollowingParams
from flow.core.params import SumoLaneChangeParams
import time
import xml.etree.ElementTree as ElementTree
from lxml import etree
from collections import defaultdict
try:
# Import serializable if rllab is installed
from rllab.core.serializable import Serializable
except ImportError:
Serializable = object
# default sumo probability value TODO (ak): remove
DEFAULT_PROBABILITY = 0
# default sumo vehicle length value (in meters) TODO (ak): remove
DEFAULT_LENGTH = 5
# default sumo vehicle class class TODO (ak): remove
DEFAULT_VCLASS = 0
class Scenario(Serializable):
"""Base scenario class.
Initializes a new scenario. Scenarios are used to specify features of
a network, including the positions of nodes, properties of the edges
and junctions connecting these nodes, properties of vehicles and
traffic lights, and other features as well. These features can later be
acquired from this class via a plethora of get methods (see
documentation).
This class uses network specific features to generate the necessary network
configuration files needed to initialize a simulation instance. The methods
of this class are called by the base scenario class.
The network files can be created in one of three ways:
* Custom networks can be generated by defining the properties of the
network's directed graph. This is done by defining the nodes and edges
properties using the ``specify_nodes`` and ``specify_edges`` methods,
respectively, as well as other properties via methods including
``specify_types``, ``specify_connections``, etc... For more on this,
see the tutorial on creating custom scenarios or refer to some of the
available scenarios.
* Scenario data can be collected from an OpenStreetMap (.osm) file. The
.osm file is specified in the NetParams object. For example:
>>> from flow.core.params import NetParams
>>> net_params = NetParams(osm_path='/path/to/osm_file.osm')
In this case, no ``specify_nodes`` and ``specify_edges`` methods are
needed. However, a ``specify_routes`` method is still needed to specify
the appropriate routes vehicles can traverse in the network.
* Scenario data can be collected from an sumo-specific network (.net.xml)
file. This file is specified in the NetParams object. For example:
>>> from flow.core.params import NetParams
>>> net_params = NetParams(template='/path/to/template')
In this case, no ``specify_nodes`` and ``specify_edges`` methods are
needed. However, a ``specify_routes`` method is still needed to specify
the appropriate routes vehicles can traverse in the network.
This class can be instantiated once and reused in multiple experiments.
Note that this function stores all the relevant parameters. The
generate() function still needs to be called separately.
Attributes
----------
orig_name : str
the variable provided under the `name` parameter to this object upon
instantiation
name : str
the variable provided under the `name` parameter to this object upon
instantiation, appended with a timestamp variable. This timestamp is
meant to differentiate generated scenario files during parallelism
vehicles : flow.core.params.VehicleParams
vehicle specific parameters, used to specify the types and number of
vehicles at the start of a simulation
net_params : flow.core.params.NetParams
network specific parameters, used primarily to identify properties of a
network such as the lengths of edges and the number of lanes in each
edge. This attribute is very network-specific, and should contain the
variables denoted by the `ADDITIONAL_NET_PARAMS` dict in each scenario
class file
initial_config : flow.core.params.InitialConfig
specifies parameters that affect the positioning of vehicle in the
network at the start of a simulation. For more, see flow/core/params.py
traffic_lights : flow.core.params.TrafficLightParams
used to describe the positions and types of traffic lights in the
network. For more, see flow/core/params.py
nodes : list of dict or None
list of nodes that are assigned to the scenario via the `specify_nodes`
method. All nodes in this variable are expected to have the following
properties:
* **name**: a unique identifier for the node
* **x**: x-coordinate of the node, in meters
* **y**: y-coordinate of the node, in meters
If the scenario is meant to generate the network from an OpenStreetMap
or template file, this variable is set to None
edges : list of dict or None
edges that are assigned to the scenario via the `specify_edges` method.
This include the shape, position, and properties of all edges in the
network. These properties include the following mandatory properties:
* **id**: name of the edge
* **from**: name of the node the edge starts from
* **to**: the name of the node the edges ends at
* **length**: length of the edge
In addition, either the following properties need to be specifically
defined or a **type** variable property must be defined with equivalent
attributes in `self.types`:
* **numLanes**: the number of lanes on the edge
* **speed**: the speed limit for vehicles on the edge
Moreover, the following attributes may optionally be available:
* **shape**: the positions of intermediary nodes used to define the
shape of an edge. If no shape is specified, then the edge will appear
as a straight line.
Note that, if the scenario is meant to generate the network from an
OpenStreetMap or template file, this variable is set to None
types : list of dict or None
A variable used to ease the definition of the properties of various
edges. Each element in the list consists of a dict consisting of the
following property:
* **id**: name of the edge type. Edges in the `self.edges` attribute
with a similar value under the "type" key will adopt the properties
of other components of this list, such as "speed" and "numLanes".
If the type variable is None, then no types are available within the
scenario. Furthermore, a proper example of this variable being used can
be found under `specify_types` in flow/scenarios/loop.py.
Note that, if the scenario is meant to generate the network from an
OpenStreetMap or template file, this variable is set to None
connections : list of dict or None
A variable used to describe how any specific node's incoming and
outgoing edges/lane pairs are connected. If no connections are
specified, sumo generates default connections.
If the connections attribute is set to None, then the connections
within the network will be specified by the simulator.
Note that, if the scenario is meant to generate the network from an
OpenStreetMap or template file, this variable is set to None
routes : dict
A variable whose keys are the starting edge of a specific route, and
whose values are the list of edges a vehicle is meant to traverse
starting from that edge. These are only applied at the start of a
simulation; vehicles are allowed to reroute within the environment
immediately afterwards.
edge_starts : list of (str, float)
a list of tuples in which the first element of the tuple is the name of
the edge/intersection/internal_link, and the second value is the
distance of the link from some global reference, i.e. [(link_0, pos_0),
(link_1, pos_1), ...]
internal_edge_starts : list of (str, float)
A variable similar to `edge_starts` but for junctions within the
network. If no junctions are available, this variable will return the
default variable: `[(':', -1)]` needed by sumo simulations.
intersection_edge_starts : list of (str, float)
A variable similar to `edge_starts` but for intersections within
the network. This variable will be deprecated in future releases.
Example
-------
The following examples are derived from the `LoopScenario` Scenario class
located in flow/scenarios/loop.py, and should serve as an example of the
types of outputs to be expected from the different variables of a scenario
class.
First of all, the ring road scenario class can be instantiated by running
the following commands (note if this this unclear please refer to Tutorial
1):
>>> from flow.scenarios import LoopScenario
>>> from flow.core.params import NetParams, VehicleParams
>>>
>>> scenario = LoopScenario(
>>> name='test',
>>> vehicles=VehicleParams(),
>>> net_params=NetParams(
>>> additional_params={
>>> 'length': 230,
>>> 'lanes': 1,
>>> 'speed_limit': 30,
>>> 'resolution': 40,
>>> }
>>> )
>>> )
The various attributes then look as follows:
>>> print(scenario.nodes)
>>> [{'id': 'bottom', 'x': '0', 'y': '-36.60563691113593'},
>>> {'id': 'right', 'x': '36.60563691113593', 'y': '0'},
>>> {'id': 'top', 'x': '0', 'y': '36.60563691113593'},
>>> {'id': 'left', 'x': '-36.60563691113593', 'y': '0'}]
>>> print(scenario.edges)
>>> [
>>> {'id': 'bottom',
>>> 'type': 'edgeType',
>>> 'from': 'bottom',
>>> 'to': 'right',
>>> 'length': '57.5',
>>> 'shape': '0.00,-36.61 1.47,-36.58 2.95,-36.49 4.41,-36.34 '
>>> '5.87,-36.13 7.32,-35.87 8.76,-35.54 10.18,-35.16 '
>>> '11.59,-34.72 12.98,-34.23 14.35,-33.68 15.69,-33.07 '
>>> '17.01,-32.41 18.30,-31.70 19.56,-30.94 20.79,-30.13 '
>>> '21.99,-29.26 23.15,-28.35 24.27,-27.40 25.36,-26.40 '
>>> '26.40,-25.36 27.40,-24.27 28.35,-23.15 29.26,-21.99 '
>>> '30.13,-20.79 30.94,-19.56 31.70,-18.30 32.41,-17.01 '
>>> '33.07,-15.69 33.68,-14.35 34.23,-12.98 34.72,-11.59 '
>>> '35.16,-10.18 35.54,-8.76 35.87,-7.32 36.13,-5.87 '
>>> '36.34,-4.41 36.49,-2.95 36.58,-1.47 36.61,0.00'
>>> },
>>> {'id': 'right',
>>> 'type': 'edgeType',
>>> 'from': 'right',
>>> 'to': 'top',
>>> 'length': '57.5',
>>> 'shape': '36.61,0.00 36.58,1.47 36.49,2.95 36.34,4.41 36.13,5.87 '
>>> '35.87,7.32 35.54,8.76 35.16,10.18 34.72,11.59 '
>>> '34.23,12.98 33.68,14.35 33.07,15.69 32.41,17.01 '
>>> '31.70,18.30 30.94,19.56 30.13,20.79 29.26,21.99 '
>>> '28.35,23.15 27.40,24.27 26.40,25.36 25.36,26.40 '
>>> '24.27,27.40 23.15,28.35 21.99,29.26 20.79,30.13 '
>>> '19.56,30.94 18.30,31.70 17.01,32.41 15.69,33.07 '
>>> '14.35,33.68 12.98,34.23 11.59,34.72 10.18,35.16 '
>>> '8.76,35.54 7.32,35.87 5.87,36.13 4.41,36.34 2.95,36.49 '
>>> '1.47,36.58 0.00,36.61'
>>> },
>>> {'id': 'top',
>>> 'type': 'edgeType',
>>> 'from': 'top',
>>> 'to': 'left',
>>> 'length': '57.5',
>>> 'shape': '0.00,36.61 -1.47,36.58 -2.95,36.49 -4.41,36.34 '
>>> '-5.87,36.13 -7.32,35.87 -8.76,35.54 -10.18,35.16 '
>>> '-11.59,34.72 -12.98,34.23 -14.35,33.68 -15.69,33.07 '
>>> '-17.01,32.41 -18.30,31.70 -19.56,30.94 -20.79,30.13 '
>>> '-21.99,29.26 -23.15,28.35 -24.27,27.40 -25.36,26.40 '
>>> '-26.40,25.36 -27.40,24.27 -28.35,23.15 -29.26,21.99 '
>>> '-30.13,20.79 -30.94,19.56 -31.70,18.30 -32.41,17.01 '
>>> '-33.07,15.69 -33.68,14.35 -34.23,12.98 -34.72,11.59 '
>>> '-35.16,10.18 -35.54,8.76 -35.87,7.32 -36.13,5.87 '
>>> '-36.34,4.41 -36.49,2.95 -36.58,1.47 -36.61,0.00'
>>> },
>>> {'id': 'left',
>>> 'type': 'edgeType',
>>> 'from': 'left',
>>> 'to': 'bottom',
>>> 'length': '57.5',
>>> 'shape': '-36.61,0.00 -36.58,-1.47 -36.49,-2.95 -36.34,-4.41 '
>>> '-36.13,-5.87 -35.87,-7.32 -35.54,-8.76 -35.16,-10.18 '
>>> '-34.72,-11.59 -34.23,-12.98 -33.68,-14.35 '
>>> '-33.07,-15.69 -32.41,-17.01 -31.70,-18.30 '
>>> '-30.94,-19.56 -30.13,-20.79 -29.26,-21.99 '
>>> '-28.35,-23.15 -27.40,-24.27 -26.40,-25.36 '
>>> '-25.36,-26.40 -24.27,-27.40 -23.15,-28.35 '
>>> '-21.99,-29.26 -20.79,-30.13 -19.56,-30.94 '
>>> '-18.30,-31.70 -17.01,-32.41 -15.69,-33.07 '
>>> '-14.35,-33.68 -12.98,-34.23 -11.59,-34.72 '
>>> '-10.18,-35.16 -8.76,-35.54 -7.32,-35.87 -5.87,-36.13 '
>>> '-4.41,-36.34 -2.95,-36.49 -1.47,-36.58 -0.00,-36.61'
>>> }
>>> ]
>>> print(scenario.types)
>>> [{'id': 'edgeType', 'numLanes': '1', 'speed': '30'}]
>>> print(scenario.connections)
>>> None
>>> print(scenario.routes)
>>> {
>>> 'top': ['top', 'left', 'bottom', 'right'],
>>> 'left': ['left', 'bottom', 'right', 'top'],
>>> 'bottom': ['bottom', 'right', 'top', 'left'],
>>> 'right': ['right', 'top', 'left', 'bottom']
>>> }
>>> print(scenario.edge_starts)
>>> [('bottom', 0), ('right', 57.5), ('top', 115.0), ('left', 172.5)]
Finally, the loop scenario does not contain any junctions or intersections,
and as a result the `internal_edge_starts` and `intersection_edge_starts`
attributes are both set to None. For an example of a network with junctions
and intersections, please refer to: flow/scenarios/figure_eight.py.
>>> print(scenario.internal_edge_starts)
>>> [(':', -1)]
>>> print(scenario.intersection_edge_starts)
>>> []
"""
def __init__(self,
name,
vehicles,
net_params,
initial_config=InitialConfig(),
traffic_lights=TrafficLightParams()):
"""Instantiate the base scenario class.
Attributes
----------
name : str
A tag associated with the scenario
vehicles : flow.core.params.VehicleParams
see flow/core/params.py
net_params : flow.core.params.NetParams
see flow/core/params.py
initial_config : flow.core.params.InitialConfig
see flow/core/params.py
traffic_lights : flow.core.params.TrafficLightParams
see flow/core/params.py
"""
# Invoke serializable if using rllab
if Serializable is not object:
Serializable.quick_init(self, locals())
self.orig_name = name # To avoid repeated concatenation upon reset
self.name = name + time.strftime('_%Y%m%d-%H%M%S') + str(time.time())
self.vehicles = vehicles
self.net_params = net_params
self.initial_config = initial_config
self.traffic_lights = traffic_lights
# specify routes vehicles can take
self.routes = self.specify_routes(net_params)
if net_params.template is None and net_params.osm_path is None:
# specify the attributes of the nodes
self.nodes = self.specify_nodes(net_params)
# collect the attributes of each edge
self.edges = self.specify_edges(net_params)
# specify the types attributes (default is None)
self.types = self.specify_types(net_params)
# specify the connection attributes (default is None)
self.connections = self.specify_connections(net_params)
# this is to be used if file paths other than the the network geometry
# file is specified
elif type(net_params.template) is dict:
if 'rou' in net_params.template:
veh, rou = self._vehicle_infos(net_params.template['rou'])
vtypes = self._vehicle_type(net_params.template.get('vtype'))
cf = self._get_cf_params(vtypes)
lc = self._get_lc_params(vtypes)
# add the vehicle types to the VehicleParams object
for t in vtypes:
vehicles.add(veh_id=t, car_following_params=cf[t],
lane_change_params=lc[t], num_vehicles=0)
# add the routes of the vehicles that will be departed later
# under the name of the vehicle. This will later be identified
# by k.vehicles._add_departed
self.routes = rou
# vehicles to be added with different departure times
self.template_vehicles = veh
self.types = None
self.nodes = None
self.edges = None
self.connections = None
# osm_path or template as type str
else:
self.nodes = None
self.edges = None
self.types = None
self.connections = None
# optional parameters, used to get positions from some global reference
self.edge_starts = self.specify_edge_starts()
self.internal_edge_starts = self.specify_internal_edge_starts()
self.intersection_edge_starts = [] # this will be deprecated
# TODO: convert to property
def specify_edge_starts(self):
"""Define edge starts for road sections in the network.
This is meant to provide some global reference frame for the road
edges in the network.
By default, the edge starts are specified from the network
configuration file. Note that, the values are arbitrary but do not
allow the positions of any two edges to overlap, thereby making them
compatible with all starting position methods for vehicles.
Returns
-------
list of (str, float)
list of edge names and starting positions,
ex: [(edge0, pos0), (edge1, pos1), ...]
"""
return None
# TODO: convert to property
def specify_internal_edge_starts(self):
"""Define the edge starts for internal edge nodes.
This is meant to provide some global reference frame for the internal
edges in the network.
These edges are the result of finite-length connections between road
sections. This methods does not need to be specified if "no-internal-
links" is set to True in net_params.
By default, all internal edge starts are given a position of -1. This
may be overridden; however, in general we do not worry about internal
edges and junctions in large networks.
Returns
-------
list of (str, float)
list of internal junction names and starting positions,
ex: [(internal0, pos0), (internal1, pos1), ...]
"""
return [(':', -1)]
# TODO: convert to property
def specify_nodes(self, net_params):
"""Specify the attributes of nodes in the network.
Parameters
----------
net_params : flow.core.params.NetParams
see flow/core/params.py
Returns
-------
list of dict
A list of node attributes (a separate dict for each node). Nodes
attributes must include:
* id {string} -- name of the node
* x {float} -- x coordinate of the node
* y {float} -- y coordinate of the node
Other attributes may also be specified. See:
http://sumo.dlr.de/wiki/Networks/Building_Networks_from_own_XML-descriptions#Node_Descriptions
"""
raise NotImplementedError
# TODO: convert to property
def specify_edges(self, net_params):
"""Specify the attributes of edges connecting pairs on nodes.
Parameters
----------
net_params : flow.core.params.NetParams
see flow/core/params.py
Returns
-------
list of dict
A list of edges attributes (a separate dict for each edge). Edge
attributes must include:
* id {string} -- name of the edge
* from {string} -- name of node the directed edge starts from
* to {string} -- name of the node the directed edge ends at
In addition, the attributes must contain at least one of the
following:
* "numLanes" {int} and "speed" {float} -- the number of lanes and
speed limit of the edge, respectively
* type {string} -- a type identifier for the edge, which can be
used if several edges are supposed to possess the same number of
lanes, speed limits, etc...
Other attributes may also be specified. See:
http://sumo.dlr.de/wiki/Networks/Building_Networks_from_own_XML-descriptions#Edge_Descriptions
"""
raise NotImplementedError
# TODO: convert to property
def specify_types(self, net_params):
"""Specify the attributes of various edge types (if any exist).
Parameters
----------
net_params : flow.core.params.NetParams
see flow/core/params.py
Returns
-------
list of dict
A list of type attributes for specific groups of edges. If none are
specified, no .typ.xml file is created.
For information on type attributes, see:
http://sumo.dlr.de/wiki/Networks/Building_Networks_from_own_XML-descriptions#Type_Descriptions
"""
return None
# TODO: convert to property
def specify_connections(self, net_params):
"""Specify the attributes of connections.
These attributes are used to describe how any specific node's incoming
and outgoing edges/lane pairs are connected. If no connections are
specified, sumo generates default connections.
Parameters
----------
net_params : flow.core.params.NetParams
see flow/core/params.py
Returns
-------
list of dict
A list of connection attributes. If none are specified, no .con.xml
file is created.
For information on type attributes, see:
http://sumo.dlr.de/wiki/Networks/Building_Networks_from_own_XML-descriptions#Connection_Descriptions
"""
return None
# TODO: convert to property
def specify_routes(self, net_params):
"""Specify the routes vehicles can take starting from any edge.
The routes are specified as lists of edges the vehicle must traverse,
with the first edge corresponding to the edge the vehicle begins on.
Note that the edges must be connected for the route to be valid. If
this method is not implemented, vehicles that enter a network are
assigned routes consisting solely on their current edges, and exit the
network once they reach the end of their edge.
Currently, only one route is allowed from any given starting edge.
Parameters
----------
net_params : flow.core.params.NetParams
see flow/core/params.py
Returns
-------
dict
Key = name of the starting edge
Element = list of edges a vehicle starting from this edge must
traverse.
"""
return None
@staticmethod
def gen_custom_start_pos(cls, net_params, initial_config, num_vehicles):
"""Generate a user defined set of starting positions.
Parameters
----------
cls : flow.core.kernel.scenario.KernelScenario
flow scenario kernel, with all the relevant methods implemented
net_params : flow.core.params.NetParams
network-specific parameters
initial_config : flow.core.params.InitialConfig
see flow/core/params.py
num_vehicles : int
number of vehicles to be placed on the network
Returns
-------
list of tuple (float, float)
list of start positions [(edge0, pos0), (edge1, pos1), ...]
list of int
list of start lanes
list of float
list of start speeds
"""
raise NotImplementedError
@staticmethod
def _vehicle_infos(file_names):
"""Import of vehicle from a configuration file.
This is a utility function for computing vehicle information. It
imports a network configuration file, and returns the information on
the vehicle and add it into the Vehicle object.
Parameters
----------
file_names : list of str
path to the xml file to load
Returns
-------
dict <dict>
* Key = id of the vehicle
* Element = dict of departure speed, vehicle type, depart Position,
depart edges
"""
# this is meant to deal with the case that there is only one rou file
if isinstance(file_names, str):
file_names = [file_names]
vehicle_data = dict()
routes_data = dict()
type_data = defaultdict(int)
for filename in file_names:
# import the .net.xml file containing all edge/type data
parser = etree.XMLParser(recover=True)
tree = ElementTree.parse(filename, parser=parser)
root = tree.getroot()
# collect the departure properties and routes and vehicles whose
# properties are instantiated within the .rou.xml file. This will
# only apply if such data is within the file (it is not implemented
# by scenarios in Flow).
for vehicle in root.findall('vehicle'):
# collect the edges the vehicle is meant to traverse
route = vehicle.find('route')
route_edges = route.attrib["edges"].split(' ')
# collect the names of each vehicle type and number of vehicles
# of each type
type_vehicle = vehicle.attrib['type']
type_data[type_vehicle] += 1
vehicle_data[vehicle.attrib['id']] = {
'departSpeed': vehicle.attrib['departSpeed'],
'depart': vehicle.attrib['depart'],
'typeID': type_vehicle,
'departPos': vehicle.attrib['departPos'],
}
routes_data[vehicle.attrib['id']] = route_edges
# collect the edges the vehicle is meant to traverse for the given
# sets of routes that are not associated with individual vehicles
for route in root.findall('route'):
route_edges = route.attrib["edges"].split(' ')
routes_data[route.attrib['id']] = route_edges
return vehicle_data, routes_data
@staticmethod
def _vehicle_type(filename):
"""Import vehicle type data from a *.add.xml file.
This is a utility function for outputting all the type of vehicle.
Parameters
----------
filename : str
path to the vtypes.add.xml file to load
Returns
-------
dict or None
the key is the vehicle_type id and the value is a dict we've type
of the vehicle, depart edges, depart Speed, departPos. If no
filename is provided, this method returns None as well.
"""
if filename is None:
return None
parser = etree.XMLParser(recover=True)
tree = ElementTree.parse(filename, parser=parser)
root = tree.getroot()
veh_type = {}
# this hack is meant to support the LuST scenario and Flow scenarios
root = [root] if len(root.findall('vTypeDistribution')) == 0 \
else root.findall('vTypeDistribution')
for r in root:
for vtype in r.findall('vType'):
# TODO: make for everything
veh_type[vtype.attrib['id']] = {
'vClass': vtype.attrib.get('vClass', DEFAULT_VCLASS),
'accel': vtype.attrib['accel'],
'decel': vtype.attrib['decel'],
'sigma': vtype.attrib['sigma'],
'length': vtype.attrib.get('length', DEFAULT_LENGTH),
'minGap': vtype.attrib['minGap'],
'maxSpeed': vtype.attrib['maxSpeed'],
'probability': vtype.attrib.get(
'probability', DEFAULT_PROBABILITY),
'speedDev': vtype.attrib['speedDev']
}
return veh_type
@staticmethod
def _get_cf_params(vtypes):
"""Return the car-following sumo params from vtypes."""
ret = {}
for typ in vtypes:
# TODO: add vClass
ret[typ] = SumoCarFollowingParams(
speed_mode='all_checks',
accel=float(vtypes[typ]['accel']),
decel=float(vtypes[typ]['decel']),
sigma=float(vtypes[typ]['sigma']),
length=float(vtypes[typ]['length']),
min_gap=float(vtypes[typ]['minGap']),
max_speed=float(vtypes[typ]['maxSpeed']),
probability=float(vtypes[typ]['probability']),
speed_dev=float(vtypes[typ]['speedDev'])
)
return ret
@staticmethod
def _get_lc_params(vtypes):
"""Return the lane change sumo params from vtypes."""
ret = {}
for typ in vtypes:
ret[typ] = SumoLaneChangeParams(lane_change_mode=1621)
return ret
def __str__(self):
"""Return the name of the scenario and the number of vehicles."""
return 'Scenario ' + self.name + ' with ' + \
str(self.vehicles.num_vehicles) + ' vehicles.'
| 40.930481 | 108 | 0.595179 |
from flow.core.params import InitialConfig
from flow.core.params import TrafficLightParams
from flow.core.params import SumoCarFollowingParams
from flow.core.params import SumoLaneChangeParams
import time
import xml.etree.ElementTree as ElementTree
from lxml import etree
from collections import defaultdict
try:
from rllab.core.serializable import Serializable
except ImportError:
Serializable = object
DEFAULT_PROBABILITY = 0
DEFAULT_LENGTH = 5
DEFAULT_VCLASS = 0
class Scenario(Serializable):
def __init__(self,
name,
vehicles,
net_params,
initial_config=InitialConfig(),
traffic_lights=TrafficLightParams()):
if Serializable is not object:
Serializable.quick_init(self, locals())
self.orig_name = name
self.name = name + time.strftime('_%Y%m%d-%H%M%S') + str(time.time())
self.vehicles = vehicles
self.net_params = net_params
self.initial_config = initial_config
self.traffic_lights = traffic_lights
self.routes = self.specify_routes(net_params)
if net_params.template is None and net_params.osm_path is None:
self.nodes = self.specify_nodes(net_params)
self.edges = self.specify_edges(net_params)
self.types = self.specify_types(net_params)
self.connections = self.specify_connections(net_params)
elif type(net_params.template) is dict:
if 'rou' in net_params.template:
veh, rou = self._vehicle_infos(net_params.template['rou'])
vtypes = self._vehicle_type(net_params.template.get('vtype'))
cf = self._get_cf_params(vtypes)
lc = self._get_lc_params(vtypes)
for t in vtypes:
vehicles.add(veh_id=t, car_following_params=cf[t],
lane_change_params=lc[t], num_vehicles=0)
self.routes = rou
self.template_vehicles = veh
self.types = None
self.nodes = None
self.edges = None
self.connections = None
else:
self.nodes = None
self.edges = None
self.types = None
self.connections = None
self.edge_starts = self.specify_edge_starts()
self.internal_edge_starts = self.specify_internal_edge_starts()
self.intersection_edge_starts = []
def specify_edge_starts(self):
return None
def specify_internal_edge_starts(self):
return [(':', -1)]
def specify_nodes(self, net_params):
raise NotImplementedError
def specify_edges(self, net_params):
raise NotImplementedError
def specify_types(self, net_params):
return None
def specify_connections(self, net_params):
return None
def specify_routes(self, net_params):
return None
@staticmethod
def gen_custom_start_pos(cls, net_params, initial_config, num_vehicles):
raise NotImplementedError
@staticmethod
def _vehicle_infos(file_names):
if isinstance(file_names, str):
file_names = [file_names]
vehicle_data = dict()
routes_data = dict()
type_data = defaultdict(int)
for filename in file_names:
parser = etree.XMLParser(recover=True)
tree = ElementTree.parse(filename, parser=parser)
root = tree.getroot()
for vehicle in root.findall('vehicle'):
route = vehicle.find('route')
route_edges = route.attrib["edges"].split(' ')
type_vehicle = vehicle.attrib['type']
type_data[type_vehicle] += 1
vehicle_data[vehicle.attrib['id']] = {
'departSpeed': vehicle.attrib['departSpeed'],
'depart': vehicle.attrib['depart'],
'typeID': type_vehicle,
'departPos': vehicle.attrib['departPos'],
}
routes_data[vehicle.attrib['id']] = route_edges
for route in root.findall('route'):
route_edges = route.attrib["edges"].split(' ')
routes_data[route.attrib['id']] = route_edges
return vehicle_data, routes_data
@staticmethod
def _vehicle_type(filename):
if filename is None:
return None
parser = etree.XMLParser(recover=True)
tree = ElementTree.parse(filename, parser=parser)
root = tree.getroot()
veh_type = {}
root = [root] if len(root.findall('vTypeDistribution')) == 0 \
else root.findall('vTypeDistribution')
for r in root:
for vtype in r.findall('vType'):
veh_type[vtype.attrib['id']] = {
'vClass': vtype.attrib.get('vClass', DEFAULT_VCLASS),
'accel': vtype.attrib['accel'],
'decel': vtype.attrib['decel'],
'sigma': vtype.attrib['sigma'],
'length': vtype.attrib.get('length', DEFAULT_LENGTH),
'minGap': vtype.attrib['minGap'],
'maxSpeed': vtype.attrib['maxSpeed'],
'probability': vtype.attrib.get(
'probability', DEFAULT_PROBABILITY),
'speedDev': vtype.attrib['speedDev']
}
return veh_type
@staticmethod
def _get_cf_params(vtypes):
ret = {}
for typ in vtypes:
ret[typ] = SumoCarFollowingParams(
speed_mode='all_checks',
accel=float(vtypes[typ]['accel']),
decel=float(vtypes[typ]['decel']),
sigma=float(vtypes[typ]['sigma']),
length=float(vtypes[typ]['length']),
min_gap=float(vtypes[typ]['minGap']),
max_speed=float(vtypes[typ]['maxSpeed']),
probability=float(vtypes[typ]['probability']),
speed_dev=float(vtypes[typ]['speedDev'])
)
return ret
@staticmethod
def _get_lc_params(vtypes):
ret = {}
for typ in vtypes:
ret[typ] = SumoLaneChangeParams(lane_change_mode=1621)
return ret
def __str__(self):
return 'Scenario ' + self.name + ' with ' + \
str(self.vehicles.num_vehicles) + ' vehicles.'
| true | true |
f7241d76045fd1214f48d0e54f8255fdc59b914f | 7,786 | py | Python | iotbx/data_manager/model.py | dermen/cctbx_project | 43bd136e4edce123ecc62197024f2e9b85d6b446 | [
"BSD-3-Clause-LBNL"
] | 2 | 2018-02-01T14:25:48.000Z | 2021-09-15T16:36:29.000Z | iotbx/data_manager/model.py | dermen/cctbx_project | 43bd136e4edce123ecc62197024f2e9b85d6b446 | [
"BSD-3-Clause-LBNL"
] | 2 | 2018-06-14T17:04:17.000Z | 2019-06-24T20:54:12.000Z | iotbx/data_manager/model.py | dermen/cctbx_project | 43bd136e4edce123ecc62197024f2e9b85d6b446 | [
"BSD-3-Clause-LBNL"
] | null | null | null | from __future__ import absolute_import, division, print_function
'''
'''
import iotbx.pdb
import mmtbx.model
from iotbx.file_reader import any_file
from iotbx.data_manager import DataManagerBase
from libtbx import Auto
from libtbx.utils import Sorry
# =============================================================================
class ModelDataManager(DataManagerBase):
datatype = 'model'
# ---------------------------------------------------------------------------
# Models
def add_model_phil_str(self):
'''
Add custom PHIL and storage for type
'''
# set up storage
# self._model_types = dict() # [filename] = type
self._model_types = dict()
self._default_model_type = 'x_ray'
self._possible_model_types = ['x_ray', 'neutron', 'electron']
# custom PHIL section
custom_phil_str = '''
model
.multiple = True
{
file = None
.type = path
.short_caption = Model file
.style = file_type:pdb input_file
type = *%s
.type = choice(multi=False)
}
''' % ' '.join(self._possible_model_types)
# custom PHIL scope
self._custom_model_phil = iotbx.phil.parse(custom_phil_str)
return custom_phil_str
def export_model_phil_extract(self):
'''
Export custom PHIL extract
'''
extract = list()
filenames = self.get_model_names()
for filename in filenames:
item_extract = self._custom_model_phil.extract().model[0]
item_extract.file = filename
item_extract.type = self._model_types.get(
filename, self._default_model_type)
extract.append(item_extract)
return extract
def load_model_phil_extract(self, phil_extract):
'''
Load custom PHIL extract
'''
extract = phil_extract.data_manager.model
for item_extract in extract:
if ((not hasattr(item_extract, 'file')) or
(not hasattr(item_extract, 'type'))):
raise Sorry('This PHIL is not properly defined for the "model" datatype.\n There should be a parameter for the filename ("file") and type ("type").\n')
# process file
self.process_model_file(item_extract.file)
self._model_types[item_extract.file] = item_extract.type
def add_model(self, filename, data):
return self._add(ModelDataManager.datatype, filename, data)
def set_default_model_type(self, model_type):
if (model_type not in self._possible_model_types):
raise Sorry('Unrecognized model type, "%s," possible choices are %s.' %
(model_type, ', '.join(self._possible_model_types)))
self._default_model_type = model_type
def get_default_model_type(self):
return self._default_model_type
def set_default_model(self, filename):
return self._set_default(ModelDataManager.datatype, filename)
def get_model(self, filename=None):
model = self._get(ModelDataManager.datatype, filename)
if (self.supports('restraint')):
restraint_objects = list()
for filename in self.get_restraint_names():
restraint_objects.append((filename, self.get_restraint(filename)))
model.set_restraint_objects(restraint_objects)
return model
def set_model_type(self, filename=None, model_type=None):
if (filename is None):
filename = self.get_default_model_name()
if (model_type is None):
model_type = self._default_model_type
elif (model_type not in self._possible_model_types):
raise Sorry('Unrecognized model type, "%s," possible choices are %s.' %
(model_type, ', '.join(self._possible_model_types)))
self._model_types[filename] = model_type
def get_model_type(self, filename=None):
if (filename is None):
filename = self.get_default_model_name()
return self._model_types.get(filename, self._default_model_type)
def get_model_names(self, model_type=None):
all_names = self._get_names(ModelDataManager.datatype)
names = list()
if (model_type is None):
names = all_names
else:
for filename in all_names:
if (model_type == self.get_model_type(filename)):
names.append(filename)
return names
  def get_default_model_name(self):
    '''
    Return the filename currently marked as the default model.
    '''
    return self._get_default_name(ModelDataManager.datatype)
  def remove_model(self, filename):
    '''
    Remove the model registered under filename from the DataManager.
    '''
    return self._remove(ModelDataManager.datatype, filename)
  def has_models(self, expected_n=1, exact_count=False, raise_sorry=False):
    '''
    Check that at least (or, with exact_count, exactly) expected_n models
    are loaded; optionally raise Sorry instead of returning False.
    '''
    return self._has_data(ModelDataManager.datatype, expected_n=expected_n,
                          exact_count=exact_count, raise_sorry=raise_sorry)
def process_model_file(self, filename):
# unique because any_file does not return a model object
if (filename not in self.get_model_names()):
a = any_file(filename)
if (a.file_type != 'pdb'):
raise Sorry('%s is not a recognized model file' % filename)
else:
model_in = iotbx.pdb.input(a.file_name)
expand_with_mtrix = True # default
if 'model_skip_expand_with_mtrix' in self.custom_options:
expand_with_mtrix = False
model = mmtbx.model.manager(
model_input=model_in,
expand_with_mtrix=expand_with_mtrix,
log=self.logger)
self.add_model(filename, model)
  def process_model_str(self, label, model_str):
    '''
    Build a model from an in-memory string and register it under label.
    '''
    model = mmtbx.model.manager(
      model_input=iotbx.pdb.input(source_info=None, lines=model_str),
      log=self.logger)
    self.add_model(label, model)
def get_default_output_model_filename(self, extension=Auto):
'''
Function for returning the filename with extension. By default ".cif" will
be used.
'''
filename = self.get_default_output_filename()
if extension is Auto:
extension = '.cif'
if not (filename.endswith('.cif') or filename.endswith('.pdb')):
filename += extension
return filename
def write_model_file(self, model_str, filename=Auto, extension=Auto,
format=Auto, overwrite=Auto):
'''
Function for writing a model to file
Parameters
----------
model_str: str or mmtbx.model.manager object
The string to be written or a model object. If a model object is
provided, the format (PDB or mmCIF) of the original file is kept
unless specified with format below
filename: str or Auto
The output filename. If set to Auto, a default filename is
generated based on params.output.prefix, params.output.suffix,
and params.output.serial
extension: str or Auto
The extension to be added. If set to Auto, defaults to .cif
format: pdb or cif or Auto. If set to Auto, defaults to format of
original file.
overwrite: bool or Auto
Overwrite filename if it exists. If set to Auto, the overwrite
state of the DataManager is used.
Returns
-------
filename: str
The actual output filename. This may differ from the
get_default_output_model_filename function since that sets the
extension to cif by default. This function may alter the extension
based on the desired format.
'''
if isinstance(model_str, mmtbx.model.manager):
if format == 'cif' or (
format is Auto and model_str.input_model_format_cif()):
extension = '.cif'
model_str = model_str.model_as_mmcif()
else:
extension = '.pdb'
model_str = model_str.model_as_pdb()
if filename is Auto:
filename = self.get_default_output_model_filename(extension=extension)
elif extension is not Auto and (not filename.endswith(extension)):
filename += extension
return self._write_text(ModelDataManager.datatype, model_str,
filename=filename, overwrite=overwrite)
# =============================================================================
# end
| 35.230769 | 159 | 0.666581 | from __future__ import absolute_import, division, print_function
import iotbx.pdb
import mmtbx.model
from iotbx.file_reader import any_file
from iotbx.data_manager import DataManagerBase
from libtbx import Auto
from libtbx.utils import Sorry
class ModelDataManager(DataManagerBase):
datatype = 'model'
def add_model_phil_str(self):
pes = dict()
self._default_model_type = 'x_ray'
self._possible_model_types = ['x_ray', 'neutron', 'electron']
custom_phil_str = '''
model
.multiple = True
{
file = None
.type = path
.short_caption = Model file
.style = file_type:pdb input_file
type = *%s
.type = choice(multi=False)
}
''' % ' '.join(self._possible_model_types)
self._custom_model_phil = iotbx.phil.parse(custom_phil_str)
return custom_phil_str
def export_model_phil_extract(self):
extract = list()
filenames = self.get_model_names()
for filename in filenames:
item_extract = self._custom_model_phil.extract().model[0]
item_extract.file = filename
item_extract.type = self._model_types.get(
filename, self._default_model_type)
extract.append(item_extract)
return extract
def load_model_phil_extract(self, phil_extract):
extract = phil_extract.data_manager.model
for item_extract in extract:
if ((not hasattr(item_extract, 'file')) or
(not hasattr(item_extract, 'type'))):
raise Sorry('This PHIL is not properly defined for the "model" datatype.\n There should be a parameter for the filename ("file") and type ("type").\n')
self.process_model_file(item_extract.file)
self._model_types[item_extract.file] = item_extract.type
def add_model(self, filename, data):
return self._add(ModelDataManager.datatype, filename, data)
def set_default_model_type(self, model_type):
if (model_type not in self._possible_model_types):
raise Sorry('Unrecognized model type, "%s," possible choices are %s.' %
(model_type, ', '.join(self._possible_model_types)))
self._default_model_type = model_type
def get_default_model_type(self):
return self._default_model_type
def set_default_model(self, filename):
return self._set_default(ModelDataManager.datatype, filename)
def get_model(self, filename=None):
model = self._get(ModelDataManager.datatype, filename)
if (self.supports('restraint')):
restraint_objects = list()
for filename in self.get_restraint_names():
restraint_objects.append((filename, self.get_restraint(filename)))
model.set_restraint_objects(restraint_objects)
return model
def set_model_type(self, filename=None, model_type=None):
if (filename is None):
filename = self.get_default_model_name()
if (model_type is None):
model_type = self._default_model_type
elif (model_type not in self._possible_model_types):
raise Sorry('Unrecognized model type, "%s," possible choices are %s.' %
(model_type, ', '.join(self._possible_model_types)))
self._model_types[filename] = model_type
def get_model_type(self, filename=None):
if (filename is None):
filename = self.get_default_model_name()
return self._model_types.get(filename, self._default_model_type)
def get_model_names(self, model_type=None):
all_names = self._get_names(ModelDataManager.datatype)
names = list()
if (model_type is None):
names = all_names
else:
for filename in all_names:
if (model_type == self.get_model_type(filename)):
names.append(filename)
return names
def get_default_model_name(self):
return self._get_default_name(ModelDataManager.datatype)
def remove_model(self, filename):
return self._remove(ModelDataManager.datatype, filename)
def has_models(self, expected_n=1, exact_count=False, raise_sorry=False):
return self._has_data(ModelDataManager.datatype, expected_n=expected_n,
exact_count=exact_count, raise_sorry=raise_sorry)
def process_model_file(self, filename):
if (filename not in self.get_model_names()):
a = any_file(filename)
if (a.file_type != 'pdb'):
raise Sorry('%s is not a recognized model file' % filename)
else:
model_in = iotbx.pdb.input(a.file_name)
expand_with_mtrix = True
if 'model_skip_expand_with_mtrix' in self.custom_options:
expand_with_mtrix = False
model = mmtbx.model.manager(
model_input=model_in,
expand_with_mtrix=expand_with_mtrix,
log=self.logger)
self.add_model(filename, model)
def process_model_str(self, label, model_str):
model = mmtbx.model.manager(
model_input=iotbx.pdb.input(source_info=None, lines=model_str),
log=self.logger)
self.add_model(label, model)
def get_default_output_model_filename(self, extension=Auto):
filename = self.get_default_output_filename()
if extension is Auto:
extension = '.cif'
if not (filename.endswith('.cif') or filename.endswith('.pdb')):
filename += extension
return filename
def write_model_file(self, model_str, filename=Auto, extension=Auto,
format=Auto, overwrite=Auto):
if isinstance(model_str, mmtbx.model.manager):
if format == 'cif' or (
format is Auto and model_str.input_model_format_cif()):
extension = '.cif'
model_str = model_str.model_as_mmcif()
else:
extension = '.pdb'
model_str = model_str.model_as_pdb()
if filename is Auto:
filename = self.get_default_output_model_filename(extension=extension)
elif extension is not Auto and (not filename.endswith(extension)):
filename += extension
return self._write_text(ModelDataManager.datatype, model_str,
filename=filename, overwrite=overwrite)
| true | true |
f7241dd029a49d81d312da3966adf13ebafac0c7 | 2,779 | py | Python | tests/test_issues/output/issue_113.py | rajshruti18/biolinkml | 451e71c9d3fd11aa3b08c6a713d9ab8b127ece77 | [
"CC0-1.0"
] | null | null | null | tests/test_issues/output/issue_113.py | rajshruti18/biolinkml | 451e71c9d3fd11aa3b08c6a713d9ab8b127ece77 | [
"CC0-1.0"
] | null | null | null | tests/test_issues/output/issue_113.py | rajshruti18/biolinkml | 451e71c9d3fd11aa3b08c6a713d9ab8b127ece77 | [
"CC0-1.0"
] | null | null | null | # Auto generated from issue_113.yaml by pythongen.py version: 0.4.0
# Generation date: 2020-08-04 09:37
# Schema: schema
#
# id: https://microbiomedata/schema
# description:
# license: https://creativecommons.org/publicdomain/zero/1.0/
import dataclasses
import sys
from typing import Optional, List, Union, Dict, ClassVar, Any
from dataclasses import dataclass
from biolinkml.utils.slot import Slot
from biolinkml.utils.metamodelcore import empty_list, empty_dict, bnode
from biolinkml.utils.yamlutils import YAMLRoot, extended_str, extended_float, extended_int
if sys.version_info < (3, 7, 6):
from biolinkml.utils.dataclass_extensions_375 import dataclasses_init_fn_with_kwargs
else:
from biolinkml.utils.dataclass_extensions_376 import dataclasses_init_fn_with_kwargs
from biolinkml.utils.formatutils import camelcase, underscore, sfx
from rdflib import Namespace, URIRef
from biolinkml.utils.curienamespace import CurieNamespace
from includes.types import String
metamodel_version = "1.5.3"
# Overwrite dataclasses _init_fn to add **kwargs in __init__
dataclasses._init_fn = dataclasses_init_fn_with_kwargs
# Namespaces
BIOLINKML = CurieNamespace('biolinkml', 'https://w3id.org/biolink/biolinkml/')
DEFAULT_ = CurieNamespace('', 'https://microbiomedata/schema/')
# Types
# Class references
class NamedThing(YAMLRoot):
    """Auto-generated class for the schema's "named thing" definition."""
    _inherited_slots: ClassVar[List[str]] = []
    # LinkML class-identity metadata emitted by pythongen
    class_class_uri: ClassVar[URIRef] = URIRef("https://microbiomedata/schema/NamedThing")
    class_class_curie: ClassVar[str] = None
    class_name: ClassVar[str] = "named thing"
    class_model_uri: ClassVar[URIRef] = URIRef("https://microbiomedata/schema/NamedThing")
@dataclass
class TestClass(YAMLRoot):
    """Auto-generated dataclass for the schema's "test class" definition."""
    _inherited_slots: ClassVar[List[str]] = []
    # LinkML class-identity metadata emitted by pythongen
    class_class_uri: ClassVar[URIRef] = URIRef("https://microbiomedata/schema/TestClass")
    class_class_curie: ClassVar[str] = None
    class_name: ClassVar[str] = "test class"
    class_model_uri: ClassVar[URIRef] = URIRef("https://microbiomedata/schema/TestClass")
    # instance slots; both optional strings
    test_attribute_1: Optional[str] = None
    test_attribute_2: Optional[str] = None
# Slots
class slots:
    """Namespace holder; generated Slot definitions are attached to it as attributes."""
    pass
slots.attribute = Slot(uri=DEFAULT_.attribute, name="attribute", curie=DEFAULT_.curie('attribute'),
model_uri=DEFAULT_.attribute, domain=NamedThing, range=Optional[str])
slots.test_attribute_1 = Slot(uri=DEFAULT_.test_attribute_1, name="test attribute 1", curie=DEFAULT_.curie('test_attribute_1'),
model_uri=DEFAULT_.test_attribute_1, domain=NamedThing, range=Optional[str])
slots.test_attribute_2 = Slot(uri=DEFAULT_.test_attribute_2, name="test attribute 2", curie=DEFAULT_.curie('test_attribute_2'),
model_uri=DEFAULT_.test_attribute_2, domain=None, range=Optional[str]) | 37.554054 | 127 | 0.766103 |
import dataclasses
import sys
from typing import Optional, List, Union, Dict, ClassVar, Any
from dataclasses import dataclass
from biolinkml.utils.slot import Slot
from biolinkml.utils.metamodelcore import empty_list, empty_dict, bnode
from biolinkml.utils.yamlutils import YAMLRoot, extended_str, extended_float, extended_int
if sys.version_info < (3, 7, 6):
from biolinkml.utils.dataclass_extensions_375 import dataclasses_init_fn_with_kwargs
else:
from biolinkml.utils.dataclass_extensions_376 import dataclasses_init_fn_with_kwargs
from biolinkml.utils.formatutils import camelcase, underscore, sfx
from rdflib import Namespace, URIRef
from biolinkml.utils.curienamespace import CurieNamespace
from includes.types import String
metamodel_version = "1.5.3"
dataclasses._init_fn = dataclasses_init_fn_with_kwargs
BIOLINKML = CurieNamespace('biolinkml', 'https://w3id.org/biolink/biolinkml/')
DEFAULT_ = CurieNamespace('', 'https://microbiomedata/schema/')
class NamedThing(YAMLRoot):
_inherited_slots: ClassVar[List[str]] = []
class_class_uri: ClassVar[URIRef] = URIRef("https://microbiomedata/schema/NamedThing")
class_class_curie: ClassVar[str] = None
class_name: ClassVar[str] = "named thing"
class_model_uri: ClassVar[URIRef] = URIRef("https://microbiomedata/schema/NamedThing")
@dataclass
class TestClass(YAMLRoot):
_inherited_slots: ClassVar[List[str]] = []
class_class_uri: ClassVar[URIRef] = URIRef("https://microbiomedata/schema/TestClass")
class_class_curie: ClassVar[str] = None
class_name: ClassVar[str] = "test class"
class_model_uri: ClassVar[URIRef] = URIRef("https://microbiomedata/schema/TestClass")
test_attribute_1: Optional[str] = None
test_attribute_2: Optional[str] = None
class slots:
pass
slots.attribute = Slot(uri=DEFAULT_.attribute, name="attribute", curie=DEFAULT_.curie('attribute'),
model_uri=DEFAULT_.attribute, domain=NamedThing, range=Optional[str])
slots.test_attribute_1 = Slot(uri=DEFAULT_.test_attribute_1, name="test attribute 1", curie=DEFAULT_.curie('test_attribute_1'),
model_uri=DEFAULT_.test_attribute_1, domain=NamedThing, range=Optional[str])
slots.test_attribute_2 = Slot(uri=DEFAULT_.test_attribute_2, name="test attribute 2", curie=DEFAULT_.curie('test_attribute_2'),
model_uri=DEFAULT_.test_attribute_2, domain=None, range=Optional[str]) | true | true |
f72420fba96513d94ff7d9c9661f5608d4400556 | 125,643 | py | Python | mrcnn/model.py | jongwookyi/Mask_RCNN | 9a26fa067a2087dbdf07f21a43dc2aa872ffe059 | [
"MIT"
] | null | null | null | mrcnn/model.py | jongwookyi/Mask_RCNN | 9a26fa067a2087dbdf07f21a43dc2aa872ffe059 | [
"MIT"
] | null | null | null | mrcnn/model.py | jongwookyi/Mask_RCNN | 9a26fa067a2087dbdf07f21a43dc2aa872ffe059 | [
"MIT"
] | null | null | null | """
Mask R-CNN
The main Mask R-CNN model implementation.
Copyright (c) 2017 Matterport, Inc.
Licensed under the MIT License (see LICENSE for details)
Written by Waleed Abdulla
"""
import os
import datetime
import re
import math
from collections import OrderedDict
import multiprocessing
import numpy as np
import tensorflow as tf
import tensorflow.keras as keras
import tensorflow.keras.backend as K
import tensorflow.keras.layers as KL
import tensorflow.keras.layers as KE
import tensorflow.keras.utils as KU
import tensorflow.keras.models as KM
from mrcnn import utils
# Requires TensorFlow 2.0+
from distutils.version import LooseVersion
assert LooseVersion(tf.__version__) >= LooseVersion("2.0")
############################################################
# Utility Functions
############################################################
def log(text, array=None):
    """Print *text*; when a Numpy array is supplied, append its shape,
    min/max values, and dtype on the same line.
    """
    if array is None:
        print(text)
        return
    line = text.ljust(25)
    line += ("shape: {:20} ".format(str(array.shape)))
    if array.size:
        line += ("min: {:10.5f} max: {:10.5f}".format(array.min(), array.max()))
    else:
        # empty array: min/max are undefined, print blanks of the same width
        line += ("min: {:10} max: {:10}".format("", ""))
    line += " {}".format(array.dtype)
    print(line)
class BatchNorm(KL.BatchNormalization):
    """Extends the Keras BatchNormalization class to allow a central place
    to make changes if needed.
    Batch normalization has a negative effect on training if batches are small
    so this layer is often frozen (via setting in Config class) and functions
    as linear layer.
    """
    def call(self, inputs, training=None):
        """
        Note about training values:
            None: Train BN layers. This is the normal mode
            False: Freeze BN layers. Good when batch size is small
            True: (don't use). Set layer in training mode even when making inferences
        """
        # NOTE(review): super(self.__class__, self) recurses infinitely if this
        # class is ever subclassed; plain super() would be safer — confirm no
        # subclasses exist before relying on this.
        return super(self.__class__, self).call(inputs, training=training)
def compute_backbone_shapes(config, image_shape):
    """Computes the width and height of each stage of the backbone network.

    Returns:
        [N, (height, width)]. Where N is the number of stages
    """
    # A callable backbone must supply its own shape computation
    if callable(config.BACKBONE):
        return config.COMPUTE_BACKBONE_SHAPE(image_shape)
    # Currently supports ResNet only
    assert config.BACKBONE in ["resnet50", "resnet101"]
    height, width = image_shape[0], image_shape[1]
    stage_shapes = []
    for stride in config.BACKBONE_STRIDES:
        stage_shapes.append([int(math.ceil(height / stride)),
                             int(math.ceil(width / stride))])
    return np.array(stage_shapes)
############################################################
# Resnet Graph
############################################################
# Code adopted from:
# https://github.com/fchollet/deep-learning-models/blob/master/resnet50.py
def identity_block(input_tensor, kernel_size, filters, stage, block,
                   use_bias=True, train_bn=True):
    """Bottleneck residual block whose shortcut is the identity (no conv).

    input_tensor: input tensor
    kernel_size: default 3, the kernel size of middle conv layer at main path
    filters: list of integers, the nb_filters of 3 conv layer at main path
    stage: integer, current stage label, used for generating layer names
    block: 'a','b'..., current block label, used for generating layer names
    use_bias: Boolean. To use or not use a bias in conv layers.
    train_bn: Boolean. Train or freeze Batch Norm layers
    """
    f1, f2, f3 = filters
    base = 'res' + str(stage) + block + '_branch'
    bn_base = 'bn' + str(stage) + block + '_branch'
    # main path: 1x1 reduce -> kxk -> 1x1 expand
    y = KL.Conv2D(f1, (1, 1), name=base + '2a',
                  use_bias=use_bias)(input_tensor)
    y = BatchNorm(name=bn_base + '2a')(y, training=train_bn)
    y = KL.Activation('relu')(y)
    y = KL.Conv2D(f2, (kernel_size, kernel_size), padding='same',
                  name=base + '2b', use_bias=use_bias)(y)
    y = BatchNorm(name=bn_base + '2b')(y, training=train_bn)
    y = KL.Activation('relu')(y)
    y = KL.Conv2D(f3, (1, 1), name=base + '2c',
                  use_bias=use_bias)(y)
    y = BatchNorm(name=bn_base + '2c')(y, training=train_bn)
    # add the untouched input (identity shortcut) and activate
    y = KL.Add()([y, input_tensor])
    return KL.Activation('relu', name='res' + str(stage) + block + '_out')(y)
def conv_block(input_tensor, kernel_size, filters, stage, block,
               strides=(2, 2), use_bias=True, train_bn=True):
    """Bottleneck residual block with a 1x1 conv projection on the shortcut.

    input_tensor: input tensor
    kernel_size: default 3, the kernel size of middle conv layer at main path
    filters: list of integers, the nb_filters of 3 conv layer at main path
    stage: integer, current stage label, used for generating layer names
    block: 'a','b'..., current block label, used for generating layer names
    use_bias: Boolean. To use or not use a bias in conv layers.
    train_bn: Boolean. Train or freeze Batch Norm layers

    Note that from stage 3, the first conv layer at main path is with
    subsample=(2,2), and the shortcut has subsample=(2,2) as well.
    """
    f1, f2, f3 = filters
    base = 'res' + str(stage) + block + '_branch'
    bn_base = 'bn' + str(stage) + block + '_branch'
    # main path: strided 1x1 reduce -> kxk -> 1x1 expand
    y = KL.Conv2D(f1, (1, 1), strides=strides,
                  name=base + '2a', use_bias=use_bias)(input_tensor)
    y = BatchNorm(name=bn_base + '2a')(y, training=train_bn)
    y = KL.Activation('relu')(y)
    y = KL.Conv2D(f2, (kernel_size, kernel_size), padding='same',
                  name=base + '2b', use_bias=use_bias)(y)
    y = BatchNorm(name=bn_base + '2b')(y, training=train_bn)
    y = KL.Activation('relu')(y)
    y = KL.Conv2D(f3, (1, 1), name=base + '2c', use_bias=use_bias)(y)
    y = BatchNorm(name=bn_base + '2c')(y, training=train_bn)
    # shortcut path: strided 1x1 projection so shapes match for the add
    shortcut = KL.Conv2D(f3, (1, 1), strides=strides,
                         name=base + '1', use_bias=use_bias)(input_tensor)
    shortcut = BatchNorm(name=bn_base + '1')(shortcut, training=train_bn)
    y = KL.Add()([y, shortcut])
    return KL.Activation('relu', name='res' + str(stage) + block + '_out')(y)
def resnet_graph(input_image, architecture, stage5=False, train_bn=True):
    """Build a ResNet graph.
    architecture: Can be resnet50 or resnet101
    stage5: Boolean. If False, stage5 of the network is not created
    train_bn: Boolean. Train or freeze Batch Norm layers
    """
    assert architecture in ["resnet50", "resnet101"]
    # Stage 1: 7x7 stride-2 conv followed by a stride-2 max pool
    x = KL.ZeroPadding2D((3, 3))(input_image)
    x = KL.Conv2D(64, (7, 7), strides=(2, 2), name='conv1', use_bias=True)(x)
    x = BatchNorm(name='bn_conv1')(x, training=train_bn)
    x = KL.Activation('relu')(x)
    C1 = x = KL.MaxPooling2D((3, 3), strides=(2, 2), padding="same")(x)
    # Stage 2: projection block (stride 1) + two identity blocks
    x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1), train_bn=train_bn)
    for blk in 'bc':
        x = identity_block(x, 3, [64, 64, 256], stage=2, block=blk, train_bn=train_bn)
    C2 = x
    # Stage 3: projection block + three identity blocks
    x = conv_block(x, 3, [128, 128, 512], stage=3, block='a', train_bn=train_bn)
    for blk in 'bcd':
        x = identity_block(x, 3, [128, 128, 512], stage=3, block=blk, train_bn=train_bn)
    C3 = x
    # Stage 4: projection block + 5 (resnet50) or 22 (resnet101) identity
    # blocks, labeled 'b', 'c', ... via chr(98 + i)
    x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a', train_bn=train_bn)
    block_count = {"resnet50": 5, "resnet101": 22}[architecture]
    for i in range(block_count):
        x = identity_block(x, 3, [256, 256, 1024], stage=4, block=chr(98 + i), train_bn=train_bn)
    C4 = x
    # Stage 5 (optional): projection block + two identity blocks
    if stage5:
        x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a', train_bn=train_bn)
        for blk in 'bc':
            x = identity_block(x, 3, [512, 512, 2048], stage=5, block=blk, train_bn=train_bn)
        C5 = x
    else:
        C5 = None
    return [C1, C2, C3, C4, C5]
############################################################
# Proposal Layer
############################################################
def apply_box_deltas_graph(boxes, deltas):
    """Applies the given deltas to the given boxes.
    boxes: [N, (y1, x1, y2, x2)] boxes to update
    deltas: [N, (dy, dx, log(dh), log(dw))] refinements to apply
    """
    # Switch to center/size parameterization
    h = boxes[:, 2] - boxes[:, 0]
    w = boxes[:, 3] - boxes[:, 1]
    cy = boxes[:, 0] + 0.5 * h
    cx = boxes[:, 1] + 0.5 * w
    # Shift the center and rescale the size
    cy = cy + deltas[:, 0] * h
    cx = cx + deltas[:, 1] * w
    h = h * tf.exp(deltas[:, 2])
    w = w * tf.exp(deltas[:, 3])
    # Back to corner parameterization
    y1 = cy - 0.5 * h
    x1 = cx - 0.5 * w
    y2 = y1 + h
    x2 = x1 + w
    return tf.stack([y1, x1, y2, x2], axis=1, name="apply_box_deltas_out")
def clip_boxes_graph(boxes, window):
    """Clip boxes to lie inside window.
    boxes: [N, (y1, x1, y2, x2)]
    window: [4] in the form y1, x1, y2, x2
    """
    wy1, wx1, wy2, wx2 = tf.split(window, 4)
    y1, x1, y2, x2 = tf.split(boxes, 4, axis=1)

    def clamp(v, lo, hi):
        # restrict v to [lo, hi]
        return tf.maximum(tf.minimum(v, hi), lo)

    clipped = tf.concat([clamp(y1, wy1, wy2),
                         clamp(x1, wx1, wx2),
                         clamp(y2, wy1, wy2),
                         clamp(x2, wx1, wx2)],
                        axis=1, name="clipped_boxes")
    clipped.set_shape((clipped.shape[0], 4))
    return clipped
class ProposalLayer(KE.Layer):
    """Receives anchor scores and selects a subset to pass as proposals
    to the second stage. Filtering is done based on anchor scores and
    non-max suppression to remove overlaps. It also applies bounding
    box refinement deltas to anchors.
    Inputs:
        rpn_probs: [batch, num_anchors, (bg prob, fg prob)]
        rpn_bbox: [batch, num_anchors, (dy, dx, log(dh), log(dw))]
        anchors: [batch, num_anchors, (y1, x1, y2, x2)] anchors in normalized coordinates
    Returns:
        Proposals in normalized coordinates [batch, rois, (y1, x1, y2, x2)]
    """
    def __init__(self, proposal_count, nms_threshold, config=None, **kwargs):
        """proposal_count: max number of proposals kept (output is zero-padded
        up to this count). nms_threshold: IoU threshold for non-max suppression.
        """
        super(ProposalLayer, self).__init__(**kwargs)
        self.config = config
        self.proposal_count = proposal_count
        self.nms_threshold = nms_threshold
    def call(self, inputs):
        """Select, refine, clip, and NMS-filter anchors into proposals."""
        # Box Scores. Use the foreground class confidence. [Batch, num_rois, 1]
        scores = inputs[0][:, :, 1]
        # Box deltas [batch, num_rois, 4]; undo the training-time normalization
        deltas = inputs[1]
        deltas = deltas * np.reshape(self.config.RPN_BBOX_STD_DEV, [1, 1, 4])
        # Anchors
        anchors = inputs[2]
        # Improve performance by trimming to top anchors by score
        # and doing the rest on the smaller subset.
        pre_nms_limit = tf.minimum(self.config.PRE_NMS_LIMIT, tf.shape(anchors)[1])
        ix = tf.nn.top_k(scores, pre_nms_limit, sorted=True,
                         name="top_anchors").indices
        # batch_slice applies the per-image gather to each batch element
        scores = utils.batch_slice([scores, ix], lambda x, y: tf.gather(x, y),
                                   self.config.IMAGES_PER_GPU)
        deltas = utils.batch_slice([deltas, ix], lambda x, y: tf.gather(x, y),
                                   self.config.IMAGES_PER_GPU)
        pre_nms_anchors = utils.batch_slice([anchors, ix], lambda a, x: tf.gather(a, x),
                                            self.config.IMAGES_PER_GPU,
                                            names=["pre_nms_anchors"])
        # Apply deltas to anchors to get refined anchors.
        # [batch, N, (y1, x1, y2, x2)]
        boxes = utils.batch_slice([pre_nms_anchors, deltas],
                                  lambda x, y: apply_box_deltas_graph(x, y),
                                  self.config.IMAGES_PER_GPU,
                                  names=["refined_anchors"])
        # Clip to image boundaries. Since we're in normalized coordinates,
        # clip to 0..1 range. [batch, N, (y1, x1, y2, x2)]
        window = np.array([0, 0, 1, 1], dtype=np.float32)
        boxes = utils.batch_slice(boxes,
                                  lambda x: clip_boxes_graph(x, window),
                                  self.config.IMAGES_PER_GPU,
                                  names=["refined_anchors_clipped"])
        # Filter out small boxes
        # According to Xinlei Chen's paper, this reduces detection accuracy
        # for small objects, so we're skipping it.
        # Non-max suppression
        def nms(boxes, scores):
            # Keep at most proposal_count boxes per image after NMS
            indices = tf.image.non_max_suppression(
                boxes, scores, self.proposal_count,
                self.nms_threshold, name="rpn_non_max_suppression")
            proposals = tf.gather(boxes, indices)
            # Pad if needed so every image yields exactly proposal_count rows
            padding = tf.maximum(self.proposal_count - tf.shape(proposals)[0], 0)
            proposals = tf.pad(proposals, [(0, padding), (0, 0)])
            return proposals
        proposals = utils.batch_slice([boxes, scores], nms,
                                      self.config.IMAGES_PER_GPU)
        return proposals
    def compute_output_shape(self, input_shape):
        # Fixed-size output thanks to the zero padding in nms() above
        return (None, self.proposal_count, 4)
############################################################
# ROIAlign Layer
############################################################
def log2_graph(x):
    """Compute log base 2 of x (TF has no native log2 implementation)."""
    numerator = tf.math.log(x)
    denominator = tf.math.log(2.0)
    return numerator / denominator
class PyramidROIAlign(KE.Layer):
    """Implements ROI Pooling on multiple levels of the feature pyramid.
    Params:
    - pool_shape: [pool_height, pool_width] of the output pooled regions. Usually [7, 7]
    Inputs:
    - boxes: [batch, num_boxes, (y1, x1, y2, x2)] in normalized
             coordinates. Possibly padded with zeros if not enough
             boxes to fill the array.
    - image_meta: [batch, (meta data)] Image details. See compose_image_meta()
    - feature_maps: List of feature maps from different levels of the pyramid.
                    Each is [batch, height, width, channels]
    Output:
    Pooled regions in the shape: [batch, num_boxes, pool_height, pool_width, channels].
    The width and height are those specific in the pool_shape in the layer
    constructor.
    """
    def __init__(self, pool_shape, **kwargs):
        """pool_shape: [pool_height, pool_width] of the pooled output."""
        super(PyramidROIAlign, self).__init__(**kwargs)
        self.pool_shape = tuple(pool_shape)
    def call(self, inputs):
        """Assign each box to a pyramid level, crop+resize its features, and
        return the pooled tensor in the boxes' original order."""
        # Crop boxes [batch, num_boxes, (y1, x1, y2, x2)] in normalized coords
        boxes = inputs[0]
        # Image meta
        # Holds details about the image. See compose_image_meta()
        image_meta = inputs[1]
        # Feature Maps. List of feature maps from different level of the
        # feature pyramid. Each is [batch, height, width, channels]
        feature_maps = inputs[2:]
        # Assign each ROI to a level in the pyramid based on the ROI area.
        y1, x1, y2, x2 = tf.split(boxes, 4, axis=2)
        h = y2 - y1
        w = x2 - x1
        # Use shape of first image. Images in a batch must have the same size.
        image_shape = parse_image_meta_graph(image_meta)['image_shape'][0]
        # Equation 1 in the Feature Pyramid Networks paper. Account for
        # the fact that our coordinates are normalized here.
        # e.g. a 224x224 ROI (in pixels) maps to P4
        image_area = tf.cast(image_shape[0] * image_shape[1], tf.float32)
        roi_level = log2_graph(tf.sqrt(h * w) / (224.0 / tf.sqrt(image_area)))
        # Clamp levels to the valid pyramid range [2, 5] (P2..P5)
        roi_level = tf.minimum(5, tf.maximum(
            2, 4 + tf.cast(tf.round(roi_level), tf.int32)))
        roi_level = tf.squeeze(roi_level, 2)
        # Loop through levels and apply ROI pooling to each. P2 to P5.
        pooled = []
        box_to_level = []
        for i, level in enumerate(range(2, 6)):
            ix = tf.where(tf.equal(roi_level, level))
            level_boxes = tf.gather_nd(boxes, ix)
            # Box indices for crop_and_resize.
            box_indices = tf.cast(ix[:, 0], tf.int32)
            # Keep track of which box is mapped to which level
            box_to_level.append(ix)
            # Stop gradient propogation to ROI proposals
            level_boxes = tf.stop_gradient(level_boxes)
            box_indices = tf.stop_gradient(box_indices)
            # Crop and Resize
            # From Mask R-CNN paper: "We sample four regular locations, so
            # that we can evaluate either max or average pooling. In fact,
            # interpolating only a single value at each bin center (without
            # pooling) is nearly as effective."
            #
            # Here we use the simplified approach of a single value per bin,
            # which is how it's done in tf.crop_and_resize()
            # Result: [batch * num_boxes, pool_height, pool_width, channels]
            pooled.append(tf.image.crop_and_resize(
                feature_maps[i], level_boxes, box_indices, self.pool_shape,
                method="bilinear"))
        # Pack pooled features into one tensor
        pooled = tf.concat(pooled, axis=0)
        # Pack box_to_level mapping into one array and add another
        # column representing the order of pooled boxes
        box_to_level = tf.concat(box_to_level, axis=0)
        box_range = tf.expand_dims(tf.range(tf.shape(box_to_level)[0]), 1)
        box_to_level = tf.concat([tf.cast(box_to_level, tf.int32), box_range],
                                 axis=1)
        # Rearrange pooled features to match the order of the original boxes
        # Sort box_to_level by batch then box index
        # TF doesn't have a way to sort by two columns, so merge them and sort.
        sorting_tensor = box_to_level[:, 0] * 100000 + box_to_level[:, 1]
        ix = tf.nn.top_k(sorting_tensor, k=tf.shape(
            box_to_level)[0]).indices[::-1]
        ix = tf.gather(box_to_level[:, 2], ix)
        pooled = tf.gather(pooled, ix)
        # Re-add the batch dimension
        shape = tf.concat([tf.shape(boxes)[:2], tf.shape(pooled)[1:]], axis=0)
        pooled = tf.reshape(pooled, shape)
        return pooled
    def compute_output_shape(self, input_shape):
        # [batch, num_boxes] + pool_shape + [channels of a feature map]
        return input_shape[0][:2] + self.pool_shape + (input_shape[2][-1], )
############################################################
# Detection Target Layer
############################################################
def overlaps_graph(boxes1, boxes2):
    """Computes IoU overlaps between two sets of boxes.
    boxes1, boxes2: [N, (y1, x1, y2, x2)].

    Returns an overlaps matrix of shape [len(boxes1), len(boxes2)].
    """
    # Compare every box in boxes1 against every box in boxes2 without
    # loops: repeat boxes1 (np.repeat-style, simulated with tile+reshape
    # since TF has no direct equivalent) and tile boxes2.
    n1 = tf.shape(boxes1)[0]
    n2 = tf.shape(boxes2)[0]
    rep1 = tf.reshape(
        tf.tile(tf.expand_dims(boxes1, 1), [1, 1, n2]), [-1, 4])
    rep2 = tf.tile(boxes2, [n1, 1])

    # Intersection rectangle for each pair.
    a_y1, a_x1, a_y2, a_x2 = tf.split(rep1, 4, axis=1)
    b_y1, b_x1, b_y2, b_x2 = tf.split(rep2, 4, axis=1)
    inter_y1 = tf.maximum(a_y1, b_y1)
    inter_x1 = tf.maximum(a_x1, b_x1)
    inter_y2 = tf.minimum(a_y2, b_y2)
    inter_x2 = tf.minimum(a_x2, b_x2)
    intersection = tf.maximum(inter_x2 - inter_x1, 0) * \
        tf.maximum(inter_y2 - inter_y1, 0)

    # Union = area1 + area2 - intersection.
    area1 = (a_y2 - a_y1) * (a_x2 - a_x1)
    area2 = (b_y2 - b_y1) * (b_x2 - b_x1)
    union = area1 + area2 - intersection

    # IoU per pair, reshaped back into a [n1, n2] matrix.
    iou = intersection / union
    return tf.reshape(iou, [n1, n2])
def detection_targets_graph(proposals, gt_class_ids, gt_boxes, gt_masks, config):
    """Generates detection targets for one image. Subsamples proposals and
    generates target class IDs, bounding box deltas, and masks for each.

    Inputs:
    proposals: [POST_NMS_ROIS_TRAINING, (y1, x1, y2, x2)] in normalized
        coordinates. Might be zero padded if there are not enough proposals.
    gt_class_ids: [MAX_GT_INSTANCES] int class IDs
    gt_boxes: [MAX_GT_INSTANCES, (y1, x1, y2, x2)] in normalized coordinates.
    gt_masks: [height, width, MAX_GT_INSTANCES] of boolean type.
    config: model config; uses TRAIN_ROIS_PER_IMAGE, ROI_POSITIVE_RATIO,
        BBOX_STD_DEV, USE_MINI_MASK, and MASK_SHAPE.

    Returns: Target ROIs and corresponding class IDs, bounding box shifts,
    and masks.
    rois: [TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)] in normalized coordinates
    class_ids: [TRAIN_ROIS_PER_IMAGE]. Integer class IDs. Zero padded.
    deltas: [TRAIN_ROIS_PER_IMAGE, (dy, dx, log(dh), log(dw))]
    masks: [TRAIN_ROIS_PER_IMAGE, height, width]. Masks cropped to bbox
           boundaries and resized to neural network output size.

    Note: Returned arrays might be zero padded if not enough target ROIs.
    Note: sampling uses tf.random.shuffle, so targets are stochastic per call.
    """
    # Assertions: fail fast if there are no proposals at all.
    asserts = [
        tf.Assert(tf.greater(tf.shape(proposals)[0], 0), [proposals],
                  name="roi_assertion"),
    ]
    with tf.control_dependencies(asserts):
        proposals = tf.identity(proposals)

    # Remove zero padding from proposals and GT, keeping class IDs and
    # masks aligned with the surviving GT boxes.
    proposals, _ = trim_zeros_graph(proposals, name="trim_proposals")
    gt_boxes, non_zeros = trim_zeros_graph(gt_boxes, name="trim_gt_boxes")
    gt_class_ids = tf.boolean_mask(gt_class_ids, non_zeros,
                                   name="trim_gt_class_ids")
    gt_masks = tf.gather(gt_masks, tf.where(non_zeros)[:, 0], axis=2,
                         name="trim_gt_masks")

    # Handle COCO crowds
    # A crowd box in COCO is a bounding box around several instances. Exclude
    # them from training. A crowd box is given a negative class ID.
    crowd_ix = tf.where(gt_class_ids < 0)[:, 0]
    non_crowd_ix = tf.where(gt_class_ids > 0)[:, 0]
    crowd_boxes = tf.gather(gt_boxes, crowd_ix)
    gt_class_ids = tf.gather(gt_class_ids, non_crowd_ix)
    gt_boxes = tf.gather(gt_boxes, non_crowd_ix)
    gt_masks = tf.gather(gt_masks, non_crowd_ix, axis=2)

    # Compute overlaps matrix [proposals, gt_boxes]
    overlaps = overlaps_graph(proposals, gt_boxes)

    # Compute overlaps with crowd boxes [proposals, crowd_boxes].
    # Proposals touching a crowd (IoU >= 0.001) are excluded from negatives.
    crowd_overlaps = overlaps_graph(proposals, crowd_boxes)
    crowd_iou_max = tf.reduce_max(crowd_overlaps, axis=1)
    no_crowd_bool = (crowd_iou_max < 0.001)

    # Determine positive and negative ROIs
    roi_iou_max = tf.reduce_max(overlaps, axis=1)
    # 1. Positive ROIs are those with >= 0.5 IoU with a GT box
    positive_roi_bool = (roi_iou_max >= 0.5)
    positive_indices = tf.where(positive_roi_bool)[:, 0]
    # 2. Negative ROIs are those with < 0.5 with every GT box. Skip crowds.
    negative_indices = tf.where(tf.logical_and(roi_iou_max < 0.5, no_crowd_bool))[:, 0]

    # Subsample ROIs. Aim for ROI_POSITIVE_RATIO positive (typically 33%).
    # Positive ROIs
    positive_count = int(config.TRAIN_ROIS_PER_IMAGE *
                         config.ROI_POSITIVE_RATIO)
    positive_indices = tf.random.shuffle(positive_indices)[:positive_count]
    positive_count = tf.shape(positive_indices)[0]
    # Negative ROIs. Add enough to maintain positive:negative ratio.
    # negatives = positives * (1/ratio - 1), computed from the *actual*
    # positive count (which may be smaller than requested).
    r = 1.0 / config.ROI_POSITIVE_RATIO
    negative_count = tf.cast(r * tf.cast(positive_count, tf.float32), tf.int32) - positive_count
    negative_indices = tf.random.shuffle(negative_indices)[:negative_count]
    # Gather selected ROIs
    positive_rois = tf.gather(proposals, positive_indices)
    negative_rois = tf.gather(proposals, negative_indices)

    # Assign positive ROIs to GT boxes (best-overlap assignment).
    positive_overlaps = tf.gather(overlaps, positive_indices)
    roi_gt_box_assignment = tf.cond(
        tf.greater(tf.shape(positive_overlaps)[1], 0),
        true_fn=lambda: tf.argmax(positive_overlaps, axis=1),
        false_fn=lambda: tf.cast(tf.constant([]), tf.int64)
    )
    roi_gt_boxes = tf.gather(gt_boxes, roi_gt_box_assignment)
    roi_gt_class_ids = tf.gather(gt_class_ids, roi_gt_box_assignment)

    # Compute bbox refinement targets for positive ROIs, normalized by
    # BBOX_STD_DEV to roughly unit variance.
    deltas = utils.box_refinement_graph(positive_rois, roi_gt_boxes)
    deltas /= config.BBOX_STD_DEV

    # Assign positive ROIs to GT masks
    # Permute masks to [N, height, width, 1]
    transposed_masks = tf.expand_dims(tf.transpose(gt_masks, [2, 0, 1]), -1)
    # Pick the right mask for each ROI
    roi_masks = tf.gather(transposed_masks, roi_gt_box_assignment)

    # Compute mask targets
    boxes = positive_rois
    if config.USE_MINI_MASK:
        # Transform ROI coordinates from normalized image space
        # to normalized mini-mask space.
        y1, x1, y2, x2 = tf.split(positive_rois, 4, axis=1)
        gt_y1, gt_x1, gt_y2, gt_x2 = tf.split(roi_gt_boxes, 4, axis=1)
        gt_h = gt_y2 - gt_y1
        gt_w = gt_x2 - gt_x1
        y1 = (y1 - gt_y1) / gt_h
        x1 = (x1 - gt_x1) / gt_w
        y2 = (y2 - gt_y1) / gt_h
        x2 = (x2 - gt_x1) / gt_w
        boxes = tf.concat([y1, x1, y2, x2], 1)
    box_ids = tf.range(0, tf.shape(roi_masks)[0])
    masks = tf.image.crop_and_resize(tf.cast(roi_masks, tf.float32), boxes,
                                     box_ids,
                                     config.MASK_SHAPE)
    # Remove the extra dimension from masks.
    masks = tf.squeeze(masks, axis=3)

    # Threshold mask pixels at 0.5 to have GT masks be 0 or 1 to use with
    # binary cross entropy loss.
    masks = tf.round(masks)

    # Append negative ROIs and pad bbox deltas and masks that
    # are not used for negative ROIs with zeros.
    rois = tf.concat([positive_rois, negative_rois], axis=0)
    N = tf.shape(negative_rois)[0]
    P = tf.maximum(config.TRAIN_ROIS_PER_IMAGE - tf.shape(rois)[0], 0)
    rois = tf.pad(rois, [(0, P), (0, 0)])
    roi_gt_boxes = tf.pad(roi_gt_boxes, [(0, N + P), (0, 0)])
    roi_gt_class_ids = tf.pad(roi_gt_class_ids, [(0, N + P)])
    deltas = tf.pad(deltas, [(0, N + P), (0, 0)])
    masks = tf.pad(masks, [[0, N + P], (0, 0), (0, 0)])

    return rois, roi_gt_class_ids, deltas, masks
class DetectionTargetLayer(KE.Layer):
    """Subsamples proposals and generates per-ROI training targets:
    box refinement deltas, class IDs, and masks.

    Inputs:
    proposals: [batch, N, (y1, x1, y2, x2)] in normalized coordinates.
        May be zero padded when there are not enough proposals.
    gt_class_ids: [batch, MAX_GT_INSTANCES] Integer class IDs.
    gt_boxes: [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)] in normalized
        coordinates.
    gt_masks: [batch, height, width, MAX_GT_INSTANCES] of boolean type

    Returns:
    rois: [batch, TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)] normalized
    target_class_ids: [batch, TRAIN_ROIS_PER_IMAGE] integer class IDs
    target_deltas: [batch, TRAIN_ROIS_PER_IMAGE, (dy, dx, log(dh), log(dw)]
    target_mask: [batch, TRAIN_ROIS_PER_IMAGE, height, width] masks cropped
        to bbox boundaries and resized to the network's mask output size.

    Outputs may be zero padded when there are not enough target ROIs.
    """

    def __init__(self, config, **kwargs):
        super(DetectionTargetLayer, self).__init__(**kwargs)
        self.config = config

    def call(self, inputs):
        # Unpack the four inputs, then run the per-image target graph on
        # each slice of the batch.
        proposals, gt_class_ids, gt_boxes, gt_masks = inputs
        # TODO: Rename target_bbox to target_deltas for clarity
        names = ["rois", "target_class_ids", "target_bbox", "target_mask"]
        return utils.batch_slice(
            [proposals, gt_class_ids, gt_boxes, gt_masks],
            lambda p, c, b, m: detection_targets_graph(
                p, c, b, m, self.config),
            self.config.IMAGES_PER_GPU, names=names)

    def compute_output_shape(self, input_shape):
        rois_per_image = self.config.TRAIN_ROIS_PER_IMAGE
        return [
            (None, rois_per_image, 4),  # rois
            (None, rois_per_image),  # class_ids
            (None, rois_per_image, 4),  # deltas
            (None, rois_per_image, self.config.MASK_SHAPE[0],
             self.config.MASK_SHAPE[1])  # masks
        ]

    def compute_mask(self, inputs, mask=None):
        return [None, None, None, None]
############################################################
# Detection Layer
############################################################
def refine_detections_graph(rois, probs, deltas, window, config):
    """Refine classified proposals and filter overlaps and return final
    detections.

    Inputs:
        rois: [N, (y1, x1, y2, x2)] in normalized coordinates
        probs: [N, num_classes]. Class probabilities.
        deltas: [N, num_classes, (dy, dx, log(dh), log(dw))]. Class-specific
                bounding box deltas.
        window: (y1, x1, y2, x2) in normalized coordinates. The part of the
                image that contains the image excluding the padding.
        config: uses BBOX_STD_DEV, DETECTION_MIN_CONFIDENCE,
                DETECTION_MAX_INSTANCES, DETECTION_NMS_THRESHOLD.

    Returns detections shaped:
        [num_detections, (y1, x1, y2, x2, class_id, score)]
    where coordinates are normalized and the result is zero-padded to
    DETECTION_MAX_INSTANCES rows.
    """
    # Class IDs per ROI (argmax over class probabilities)
    class_ids = tf.argmax(probs, axis=1, output_type=tf.int32)
    # Class probability of the top class of each ROI
    indices = tf.stack([tf.range(tf.shape(probs)[0]), class_ids], axis=1)
    class_scores = tf.gather_nd(probs, indices)
    # Class-specific bounding box deltas
    deltas_specific = tf.gather_nd(deltas, indices)
    # Apply bounding box deltas (de-normalized by BBOX_STD_DEV)
    # Shape: [boxes, (y1, x1, y2, x2)] in normalized coordinates
    refined_rois = apply_box_deltas_graph(
        rois, deltas_specific * config.BBOX_STD_DEV)
    # Clip boxes to image window
    refined_rois = clip_boxes_graph(refined_rois, window)

    # TODO: Filter out boxes with zero area

    # Filter out background boxes (class 0 is background)
    keep = tf.where(class_ids > 0)[:, 0]
    # Filter out low confidence boxes.
    # NOTE(review): truthiness check means a threshold of 0 (or None)
    # disables this filter entirely — presumably intentional.
    if config.DETECTION_MIN_CONFIDENCE:
        conf_keep = tf.where(class_scores >= config.DETECTION_MIN_CONFIDENCE)[:, 0]
        keep = tf.sets.intersection(tf.expand_dims(keep, 0),
                                    tf.expand_dims(conf_keep, 0))
        keep = tf.sparse.to_dense(keep)[0]

    # Apply per-class NMS
    # 1. Prepare variables
    pre_nms_class_ids = tf.gather(class_ids, keep)
    pre_nms_scores = tf.gather(class_scores, keep)
    pre_nms_rois = tf.gather(refined_rois, keep)
    unique_pre_nms_class_ids = tf.unique(pre_nms_class_ids)[0]

    def nms_keep_map(class_id):
        """Apply Non-Maximum Suppression on ROIs of the given class.

        Returns indices into the *original* ROI list (via `keep`),
        padded with -1 to a fixed length of DETECTION_MAX_INSTANCES.
        """
        # Indices of ROIs of the given class
        ixs = tf.where(tf.equal(pre_nms_class_ids, class_id))[:, 0]
        # Apply NMS
        class_keep = tf.image.non_max_suppression(
            tf.gather(pre_nms_rois, ixs),
            tf.gather(pre_nms_scores, ixs),
            max_output_size=config.DETECTION_MAX_INSTANCES,
            iou_threshold=config.DETECTION_NMS_THRESHOLD)
        # Map indices back through ixs and keep to original ROI indices
        class_keep = tf.gather(keep, tf.gather(ixs, class_keep))
        # Pad with -1 so returned tensors have the same shape
        gap = config.DETECTION_MAX_INSTANCES - tf.shape(class_keep)[0]
        class_keep = tf.pad(class_keep, [(0, gap)],
                            mode='CONSTANT', constant_values=-1)
        # Set shape so map_fn() can infer result shape
        class_keep.set_shape([config.DETECTION_MAX_INSTANCES])
        return class_keep

    # 2. Map over class IDs
    nms_keep = tf.map_fn(nms_keep_map, unique_pre_nms_class_ids,
                         dtype=tf.int64)
    # 3. Merge results into one list, and remove -1 padding
    nms_keep = tf.reshape(nms_keep, [-1])
    nms_keep = tf.gather(nms_keep, tf.where(nms_keep > -1)[:, 0])
    # 4. Compute intersection between keep and nms_keep
    keep = tf.sets.intersection(tf.expand_dims(keep, 0),
                                tf.expand_dims(nms_keep, 0))
    keep = tf.sparse.to_dense(keep)[0]
    # Keep top detections by score, capped at DETECTION_MAX_INSTANCES
    roi_count = config.DETECTION_MAX_INSTANCES
    class_scores_keep = tf.gather(class_scores, keep)
    num_keep = tf.minimum(tf.shape(class_scores_keep)[0], roi_count)
    top_ids = tf.nn.top_k(class_scores_keep, k=num_keep, sorted=True)[1]
    keep = tf.gather(keep, top_ids)

    # Arrange output as [N, (y1, x1, y2, x2, class_id, score)]
    # Coordinates are normalized.
    detections = tf.concat([
        tf.gather(refined_rois, keep),
        tf.cast(tf.gather(class_ids, keep), tf.float32)[..., tf.newaxis],
        tf.gather(class_scores, keep)[..., tf.newaxis]
    ], axis=1)

    # Pad with zeros if detections < DETECTION_MAX_INSTANCES
    gap = config.DETECTION_MAX_INSTANCES - tf.shape(detections)[0]
    detections = tf.pad(detections, [(0, gap), (0, 0)], "CONSTANT")
    return detections
class DetectionLayer(KE.Layer):
    """Takes classified proposal boxes and their bounding box deltas and
    returns the final detection boxes.

    Returns:
        [batch, num_detections, (y1, x1, y2, x2, class_id, class_score)]
        where coordinates are normalized.
    """

    def __init__(self, config=None, **kwargs):
        super(DetectionLayer, self).__init__(**kwargs)
        self.config = config

    def call(self, inputs):
        rois, mrcnn_class, mrcnn_bbox, image_meta = inputs

        # Normalize the image window (the non-padded image area) using the
        # shape of the first image in the batch — all images in a batch are
        # resized to the same size.
        meta = parse_image_meta_graph(image_meta)
        image_shape = meta['image_shape'][0]
        window = norm_boxes_graph(meta['window'], image_shape[:2])

        # Run the detection refinement graph per batch item, then reshape
        # to [batch, num_detections, 6] in normalized coordinates.
        detections_batch = utils.batch_slice(
            [rois, mrcnn_class, mrcnn_bbox, window],
            lambda r, p, d, w: refine_detections_graph(r, p, d, w, self.config),
            self.config.IMAGES_PER_GPU)
        return tf.reshape(
            detections_batch,
            [self.config.BATCH_SIZE, self.config.DETECTION_MAX_INSTANCES, 6])

    def compute_output_shape(self, input_shape):
        return (None, self.config.DETECTION_MAX_INSTANCES, 6)
############################################################
# Region Proposal Network (RPN)
############################################################
def rpn_graph(feature_map, anchors_per_location, anchor_stride):
    """Builds the computation graph of the Region Proposal Network.

    feature_map: backbone features [batch, height, width, depth]
    anchors_per_location: number of anchors per pixel in the feature map
    anchor_stride: anchor density — typically 1 (anchors for every feature
        map pixel) or 2 (every other pixel).

    Returns:
        rpn_class_logits: [batch, H * W * anchors_per_location, 2]
            Anchor classifier logits (before softmax)
        rpn_probs: [batch, H * W * anchors_per_location, 2]
            Anchor classifier probabilities.
        rpn_bbox: [batch, H * W * anchors_per_location,
            (dy, dx, log(dh), log(dw))] Deltas to be applied to anchors.
    """
    # TODO: check if stride of 2 causes alignment issues if the feature map
    # is not even.

    # Shared 3x3 conv base applied to the backbone feature map.
    shared = KL.Conv2D(512, (3, 3), padding='same', activation='relu',
                       strides=anchor_stride,
                       name='rpn_conv_shared')(feature_map)

    # Objectness scores: [batch, height, width, anchors_per_location * 2].
    class_raw = KL.Conv2D(2 * anchors_per_location, (1, 1), padding='valid',
                          activation='linear', name='rpn_class_raw')(shared)
    # Flatten spatial dims to [batch, anchors, 2], then softmax over BG/FG.
    rpn_class_logits = KL.Lambda(
        lambda t: tf.reshape(t, [tf.shape(t)[0], -1, 2]))(class_raw)
    rpn_probs = KL.Activation(
        "softmax", name="rpn_class_xxx")(rpn_class_logits)

    # Anchor box refinements: [batch, H, W, anchors_per_location * 4],
    # flattened to [batch, anchors, 4].
    bbox_raw = KL.Conv2D(anchors_per_location * 4, (1, 1), padding="valid",
                         activation='linear', name='rpn_bbox_pred')(shared)
    rpn_bbox = KL.Lambda(
        lambda t: tf.reshape(t, [tf.shape(t)[0], -1, 4]))(bbox_raw)

    return [rpn_class_logits, rpn_probs, rpn_bbox]
def build_rpn_model(anchor_stride, anchors_per_location, depth):
    """Builds a Keras model wrapping the RPN graph so the same weights can
    be reused across multiple feature maps.

    anchor_stride: anchor density — typically 1 (anchors for every feature
        map pixel) or 2 (every other pixel).
    anchors_per_location: number of anchors per pixel in the feature map
    depth: Depth of the backbone feature map.

    Returns a Keras Model object. The model outputs, when called, are:
        rpn_class_logits: [batch, H * W * anchors_per_location, 2]
            Anchor classifier logits (before softmax)
        rpn_probs: [batch, H * W * anchors_per_location, 2]
            Anchor classifier probabilities.
        rpn_bbox: [batch, H * W * anchors_per_location,
            (dy, dx, log(dh), log(dw))] Deltas to be applied to anchors.
    """
    feature_map_input = KL.Input(shape=[None, None, depth],
                                 name="input_rpn_feature_map")
    rpn_outputs = rpn_graph(feature_map_input, anchors_per_location,
                            anchor_stride)
    return KM.Model([feature_map_input], rpn_outputs, name="rpn_model")
############################################################
# Feature Pyramid Network Heads
############################################################
def fpn_classifier_graph(rois, feature_maps, image_meta,
                         pool_size, num_classes, train_bn=True,
                         fc_layers_size=1024):
    """Builds the computation graph of the feature pyramid network classifier
    and regressor heads.

    rois: [batch, num_rois, (y1, x1, y2, x2)] Proposal boxes in normalized
          coordinates.
    feature_maps: List of feature maps from different layers of the pyramid,
                  [P2, P3, P4, P5]. Each has a different resolution.
    image_meta: [batch, (meta data)] Image details. See compose_image_meta()
    pool_size: The width of the square feature map generated from ROI Pooling.
    num_classes: number of classes, which determines the depth of the results
    train_bn: Boolean. Train or freeze Batch Norm layers
    fc_layers_size: Size of the 2 FC layers

    Returns:
        logits: [batch, num_rois, NUM_CLASSES] classifier logits (before softmax)
        probs: [batch, num_rois, NUM_CLASSES] classifier probabilities
        bbox_deltas: [batch, num_rois, NUM_CLASSES, (dy, dx, log(dh), log(dw))]
            Deltas to apply to proposal boxes
    """
    # ROI Pooling
    # Shape: [batch, num_rois, POOL_SIZE, POOL_SIZE, channels]
    x = PyramidROIAlign([pool_size, pool_size],
                        name="roi_align_classifier")([rois, image_meta] + feature_maps)
    # Two 1024 FC layers (implemented with Conv2D for consistency)
    x = KL.TimeDistributed(KL.Conv2D(fc_layers_size, (pool_size, pool_size), padding="valid"),
                           name="mrcnn_class_conv1")(x)
    x = KL.TimeDistributed(BatchNorm(), name='mrcnn_class_bn1')(x, training=train_bn)
    x = KL.Activation('relu')(x)
    x = KL.TimeDistributed(KL.Conv2D(fc_layers_size, (1, 1)),
                           name="mrcnn_class_conv2")(x)
    x = KL.TimeDistributed(BatchNorm(), name='mrcnn_class_bn2')(x, training=train_bn)
    x = KL.Activation('relu')(x)

    # Squeeze the two 1x1 spatial dims: [batch, num_rois, fc_layers_size]
    shared = KL.Lambda(lambda x: K.squeeze(K.squeeze(x, 3), 2),
                       name="pool_squeeze")(x)

    # Classifier head
    mrcnn_class_logits = KL.TimeDistributed(KL.Dense(num_classes),
                                            name='mrcnn_class_logits')(shared)
    mrcnn_probs = KL.TimeDistributed(KL.Activation("softmax"),
                                     name="mrcnn_class")(mrcnn_class_logits)

    # BBox head
    # [batch, num_rois, NUM_CLASSES * (dy, dx, log(dh), log(dw))]
    x = KL.TimeDistributed(KL.Dense(num_classes * 4, activation='linear'),
                           name='mrcnn_bbox_fc')(shared)
    # Reshape to [batch, num_rois, NUM_CLASSES, (dy, dx, log(dh), log(dw))]
    s = K.int_shape(x)
    # FIX: compare against None with `is not` (identity), not `!=`
    # (PEP 8 E711). Keras may report the num_rois dim as None during graph
    # building; fall back to -1 so Reshape infers it.
    num_rois = s[1] if s[1] is not None else -1
    mrcnn_bbox = KL.Reshape((num_rois, num_classes, 4), name="mrcnn_bbox")(x)

    return mrcnn_class_logits, mrcnn_probs, mrcnn_bbox
def build_fpn_mask_graph(rois, feature_maps, image_meta,
                         pool_size, num_classes, train_bn=True):
    """Builds the computation graph of the mask head of Feature Pyramid
    Network.

    rois: [batch, num_rois, (y1, x1, y2, x2)] Proposal boxes in normalized
          coordinates.
    feature_maps: List of feature maps from different layers of the pyramid,
                  [P2, P3, P4, P5]. Each has a different resolution.
    image_meta: [batch, (meta data)] Image details. See compose_image_meta()
    pool_size: The width of the square feature map generated from ROI Pooling.
    num_classes: number of classes, which determines the depth of the results
    train_bn: Boolean. Train or freeze Batch Norm layers

    Returns: Masks [batch, num_rois, MASK_POOL_SIZE, MASK_POOL_SIZE, NUM_CLASSES]
    """
    # ROI Pooling
    # Shape: [batch, num_rois, MASK_POOL_SIZE, MASK_POOL_SIZE, channels]
    x = PyramidROIAlign([pool_size, pool_size],
                        name="roi_align_mask")([rois, image_meta] + feature_maps)

    # Four identical conv/bn/relu stages; layer names match the reference
    # weights (mrcnn_mask_conv1..4, mrcnn_mask_bn1..4).
    for stage in range(1, 5):
        x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding="same"),
                               name="mrcnn_mask_conv{}".format(stage))(x)
        x = KL.TimeDistributed(BatchNorm(),
                               name='mrcnn_mask_bn{}'.format(stage))(x, training=train_bn)
        x = KL.Activation('relu')(x)

    # Upsample 2x, then predict one sigmoid mask per class.
    x = KL.TimeDistributed(KL.Conv2DTranspose(256, (2, 2), strides=2, activation="relu"),
                           name="mrcnn_mask_deconv")(x)
    x = KL.TimeDistributed(KL.Conv2D(num_classes, (1, 1), strides=1, activation="sigmoid"),
                           name="mrcnn_mask")(x)
    return x
############################################################
# Loss Functions
############################################################
def smooth_l1_loss(y_true, y_pred):
    """Implements Smooth-L1 loss.

    Quadratic for |diff| < 1, linear beyond — element-wise.
    y_true and y_pred are typically: [N, 4], but could be any shape.
    """
    abs_diff = K.abs(y_true - y_pred)
    # 1.0 where the error is small (quadratic region), 0.0 otherwise.
    quadratic = K.cast(K.less(abs_diff, 1.0), "float32")
    linear = 1 - quadratic
    return quadratic * 0.5 * abs_diff**2 + linear * (abs_diff - 0.5)
def rpn_class_loss_graph(rpn_match, rpn_class_logits):
    """RPN anchor classifier loss.

    rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,
               -1=negative, 0=neutral anchor.
    rpn_class_logits: [batch, anchors, 2]. RPN classifier logits for BG/FG.
    """
    # Drop the trailing singleton dim.
    rpn_match = tf.squeeze(rpn_match, -1)
    # Map the -1/+1 match labels to 0/1 class labels.
    anchor_class = K.cast(K.equal(rpn_match, 1), tf.int32)
    # Only non-neutral anchors (match != 0) contribute to the loss.
    contributing = tf.where(K.not_equal(rpn_match, 0))
    logits = tf.gather_nd(rpn_class_logits, contributing)
    labels = tf.gather_nd(anchor_class, contributing)
    # Sparse cross entropy on the contributing anchors; 0 when none exist.
    loss = K.sparse_categorical_crossentropy(target=labels,
                                             output=logits,
                                             from_logits=True)
    return K.switch(tf.size(loss) > 0, K.mean(loss), tf.constant(0.0))
def rpn_bbox_loss_graph(config, target_bbox, rpn_match, rpn_bbox):
    """Return the RPN bounding box loss graph.

    config: the model config object (uses IMAGES_PER_GPU).
    target_bbox: [batch, max positive anchors, (dy, dx, log(dh), log(dw))].
        Uses 0 padding to fill in unused bbox deltas.
    rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,
        -1=negative, 0=neutral anchor.
    rpn_bbox: [batch, anchors, (dy, dx, log(dh), log(dw))]

    Returns the mean Smooth-L1 loss over positive anchors (0 if none).
    """
    # Positive anchors contribute to the loss, but negative and
    # neutral anchors (match value of 0 or -1) don't.
    rpn_match = K.squeeze(rpn_match, -1)
    indices = tf.where(K.equal(rpn_match, 1))
    # Pick bbox deltas that contribute to the loss
    rpn_bbox = tf.gather_nd(rpn_bbox, indices)
    # Trim target bounding box deltas to the same length as rpn_bbox:
    # target_bbox holds only the positives (front-packed per image), so take
    # the first `count` rows from each batch row via batch_pack_graph.
    batch_counts = K.sum(K.cast(K.equal(rpn_match, 1), tf.int32), axis=1)
    target_bbox = batch_pack_graph(target_bbox, batch_counts,
                                   config.IMAGES_PER_GPU)
    loss = smooth_l1_loss(target_bbox, rpn_bbox)
    # Guard against an empty loss tensor (no positive anchors at all).
    loss = K.switch(tf.size(loss) > 0, K.mean(loss), tf.constant(0.0))
    return loss
def mrcnn_class_loss_graph(target_class_ids, pred_class_logits,
                           active_class_ids):
    """Loss for the classifier head of Mask RCNN.

    target_class_ids: [batch, num_rois]. Integer class IDs. Uses zero
        padding to fill in the array.
    pred_class_logits: [batch, num_rois, num_classes]
    active_class_ids: [batch, num_classes]. Has a value of 1 for
        classes that are in the dataset of the image, and 0
        for classes that are not in the dataset.

    Returns mean cross-entropy over predictions of active classes.
    """
    # During model building, Keras calls this function with
    # target_class_ids of type float32. Unclear why. Cast it
    # to int to get around it.
    target_class_ids = tf.cast(target_class_ids, 'int64')

    # Find predictions of classes that are not in the dataset.
    pred_class_ids = tf.argmax(pred_class_logits, axis=2)
    # TODO: Update this line to work with batch > 1. Right now it assumes all
    #       images in a batch have the same active_class_ids
    pred_active = tf.gather(active_class_ids[0], pred_class_ids)

    # Loss
    loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=target_class_ids, logits=pred_class_logits)

    # Erase losses of predictions of classes that are not in the active
    # classes of the image.
    loss = loss * pred_active

    # Compute loss mean. Use only predictions that contribute
    # to the loss to get a correct mean.
    # NOTE(review): if no prediction is of an active class, this divides
    # 0 by 0 and yields NaN — confirm upstream guarantees at least one.
    loss = tf.reduce_sum(loss) / tf.reduce_sum(pred_active)
    return loss
def mrcnn_bbox_loss_graph(target_bbox, target_class_ids, pred_bbox):
    """Loss for Mask R-CNN bounding box refinement.

    target_bbox: [batch, num_rois, (dy, dx, log(dh), log(dw))]
    target_class_ids: [batch, num_rois]. Integer class IDs.
    pred_bbox: [batch, num_rois, num_classes, (dy, dx, log(dh), log(dw))]
    """
    # Merge batch and ROI dims for simpler indexing.
    target_class_ids = K.reshape(target_class_ids, (-1,))
    target_bbox = K.reshape(target_bbox, (-1, 4))
    num_classes = K.int_shape(pred_bbox)[2]
    pred_bbox = K.reshape(pred_bbox, (-1, num_classes, 4))

    # Only positive ROIs contribute, and only the prediction for each ROI's
    # assigned class. Build (roi_index, class_id) gather indices.
    positive_ix = tf.where(target_class_ids > 0)[:, 0]
    positive_class_ids = tf.cast(
        tf.gather(target_class_ids, positive_ix), tf.int64)
    gather_ix = tf.stack([positive_ix, positive_class_ids], axis=1)

    # Select the contributing deltas (predicted and true).
    true_deltas = tf.gather(target_bbox, positive_ix)
    pred_deltas = tf.gather_nd(pred_bbox, gather_ix)

    # Smooth-L1 over positives; 0 when there are no positive ROIs.
    loss = K.switch(tf.size(true_deltas) > 0,
                    smooth_l1_loss(y_true=true_deltas, y_pred=pred_deltas),
                    tf.constant(0.0))
    return K.mean(loss)
def mrcnn_mask_loss_graph(target_masks, target_class_ids, pred_masks):
    """Mask binary cross-entropy loss for the masks head.

    target_masks: [batch, num_rois, height, width].
        A float32 tensor of values 0 or 1. Uses zero padding to fill array.
    target_class_ids: [batch, num_rois]. Integer class IDs. Zero padded.
    pred_masks: [batch, proposals, height, width, num_classes] float32 tensor
        with values from 0 to 1.

    Returns the mean binary cross-entropy over positive ROIs (0 if none).
    """
    # Reshape for simplicity. Merge first two dimensions into one.
    target_class_ids = K.reshape(target_class_ids, (-1,))
    mask_shape = tf.shape(target_masks)
    target_masks = K.reshape(target_masks, (-1, mask_shape[2], mask_shape[3]))
    pred_shape = tf.shape(pred_masks)
    pred_masks = K.reshape(pred_masks,
                           (-1, pred_shape[2], pred_shape[3], pred_shape[4]))
    # Permute predicted masks to [N, num_classes, height, width] so the
    # class axis can be indexed with gather_nd below.
    pred_masks = tf.transpose(pred_masks, [0, 3, 1, 2])

    # Only positive ROIs contribute to the loss. And only
    # the class specific mask of each ROI.
    positive_ix = tf.where(target_class_ids > 0)[:, 0]
    positive_class_ids = tf.cast(
        tf.gather(target_class_ids, positive_ix), tf.int64)
    # (roi_index, class_id) pairs to select one predicted mask per ROI.
    indices = tf.stack([positive_ix, positive_class_ids], axis=1)

    # Gather the masks (predicted and true) that contribute to loss
    y_true = tf.gather(target_masks, positive_ix)
    y_pred = tf.gather_nd(pred_masks, indices)

    # Compute binary cross entropy. If no positive ROIs, then return 0.
    # shape: [batch, roi, num_classes]
    loss = K.switch(tf.size(y_true) > 0,
                    K.binary_crossentropy(target=y_true, output=y_pred),
                    tf.constant(0.0))
    loss = K.mean(loss)
    return loss
############################################################
# Data Generator
############################################################
def load_image_gt(dataset, config, image_id, augmentation=None,
                  use_mini_mask=False):
    """Load and return ground truth data for an image (image, mask, bounding boxes).

    dataset: a Dataset object providing load_image()/load_mask() and
        source_class_ids / image_info.
    config: model config (uses IMAGE_MIN_DIM, IMAGE_MIN_SCALE, IMAGE_MAX_DIM,
        IMAGE_RESIZE_MODE, MINI_MASK_SHAPE).
    image_id: ID of the image to load from the dataset.
    augmentation: Optional. An imgaug (https://github.com/aleju/imgaug)
        augmentation. For example, passing imgaug.augmenters.Fliplr(0.5)
        flips images right/left 50% of the time.
    use_mini_mask: If False, returns full-size masks that are the same height
        and width as the original image. These can be big, for example
        1024x1024x100 (for 100 instances). Mini masks are smaller, typically,
        224x224 and are generated by extracting the bounding box of the
        object and resizing it to MINI_MASK_SHAPE.

    Returns:
    image: [height, width, 3]
    image_meta: composed meta array (see compose_image_meta)
    class_ids: [instance_count] Integer class IDs
    bbox: [instance_count, (y1, x1, y2, x2)]
    mask: [height, width, instance_count]. The height and width are those
        of the image unless use_mini_mask is True, in which case they are
        defined in MINI_MASK_SHAPE.
    """
    # Load image and mask
    image = dataset.load_image(image_id)
    mask, class_ids = dataset.load_mask(image_id)
    original_shape = image.shape
    image, window, scale, padding, crop = utils.resize_image(
        image,
        min_dim=config.IMAGE_MIN_DIM,
        min_scale=config.IMAGE_MIN_SCALE,
        max_dim=config.IMAGE_MAX_DIM,
        mode=config.IMAGE_RESIZE_MODE)
    mask = utils.resize_mask(mask, scale, padding, crop)

    # Augmentation
    # This requires the imgaug lib (https://github.com/aleju/imgaug)
    if augmentation:
        import imgaug

        # Augmenters that are safe to apply to masks
        # Some, such as Affine, have settings that make them unsafe, so always
        # test your augmentation on masks
        MASK_AUGMENTERS = ["Sequential", "SomeOf", "OneOf", "Sometimes",
                           "Fliplr", "Flipud", "CropAndPad",
                           "Affine", "PiecewiseAffine"]

        def hook(images, augmenter, parents, default):
            """Determines which augmenters to apply to masks."""
            return augmenter.__class__.__name__ in MASK_AUGMENTERS

        # Store shapes before augmentation to compare
        image_shape = image.shape
        mask_shape = mask.shape
        # Make augmenters deterministic to apply similarly to images and masks
        det = augmentation.to_deterministic()
        image = det.augment_image(image)
        # Change mask to np.uint8 because imgaug doesn't support np.bool
        mask = det.augment_image(mask.astype(np.uint8),
                                 hooks=imgaug.HooksImages(activator=hook))
        # Verify that shapes didn't change
        assert image.shape == image_shape, "Augmentation shouldn't change image size"
        assert mask.shape == mask_shape, "Augmentation shouldn't change mask size"
        # Change mask back to bool.
        # BUG FIX: the np.bool alias was deprecated in NumPy 1.20 and removed
        # in 1.24; np.bool_ is the canonical boolean scalar type.
        mask = mask.astype(np.bool_)

    # Note that some boxes might be all zeros if the corresponding mask got cropped out.
    # and here is to filter them out
    _idx = np.sum(mask, axis=(0, 1)) > 0
    mask = mask[:, :, _idx]
    class_ids = class_ids[_idx]
    # Bounding boxes. Note that some boxes might be all zeros
    # if the corresponding mask got cropped out.
    # bbox: [num_instances, (y1, x1, y2, x2)]
    bbox = utils.extract_bboxes(mask)

    # Active classes
    # Different datasets have different classes, so track the
    # classes supported in the dataset of this image.
    active_class_ids = np.zeros([dataset.num_classes], dtype=np.int32)
    source_class_ids = dataset.source_class_ids[dataset.image_info[image_id]["source"]]
    active_class_ids[source_class_ids] = 1

    # Resize masks to smaller size to reduce memory usage
    if use_mini_mask:
        mask = utils.minimize_mask(bbox, mask, config.MINI_MASK_SHAPE)

    # Image meta data
    image_meta = compose_image_meta(image_id, original_shape, image.shape,
                                    window, scale, active_class_ids)

    return image, image_meta, class_ids, bbox, mask
def build_detection_targets(rpn_rois, gt_class_ids, gt_boxes, gt_masks, config):
    """Generate targets for training Stage 2 classifier and mask heads.
    This is not used in normal training. It's useful for debugging or to train
    the Mask RCNN heads without using the RPN head.

    Inputs:
    rpn_rois: [N, (y1, x1, y2, x2)] proposal boxes.
    gt_class_ids: [instance count] Integer class IDs
    gt_boxes: [instance count, (y1, x1, y2, x2)]
    gt_masks: [height, width, instance count] Ground truth masks. Can be full
              size or mini-masks.

    Returns:
    rois: [TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)]
    class_ids: [TRAIN_ROIS_PER_IMAGE]. Integer class IDs.
    bboxes: [TRAIN_ROIS_PER_IMAGE, NUM_CLASSES, (y, x, log(h), log(w))]. Class-specific
            bbox refinements.
    masks: [TRAIN_ROIS_PER_IMAGE, height, width, NUM_CLASSES). Class specific masks cropped
           to bbox boundaries and resized to neural network output size.

    Raises AssertionError if the inputs have wrong dtypes, if the image has
    no positive instances, or if the final sample count cannot be filled.
    """
    assert rpn_rois.shape[0] > 0
    assert gt_class_ids.dtype == np.int32, "Expected int but got {}".format(
        gt_class_ids.dtype)
    assert gt_boxes.dtype == np.int32, "Expected int but got {}".format(
        gt_boxes.dtype)
    assert gt_masks.dtype == np.bool_, "Expected bool but got {}".format(
        gt_masks.dtype)

    # It's common to add GT Boxes to ROIs but we don't do that here because
    # according to XinLei Chen's paper, it doesn't help.

    # Trim empty padding in gt_boxes and gt_masks parts.
    # GT arrays are zero-padded; class id > 0 marks a real instance.
    instance_ids = np.where(gt_class_ids > 0)[0]
    assert instance_ids.shape[0] > 0, "Image must contain instances."
    gt_class_ids = gt_class_ids[instance_ids]
    gt_boxes = gt_boxes[instance_ids]
    gt_masks = gt_masks[:, :, instance_ids]

    # Compute areas of ROIs and ground truth boxes.
    rpn_roi_area = (rpn_rois[:, 2] - rpn_rois[:, 0]) * \
        (rpn_rois[:, 3] - rpn_rois[:, 1])
    gt_box_area = (gt_boxes[:, 2] - gt_boxes[:, 0]) * \
        (gt_boxes[:, 3] - gt_boxes[:, 1])

    # Compute overlaps [rpn_rois, gt_boxes]
    overlaps = np.zeros((rpn_rois.shape[0], gt_boxes.shape[0]))
    for i in range(overlaps.shape[1]):
        gt = gt_boxes[i]
        overlaps[:, i] = utils.compute_iou(
            gt, rpn_rois, gt_box_area[i], rpn_roi_area)

    # Assign ROIs to GT boxes (each ROI gets its best-overlapping GT box)
    rpn_roi_iou_argmax = np.argmax(overlaps, axis=1)
    rpn_roi_iou_max = overlaps[np.arange(
        overlaps.shape[0]), rpn_roi_iou_argmax]
    # GT box assigned to each ROI
    rpn_roi_gt_boxes = gt_boxes[rpn_roi_iou_argmax]
    rpn_roi_gt_class_ids = gt_class_ids[rpn_roi_iou_argmax]

    # Positive ROIs are those with >= 0.5 IoU with a GT box.
    fg_ids = np.where(rpn_roi_iou_max > 0.5)[0]

    # Negative ROIs are those with max IoU 0.1-0.5 (hard example mining)
    # TODO: To hard example mine or not to hard example mine, that's the question
    # bg_ids = np.where((rpn_roi_iou_max >= 0.1) & (rpn_roi_iou_max < 0.5))[0]
    bg_ids = np.where(rpn_roi_iou_max < 0.5)[0]

    # Subsample ROIs. Aim for 33% foreground (ROI_POSITIVE_RATIO).
    # FG
    fg_roi_count = int(config.TRAIN_ROIS_PER_IMAGE * config.ROI_POSITIVE_RATIO)
    if fg_ids.shape[0] > fg_roi_count:
        keep_fg_ids = np.random.choice(fg_ids, fg_roi_count, replace=False)
    else:
        keep_fg_ids = fg_ids
    # BG
    remaining = config.TRAIN_ROIS_PER_IMAGE - keep_fg_ids.shape[0]
    if bg_ids.shape[0] > remaining:
        keep_bg_ids = np.random.choice(bg_ids, remaining, replace=False)
    else:
        keep_bg_ids = bg_ids
    # Combine indices of ROIs to keep
    keep = np.concatenate([keep_fg_ids, keep_bg_ids])
    # Need more?
    remaining = config.TRAIN_ROIS_PER_IMAGE - keep.shape[0]
    if remaining > 0:
        # Looks like we don't have enough samples to maintain the desired
        # balance. Reduce requirements and fill in the rest. This is
        # likely different from the Mask RCNN paper.

        # There is a small chance we have neither fg nor bg samples.
        if keep.shape[0] == 0:
            # Pick bg regions with easier IoU threshold
            # NOTE(review): the comment promises an "easier" threshold but the
            # code repeats the same < 0.5 cut used above — confirm intent.
            bg_ids = np.where(rpn_roi_iou_max < 0.5)[0]
            assert bg_ids.shape[0] >= remaining
            keep_bg_ids = np.random.choice(bg_ids, remaining, replace=False)
            assert keep_bg_ids.shape[0] == remaining
            keep = np.concatenate([keep, keep_bg_ids])
        else:
            # Fill the rest with repeated bg rois.
            # NOTE(review): np.random.choice raises if keep_bg_ids is empty
            # (possible when only fg samples exist) — verify this case cannot
            # occur with real proposals.
            keep_extra_ids = np.random.choice(
                keep_bg_ids, remaining, replace=True)
            keep = np.concatenate([keep, keep_extra_ids])
    assert keep.shape[0] == config.TRAIN_ROIS_PER_IMAGE, \
        "keep doesn't match ROI batch size {}, {}".format(
            keep.shape[0], config.TRAIN_ROIS_PER_IMAGE)

    # Reset the gt boxes assigned to BG ROIs.
    rpn_roi_gt_boxes[keep_bg_ids, :] = 0
    rpn_roi_gt_class_ids[keep_bg_ids] = 0

    # For each kept ROI, assign a class_id, and for FG ROIs also add bbox refinement.
    rois = rpn_rois[keep]
    roi_gt_boxes = rpn_roi_gt_boxes[keep]
    roi_gt_class_ids = rpn_roi_gt_class_ids[keep]
    roi_gt_assignment = rpn_roi_iou_argmax[keep]

    # Class-aware bbox deltas. [y, x, log(h), log(w)]
    bboxes = np.zeros((config.TRAIN_ROIS_PER_IMAGE,
                       config.NUM_CLASSES, 4), dtype=np.float32)
    pos_ids = np.where(roi_gt_class_ids > 0)[0]
    bboxes[pos_ids, roi_gt_class_ids[pos_ids]] = utils.box_refinement(
        rois[pos_ids], roi_gt_boxes[pos_ids, :4])
    # Normalize bbox refinements
    bboxes /= config.BBOX_STD_DEV

    # Generate class-specific target masks (one channel per class,
    # only the ROI's own class channel is filled).
    masks = np.zeros((config.TRAIN_ROIS_PER_IMAGE, config.MASK_SHAPE[0], config.MASK_SHAPE[1], config.NUM_CLASSES),
                     dtype=np.float32)
    for i in pos_ids:
        class_id = roi_gt_class_ids[i]
        assert class_id > 0, "class id must be greater than 0"
        gt_id = roi_gt_assignment[i]
        class_mask = gt_masks[:, :, gt_id]

        if config.USE_MINI_MASK:
            # Create a mask placeholder, the size of the image
            placeholder = np.zeros(config.IMAGE_SHAPE[:2], dtype=bool)
            # GT box
            gt_y1, gt_x1, gt_y2, gt_x2 = gt_boxes[gt_id]
            gt_w = gt_x2 - gt_x1
            gt_h = gt_y2 - gt_y1
            # Resize mini mask to size of GT box
            placeholder[gt_y1:gt_y2, gt_x1:gt_x2] = \
                np.round(utils.resize(class_mask, (gt_h, gt_w))).astype(bool)
            # Place the mini batch in the placeholder
            class_mask = placeholder

        # Pick part of the mask and resize it
        y1, x1, y2, x2 = rois[i].astype(np.int32)
        m = class_mask[y1:y2, x1:x2]
        mask = utils.resize(m, config.MASK_SHAPE)
        masks[i, :, :, class_id] = mask

    return rois, roi_gt_class_ids, bboxes, masks
def build_rpn_targets(image_shape, anchors, gt_class_ids, gt_boxes, config):
    """Given the anchors and GT boxes, compute overlaps and identify positive
    anchors and deltas to refine them to match their corresponding GT boxes.

    anchors: [num_anchors, (y1, x1, y2, x2)]
    gt_class_ids: [num_gt_boxes] Integer class IDs.
    gt_boxes: [num_gt_boxes, (y1, x1, y2, x2)]

    Returns:
    rpn_match: [N] (int32) matches between anchors and GT boxes.
               1 = positive anchor, -1 = negative anchor, 0 = neutral
    rpn_bbox: [N, (dy, dx, log(dh), log(dw))] Anchor bbox deltas.
    """
    # RPN Match: 1 = positive anchor, -1 = negative anchor, 0 = neutral
    rpn_match = np.zeros([anchors.shape[0]], dtype=np.int32)
    # RPN bounding boxes: [max anchors per image, (dy, dx, log(dh), log(dw))]
    rpn_bbox = np.zeros((config.RPN_TRAIN_ANCHORS_PER_IMAGE, 4))

    # Handle COCO crowds
    # A crowd box in COCO is a bounding box around several instances. Exclude
    # them from training. A crowd box is given a negative class ID.
    crowd_ix = np.where(gt_class_ids < 0)[0]
    if crowd_ix.shape[0] > 0:
        # Filter out crowds from ground truth class IDs and boxes
        non_crowd_ix = np.where(gt_class_ids > 0)[0]
        crowd_boxes = gt_boxes[crowd_ix]
        gt_class_ids = gt_class_ids[non_crowd_ix]
        gt_boxes = gt_boxes[non_crowd_ix]
        # Compute overlaps with crowd boxes [anchors, crowds]
        crowd_overlaps = utils.compute_overlaps(anchors, crowd_boxes)
        crowd_iou_max = np.amax(crowd_overlaps, axis=1)
        # Anchors touching a crowd box are kept neutral, not negative.
        no_crowd_bool = (crowd_iou_max < 0.001)
    else:
        # All anchors don't intersect a crowd
        no_crowd_bool = np.ones([anchors.shape[0]], dtype=bool)

    # Compute overlaps [num_anchors, num_gt_boxes]
    overlaps = utils.compute_overlaps(anchors, gt_boxes)

    # Match anchors to GT Boxes
    # If an anchor overlaps a GT box with IoU >= 0.7 then it's positive.
    # If an anchor overlaps a GT box with IoU < 0.3 then it's negative.
    # Neutral anchors are those that don't match the conditions above,
    # and they don't influence the loss function.
    # However, don't keep any GT box unmatched (rare, but happens). Instead,
    # match it to the closest anchor (even if its max IoU is < 0.3).
    #
    # 1. Set negative anchors first. They get overwritten below if a GT box is
    # matched to them. Skip boxes in crowd areas.
    anchor_iou_argmax = np.argmax(overlaps, axis=1)
    anchor_iou_max = overlaps[np.arange(overlaps.shape[0]), anchor_iou_argmax]
    rpn_match[(anchor_iou_max < 0.3) & (no_crowd_bool)] = -1
    # 2. Set an anchor for each GT box (regardless of IoU value).
    # If multiple anchors have the same IoU match all of them
    gt_iou_argmax = np.argwhere(overlaps == np.max(overlaps, axis=0))[:, 0]
    rpn_match[gt_iou_argmax] = 1
    # 3. Set anchors with high overlap as positive.
    rpn_match[anchor_iou_max >= 0.7] = 1

    # Subsample to balance positive and negative anchors
    # Don't let positives be more than half the anchors
    ids = np.where(rpn_match == 1)[0]
    extra = len(ids) - (config.RPN_TRAIN_ANCHORS_PER_IMAGE // 2)
    if extra > 0:
        # Reset the extra ones to neutral
        ids = np.random.choice(ids, extra, replace=False)
        rpn_match[ids] = 0
    # Same for negative proposals
    ids = np.where(rpn_match == -1)[0]
    extra = len(ids) - (config.RPN_TRAIN_ANCHORS_PER_IMAGE -
                        np.sum(rpn_match == 1))
    if extra > 0:
        # Reset the extra ones to neutral
        ids = np.random.choice(ids, extra, replace=False)
        rpn_match[ids] = 0

    # For positive anchors, compute shift and scale needed to transform them
    # to match the corresponding GT boxes.
    ids = np.where(rpn_match == 1)[0]
    ix = 0  # index into rpn_bbox
    # TODO: use box_refinement() rather than duplicating the code here
    for i, a in zip(ids, anchors[ids]):
        # Closest gt box (it might have IoU < 0.7)
        gt = gt_boxes[anchor_iou_argmax[i]]

        # Convert coordinates to center plus width/height.
        # GT Box
        gt_h = gt[2] - gt[0]
        gt_w = gt[3] - gt[1]
        gt_center_y = gt[0] + 0.5 * gt_h
        gt_center_x = gt[1] + 0.5 * gt_w
        # Anchor
        a_h = a[2] - a[0]
        a_w = a[3] - a[1]
        a_center_y = a[0] + 0.5 * a_h
        a_center_x = a[1] + 0.5 * a_w

        # Compute the bbox refinement that the RPN should predict.
        rpn_bbox[ix] = [
            (gt_center_y - a_center_y) / a_h,
            (gt_center_x - a_center_x) / a_w,
            np.log(gt_h / a_h),
            np.log(gt_w / a_w),
        ]
        # Normalize
        rpn_bbox[ix] /= config.RPN_BBOX_STD_DEV
        ix += 1

    return rpn_match, rpn_bbox
def generate_random_rois(image_shape, count, gt_class_ids, gt_boxes):
    """Generates ROI proposals similar to what a region proposal network
    would generate.

    image_shape: [Height, Width, Depth]
    count: Number of ROIs to generate
    gt_class_ids: [N] Integer ground truth class IDs
    gt_boxes: [N, (y1, x1, y2, x2)] Ground truth boxes in pixels.

    Returns: [count, (y1, x1, y2, x2)] ROI boxes in pixels.
    """
    def sample_boxes(y_lo, y_hi, x_lo, x_hi, n):
        # Draw twice as many candidate coordinate pairs as needed, discard
        # pairs that would produce a (near) zero-area box, and retry until
        # exactly n valid boxes remain.
        while True:
            ys = np.random.randint(y_lo, y_hi, (n * 2, 2))
            xs = np.random.randint(x_lo, x_hi, (n * 2, 2))
            min_side = 1
            ys = ys[np.abs(ys[:, 0] - ys[:, 1]) >= min_side][:n]
            xs = xs[np.abs(xs[:, 0] - xs[:, 1]) >= min_side][:n]
            if ys.shape[0] == n and xs.shape[0] == n:
                break
        # Sort each pair so the smaller coordinate comes first, then
        # assemble boxes in (y1, x1, y2, x2) order.
        x1, x2 = np.split(np.sort(xs, axis=1), 2, axis=1)
        y1, y2 = np.split(np.sort(ys, axis=1), 2, axis=1)
        return np.hstack([y1, x1, y2, x2])

    rois = np.zeros((count, 4), dtype=np.int32)

    # 90% of the ROIs are sampled around GT boxes, split evenly per box.
    rois_per_box = int(0.9 * count / gt_boxes.shape[0])
    for box_idx in range(gt_boxes.shape[0]):
        gt_y1, gt_x1, gt_y2, gt_x2 = gt_boxes[box_idx]
        box_h = gt_y2 - gt_y1
        box_w = gt_x2 - gt_x1
        # Sampling window: the GT box expanded by its own size on each
        # side, clipped to the image.
        y_lo = max(gt_y1 - box_h, 0)
        y_hi = min(gt_y2 + box_h, image_shape[0])
        x_lo = max(gt_x1 - box_w, 0)
        x_hi = min(gt_x2 + box_w, image_shape[1])
        start = rois_per_box * box_idx
        rois[start:start + rois_per_box] = sample_boxes(
            y_lo, y_hi, x_lo, x_hi, rois_per_box)

    # The remaining ~10% are sampled anywhere in the image.
    remaining_count = count - (rois_per_box * gt_boxes.shape[0])
    rois[-remaining_count:] = sample_boxes(
        0, image_shape[0], 0, image_shape[1], remaining_count)
    return rois
class DataGenerator(KU.Sequence):
    """An iterable that returns images and corresponding target class ids,
    bounding box deltas, and masks.
    It inherits from keras.utils.Sequence to avoid data redundancy when multiprocessing=True.

    dataset: The Dataset object to pick data from
    config: The model config object
    shuffle: If True, shuffles the samples before every epoch
    augmentation: Optional. An imgaug (https://github.com/aleju/imgaug) augmentation.
        For example, passing imgaug.augmenters.Fliplr(0.5) flips images
        right/left 50% of the time.
    random_rois: If > 0 then generate proposals to be used to train the
                 network classifier and mask heads. Useful if training
                 the Mask RCNN part without the RPN.
    batch_size: How many images to return in each call
    detection_targets: If True, generate detection targets (class IDs, bbox
        deltas, and masks). Typically for debugging or visualizations because
        in training detection targets are generated by DetectionTargetLayer.

    Returns a Python iterable. Upon calling __getitem__() on it, the
    iterable returns two lists, inputs and outputs. The contents
    of the lists differ depending on the received arguments:
    inputs list:
    - images: [batch, H, W, C]
    - image_meta: [batch, (meta data)] Image details. See compose_image_meta()
    - rpn_match: [batch, N] Integer (1=positive anchor, -1=negative, 0=neutral)
    - rpn_bbox: [batch, N, (dy, dx, log(dh), log(dw))] Anchor bbox deltas.
    - gt_class_ids: [batch, MAX_GT_INSTANCES] Integer class IDs
    - gt_boxes: [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)]
    - gt_masks: [batch, height, width, MAX_GT_INSTANCES]. The height and width
                are those of the image unless use_mini_mask is True, in which
                case they are defined in MINI_MASK_SHAPE.

    outputs list: Usually empty in regular training. But if detection_targets
        is True then the outputs list contains target class_ids, bbox deltas,
        and masks.
    """

    def __init__(self, dataset, config, shuffle=True, augmentation=None,
                 random_rois=0, batch_size=1, detection_targets=False):
        self.dataset = dataset
        self.config = config
        self.shuffle = shuffle
        self.augmentation = augmentation
        self.random_rois = random_rois
        self.batch_size = batch_size
        self.detection_targets = detection_targets
        # Copy so in-place shuffling doesn't mutate the dataset's own array.
        self.image_ids = np.copy(dataset.image_ids)

        # Anchors
        # [anchor_count, (y1, x1, y2, x2)] — computed once; identical for
        # every image because all images share config.IMAGE_SHAPE.
        self.backbone_shapes = compute_backbone_shapes(config, config.IMAGE_SHAPE)
        self.anchors = utils.generate_pyramid_anchors(config.RPN_ANCHOR_SCALES,
                                                      config.RPN_ANCHOR_RATIOS,
                                                      self.backbone_shapes,
                                                      config.BACKBONE_STRIDES,
                                                      config.RPN_ANCHOR_STRIDE)

    def __len__(self):
        # Number of batches per epoch.
        return int(np.ceil(len(self.image_ids) / float(self.batch_size)))

    def __getitem__(self, idx):
        # NOTE(review): `idx` is not used below — `image_index` restarts at -1
        # on every call, so each batch is drawn starting from image_ids[0]
        # (reshuffled when shuffle=True). Confirm this is intentional.
        b = 0  # batch item index
        image_index = -1
        while b < self.batch_size:
            # Increment index to pick next image. Shuffle if at the start of an epoch.
            image_index = (image_index + 1) % len(self.image_ids)
            if self.shuffle and image_index == 0:
                np.random.shuffle(self.image_ids)

            # Get GT bounding boxes and masks for image.
            image_id = self.image_ids[image_index]
            image, image_meta, gt_class_ids, gt_boxes, gt_masks = \
                load_image_gt(self.dataset, self.config, image_id,
                              augmentation=self.augmentation,
                              use_mini_mask=self.config.USE_MINI_MASK)

            # Skip images that have no instances. This can happen in cases
            # where we train on a subset of classes and the image doesn't
            # have any of the classes we care about.
            if not np.any(gt_class_ids > 0):
                continue

            # RPN Targets
            rpn_match, rpn_bbox = build_rpn_targets(image.shape, self.anchors,
                                                    gt_class_ids, gt_boxes, self.config)

            # Mask R-CNN Targets
            if self.random_rois:
                rpn_rois = generate_random_rois(
                    image.shape, self.random_rois, gt_class_ids, gt_boxes)
                if self.detection_targets:
                    rois, mrcnn_class_ids, mrcnn_bbox, mrcnn_mask = \
                        build_detection_targets(
                            rpn_rois, gt_class_ids, gt_boxes, gt_masks, self.config)

            # Init batch arrays (lazily, on the first image of the batch,
            # so shapes/dtypes can be taken from the actual data).
            if b == 0:
                batch_image_meta = np.zeros(
                    (self.batch_size,) + image_meta.shape, dtype=image_meta.dtype)
                batch_rpn_match = np.zeros(
                    [self.batch_size, self.anchors.shape[0], 1], dtype=rpn_match.dtype)
                batch_rpn_bbox = np.zeros(
                    [self.batch_size, self.config.RPN_TRAIN_ANCHORS_PER_IMAGE, 4], dtype=rpn_bbox.dtype)
                batch_images = np.zeros(
                    (self.batch_size,) + image.shape, dtype=np.float32)
                batch_gt_class_ids = np.zeros(
                    (self.batch_size, self.config.MAX_GT_INSTANCES), dtype=np.int32)
                batch_gt_boxes = np.zeros(
                    (self.batch_size, self.config.MAX_GT_INSTANCES, 4), dtype=np.int32)
                batch_gt_masks = np.zeros(
                    (self.batch_size, gt_masks.shape[0], gt_masks.shape[1],
                     self.config.MAX_GT_INSTANCES), dtype=gt_masks.dtype)
                if self.random_rois:
                    batch_rpn_rois = np.zeros(
                        (self.batch_size, rpn_rois.shape[0], 4), dtype=rpn_rois.dtype)
                    if self.detection_targets:
                        batch_rois = np.zeros(
                            (self.batch_size,) + rois.shape, dtype=rois.dtype)
                        batch_mrcnn_class_ids = np.zeros(
                            (self.batch_size,) + mrcnn_class_ids.shape, dtype=mrcnn_class_ids.dtype)
                        batch_mrcnn_bbox = np.zeros(
                            (self.batch_size,) + mrcnn_bbox.shape, dtype=mrcnn_bbox.dtype)
                        batch_mrcnn_mask = np.zeros(
                            (self.batch_size,) + mrcnn_mask.shape, dtype=mrcnn_mask.dtype)

            # If more instances than fits in the array, sub-sample from them.
            if gt_boxes.shape[0] > self.config.MAX_GT_INSTANCES:
                ids = np.random.choice(
                    np.arange(gt_boxes.shape[0]), self.config.MAX_GT_INSTANCES, replace=False)
                gt_class_ids = gt_class_ids[ids]
                gt_boxes = gt_boxes[ids]
                gt_masks = gt_masks[:, :, ids]

            # Add to batch
            batch_image_meta[b] = image_meta
            batch_rpn_match[b] = rpn_match[:, np.newaxis]
            batch_rpn_bbox[b] = rpn_bbox
            batch_images[b] = mold_image(image.astype(np.float32), self.config)
            batch_gt_class_ids[b, :gt_class_ids.shape[0]] = gt_class_ids
            batch_gt_boxes[b, :gt_boxes.shape[0]] = gt_boxes
            batch_gt_masks[b, :, :, :gt_masks.shape[-1]] = gt_masks
            if self.random_rois:
                batch_rpn_rois[b] = rpn_rois
                if self.detection_targets:
                    batch_rois[b] = rois
                    batch_mrcnn_class_ids[b] = mrcnn_class_ids
                    batch_mrcnn_bbox[b] = mrcnn_bbox
                    batch_mrcnn_mask[b] = mrcnn_mask
            b += 1

        inputs = [batch_images, batch_image_meta, batch_rpn_match, batch_rpn_bbox,
                  batch_gt_class_ids, batch_gt_boxes, batch_gt_masks]
        outputs = []

        if self.random_rois:
            inputs.extend([batch_rpn_rois])
            if self.detection_targets:
                inputs.extend([batch_rois])
                # Keras requires that output and targets have the same number of dimensions
                batch_mrcnn_class_ids = np.expand_dims(
                    batch_mrcnn_class_ids, -1)
                outputs.extend(
                    [batch_mrcnn_class_ids, batch_mrcnn_bbox, batch_mrcnn_mask])

        return inputs, outputs
############################################################
# MaskRCNN Class
############################################################
class MaskRCNN():
"""Encapsulates the Mask RCNN model functionality.
The actual Keras model is in the keras_model property.
"""
def __init__(self, mode, config, model_dir):
"""
mode: Either "training" or "inference"
config: A Sub-class of the Config class
model_dir: Directory to save training logs and trained weights
"""
assert mode in ['training', 'inference']
self.mode = mode
self.config = config
self.model_dir = model_dir
self.set_log_dir()
self.keras_model = self.build(mode=mode, config=config)
    def build(self, mode, config):
        """Build Mask R-CNN architecture.
        input_shape: The shape of the input image.
        mode: Either "training" or "inference". The inputs and
            outputs of the model differ accordingly.

        Returns a keras Model (wrapped in ParallelModel when
        config.GPU_COUNT > 1).
        """
        assert mode in ['training', 'inference']

        # Image size must be dividable by 2 multiple times
        # (6 downscaling steps in the backbone/FPN).
        h, w = config.IMAGE_SHAPE[:2]
        if h / 2**6 != int(h / 2**6) or w / 2**6 != int(w / 2**6):
            raise Exception("Image size must be dividable by 2 at least 6 times "
                            "to avoid fractions when downscaling and upscaling."
                            "For example, use 256, 320, 384, 448, 512, ... etc. ")

        # Inputs
        input_image = KL.Input(
            shape=[None, None, config.IMAGE_SHAPE[2]], name="input_image")
        input_image_meta = KL.Input(shape=[config.IMAGE_META_SIZE],
                                    name="input_image_meta")
        if mode == "training":
            # RPN GT
            input_rpn_match = KL.Input(
                shape=[None, 1], name="input_rpn_match", dtype=tf.int32)
            input_rpn_bbox = KL.Input(
                shape=[None, 4], name="input_rpn_bbox", dtype=tf.float32)

            # Detection GT (class IDs, bounding boxes, and masks)
            # 1. GT Class IDs (zero padded)
            input_gt_class_ids = KL.Input(
                shape=[None], name="input_gt_class_ids", dtype=tf.int32)
            # 2. GT Boxes in pixels (zero padded)
            # [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)] in image coordinates
            input_gt_boxes = KL.Input(
                shape=[None, 4], name="input_gt_boxes", dtype=tf.float32)
            # Normalize coordinates
            gt_boxes = KL.Lambda(lambda x: norm_boxes_graph(
                x, K.shape(input_image)[1:3]))(input_gt_boxes)
            # 3. GT Masks (zero padded)
            # [batch, height, width, MAX_GT_INSTANCES]
            if config.USE_MINI_MASK:
                input_gt_masks = KL.Input(
                    shape=[config.MINI_MASK_SHAPE[0],
                           config.MINI_MASK_SHAPE[1], None],
                    name="input_gt_masks", dtype=bool)
            else:
                input_gt_masks = KL.Input(
                    shape=[config.IMAGE_SHAPE[0], config.IMAGE_SHAPE[1], None],
                    name="input_gt_masks", dtype=bool)
        elif mode == "inference":
            # Anchors in normalized coordinates
            input_anchors = KL.Input(shape=[None, 4], name="input_anchors")

        # Build the shared convolutional layers.
        # Bottom-up Layers
        # Returns a list of the last layers of each stage, 5 in total.
        # Don't create the head (stage 5), so we pick the 4th item in the list.
        if callable(config.BACKBONE):
            _, C2, C3, C4, C5 = config.BACKBONE(input_image, stage5=True,
                                                train_bn=config.TRAIN_BN)
        else:
            _, C2, C3, C4, C5 = resnet_graph(input_image, config.BACKBONE,
                                             stage5=True, train_bn=config.TRAIN_BN)
        # Top-down Layers
        # TODO: add assert to verify feature map sizes match what's in config
        P5 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c5p5')(C5)
        P4 = KL.Add(name="fpn_p4add")([
            KL.UpSampling2D(size=(2, 2), name="fpn_p5upsampled")(P5),
            KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c4p4')(C4)])
        P3 = KL.Add(name="fpn_p3add")([
            KL.UpSampling2D(size=(2, 2), name="fpn_p4upsampled")(P4),
            KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c3p3')(C3)])
        P2 = KL.Add(name="fpn_p2add")([
            KL.UpSampling2D(size=(2, 2), name="fpn_p3upsampled")(P3),
            KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c2p2')(C2)])
        # Attach 3x3 conv to all P layers to get the final feature maps.
        P2 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding="SAME", name="fpn_p2")(P2)
        P3 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding="SAME", name="fpn_p3")(P3)
        P4 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding="SAME", name="fpn_p4")(P4)
        P5 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding="SAME", name="fpn_p5")(P5)
        # P6 is used for the 5th anchor scale in RPN. Generated by
        # subsampling from P5 with stride of 2.
        P6 = KL.MaxPooling2D(pool_size=(1, 1), strides=2, name="fpn_p6")(P5)

        # Note that P6 is used in RPN, but not in the classifier heads.
        rpn_feature_maps = [P2, P3, P4, P5, P6]
        mrcnn_feature_maps = [P2, P3, P4, P5]

        # Anchors
        if mode == "training":
            anchors = self.get_anchors(config.IMAGE_SHAPE)
            # Duplicate across the batch dimension because Keras requires it
            # TODO: can this be optimized to avoid duplicating the anchors?
            anchors = np.broadcast_to(anchors, (config.BATCH_SIZE,) + anchors.shape)
            # A hack to get around Keras's bad support for constants
            # This class returns a constant layer
            class ConstLayer(KE.Layer):
                def __init__(self, x, name=None):
                    super(ConstLayer, self).__init__(name=name)
                    # Stores the anchor array as a tf.Variable; the layer
                    # ignores its input and always returns this value.
                    self.x = tf.Variable(x)

                def call(self, input):
                    return self.x

            anchors = ConstLayer(anchors, name="anchors")(input_image)
        else:
            anchors = input_anchors

        # RPN Model (shared weights, applied to each pyramid level)
        rpn = build_rpn_model(config.RPN_ANCHOR_STRIDE,
                              len(config.RPN_ANCHOR_RATIOS), config.TOP_DOWN_PYRAMID_SIZE)
        # Loop through pyramid layers
        layer_outputs = []  # list of lists
        for p in rpn_feature_maps:
            layer_outputs.append(rpn([p]))
        # Concatenate layer outputs
        # Convert from list of lists of level outputs to list of lists
        # of outputs across levels.
        # e.g. [[a1, b1, c1], [a2, b2, c2]] => [[a1, a2], [b1, b2], [c1, c2]]
        output_names = ["rpn_class_logits", "rpn_class", "rpn_bbox"]
        outputs = list(zip(*layer_outputs))
        outputs = [KL.Concatenate(axis=1, name=n)(list(o))
                   for o, n in zip(outputs, output_names)]

        rpn_class_logits, rpn_class, rpn_bbox = outputs

        # Generate proposals
        # Proposals are [batch, N, (y1, x1, y2, x2)] in normalized coordinates
        # and zero padded.
        proposal_count = config.POST_NMS_ROIS_TRAINING if mode == "training"\
            else config.POST_NMS_ROIS_INFERENCE
        rpn_rois = ProposalLayer(
            proposal_count=proposal_count,
            nms_threshold=config.RPN_NMS_THRESHOLD,
            name="ROI",
            config=config)([rpn_class, rpn_bbox, anchors])

        if mode == "training":
            # Class ID mask to mark class IDs supported by the dataset the image
            # came from.
            active_class_ids = KL.Lambda(
                lambda x: parse_image_meta_graph(x)["active_class_ids"]
            )(input_image_meta)

            if not config.USE_RPN_ROIS:
                # Ignore predicted ROIs and use ROIs provided as an input.
                input_rois = KL.Input(shape=[config.POST_NMS_ROIS_TRAINING, 4],
                                      name="input_roi", dtype=np.int32)
                # Normalize coordinates
                target_rois = KL.Lambda(lambda x: norm_boxes_graph(
                    x, K.shape(input_image)[1:3]))(input_rois)
            else:
                target_rois = rpn_rois

            # Generate detection targets
            # Subsamples proposals and generates target outputs for training
            # Note that proposal class IDs, gt_boxes, and gt_masks are zero
            # padded. Equally, returned rois and targets are zero padded.
            rois, target_class_ids, target_bbox, target_mask =\
                DetectionTargetLayer(config, name="proposal_targets")([
                    target_rois, input_gt_class_ids, gt_boxes, input_gt_masks])

            # Network Heads
            # TODO: verify that this handles zero padded ROIs
            mrcnn_class_logits, mrcnn_class, mrcnn_bbox =\
                fpn_classifier_graph(rois, mrcnn_feature_maps, input_image_meta,
                                     config.POOL_SIZE, config.NUM_CLASSES,
                                     train_bn=config.TRAIN_BN,
                                     fc_layers_size=config.FPN_CLASSIF_FC_LAYERS_SIZE)

            mrcnn_mask = build_fpn_mask_graph(rois, mrcnn_feature_maps,
                                              input_image_meta,
                                              config.MASK_POOL_SIZE,
                                              config.NUM_CLASSES,
                                              train_bn=config.TRAIN_BN)

            # TODO: clean up (use tf.identify if necessary)
            output_rois = KL.Lambda(lambda x: x * 1, name="output_rois")(rois)

            # Losses (each is a named layer so compile() can find it by name)
            rpn_class_loss = KL.Lambda(lambda x: rpn_class_loss_graph(*x), name="rpn_class_loss")(
                [input_rpn_match, rpn_class_logits])
            rpn_bbox_loss = KL.Lambda(lambda x: rpn_bbox_loss_graph(config, *x), name="rpn_bbox_loss")(
                [input_rpn_bbox, input_rpn_match, rpn_bbox])
            class_loss = KL.Lambda(lambda x: mrcnn_class_loss_graph(*x), name="mrcnn_class_loss")(
                [target_class_ids, mrcnn_class_logits, active_class_ids])
            bbox_loss = KL.Lambda(lambda x: mrcnn_bbox_loss_graph(*x), name="mrcnn_bbox_loss")(
                [target_bbox, target_class_ids, mrcnn_bbox])
            mask_loss = KL.Lambda(lambda x: mrcnn_mask_loss_graph(*x), name="mrcnn_mask_loss")(
                [target_mask, target_class_ids, mrcnn_mask])

            # Model
            inputs = [input_image, input_image_meta,
                      input_rpn_match, input_rpn_bbox, input_gt_class_ids, input_gt_boxes, input_gt_masks]
            if not config.USE_RPN_ROIS:
                inputs.append(input_rois)
            outputs = [rpn_class_logits, rpn_class, rpn_bbox,
                       mrcnn_class_logits, mrcnn_class, mrcnn_bbox, mrcnn_mask,
                       rpn_rois, output_rois,
                       rpn_class_loss, rpn_bbox_loss, class_loss, bbox_loss, mask_loss]
            model = KM.Model(inputs, outputs, name='mask_rcnn')
        else:
            # Network Heads
            # Proposal classifier and BBox regressor heads
            mrcnn_class_logits, mrcnn_class, mrcnn_bbox =\
                fpn_classifier_graph(rpn_rois, mrcnn_feature_maps, input_image_meta,
                                     config.POOL_SIZE, config.NUM_CLASSES,
                                     train_bn=config.TRAIN_BN,
                                     fc_layers_size=config.FPN_CLASSIF_FC_LAYERS_SIZE)

            # Detections
            # output is [batch, num_detections, (y1, x1, y2, x2, class_id, score)] in
            # normalized coordinates
            detections = DetectionLayer(config, name="mrcnn_detection")(
                [rpn_rois, mrcnn_class, mrcnn_bbox, input_image_meta])

            # Create masks for detections
            detection_boxes = KL.Lambda(lambda x: x[..., :4])(detections)
            mrcnn_mask = build_fpn_mask_graph(detection_boxes, mrcnn_feature_maps,
                                              input_image_meta,
                                              config.MASK_POOL_SIZE,
                                              config.NUM_CLASSES,
                                              train_bn=config.TRAIN_BN)

            model = KM.Model([input_image, input_image_meta, input_anchors],
                             [detections, mrcnn_class, mrcnn_bbox,
                              mrcnn_mask, rpn_rois, rpn_class, rpn_bbox],
                             name='mask_rcnn')

        # Add multi-GPU support.
        if config.GPU_COUNT > 1:
            from mrcnn.parallel_model import ParallelModel
            model = ParallelModel(model, config.GPU_COUNT)

        return model
def find_last(self):
"""Finds the last checkpoint file of the last trained model in the
model directory.
Returns:
The path of the last checkpoint file
"""
# Get directory names. Each directory corresponds to a model
dir_names = next(os.walk(self.model_dir))[1]
key = self.config.NAME.lower()
dir_names = filter(lambda f: f.startswith(key), dir_names)
dir_names = sorted(dir_names)
if not dir_names:
import errno
raise FileNotFoundError(
errno.ENOENT,
"Could not find model directory under {}".format(self.model_dir))
# Pick last directory
dir_name = os.path.join(self.model_dir, dir_names[-1])
# Find the last checkpoint
checkpoints = next(os.walk(dir_name))[2]
checkpoints = filter(lambda f: f.startswith("mask_rcnn"), checkpoints)
checkpoints = sorted(checkpoints)
if not checkpoints:
import errno
raise FileNotFoundError(
errno.ENOENT, "Could not find weight files in {}".format(dir_name))
checkpoint = os.path.join(dir_name, checkpoints[-1])
return checkpoint
def load_weights(self, filepath, by_name=False, exclude=None):
"""Modified version of the corresponding Keras function with
the addition of multi-GPU support and the ability to exclude
some layers from loading.
exclude: list of layer names to exclude
"""
import h5py
from tensorflow.python.keras.saving import hdf5_format
if exclude:
by_name = True
if h5py is None:
raise ImportError('`load_weights` requires h5py.')
with h5py.File(filepath, mode='r') as f:
if 'layer_names' not in f.attrs and 'model_weights' in f:
f = f['model_weights']
# In multi-GPU training, we wrap the model. Get layers
# of the inner model because they have the weights.
keras_model = self.keras_model
layers = keras_model.inner_model.layers if hasattr(keras_model, "inner_model")\
else keras_model.layers
# Exclude some layers
if exclude:
layers = filter(lambda l: l.name not in exclude, layers)
if by_name:
hdf5_format.load_weights_from_hdf5_group_by_name(f, layers)
else:
hdf5_format.load_weights_from_hdf5_group(f, layers)
# Update the log directory
self.set_log_dir(filepath)
def get_imagenet_weights(self):
"""Downloads ImageNet trained weights from Keras.
Returns path to weights file.
"""
from keras.utils.data_utils import get_file
TF_WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/'\
'releases/download/v0.2/'\
'resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5'
weights_path = get_file('resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5',
TF_WEIGHTS_PATH_NO_TOP,
cache_subdir='models',
md5_hash='a268eb855778b3df3c7506639542a6af')
return weights_path
def compile(self, learning_rate, momentum):
"""Gets the model ready for training. Adds losses, regularization, and
metrics. Then calls the Keras compile() function.
"""
# Optimizer object
optimizer = keras.optimizers.SGD(
lr=learning_rate, momentum=momentum,
clipnorm=self.config.GRADIENT_CLIP_NORM)
# Add Losses
# First, clear previously set losses to avoid duplication
self.keras_model._losses = []
self.keras_model._per_input_losses = {}
loss_names = [
"rpn_class_loss", "rpn_bbox_loss",
"mrcnn_class_loss", "mrcnn_bbox_loss", "mrcnn_mask_loss"]
for name in loss_names:
layer = self.keras_model.get_layer(name)
if layer.output in self.keras_model.losses:
continue
loss = (
tf.reduce_mean(layer.output, keepdims=True)
* self.config.LOSS_WEIGHTS.get(name, 1.))
self.keras_model.add_loss(loss)
# Add L2 Regularization
# Skip gamma and beta weights of batch normalization layers.
reg_losses = [
keras.regularizers.l2(self.config.WEIGHT_DECAY)(w) / tf.cast(tf.size(w), tf.float32)
for w in self.keras_model.trainable_weights
if 'gamma' not in w.name and 'beta' not in w.name]
self.keras_model.add_loss(tf.add_n(reg_losses))
# Compile
self.keras_model.compile(
optimizer=optimizer,
loss=[None] * len(self.keras_model.outputs))
# Add metrics for losses
for name in loss_names:
if name in self.keras_model.metrics_names:
continue
layer = self.keras_model.get_layer(name)
self.keras_model.metrics_names.append(name)
loss = (
tf.reduce_mean(layer.output, keepdims=True)
* self.config.LOSS_WEIGHTS.get(name, 1.))
self.keras_model.add_metric(loss, name, aggregation='mean')
def set_trainable(self, layer_regex, keras_model=None, indent=0, verbose=1):
"""Sets model layers as trainable if their names match
the given regular expression.
"""
# Print message on the first call (but not on recursive calls)
if verbose > 0 and keras_model is None:
log("Selecting layers to train")
keras_model = keras_model or self.keras_model
# In multi-GPU training, we wrap the model. Get layers
# of the inner model because they have the weights.
layers = keras_model.inner_model.layers if hasattr(keras_model, "inner_model")\
else keras_model.layers
for layer in layers:
# Is the layer a model?
if layer.__class__.__name__ == 'Model':
print("In model: ", layer.name)
self.set_trainable(
layer_regex, keras_model=layer, indent=indent + 4)
continue
if not layer.weights:
continue
# Is it trainable?
trainable = bool(re.fullmatch(layer_regex, layer.name))
# Update layer. If layer is a container, update inner layer.
if layer.__class__.__name__ == 'TimeDistributed':
layer.layer.trainable = trainable
else:
layer.trainable = trainable
# Print trainable layer names
if trainable and verbose > 0:
log("{}{:20} ({})".format(" " * indent, layer.name,
layer.__class__.__name__))
def set_log_dir(self, model_path=None):
"""Sets the model log directory and epoch counter.
model_path: If None, or a format different from what this code uses
then set a new log directory and start epochs from 0. Otherwise,
extract the log directory and the epoch counter from the file
name.
"""
# Set date and epoch counter as if starting a new model
self.epoch = 0
now = datetime.datetime.now()
# If we have a model path with date and epochs use them
if model_path:
# Continue from we left of. Get epoch and date from the file name
# A sample model path might look like:
# \path\to\logs\coco20171029T2315\mask_rcnn_coco_0001.h5 (Windows)
# /path/to/logs/coco20171029T2315/mask_rcnn_coco_0001.h5 (Linux)
regex = r".*[/\\][\w-]+(\d{4})(\d{2})(\d{2})T(\d{2})(\d{2})[/\\]mask\_rcnn\_[\w-]+(\d{4})\.h5"
# Use string for regex since we might want to use pathlib.Path as model_path
m = re.match(regex, str(model_path))
if m:
now = datetime.datetime(int(m.group(1)), int(m.group(2)), int(m.group(3)),
int(m.group(4)), int(m.group(5)))
# Epoch number in file is 1-based, and in Keras code it's 0-based.
# So, adjust for that then increment by one to start from the next epoch
self.epoch = int(m.group(6)) - 1 + 1
print('Re-starting from epoch %d' % self.epoch)
# Directory for training logs
self.log_dir = os.path.join(self.model_dir, "{}{:%Y%m%dT%H%M}".format(
self.config.NAME.lower(), now))
# Path to save after each epoch. Include placeholders that get filled by Keras.
self.checkpoint_path = os.path.join(self.log_dir, "mask_rcnn_{}_*epoch*.h5".format(
self.config.NAME.lower()))
self.checkpoint_path = self.checkpoint_path.replace(
"*epoch*", "{epoch:04d}")
    def train(self, train_dataset, val_dataset, learning_rate, epochs, layers,
              augmentation=None, custom_callbacks=None):
        """Train the model.
        train_dataset, val_dataset: Training and validation Dataset objects.
        learning_rate: The learning rate to train with
        epochs: Number of training epochs. Note that previous training epochs
            are considered to be done already, so this actually determines
            the epochs to train in total rather than in this particular
            call.
        layers: Allows selecting which layers to train. It can be:
            - A regular expression to match layer names to train
            - One of these predefined values:
              heads: The RPN, classifier and mask heads of the network
              all: All the layers
              3+: Train Resnet stage 3 and up
              4+: Train Resnet stage 4 and up
              5+: Train Resnet stage 5 and up
        augmentation: Optional. An imgaug (https://github.com/aleju/imgaug)
            augmentation. For example, passing imgaug.augmenters.Fliplr(0.5)
            flips images right/left 50% of the time. You can pass complex
            augmentations as well. This augmentation applies 50% of the
            time, and when it does it flips images right/left half the time
            and adds a Gaussian blur with a random sigma in range 0 to 5.

                augmentation = imgaug.augmenters.Sometimes(0.5, [
                    imgaug.augmenters.Fliplr(0.5),
                    imgaug.augmenters.GaussianBlur(sigma=(0.0, 5.0))
                ])
        custom_callbacks: Optional. Add custom callbacks to be called
            with the keras fit_generator method. Must be list of type
            keras.callbacks.
        """
        assert self.mode == "training", "Create model in training mode."
        # Pre-defined layer regular expressions
        layer_regex = {
            # all layers but the backbone
            "heads": r"(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)",
            # From a specific Resnet stage and up
            "3+": r"(res3.*)|(bn3.*)|(res4.*)|(bn4.*)|(res5.*)|(bn5.*)|(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)",
            "4+": r"(res4.*)|(bn4.*)|(res5.*)|(bn5.*)|(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)",
            "5+": r"(res5.*)|(bn5.*)|(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)",
            # All layers
            "all": ".*",
        }
        # Resolve a predefined keyword to its regex; anything else is
        # treated as a regex directly.
        if layers in layer_regex.keys():
            layers = layer_regex[layers]
        # Data generators
        train_generator = DataGenerator(train_dataset, self.config, shuffle=True,
                                        augmentation=augmentation,
                                        batch_size=self.config.BATCH_SIZE)
        val_generator = DataGenerator(val_dataset, self.config, shuffle=True,
                                      batch_size=self.config.BATCH_SIZE)
        # Create log_dir if it does not exist
        if not os.path.exists(self.log_dir):
            os.makedirs(self.log_dir)
        # Callbacks: TensorBoard logging + a weights-only checkpoint per epoch
        callbacks = [
            keras.callbacks.TensorBoard(log_dir=self.log_dir,
                                        histogram_freq=0, write_graph=True, write_images=False),
            keras.callbacks.ModelCheckpoint(self.checkpoint_path,
                                            verbose=0, save_weights_only=True),
        ]
        # Add custom callbacks to the list
        if custom_callbacks:
            callbacks += custom_callbacks
        # Train
        log("\nStarting at epoch {}. LR={}\n".format(self.epoch, learning_rate))
        log("Checkpoint Path: {}".format(self.checkpoint_path))
        self.set_trainable(layers)
        self.compile(learning_rate, self.config.LEARNING_MOMENTUM)
        # Work-around for Windows: Keras fails on Windows when using
        # multiprocessing workers. See discussion here:
        # https://github.com/matterport/Mask_RCNN/issues/13#issuecomment-353124009
        if os.name == 'nt':
            workers = 0
        else:
            workers = multiprocessing.cpu_count()
        self.keras_model.fit(
            train_generator,
            initial_epoch=self.epoch,
            epochs=epochs,
            steps_per_epoch=self.config.STEPS_PER_EPOCH,
            callbacks=callbacks,
            validation_data=val_generator,
            validation_steps=self.config.VALIDATION_STEPS,
            max_queue_size=100,
            workers=workers,
            use_multiprocessing=(1 < workers),
        )
        # `epochs` is a total target, so never move the counter backwards.
        self.epoch = max(self.epoch, epochs)
def mold_inputs(self, images):
"""Takes a list of images and modifies them to the format expected
as an input to the neural network.
images: List of image matrices [height,width,depth]. Images can have
different sizes.
Returns 3 Numpy matrices:
molded_images: [N, h, w, 3]. Images resized and normalized.
image_metas: [N, length of meta data]. Details about each image.
windows: [N, (y1, x1, y2, x2)]. The portion of the image that has the
original image (padding excluded).
"""
molded_images = []
image_metas = []
windows = []
for image in images:
# Resize image
# TODO: move resizing to mold_image()
molded_image, window, scale, padding, crop = utils.resize_image(
image,
min_dim=self.config.IMAGE_MIN_DIM,
min_scale=self.config.IMAGE_MIN_SCALE,
max_dim=self.config.IMAGE_MAX_DIM,
mode=self.config.IMAGE_RESIZE_MODE)
molded_image = mold_image(molded_image, self.config)
# Build image_meta
image_meta = compose_image_meta(
0, image.shape, molded_image.shape, window, scale,
np.zeros([self.config.NUM_CLASSES], dtype=np.int32))
# Append
molded_images.append(molded_image)
windows.append(window)
image_metas.append(image_meta)
# Pack into arrays
molded_images = np.stack(molded_images)
image_metas = np.stack(image_metas)
windows = np.stack(windows)
return molded_images, image_metas, windows
    def unmold_detections(self, detections, mrcnn_mask, original_image_shape,
                          image_shape, window):
        """Reformats the detections of one image from the format of the neural
        network output to a format suitable for use in the rest of the
        application.
        detections: [N, (y1, x1, y2, x2, class_id, score)] in normalized coordinates
        mrcnn_mask: [N, height, width, num_classes]
        original_image_shape: [H, W, C] Original image shape before resizing
        image_shape: [H, W, C] Shape of the image after resizing and padding
        window: [y1, x1, y2, x2] Pixel coordinates of box in the image where the real
                image is excluding the padding.
        Returns:
        boxes: [N, (y1, x1, y2, x2)] Bounding boxes in pixels
        class_ids: [N] Integer class IDs for each bounding box
        scores: [N] Float probability scores of the class_id
        masks: [height, width, num_instances] Instance masks
        """
        # How many detections do we have?
        # Detections array is padded with zeros. Find the first class_id == 0.
        zero_ix = np.where(detections[:, 4] == 0)[0]
        N = zero_ix[0] if zero_ix.shape[0] > 0 else detections.shape[0]
        # Extract boxes, class_ids, scores, and class-specific masks
        boxes = detections[:N, :4]
        class_ids = detections[:N, 4].astype(np.int32)
        scores = detections[:N, 5]
        # Fancy indexing: for each detection i, select the mask channel of
        # its predicted class, yielding [N, height, width].
        masks = mrcnn_mask[np.arange(N), :, :, class_ids]
        # Translate normalized coordinates in the resized image to pixel
        # coordinates in the original image before resizing
        window = utils.norm_boxes(window, image_shape[:2])
        wy1, wx1, wy2, wx2 = window
        shift = np.array([wy1, wx1, wy1, wx1])
        wh = wy2 - wy1  # window height
        ww = wx2 - wx1  # window width
        scale = np.array([wh, ww, wh, ww])
        # Convert boxes to normalized coordinates on the window
        boxes = np.divide(boxes - shift, scale)
        # Convert boxes to pixel coordinates on the original image
        boxes = utils.denorm_boxes(boxes, original_image_shape[:2])
        # Filter out detections with zero area. Happens in early training when
        # network weights are still random
        exclude_ix = np.where(
            (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]) <= 0)[0]
        if exclude_ix.shape[0] > 0:
            boxes = np.delete(boxes, exclude_ix, axis=0)
            class_ids = np.delete(class_ids, exclude_ix, axis=0)
            scores = np.delete(scores, exclude_ix, axis=0)
            masks = np.delete(masks, exclude_ix, axis=0)
            N = class_ids.shape[0]
        # Resize masks to original image size and set boundary threshold.
        full_masks = []
        for i in range(N):
            # Convert neural network mask to full size mask
            full_mask = utils.unmold_mask(masks[i], boxes[i], original_image_shape)
            full_masks.append(full_mask)
        # Stack along a new last axis; with no detections return an empty
        # [H, W, 0] array so downstream shape handling stays uniform.
        full_masks = np.stack(full_masks, axis=-1)\
            if full_masks else np.empty(original_image_shape[:2] + (0,))
        return boxes, class_ids, scores, full_masks
def detect(self, images, verbose=0):
"""Runs the detection pipeline.
images: List of images, potentially of different sizes.
Returns a list of dicts, one dict per image. The dict contains:
rois: [N, (y1, x1, y2, x2)] detection bounding boxes
class_ids: [N] int class IDs
scores: [N] float probability scores for the class IDs
masks: [H, W, N] instance binary masks
"""
assert self.mode == "inference", "Create model in inference mode."
assert len(
images) == self.config.BATCH_SIZE, "len(images) must be equal to BATCH_SIZE"
if verbose:
log("Processing {} images".format(len(images)))
for image in images:
log("image", image)
# Mold inputs to format expected by the neural network
molded_images, image_metas, windows = self.mold_inputs(images)
# Validate image sizes
# All images in a batch MUST be of the same size
image_shape = molded_images[0].shape
for g in molded_images[1:]:
assert g.shape == image_shape,\
"After resizing, all images must have the same size. Check IMAGE_RESIZE_MODE and image sizes."
# Anchors
anchors = self.get_anchors(image_shape)
# Duplicate across the batch dimension because Keras requires it
# TODO: can this be optimized to avoid duplicating the anchors?
anchors = np.broadcast_to(anchors, (self.config.BATCH_SIZE,) + anchors.shape)
if verbose:
log("molded_images", molded_images)
log("image_metas", image_metas)
log("anchors", anchors)
# Run object detection
detections, _, _, mrcnn_mask, _, _, _ =\
self.keras_model.predict([molded_images, image_metas, anchors], verbose=0)
# Process detections
results = []
for i, image in enumerate(images):
final_rois, final_class_ids, final_scores, final_masks =\
self.unmold_detections(detections[i], mrcnn_mask[i],
image.shape, molded_images[i].shape,
windows[i])
results.append({
"rois": final_rois,
"class_ids": final_class_ids,
"scores": final_scores,
"masks": final_masks,
})
return results
def detect_molded(self, molded_images, image_metas, verbose=0):
"""Runs the detection pipeline, but expect inputs that are
molded already. Used mostly for debugging and inspecting
the model.
molded_images: List of images loaded using load_image_gt()
image_metas: image meta data, also returned by load_image_gt()
Returns a list of dicts, one dict per image. The dict contains:
rois: [N, (y1, x1, y2, x2)] detection bounding boxes
class_ids: [N] int class IDs
scores: [N] float probability scores for the class IDs
masks: [H, W, N] instance binary masks
"""
assert self.mode == "inference", "Create model in inference mode."
assert len(molded_images) == self.config.BATCH_SIZE,\
"Number of images must be equal to BATCH_SIZE"
if verbose:
log("Processing {} images".format(len(molded_images)))
for image in molded_images:
log("image", image)
# Validate image sizes
# All images in a batch MUST be of the same size
image_shape = molded_images[0].shape
for g in molded_images[1:]:
assert g.shape == image_shape, "Images must have the same size"
# Anchors
anchors = self.get_anchors(image_shape)
# Duplicate across the batch dimension because Keras requires it
# TODO: can this be optimized to avoid duplicating the anchors?
anchors = np.broadcast_to(anchors, (self.config.BATCH_SIZE,) + anchors.shape)
if verbose:
log("molded_images", molded_images)
log("image_metas", image_metas)
log("anchors", anchors)
# Run object detection
detections, _, _, mrcnn_mask, _, _, _ =\
self.keras_model.predict([molded_images, image_metas, anchors], verbose=0)
# Process detections
results = []
for i, image in enumerate(molded_images):
window = [0, 0, image.shape[0], image.shape[1]]
final_rois, final_class_ids, final_scores, final_masks =\
self.unmold_detections(detections[i], mrcnn_mask[i],
image.shape, molded_images[i].shape,
window)
results.append({
"rois": final_rois,
"class_ids": final_class_ids,
"scores": final_scores,
"masks": final_masks,
})
return results
def get_anchors(self, image_shape):
"""Returns anchor pyramid for the given image size."""
backbone_shapes = compute_backbone_shapes(self.config, image_shape)
# Cache anchors and reuse if image shape is the same
if not hasattr(self, "_anchor_cache"):
self._anchor_cache = {}
if not tuple(image_shape) in self._anchor_cache:
# Generate Anchors
a = utils.generate_pyramid_anchors(
self.config.RPN_ANCHOR_SCALES,
self.config.RPN_ANCHOR_RATIOS,
backbone_shapes,
self.config.BACKBONE_STRIDES,
self.config.RPN_ANCHOR_STRIDE)
# Keep a copy of the latest anchors in pixel coordinates because
# it's used in inspect_model notebooks.
# TODO: Remove this after the notebook are refactored to not use it
self.anchors = a
# Normalize coordinates
self._anchor_cache[tuple(image_shape)] = utils.norm_boxes(a, image_shape[:2])
return self._anchor_cache[tuple(image_shape)]
def ancestor(self, tensor, name, checked=None):
"""Finds the ancestor of a TF tensor in the computation graph.
tensor: TensorFlow symbolic tensor.
name: Name of ancestor tensor to find
checked: For internal use. A list of tensors that were already
searched to avoid loops in traversing the graph.
"""
checked = checked if checked is not None else []
# Put a limit on how deep we go to avoid very long loops
if len(checked) > 500:
return None
# Convert name to a regex and allow matching a number prefix
# because Keras adds them automatically
if isinstance(name, str):
name = re.compile(name.replace("/", r"(\_\d+)*/"))
parents = tensor.op.inputs
for p in parents:
if p in checked:
continue
if bool(re.fullmatch(name, p.name)):
return p
checked.append(p)
a = self.ancestor(p, name, checked)
if a is not None:
return a
return None
def find_trainable_layer(self, layer):
"""If a layer is encapsulated by another layer, this function
digs through the encapsulation and returns the layer that holds
the weights.
"""
if layer.__class__.__name__ == 'TimeDistributed':
return self.find_trainable_layer(layer.layer)
return layer
def get_trainable_layers(self):
"""Returns a list of layers that have weights."""
layers = []
# Loop through all layers
for l in self.keras_model.layers:
# If layer is a wrapper, find inner trainable layer
l = self.find_trainable_layer(l)
# Include layer if it has weights
if l.get_weights():
layers.append(l)
return layers
    def run_graph(self, images, outputs, image_metas=None):
        """Runs a sub-set of the computation graph that computes the given
        outputs.
        image_metas: If provided, the images are assumed to be already
            molded (i.e. resized, padded, and normalized)
        outputs: List of tuples (name, tensor) to compute. The tensors are
            symbolic TensorFlow tensors and the names are for easy tracking.
        Returns an ordered dict of results. Keys are the names received in the
        input and values are Numpy arrays.
        """
        model = self.keras_model
        # Organize desired outputs into an ordered dict
        outputs = OrderedDict(outputs)
        for o in outputs.values():
            assert o is not None
        # Build a Keras function to run parts of the computation graph.
        # NOTE(review): `inputs` aliases model.inputs, so the += below
        # extends model.inputs in place; K.function(model.inputs, ...) then
        # sees the appended learning-phase tensor even though the local
        # `inputs` name is never used again. Relies on K.learning_phase /
        # uses_learning_phase, which are TF1-era APIs — confirm they still
        # exist in the pinned TF2 version.
        inputs = model.inputs
        if model.uses_learning_phase and not isinstance(K.learning_phase(), int):
            inputs += [K.learning_phase()]
        kf = K.function(model.inputs, list(outputs.values()))
        # Prepare inputs: mold raw images unless pre-molded metas were given.
        if image_metas is None:
            molded_images, image_metas, _ = self.mold_inputs(images)
        else:
            molded_images = images
        image_shape = molded_images[0].shape
        # Anchors
        anchors = self.get_anchors(image_shape)
        # Duplicate across the batch dimension because Keras requires it
        # TODO: can this be optimized to avoid duplicating the anchors?
        anchors = np.broadcast_to(anchors, (self.config.BATCH_SIZE,) + anchors.shape)
        model_in = [molded_images, image_metas, anchors]
        # Run inference; append 0. (test phase) when a learning-phase
        # placeholder was added above.
        if model.uses_learning_phase and not isinstance(K.learning_phase(), int):
            model_in.append(0.)
        outputs_np = kf(model_in)
        # Pack the generated Numpy arrays into a a dict and log the results.
        outputs_np = OrderedDict([(k, v)
                                  for k, v in zip(outputs.keys(), outputs_np)])
        for k, v in outputs_np.items():
            log(k, v)
        return outputs_np
############################################################
# Data Formatting
############################################################
def compose_image_meta(image_id, original_image_shape, image_shape,
                       window, scale, active_class_ids):
    """Pack attributes of an image into a single 1D array.
    image_id: An int ID of the image. Useful for debugging.
    original_image_shape: [H, W, C] before resizing or padding.
    image_shape: [H, W, C] after resizing and padding
    window: (y1, x1, y2, x2) in pixels. The area of the image where the real
            image is (excluding the padding)
    scale: The scaling factor applied to the original image (float32)
    active_class_ids: List of class_ids available in the dataset from which
        the image came. Useful if training on images from multiple datasets
        where not all classes are present in all datasets.
    """
    # Layout (indices): [0]=id, [1:4]=original shape, [4:7]=molded shape,
    # [7:11]=window (y1, x1, y2, x2), [11]=scale, [12:]=active class ids.
    components = [image_id]
    components.extend(original_image_shape)
    components.extend(image_shape)
    components.extend(window)
    components.append(scale)
    components.extend(active_class_ids)
    return np.array(components)
def parse_image_meta(meta):
    """Split a batch of packed image-meta arrays into components.
    See compose_image_meta() for the packed layout.
    meta: [batch, meta length] where meta length depends on NUM_CLASSES
    Returns a dict of the parsed values as typed Numpy arrays.
    """
    return {
        "image_id": meta[:, 0].astype(np.int32),
        "original_image_shape": meta[:, 1:4].astype(np.int32),
        "image_shape": meta[:, 4:7].astype(np.int32),
        # (y1, x1, y2, x2) window of the real image, in pixels
        "window": meta[:, 7:11].astype(np.int32),
        "scale": meta[:, 11].astype(np.float32),
        "active_class_ids": meta[:, 12:].astype(np.int32),
    }
def parse_image_meta_graph(meta):
    """Split a packed image-meta tensor into its components.
    See compose_image_meta() for the packed layout.
    meta: [batch, meta length] where meta length depends on NUM_CLASSES
    Returns a dict of the parsed tensors (no dtype casts, unlike
    parse_image_meta()).
    """
    parsed = {}
    parsed["image_id"] = meta[:, 0]
    parsed["original_image_shape"] = meta[:, 1:4]
    parsed["image_shape"] = meta[:, 4:7]
    # (y1, x1, y2, x2) window of the real image, in pixels
    parsed["window"] = meta[:, 7:11]
    parsed["scale"] = meta[:, 11]
    parsed["active_class_ids"] = meta[:, 12:]
    return parsed
def mold_image(images, config):
"""Expects an RGB image (or array of images) and subtracts
the mean pixel and converts it to float. Expects image
colors in RGB order.
"""
return images.astype(np.float32) - config.MEAN_PIXEL
def unmold_image(normalized_images, config):
    """Invert mold_image(): add the mean pixel back and return uint8."""
    restored = normalized_images + config.MEAN_PIXEL
    return restored.astype(np.uint8)
############################################################
# Miscellenous Graph Functions
############################################################
def trim_zeros_graph(boxes, name='trim_zeros'):
    """Remove zero-padding rows from a boxes matrix.
    boxes: [N, 4] matrix of boxes, often padded with all-zero rows.
    Returns (trimmed_boxes, non_zeros) where non_zeros is a [N] boolean
    mask marking the rows that were kept.
    """
    # A row is padding iff all four coordinates are zero.
    coord_sums = tf.reduce_sum(tf.abs(boxes), axis=1)
    non_zeros = tf.cast(coord_sums, tf.bool)
    kept = tf.boolean_mask(boxes, non_zeros, name=name)
    return kept, non_zeros
def batch_pack_graph(x, counts, num_rows):
    """Pick a different number of leading values from each row of x,
    as given by counts, and concatenate them into one flat tensor.
    """
    rows = [x[row, :counts[row]] for row in range(num_rows)]
    return tf.concat(rows, axis=0)
def norm_boxes_graph(boxes, shape):
    """Converts boxes from pixel coordinates to normalized coordinates.
    boxes: [..., (y1, x1, y2, x2)] in pixel coordinates
    shape: [..., (height, width)] in pixels
    Note: In pixel coordinates (y2, x2) is outside the box, while in
    normalized coordinates it is inside the box.
    Returns:
        [..., (y1, x1, y2, x2)] in normalized coordinates
    """
    height, width = tf.split(tf.cast(shape, tf.float32), 2)
    # Map the [0, side-1] pixel range onto [0, 1]; the (y2, x2) shift
    # accounts for the exclusive-vs-inclusive corner convention.
    scale = tf.concat([height, width, height, width], axis=-1) - tf.constant(1.0)
    shift = tf.constant([0., 0., 1., 1.])
    return tf.divide(boxes - shift, scale)
def denorm_boxes_graph(boxes, shape):
    """Converts boxes from normalized coordinates to pixel coordinates.
    boxes: [..., (y1, x1, y2, x2)] in normalized coordinates
    shape: [..., (height, width)] in pixels
    Note: In pixel coordinates (y2, x2) is outside the box, while in
    normalized coordinates it is inside the box.
    Returns:
        [..., (y1, x1, y2, x2)] in pixel coordinates
    """
    height, width = tf.split(tf.cast(shape, tf.float32), 2)
    # Inverse of norm_boxes_graph(): rescale then shift the far corner.
    scale = tf.concat([height, width, height, width], axis=-1) - tf.constant(1.0)
    shift = tf.constant([0., 0., 1., 1.])
    return tf.cast(tf.round(tf.multiply(boxes, scale) + shift), tf.int32)
| 44.256076 | 115 | 0.612203 |
import os
import datetime
import re
import math
from collections import OrderedDict
import multiprocessing
import numpy as np
import tensorflow as tf
import tensorflow.keras as keras
import tensorflow.keras.backend as K
import tensorflow.keras.layers as KL
import tensorflow.keras.layers as KE
import tensorflow.keras.utils as KU
import tensorflow.keras.models as KM
from mrcnn import utils
from distutils.version import LooseVersion
assert LooseVersion(tf.__version__) >= LooseVersion("2.0")
ther_nd(boxes, ix)
# Box indices for crop_and_resize.
box_indices = tf.cast(ix[:, 0], tf.int32)
# Keep track of which box is mapped to which level
box_to_level.append(ix)
# Stop gradient propogation to ROI proposals
level_boxes = tf.stop_gradient(level_boxes)
box_indices = tf.stop_gradient(box_indices)
# Crop and Resize
# From Mask R-CNN paper: "We sample four regular locations, so
# that we can evaluate either max or average pooling. In fact,
# interpolating only a single value at each bin center (without
# pooling) is nearly as effective."
#
# Here we use the simplified approach of a single value per bin,
# which is how it's done in tf.crop_and_resize()
pooled.append(tf.image.crop_and_resize(
feature_maps[i], level_boxes, box_indices, self.pool_shape,
method="bilinear"))
pooled = tf.concat(pooled, axis=0)
box_to_level = tf.concat(box_to_level, axis=0)
box_range = tf.expand_dims(tf.range(tf.shape(box_to_level)[0]), 1)
box_to_level = tf.concat([tf.cast(box_to_level, tf.int32), box_range],
axis=1)
sorting_tensor = box_to_level[:, 0] * 100000 + box_to_level[:, 1]
ix = tf.nn.top_k(sorting_tensor, k=tf.shape(
box_to_level)[0]).indices[::-1]
ix = tf.gather(box_to_level[:, 2], ix)
pooled = tf.gather(pooled, ix)
# Re-add the batch dimension
shape = tf.concat([tf.shape(boxes)[:2], tf.shape(pooled)[1:]], axis=0)
pooled = tf.reshape(pooled, shape)
return pooled
def compute_output_shape(self, input_shape):
return input_shape[0][:2] + self.pool_shape + (input_shape[2][-1], )
############################################################
# Detection Target Layer
############################################################
def overlaps_graph(boxes1, boxes2):
    """Compute the IoU overlaps matrix between two sets of boxes.
    boxes1, boxes2: [N, (y1, x1, y2, x2)].
    Returns an overlaps tensor of shape [N1, N2].
    """
    n1 = tf.shape(boxes1)[0]
    n2 = tf.shape(boxes2)[0]
    # Tile boxes2 and repeat boxes1 so every pair lines up row-wise.
    # TF has no np.repeat() equivalent, so simulate it with tile+reshape.
    b1 = tf.reshape(tf.tile(tf.expand_dims(boxes1, 1), [1, 1, n2]), [-1, 4])
    b2 = tf.tile(boxes2, [n1, 1])
    # Split coordinates and compute the intersection rectangle.
    b1_y1, b1_x1, b1_y2, b1_x2 = tf.split(b1, 4, axis=1)
    b2_y1, b2_x1, b2_y2, b2_x2 = tf.split(b2, 4, axis=1)
    inter_y1 = tf.maximum(b1_y1, b2_y1)
    inter_x1 = tf.maximum(b1_x1, b2_x1)
    inter_y2 = tf.minimum(b1_y2, b2_y2)
    inter_x2 = tf.minimum(b1_x2, b2_x2)
    intersection = tf.maximum(inter_x2 - inter_x1, 0) * \
        tf.maximum(inter_y2 - inter_y1, 0)
    # IoU = intersection / union.
    area1 = (b1_y2 - b1_y1) * (b1_x2 - b1_x1)
    area2 = (b2_y2 - b2_y1) * (b2_x2 - b2_x1)
    iou = intersection / (area1 + area2 - intersection)
    return tf.reshape(iou, [n1, n2])
def detection_targets_graph(proposals, gt_class_ids, gt_boxes, gt_masks, config):
    """Generate detection targets for one image: subsample proposals and
    match them to ground-truth class IDs, box deltas, and masks.

    proposals: [N, (y1, x1, y2, x2)] boxes, possibly zero-padded.
    gt_class_ids: ground-truth class IDs; negative IDs mark crowd boxes,
        which are excluded from the positive/negative pools.
    gt_boxes: ground-truth boxes [M, (y1, x1, y2, x2)], zero-padded.
    gt_masks: ground-truth masks with instances on the last axis.
    config: model config (TRAIN_ROIS_PER_IMAGE, ROI_POSITIVE_RATIO,
        BBOX_STD_DEV, USE_MINI_MASK, MASK_SHAPE).

    Returns (rois, roi_gt_class_ids, deltas, masks), all zero-padded to
    TRAIN_ROIS_PER_IMAGE rows.
    """
    # Fail fast if the proposals tensor is empty.
    asserts = [
        tf.Assert(tf.greater(tf.shape(proposals)[0], 0), [proposals],
                  name="roi_assertion"),
    ]
    with tf.control_dependencies(asserts):
        proposals = tf.identity(proposals)
    # Remove zero-padding rows from proposals and ground truth, and trim
    # class IDs / masks to the surviving GT boxes.
    proposals, _ = trim_zeros_graph(proposals, name="trim_proposals")
    gt_boxes, non_zeros = trim_zeros_graph(gt_boxes, name="trim_gt_boxes")
    gt_class_ids = tf.boolean_mask(gt_class_ids, non_zeros,
                                   name="trim_gt_class_ids")
    gt_masks = tf.gather(gt_masks, tf.where(non_zeros)[:, 0], axis=2,
                         name="trim_gt_masks")
    # Separate crowd boxes (negative class IDs) from regular instances.
    crowd_ix = tf.where(gt_class_ids < 0)[:, 0]
    non_crowd_ix = tf.where(gt_class_ids > 0)[:, 0]
    crowd_boxes = tf.gather(gt_boxes, crowd_ix)
    gt_class_ids = tf.gather(gt_class_ids, non_crowd_ix)
    gt_boxes = tf.gather(gt_boxes, non_crowd_ix)
    gt_masks = tf.gather(gt_masks, non_crowd_ix, axis=2)
    # IoU of every proposal against every GT box and every crowd box.
    overlaps = overlaps_graph(proposals, gt_boxes)
    crowd_overlaps = overlaps_graph(proposals, crowd_boxes)
    crowd_iou_max = tf.reduce_max(crowd_overlaps, axis=1)
    # Proposals overlapping a crowd box are excluded from negatives.
    no_crowd_bool = (crowd_iou_max < 0.001)
    # Positives: IoU >= 0.5 with some GT box. Negatives: IoU < 0.5 and
    # not overlapping a crowd.
    roi_iou_max = tf.reduce_max(overlaps, axis=1)
    positive_roi_bool = (roi_iou_max >= 0.5)
    positive_indices = tf.where(positive_roi_bool)[:, 0]
    negative_indices = tf.where(tf.logical_and(roi_iou_max < 0.5, no_crowd_bool))[:, 0]
    # Subsample positives, then pick negatives to keep the configured
    # positive:negative ratio.
    positive_count = int(config.TRAIN_ROIS_PER_IMAGE *
                         config.ROI_POSITIVE_RATIO)
    positive_indices = tf.random.shuffle(positive_indices)[:positive_count]
    positive_count = tf.shape(positive_indices)[0]
    r = 1.0 / config.ROI_POSITIVE_RATIO
    negative_count = tf.cast(r * tf.cast(positive_count, tf.float32), tf.int32) - positive_count
    negative_indices = tf.random.shuffle(negative_indices)[:negative_count]
    positive_rois = tf.gather(proposals, positive_indices)
    negative_rois = tf.gather(proposals, negative_indices)
    # Assign each positive ROI to the GT box it overlaps most; the cond
    # guards against an empty overlaps matrix.
    positive_overlaps = tf.gather(overlaps, positive_indices)
    roi_gt_box_assignment = tf.cond(
        tf.greater(tf.shape(positive_overlaps)[1], 0),
        true_fn=lambda: tf.argmax(positive_overlaps, axis=1),
        false_fn=lambda: tf.cast(tf.constant([]), tf.int64)
    )
    roi_gt_boxes = tf.gather(gt_boxes, roi_gt_box_assignment)
    roi_gt_class_ids = tf.gather(gt_class_ids, roi_gt_box_assignment)
    # Box regression targets, normalized by the configured std-dev.
    deltas = utils.box_refinement_graph(positive_rois, roi_gt_boxes)
    deltas /= config.BBOX_STD_DEV
    # Reorder masks to [instances, height, width, 1] for crop_and_resize.
    transposed_masks = tf.expand_dims(tf.transpose(gt_masks, [2, 0, 1]), -1)
    roi_masks = tf.gather(transposed_masks, roi_gt_box_assignment)
    boxes = positive_rois
    if config.USE_MINI_MASK:
        # Mini masks are stored relative to their GT box, so express the
        # ROI coordinates in the GT box's frame before cropping.
        y1, x1, y2, x2 = tf.split(positive_rois, 4, axis=1)
        gt_y1, gt_x1, gt_y2, gt_x2 = tf.split(roi_gt_boxes, 4, axis=1)
        gt_h = gt_y2 - gt_y1
        gt_w = gt_x2 - gt_x1
        y1 = (y1 - gt_y1) / gt_h
        x1 = (x1 - gt_x1) / gt_w
        y2 = (y2 - gt_y1) / gt_h
        x2 = (x2 - gt_x1) / gt_w
        boxes = tf.concat([y1, x1, y2, x2], 1)
    # Crop each ROI's mask and resize it to MASK_SHAPE, then binarize.
    box_ids = tf.range(0, tf.shape(roi_masks)[0])
    masks = tf.image.crop_and_resize(tf.cast(roi_masks, tf.float32), boxes,
                                     box_ids,
                                     config.MASK_SHAPE)
    masks = tf.squeeze(masks, axis=3)
    masks = tf.round(masks)
    # Concatenate positives and negatives, then zero-pad everything to
    # TRAIN_ROIS_PER_IMAGE rows (negatives get zero targets).
    rois = tf.concat([positive_rois, negative_rois], axis=0)
    N = tf.shape(negative_rois)[0]
    P = tf.maximum(config.TRAIN_ROIS_PER_IMAGE - tf.shape(rois)[0], 0)
    rois = tf.pad(rois, [(0, P), (0, 0)])
    roi_gt_boxes = tf.pad(roi_gt_boxes, [(0, N + P), (0, 0)])
    roi_gt_class_ids = tf.pad(roi_gt_class_ids, [(0, N + P)])
    deltas = tf.pad(deltas, [(0, N + P), (0, 0)])
    masks = tf.pad(masks, [[0, N + P], (0, 0), (0, 0)])
    return rois, roi_gt_class_ids, deltas, masks
class DetectionTargetLayer(KE.Layer):
    """Keras layer that subsamples proposals and generates per-ROI
    training targets (class IDs, bbox deltas, masks) by applying
    detection_targets_graph() to each item in the batch.

    Inputs (in call): [proposals, gt_class_ids, gt_boxes, gt_masks].
    Outputs: [rois, target_class_ids, target_bbox, target_mask], each
    padded to config.TRAIN_ROIS_PER_IMAGE rows per image.
    """
    def __init__(self, config, **kwargs):
        # config: model configuration (TRAIN_ROIS_PER_IMAGE, MASK_SHAPE,
        # IMAGES_PER_GPU, ...).
        super(DetectionTargetLayer, self).__init__(**kwargs)
        self.config = config
    def call(self, inputs):
        proposals = inputs[0]
        gt_class_ids = inputs[1]
        gt_boxes = inputs[2]
        gt_masks = inputs[3]
        # Slice the batch and run detection_targets_graph() per image.
        names = ["rois", "target_class_ids", "target_bbox", "target_mask"]
        outputs = utils.batch_slice(
            [proposals, gt_class_ids, gt_boxes, gt_masks],
            lambda w, x, y, z: detection_targets_graph(
                w, x, y, z, self.config),
            self.config.IMAGES_PER_GPU, names=names)
        return outputs
    def compute_output_shape(self, input_shape):
        # Shapes of [rois, target_class_ids, target_bbox, target_mask].
        return [
            (None, self.config.TRAIN_ROIS_PER_IMAGE, 4),  # rois
            (None, self.config.TRAIN_ROIS_PER_IMAGE),  # class_ids
            (None, self.config.TRAIN_ROIS_PER_IMAGE, 4),  # deltas
            (None, self.config.TRAIN_ROIS_PER_IMAGE, self.config.MASK_SHAPE[0],
             self.config.MASK_SHAPE[1])  # masks
        ]
    def compute_mask(self, inputs, mask=None):
        # No Keras masking on any of the four outputs.
        return [None, None, None, None]
y, w, z, self.config),
self.config.IMAGES_PER_GPU)
return tf.reshape(
detections_batch,
[self.config.BATCH_SIZE, self.config.DETECTION_MAX_INSTANCES, 6])
    def compute_output_shape(self, input_shape):
        # Detections tensor: [batch, DETECTION_MAX_INSTANCES,
        # (y1, x1, y2, x2, class_id, score)] — hence the trailing 6.
        return (None, self.config.DETECTION_MAX_INSTANCES, 6)
es = np.zeros((config.TRAIN_ROIS_PER_IMAGE,
config.NUM_CLASSES, 4), dtype=np.float32)
pos_ids = np.where(roi_gt_class_ids > 0)[0]
bboxes[pos_ids, roi_gt_class_ids[pos_ids]] = utils.box_refinement(
rois[pos_ids], roi_gt_boxes[pos_ids, :4])
# Normalize bbox refinements
bboxes /= config.BBOX_STD_DEV
# Generate class-specific target masks
masks = np.zeros((config.TRAIN_ROIS_PER_IMAGE, config.MASK_SHAPE[0], config.MASK_SHAPE[1], config.NUM_CLASSES),
dtype=np.float32)
for i in pos_ids:
class_id = roi_gt_class_ids[i]
assert class_id > 0, "class id must be greater than 0"
gt_id = roi_gt_assignment[i]
class_mask = gt_masks[:, :, gt_id]
if config.USE_MINI_MASK:
# Create a mask placeholder, the size of the image
placeholder = np.zeros(config.IMAGE_SHAPE[:2], dtype=bool)
# GT box
gt_y1, gt_x1, gt_y2, gt_x2 = gt_boxes[gt_id]
gt_w = gt_x2 - gt_x1
gt_h = gt_y2 - gt_y1
# Resize mini mask to size of GT box
placeholder[gt_y1:gt_y2, gt_x1:gt_x2] = \
np.round(utils.resize(class_mask, (gt_h, gt_w))).astype(bool)
# Place the mini batch in the placeholder
class_mask = placeholder
# Pick part of the mask and resize it
y1, x1, y2, x2 = rois[i].astype(np.int32)
m = class_mask[y1:y2, x1:x2]
mask = utils.resize(m, config.MASK_SHAPE)
masks[i, :, :, class_id] = mask
return rois, roi_gt_class_ids, bboxes, masks
def build_rpn_targets(image_shape, anchors, gt_class_ids, gt_boxes, config):
    """Match anchors to GT boxes and compute RPN training targets.

    image_shape: [height, width, depth] (not used directly here)
    anchors: [num_anchors, (y1, x1, y2, x2)] in pixel coordinates
    gt_class_ids: [num_gt_boxes]; negative IDs mark COCO crowd boxes
    gt_boxes: [num_gt_boxes, (y1, x1, y2, x2)] in pixel coordinates
    config: model configuration object

    Returns:
        rpn_match: [num_anchors] 1 = positive, -1 = negative, 0 = neutral
        rpn_bbox: [RPN_TRAIN_ANCHORS_PER_IMAGE, (dy, dx, log(dh), log(dw))]
            refinement deltas for positive anchors; remaining rows are zero.
    """
    # RPN Match: 1 = positive anchor, -1 = negative anchor, 0 = neutral
    rpn_match = np.zeros([anchors.shape[0]], dtype=np.int32)
    # RPN bounding boxes: [max anchors per image, (dy, dx, log(dh), log(dw))]
    rpn_bbox = np.zeros((config.RPN_TRAIN_ANCHORS_PER_IMAGE, 4))
    # Handle COCO crowds
    # A crowd box in COCO is a bounding box around several instances. Exclude
    # them from training. A crowd box is given a negative class ID.
    crowd_ix = np.where(gt_class_ids < 0)[0]
    if crowd_ix.shape[0] > 0:
        # Filter out crowds from ground truth class IDs and boxes
        non_crowd_ix = np.where(gt_class_ids > 0)[0]
        crowd_boxes = gt_boxes[crowd_ix]
        gt_class_ids = gt_class_ids[non_crowd_ix]
        gt_boxes = gt_boxes[non_crowd_ix]
        # Compute overlaps with crowd boxes [anchors, crowds]
        crowd_overlaps = utils.compute_overlaps(anchors, crowd_boxes)
        crowd_iou_max = np.amax(crowd_overlaps, axis=1)
        # Anchors essentially untouched by a crowd box may still be negatives.
        no_crowd_bool = (crowd_iou_max < 0.001)
    else:
        # All anchors don't intersect a crowd
        no_crowd_bool = np.ones([anchors.shape[0]], dtype=bool)
    # Overlaps matrix: [num_anchors, num_gt_boxes]
    overlaps = utils.compute_overlaps(anchors, gt_boxes)
    # Match anchors to GT boxes:
    # If an anchor overlaps a GT box with IoU < 0.3 then it's negative.
    # Anchors in between are neutral (rpn_match stays 0)
    # and they don't influence the loss function.
    # Every GT box also gets at least one anchor:
    # match it to the closest anchor (even if its max IoU is < 0.3).
    #
    # 1. Set negative anchors first. They get overwritten below if a GT box is
    # matched to them. Skip boxes in crowd areas.
    anchor_iou_argmax = np.argmax(overlaps, axis=1)
    anchor_iou_max = overlaps[np.arange(overlaps.shape[0]), anchor_iou_argmax]
    rpn_match[(anchor_iou_max < 0.3) & (no_crowd_bool)] = -1
    # 2. Set an anchor for each GT box (regardless of IoU value).
    # If multiple anchors have the same IoU match all of them
    gt_iou_argmax = np.argwhere(overlaps == np.max(overlaps, axis=0))[:, 0]
    rpn_match[gt_iou_argmax] = 1
    # 3. Set anchors with high overlap as positive.
    rpn_match[anchor_iou_max >= 0.7] = 1
    # Subsample to balance positive and negative anchors
    # Don't let positives be more than half the anchors
    ids = np.where(rpn_match == 1)[0]
    extra = len(ids) - (config.RPN_TRAIN_ANCHORS_PER_IMAGE // 2)
    if extra > 0:
        # Reset the extra positives to neutral
        ids = np.random.choice(ids, extra, replace=False)
        rpn_match[ids] = 0
    # Same for negatives: fill the remainder of the anchor budget
    ids = np.where(rpn_match == -1)[0]
    extra = len(ids) - (config.RPN_TRAIN_ANCHORS_PER_IMAGE -
                        np.sum(rpn_match == 1))
    if extra > 0:
        # Reset the extra negatives to neutral
        ids = np.random.choice(ids, extra, replace=False)
        rpn_match[ids] = 0
    # For positive anchors, compute the shift and scale needed to transform
    # them to match the corresponding GT boxes.
    ids = np.where(rpn_match == 1)[0]
    ix = 0  # index into rpn_bbox
    for i, a in zip(ids, anchors[ids]):
        # Closest GT box (it might have IoU < 0.7)
        gt = gt_boxes[anchor_iou_argmax[i]]
        # Convert both boxes to center + width/height form.
        gt_h = gt[2] - gt[0]
        gt_w = gt[3] - gt[1]
        gt_center_y = gt[0] + 0.5 * gt_h
        gt_center_x = gt[1] + 0.5 * gt_w
        a_h = a[2] - a[0]
        a_w = a[3] - a[1]
        a_center_y = a[0] + 0.5 * a_h
        a_center_x = a[1] + 0.5 * a_w
        # The bbox refinement the RPN should learn to predict.
        rpn_bbox[ix] = [
            (gt_center_y - a_center_y) / a_h,
            (gt_center_x - a_center_x) / a_w,
            np.log(gt_h / a_h),
            np.log(gt_w / a_w),
        ]
        # Normalize by the configured standard deviations
        rpn_bbox[ix] /= config.RPN_BBOX_STD_DEV
        ix += 1
    return rpn_match, rpn_bbox
def _sample_boxes(n, y_min, y_max, x_min, x_max):
    """Sample `n` random non-degenerate boxes within the given pixel ranges.

    Draws y and x coordinate pairs uniformly (oversampling 2x), keeps only
    pairs whose coordinates differ by at least 1px to avoid zero-area boxes,
    and retries until `n` valid boxes exist.

    Returns: [n, (y1, x1, y2, x2)] with y1 < y2 and x1 < x2.
    """
    while True:
        y1y2 = np.random.randint(y_min, y_max, (n * 2, 2))
        x1x2 = np.random.randint(x_min, x_max, (n * 2, 2))
        # Filter out degenerate (near-zero height/width) pairs.
        threshold = 1
        y1y2 = y1y2[np.abs(y1y2[:, 0] - y1y2[:, 1]) >= threshold][:n]
        x1x2 = x1x2[np.abs(x1x2[:, 0] - x1x2[:, 1]) >= threshold][:n]
        if y1y2.shape[0] == n and x1x2.shape[0] == n:
            break
    # Sort each pair so (y1, x1) is the top-left corner.
    x1, x2 = np.split(np.sort(x1x2, axis=1), 2, axis=1)
    y1, y2 = np.split(np.sort(y1y2, axis=1), 2, axis=1)
    return np.hstack([y1, x1, y2, x2])


def generate_random_rois(image_shape, count, gt_class_ids, gt_boxes):
    """Generate `count` random ROI proposals in pixel coordinates.

    About 90% of the ROIs are sampled near (within 2x the extent of) the
    ground-truth boxes; the rest are sampled anywhere in the image.
    Fix over the previous version: an empty `gt_boxes` no longer raises
    ZeroDivisionError — all ROIs are then sampled globally.

    image_shape: [height, width, depth]
    count: total number of ROIs to generate
    gt_class_ids: unused; kept for interface compatibility
    gt_boxes: [num_boxes, (y1, x1, y2, x2)] in pixels; may be empty

    Returns: [count, (y1, x1, y2, x2)] int32 array.
    """
    rois = np.zeros((count, 4), dtype=np.int32)
    num_boxes = 0 if gt_boxes is None else gt_boxes.shape[0]
    if num_boxes > 0:
        # ~90% of ROIs concentrated around the GT boxes.
        rois_per_box = int(0.9 * count / num_boxes)
        for i in range(num_boxes):
            gt_y1, gt_x1, gt_y2, gt_x2 = gt_boxes[i]
            h = gt_y2 - gt_y1
            w = gt_x2 - gt_x1
            # Sampling region: the GT box enlarged by its own size on each
            # side, clipped to the image bounds.
            r_y1 = max(gt_y1 - h, 0)
            r_y2 = min(gt_y2 + h, image_shape[0])
            r_x1 = max(gt_x1 - w, 0)
            r_x2 = min(gt_x2 + w, image_shape[1])
            rois[rois_per_box * i:rois_per_box * (i + 1)] = _sample_boxes(
                rois_per_box, r_y1, r_y2, r_x1, r_x2)
        remaining_count = count - (rois_per_box * num_boxes)
    else:
        remaining_count = count
    # The remaining ROIs are sampled anywhere in the image.
    if remaining_count > 0:
        rois[-remaining_count:] = _sample_boxes(
            remaining_count, 0, image_shape[0], 0, image_shape[1])
    return rois
class DataGenerator(KU.Sequence):
    """Keras Sequence that yields batches of (inputs, outputs) for training.

    Each batch contains molded images, image metadata, RPN targets and GT
    class IDs / boxes / masks. Optionally also random ROIs and head
    (detection) targets when `random_rois` / `detection_targets` are set.
    """

    def __init__(self, dataset, config, shuffle=True, augmentation=None,
                 random_rois=0, batch_size=1, detection_targets=False):
        # dataset: a Dataset-like object exposing image_ids and image data
        # random_rois: if > 0, generate this many random proposals per image
        # detection_targets: if True, also build classifier/mask head targets
        self.dataset = dataset
        self.config = config
        self.shuffle = shuffle
        self.augmentation = augmentation
        self.random_rois = random_rois
        self.batch_size = batch_size
        self.detection_targets = detection_targets
        self.image_ids = np.copy(dataset.image_ids)
        self.backbone_shapes = compute_backbone_shapes(config, config.IMAGE_SHAPE)
        # Anchors are fixed for the configured image shape, so compute once.
        self.anchors = utils.generate_pyramid_anchors(config.RPN_ANCHOR_SCALES,
                                                      config.RPN_ANCHOR_RATIOS,
                                                      self.backbone_shapes,
                                                      config.BACKBONE_STRIDES,
                                                      config.RPN_ANCHOR_STRIDE)

    def __len__(self):
        # Number of batches per epoch.
        return int(np.ceil(len(self.image_ids) / float(self.batch_size)))

    def __getitem__(self, idx):
        # NOTE(review): `idx` is not used — every call restarts its own
        # image counter at 0 (and reshuffles when shuffle=True), so batches
        # are effectively sampled rather than indexed. Confirm intentional.
        b = 0  # batch item index
        image_index = -1
        while b < self.batch_size:
            image_index = (image_index + 1) % len(self.image_ids)
            if self.shuffle and image_index == 0:
                np.random.shuffle(self.image_ids)
            # Load the image and its ground truth.
            image_id = self.image_ids[image_index]
            image, image_meta, gt_class_ids, gt_boxes, gt_masks = \
                load_image_gt(self.dataset, self.config, image_id,
                              augmentation=self.augmentation,
                              use_mini_mask=self.config.USE_MINI_MASK)
            # Skip images that don't have any positive class instances.
            if not np.any(gt_class_ids > 0):
                continue
            # RPN Targets
            rpn_match, rpn_bbox = build_rpn_targets(image.shape, self.anchors,
                                                    gt_class_ids, gt_boxes, self.config)
            # Mask R-CNN Targets
            if self.random_rois:
                rpn_rois = generate_random_rois(
                    image.shape, self.random_rois, gt_class_ids, gt_boxes)
                if self.detection_targets:
                    rois, mrcnn_class_ids, mrcnn_bbox, mrcnn_mask = \
                        build_detection_targets(
                            rpn_rois, gt_class_ids, gt_boxes, gt_masks, self.config)
            # Init batch arrays (sized from the first item's shapes)
            if b == 0:
                batch_image_meta = np.zeros(
                    (self.batch_size,) + image_meta.shape, dtype=image_meta.dtype)
                batch_rpn_match = np.zeros(
                    [self.batch_size, self.anchors.shape[0], 1], dtype=rpn_match.dtype)
                batch_rpn_bbox = np.zeros(
                    [self.batch_size, self.config.RPN_TRAIN_ANCHORS_PER_IMAGE, 4], dtype=rpn_bbox.dtype)
                batch_images = np.zeros(
                    (self.batch_size,) + image.shape, dtype=np.float32)
                batch_gt_class_ids = np.zeros(
                    (self.batch_size, self.config.MAX_GT_INSTANCES), dtype=np.int32)
                batch_gt_boxes = np.zeros(
                    (self.batch_size, self.config.MAX_GT_INSTANCES, 4), dtype=np.int32)
                batch_gt_masks = np.zeros(
                    (self.batch_size, gt_masks.shape[0], gt_masks.shape[1],
                     self.config.MAX_GT_INSTANCES), dtype=gt_masks.dtype)
                if self.random_rois:
                    batch_rpn_rois = np.zeros(
                        (self.batch_size, rpn_rois.shape[0], 4), dtype=rpn_rois.dtype)
                    if self.detection_targets:
                        batch_rois = np.zeros(
                            (self.batch_size,) + rois.shape, dtype=rois.dtype)
                        batch_mrcnn_class_ids = np.zeros(
                            (self.batch_size,) + mrcnn_class_ids.shape, dtype=mrcnn_class_ids.dtype)
                        batch_mrcnn_bbox = np.zeros(
                            (self.batch_size,) + mrcnn_bbox.shape, dtype=mrcnn_bbox.dtype)
                        batch_mrcnn_mask = np.zeros(
                            (self.batch_size,) + mrcnn_mask.shape, dtype=mrcnn_mask.dtype)
            # If more instances than fits in the array, sub-sample from them.
            if gt_boxes.shape[0] > self.config.MAX_GT_INSTANCES:
                ids = np.random.choice(
                    np.arange(gt_boxes.shape[0]), self.config.MAX_GT_INSTANCES, replace=False)
                gt_class_ids = gt_class_ids[ids]
                gt_boxes = gt_boxes[ids]
                gt_masks = gt_masks[:, :, ids]
            # Add to batch
            batch_image_meta[b] = image_meta
            batch_rpn_match[b] = rpn_match[:, np.newaxis]
            batch_rpn_bbox[b] = rpn_bbox
            batch_images[b] = mold_image(image.astype(np.float32), self.config)
            batch_gt_class_ids[b, :gt_class_ids.shape[0]] = gt_class_ids
            batch_gt_boxes[b, :gt_boxes.shape[0]] = gt_boxes
            batch_gt_masks[b, :, :, :gt_masks.shape[-1]] = gt_masks
            if self.random_rois:
                batch_rpn_rois[b] = rpn_rois
                if self.detection_targets:
                    batch_rois[b] = rois
                    batch_mrcnn_class_ids[b] = mrcnn_class_ids
                    batch_mrcnn_bbox[b] = mrcnn_bbox
                    batch_mrcnn_mask[b] = mrcnn_mask
            b += 1
        inputs = [batch_images, batch_image_meta, batch_rpn_match, batch_rpn_bbox,
                  batch_gt_class_ids, batch_gt_boxes, batch_gt_masks]
        outputs = []
        if self.random_rois:
            inputs.extend([batch_rpn_rois])
            if self.detection_targets:
                inputs.extend([batch_rois])
                # Keras requires that output and targets have the same number of dimensions
                batch_mrcnn_class_ids = np.expand_dims(
                    batch_mrcnn_class_ids, -1)
                outputs.extend(
                    [batch_mrcnn_class_ids, batch_mrcnn_bbox, batch_mrcnn_mask])
        return inputs, outputs
############################################################
# MaskRCNN Class
############################################################
class MaskRCNN():
    """Encapsulates the Mask R-CNN model functionality.

    The actual Keras model is stored in `self.keras_model`.
    """

    def __init__(self, mode, config, model_dir):
        """
        mode: either "training" or "inference"
        config: a subclass instance of the model Config
        model_dir: directory to save training logs and trained weights
        """
        assert mode in ['training', 'inference']
        self.mode = mode
        self.config = config
        self.model_dir = model_dir
        # Sets self.log_dir / self.checkpoint_path / self.epoch
        self.set_log_dir()
        self.keras_model = self.build(mode=mode, config=config)
    def build(self, mode, config):
        """Build the Mask R-CNN Keras graph.

        mode: "training" builds the full graph with target generation and
            loss layers; "inference" builds the detection graph.
        config: model configuration object.

        Returns the assembled Keras model (wrapped in ParallelModel when
        GPU_COUNT > 1).
        """
        assert mode in ['training', 'inference']
        # Image size must be dividable by 2 multiple times
        h, w = config.IMAGE_SHAPE[:2]
        if h / 2**6 != int(h / 2**6) or w / 2**6 != int(w / 2**6):
            # NOTE(review): message is missing a space between sentences.
            raise Exception("Image size must be dividable by 2 at least 6 times "
                            "to avoid fractions when downscaling and upscaling."
                            "For example, use 256, 320, 384, 448, 512, ... etc. ")
        # Inputs
        input_image = KL.Input(
            shape=[None, None, config.IMAGE_SHAPE[2]], name="input_image")
        input_image_meta = KL.Input(shape=[config.IMAGE_META_SIZE],
                                    name="input_image_meta")
        if mode == "training":
            # RPN GT
            input_rpn_match = KL.Input(
                shape=[None, 1], name="input_rpn_match", dtype=tf.int32)
            input_rpn_bbox = KL.Input(
                shape=[None, 4], name="input_rpn_bbox", dtype=tf.float32)
            # Detection GT (class IDs, bounding boxes, and masks)
            # 1. GT Class IDs (zero padded)
            input_gt_class_ids = KL.Input(
                shape=[None], name="input_gt_class_ids", dtype=tf.int32)
            # 2. GT Boxes in pixels (zero padded)
            # [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)] in image coordinates
            input_gt_boxes = KL.Input(
                shape=[None, 4], name="input_gt_boxes", dtype=tf.float32)
            # Normalize coordinates
            gt_boxes = KL.Lambda(lambda x: norm_boxes_graph(
                x, K.shape(input_image)[1:3]))(input_gt_boxes)
            # 3. GT Masks (zero padded)
            # [batch, height, width, MAX_GT_INSTANCES]
            if config.USE_MINI_MASK:
                input_gt_masks = KL.Input(
                    shape=[config.MINI_MASK_SHAPE[0],
                           config.MINI_MASK_SHAPE[1], None],
                    name="input_gt_masks", dtype=bool)
            else:
                input_gt_masks = KL.Input(
                    shape=[config.IMAGE_SHAPE[0], config.IMAGE_SHAPE[1], None],
                    name="input_gt_masks", dtype=bool)
        elif mode == "inference":
            # Anchors in normalized coordinates
            input_anchors = KL.Input(shape=[None, 4], name="input_anchors")
        # Build the shared convolutional layers.
        # Bottom-up Layers
        # Returns a list of the last layers of each stage, 5 in total.
        # Don't create the thead (stage 5), so we pick the 4th item in the list.
        if callable(config.BACKBONE):
            _, C2, C3, C4, C5 = config.BACKBONE(input_image, stage5=True,
                                                train_bn=config.TRAIN_BN)
        else:
            _, C2, C3, C4, C5 = resnet_graph(input_image, config.BACKBONE,
                                             stage5=True, train_bn=config.TRAIN_BN)
        # Top-down FPN layers: lateral 1x1 convs + upsample-and-add merges.
        P5 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c5p5')(C5)
        P4 = KL.Add(name="fpn_p4add")([
            KL.UpSampling2D(size=(2, 2), name="fpn_p5upsampled")(P5),
            KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c4p4')(C4)])
        P3 = KL.Add(name="fpn_p3add")([
            KL.UpSampling2D(size=(2, 2), name="fpn_p4upsampled")(P4),
            KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c3p3')(C3)])
        P2 = KL.Add(name="fpn_p2add")([
            KL.UpSampling2D(size=(2, 2), name="fpn_p3upsampled")(P3),
            KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c2p2')(C2)])
        # Attach 3x3 conv to all P layers to get the final feature maps.
        P2 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding="SAME", name="fpn_p2")(P2)
        P3 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding="SAME", name="fpn_p3")(P3)
        P4 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding="SAME", name="fpn_p4")(P4)
        P5 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding="SAME", name="fpn_p5")(P5)
        # P6 is used for the 5th anchor scale in RPN. Generated by
        # subsampling from P5 with stride of 2.
        P6 = KL.MaxPooling2D(pool_size=(1, 1), strides=2, name="fpn_p6")(P5)
        # Note that P6 is used in RPN, but not in the classifier heads.
        rpn_feature_maps = [P2, P3, P4, P5, P6]
        mrcnn_feature_maps = [P2, P3, P4, P5]
        # Anchors
        if mode == "training":
            anchors = self.get_anchors(config.IMAGE_SHAPE)
            # Duplicate across the batch dimension because Keras requires it
            # TODO: can this be optimized to avoid duplicating the anchors?
            anchors = np.broadcast_to(anchors, (config.BATCH_SIZE,) + anchors.shape)
            # A hack to get around Keras's bad support for constants
            class ConstLayer(KE.Layer):
                def __init__(self, x, name=None):
                    super(ConstLayer, self).__init__(name=name)
                    self.x = tf.Variable(x)
                def call(self, input):
                    # Ignores the input; exists only to inject the constant.
                    return self.x
            anchors = ConstLayer(anchors, name="anchors")(input_image)
        else:
            anchors = input_anchors
        # RPN Model: shared across all pyramid levels.
        rpn = build_rpn_model(config.RPN_ANCHOR_STRIDE,
                              len(config.RPN_ANCHOR_RATIOS), config.TOP_DOWN_PYRAMID_SIZE)
        # Loop through pyramid layers
        layer_outputs = []
        for p in rpn_feature_maps:
            layer_outputs.append(rpn([p]))
        # Concatenate layer outputs across levels: from per-level lists of
        # [logits, probs, bbox] to one tensor per output kind.
        output_names = ["rpn_class_logits", "rpn_class", "rpn_bbox"]
        outputs = list(zip(*layer_outputs))
        outputs = [KL.Concatenate(axis=1, name=n)(list(o))
                   for o, n in zip(outputs, output_names)]
        rpn_class_logits, rpn_class, rpn_bbox = outputs
        # Generate proposals (top anchors after NMS, refined by RPN deltas).
        proposal_count = config.POST_NMS_ROIS_TRAINING if mode == "training"\
            else config.POST_NMS_ROIS_INFERENCE
        rpn_rois = ProposalLayer(
            proposal_count=proposal_count,
            nms_threshold=config.RPN_NMS_THRESHOLD,
            name="ROI",
            config=config)([rpn_class, rpn_bbox, anchors])
        if mode == "training":
            # Class ID mask to mark class IDs supported by the dataset.
            active_class_ids = KL.Lambda(
                lambda x: parse_image_meta_graph(x)["active_class_ids"]
            )(input_image_meta)
            if not config.USE_RPN_ROIS:
                # Ignore predicted ROIs and use externally provided ones.
                input_rois = KL.Input(shape=[config.POST_NMS_ROIS_TRAINING, 4],
                                      name="input_roi", dtype=np.int32)
                # Normalize coordinates
                target_rois = KL.Lambda(lambda x: norm_boxes_graph(
                    x, K.shape(input_image)[1:3]))(input_rois)
            else:
                target_rois = rpn_rois
            # Generate detection targets: subsampled ROIs plus their
            # class / bbox / mask targets.
            rois, target_class_ids, target_bbox, target_mask =\
                DetectionTargetLayer(config, name="proposal_targets")([
                    target_rois, input_gt_class_ids, gt_boxes, input_gt_masks])
            # Network heads
            mrcnn_class_logits, mrcnn_class, mrcnn_bbox =\
                fpn_classifier_graph(rois, mrcnn_feature_maps, input_image_meta,
                                     config.POOL_SIZE, config.NUM_CLASSES,
                                     train_bn=config.TRAIN_BN,
                                     fc_layers_size=config.FPN_CLASSIF_FC_LAYERS_SIZE)
            mrcnn_mask = build_fpn_mask_graph(rois, mrcnn_feature_maps,
                                              input_image_meta,
                                              config.MASK_POOL_SIZE,
                                              config.NUM_CLASSES,
                                              train_bn=config.TRAIN_BN)
            output_rois = KL.Lambda(lambda x: x * 1, name="output_rois")(rois)
            # Losses
            rpn_class_loss = KL.Lambda(lambda x: rpn_class_loss_graph(*x), name="rpn_class_loss")(
                [input_rpn_match, rpn_class_logits])
            rpn_bbox_loss = KL.Lambda(lambda x: rpn_bbox_loss_graph(config, *x), name="rpn_bbox_loss")(
                [input_rpn_bbox, input_rpn_match, rpn_bbox])
            class_loss = KL.Lambda(lambda x: mrcnn_class_loss_graph(*x), name="mrcnn_class_loss")(
                [target_class_ids, mrcnn_class_logits, active_class_ids])
            bbox_loss = KL.Lambda(lambda x: mrcnn_bbox_loss_graph(*x), name="mrcnn_bbox_loss")(
                [target_bbox, target_class_ids, mrcnn_bbox])
            mask_loss = KL.Lambda(lambda x: mrcnn_mask_loss_graph(*x), name="mrcnn_mask_loss")(
                [target_mask, target_class_ids, mrcnn_mask])
            # Model
            inputs = [input_image, input_image_meta,
                      input_rpn_match, input_rpn_bbox, input_gt_class_ids, input_gt_boxes, input_gt_masks]
            if not config.USE_RPN_ROIS:
                inputs.append(input_rois)
            outputs = [rpn_class_logits, rpn_class, rpn_bbox,
                       mrcnn_class_logits, mrcnn_class, mrcnn_bbox, mrcnn_mask,
                       rpn_rois, output_rois,
                       rpn_class_loss, rpn_bbox_loss, class_loss, bbox_loss, mask_loss]
            model = KM.Model(inputs, outputs, name='mask_rcnn')
        else:
            # Network heads — proposal classifier and bbox regressor heads
            mrcnn_class_logits, mrcnn_class, mrcnn_bbox =\
                fpn_classifier_graph(rpn_rois, mrcnn_feature_maps, input_image_meta,
                                     config.POOL_SIZE, config.NUM_CLASSES,
                                     train_bn=config.TRAIN_BN,
                                     fc_layers_size=config.FPN_CLASSIF_FC_LAYERS_SIZE)
            # Detections: [batch, num_detections, (y1, x1, y2, x2, class_id, score)]
            detections = DetectionLayer(config, name="mrcnn_detection")(
                [rpn_rois, mrcnn_class, mrcnn_bbox, input_image_meta])
            # Create masks for detections
            detection_boxes = KL.Lambda(lambda x: x[..., :4])(detections)
            mrcnn_mask = build_fpn_mask_graph(detection_boxes, mrcnn_feature_maps,
                                              input_image_meta,
                                              config.MASK_POOL_SIZE,
                                              config.NUM_CLASSES,
                                              train_bn=config.TRAIN_BN)
            model = KM.Model([input_image, input_image_meta, input_anchors],
                             [detections, mrcnn_class, mrcnn_bbox,
                                 mrcnn_mask, rpn_rois, rpn_class, rpn_bbox],
                             name='mask_rcnn')
        # Add multi-GPU support.
        if config.GPU_COUNT > 1:
            from mrcnn.parallel_model import ParallelModel
            model = ParallelModel(model, config.GPU_COUNT)
        return model
def find_last(self):
dir_names = next(os.walk(self.model_dir))[1]
key = self.config.NAME.lower()
dir_names = filter(lambda f: f.startswith(key), dir_names)
dir_names = sorted(dir_names)
if not dir_names:
import errno
raise FileNotFoundError(
errno.ENOENT,
"Could not find model directory under {}".format(self.model_dir))
dir_name = os.path.join(self.model_dir, dir_names[-1])
checkpoints = next(os.walk(dir_name))[2]
checkpoints = filter(lambda f: f.startswith("mask_rcnn"), checkpoints)
checkpoints = sorted(checkpoints)
if not checkpoints:
import errno
raise FileNotFoundError(
errno.ENOENT, "Could not find weight files in {}".format(dir_name))
checkpoint = os.path.join(dir_name, checkpoints[-1])
return checkpoint
    def load_weights(self, filepath, by_name=False, exclude=None):
        """Load weights from an HDF5 file (modified Keras loader).

        filepath: path to the .h5 weights file
        by_name: match layers by name instead of by topology order
        exclude: list of layer names to skip (forces by_name=True)
        """
        import h5py
        from tensorflow.python.keras.saving import hdf5_format
        if exclude:
            by_name = True
        if h5py is None:
            raise ImportError('`load_weights` requires h5py.')
        with h5py.File(filepath, mode='r') as f:
            # Full-model saves nest weights under 'model_weights'.
            if 'layer_names' not in f.attrs and 'model_weights' in f:
                f = f['model_weights']
            # In multi-GPU training, we wrap the model. Get layers of the
            # inner model because they have the weights.
            keras_model = self.keras_model
            layers = keras_model.inner_model.layers if hasattr(keras_model, "inner_model")\
                else keras_model.layers
            # Exclude some layers
            if exclude:
                layers = filter(lambda l: l.name not in exclude, layers)
            if by_name:
                hdf5_format.load_weights_from_hdf5_group_by_name(f, layers)
            else:
                hdf5_format.load_weights_from_hdf5_group(f, layers)
        # Update the log directory / epoch counter from the weights path.
        self.set_log_dir(filepath)
    def get_imagenet_weights(self):
        """Download (or reuse cached) ImageNet ResNet-50 weights.

        Returns the local path to the downloaded weights file.
        """
        from keras.utils.data_utils import get_file
        TF_WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/'\
                                 'releases/download/v0.2/'\
                                 'resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5'
        # get_file caches the download under ~/.keras/models.
        weights_path = get_file('resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5',
                                TF_WEIGHTS_PATH_NO_TOP,
                                cache_subdir='models',
                                md5_hash='a268eb855778b3df3c7506639542a6af')
        return weights_path
    def compile(self, learning_rate, momentum):
        """Compile the model with SGD, custom losses and L2 regularization.

        The named loss layers' outputs are added via `add_loss` (the Keras
        `loss=` list is all None), each scaled by config.LOSS_WEIGHTS.
        """
        optimizer = keras.optimizers.SGD(
            lr=learning_rate, momentum=momentum,
            clipnorm=self.config.GRADIENT_CLIP_NORM)
        # Reset any previously registered losses (re-compile support).
        self.keras_model._losses = []
        self.keras_model._per_input_losses = {}
        loss_names = [
            "rpn_class_loss", "rpn_bbox_loss",
            "mrcnn_class_loss", "mrcnn_bbox_loss", "mrcnn_mask_loss"]
        for name in loss_names:
            layer = self.keras_model.get_layer(name)
            # Don't register the same loss layer twice.
            if layer.output in self.keras_model.losses:
                continue
            loss = (
                tf.reduce_mean(layer.output, keepdims=True)
                * self.config.LOSS_WEIGHTS.get(name, 1.))
            self.keras_model.add_loss(loss)
        # L2 regularization on all weights except BatchNorm gamma/beta,
        # normalized by weight size.
        reg_losses = [
            keras.regularizers.l2(self.config.WEIGHT_DECAY)(w) / tf.cast(tf.size(w), tf.float32)
            for w in self.keras_model.trainable_weights
            if 'gamma' not in w.name and 'beta' not in w.name]
        self.keras_model.add_loss(tf.add_n(reg_losses))
        # Compile with a None loss per output; real losses come from add_loss.
        self.keras_model.compile(
            optimizer=optimizer,
            loss=[None] * len(self.keras_model.outputs))
        # Also expose each loss as a named metric for progress reporting.
        for name in loss_names:
            if name in self.keras_model.metrics_names:
                continue
            layer = self.keras_model.get_layer(name)
            self.keras_model.metrics_names.append(name)
            loss = (
                tf.reduce_mean(layer.output, keepdims=True)
                * self.config.LOSS_WEIGHTS.get(name, 1.))
            self.keras_model.add_metric(loss, name, aggregation='mean')
    def set_trainable(self, layer_regex, keras_model=None, indent=0, verbose=1):
        """Set layers as trainable if their names match `layer_regex`.

        Recurses into nested models; for TimeDistributed wrappers the
        trainable flag is set on the wrapped layer.
        """
        # Print message on the first call (but not on recursive calls)
        if verbose > 0 and keras_model is None:
            log("Selecting layers to train")
        keras_model = keras_model or self.keras_model
        # In multi-GPU training, we wrap the model. Get layers of the inner
        # model because they have the weights.
        layers = keras_model.inner_model.layers if hasattr(keras_model, "inner_model")\
            else keras_model.layers
        for layer in layers:
            # Is the layer a model itself? If so, recurse into it.
            if layer.__class__.__name__ == 'Model':
                print("In model: ", layer.name)
                self.set_trainable(
                    layer_regex, keras_model=layer, indent=indent + 4)
                continue
            # Layers without weights have nothing to train.
            if not layer.weights:
                continue
            # Is it trainable?
            trainable = bool(re.fullmatch(layer_regex, layer.name))
            # Update layer. If layer is a container, update inner layer.
            if layer.__class__.__name__ == 'TimeDistributed':
                layer.layer.trainable = trainable
            else:
                layer.trainable = trainable
            # Print trainable layer names
            if trainable and verbose > 0:
                log("{}{:20}   ({})".format(" " * indent, layer.name,
                                            layer.__class__.__name__))
def set_log_dir(self, model_path=None):
self.epoch = 0
now = datetime.datetime.now()
if model_path:
regex = r".*[/\\][\w-]+(\d{4})(\d{2})(\d{2})T(\d{2})(\d{2})[/\\]mask\_rcnn\_[\w-]+(\d{4})\.h5"
m = re.match(regex, str(model_path))
if m:
now = datetime.datetime(int(m.group(1)), int(m.group(2)), int(m.group(3)),
int(m.group(4)), int(m.group(5)))
# So, adjust for that then increment by one to start from the next epoch
self.epoch = int(m.group(6)) - 1 + 1
print('Re-starting from epoch %d' % self.epoch)
# Directory for training logs
self.log_dir = os.path.join(self.model_dir, "{}{:%Y%m%dT%H%M}".format(
self.config.NAME.lower(), now))
# Path to save after each epoch. Include placeholders that get filled by Keras.
self.checkpoint_path = os.path.join(self.log_dir, "mask_rcnn_{}_*epoch*.h5".format(
self.config.NAME.lower()))
self.checkpoint_path = self.checkpoint_path.replace(
"*epoch*", "{epoch:04d}")
    def train(self, train_dataset, val_dataset, learning_rate, epochs, layers,
              augmentation=None, custom_callbacks=None):
        """Train the model.

        train_dataset, val_dataset: Dataset objects for training/validation
        learning_rate: learning rate for SGD
        epochs: target epoch number (training continues up to this epoch,
            starting from self.epoch)
        layers: either a regex of layer names to train, or one of the
            pre-defined keys: "heads", "3+", "4+", "5+", "all"
        augmentation: optional imgaug-style augmentation pipeline
        custom_callbacks: optional list of extra Keras callbacks
        """
        assert self.mode == "training", "Create model in training mode."
        # Pre-defined layer regular expressions
        layer_regex = {
            # all layers but the backbone
            "heads": r"(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)",
            # From a specific Resnet stage and up
            "3+": r"(res3.*)|(bn3.*)|(res4.*)|(bn4.*)|(res5.*)|(bn5.*)|(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)",
            "4+": r"(res4.*)|(bn4.*)|(res5.*)|(bn5.*)|(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)",
            "5+": r"(res5.*)|(bn5.*)|(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)",
            # All layers
            "all": ".*",
        }
        if layers in layer_regex.keys():
            layers = layer_regex[layers]
        # Data generators
        train_generator = DataGenerator(train_dataset, self.config, shuffle=True,
                                        augmentation=augmentation,
                                        batch_size=self.config.BATCH_SIZE)
        val_generator = DataGenerator(val_dataset, self.config, shuffle=True,
                                      batch_size=self.config.BATCH_SIZE)
        # Create log_dir if it does not exist
        if not os.path.exists(self.log_dir):
            os.makedirs(self.log_dir)
        # Callbacks
        callbacks = [
            keras.callbacks.TensorBoard(log_dir=self.log_dir,
                                        histogram_freq=0, write_graph=True, write_images=False),
            keras.callbacks.ModelCheckpoint(self.checkpoint_path,
                                            verbose=0, save_weights_only=True),
        ]
        # Add custom callbacks to the list
        if custom_callbacks:
            callbacks += custom_callbacks
        # Train
        log("\nStarting at epoch {}. LR={}\n".format(self.epoch, learning_rate))
        log("Checkpoint Path: {}".format(self.checkpoint_path))
        self.set_trainable(layers)
        self.compile(learning_rate, self.config.LEARNING_MOMENTUM)
        # Work-around for Windows: Keras fails on Windows when using
        # multiprocessing workers. See discussion here:
        # https://github.com/matterport/Mask_RCNN/issues/13#issuecomment-353124009
        if os.name == 'nt':
            workers = 0
        else:
            workers = multiprocessing.cpu_count()
        self.keras_model.fit(
            train_generator,
            initial_epoch=self.epoch,
            epochs=epochs,
            steps_per_epoch=self.config.STEPS_PER_EPOCH,
            callbacks=callbacks,
            validation_data=val_generator,
            validation_steps=self.config.VALIDATION_STEPS,
            max_queue_size=100,
            workers=workers,
            use_multiprocessing=(1 < workers),
        )
        # Keep epoch monotonic across multiple train() calls.
        self.epoch = max(self.epoch, epochs)
def mold_inputs(self, images):
molded_images = []
image_metas = []
windows = []
for image in images:
# Resize image
# TODO: move resizing to mold_image()
molded_image, window, scale, padding, crop = utils.resize_image(
image,
min_dim=self.config.IMAGE_MIN_DIM,
min_scale=self.config.IMAGE_MIN_SCALE,
max_dim=self.config.IMAGE_MAX_DIM,
mode=self.config.IMAGE_RESIZE_MODE)
molded_image = mold_image(molded_image, self.config)
# Build image_meta
image_meta = compose_image_meta(
0, image.shape, molded_image.shape, window, scale,
np.zeros([self.config.NUM_CLASSES], dtype=np.int32))
# Append
molded_images.append(molded_image)
windows.append(window)
image_metas.append(image_meta)
# Pack into arrays
molded_images = np.stack(molded_images)
image_metas = np.stack(image_metas)
windows = np.stack(windows)
return molded_images, image_metas, windows
    def unmold_detections(self, detections, mrcnn_mask, original_image_shape,
                          image_shape, window):
        """Convert network outputs back into original-image coordinates.

        detections: [N, (y1, x1, y2, x2, class_id, score)] in normalized coords
        mrcnn_mask: [N, height, width, num_classes]
        original_image_shape: shape of the image before resizing
        image_shape: shape of the molded (resized/padded) image
        window: (y1, x1, y2, x2) pixel window of the real image in the
            molded image

        Returns: (boxes, class_ids, scores, full_masks) where boxes are in
        pixel coordinates of the original image and full_masks is
        [H, W, N] boolean.
        """
        # How many detections do we have?
        # Detections array is padded with zeros. Find the first class_id == 0.
        zero_ix = np.where(detections[:, 4] == 0)[0]
        N = zero_ix[0] if zero_ix.shape[0] > 0 else detections.shape[0]
        # Extract boxes, class_ids, scores, and class-specific masks
        boxes = detections[:N, :4]
        class_ids = detections[:N, 4].astype(np.int32)
        scores = detections[:N, 5]
        # Pick each detection's mask for its predicted class.
        masks = mrcnn_mask[np.arange(N), :, :, class_ids]
        # Translate normalized coordinates in the resized image to pixel
        # coordinates in the original image before resizing
        window = utils.norm_boxes(window, image_shape[:2])
        wy1, wx1, wy2, wx2 = window
        shift = np.array([wy1, wx1, wy1, wx1])
        wh = wy2 - wy1  # window height
        ww = wx2 - wx1  # window width
        scale = np.array([wh, ww, wh, ww])
        # Convert boxes to normalized coordinates on the window
        boxes = np.divide(boxes - shift, scale)
        # Convert boxes to pixel coordinates on the original image
        boxes = utils.denorm_boxes(boxes, original_image_shape[:2])
        # Filter out detections with zero area. Happens in early training when
        # network weights are still random
        exclude_ix = np.where(
            (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]) <= 0)[0]
        if exclude_ix.shape[0] > 0:
            boxes = np.delete(boxes, exclude_ix, axis=0)
            class_ids = np.delete(class_ids, exclude_ix, axis=0)
            scores = np.delete(scores, exclude_ix, axis=0)
            masks = np.delete(masks, exclude_ix, axis=0)
            N = class_ids.shape[0]
        # Resize masks to original image size and set boundary threshold.
        full_masks = []
        for i in range(N):
            # Convert neural network mask to full size mask
            full_mask = utils.unmold_mask(masks[i], boxes[i], original_image_shape)
            full_masks.append(full_mask)
        full_masks = np.stack(full_masks, axis=-1)\
            if full_masks else np.empty(original_image_shape[:2] + (0,))
        return boxes, class_ids, scores, full_masks
    def detect(self, images, verbose=0):
        """Run the detection pipeline on a list of images.

        images: list of raw images (possibly different sizes); must contain
            exactly BATCH_SIZE images.
        verbose: if non-zero, log input shapes.

        Returns a list of dicts, one per image, with keys:
            rois, class_ids, scores, masks (all in original image coords).
        """
        assert self.mode == "inference", "Create model in inference mode."
        assert len(
            images) == self.config.BATCH_SIZE, "len(images) must be equal to BATCH_SIZE"
        if verbose:
            log("Processing {} images".format(len(images)))
            for image in images:
                log("image", image)
        # Mold inputs to format expected by the neural network
        molded_images, image_metas, windows = self.mold_inputs(images)
        # Validate image sizes
        # All images in a batch MUST be of the same size
        image_shape = molded_images[0].shape
        for g in molded_images[1:]:
            assert g.shape == image_shape,\
                "After resizing, all images must have the same size. Check IMAGE_RESIZE_MODE and image sizes."
        # Anchors
        anchors = self.get_anchors(image_shape)
        # Duplicate across the batch dimension because Keras requires it
        # TODO: can this be optimized to avoid duplicating the anchors?
        anchors = np.broadcast_to(anchors, (self.config.BATCH_SIZE,) + anchors.shape)
        if verbose:
            log("molded_images", molded_images)
            log("image_metas", image_metas)
            log("anchors", anchors)
        # Run object detection
        detections, _, _, mrcnn_mask, _, _, _ =\
            self.keras_model.predict([molded_images, image_metas, anchors], verbose=0)
        # Process detections
        results = []
        for i, image in enumerate(images):
            final_rois, final_class_ids, final_scores, final_masks =\
                self.unmold_detections(detections[i], mrcnn_mask[i],
                                       image.shape, molded_images[i].shape,
                                       windows[i])
            results.append({
                "rois": final_rois,
                "class_ids": final_class_ids,
                "scores": final_scores,
                "masks": final_masks,
            })
        return results
    def detect_molded(self, molded_images, image_metas, verbose=0):
        """Run detection on images that are already molded.

        Like `detect()`, but skips `mold_inputs`; the caller supplies
        molded images and their metadata. The unmold window is the whole
        molded image.
        """
        assert self.mode == "inference", "Create model in inference mode."
        assert len(molded_images) == self.config.BATCH_SIZE,\
            "Number of images must be equal to BATCH_SIZE"
        if verbose:
            log("Processing {} images".format(len(molded_images)))
            for image in molded_images:
                log("image", image)
        # Validate image sizes
        # All images in a batch MUST be of the same size
        image_shape = molded_images[0].shape
        for g in molded_images[1:]:
            assert g.shape == image_shape, "Images must have the same size"
        # Anchors
        anchors = self.get_anchors(image_shape)
        # Duplicate across the batch dimension because Keras requires it
        # TODO: can this be optimized to avoid duplicating the anchors?
        anchors = np.broadcast_to(anchors, (self.config.BATCH_SIZE,) + anchors.shape)
        if verbose:
            log("molded_images", molded_images)
            log("image_metas", image_metas)
            log("anchors", anchors)
        # Run object detection
        detections, _, _, mrcnn_mask, _, _, _ =\
            self.keras_model.predict([molded_images, image_metas, anchors], verbose=0)
        # Process detections
        results = []
        for i, image in enumerate(molded_images):
            # The molded image has no padding window to undo here.
            window = [0, 0, image.shape[0], image.shape[1]]
            final_rois, final_class_ids, final_scores, final_masks =\
                self.unmold_detections(detections[i], mrcnn_mask[i],
                                       image.shape, molded_images[i].shape,
                                       window)
            results.append({
                "rois": final_rois,
                "class_ids": final_class_ids,
                "scores": final_scores,
                "masks": final_masks,
            })
        return results
    def get_anchors(self, image_shape):
        """Return the anchor pyramid for the given image size.

        Anchors are cached per image shape; the returned array is in
        normalized coordinates. `self.anchors` keeps the latest
        pixel-coordinate anchors as a side effect.
        """
        backbone_shapes = compute_backbone_shapes(self.config, image_shape)
        # Cache anchors and reuse if image shape is the same
        if not hasattr(self, "_anchor_cache"):
            self._anchor_cache = {}
        if not tuple(image_shape) in self._anchor_cache:
            # Generate Anchors
            a = utils.generate_pyramid_anchors(
                self.config.RPN_ANCHOR_SCALES,
                self.config.RPN_ANCHOR_RATIOS,
                backbone_shapes,
                self.config.BACKBONE_STRIDES,
                self.config.RPN_ANCHOR_STRIDE)
            # Keep a copy of the latest anchors in pixel coordinates because
            # it's used in inspect_model notebooks.
            self.anchors = a
            self._anchor_cache[tuple(image_shape)] = utils.norm_boxes(a, image_shape[:2])
        return self._anchor_cache[tuple(image_shape)]
    def ancestor(self, tensor, name, checked=None):
        """Find the first ancestor of `tensor` whose name matches `name`.

        name: string or compiled regex. Slashes in a string pattern also
            match Keras's auto-added numeric suffixes (e.g. "foo_2/").
        checked: internal list of tensors already visited (cycle/size guard).

        Returns the matching tensor, or None if not found (or if the
        search visits more than 500 tensors).
        """
        checked = checked if checked is not None else []
        # Put a limit on how deep we go to avoid very long loops
        if len(checked) > 500:
            return None
        # Convert name to a regex that tolerates "_<n>" name suffixes.
        if isinstance(name, str):
            name = re.compile(name.replace("/", r"(\_\d+)*/"))
        # Depth-first search through the tensor's op inputs.
        parents = tensor.op.inputs
        for p in parents:
            if p in checked:
                continue
            if bool(re.fullmatch(name, p.name)):
                return p
            checked.append(p)
            a = self.ancestor(p, name, checked)
            if a is not None:
                return a
        return None
def find_trainable_layer(self, layer):
if layer.__class__.__name__ == 'TimeDistributed':
return self.find_trainable_layer(layer.layer)
return layer
def get_trainable_layers(self):
layers = []
for l in self.keras_model.layers:
l = self.find_trainable_layer(l)
if l.get_weights():
layers.append(l)
return layers
    def run_graph(self, images, outputs, image_metas=None):
        """Run a sub-graph of the model and return the requested intermediate outputs.

        outputs: pairs of (name, tensor); each tensor must be a node of the
            model's computation graph.
        Returns an OrderedDict mapping each name to its computed value.
        """
        model = self.keras_model
        # Normalize to an ordered mapping so names stay aligned with values.
        outputs = OrderedDict(outputs)
        for o in outputs.values():
            assert o is not None
        inputs = model.inputs
        # NOTE(review): `inputs` aliases model.inputs, so `+=` below mutates
        # model.inputs in place as well; the K.function call on model.inputs
        # then sees the appended learning-phase input too. Presumably
        # intentional — confirm before refactoring.
        if model.uses_learning_phase and not isinstance(K.learning_phase(), int):
            inputs += [K.learning_phase()]
        kf = K.function(model.inputs, list(outputs.values()))
        # Mold inputs unless the caller already provides molded image metas.
        if image_metas is None:
            molded_images, image_metas, _ = self.mold_inputs(images)
        else:
            molded_images = images
        image_shape = molded_images[0].shape
        anchors = self.get_anchors(image_shape)
        # Duplicate anchors across the batch dimension (the model expects
        # one anchor set per batch item).
        anchors = np.broadcast_to(anchors, (self.config.BATCH_SIZE,) + anchors.shape)
        model_in = [molded_images, image_metas, anchors]
        # Append learning phase (0. = inference) when the graph expects it.
        if model.uses_learning_phase and not isinstance(K.learning_phase(), int):
            model_in.append(0.)
        outputs_np = kf(model_in)
        # Re-attach names to the computed values and log them for inspection.
        outputs_np = OrderedDict([(k, v)
                                  for k, v in zip(outputs.keys(), outputs_np)])
        for k, v in outputs_np.items():
            log(k, v)
        return outputs_np
| true | true |
f72422d21c721489ea30f17fc3c39c216a26c7fe | 295 | py | Python | pyrism/tests/test_pyrism.py | 2AUK/pyrism | 7067fa7a261adc2faabcffbcb2d40d395e42a3c8 | [
"MIT"
] | 4 | 2020-10-26T14:32:08.000Z | 2021-03-26T01:23:37.000Z | pyrism/tests/test_pyrism.py | 2AUK/pyrism | 7067fa7a261adc2faabcffbcb2d40d395e42a3c8 | [
"MIT"
] | 1 | 2021-09-17T18:21:19.000Z | 2021-11-22T00:01:46.000Z | pyrism/tests/test_pyrism.py | 2AUK/pyrism | 7067fa7a261adc2faabcffbcb2d40d395e42a3c8 | [
"MIT"
] | 1 | 2022-03-08T12:00:35.000Z | 2022-03-08T12:00:35.000Z | """
Unit and regression test for the pyrism package.
"""
# Import package, test suite, and other packages as needed
import pyrism
import pytest
import sys
def test_pyrism_imported():
    """Sanity check: the `import pyrism` at module load time succeeded."""
    loaded_modules = sys.modules
    assert "pyrism" in loaded_modules
| 22.692308 | 74 | 0.742373 |
import pyrism
import pytest
import sys
def test_pyrism_imported():
assert "pyrism" in sys.modules
| true | true |
f72422e7430720064ce9521dd3b750947c119390 | 4,013 | py | Python | examples/simultaneous_translation/utils/functions.py | ictnlp/Dual-Path | 8c4577236908797ede1d971c11c2b3ef247e3469 | [
"MIT"
] | 3 | 2022-03-22T09:02:02.000Z | 2022-03-28T14:16:37.000Z | examples/simultaneous_translation/utils/functions.py | ictnlp/Dual-Path | 8c4577236908797ede1d971c11c2b3ef247e3469 | [
"MIT"
] | null | null | null | examples/simultaneous_translation/utils/functions.py | ictnlp/Dual-Path | 8c4577236908797ede1d971c11c2b3ef247e3469 | [
"MIT"
] | 1 | 2022-03-11T08:00:17.000Z | 2022-03-11T08:00:17.000Z | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
def exclusive_cumprod(tensor, dim: int, eps: float = 1e-10):
    """
    Exclusive cumulative product along `dim`.

    cumprod(x)  = [x1, x1*x2, ..., prod_{i=1}^{n} x_i]
    exclusive:    [1,  x1,     ..., prod_{i=1}^{n-1} x_i]

    Generalized: instead of the original hard-coded slicing for dims 0-2
    (which raised RuntimeError for dim >= 3), `Tensor.narrow` drops the
    last slice along any dimension, so every dim is supported.

    :param tensor: input tensor (non-negative values; see safe_cumprod)
    :param dim: dimension along which to accumulate
    :param eps: numerical-stability constant forwarded to safe_cumprod
    """
    ones_size = list(tensor.size())
    ones_size[dim] = 1
    # Prepend a slice of ones, take the (safe) inclusive cumprod, then
    # drop the last slice: the result is the exclusive cumprod.
    padded = torch.cat(
        [torch.ones(ones_size, device=tensor.device), tensor], dim=dim
    )
    cumulative = safe_cumprod(padded, dim=dim, eps=eps)
    return cumulative.narrow(dim, 0, cumulative.size(dim) - 1)


def safe_cumprod(tensor, dim: int, eps: float = 1e-10):
    """
    Numerically safer cumprod for non-negative inputs:

        cumprod(x) = exp(cumsum(log(x + eps)))

    Working in log space avoids underflow when many small factors are
    multiplied together.

    :raises RuntimeError: if any element of (tensor + eps) is negative.
    """
    if (tensor + eps < 0).any().item():
        raise RuntimeError(
            "Safe cumprod can only take non-negative tensors as input."
            "Consider use torch.cumprod if you want to calculate negative values."
        )
    log_tensor = torch.log(tensor + eps)
    return torch.exp(torch.cumsum(log_tensor, dim))
def lengths_to_mask(lengths, max_len: int, dim: int = 0, negative_mask: bool = False):
    """
    Convert a tensor of lengths to a boolean mask.

    Example: lengths = [2, 3, 4], max_len = 5, dim=0 ->
        [[1, 1, 1],
         [1, 1, 1],
         [0, 1, 1],
         [0, 0, 1],
         [0, 0, 0]]

    :param lengths: 1-D (batch,) or 2-D tensor of lengths.
    :param max_len: number of mask positions per sequence.
    :param dim: 0 -> mask shaped (max_len, batch); 1 -> (batch, max_len).
    :param negative_mask: if True, invert the mask.
    """
    assert len(lengths.size()) <= 2
    # Fix: the original tested `len(lengths) == 2` (i.e. size(0) == 2),
    # which misfires on 1-D inputs of length 2; we want dimensionality.
    if len(lengths.size()) == 2:
        if dim == 1:
            lengths = lengths.t()
    else:
        lengths = lengths.unsqueeze(1)
    # lengths: (batch_size, 1)
    lengths = lengths.view(-1, 1)
    batch_size = lengths.size(0)
    # (batch_size, max_len); fix: use the input's device instead of the
    # original hard-coded "cuda" so CPU tensors work too.
    mask = (
        torch.arange(max_len, device=lengths.device)
        .expand(batch_size, max_len)
        .type_as(lengths)
        < lengths
    )
    if negative_mask:
        mask = ~mask
    if dim == 0:
        # (max_len, batch_size)
        mask = mask.t()
    return mask
def moving_sum(x, start_idx: int, end_idx: int):
    """
    Windowed (moving) sum, Eq. (18) of "Monotonic Chunkwise Attention"
    (https://arxiv.org/pdf/1712.05382.pdf):

        MovingSum(x, start_idx, end_idx)_n
            = sum_{m = n-(start_idx-1)}^{n+end_idx-1} x_m

    i.e. position n sums `start_idx` elements backwards (inclusive of n)
    and `end_idx - 1` elements forwards.

    :param x: tensor of shape (src_len, batch_size)
    :param start_idx: backward extent of the window (>= 1)
    :param end_idx: forward extent of the window (>= 1)
    :return: tensor of shape (src_len, batch_size)
    """
    assert start_idx > 0 and end_idx > 0
    assert len(x.size()) == 2
    src_len, batch_size = x.size()
    # Treat each batch column as a 1-channel signal: (batch, 1, src_len).
    signal = x.t().unsqueeze(1)
    window = end_idx + start_idx - 1
    # Convolving with an all-ones kernel computes a windowed sum.
    kernel = signal.new_ones([1, 1, window])
    summed = torch.nn.functional.conv1d(signal, kernel, padding=window)
    # Back to (padded_len, batch) and trim the padding overshoot.
    summed = summed.squeeze(1).t()[end_idx:-start_idx]
    assert src_len == summed.size(0)
    assert batch_size == summed.size(1)
    return summed
| 25.890323 | 86 | 0.554448 |
import torch
def exclusive_cumprod(tensor, dim: int, eps: float = 1e-10):
tensor_size = list(tensor.size())
tensor_size[dim] = 1
return_tensor = safe_cumprod(
torch.cat([torch.ones(tensor_size, device=tensor.device), tensor], dim=dim),
dim=dim,
eps=eps,
)
if dim == 0:
return return_tensor[:-1]
elif dim == 1:
return return_tensor[:, :-1]
elif dim == 2:
return return_tensor[:, :, :-1]
else:
raise RuntimeError("Cumprod on dimension 3 and more is not implemented")
def safe_cumprod(tensor, dim: int, eps: float = 1e-10):
if (tensor + eps < 0).any().item():
raise RuntimeError(
"Safe cumprod can only take non-negative tensors as input."
"Consider use torch.cumprod if you want to calculate negative values."
)
log_tensor = torch.log(tensor + eps)
cumsum_log_tensor = torch.cumsum(log_tensor, dim)
exp_cumsum_log_tensor = torch.exp(cumsum_log_tensor)
return exp_cumsum_log_tensor
def lengths_to_mask(lengths, max_len: int, dim: int = 0, negative_mask: bool = False):
assert len(lengths.size()) <= 2
if len(lengths) == 2:
if dim == 1:
lengths = lengths.t()
lengths = lengths
else:
lengths = lengths.unsqueeze(1)
lengths = lengths.view(-1, 1)
batch_size = lengths.size(0)
mask = (
torch.arange(max_len, device="cuda")
.expand(batch_size, max_len)
.type_as(lengths)
< lengths
)
if negative_mask:
mask = ~mask
if dim == 0:
mask = mask.t()
return mask
def moving_sum(x, start_idx: int, end_idx: int):
assert start_idx > 0 and end_idx > 0
assert len(x.size()) == 2
src_len, batch_size = x.size()
x = x.t().unsqueeze(1)
moving_sum_weight = x.new_ones([1, 1, end_idx + start_idx - 1])
moving_sum = (
torch.nn.functional.conv1d(
x, moving_sum_weight, padding=start_idx + end_idx - 1
)
.squeeze(1)
.t()
)
moving_sum = moving_sum[end_idx:-start_idx]
assert src_len == moving_sum.size(0)
assert batch_size == moving_sum.size(1)
return moving_sum
| true | true |
f72423086a36f87912c69f9ab0d75e652edb58d3 | 7,080 | py | Python | train.py | uclyyu/over9000 | 42db9fa6ac5a9a2e177f1f9a9a660bee9cd5d587 | [
"Apache-2.0"
] | null | null | null | train.py | uclyyu/over9000 | 42db9fa6ac5a9a2e177f1f9a9a660bee9cd5d587 | [
"Apache-2.0"
] | null | null | null | train.py | uclyyu/over9000 | 42db9fa6ac5a9a2e177f1f9a9a660bee9cd5d587 | [
"Apache-2.0"
] | null | null | null | # the code mostly from https://github.com/sdoria/SimpleSelfAttention
# adapted from https://github.com/fastai/fastai/blob/master/examples/train_imagenette.py
# added self attention parameter
# changed per gpu bs for bs_rat
from fastai.script import *
from fastai.vision import *
from fastai.callbacks import *
from fastai.distributed import *
from fastprogress import fastprogress
from torchvision.models import *
#from fastai.vision.models.xresnet import *
#from fastai.vision.models.xresnet2 import *
#from fastai.vision.models.presnet import *
from xresnet import *
from functools import partial
import statsmodels.stats.api as sms
torch.backends.cudnn.benchmark = True
fastprogress.MAX_COLS = 80
def get_data(size, woof, bs, workers=None):
    """Build an Imagenette/Imagewoof DataBunch at the requested image size."""
    # Pick the smallest dataset variant that still covers `size`.
    if size <= 128:
        path = URLs.IMAGEWOOF_160 if woof else URLs.IMAGENETTE_160
    elif size <= 224:
        path = URLs.IMAGEWOOF_320 if woof else URLs.IMAGENETTE_320
    else:
        path = URLs.IMAGEWOOF if woof else URLs.IMAGENETTE
    path = untar_data(path)

    n_gpus = num_distrib() or 1
    if workers is None:
        workers = min(8, num_cpus() // n_gpus)

    item_list = ImageList.from_folder(path).split_by_folder(valid='val')
    labelled = item_list.label_from_folder().transform(
        ([flip_lr(p=0.5)], []), size=size)
    data = labelled.databunch(bs=bs, num_workers=workers)
    return data.presize(size, scale=(0.35, 1)).normalize(imagenet_stats)
from radam import *
from novograd import *
from ranger import *
from ralamb import *
from over9000 import *
from lookahead import *
from lamb import *
from diffgrad import DiffGrad
from adamod import AdaMod
from madam import Madam
def fit_with_annealing(learn:Learner, num_epoch:int, lr:float=defaults.lr, annealing_start:float=0.7)->None:
    """Train with a flat LR for the first `annealing_start` fraction of
    batches, then cosine-anneal the LR to the end of training."""
    batches_per_epoch = len(learn.data.train_dl)
    total_batches = batches_per_epoch * num_epoch
    anneal_start = int(total_batches * annealing_start)
    flat_phase = TrainingPhase(anneal_start).schedule_hp('lr', lr)
    anneal_phase = TrainingPhase(total_batches - anneal_start).schedule_hp(
        'lr', lr, anneal=annealing_cos)
    learn.callbacks.append(GeneralScheduler(learn, [flat_phase, anneal_phase]))
    learn.fit(num_epoch)
def _make_opt_func(opt, mom, alpha, eps):
    """Resolve an optimizer name to a partially-applied optimizer factory.

    Replaces the original 13-branch elif chain; an unknown name now fails
    fast with a KeyError here instead of a late NameError on `opt_func`.
    """
    factories = {
        'adam': partial(optim.Adam, betas=(mom, alpha), eps=eps),
        'radam': partial(RAdam, betas=(mom, alpha), eps=eps),
        'novograd': partial(Novograd, betas=(mom, alpha), eps=eps),
        'rms': partial(optim.RMSprop, alpha=alpha, eps=eps),
        'sgd': partial(optim.SGD, momentum=mom),
        'ranger': partial(Ranger, betas=(mom, alpha), eps=eps),
        'ralamb': partial(Ralamb, betas=(mom, alpha), eps=eps),
        'over9000': partial(Over9000, betas=(mom, alpha), eps=eps),
        'lookahead': partial(LookaheadAdam, betas=(mom, alpha), eps=eps),
        'lamb': partial(Lamb, betas=(mom, alpha), eps=eps),
        'diffgrad': partial(DiffGrad, version=1, betas=(mom, alpha), eps=eps),
        'adamod': partial(AdaMod, betas=(mom, alpha), eps=eps, beta3=0.999),
        'madam': partial(Madam, p_scale=3.0, g_bound=10.0),
    }
    return factories[opt]


def train(
        gpu:Param("GPU to run on", str)=None,
        woof: Param("Use imagewoof (otherwise imagenette)", int)=0,
        lr: Param("Learning rate", float)=1e-3,
        size: Param("Size (px: 128,192,224)", int)=128,
        alpha: Param("Alpha", float)=0.99,
        mom: Param("Momentum", float)=0.9,
        eps: Param("epsilon", float)=1e-6,
        epochs: Param("Number of epochs", int)=5,
        bs: Param("Batch size", int)=256,
        mixup: Param("Mixup", float)=0.,
        opt: Param("Optimizer (adam,rms,sgd)", str)='adam',
        arch: Param("Architecture (xresnet34, xresnet50)", str)='xresnet50',
        sa: Param("Self-attention", int)=0,
        sym: Param("Symmetry for self-attention", int)=0,
        dump: Param("Print model; don't train", int)=0,
        lrfinder: Param("Run learning rate finder; don't train", int)=0,
        log: Param("Log file name", str)='log',
        sched_type: Param("LR schedule type", str)='one_cycle',
        ann_start: Param("Mixup", float)=-1.0,
        ):
    """Distributed training of Imagenette/Imagewoof.

    Returns the first recorded metric (accuracy) of the final epoch.
    """
    bs_one_gpu = bs
    gpu = setup_distrib(gpu)
    if gpu is None: bs *= torch.cuda.device_count()
    opt_func = _make_opt_func(opt, mom, alpha, eps)

    data = get_data(size, woof, bs)
    # Scale the LR linearly with the effective (multi-process) batch size.
    bs_rat = bs/bs_one_gpu   #originally bs/256
    if gpu is not None: bs_rat *= max(num_distrib(), 1)
    if not gpu: print(f'lr: {lr}; eff_lr: {lr*bs_rat}; size: {size}; alpha: {alpha}; mom: {mom}; eps: {eps}')
    lr *= bs_rat

    # Look up the architecture constructor by name (imported via xresnet).
    m = globals()[arch]

    log_cb = partial(CSVLogger,filename=log)
    learn = (Learner(data, m(c_out=10, sa=sa, sym=sym), wd=1e-2, opt_func=opt_func,
             metrics=[accuracy,top_k_accuracy],
             bn_wd=False, true_wd=True,
             loss_func = LabelSmoothingCrossEntropy(),
             callback_fns=[log_cb])
            )
    print(learn.path)

    if dump: print(learn.model); exit()
    if mixup: learn = learn.mixup(alpha=mixup)
    learn = learn.to_fp16(dynamic=True)
    # Pick the parallelism strategy matching how the script was launched.
    if gpu is None:       learn.to_parallel()
    elif num_distrib()>1: learn.to_distributed(gpu) # Requires `-m fastai.launch`

    if lrfinder:
        # run learning rate finder
        IN_NOTEBOOK = 1
        learn.lr_find(wd=1e-2)
        learn.recorder.plot()
    else:
        if sched_type == 'one_cycle':
            learn.fit_one_cycle(epochs, lr, div_factor=10, pct_start=0.3)
        elif sched_type == 'flat_and_anneal':
            fit_with_annealing(learn, epochs, lr, ann_start)

    return learn.recorder.metrics[-1][0]
@call_parse
def main(
        run: Param("Number of run", int)=20,
        gpu:Param("GPU to run on", str)=None,
        woof: Param("Use imagewoof (otherwise imagenette)", int)=0,
        lr: Param("Learning rate", float)=1e-3,
        size: Param("Size (px: 128,192,224)", int)=128,
        alpha: Param("Alpha", float)=0.99,
        mom: Param("Momentum", float)=0.9,
        eps: Param("epsilon", float)=1e-6,
        epochs: Param("Number of epochs", int)=5,
        bs: Param("Batch size", int)=256,
        mixup: Param("Mixup", float)=0.,
        opt: Param("Optimizer (adam,rms,sgd)", str)='adam',
        arch: Param("Architecture (xresnet34, xresnet50)", str)='xresnet50',
        sa: Param("Self-attention", int)=0,
        sym: Param("Symmetry for self-attention", int)=0,
        dump: Param("Print model; don't train", int)=0,
        lrfinder: Param("Run learning rate finder; don't train", int)=0,
        log: Param("Log file name", str)='log',
        sched_type: Param("LR schedule type", str)='one_cycle',
        ann_start: Param("Mixup", float)=-1.0,
        ):
    """Run `train` `run` times and report mean/std/95% CI of the final accuracy."""
    accuracies = np.array([
        train(gpu, woof, lr, size, alpha, mom, eps, epochs, bs, mixup, opt,
              arch, sa, sym, dump, lrfinder, log, sched_type, ann_start)
        for _ in range(run)
    ])
    print(accuracies)
    print(f'mean = {np.mean(accuracies)}, std = {np.std(accuracies)}, ci-95 = {sms.DescrStatsW(accuracies).tconfint_mean()}')
| 42.142857 | 117 | 0.644068 |
from fastai.script import *
from fastai.vision import *
from fastai.callbacks import *
from fastai.distributed import *
from fastprogress import fastprogress
from torchvision.models import *
from xresnet import *
from functools import partial
import statsmodels.stats.api as sms
torch.backends.cudnn.benchmark = True
fastprogress.MAX_COLS = 80
def get_data(size, woof, bs, workers=None):
if size<=128: path = URLs.IMAGEWOOF_160 if woof else URLs.IMAGENETTE_160
elif size<=224: path = URLs.IMAGEWOOF_320 if woof else URLs.IMAGENETTE_320
else : path = URLs.IMAGEWOOF if woof else URLs.IMAGENETTE
path = untar_data(path)
n_gpus = num_distrib() or 1
if workers is None: workers = min(8, num_cpus()//n_gpus)
return (ImageList.from_folder(path).split_by_folder(valid='val')
.label_from_folder().transform(([flip_lr(p=0.5)], []), size=size)
.databunch(bs=bs, num_workers=workers)
.presize(size, scale=(0.35,1))
.normalize(imagenet_stats))
from radam import *
from novograd import *
from ranger import *
from ralamb import *
from over9000 import *
from lookahead import *
from lamb import *
from diffgrad import DiffGrad
from adamod import AdaMod
from madam import Madam
def fit_with_annealing(learn:Learner, num_epoch:int, lr:float=defaults.lr, annealing_start:float=0.7)->None:
n = len(learn.data.train_dl)
anneal_start = int(n*num_epoch*annealing_start)
phase0 = TrainingPhase(anneal_start).schedule_hp('lr', lr)
phase1 = TrainingPhase(n*num_epoch - anneal_start).schedule_hp('lr', lr, anneal=annealing_cos)
phases = [phase0, phase1]
sched = GeneralScheduler(learn, phases)
learn.callbacks.append(sched)
learn.fit(num_epoch)
def train(
gpu:Param("GPU to run on", str)=None,
woof: Param("Use imagewoof (otherwise imagenette)", int)=0,
lr: Param("Learning rate", float)=1e-3,
size: Param("Size (px: 128,192,224)", int)=128,
alpha: Param("Alpha", float)=0.99,
mom: Param("Momentum", float)=0.9,
eps: Param("epsilon", float)=1e-6,
epochs: Param("Number of epochs", int)=5,
bs: Param("Batch size", int)=256,
mixup: Param("Mixup", float)=0.,
opt: Param("Optimizer (adam,rms,sgd)", str)='adam',
arch: Param("Architecture (xresnet34, xresnet50)", str)='xresnet50',
sa: Param("Self-attention", int)=0,
sym: Param("Symmetry for self-attention", int)=0,
dump: Param("Print model; don't train", int)=0,
lrfinder: Param("Run learning rate finder; don't train", int)=0,
log: Param("Log file name", str)='log',
sched_type: Param("LR schedule type", str)='one_cycle',
ann_start: Param("Mixup", float)=-1.0,
):
bs_one_gpu = bs
gpu = setup_distrib(gpu)
if gpu is None: bs *= torch.cuda.device_count()
if opt=='adam' : opt_func = partial(optim.Adam, betas=(mom,alpha), eps=eps)
elif opt=='radam' : opt_func = partial(RAdam, betas=(mom,alpha), eps=eps)
elif opt=='novograd' : opt_func = partial(Novograd, betas=(mom,alpha), eps=eps)
elif opt=='rms' : opt_func = partial(optim.RMSprop, alpha=alpha, eps=eps)
elif opt=='sgd' : opt_func = partial(optim.SGD, momentum=mom)
elif opt=='ranger' : opt_func = partial(Ranger, betas=(mom,alpha), eps=eps)
elif opt=='ralamb' : opt_func = partial(Ralamb, betas=(mom,alpha), eps=eps)
elif opt=='over9000' : opt_func = partial(Over9000, betas=(mom,alpha), eps=eps)
elif opt=='lookahead' : opt_func = partial(LookaheadAdam, betas=(mom,alpha), eps=eps)
elif opt=='lamb' : opt_func = partial(Lamb, betas=(mom,alpha), eps=eps)
elif opt=='diffgrad' : opt_func = partial(DiffGrad, version=1, betas=(mom,alpha),eps=eps)
elif opt=='adamod' : opt_func = partial(AdaMod, betas=(mom,alpha), eps=eps, beta3=0.999)
elif opt=='madam' : opt_func = partial(Madam, p_scale=3.0, g_bound=10.0)
data = get_data(size, woof, bs)
bs_rat = bs/bs_one_gpu
if gpu is not None: bs_rat *= max(num_distrib(), 1)
if not gpu: print(f'lr: {lr}; eff_lr: {lr*bs_rat}; size: {size}; alpha: {alpha}; mom: {mom}; eps: {eps}')
lr *= bs_rat
m = globals()[arch]
log_cb = partial(CSVLogger,filename=log)
learn = (Learner(data, m(c_out=10, sa=sa, sym=sym), wd=1e-2, opt_func=opt_func,
metrics=[accuracy,top_k_accuracy],
bn_wd=False, true_wd=True,
loss_func = LabelSmoothingCrossEntropy(),
callback_fns=[log_cb])
)
print(learn.path)
if dump: print(learn.model); exit()
if mixup: learn = learn.mixup(alpha=mixup)
learn = learn.to_fp16(dynamic=True)
if gpu is None: learn.to_parallel()
elif num_distrib()>1: learn.to_distributed(gpu)
if lrfinder:
IN_NOTEBOOK = 1
learn.lr_find(wd=1e-2)
learn.recorder.plot()
else:
if sched_type == 'one_cycle':
learn.fit_one_cycle(epochs, lr, div_factor=10, pct_start=0.3)
elif sched_type == 'flat_and_anneal':
fit_with_annealing(learn, epochs, lr, ann_start)
return learn.recorder.metrics[-1][0]
@call_parse
def main(
run: Param("Number of run", int)=20,
gpu:Param("GPU to run on", str)=None,
woof: Param("Use imagewoof (otherwise imagenette)", int)=0,
lr: Param("Learning rate", float)=1e-3,
size: Param("Size (px: 128,192,224)", int)=128,
alpha: Param("Alpha", float)=0.99,
mom: Param("Momentum", float)=0.9,
eps: Param("epsilon", float)=1e-6,
epochs: Param("Number of epochs", int)=5,
bs: Param("Batch size", int)=256,
mixup: Param("Mixup", float)=0.,
opt: Param("Optimizer (adam,rms,sgd)", str)='adam',
arch: Param("Architecture (xresnet34, xresnet50)", str)='xresnet50',
sa: Param("Self-attention", int)=0,
sym: Param("Symmetry for self-attention", int)=0,
dump: Param("Print model; don't train", int)=0,
lrfinder: Param("Run learning rate finder; don't train", int)=0,
log: Param("Log file name", str)='log',
sched_type: Param("LR schedule type", str)='one_cycle',
ann_start: Param("Mixup", float)=-1.0,
):
acc = np.array(
[train(gpu,woof,lr,size,alpha,mom,eps,epochs,bs,mixup,opt,arch,sa,sym,dump,lrfinder,log,sched_type,ann_start)
for i in range(run)])
print(acc)
print(f'mean = {np.mean(acc)}, std = {np.std(acc)}, ci-95 = {sms.DescrStatsW(acc).tconfint_mean()}')
| true | true |
f724235889b40ee1930afc3bdbdf76d3255df0ce | 664 | py | Python | manage.py | aryanicosa/ppdb_mvt | cb4674fba6cb6663e7a5710f9a4b695c3bd574be | [
"MIT"
] | null | null | null | manage.py | aryanicosa/ppdb_mvt | cb4674fba6cb6663e7a5710f9a4b695c3bd574be | [
"MIT"
] | null | null | null | manage.py | aryanicosa/ppdb_mvt | cb4674fba6cb6663e7a5710f9a4b695c3bd574be | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Entry point: configure Django settings and dispatch to its CLI."""
    # Point Django at this project's settings unless already configured.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ppdb_mvt.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        message = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(message) from exc
    execute_from_command_line(sys.argv)


if __name__ == '__main__':
    main()
| 28.869565 | 73 | 0.679217 |
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ppdb_mvt.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| true | true |
f7242487ac92a8fd05d59314d185adfdc7c880fd | 6,226 | py | Python | lexis/Lib/site-packages/oauth2_provider/settings.py | ALEXIS2ES/sherom-Serve | d63bc023f7cf45898f32dcce5d808aef944bc741 | [
"MIT"
] | null | null | null | lexis/Lib/site-packages/oauth2_provider/settings.py | ALEXIS2ES/sherom-Serve | d63bc023f7cf45898f32dcce5d808aef944bc741 | [
"MIT"
] | 7 | 2020-06-05T18:33:09.000Z | 2021-09-20T23:07:52.000Z | lexis/Lib/site-packages/oauth2_provider/settings.py | ALEXIS2ES/sherom-Serve | d63bc023f7cf45898f32dcce5d808aef944bc741 | [
"MIT"
] | null | null | null | """
This module is largely inspired by django-rest-framework settings.
Settings for the OAuth2 Provider are all namespaced in the OAUTH2_PROVIDER setting.
For example your project's `settings.py` file might look like this:
OAUTH2_PROVIDER = {
"CLIENT_ID_GENERATOR_CLASS":
"oauth2_provider.generators.ClientIdGenerator",
"CLIENT_SECRET_GENERATOR_CLASS":
"oauth2_provider.generators.ClientSecretGenerator",
}
This module provides the `oauth2_settings` object, that is used to access
OAuth2 Provider settings, checking for user settings first, then falling
back to the defaults.
"""
from __future__ import unicode_literals
import importlib
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
USER_SETTINGS = getattr(settings, "OAUTH2_PROVIDER", None)
APPLICATION_MODEL = getattr(settings, "OAUTH2_PROVIDER_APPLICATION_MODEL", "oauth2_provider.Application")
ACCESS_TOKEN_MODEL = getattr(settings, "OAUTH2_PROVIDER_ACCESS_TOKEN_MODEL", "oauth2_provider.AccessToken")
GRANT_MODEL = getattr(settings, "OAUTH2_PROVIDER_GRANT_MODEL", "oauth2_provider.Grant")
REFRESH_TOKEN_MODEL = getattr(settings, "OAUTH2_PROVIDER_REFRESH_MODEL", "oauth2_provider.RefreshToken")
DEFAULTS = {
"CLIENT_ID_GENERATOR_CLASS": "oauth2_provider.generators.ClientIdGenerator",
"CLIENT_SECRET_GENERATOR_CLASS": "oauth2_provider.generators.ClientSecretGenerator",
"CLIENT_SECRET_GENERATOR_LENGTH": 128,
"OAUTH2_SERVER_CLASS": "oauthlib.oauth2.Server",
"OAUTH2_VALIDATOR_CLASS": "oauth2_provider.oauth2_validators.OAuth2Validator",
"OAUTH2_BACKEND_CLASS": "oauth2_provider.oauth2_backends.OAuthLibCore",
"SCOPES": {"read": "Reading scope", "write": "Writing scope"},
"DEFAULT_SCOPES": ["__all__"],
"SCOPES_BACKEND_CLASS": "oauth2_provider.scopes.SettingsScopes",
"READ_SCOPE": "read",
"WRITE_SCOPE": "write",
"AUTHORIZATION_CODE_EXPIRE_SECONDS": 60,
"ACCESS_TOKEN_EXPIRE_SECONDS": 36000,
"REFRESH_TOKEN_EXPIRE_SECONDS": None,
"ROTATE_REFRESH_TOKEN": True,
"APPLICATION_MODEL": APPLICATION_MODEL,
"ACCESS_TOKEN_MODEL": ACCESS_TOKEN_MODEL,
"GRANT_MODEL": GRANT_MODEL,
"REFRESH_TOKEN_MODEL": REFRESH_TOKEN_MODEL,
"REQUEST_APPROVAL_PROMPT": "force",
"ALLOWED_REDIRECT_URI_SCHEMES": ["http", "https"],
# Special settings that will be evaluated at runtime
"_SCOPES": [],
"_DEFAULT_SCOPES": [],
# Resource Server with Token Introspection
"RESOURCE_SERVER_INTROSPECTION_URL": None,
"RESOURCE_SERVER_AUTH_TOKEN": None,
"RESOURCE_SERVER_TOKEN_CACHING_SECONDS": 36000,
}
# List of settings that cannot be empty
MANDATORY = (
"CLIENT_ID_GENERATOR_CLASS",
"CLIENT_SECRET_GENERATOR_CLASS",
"OAUTH2_SERVER_CLASS",
"OAUTH2_VALIDATOR_CLASS",
"OAUTH2_BACKEND_CLASS",
"SCOPES",
"ALLOWED_REDIRECT_URI_SCHEMES",
)
# List of settings that may be in string import notation.
IMPORT_STRINGS = (
"CLIENT_ID_GENERATOR_CLASS",
"CLIENT_SECRET_GENERATOR_CLASS",
"OAUTH2_SERVER_CLASS",
"OAUTH2_VALIDATOR_CLASS",
"OAUTH2_BACKEND_CLASS",
"SCOPES_BACKEND_CLASS",
)
def perform_import(val, setting_name):
    """
    If the given setting is a string import notation,
    then perform the necessary import or imports.

    Lists/tuples are resolved element-wise; a plain dotted string is
    resolved directly; anything else is a configuration error.
    """
    if isinstance(val, (list, tuple)):
        return [import_from_string(item, setting_name) for item in val]
    elif "." in val:
        return import_from_string(val, setting_name)
    else:
        raise ImproperlyConfigured("Bad value for %r: %r" % (setting_name, val))


def import_from_string(val, setting_name):
    """
    Attempt to import a class from a string representation.

    :raises ImportError: if the module cannot be imported. The original
        exception is attached as ``__cause__`` (the original code dropped
        the explicit cause by re-raising without ``from``).
    """
    try:
        module_path, _, class_name = val.rpartition(".")
        module = importlib.import_module(module_path)
        return getattr(module, class_name)
    except ImportError as e:
        msg = "Could not import %r for setting %r. %s: %s." % (val, setting_name, e.__class__.__name__, e)
        raise ImportError(msg) from e
class OAuth2ProviderSettings(object):
    """
    A settings object that exposes OAuth2 Provider settings as attributes.

    User-provided settings take precedence over the shipped defaults,
    string import paths are resolved to the objects they name, and each
    resolved value is cached on the instance so ``__getattr__`` only runs
    once per setting.
    """

    def __init__(self, user_settings=None, defaults=None, import_strings=None, mandatory=None):
        self.user_settings = user_settings or {}
        self.defaults = defaults or {}
        self.import_strings = import_strings or ()
        self.mandatory = mandatory or ()

    def __getattr__(self, attr):
        if attr not in self.defaults:
            raise AttributeError("Invalid OAuth2Provider setting: %r" % (attr))

        # User settings win; otherwise fall back to the default.
        if attr in self.user_settings:
            val = self.user_settings[attr]
        else:
            val = self.defaults[attr]

        # Resolve dotted-path strings (or lists of them) into objects.
        if val and attr in self.import_strings:
            val = perform_import(val, attr)

        # Special settings computed at runtime from other settings.
        if attr == "_SCOPES":
            val = list(self.SCOPES.keys())
        if attr == "_DEFAULT_SCOPES":
            if "__all__" in self.DEFAULT_SCOPES:
                # "__all__" means every declared scope is a default scope.
                val = list(self._SCOPES)
            else:
                # Otherwise keep the (possibly empty) subset of SCOPES.
                val = []
                for scope in self.DEFAULT_SCOPES:
                    if scope not in self._SCOPES:
                        raise ImproperlyConfigured("Defined DEFAULT_SCOPES not present in SCOPES")
                    val.append(scope)

        self.validate_setting(attr, val)
        # Cache on the instance so later accesses bypass __getattr__.
        setattr(self, attr, val)
        return val

    def validate_setting(self, attr, val):
        if not val and attr in self.mandatory:
            raise AttributeError("OAuth2Provider setting: %r is mandatory" % (attr))
| 36.197674 | 107 | 0.694346 | from __future__ import unicode_literals
import importlib
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
USER_SETTINGS = getattr(settings, "OAUTH2_PROVIDER", None)
APPLICATION_MODEL = getattr(settings, "OAUTH2_PROVIDER_APPLICATION_MODEL", "oauth2_provider.Application")
ACCESS_TOKEN_MODEL = getattr(settings, "OAUTH2_PROVIDER_ACCESS_TOKEN_MODEL", "oauth2_provider.AccessToken")
GRANT_MODEL = getattr(settings, "OAUTH2_PROVIDER_GRANT_MODEL", "oauth2_provider.Grant")
REFRESH_TOKEN_MODEL = getattr(settings, "OAUTH2_PROVIDER_REFRESH_MODEL", "oauth2_provider.RefreshToken")
DEFAULTS = {
"CLIENT_ID_GENERATOR_CLASS": "oauth2_provider.generators.ClientIdGenerator",
"CLIENT_SECRET_GENERATOR_CLASS": "oauth2_provider.generators.ClientSecretGenerator",
"CLIENT_SECRET_GENERATOR_LENGTH": 128,
"OAUTH2_SERVER_CLASS": "oauthlib.oauth2.Server",
"OAUTH2_VALIDATOR_CLASS": "oauth2_provider.oauth2_validators.OAuth2Validator",
"OAUTH2_BACKEND_CLASS": "oauth2_provider.oauth2_backends.OAuthLibCore",
"SCOPES": {"read": "Reading scope", "write": "Writing scope"},
"DEFAULT_SCOPES": ["__all__"],
"SCOPES_BACKEND_CLASS": "oauth2_provider.scopes.SettingsScopes",
"READ_SCOPE": "read",
"WRITE_SCOPE": "write",
"AUTHORIZATION_CODE_EXPIRE_SECONDS": 60,
"ACCESS_TOKEN_EXPIRE_SECONDS": 36000,
"REFRESH_TOKEN_EXPIRE_SECONDS": None,
"ROTATE_REFRESH_TOKEN": True,
"APPLICATION_MODEL": APPLICATION_MODEL,
"ACCESS_TOKEN_MODEL": ACCESS_TOKEN_MODEL,
"GRANT_MODEL": GRANT_MODEL,
"REFRESH_TOKEN_MODEL": REFRESH_TOKEN_MODEL,
"REQUEST_APPROVAL_PROMPT": "force",
"ALLOWED_REDIRECT_URI_SCHEMES": ["http", "https"],
"_SCOPES": [],
"_DEFAULT_SCOPES": [],
"RESOURCE_SERVER_INTROSPECTION_URL": None,
"RESOURCE_SERVER_AUTH_TOKEN": None,
"RESOURCE_SERVER_TOKEN_CACHING_SECONDS": 36000,
}
MANDATORY = (
"CLIENT_ID_GENERATOR_CLASS",
"CLIENT_SECRET_GENERATOR_CLASS",
"OAUTH2_SERVER_CLASS",
"OAUTH2_VALIDATOR_CLASS",
"OAUTH2_BACKEND_CLASS",
"SCOPES",
"ALLOWED_REDIRECT_URI_SCHEMES",
)
IMPORT_STRINGS = (
"CLIENT_ID_GENERATOR_CLASS",
"CLIENT_SECRET_GENERATOR_CLASS",
"OAUTH2_SERVER_CLASS",
"OAUTH2_VALIDATOR_CLASS",
"OAUTH2_BACKEND_CLASS",
"SCOPES_BACKEND_CLASS",
)
def perform_import(val, setting_name):
if isinstance(val, (list, tuple)):
return [import_from_string(item, setting_name) for item in val]
elif "." in val:
return import_from_string(val, setting_name)
else:
raise ImproperlyConfigured("Bad value for %r: %r" % (setting_name, val))
def import_from_string(val, setting_name):
try:
parts = val.split(".")
module_path, class_name = ".".join(parts[:-1]), parts[-1]
module = importlib.import_module(module_path)
return getattr(module, class_name)
except ImportError as e:
msg = "Could not import %r for setting %r. %s: %s." % (val, setting_name, e.__class__.__name__, e)
raise ImportError(msg)
class OAuth2ProviderSettings(object):
    """Lazily-resolved settings container.

    Lookups consult ``user_settings`` first and fall back to ``defaults``.
    Settings listed in ``import_strings`` hold dotted paths that are resolved
    to real objects on first access; settings listed in ``mandatory`` must be
    truthy.  Resolved values are cached on the instance so ``__getattr__``
    only runs once per attribute.
    """

    def __init__(self, user_settings=None, defaults=None, import_strings=None, mandatory=None):
        self.user_settings = user_settings or {}
        self.defaults = defaults or {}
        self.import_strings = import_strings or ()
        self.mandatory = mandatory or ()

    def __getattr__(self, attr):
        # Only attributes declared in the defaults mapping are valid settings.
        if attr not in self.defaults.keys():
            raise AttributeError("Invalid OAuth2Provider setting: %r" % (attr))

        if attr in self.user_settings:
            value = self.user_settings[attr]
        else:
            value = self.defaults[attr]

        # Dotted-path settings become real objects on first use.
        if value and attr in self.import_strings:
            value = perform_import(value, attr)

        if attr == "_SCOPES":
            value = list(self.SCOPES.keys())
        if attr == "_DEFAULT_SCOPES":
            if "__all__" in self.DEFAULT_SCOPES:
                # The wildcard entry grants every configured scope.
                value = list(self._SCOPES)
            else:
                value = []
                for scope in self.DEFAULT_SCOPES:
                    if scope not in self._SCOPES:
                        raise ImproperlyConfigured("Defined DEFAULT_SCOPES not present in SCOPES")
                    value.append(scope)

        self.validate_setting(attr, value)
        # Cache on the instance so later accesses bypass __getattr__ entirely.
        setattr(self, attr, value)
        return value

    def validate_setting(self, attr, val):
        if not val and attr in self.mandatory:
            raise AttributeError("OAuth2Provider setting: %r is mandatory" % (attr))
# Module-level singleton used throughout the provider; values are resolved
# lazily from USER_SETTINGS with DEFAULTS as the fallback.
oauth2_settings = OAuth2ProviderSettings(USER_SETTINGS, DEFAULTS, IMPORT_STRINGS, MANDATORY)
| true | true |
f72424ec1fa1a589ada062442ccfe87c8521630e | 8,530 | py | Python | stable_baselines3/dqn/policies.py | LucasAlegre/stable-baselines3 | 6b598323ae070bb0a998d25230f6e11eca4cbe61 | [
"MIT"
] | 26 | 2021-11-05T08:46:06.000Z | 2022-03-22T05:53:57.000Z | stable_baselines3/dqn/policies.py | LucasAlegre/stable-baselines3 | 6b598323ae070bb0a998d25230f6e11eca4cbe61 | [
"MIT"
] | 1 | 2021-11-19T11:13:37.000Z | 2021-11-30T09:08:04.000Z | stable_baselines3/dqn/policies.py | LucasAlegre/stable-baselines3 | 6b598323ae070bb0a998d25230f6e11eca4cbe61 | [
"MIT"
] | 5 | 2021-11-05T08:46:12.000Z | 2022-03-25T21:56:58.000Z | from typing import Any, Dict, List, Optional, Type
import gym
import torch as th
from torch import nn
from stable_baselines3.common.policies import BasePolicy, register_policy
from stable_baselines3.common.torch_layers import BaseFeaturesExtractor, FlattenExtractor, NatureCNN, create_mlp
from stable_baselines3.common.type_aliases import Schedule
class QNetwork(BasePolicy):
    """
    Action-Value (Q-Value) network for DQN

    :param observation_space: Observation space
    :param action_space: Action space
    :param features_extractor: Network used to turn raw observations into features
    :param features_dim: Dimension of the features produced by ``features_extractor``
    :param net_arch: The specification of the policy and value networks.
    :param activation_fn: Activation function
    :param normalize_images: Whether to normalize images or not,
         dividing by 255.0 (True by default)
    """

    def __init__(
        self,
        observation_space: gym.spaces.Space,
        action_space: gym.spaces.Space,
        features_extractor: nn.Module,
        features_dim: int,
        net_arch: Optional[List[int]] = None,
        activation_fn: Type[nn.Module] = nn.ReLU,
        normalize_images: bool = True,
    ):
        super(QNetwork, self).__init__(
            observation_space,
            action_space,
            features_extractor=features_extractor,
            normalize_images=normalize_images,
        )

        if net_arch is None:
            net_arch = [64, 64]

        self.net_arch = net_arch
        self.activation_fn = activation_fn
        self.features_extractor = features_extractor
        self.features_dim = features_dim
        self.normalize_images = normalize_images
        action_dim = self.action_space.n  # number of actions
        # MLP mapping extracted features to one Q-value per discrete action.
        q_net = create_mlp(self.features_dim, action_dim, self.net_arch, self.activation_fn)
        self.q_net = nn.Sequential(*q_net)

    def forward(self, obs: th.Tensor) -> th.Tensor:
        """
        Predict the q-values.

        :param obs: Observation
        :return: The estimated Q-Value for each action.
        """
        return self.q_net(self.extract_features(obs))

    def _predict(self, observation: th.Tensor, deterministic: bool = True) -> th.Tensor:
        q_values = self.forward(observation)
        # Greedy action: index of the highest Q-value for each batch element.
        action = q_values.argmax(dim=1).reshape(-1)
        return action

    def _get_data(self) -> Dict[str, Any]:
        data = super()._get_data()

        data.update(
            dict(
                net_arch=self.net_arch,
                features_dim=self.features_dim,
                activation_fn=self.activation_fn,
                features_extractor=self.features_extractor,
                # BUGFIX: the previous version also serialized
                # ``epsilon=self.epsilon``, but QNetwork never defines that
                # attribute (nor does DQNPolicy set it on its q-nets), so
                # calling _get_data() raised AttributeError.  Exploration
                # epsilon belongs to the DQN algorithm, not the Q-network.
            )
        )
        return data
class DQNPolicy(BasePolicy):
    """
    Policy class with Q-Value Net and target net for DQN

    :param observation_space: Observation space
    :param action_space: Action space
    :param lr_schedule: Learning rate schedule (could be constant)
    :param net_arch: The specification of the policy and value networks.
    :param activation_fn: Activation function
    :param features_extractor_class: Features extractor to use.
    :param features_extractor_kwargs: Keyword arguments
        to pass to the features extractor.
    :param normalize_images: Whether to normalize images or not,
         dividing by 255.0 (True by default)
    :param optimizer_class: The optimizer to use,
        ``th.optim.Adam`` by default
    :param optimizer_kwargs: Additional keyword arguments,
        excluding the learning rate, to pass to the optimizer
    """

    def __init__(
        self,
        observation_space: gym.spaces.Space,
        action_space: gym.spaces.Space,
        lr_schedule: Schedule,
        net_arch: Optional[List[int]] = None,
        activation_fn: Type[nn.Module] = nn.ReLU,
        features_extractor_class: Type[BaseFeaturesExtractor] = FlattenExtractor,
        features_extractor_kwargs: Optional[Dict[str, Any]] = None,
        normalize_images: bool = True,
        optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam,
        optimizer_kwargs: Optional[Dict[str, Any]] = None,
    ):
        super(DQNPolicy, self).__init__(
            observation_space,
            action_space,
            features_extractor_class,
            features_extractor_kwargs,
            optimizer_class=optimizer_class,
            optimizer_kwargs=optimizer_kwargs,
        )

        if net_arch is None:
            # Two hidden layers for flat observations; CNN extractors already
            # provide capacity, so no extra hidden layers are added after them.
            net_arch = [64, 64] if features_extractor_class == FlattenExtractor else []

        self.net_arch = net_arch
        self.activation_fn = activation_fn
        self.normalize_images = normalize_images

        # Constructor arguments shared by the online and target Q-networks.
        self.net_args = dict(
            observation_space=self.observation_space,
            action_space=self.action_space,
            net_arch=self.net_arch,
            activation_fn=self.activation_fn,
            normalize_images=normalize_images,
        )

        self.q_net, self.q_net_target = None, None
        self._build(lr_schedule)

    def _build(self, lr_schedule: Schedule) -> None:
        """
        Create the network and the optimizer.

        :param lr_schedule: Learning rate schedule
            lr_schedule(1) is the initial learning rate
        """
        self.q_net = self.make_q_net()
        self.q_net_target = self.make_q_net()
        # Target network starts as an exact copy of the online network.
        self.q_net_target.load_state_dict(self.q_net.state_dict())

        # Setup optimizer with initial learning rate
        self.optimizer = self.optimizer_class(self.parameters(), lr=lr_schedule(1), **self.optimizer_kwargs)

    def make_q_net(self) -> QNetwork:
        # Make sure we always have separate networks for features extractors etc
        net_args = self._update_features_extractor(self.net_args, features_extractor=None)
        return QNetwork(**net_args).to(self.device)

    def forward(self, obs: th.Tensor, deterministic: bool = True) -> th.Tensor:
        return self._predict(obs, deterministic=deterministic)

    def _predict(self, obs: th.Tensor, deterministic: bool = True) -> th.Tensor:
        # Delegate to the online Q-network (greedy action selection).
        return self.q_net._predict(obs, deterministic=deterministic)

    def _get_data(self) -> Dict[str, Any]:
        data = super()._get_data()

        data.update(
            {
                "net_arch": self.net_args["net_arch"],
                "activation_fn": self.net_args["activation_fn"],
                "lr_schedule": self._dummy_schedule,  # dummy lr schedule, not needed for loading policy alone
                "optimizer_class": self.optimizer_class,
                "optimizer_kwargs": self.optimizer_kwargs,
                "features_extractor_class": self.features_extractor_class,
                "features_extractor_kwargs": self.features_extractor_kwargs,
            }
        )
        return data
# Alias: the default DQNPolicy already uses a flatten/MLP feature extractor.
MlpPolicy = DQNPolicy
class CnnPolicy(DQNPolicy):
    """
    Policy class for DQN when using images as input.

    Identical to :class:`DQNPolicy` except that the default features
    extractor is ``NatureCNN``.

    :param observation_space: Observation space
    :param action_space: Action space
    :param lr_schedule: Learning rate schedule (could be constant)
    :param net_arch: The specification of the policy and value networks.
    :param activation_fn: Activation function
    :param features_extractor_class: Features extractor to use.
    :param normalize_images: Whether to normalize images or not,
         dividing by 255.0 (True by default)
    :param optimizer_class: The optimizer to use,
        ``th.optim.Adam`` by default
    :param optimizer_kwargs: Additional keyword arguments,
        excluding the learning rate, to pass to the optimizer
    """

    def __init__(
        self,
        observation_space: gym.spaces.Space,
        action_space: gym.spaces.Space,
        lr_schedule: Schedule,
        net_arch: Optional[List[int]] = None,
        activation_fn: Type[nn.Module] = nn.ReLU,
        features_extractor_class: Type[BaseFeaturesExtractor] = NatureCNN,
        features_extractor_kwargs: Optional[Dict[str, Any]] = None,
        normalize_images: bool = True,
        optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam,
        optimizer_kwargs: Optional[Dict[str, Any]] = None,
    ):
        # Everything is delegated to DQNPolicy; only the default features
        # extractor differs.
        super(CnnPolicy, self).__init__(
            observation_space=observation_space,
            action_space=action_space,
            lr_schedule=lr_schedule,
            net_arch=net_arch,
            activation_fn=activation_fn,
            features_extractor_class=features_extractor_class,
            features_extractor_kwargs=features_extractor_kwargs,
            normalize_images=normalize_images,
            optimizer_class=optimizer_class,
            optimizer_kwargs=optimizer_kwargs,
        )
# Make the policies discoverable by name, e.g. DQN("MlpPolicy", env).
register_policy("MlpPolicy", MlpPolicy)
register_policy("CnnPolicy", CnnPolicy)
| 35.690377 | 112 | 0.654513 | from typing import Any, Dict, List, Optional, Type
import gym
import torch as th
from torch import nn
from stable_baselines3.common.policies import BasePolicy, register_policy
from stable_baselines3.common.torch_layers import BaseFeaturesExtractor, FlattenExtractor, NatureCNN, create_mlp
from stable_baselines3.common.type_aliases import Schedule
class QNetwork(BasePolicy):
def __init__(
self,
observation_space: gym.spaces.Space,
action_space: gym.spaces.Space,
features_extractor: nn.Module,
features_dim: int,
net_arch: Optional[List[int]] = None,
activation_fn: Type[nn.Module] = nn.ReLU,
normalize_images: bool = True,
):
super(QNetwork, self).__init__(
observation_space,
action_space,
features_extractor=features_extractor,
normalize_images=normalize_images,
)
if net_arch is None:
net_arch = [64, 64]
self.net_arch = net_arch
self.activation_fn = activation_fn
self.features_extractor = features_extractor
self.features_dim = features_dim
self.normalize_images = normalize_images
action_dim = self.action_space.n
q_net = create_mlp(self.features_dim, action_dim, self.net_arch, self.activation_fn)
self.q_net = nn.Sequential(*q_net)
def forward(self, obs: th.Tensor) -> th.Tensor:
return self.q_net(self.extract_features(obs))
def _predict(self, observation: th.Tensor, deterministic: bool = True) -> th.Tensor:
q_values = self.forward(observation)
action = q_values.argmax(dim=1).reshape(-1)
return action
def _get_data(self) -> Dict[str, Any]:
data = super()._get_data()
data.update(
dict(
net_arch=self.net_arch,
features_dim=self.features_dim,
activation_fn=self.activation_fn,
features_extractor=self.features_extractor,
epsilon=self.epsilon,
)
)
return data
class DQNPolicy(BasePolicy):
def __init__(
self,
observation_space: gym.spaces.Space,
action_space: gym.spaces.Space,
lr_schedule: Schedule,
net_arch: Optional[List[int]] = None,
activation_fn: Type[nn.Module] = nn.ReLU,
features_extractor_class: Type[BaseFeaturesExtractor] = FlattenExtractor,
features_extractor_kwargs: Optional[Dict[str, Any]] = None,
normalize_images: bool = True,
optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam,
optimizer_kwargs: Optional[Dict[str, Any]] = None,
):
super(DQNPolicy, self).__init__(
observation_space,
action_space,
features_extractor_class,
features_extractor_kwargs,
optimizer_class=optimizer_class,
optimizer_kwargs=optimizer_kwargs,
)
if net_arch is None:
if features_extractor_class == FlattenExtractor:
net_arch = [64, 64]
else:
net_arch = []
self.net_arch = net_arch
self.activation_fn = activation_fn
self.normalize_images = normalize_images
self.net_args = {
"observation_space": self.observation_space,
"action_space": self.action_space,
"net_arch": self.net_arch,
"activation_fn": self.activation_fn,
"normalize_images": normalize_images,
}
self.q_net, self.q_net_target = None, None
self._build(lr_schedule)
def _build(self, lr_schedule: Schedule) -> None:
self.q_net = self.make_q_net()
self.q_net_target = self.make_q_net()
self.q_net_target.load_state_dict(self.q_net.state_dict())
self.optimizer = self.optimizer_class(self.parameters(), lr=lr_schedule(1), **self.optimizer_kwargs)
def make_q_net(self) -> QNetwork:
net_args = self._update_features_extractor(self.net_args, features_extractor=None)
return QNetwork(**net_args).to(self.device)
def forward(self, obs: th.Tensor, deterministic: bool = True) -> th.Tensor:
return self._predict(obs, deterministic=deterministic)
def _predict(self, obs: th.Tensor, deterministic: bool = True) -> th.Tensor:
return self.q_net._predict(obs, deterministic=deterministic)
def _get_data(self) -> Dict[str, Any]:
data = super()._get_data()
data.update(
dict(
net_arch=self.net_args["net_arch"],
activation_fn=self.net_args["activation_fn"],
lr_schedule=self._dummy_schedule,
optimizer_class=self.optimizer_class,
optimizer_kwargs=self.optimizer_kwargs,
features_extractor_class=self.features_extractor_class,
features_extractor_kwargs=self.features_extractor_kwargs,
)
)
return data
MlpPolicy = DQNPolicy
class CnnPolicy(DQNPolicy):
def __init__(
self,
observation_space: gym.spaces.Space,
action_space: gym.spaces.Space,
lr_schedule: Schedule,
net_arch: Optional[List[int]] = None,
activation_fn: Type[nn.Module] = nn.ReLU,
features_extractor_class: Type[BaseFeaturesExtractor] = NatureCNN,
features_extractor_kwargs: Optional[Dict[str, Any]] = None,
normalize_images: bool = True,
optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam,
optimizer_kwargs: Optional[Dict[str, Any]] = None,
):
super(CnnPolicy, self).__init__(
observation_space,
action_space,
lr_schedule,
net_arch,
activation_fn,
features_extractor_class,
features_extractor_kwargs,
normalize_images,
optimizer_class,
optimizer_kwargs,
)
register_policy("MlpPolicy", MlpPolicy)
register_policy("CnnPolicy", CnnPolicy)
| true | true |
f72425164382860b69a51927940bdfcff668687c | 454 | py | Python | env/Lib/site-packages/plotly/validators/histogram2d/_zauto.py | andresgreen-byte/Laboratorio-1--Inversion-de-Capital | 8a4707301d19c3826c31026c4077930bcd6a8182 | [
"MIT"
] | 11,750 | 2015-10-12T07:03:39.000Z | 2022-03-31T20:43:15.000Z | env/Lib/site-packages/plotly/validators/histogram2d/_zauto.py | andresgreen-byte/Laboratorio-1--Inversion-de-Capital | 8a4707301d19c3826c31026c4077930bcd6a8182 | [
"MIT"
] | 2,951 | 2015-10-12T00:41:25.000Z | 2022-03-31T22:19:26.000Z | env/Lib/site-packages/plotly/validators/histogram2d/_zauto.py | andresgreen-byte/Laboratorio-1--Inversion-de-Capital | 8a4707301d19c3826c31026c4077930bcd6a8182 | [
"MIT"
] | 2,623 | 2015-10-15T14:40:27.000Z | 2022-03-28T16:05:50.000Z | import _plotly_utils.basevalidators
class ZautoValidator(_plotly_utils.basevalidators.BooleanValidator):
    """Boolean validator for the ``histogram2d.zauto`` property."""

    def __init__(self, plotly_name="zauto", parent_name="histogram2d", **kwargs):
        # Pull validator-specific options out of kwargs, with their defaults,
        # before forwarding everything else to the base validator.
        edit_type = kwargs.pop("edit_type", "calc")
        implied_edits = kwargs.pop("implied_edits", {})
        super(ZautoValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            implied_edits=implied_edits,
            **kwargs
        )
| 34.923077 | 81 | 0.662996 | import _plotly_utils.basevalidators
class ZautoValidator(_plotly_utils.basevalidators.BooleanValidator):
def __init__(self, plotly_name="zauto", parent_name="histogram2d", **kwargs):
super(ZautoValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
implied_edits=kwargs.pop("implied_edits", {}),
**kwargs
)
| true | true |
f724265fd36867b631d89c00e4ac1e68ed8f44ba | 6,317 | py | Python | nameshark_vcard/nameshark_vcard.py | proinsias/nameshark-vcard | 3bd82201db34fb370c9b646f1adcce15e31c6137 | [
"MIT"
] | 1 | 2017-11-03T21:35:36.000Z | 2017-11-03T21:35:36.000Z | nameshark_vcard/nameshark_vcard.py | proinsias/nameshark-vcard | 3bd82201db34fb370c9b646f1adcce15e31c6137 | [
"MIT"
] | 336 | 2016-11-05T00:37:38.000Z | 2022-03-31T20:34:38.000Z | nameshark_vcard/nameshark_vcard.py | proinsias/nameshark-vcard | 3bd82201db34fb370c9b646f1adcce15e31c6137 | [
"MIT"
] | 1 | 2016-11-18T19:00:27.000Z | 2016-11-18T19:00:27.000Z | # The MIT License (MIT)
#
# Copyright (c) 2016 Francis T. O'Donovan
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Convert vCard-formatted string to the JSON format expected by Name Shark."""
# coding=utf-8
import base64
import json
import collections
import argparse
import vobject
# Lightweight holder for the two name parts extracted from a vCard 'fn' field.
NAMES = collections.namedtuple('Names', ['first_name', 'surname'])
def get_pp_names(fn_field):
    """
    Use probablepeople to extract firstname/surname from vCard 'fn' field.

    Either part is ``None`` when probablepeople is unavailable or cannot
    tag the input; callers are expected to fill in the gaps themselves.

    :param fn_field: the input vCard 'fn' field.
    :return: a namedtuple containing the first name and surname
        (either may be ``None``).
    """
    # NOTE: the previous docstring carried a doctest copied from get_names();
    # this function neither prints nor guarantees both parts, so that doctest
    # could never pass here.
    first_name = None
    surname = None

    try:
        import probablepeople as pp  # not python 2.6 compatible

        # Use probablepeople to tag the parts of the name.
        full_name_dict = pp.tag(fn_field)[0]

        if 'GivenName' in full_name_dict:
            # If probablepeople has successfully extracted the first name,
            # use it.
            first_name = full_name_dict['GivenName']

        if 'Surname' in full_name_dict:
            # If probablepeople has successfully extracted the surname,
            # use it.
            surname = full_name_dict['Surname']
    except (ImportError, SyntaxError, TypeError) as error:
        print(error)

    return NAMES(first_name, surname)
def get_names(fn_field):
    """
    Extract the first name and surname from a vCard 'fn' field.

    probablepeople output is preferred; whitespace-splitting of the raw
    field is used as a fallback for whichever part is missing.

    :param fn_field: the input vCard 'fn' field.
    :return: a namedtuple containing the first name and surname.

    >>> get_names('John Smith')
    Extracting data for John Smith
    Names(first_name='John', surname='Smith')
    """
    first_name, surname = get_pp_names(fn_field)

    try:
        tokens = fn_field.split(' ')
    except (TypeError, AttributeError):
        tokens = ['']

    if first_name is None:
        # Fall back to the first whitespace-separated token.
        first_name = tokens[0]
        if first_name == surname:
            first_name = ''

    if surname is None:
        # Fall back to the second token, when one exists.
        surname = tokens[1] if len(tokens) > 1 else ''

    print('Extracting data for ' + first_name + ' ' + surname)
    return NAMES(first_name, surname)
def get_photo(photo):
    """
    Extract the photo data (if it exists) from a vCard 'photo' field.

    :param photo: the input vCard 'photo' field (raw bytes, or ``None``).
    :return: a ``data:image/jpeg;base64,...`` URI string, or '' when absent.
    """
    if photo is None:
        return ''
    encoded = base64.b64encode(photo).decode('utf8')
    return 'data:image/jpeg;base64,' + encoded
def extract_contact_from_component(component):
    """
    Extract the contact info from a vCard component.

    :param component: the input vCard component.
    :return: a dictionary containing the extracted contact info.
    """
    names = get_names(component.getChildValue('fn'))
    photo_data = get_photo(component.getChildValue('photo'))

    if not photo_data:
        print(
            'Warning: Missing photo for ' + names.first_name + ' ' +
            names.surname + '...!',
        )

    # Shape expected by the Name Shark import format.
    return {
        'first': names.first_name,
        'last': names.surname,
        'photoData': photo_data,
        'details': '',
    }
def extract_contacts_from_vcard(vcard):
    """
    Extract the contact info from a vCard.

    :param vcard: the vCard text to convert.
    :return: a list containing the extracted contact info.
    """
    return [
        extract_contact_from_component(component)
        for component in vobject.readComponents(vcard)
    ]
def convert_to_nameshark(group_name, contacts):
    """
    Convert a list containing contact info into JSON for Name Shark.

    :param group_name: the Name Shark group to use.
    :param contacts: the list of contact dictionaries extracted from a vCard.
    :return: a JSON string in the format expected by Name Shark.
    """
    # (Docstring fix: the contacts description previously sat under :return:.)
    shark = {'name': group_name, 'contacts': contacts}
    return json.dumps(shark, sort_keys=True, indent=4)
def vcard_to_nameshark(vcard, group_name):
    """
    Convert vCard-formatted string to the JSON format expected by Name Shark.

    :param vcard: the vCard text to convert.
    :param group_name: the Name Shark group to use.
    :return: JSON version of vCard input.
    """
    return convert_to_nameshark(group_name, extract_contacts_from_vcard(vcard))
def main():
    """
    Command-line entry point: convert a vCard file to a Name Shark JSON file.

    Reads the vCard named on the command line and writes ``<group>.json``
    in the current directory.

    :return: None
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('file', help='the input file')
    parser.add_argument('group', help='the output group name')
    args = parser.parse_args()

    # vCard exports are typically UTF-8; be explicit instead of relying on
    # the platform default encoding (which breaks on e.g. Windows/cp1252).
    with open(args.file, 'r', encoding='utf-8') as input_file:
        text = input_file.read()

    json_str = vcard_to_nameshark(text, args.group)
    with open(args.group + '.json', 'w', encoding='utf-8') as output_file:
        output_file.write(json_str)
if __name__ == '__main__':
    main()  # allow use both as a script and as an importable module
| 30.370192 | 79 | 0.678012 |
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# coding=utf-8
import base64
import json
import collections
import argparse
import vobject
NAMES = collections.namedtuple('Names', ['first_name', 'surname'])
def get_pp_names(fn_field):
first_name = None
surname = None
try:
import probablepeople as pp # not python 2.6 compatible
# Use probablepeople to tag the parts of the name.
full_name_dict = pp.tag(fn_field)[0]
if 'GivenName' in full_name_dict:
# If probablepeople has successfully extracted the first name,
# use it.
first_name = full_name_dict['GivenName']
if 'Surname' in full_name_dict:
# If probablepeople has successfully extracted the surname,
# use it.
surname = full_name_dict['Surname']
except (ImportError, SyntaxError, TypeError) as error:
print(error)
return NAMES(first_name, surname)
def get_names(fn_field):
names = get_pp_names(fn_field)
first_name = names.first_name
surname = names.surname
try:
fn_field_split = fn_field.split(' ')
except (TypeError, AttributeError):
fn_field_split = ['']
if first_name is None:
# If we can't get first name from probablepeople, assume it's the
# first part of the string.
first_name = fn_field_split[0]
if first_name == surname:
first_name = ''
if surname is None:
# If we can't get surname from probablepeople, assume it's the
# second part of the string, if that exists.
surname = fn_field_split[1] if len(fn_field_split) > 1 else ''
print('Extracting data for ' + first_name + ' ' + surname)
return NAMES(first_name, surname)
def get_photo(photo):
if photo is not None:
photo_data = base64.b64encode(photo)
photo_data = 'data:image/jpeg;base64,' + photo_data.decode('utf8')
else:
photo_data = ''
return photo_data
def extract_contact_from_component(component):
names = get_names(component.getChildValue('fn'))
photo_data = get_photo(component.getChildValue('photo'))
if photo_data == '':
print(
'Warning: Missing photo for ' + names.first_name + ' ' +
names.surname + '...!',
)
return {
'first': names.first_name, 'last': names.surname,
'photoData': photo_data, 'details': '',
}
def extract_contacts_from_vcard(vcard):
contacts = []
for v_component in vobject.readComponents(vcard):
entry = extract_contact_from_component(v_component)
contacts.append(entry)
return contacts
def convert_to_nameshark(group_name, contacts):
shark = {'name': group_name, 'contacts': contacts}
return json.dumps(shark, sort_keys=True, indent=4)
def vcard_to_nameshark(vcard, group_name):
contacts = extract_contacts_from_vcard(vcard)
return convert_to_nameshark(group_name, contacts)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('file', help='the input file')
parser.add_argument('group', help='the output group name')
args = parser.parse_args()
with open(args.file, 'r') as input_file:
text = input_file.read()
json_str = vcard_to_nameshark(text, args.group)
with open(args.group + '.json', 'w') as output_file:
output_file.write(json_str)
if __name__ == '__main__':
main()
| true | true |
f72427339dc0e06167064cbaba2854f7fa43b88f | 1,949 | py | Python | huskyai/scraper/bing-image-search.py | wunderwuzzi23/ai | e71e971026a5d010d00a97022cfe376eafc386aa | [
"MIT"
] | 4 | 2020-09-06T06:52:35.000Z | 2021-08-16T11:21:44.000Z | huskyai/scraper/bing-image-search.py | wunderwuzzi23/ai | e71e971026a5d010d00a97022cfe376eafc386aa | [
"MIT"
] | null | null | null | huskyai/scraper/bing-image-search.py | wunderwuzzi23/ai | e71e971026a5d010d00a97022cfe376eafc386aa | [
"MIT"
] | 2 | 2020-09-06T06:54:10.000Z | 2021-03-03T15:50:02.000Z | import os
import requests
import matplotlib.pyplot as plt
from PIL import Image
from io import BytesIO, StringIO
import uuid
#config
download_folder = "data/huskies"
search_term = "siberian husky"
bing_api_key = os.path.join(os.getenv('HOME'), ".bingimagessearchkey")
subscription_key = open(bing_api_key, "rt").readline().rstrip("\n")
count = 100   # results requested per page
max_page = 10  # number of result pages to fetch

#setup
os.makedirs(download_folder, exist_ok=True)
search_url = "https://huskyai-imagesearch.cognitiveservices.azure.com/bing/v7.0/images/search"
headers = {"Ocp-Apim-Subscription-Key": subscription_key}

#query and save images
offset = 0
for current_page in range(max_page):
    print("Page:" + str(current_page + 1))

    params = {"q": search_term, "license": "public", "imageType": "photo", "count": count, "offset": offset}
    response = requests.get(search_url, headers=headers, params=params)
    response.raise_for_status()
    search_results = response.json()

    print("Offset:" + str(offset))
    print("Next Offset" + str(search_results["nextOffset"]))

    results = search_results["value"]
    for i in range(len(results)):
        url = results[i]["thumbnailUrl"]  # use contentUrl for full resolution
        id = results[i]["imageId"]
        print(f"Processing ({i}) - {id}")

        # BUGFIX: a single broken thumbnail (HTTP error or undecodable image)
        # previously aborted the entire scrape; skip it and keep going.
        try:
            image_data = requests.get(url)
            image_data.raise_for_status()

            filename = os.path.join(download_folder, id + ".jpg")
            image = Image.open(BytesIO(image_data.content))
            # Image.save returns None, so don't rebind `image` to its result.
            image.save(filename, "JPEG")
        except (requests.RequestException, OSError) as err:
            print(f"Skipping {id}: {err}")

    offset = search_results["nextOffset"]

print("Done")
#plot the images
# f, axes = plt.subplots(4, 4)
# for i in range(4):
# for j in range(4):
# image_data = requests.get(thumbnail_urls[i+4*j])
# image_data.raise_for_status()
# image = Image.open(BytesIO(image_data.content))
# axes[i][j].imshow(image)
# axes[i][j].axis("off")
# plt.show()
| 29.089552 | 109 | 0.662391 | import os
import requests
import matplotlib.pyplot as plt
from PIL import Image
from io import BytesIO, StringIO
import uuid
download_folder = "data/huskies"
search_term = "siberian husky"
bing_api_key = os.path.join(os.getenv('HOME'), ".bingimagessearchkey")
subscription_key = open(bing_api_key,"rt").readline().rstrip("\n")
count = 100
max_page = 10
os.makedirs(download_folder,exist_ok=True)
search_url = "https://huskyai-imagesearch.cognitiveservices.azure.com/bing/v7.0/images/search"
headers = {"Ocp-Apim-Subscription-Key" : subscription_key}
offset = 0
for current_page in range(max_page):
print("Page:" + str(current_page+1))
params = {"q": search_term, "license": "public", "imageType": "photo", "count": count, "offset": offset}
response = requests.get(search_url, headers=headers, params=params)
response.raise_for_status()
search_results = response.json()
print("Offset:" + str(offset))
print("Next Offset" + str(search_results["nextOffset"]))
image_count = len(search_results["value"][:])
for i in range(image_count):
url = search_results["value"][:][i]["thumbnailUrl"]
id = search_results["value"][:][i]["imageId"]
print(f"Processing ({i}) - {id}")
image_data = requests.get(url)
image_data.raise_for_status()
filename = os.path.join(download_folder, id + ".jpg")
image = Image.open(BytesIO(image_data.content))
image = image.save(filename, "JPEG")
offset = search_results["nextOffset"]
print("Done")
| true | true |
f72427b5389f11ffe77ef095d0500e9eae4c2a4c | 3,622 | py | Python | azure-mgmt-network/azure/mgmt/network/v2016_09_01/models/virtual_network.py | v-Ajnava/azure-sdk-for-python | a1f6f80eb5869c5b710e8bfb66146546697e2a6f | [
"MIT"
] | 4 | 2016-06-17T23:25:29.000Z | 2022-03-30T22:37:45.000Z | azure/mgmt/network/v2016_09_01/models/virtual_network.py | EnjoyLifeFund/Debian_py36_packages | 1985d4c73fabd5f08f54b922e73a9306e09c77a5 | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | 2 | 2016-09-30T21:40:24.000Z | 2017-11-10T18:16:18.000Z | azure/mgmt/network/v2016_09_01/models/virtual_network.py | EnjoyLifeFund/Debian_py36_packages | 1985d4c73fabd5f08f54b922e73a9306e09c77a5 | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | 3 | 2016-05-03T20:49:46.000Z | 2017-10-05T21:05:27.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
class VirtualNetwork(Resource):
"""Virtual Network resource.
Variables are only populated by the server, and will be ignored when
sending a request.
:param id: Resource ID.
:type id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param location: Resource location.
:type location: str
:param tags: Resource tags.
:type tags: dict[str, str]
:param address_space: The AddressSpace that contains an array of IP
address ranges that can be used by subnets.
:type address_space: ~azure.mgmt.network.v2016_09_01.models.AddressSpace
:param dhcp_options: The dhcpOptions that contains an array of DNS servers
available to VMs deployed in the virtual network.
:type dhcp_options: ~azure.mgmt.network.v2016_09_01.models.DhcpOptions
:param subnets: A list of subnets in a Virtual Network.
:type subnets: list[~azure.mgmt.network.v2016_09_01.models.Subnet]
:param virtual_network_peerings: A list of peerings in a Virtual Network.
:type virtual_network_peerings:
list[~azure.mgmt.network.v2016_09_01.models.VirtualNetworkPeering]
:param resource_guid: The resourceGuid property of the Virtual Network
resource.
:type resource_guid: str
:param provisioning_state: The provisioning state of the PublicIP
resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
:type provisioning_state: str
:param etag: Gets a unique read-only string that changes whenever the
resource is updated.
:type etag: str
"""
_validation = {
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'address_space': {'key': 'properties.addressSpace', 'type': 'AddressSpace'},
'dhcp_options': {'key': 'properties.dhcpOptions', 'type': 'DhcpOptions'},
'subnets': {'key': 'properties.subnets', 'type': '[Subnet]'},
'virtual_network_peerings': {'key': 'properties.virtualNetworkPeerings', 'type': '[VirtualNetworkPeering]'},
'resource_guid': {'key': 'properties.resourceGuid', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
}
    def __init__(self, id=None, location=None, tags=None, address_space=None, dhcp_options=None, subnets=None, virtual_network_peerings=None, resource_guid=None, provisioning_state=None, etag=None):
        """Initialize the virtual network.

        The common ARM resource fields (id, location, tags) are forwarded to
        the base class; the network-specific properties documented in the
        class docstring above are stored as-is.
        """
        super(VirtualNetwork, self).__init__(id=id, location=location, tags=tags)
        self.address_space = address_space
        self.dhcp_options = dhcp_options
        self.subnets = subnets
        self.virtual_network_peerings = virtual_network_peerings
        self.resource_guid = resource_guid
        self.provisioning_state = provisioning_state
        self.etag = etag
| 44.170732 | 198 | 0.650469 |
from .resource import Resource
class VirtualNetwork(Resource):
    """Virtual network resource (ARM model, msrest-serialized).

    :param id: Resource ID.
    :param location: Resource location.
    :param tags: Resource tags (dict[str, str]).
    :param address_space: AddressSpace holding the IP address ranges usable
        by subnets.
    :param dhcp_options: DhcpOptions holding the DNS servers available to
        VMs deployed in the virtual network.
    :param subnets: List of subnets in the virtual network.
    :param virtual_network_peerings: List of peerings in the virtual network.
    :param resource_guid: The resourceGuid property of the resource.
    :param provisioning_state: Provisioning state ('Updating', 'Deleting',
        'Failed').
    :param etag: Read-only string that changes whenever the resource is
        updated.
    """
    # 'name' and 'type' are server-populated, hence read-only.
    _validation = {
        'name': {'readonly': True},
        'type': {'readonly': True},
    }
    # Attribute name -> wire JSON key and msrest deserialization type;
    # 'properties.*' keys live inside the nested ARM "properties" envelope.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'address_space': {'key': 'properties.addressSpace', 'type': 'AddressSpace'},
        'dhcp_options': {'key': 'properties.dhcpOptions', 'type': 'DhcpOptions'},
        'subnets': {'key': 'properties.subnets', 'type': '[Subnet]'},
        'virtual_network_peerings': {'key': 'properties.virtualNetworkPeerings', 'type': '[VirtualNetworkPeering]'},
        'resource_guid': {'key': 'properties.resourceGuid', 'type': 'str'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
    }
    def __init__(self, id=None, location=None, tags=None, address_space=None, dhcp_options=None, subnets=None, virtual_network_peerings=None, resource_guid=None, provisioning_state=None, etag=None):
        # Common ARM fields go to the base Resource; the rest are stored as-is.
        super(VirtualNetwork, self).__init__(id=id, location=location, tags=tags)
        self.address_space = address_space
        self.dhcp_options = dhcp_options
        self.subnets = subnets
        self.virtual_network_peerings = virtual_network_peerings
        self.resource_guid = resource_guid
        self.provisioning_state = provisioning_state
        self.etag = etag
| true | true |
f7242804448ff4b2f473b97c4a2b021f1d175c50 | 227 | py | Python | examples/multi_file_cli/api.py | pnijhara/hug | 95e2f66baa57494b8751b43ad3da6c2d0e2d535d | [
"MIT"
] | 6,045 | 2015-08-12T11:11:39.000Z | 2019-04-07T16:42:28.000Z | examples/multi_file_cli/api.py | Warlockk/hug | 95e2f66baa57494b8751b43ad3da6c2d0e2d535d | [
"MIT"
] | 623 | 2015-08-12T16:08:45.000Z | 2019-04-07T23:07:47.000Z | examples/multi_file_cli/api.py | Warlockk/hug | 95e2f66baa57494b8751b43ad3da6c2d0e2d535d | [
"MIT"
] | 426 | 2015-08-13T04:23:01.000Z | 2019-04-07T20:46:41.000Z | import hug
import sub_api
@hug.cli()
def echo(text: hug.types.text):
    """Echo the given text argument back to the caller unchanged."""
    return text
@hug.extend_api(sub_command="sub_api")
def extend_with():
    """Register sub_api's handlers under the 'sub_api' sub-command.

    hug calls this to discover which modules extend the API; it must
    return a tuple of modules.
    """
    return (sub_api,)
# Run hug's command line interface when executed directly as a script.
if __name__ == "__main__":
    hug.API(__name__).cli()
| 12.611111 | 38 | 0.678414 | import hug
import sub_api
@hug.cli()
def echo(text: hug.types.text):
return text
@hug.extend_api(sub_command="sub_api")
def extend_with():
return (sub_api,)
if __name__ == "__main__":
hug.API(__name__).cli()
| true | true |
f72429427d8a34e18ac30aab9d42a7b0aef4b528 | 858 | py | Python | adain/learning_rate_schedule.py | srihari-humbarwadi/adain-tensorflow2.x | c0da16e4d39d5316683ed0988787aedbb1c9768c | [
"MIT"
] | 6 | 2020-12-27T08:20:19.000Z | 2021-07-03T10:28:21.000Z | adain/learning_rate_schedule.py | srihari-humbarwadi/adain-tensorflow2.x | c0da16e4d39d5316683ed0988787aedbb1c9768c | [
"MIT"
] | null | null | null | adain/learning_rate_schedule.py | srihari-humbarwadi/adain-tensorflow2.x | c0da16e4d39d5316683ed0988787aedbb1c9768c | [
"MIT"
] | null | null | null | import tensorflow as tf
class InverseDecay(tf.optimizers.schedules.LearningRateSchedule):
    """Inverse time decay: lr(step) = initial_lr / (1 + decay_rate * step).

    Yields ``initial_learning_rate`` at step 0 and decays hyperbolically
    as training progresses.
    """

    def __init__(self, initial_learning_rate, decay_rate):
        """
        Args:
            initial_learning_rate: Learning rate at step 0.
            decay_rate: Multiplier applied to the step count in the
                denominator; larger values decay faster.
        """
        super(InverseDecay, self).__init__()
        self.initial_learning_rate = initial_learning_rate
        self.decay_rate = decay_rate

    def __call__(self, step):
        """Return the learning rate for the given optimizer step."""
        # divide_no_nan returns 0 instead of NaN/inf should the denominator
        # ever be zero (only possible with a negative decay_rate).
        learning_rate = tf.math.divide_no_nan(
            self.initial_learning_rate,
            (1.0 + self.decay_rate * tf.cast(step, dtype=tf.float32)))
        return learning_rate

    def get_config(self):
        """Return the serialization config for this schedule.

        Bug fix: ``LearningRateSchedule.get_config`` is abstract and raises
        NotImplementedError, so the previous ``super(...).get_config()``
        call crashed whenever the schedule was serialized.  Build and
        return the dict directly, matching TF's own built-in schedules.
        """
        return {
            "initial_learning_rate": self.initial_learning_rate,
            "decay_rate": self.decay_rate,
        }
| 34.32 | 71 | 0.62704 | import tensorflow as tf
class InverseDecay(tf.optimizers.schedules.LearningRateSchedule):
def __init__(self, initial_learning_rate, decay_rate):
super(InverseDecay, self).__init__()
self.initial_learning_rate = initial_learning_rate
self.decay_rate = decay_rate
def __call__(self, step):
learning_rate = tf.math.divide_no_nan(
self.initial_learning_rate,
(1.0 + self.decay_rate * tf.cast(step, dtype=tf.float32)))
return learning_rate
def get_config(self):
config = {
"initial_learning_rate": self.initial_learning_rate,
"decay_rate": self.decay_rate,
}
base_config = super(InverseDecay,
self).get_config()
return dict(list(base_config.items()) + list(config.items()))
| true | true |
f7242a2472438688a5b39ecd9f960bcbed390c2b | 3,144 | py | Python | Code/stack.py | StephanieCherubin/core-data-structures | ad0cdf204871d4fece225fe7bdcbc2dc39214ea4 | [
"MIT"
] | null | null | null | Code/stack.py | StephanieCherubin/core-data-structures | ad0cdf204871d4fece225fe7bdcbc2dc39214ea4 | [
"MIT"
] | 5 | 2019-06-16T21:01:43.000Z | 2019-07-04T21:51:09.000Z | Code/stack.py | StephanieCherubin/core-data-structures | ad0cdf204871d4fece225fe7bdcbc2dc39214ea4 | [
"MIT"
] | null | null | null | #!python
from linkedlist import LinkedList
class LinkedStack(object):
def __init__(self, iterable=None):
"""Initialize this stack and push the given items, if any."""
# Initialize a new linked list to store the items
self.list = LinkedList()
if iterable is not None:
for item in iterable:
self.push(item)
def __repr__(self):
"""Return a string representation of this stack."""
return 'Stack({} items, top={})'.format(self.length(), self.peek())
def is_empty(self):
"""Return True if this stack is empty, or False otherwise."""
return self.list.is_empty()
def length(self):
"""Return the number of items in this stack."""
return self.list.length()
def push(self, item):
"""Insert the given item on the top of this stack.
Running time: O(???) – Why? [TODO]"""
return self.list.append(item)
def peek(self):
"""Return the item on the top of this stack without removing it,
or None if this stack is empty."""
if self.is_empty():
return None
return self.list.tail.data
def pop(self):
"""Remove and return the item on the top of this stack,
or raise ValueError if this stack is empty.
Running time: O(???) – Why? [TODO]"""
if self.is_empty():
raise ValueError
else:
node = self.list.tail.data
self.list.delete(node)
return node
# Implement ArrayStack below, then change the assignment at the bottom
# to use this Stack implementation to verify it passes all tests
class ArrayStack(object):
    """LIFO stack backed by a Python list; the top is the end of the list."""

    def __init__(self, iterable=None):
        """Initialize this stack and push the given items, if any."""
        self.list = list()  # dynamic array storing the items, top at the end
        if iterable is not None:
            for item in iterable:
                self.push(item)

    def __repr__(self):
        """Return a string representation of this stack."""
        return 'Stack({} items, top={})'.format(self.length(), self.peek())

    def is_empty(self):
        """Return True if this stack is empty, or False otherwise."""
        return not self.list  # an empty list is falsy

    def length(self):
        """Return the number of items in this stack."""
        return len(self.list)

    def push(self, item):
        """Insert the given item on the top of this stack.
        Running time: O(1) amortized -- list.append only reallocates the
        underlying array occasionally as it grows."""
        self.list.append(item)

    def peek(self):
        """Return the item on the top of this stack without removing it,
        or None if this stack is empty.
        Running time: O(1) -- direct index of the last element."""
        if self.is_empty():
            return None
        return self.list[-1]

    def pop(self):
        """Remove and return the item on the top of this stack,
        or raise ValueError if this stack is empty.
        Running time: O(1) -- list.pop() removes from the end, no shift."""
        if self.is_empty():
            raise ValueError('Cannot pop from an empty stack')
        return self.list.pop()
# Stack = LinkedStack
# Select which implementation the shared test suite exercises.
Stack = ArrayStack
| 31.128713 | 85 | 0.588422 |
from linkedlist import LinkedList
class LinkedStack(object):
def __init__(self, iterable=None):
self.list = LinkedList()
if iterable is not None:
for item in iterable:
self.push(item)
def __repr__(self):
return 'Stack({} items, top={})'.format(self.length(), self.peek())
def is_empty(self):
return self.list.is_empty()
def length(self):
return self.list.length()
def push(self, item):
return self.list.append(item)
def peek(self):
if self.is_empty():
return None
return self.list.tail.data
def pop(self):
if self.is_empty():
raise ValueError
else:
node = self.list.tail.data
self.list.delete(node)
return node
class ArrayStack(object):
def __init__(self, iterable=None):
self.list = list()
if iterable is not None:
for item in iterable:
self.push(item)
def __repr__(self):
return 'Stack({} items, top={})'.format(self.length(), self.peek())
def is_empty(self):
if len(self.list) == 0:
return True
return False
def length(self):
return len(self.list)
def push(self, item):
return self.list.append(item)
def peek(self):
if self.is_empty():
return None
return self.list[-1]
def pop(self):
if self.is_empty():
raise ValueError()
else:
return self.list.pop()
Stack = ArrayStack
| true | true |
f7242b61df16023c351969eeec311557005a9a87 | 7,795 | py | Python | grove/main.py | andrewgryan/bokeh-playground | aeab70627a5ccd7f210c354098d30bdf92bb553f | [
"BSD-3-Clause"
] | 3 | 2018-10-01T18:37:44.000Z | 2019-11-15T10:56:28.000Z | grove/main.py | andrewgryan/bokeh-playground | aeab70627a5ccd7f210c354098d30bdf92bb553f | [
"BSD-3-Clause"
] | null | null | null | grove/main.py | andrewgryan/bokeh-playground | aeab70627a5ccd7f210c354098d30bdf92bb553f | [
"BSD-3-Clause"
] | 1 | 2019-01-17T07:21:48.000Z | 2019-01-17T07:21:48.000Z | import argparse
import bokeh.plotting
import bokeh.models
import bokeh.palettes
import bokeh.colors
import cartopy
import numpy as np
import netCDF4
# Coordinate reference systems used throughout: Web-Mercator (as used by
# slippy map tiles) and equirectangular lon/lat degrees.
GOOGLE = cartopy.crs.Mercator.GOOGLE
PLATE_CARREE = cartopy.crs.PlateCarree()
def parse_args(argv=None):
    """Parse command line arguments.

    :param argv: optional argument list (defaults to sys.argv when None)
    :return: namespace with ``paths``, one or more input file paths
    """
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("paths", nargs="+")
    parsed = arg_parser.parse_args(args=argv)
    return parsed
def main():
    """Build the Bokeh document: a tiled world map of verification points,
    selection-driven controls, and a sorted-values side plot."""
    args = parse_args()
    # Start with a near-global view (mercator is unusable beyond ~85N/S).
    x_range, y_range = google_mercator([-180, 180], [-80, 80])
    figure = bokeh.plotting.figure(
        sizing_mode="stretch_both",
        x_range=x_range,
        y_range=y_range,
        x_axis_type="mercator",
        y_axis_type="mercator",
        active_scroll="wheel_zoom")
    figure.toolbar_location = None
    figure.axis.visible = False
    figure.min_border = 0
    tile = bokeh.models.WMTSTileSource(
        url='http://c.tile.openstreetmap.org/{Z}/{X}/{Y}.png',
        attribution="Attribution text goes here"
    )
    figure.add_tile(tile)
    # Box-select feeds the 'indices' callback below, which highlights the
    # selected points on the rank/value curve.
    box_select_tool = bokeh.models.BoxSelectTool()
    figure.add_tools(box_select_tool)
    figure.toolbar.active_drag = box_select_tool
    # Plot Class 4 positions
    source = bokeh.models.ColumnDataSource({
        "x": [],
        "y": [],
        "v": []
    })
    circle_source = bokeh.models.ColumnDataSource({
        "x": [],
        "y": []})
    def callback(attr, old, new):
        # Map each selected point to (rank of its value, value); the double
        # argsort converts values to 0-based ranks.
        v = np.ma.copy(source.data["v"])
        x = np.argsort(np.argsort(v))[new]
        y = v[new]
        circle_source.data = {
            "x": x,
            "y": y
        }
    source.selected.on_change("indices", callback)
    color_mapper = bokeh.models.LinearColorMapper(
        palette=bokeh.palettes.Plasma[256],
        nan_color=bokeh.colors.RGB(0, 0, 0, a=0),  # fully transparent NaNs
    )
    # View owns the (parameter, model, field) state and refills `source`.
    view = View(args.paths, source, color_mapper, figure)
    figure.circle(
        x="x",
        y="y",
        source=source,
        color={"field": "v", "transform": color_mapper},
        line_color={"field": "v", "transform": color_mapper})
    color_bar = bokeh.models.ColorBar(
        color_mapper=color_mapper,
        orientation='horizontal',
        background_fill_alpha=0,
        location='bottom_center',
        major_tick_line_color='black',
        bar_line_color='black')
    figure.add_layout(color_bar, 'center')
    widgets = []
    check_box = bokeh.models.CheckboxGroup(labels=["quality control"])
    check_box.on_change('active', view.on_change_quality)
    widgets.append(check_box)
    radio_group = bokeh.models.RadioGroup(labels=view.parameters)
    radio_group.on_change('active', view.on_change_parameter)
    widgets.append(radio_group)
    radio_group = bokeh.models.RadioGroup(labels=view.fields)
    radio_group.on_change('active', view.on_change_field)
    widgets.append(radio_group)
    radio_group = bokeh.models.RadioGroup(labels=view.models)
    radio_group.on_change('active', view.on_change_model)
    widgets.append(radio_group)
    # NOTE(review): bokeh.layouts is not imported at the top of this file --
    # it is reached as an attribute of the bokeh package; confirm it resolves.
    controls = bokeh.layouts.column(*widgets, name="hello")
    second_figure = bokeh.plotting.figure(
        name="hello",
        plot_width=300,
        plot_height=300)
    line_source = bokeh.models.ColumnDataSource({
        "x": [],
        "y": []})
    second_figure.line(x="x", y="y", source=line_source)
    second_figure.circle(x="x", y="y", source=circle_source)
    second_figure.toolbar.logo = None
    second_figure.toolbar_location = None
    second_figure.min_border_left = 20
    second_figure.min_border_right = 20
    second_figure.border_fill_alpha = 0
    def on_change(attr, old, new):
        # Whenever the map data changes, redraw the sorted-values curve
        # (masked entries dropped via compressed()).
        values = np.ma.copy(source.data["v"]).compressed()
        values.sort()
        line_source.data = {
            "x": np.arange(len(values)),
            "y": values
        }
    source.on_change("data", on_change)
    document = bokeh.plotting.curdoc()
    document.title = "Geo-relational ocean verification exploration tool"
    document.add_root(figure)
    document.add_root(controls)
    document.add_root(second_figure)
class View(object):
    """Mediator between the widget callbacks and the map.

    Holds the current (parameter, model, field) selection, indexes the
    input netCDF files by (obs_type, "system version configuration"), and
    re-fills the shared ColumnDataSource on every selection change.
    """
    def __init__(self, paths, source, color_mapper, figure):
        # Nothing is rendered until all three selections have been made.
        self.parameter = None
        self.model = None
        self.field = None
        self.paths = paths
        self.source = source
        self.color_mapper = color_mapper
        self.figure = figure
        # (parameter, model) -> file path
        self.store = {}
        models = []
        parameters = []
        for path in self.paths:
            with netCDF4.Dataset(path) as dataset:
                parameter = dataset.obs_type
                model = " ".join([
                    dataset.system,
                    dataset.version,
                    dataset.configuration])
                self.store[(parameter, model)] = path
                models.append(model)
                parameters.append(parameter)
        # Sorted, de-duplicated labels for the radio groups.
        self.parameters = list(sorted(set(parameters)))
        self.models = list(sorted(set(models)))
        self.fields = [
            "observation",
            "forecast",
            "forecast - observation",
            "|forecast - observation|"]
        self.quality_control = False
    def on_change_field(self, attr, old, new):
        """RadioGroup callback: `new` is the selected index."""
        self.field = self.fields[new]
        self.render()
    def on_change_quality(self, attr, old, new):
        """CheckboxGroup callback: `new` lists the active checkbox indices."""
        self.quality_control = 0 in new
        self.render()
    def on_change_model(self, attr, old, new):
        """RadioGroup callback: `new` is the selected index."""
        self.model = self.models[new]
        self.render()
    def on_change_parameter(self, attr, old, new):
        """RadioGroup callback: `new` is the selected index."""
        self.parameter = self.parameters[new]
        self.render()
    def render(self):
        """Load data for the current selection into the ColumnDataSource.

        No-op until a field, parameter and model have all been chosen.
        """
        if self.field is None:
            return
        if self.parameter is None:
            return
        if self.model is None:
            return
        path = self.store[(self.parameter, self.model)]
        print(path, self.field)
        with netCDF4.Dataset(path) as dataset:
            lats = dataset.variables["latitude"][:]
            lons = dataset.variables["longitude"][:]
            # The trailing indices select the first entry of each non-obs
            # axis; assumes forecast dims are (obs, lead, level, ...) --
            # TODO confirm against the file schema.
            if self.field == "forecast - observation":
                f = dataset.variables["forecast"][:, 0, 0, 0]
                o = dataset.variables["observation"][:, 0, 0]
                v = f - o
            elif self.field == "|forecast - observation|":
                f = dataset.variables["forecast"][:, 0, 0, 0]
                o = dataset.variables["observation"][:, 0, 0]
                v = np.ma.abs(f - o)
            elif self.field == "forecast":
                v = dataset.variables["forecast"][:, 0, 0, 0]
            elif self.field == "observation":
                v = dataset.variables["observation"][:, 0, 0]
            else:
                raise Exception("unknown field: {}".format(self.field))
            if self.quality_control:
                # Keep only observations whose qc flag is 0.
                flags = dataset.variables["qc"][:, 0, 0]
                pts = np.ma.where(flags == 0)
                lons = lons[pts]
                lats = lats[pts]
                v = v[pts]
        x, y = google_mercator(lons, lats)
        # Geographic filtering
        pts = np.ma.where(
            (x >= self.figure.x_range.start) &
            (x <= self.figure.x_range.end) &
            (y >= self.figure.y_range.start) &
            (y <= self.figure.y_range.end))
        x = x[pts]
        y = y[pts]
        v = v[pts]
        self.source.data = {
            "x": x,
            "y": y,
            "v": v
        }
        # Rescale the colour map to the visible data.
        self.color_mapper.low = v.min()
        self.color_mapper.high = v.max()
def google_mercator(lons, lats):
    """Convert lon/lat degrees to Web-Mercator (x, y) coordinates."""
    return transform(PLATE_CARREE, GOOGLE, lons, lats)
def plate_carree(x, y):
    """Convert Web-Mercator (x, y) coordinates back to lon/lat degrees."""
    return transform(GOOGLE, PLATE_CARREE, x, y)
def transform(src_crs, dst_crs, x, y):
    """Project coordinate array-likes from ``src_crs`` into ``dst_crs``.

    Inputs may be scalars, lists or arrays of any shape; the result is a
    pair of flattened 1-D arrays (xt, yt) dropping the z component that
    cartopy's transform_points returns.
    """
    xs = np.asarray(x).flatten()
    ys = np.asarray(y).flatten()
    points = dst_crs.transform_points(src_crs, xs, ys)
    return points[:, 0], points[:, 1]
# Only launch when executed by the bokeh server, which runs this script
# under a generated module name starting with "bk" (not under plain python).
if __name__.startswith("bk"):
    main()
| 31.305221 | 77 | 0.585888 | import argparse
import bokeh.plotting
import bokeh.models
import bokeh.palettes
import bokeh.colors
import cartopy
import numpy as np
import netCDF4
GOOGLE = cartopy.crs.Mercator.GOOGLE
PLATE_CARREE = cartopy.crs.PlateCarree()
def parse_args(argv=None):
parser = argparse.ArgumentParser()
parser.add_argument("paths", nargs="+")
return parser.parse_args(args=argv)
def main():
args = parse_args()
x_range, y_range = google_mercator([-180, 180], [-80, 80])
figure = bokeh.plotting.figure(
sizing_mode="stretch_both",
x_range=x_range,
y_range=y_range,
x_axis_type="mercator",
y_axis_type="mercator",
active_scroll="wheel_zoom")
figure.toolbar_location = None
figure.axis.visible = False
figure.min_border = 0
tile = bokeh.models.WMTSTileSource(
url='http://c.tile.openstreetmap.org/{Z}/{X}/{Y}.png',
attribution="Attribution text goes here"
)
figure.add_tile(tile)
box_select_tool = bokeh.models.BoxSelectTool()
figure.add_tools(box_select_tool)
figure.toolbar.active_drag = box_select_tool
source = bokeh.models.ColumnDataSource({
"x": [],
"y": [],
"v": []
})
circle_source = bokeh.models.ColumnDataSource({
"x": [],
"y": []})
def callback(attr, old, new):
v = np.ma.copy(source.data["v"])
x = np.argsort(np.argsort(v))[new]
y = v[new]
circle_source.data = {
"x": x,
"y": y
}
source.selected.on_change("indices", callback)
color_mapper = bokeh.models.LinearColorMapper(
palette=bokeh.palettes.Plasma[256],
nan_color=bokeh.colors.RGB(0, 0, 0, a=0),
)
view = View(args.paths, source, color_mapper, figure)
figure.circle(
x="x",
y="y",
source=source,
color={"field": "v", "transform": color_mapper},
line_color={"field": "v", "transform": color_mapper})
color_bar = bokeh.models.ColorBar(
color_mapper=color_mapper,
orientation='horizontal',
background_fill_alpha=0,
location='bottom_center',
major_tick_line_color='black',
bar_line_color='black')
figure.add_layout(color_bar, 'center')
widgets = []
check_box = bokeh.models.CheckboxGroup(labels=["quality control"])
check_box.on_change('active', view.on_change_quality)
widgets.append(check_box)
radio_group = bokeh.models.RadioGroup(labels=view.parameters)
radio_group.on_change('active', view.on_change_parameter)
widgets.append(radio_group)
radio_group = bokeh.models.RadioGroup(labels=view.fields)
radio_group.on_change('active', view.on_change_field)
widgets.append(radio_group)
radio_group = bokeh.models.RadioGroup(labels=view.models)
radio_group.on_change('active', view.on_change_model)
widgets.append(radio_group)
controls = bokeh.layouts.column(*widgets, name="hello")
second_figure = bokeh.plotting.figure(
name="hello",
plot_width=300,
plot_height=300)
line_source = bokeh.models.ColumnDataSource({
"x": [],
"y": []})
second_figure.line(x="x", y="y", source=line_source)
second_figure.circle(x="x", y="y", source=circle_source)
second_figure.toolbar.logo = None
second_figure.toolbar_location = None
second_figure.min_border_left = 20
second_figure.min_border_right = 20
second_figure.border_fill_alpha = 0
def on_change(attr, old, new):
values = np.ma.copy(source.data["v"]).compressed()
values.sort()
line_source.data = {
"x": np.arange(len(values)),
"y": values
}
source.on_change("data", on_change)
document = bokeh.plotting.curdoc()
document.title = "Geo-relational ocean verification exploration tool"
document.add_root(figure)
document.add_root(controls)
document.add_root(second_figure)
class View(object):
def __init__(self, paths, source, color_mapper, figure):
self.parameter = None
self.model = None
self.field = None
self.paths = paths
self.source = source
self.color_mapper = color_mapper
self.figure = figure
self.store = {}
models = []
parameters = []
for path in self.paths:
with netCDF4.Dataset(path) as dataset:
parameter = dataset.obs_type
model = " ".join([
dataset.system,
dataset.version,
dataset.configuration])
self.store[(parameter, model)] = path
models.append(model)
parameters.append(parameter)
self.parameters = list(sorted(set(parameters)))
self.models = list(sorted(set(models)))
self.fields = [
"observation",
"forecast",
"forecast - observation",
"|forecast - observation|"]
self.quality_control = False
def on_change_field(self, attr, old, new):
self.field = self.fields[new]
self.render()
def on_change_quality(self, attr, old, new):
self.quality_control = 0 in new
self.render()
def on_change_model(self, attr, old, new):
self.model = self.models[new]
self.render()
def on_change_parameter(self, attr, old, new):
self.parameter = self.parameters[new]
self.render()
def render(self):
if self.field is None:
return
if self.parameter is None:
return
if self.model is None:
return
path = self.store[(self.parameter, self.model)]
print(path, self.field)
with netCDF4.Dataset(path) as dataset:
lats = dataset.variables["latitude"][:]
lons = dataset.variables["longitude"][:]
if self.field == "forecast - observation":
f = dataset.variables["forecast"][:, 0, 0, 0]
o = dataset.variables["observation"][:, 0, 0]
v = f - o
elif self.field == "|forecast - observation|":
f = dataset.variables["forecast"][:, 0, 0, 0]
o = dataset.variables["observation"][:, 0, 0]
v = np.ma.abs(f - o)
elif self.field == "forecast":
v = dataset.variables["forecast"][:, 0, 0, 0]
elif self.field == "observation":
v = dataset.variables["observation"][:, 0, 0]
else:
raise Exception("unknown field: {}".format(self.field))
if self.quality_control:
flags = dataset.variables["qc"][:, 0, 0]
pts = np.ma.where(flags == 0)
lons = lons[pts]
lats = lats[pts]
v = v[pts]
x, y = google_mercator(lons, lats)
pts = np.ma.where(
(x >= self.figure.x_range.start) &
(x <= self.figure.x_range.end) &
(y >= self.figure.y_range.start) &
(y <= self.figure.y_range.end))
x = x[pts]
y = y[pts]
v = v[pts]
self.source.data = {
"x": x,
"y": y,
"v": v
}
self.color_mapper.low = v.min()
self.color_mapper.high = v.max()
def google_mercator(lons, lats):
return transform(PLATE_CARREE, GOOGLE, lons, lats)
def plate_carree(x, y):
return transform(GOOGLE, PLATE_CARREE, x, y)
def transform(src_crs, dst_crs, x, y):
x, y = np.asarray(x), np.asarray(y)
xt, yt, _ = dst_crs.transform_points(src_crs, x.flatten(), y.flatten()).T
return xt, yt
if __name__.startswith("bk"):
main()
| true | true |
f7242c3e7e8f72ec6c4c08c81dda4862d59be050 | 3,976 | py | Python | JURISM-EXTRACT.py | zuphilip/legal-resource-registry | 310bea8d17e07e9818015a3c04aae81214de9f9c | [
"BSD-2-Clause"
] | null | null | null | JURISM-EXTRACT.py | zuphilip/legal-resource-registry | 310bea8d17e07e9818015a3c04aae81214de9f9c | [
"BSD-2-Clause"
] | null | null | null | JURISM-EXTRACT.py | zuphilip/legal-resource-registry | 310bea8d17e07e9818015a3c04aae81214de9f9c | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/python
import os,sys,re,json
# Jurisdiction keys still processed when running with the -t/--t flag.
TESTING = ['ca', 'un.int']
def sortInfo(a, b):
    """cmp-style comparator (Python 2 list.sort) ordering entries by their
    first element in DESCENDING order: -1 when a's key is larger, 1 when
    smaller, 0 when equal."""
    # Boolean arithmetic encodes the three-way comparison in one expression.
    return (a[0] < b[0]) - (a[0] > b[0])
class Courts():
    """Walk ./data/courts and emit jurism/**/info.json files, one per
    jurisdiction category, listing each category's immediate children.

    NOTE: Python 2 code (cmp-style sort argument, dict.has_key).
    """
    def __init__(self, opt):
        self.opt = opt
        # The whole extraction runs as a side effect of construction.
        self.walk()
    def checkFile(self, dirname):
        """Scan dirname/index.txt for its first RST-style directive.

        Returns the category name from a ``.. category::`` line, None if a
        ``.. court::`` line appears first, and deliberately crashes (bare
        raise with no active exception) if the file ends without either.
        """
        ifh = open(os.path.join(dirname,'index.txt'))
        while 1:
            line = ifh.readline()
            if not line:
                ifh.close()
                break
            line = line.strip()
            m = re.match("^\.\.\s+category::\s*(.*)$",line)
            if m:
                name = m.group(1)
                ifh.close()
                return name
            m = re.match("^\.\.\s+court::", line)
            if m:
                ifh.close()
                return None
        ifh.close()
        raise
    def walk(self):
        """Traverse the data tree, writing one info.json per category and
        finally merging any new country names into jurism/info.json."""
        for dirname,dirs,files in os.walk('./data/courts'):
            #if dirname == './data/courts': continue
            # Output path mirrors the input path below ./data/courts.
            path = os.path.join('jurism','/'.join(dirname.split('/')[3:]))
            dlst = dirname.split(os.path.sep)
            key = dlst[-1]
            # With -t/--t, only descend into the TESTING jurisdictions.
            if self.opt.testing and len(dlst) > 3 and not dlst[3] in TESTING:
                continue
            name = self.checkFile(dirname)
            if name == None:
                continue
            # name (not needed)
            # key
            # path
            # immediate child key/name pairs
            # count
            childJurisdictions = []
            # Iterate in reverse so non-category children can be pruned
            # from dirs in place (which also stops os.walk descending).
            for i in range(len(dirs)-1,-1,-1):
                d = dirs[i]
                subname = self.checkFile(os.path.join(dirname,d))
                if subname == None:
                    dirs.pop(i)
                    continue
                # Flag whether this child itself has category children.
                hasChildren = 0
                for subchild in os.listdir(os.path.join(dirname,d)):
                    subchildPath = os.path.join(dirname,d,subchild)
                    if (os.path.isdir(subchildPath) and self.checkFile(subchildPath)):
                        hasChildren = 1
                        break
                childJurisdictions.append([d, subname, hasChildren])
            #if len(childJurisdictions) == 0:
            #    continue
            # Produce one file for each hit which
            # (1) is saved to the path
            # (2) is named by the single-element key
            # (3) contains the key, name and child count of each entry
            try:
                os.makedirs(path)
            except:
                # NOTE(review): bare except also hides permission errors,
                # not just "directory exists" -- confirm before narrowing.
                pass
            # Sort in reverse order (for stable output - reverse-order sort
            # has not special significance)
            childJurisdictions.sort(sortInfo)
            open(os.path.join(path,'info.json'),'w+').write(json.dumps(childJurisdictions))
            # Progress dot per directory written.
            sys.stdout.write('.')
            sys.stdout.flush()
        # Merge country names from tools/country-names.json into the
        # top-level jurism/info.json, lower-casing keys and appending any
        # countries not already present (child count 0).
        newCountries = json.loads(open('./tools/country-names.json').read())
        countries = json.loads(open('./jurism/info.json').read())
        oldCountries = {}
        for entry in countries:
            oldCountries[entry[0]] = True
        for key in newCountries:
            if not oldCountries.has_key(key.lower()):
                countries.append([key.lower(),newCountries[key],0])
        open('./jurism/info.json', 'w+').write(json.dumps(countries))
if __name__ == '__main__':
    # NOTE(review): ConfigParser (Python 2 module name) is imported here
    # but never used -- confirm before removing.
    from ConfigParser import ConfigParser
    from optparse import OptionParser
    os.environ['LANG'] = "en_US.UTF-8"
    usage = '\n%prog [options]'
    description="Writes minimal JSON expression of LRR jurisdiction data into source file."
    # optparse CLI: -t/--t restricts output to the TESTING jurisdictions.
    parser = OptionParser(usage=usage,description=description,epilog="And that's all for now!")
    parser.add_option("-t", "--t", dest="testing",
                      default=False,
                      action="store_true",
                      help='Output minimal test data only.')
    (opt, args) = parser.parse_args()
    # Constructing Courts performs the entire extraction (see __init__).
    Courts(opt)
| 32.590164 | 95 | 0.502012 |
import os,sys,re,json
TESTING = ['ca', 'un.int']
def sortInfo(a, b):
if a[0] > b[0]:
return -1
elif a[0] < b[0]:
return 1
else:
return 0
class Courts():
def __init__(self, opt):
self.opt = opt
self.walk()
def checkFile(self, dirname):
ifh = open(os.path.join(dirname,'index.txt'))
while 1:
line = ifh.readline()
if not line:
ifh.close()
break
line = line.strip()
m = re.match("^\.\.\s+category::\s*(.*)$",line)
if m:
name = m.group(1)
ifh.close()
return name
m = re.match("^\.\.\s+court::", line)
if m:
ifh.close()
return None
ifh.close()
raise
def walk(self):
for dirname,dirs,files in os.walk('./data/courts'):
path = os.path.join('jurism','/'.join(dirname.split('/')[3:]))
dlst = dirname.split(os.path.sep)
key = dlst[-1]
if self.opt.testing and len(dlst) > 3 and not dlst[3] in TESTING:
continue
name = self.checkFile(dirname)
if name == None:
continue
childJurisdictions = []
for i in range(len(dirs)-1,-1,-1):
d = dirs[i]
subname = self.checkFile(os.path.join(dirname,d))
if subname == None:
dirs.pop(i)
continue
hasChildren = 0
for subchild in os.listdir(os.path.join(dirname,d)):
subchildPath = os.path.join(dirname,d,subchild)
if (os.path.isdir(subchildPath) and self.checkFile(subchildPath)):
hasChildren = 1
break
childJurisdictions.append([d, subname, hasChildren])
try:
os.makedirs(path)
except:
pass
childJurisdictions.sort(sortInfo)
open(os.path.join(path,'info.json'),'w+').write(json.dumps(childJurisdictions))
sys.stdout.write('.')
sys.stdout.flush()
newCountries = json.loads(open('./tools/country-names.json').read())
countries = json.loads(open('./jurism/info.json').read())
oldCountries = {}
for entry in countries:
oldCountries[entry[0]] = True
for key in newCountries:
if not oldCountries.has_key(key.lower()):
countries.append([key.lower(),newCountries[key],0])
open('./jurism/info.json', 'w+').write(json.dumps(countries))
if __name__ == '__main__':
from ConfigParser import ConfigParser
from optparse import OptionParser
os.environ['LANG'] = "en_US.UTF-8"
usage = '\n%prog [options]'
description="Writes minimal JSON expression of LRR jurisdiction data into source file."
parser = OptionParser(usage=usage,description=description,epilog="And that's all for now!")
parser.add_option("-t", "--t", dest="testing",
default=False,
action="store_true",
help='Output minimal test data only.')
(opt, args) = parser.parse_args()
Courts(opt)
| true | true |
f7242c909ddbbc02190ea1c52b9234a5504cda6f | 2,892 | py | Python | openGaussBase/testcase/TOOLS/SERVER_TOOLS/gs_check/Opengauss_Function_Tools_gs_check_Case0378.py | opengauss-mirror/Yat | aef107a8304b94e5d99b4f1f36eb46755eb8919e | [
"MulanPSL-1.0"
] | null | null | null | openGaussBase/testcase/TOOLS/SERVER_TOOLS/gs_check/Opengauss_Function_Tools_gs_check_Case0378.py | opengauss-mirror/Yat | aef107a8304b94e5d99b4f1f36eb46755eb8919e | [
"MulanPSL-1.0"
] | null | null | null | openGaussBase/testcase/TOOLS/SERVER_TOOLS/gs_check/Opengauss_Function_Tools_gs_check_Case0378.py | opengauss-mirror/Yat | aef107a8304b94e5d99b4f1f36eb46755eb8919e | [
"MulanPSL-1.0"
] | null | null | null | """
Copyright (c) 2022 Huawei Technologies Co.,Ltd.
openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
"""
Case Type : 服务端工具
Case Name : 例行巡检场景检查跳过指定多个检查项
Description :
例行巡检场景检查跳过指定多个检查项:
gs_check -e inspect --skip-items CheckSshdService,CheckSshdConfig,
CheckHashIndex,CheckFilehandle
Expect :
检查完成
History :
"""
import os
import unittest
from testcase.utils.Constant import Constant
from testcase.utils.Logger import Logger
from yat.test import Node
from yat.test import macro
class Tools(unittest.TestCase):
    """Routine-inspection check for gs_check with multiple skipped items
    (openGauss server tool test case 0378)."""
    def setUp(self):
        """Prepare test nodes, constants, the report-cleanup glob and the
        comma-separated list of check items to skip."""
        self.log = Logger()
        self.log.info('---Opengauss_Function_Tools_gs_check_Case0378_开始---')
        self.dbuser_node = Node('dbuser')
        self.root_node = Node('default')
        # Glob matching the CheckReport* directories gs_check leaves behind.
        self.clear_path = os.path.join(
            os.path.dirname(macro.DB_INSTANCE_PATH), 'tool', 'script',
            'gspylib', 'inspection', 'output', 'CheckReport*')
        self.Constant = Constant()
        self.skip_options = f'CheckSshdService,CheckSshdConfig,' \
                            f'CheckHashIndex,CheckFilehandle'
    def test_server_tools1(self):
        """Run `gs_check -e inspect --skip-items ...` under expect (it
        prompts for the root user and password) and assert the success
        markers appear in the captured output."""
        text = '------step1:例行巡检场景检查跳过指定多个检查项;expect:检查完成------'
        self.log.info(text)
        # The embedded expect script answers the interactive root
        # user/password prompts with no timeout.
        check_cmd = f'''su - {self.dbuser_node.ssh_user} -c "
source {macro.DB_ENV_PATH};
expect -c \\\"set timeout -1
spawn gs_check -e inspect --skip-items {self.skip_options}
expect *]:
send {self.root_node.ssh_user}\\n
expect *]:
send {self.root_node.ssh_password}\\n
expect eof\\\""'''
        self.log.info(check_cmd)
        shell_res = os.popen(check_cmd)
        str_res = ''.join(shell_res.readlines())
        self.log.info(str_res)
        # Success requires marker [2] plus at least one of markers [0]/[1].
        flag = (self.Constant.GS_CHECK_SUCCESS_MSG2[0] in str_res or
                self.Constant.GS_CHECK_SUCCESS_MSG2[1] in str_res) and \
               self.Constant.GS_CHECK_SUCCESS_MSG2[2] in str_res
        self.assertTrue(flag, '执行失败:' + text)
    def tearDown(self):
        """Remove the generated check reports; an empty rm -rf result
        means cleanup succeeded."""
        text = '----------清理环境----------'
        self.log.info(text)
        clear_cmd = f'rm -rf {self.clear_path};'
        self.log.info(clear_cmd)
        clear_msg = self.root_node.sh(clear_cmd).result()
        self.log.info(clear_msg)
        self.assertEqual('', clear_msg, '执行失败:' + text)
        self.log.info(
            '---Opengauss_Function_Tools_gs_check_Case0378_结束---')
| 36.15 | 84 | 0.629322 |
import os
import unittest
from testcase.utils.Constant import Constant
from testcase.utils.Logger import Logger
from yat.test import Node
from yat.test import macro
class Tools(unittest.TestCase):
def setUp(self):
self.log = Logger()
self.log.info('---Opengauss_Function_Tools_gs_check_Case0378_开始---')
self.dbuser_node = Node('dbuser')
self.root_node = Node('default')
self.clear_path = os.path.join(
os.path.dirname(macro.DB_INSTANCE_PATH), 'tool', 'script',
'gspylib', 'inspection', 'output', 'CheckReport*')
self.Constant = Constant()
self.skip_options = f'CheckSshdService,CheckSshdConfig,' \
f'CheckHashIndex,CheckFilehandle'
def test_server_tools1(self):
text = '------step1:例行巡检场景检查跳过指定多个检查项;expect:检查完成------'
self.log.info(text)
check_cmd = f'''su - {self.dbuser_node.ssh_user} -c "
source {macro.DB_ENV_PATH};
expect -c \\\"set timeout -1
spawn gs_check -e inspect --skip-items {self.skip_options}
expect *]:
send {self.root_node.ssh_user}\\n
expect *]:
send {self.root_node.ssh_password}\\n
expect eof\\\""'''
self.log.info(check_cmd)
shell_res = os.popen(check_cmd)
str_res = ''.join(shell_res.readlines())
self.log.info(str_res)
flag = (self.Constant.GS_CHECK_SUCCESS_MSG2[0] in str_res or
self.Constant.GS_CHECK_SUCCESS_MSG2[1] in str_res) and \
self.Constant.GS_CHECK_SUCCESS_MSG2[2] in str_res
self.assertTrue(flag, '执行失败:' + text)
def tearDown(self):
text = '----------清理环境----------'
self.log.info(text)
clear_cmd = f'rm -rf {self.clear_path};'
self.log.info(clear_cmd)
clear_msg = self.root_node.sh(clear_cmd).result()
self.log.info(clear_msg)
self.assertEqual('', clear_msg, '执行失败:' + text)
self.log.info(
'---Opengauss_Function_Tools_gs_check_Case0378_结束---')
| true | true |
f7242d7d37bde296d9898568e1954e17602cae9c | 294 | py | Python | src/1072.flip-columns-for-maximum-number-of-equal-rows/flip-columns-for-maximum-number-of-equal-rows.py | lyphui/Just-Code | e0c3c3ecb67cb805080ff686e88522b2bffe7741 | [
"MIT"
] | 782 | 2019-11-19T08:20:49.000Z | 2022-03-25T06:59:09.000Z | src/1072.flip-columns-for-maximum-number-of-equal-rows/flip-columns-for-maximum-number-of-equal-rows.py | Heitao5200/Just-Code | 5bb3ee485a103418e693b7ec8e26dc84f3691c79 | [
"MIT"
] | 1 | 2021-03-04T12:21:01.000Z | 2021-03-05T01:23:54.000Z | src/1072.flip-columns-for-maximum-number-of-equal-rows/flip-columns-for-maximum-number-of-equal-rows.py | Heitao5200/Just-Code | 5bb3ee485a103418e693b7ec8e26dc84f3691c79 | [
"MIT"
] | 155 | 2019-11-20T08:20:42.000Z | 2022-03-19T07:28:09.000Z | class Solution:
def maxEqualRowsAfterFlips(self, matrix: List[List[int]]) -> int:
pattern = collections.defaultdict(int)
for row in matrix:
pattern[tuple(row)] += 1
pattern[tuple(1 - c for c in row)] += 1
return max(pattern.values()) | 36.75 | 69 | 0.578231 | class Solution:
def maxEqualRowsAfterFlips(self, matrix: List[List[int]]) -> int:
pattern = collections.defaultdict(int)
for row in matrix:
pattern[tuple(row)] += 1
pattern[tuple(1 - c for c in row)] += 1
return max(pattern.values()) | true | true |
f7242e4d28fb291868f594740866678de4dde987 | 775 | py | Python | main_hub/migrations/0005_auto_20190717_1512.py | emiledelmas/vkshub | 214cc35985573654e3b81c6ab674049bd93262b8 | [
"MIT"
] | 1 | 2020-03-31T14:26:55.000Z | 2020-03-31T14:26:55.000Z | main_hub/migrations/0005_auto_20190717_1512.py | emiledelmas/vkshub | 214cc35985573654e3b81c6ab674049bd93262b8 | [
"MIT"
] | null | null | null | main_hub/migrations/0005_auto_20190717_1512.py | emiledelmas/vkshub | 214cc35985573654e3b81c6ab674049bd93262b8 | [
"MIT"
] | null | null | null | # Generated by Django 2.2.3 on 2019-07-17 13:12
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main_hub', '0004_auto_20190717_1154'),
]
operations = [
migrations.RemoveField(
model_name='document',
name='file',
),
migrations.RemoveField(
model_name='document',
name='folder',
),
migrations.RemoveField(
model_name='document',
name='uploaded_at',
),
migrations.AddField(
model_name='document',
name='docfile',
field=models.FileField(default='test', upload_to='documents/%Y/%m/%d'),
preserve_default=False,
),
]
| 24.21875 | 83 | 0.547097 |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main_hub', '0004_auto_20190717_1154'),
]
operations = [
migrations.RemoveField(
model_name='document',
name='file',
),
migrations.RemoveField(
model_name='document',
name='folder',
),
migrations.RemoveField(
model_name='document',
name='uploaded_at',
),
migrations.AddField(
model_name='document',
name='docfile',
field=models.FileField(default='test', upload_to='documents/%Y/%m/%d'),
preserve_default=False,
),
]
| true | true |
f7242e88b17192c50c5629365da85996632cd697 | 5,633 | py | Python | fastweb/loader.py | BSlience/fastweb | 2c1b956e9846c4205d0201d39d09891d088754e4 | [
"Apache-2.0"
] | 123 | 2017-06-06T04:59:07.000Z | 2019-07-11T10:20:35.000Z | fastweb/loader.py | BSlience/fastweb | 2c1b956e9846c4205d0201d39d09891d088754e4 | [
"Apache-2.0"
] | null | null | null | fastweb/loader.py | BSlience/fastweb | 2c1b956e9846c4205d0201d39d09891d088754e4 | [
"Apache-2.0"
] | 2 | 2017-06-28T05:58:39.000Z | 2018-09-25T00:18:33.000Z | # coding:utf8
"""系统全局加载模块
外部调用想要影响fastweb行为,必须通过改模块中的方法
所有工作都是在启动前完成,外部导入全部使用全路径引用,防止错误的引入
"""
import json
from .accesspoint import ioloop
import fastweb.manager
from fastweb.util.tool import timing
from fastweb.accesspoint import AsyncHTTPClient
from fastweb.util.configuration import ConfigurationParser
from fastweb.util.log import setup_logging, getLogger, recorder, check_logging_level, set_record_color
__all__ = ['app']
DEFAULT_APP_LOG_PATH = 'fastweb@application.log'
DEFAULT_SYS_LOG_PATH = 'fastweb@system.log'
class Loader(object):
"""系统全局加载器
"""
def __init__(self):
# 配置系统
self.configs = None
self.configer = None
self.component_configers = []
# 日志系统
self.system_recorder = None
self.application_recorder = None
# 系统错误码
self.errcode = None
# 日志是否被设置过
self.bRecorder = False
# 增加最大数据量
AsyncHTTPClient.configure(None, max_body_size=1000000000)
def load_recorder(self, application_log_path=DEFAULT_APP_LOG_PATH, system_log_path=DEFAULT_SYS_LOG_PATH,
logging_setting=None, application_level='DEBUG', system_level='DEBUG', logging_colormap=None):
"""加载日志对象
需要最先加载,因为其他加载都需要使用recorder
其他server启动时会默认加载一遍,用户没有特殊需求可以不加载
:parameter:
- `application_log_path`: 应用日志路径
- `system_log_path`: 系统日志路径,默认系统日志路径和应用日志路径相同
- `logging_setting_path`: 默认从fastweb.settting.default_logging.yaml获取配置,
可以指定为自定义的日志配置,必须有application_recorder和system_recorder
- `logging_setting`: 自定以logging配置
- `application_level`: 应用日志输出级别
- `system_level`: 系统日志输出级别
- `logging_colormap`: 输出日志颜色
"""
if not logging_setting:
from fastweb.setting.default_logging import DEFAULT_LOGGING_SETTING
logging_setting = DEFAULT_LOGGING_SETTING
logging_setting['handlers']['application_file_time_handler']['filename'] = application_log_path
logging_setting['handlers']['system_file_size_handler']['filename'] = system_log_path
if application_level:
check_logging_level(application_level)
logging_setting['loggers']['application_recorder']['level'] = application_level
if system_level:
check_logging_level(system_level)
logging_setting['loggers']['system_recorder']['level'] = system_level
setup_logging(logging_setting)
self.system_recorder = getLogger('system_recorder')
self.application_recorder = getLogger('application_recorder')
if logging_colormap:
set_record_color(logging_colormap)
self.bRecorder = True
recorder('INFO',
'load recorder configuration\n{conf}\n\n'
'application log: {app_path} [{app_level}]\n'
'system log: {sys_path} [{sys_level}]'.format(conf=json.dumps(logging_setting, indent=4),
app_path=application_log_path,
app_level=application_level,
sys_path=system_log_path,
sys_level=system_level))
def load_configuration(self, backend='ini', **setting):
"""加载配置文件
:parameter:
- `backend`: 配置方式,目前支持ini
- `setting`: 该格式需要的设置参数
"""
self.configer = ConfigurationParser(backend, **setting)
self.configs = self.configer.configs
recorder('INFO', 'load configuration\nbackend:\t{backend}\n'
'setting:\t{setting}\nconfiguration:\t{config}'.format(backend=backend,
setting=setting,
config=self.configs))
def load_component(self, layout, backend='ini', **setting):
"""加载组件管理器
可以进行多次加载
:parameter:
- `layout`: 当前调用的层次,web, service, task
- `backend`: 配置方式,目前支持ini
- `setting`: 该格式需要的设置参数
"""
layout = layout.lower()
configer = ConfigurationParser(backend, **setting)
# 加载需要管理连接池的组件
recorder('INFO', 'load connection component start')
with timing('ms', 10) as t:
if layout in ['service']:
fastweb.manager.SyncConnManager.setup(configer)
elif layout in ['web']:
fastweb.manager.AsynConnManager.configer = configer
ioloop.IOLoop.current().run_sync(fastweb.manager.AsynConnManager.setup)
recorder('INFO', 'load connection component successful -- {time}'.format(time=t))
# 加载不需要管理连接池的组件
recorder('INFO', 'load component start')
with timing('ms', 10) as t:
fastweb.manager.Manager.setup(layout, configer)
recorder('INFO', 'load component successful -- {time}'.format(time=t))
self.component_configers.append(configer)
return configer
def load_errcode(self, errcode=None):
"""加载系统错误码
:parameter:
- `errcode`:自定义错误码
"""
if errcode:
self.errcode = errcode
else:
from fastweb.setting.default_errcode import ERRCODE
self.errcode = ERRCODE
recorder('INFO', 'load errcode\n{errcode}'.format(errcode=json.dumps(self.errcode, indent=4)))
return self.errcode
app = Loader()
| 34.347561 | 116 | 0.59968 |
import json
from .accesspoint import ioloop
import fastweb.manager
from fastweb.util.tool import timing
from fastweb.accesspoint import AsyncHTTPClient
from fastweb.util.configuration import ConfigurationParser
from fastweb.util.log import setup_logging, getLogger, recorder, check_logging_level, set_record_color
__all__ = ['app']
DEFAULT_APP_LOG_PATH = 'fastweb@application.log'
DEFAULT_SYS_LOG_PATH = 'fastweb@system.log'
class Loader(object):
def __init__(self):
self.configs = None
self.configer = None
self.component_configers = []
self.system_recorder = None
self.application_recorder = None
self.errcode = None
self.bRecorder = False
AsyncHTTPClient.configure(None, max_body_size=1000000000)
def load_recorder(self, application_log_path=DEFAULT_APP_LOG_PATH, system_log_path=DEFAULT_SYS_LOG_PATH,
logging_setting=None, application_level='DEBUG', system_level='DEBUG', logging_colormap=None):
if not logging_setting:
from fastweb.setting.default_logging import DEFAULT_LOGGING_SETTING
logging_setting = DEFAULT_LOGGING_SETTING
logging_setting['handlers']['application_file_time_handler']['filename'] = application_log_path
logging_setting['handlers']['system_file_size_handler']['filename'] = system_log_path
if application_level:
check_logging_level(application_level)
logging_setting['loggers']['application_recorder']['level'] = application_level
if system_level:
check_logging_level(system_level)
logging_setting['loggers']['system_recorder']['level'] = system_level
setup_logging(logging_setting)
self.system_recorder = getLogger('system_recorder')
self.application_recorder = getLogger('application_recorder')
if logging_colormap:
set_record_color(logging_colormap)
self.bRecorder = True
recorder('INFO',
'load recorder configuration\n{conf}\n\n'
'application log: {app_path} [{app_level}]\n'
'system log: {sys_path} [{sys_level}]'.format(conf=json.dumps(logging_setting, indent=4),
app_path=application_log_path,
app_level=application_level,
sys_path=system_log_path,
sys_level=system_level))
def load_configuration(self, backend='ini', **setting):
self.configer = ConfigurationParser(backend, **setting)
self.configs = self.configer.configs
recorder('INFO', 'load configuration\nbackend:\t{backend}\n'
'setting:\t{setting}\nconfiguration:\t{config}'.format(backend=backend,
setting=setting,
config=self.configs))
def load_component(self, layout, backend='ini', **setting):
layout = layout.lower()
configer = ConfigurationParser(backend, **setting)
recorder('INFO', 'load connection component start')
with timing('ms', 10) as t:
if layout in ['service']:
fastweb.manager.SyncConnManager.setup(configer)
elif layout in ['web']:
fastweb.manager.AsynConnManager.configer = configer
ioloop.IOLoop.current().run_sync(fastweb.manager.AsynConnManager.setup)
recorder('INFO', 'load connection component successful -- {time}'.format(time=t))
recorder('INFO', 'load component start')
with timing('ms', 10) as t:
fastweb.manager.Manager.setup(layout, configer)
recorder('INFO', 'load component successful -- {time}'.format(time=t))
self.component_configers.append(configer)
return configer
def load_errcode(self, errcode=None):
if errcode:
self.errcode = errcode
else:
from fastweb.setting.default_errcode import ERRCODE
self.errcode = ERRCODE
recorder('INFO', 'load errcode\n{errcode}'.format(errcode=json.dumps(self.errcode, indent=4)))
return self.errcode
app = Loader()
| true | true |
f7242f0f2b5f3d7eb503d60928904f226091cf0f | 4,284 | py | Python | circuitgraph/tests/test_analysis.py | ncasti/circuitgraph | 9fe129d6ba64b4254d2d27d16ae69b7dce5ce957 | [
"MIT"
] | 35 | 2020-07-17T21:02:31.000Z | 2022-03-22T20:48:30.000Z | circuitgraph/tests/test_analysis.py | ncasti/circuitgraph | 9fe129d6ba64b4254d2d27d16ae69b7dce5ce957 | [
"MIT"
] | 23 | 2020-07-30T17:58:33.000Z | 2021-09-24T16:41:34.000Z | circuitgraph/tests/test_analysis.py | ncasti/circuitgraph | 9fe129d6ba64b4254d2d27d16ae69b7dce5ce957 | [
"MIT"
] | 6 | 2020-07-31T18:27:14.000Z | 2021-11-11T19:32:47.000Z | import unittest
import circuitgraph as cg
from circuitgraph.analysis import *
from circuitgraph.sat import sat
from random import choice, randint
from itertools import product
class TestAnalysis(unittest.TestCase):
def setUp(self):
self.s27 = cg.strip_blackboxes(cg.from_lib("s27"))
def test_avg_sensitivity(self):
c = cg.Circuit()
c.add("and", "and")
c.add("in0", "input", fanout="and")
c.add("in1", "input", fanout="and")
self.assertEqual(avg_sensitivity(c, "and", approx=False), 1.0)
avg_sen = avg_sensitivity(self.s27, "G17", approx=False)
# get startpoints of node
avg_sen_comp = 0
n = "G17"
sp = self.s27.startpoints(n)
for s in sp:
# compute influence
infl = 0
for vs in product([False, True], repeat=len(sp)):
asmp = {i: v for i, v in zip(sp, vs)}
asmp_ns = {i: v if i != s else not v for i, v in zip(sp, vs)}
r = cg.sat(self.s27, asmp)[n]
r_ns = cg.sat(self.s27, asmp_ns)[n]
if r != r_ns:
infl += 1
avg_sen_comp += infl / (2 ** len(sp))
self.assertEqual(avg_sen, avg_sen_comp)
def test_sensitivity(self):
# pick random node and input value
n = choice(tuple(self.s27.nodes()))
sp = self.s27.startpoints(n)
while len(sp) < 1:
n = choice(tuple(self.s27.nodes()))
sp = self.s27.startpoints(n)
# find sensitivity
sen = sensitivity(self.s27, n)
# check
sen_sim = 0
for vs in product([False, True], repeat=len(sp)):
input_sen = 0
input_val = {i: v for i, v in zip(sp, vs)}
n_val = cg.sat(self.s27, input_val)[n]
for s in sp:
flip_input_val = {i: v if i != s else not v for i, v in zip(sp, vs)}
flip_n_val = cg.sat(self.s27, flip_input_val)[n]
if flip_n_val != n_val:
input_sen += 1
sen_sim = max(sen_sim, input_sen)
# check answer
if sen != sen_sim:
import code
code.interact(local=dict(**globals(), **locals()))
self.assertEqual(sen, sen_sim)
def test_sensitize(self):
# pick random node
nr = choice(
tuple(self.s27.nodes() - set(["clk"]) - self.s27.filter_type(["0", "1"]))
)
# pick startpoint
ns = choice(tuple(self.s27.startpoints() - set(["clk"])))
# pick endpoint
ne = choice(tuple(self.s27.endpoints() - set(["clk"])))
for n in [nr, ns, ne]:
# get input
input_val = sensitize(self.s27, n, {f"c0_{n}": True})
if not input_val:
import code
code.interact(local=dict(**globals(), **locals()))
# simulate input
result = sat(self.s27, input_val)
if not result[n]:
import code
code.interact(local=dict(**globals(), **locals()))
self.assertTrue(result[n])
# remove constrained input
if n in input_val:
input_val.pop(n)
# simulate on faulty circuit
c_f = cg.copy(self.s27)
c_f.disconnect(c_f.fanin(n), n)
c_f.set_type(n, "input")
result_f = sat(c_f, {**input_val, n: False})
self.assertFalse(result_f[n])
self.assertTrue(
any(result_f[e] != result[e] for e in self.s27.endpoints(n))
)
def test_signal_probability(self):
# pick random node
n = choice(
tuple(self.s27.nodes() - self.s27.startpoints() - self.s27.endpoints())
)
sp = self.s27.startpoints(n)
# get signal prob
p = signal_probability(self.s27, n, approx=False)
# compute prob
m = 0
for vs in product([False, True], repeat=len(sp)):
asmp = {i: v for i, v in zip(sp, vs)}
m += cg.sat(self.s27, asmp)[n]
if m / (2 ** len(sp)) != p:
import code
code.interact(local=dict(globals(), **locals()))
self.assertEqual(m / (2 ** len(sp)), p)
| 31.970149 | 85 | 0.516106 | import unittest
import circuitgraph as cg
from circuitgraph.analysis import *
from circuitgraph.sat import sat
from random import choice, randint
from itertools import product
class TestAnalysis(unittest.TestCase):
def setUp(self):
self.s27 = cg.strip_blackboxes(cg.from_lib("s27"))
def test_avg_sensitivity(self):
c = cg.Circuit()
c.add("and", "and")
c.add("in0", "input", fanout="and")
c.add("in1", "input", fanout="and")
self.assertEqual(avg_sensitivity(c, "and", approx=False), 1.0)
avg_sen = avg_sensitivity(self.s27, "G17", approx=False)
avg_sen_comp = 0
n = "G17"
sp = self.s27.startpoints(n)
for s in sp:
infl = 0
for vs in product([False, True], repeat=len(sp)):
asmp = {i: v for i, v in zip(sp, vs)}
asmp_ns = {i: v if i != s else not v for i, v in zip(sp, vs)}
r = cg.sat(self.s27, asmp)[n]
r_ns = cg.sat(self.s27, asmp_ns)[n]
if r != r_ns:
infl += 1
avg_sen_comp += infl / (2 ** len(sp))
self.assertEqual(avg_sen, avg_sen_comp)
def test_sensitivity(self):
n = choice(tuple(self.s27.nodes()))
sp = self.s27.startpoints(n)
while len(sp) < 1:
n = choice(tuple(self.s27.nodes()))
sp = self.s27.startpoints(n)
sen = sensitivity(self.s27, n)
sen_sim = 0
for vs in product([False, True], repeat=len(sp)):
input_sen = 0
input_val = {i: v for i, v in zip(sp, vs)}
n_val = cg.sat(self.s27, input_val)[n]
for s in sp:
flip_input_val = {i: v if i != s else not v for i, v in zip(sp, vs)}
flip_n_val = cg.sat(self.s27, flip_input_val)[n]
if flip_n_val != n_val:
input_sen += 1
sen_sim = max(sen_sim, input_sen)
if sen != sen_sim:
import code
code.interact(local=dict(**globals(), **locals()))
self.assertEqual(sen, sen_sim)
def test_sensitize(self):
nr = choice(
tuple(self.s27.nodes() - set(["clk"]) - self.s27.filter_type(["0", "1"]))
)
ns = choice(tuple(self.s27.startpoints() - set(["clk"])))
ne = choice(tuple(self.s27.endpoints() - set(["clk"])))
for n in [nr, ns, ne]:
input_val = sensitize(self.s27, n, {f"c0_{n}": True})
if not input_val:
import code
code.interact(local=dict(**globals(), **locals()))
result = sat(self.s27, input_val)
if not result[n]:
import code
code.interact(local=dict(**globals(), **locals()))
self.assertTrue(result[n])
if n in input_val:
input_val.pop(n)
c_f = cg.copy(self.s27)
c_f.disconnect(c_f.fanin(n), n)
c_f.set_type(n, "input")
result_f = sat(c_f, {**input_val, n: False})
self.assertFalse(result_f[n])
self.assertTrue(
any(result_f[e] != result[e] for e in self.s27.endpoints(n))
)
def test_signal_probability(self):
n = choice(
tuple(self.s27.nodes() - self.s27.startpoints() - self.s27.endpoints())
)
sp = self.s27.startpoints(n)
p = signal_probability(self.s27, n, approx=False)
m = 0
for vs in product([False, True], repeat=len(sp)):
asmp = {i: v for i, v in zip(sp, vs)}
m += cg.sat(self.s27, asmp)[n]
if m / (2 ** len(sp)) != p:
import code
code.interact(local=dict(globals(), **locals()))
self.assertEqual(m / (2 ** len(sp)), p)
| true | true |
f7242faaca258f687d6365a02303dbefc35f6202 | 2,619 | py | Python | azure-mgmt-recoveryservicesbackup/azure/mgmt/recoveryservicesbackup/models/azure_workload_sap_hana_restore_request.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 1 | 2018-07-23T08:59:24.000Z | 2018-07-23T08:59:24.000Z | azure-mgmt-recoveryservicesbackup/azure/mgmt/recoveryservicesbackup/models/azure_workload_sap_hana_restore_request.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 1 | 2018-11-29T14:46:42.000Z | 2018-11-29T14:46:42.000Z | azure-mgmt-recoveryservicesbackup/azure/mgmt/recoveryservicesbackup/models/azure_workload_sap_hana_restore_request.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 1 | 2018-08-28T14:36:47.000Z | 2018-08-28T14:36:47.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .restore_request import RestoreRequest
class AzureWorkloadSAPHanaRestoreRequest(RestoreRequest):
"""AzureWorkload SAP Hana-specific restore.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: AzureWorkloadSAPHanaPointInTimeRestoreRequest
All required parameters must be populated in order to send to Azure.
:param object_type: Required. Constant filled by server.
:type object_type: str
:param target_info: Details of target database
:type target_info:
~azure.mgmt.recoveryservicesbackup.models.TargetRestoreInfo
:param recovery_type: OLR/ALR, RestoreDisks is invalid option. Possible
values include: 'Invalid', 'OriginalLocation', 'AlternateLocation',
'RestoreDisks'
:type recovery_type: str or
~azure.mgmt.recoveryservicesbackup.models.RecoveryType
:param source_resource_id: Fully qualified ARM ID of the VM on which
workload that was running is being recovered.
:type source_resource_id: str
:param property_bag: Workload specific property bag.
:type property_bag: dict[str, str]
"""
_validation = {
'object_type': {'required': True},
}
_attribute_map = {
'object_type': {'key': 'objectType', 'type': 'str'},
'target_info': {'key': 'targetInfo', 'type': 'TargetRestoreInfo'},
'recovery_type': {'key': 'recoveryType', 'type': 'str'},
'source_resource_id': {'key': 'sourceResourceId', 'type': 'str'},
'property_bag': {'key': 'propertyBag', 'type': '{str}'},
}
_subtype_map = {
'object_type': {'AzureWorkloadSAPHanaPointInTimeRestoreRequest': 'AzureWorkloadSAPHanaPointInTimeRestoreRequest'}
}
def __init__(self, **kwargs):
super(AzureWorkloadSAPHanaRestoreRequest, self).__init__(**kwargs)
self.target_info = kwargs.get('target_info', None)
self.recovery_type = kwargs.get('recovery_type', None)
self.source_resource_id = kwargs.get('source_resource_id', None)
self.property_bag = kwargs.get('property_bag', None)
self.object_type = 'AzureWorkloadSAPHanaRestoreRequest'
| 41.571429 | 121 | 0.670485 |
from .restore_request import RestoreRequest
class AzureWorkloadSAPHanaRestoreRequest(RestoreRequest):
_validation = {
'object_type': {'required': True},
}
_attribute_map = {
'object_type': {'key': 'objectType', 'type': 'str'},
'target_info': {'key': 'targetInfo', 'type': 'TargetRestoreInfo'},
'recovery_type': {'key': 'recoveryType', 'type': 'str'},
'source_resource_id': {'key': 'sourceResourceId', 'type': 'str'},
'property_bag': {'key': 'propertyBag', 'type': '{str}'},
}
_subtype_map = {
'object_type': {'AzureWorkloadSAPHanaPointInTimeRestoreRequest': 'AzureWorkloadSAPHanaPointInTimeRestoreRequest'}
}
def __init__(self, **kwargs):
super(AzureWorkloadSAPHanaRestoreRequest, self).__init__(**kwargs)
self.target_info = kwargs.get('target_info', None)
self.recovery_type = kwargs.get('recovery_type', None)
self.source_resource_id = kwargs.get('source_resource_id', None)
self.property_bag = kwargs.get('property_bag', None)
self.object_type = 'AzureWorkloadSAPHanaRestoreRequest'
| true | true |
f7242fe5b59041f8406448a3ea74883747eb45c6 | 19,164 | py | Python | f5_openstack_agent/lbaasv2/drivers/bigip/listener_service.py | F5K-Jayson/f5-openstack-agent | f98f48266f029a719a2d446a06c741f0ed6653ff | [
"Apache-2.0"
] | null | null | null | f5_openstack_agent/lbaasv2/drivers/bigip/listener_service.py | F5K-Jayson/f5-openstack-agent | f98f48266f029a719a2d446a06c741f0ed6653ff | [
"Apache-2.0"
] | null | null | null | f5_openstack_agent/lbaasv2/drivers/bigip/listener_service.py | F5K-Jayson/f5-openstack-agent | f98f48266f029a719a2d446a06c741f0ed6653ff | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2014-2016 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from oslo_log import log as logging
from f5_openstack_agent.lbaasv2.drivers.bigip.disconnected_service import \
DisconnectedService
from f5_openstack_agent.lbaasv2.drivers.bigip import resource_helper
from f5_openstack_agent.lbaasv2.drivers.bigip import ssl_profile
from neutron_lbaas.services.loadbalancer import constants as lb_const
from requests import HTTPError
LOG = logging.getLogger(__name__)
class ListenerServiceBuilder(object):
    u"""Create LBaaS v2 Listener on BIG-IP®s.

    Translates LBaaS v2 listener objects into BIG-IP® virtual servers and
    manages their lifecycle (create, update, delete) across one or more
    BIG-IP® devices.
    """

    def __init__(self, service_adapter, cert_manager, parent_ssl_profile=None):
        # Helper that performs CRUD on BIG-IP virtual-server resources.
        self.vs_helper = resource_helper.BigIPResourceHelper(
            resource_helper.ResourceType.virtual)
        # Maps LBaaS service dicts onto BIG-IP resource definitions.
        self.service_adapter = service_adapter
        # Fetches certificate/key material for TLS-terminating listeners.
        self.cert_manager = cert_manager
        # Tracks Hierarchical Port Binding (disconnected network) state.
        self.disconnected_service = DisconnectedService()
        # Optional parent profile for created client-ssl profiles.
        self.parent_ssl_profile = parent_ssl_profile
        LOG.debug("ListenerServiceBuilder: using parent_ssl_profile %s ",
                  parent_ssl_profile)
    def create_listener(self, service, bigips):
        u"""Create listener on set of BIG-IP®s.

        Creates a BIG-IP® virtual server to represent an LBaaS Listener
        object.  If the listener terminates TLS, the associated client-ssl
        profiles are created and attached as well.

        :param service: Dictionary which contains both a listener
            and a load balancer definition.
        :param bigips: Array of BigIP class instances to create Listener on.
        """
        vip = self.service_adapter.get_virtual(service)
        tls = self.service_adapter.get_tls(service)
        if tls:
            # SSL profiles are named after the virtual server so they can
            # be located again on update/delete.
            tls['name'] = vip['name']
            tls['partition'] = vip['partition']
        service['listener']['operating_status'] = lb_const.ONLINE
        # Hierarchical Port Binding mode adjustments
        if not self.disconnected_service.is_service_connected(service) \
                and \
                not self.service_adapter.vip_on_common_network(service):
            # start the virtual server on a disconnected network if the neutron
            # network does not yet exist
            network_name = DisconnectedService.network_name
            vip['vlansEnabled'] = True
            vip.pop('vlansDisabled', None)
            vip['vlans'] = [
                '/%s/%s' % (vip['partition'], network_name)
            ]
            # strip out references to network pieces that don't yet exist
            vip.pop('sourceAddressTranslation', None)
            # the listener is offline until we have a real network
            service['listener']['operating_status'] = lb_const.OFFLINE
        network_id = service['loadbalancer']['network_id']
        for bigip in bigips:
            # Adjust the vip definition for this device's view of the
            # (possibly disconnected) network before creating.
            self.service_adapter.get_vlan(vip, bigip, network_id)
            try:
                self.vs_helper.create(bigip, vip)
            except HTTPError as err:
                if err.response.status_code == 409:
                    # 409 Conflict: virtual already exists -- treat the
                    # create as idempotent and continue.
                    LOG.debug("Virtual server already exists")
                else:
                    LOG.exception("Virtual server creation error: %s" %
                                  err.message)
                    raise
            if tls:
                self.add_ssl_profile(tls, bigip)
def get_listener(self, service, bigip):
u"""Retrieve BIG-IP® virtual from a single BIG-IP® system.
:param service: Dictionary which contains a both a listener
and load balancer definition.
:param bigip: Array of BigIP class instances to create Listener.
"""
vip = self.service_adapter.get_virtual_name(service)
obj = self.vs_helper.load(bigip=bigip,
name=vip["name"],
partition=vip["partition"])
return obj
def delete_listener(self, service, bigips):
u"""Delete Listener from a set of BIG-IP® systems.
Delete virtual server that represents a Listener object.
:param service: Dictionary which contains a both a listener
and load balancer definition.
:param bigips: Array of BigIP class instances to delete Listener.
"""
vip = self.service_adapter.get_virtual_name(service)
tls = self.service_adapter.get_tls(service)
if tls:
tls['name'] = vip['name']
tls['partition'] = vip['partition']
for bigip in bigips:
self.vs_helper.delete(bigip,
name=vip["name"],
partition=vip["partition"])
# delete ssl profiles
self.remove_ssl_profiles(tls, bigip)
def add_ssl_profile(self, tls, bigip):
# add profile to virtual server
vip = {'name': tls['name'],
'partition': tls['partition']}
if "default_tls_container_id" in tls:
container_ref = tls["default_tls_container_id"]
self.create_ssl_profile(
container_ref, bigip, vip, True)
if "sni_containers" in tls and tls["sni_containers"]:
for container in tls["sni_containers"]:
container_ref = container["tls_container_id"]
self.create_ssl_profile(container_ref, bigip, vip, False)
    def create_ssl_profile(self, container_ref, bigip, vip, sni_default=False):
        """Create a client-ssl profile from a certificate container.

        Fetches the certificate and private key from the certificate
        manager, uploads them to the BIG-IP® and creates a client-ssl
        profile, then attaches that profile to the virtual server.

        :param container_ref: Certificate container reference identifying
            the certificate/key pair.
        :param bigip: Single BigIP instance to update.
        :param vip: Dict with the virtual server's name and partition.
        :param sni_default: True if this profile is the SNI default.
        """
        cert = self.cert_manager.get_certificate(container_ref)
        key = self.cert_manager.get_private_key(container_ref)
        name = self.cert_manager.get_name(container_ref,
                                          self.service_adapter.prefix)
        try:
            # upload cert/key and create SSL profile
            ssl_profile.SSLProfileHelper.create_client_ssl_profile(
                bigip,
                name,
                cert,
                key,
                sni_default=sni_default,
                parent_profile=self.parent_ssl_profile)
        finally:
            # Drop local references to key material as soon as possible.
            del cert
            del key
        # add ssl profile to virtual server
        self._add_profile(vip, name, bigip, context='clientside')
def update_listener(self, service, bigips):
u"""Update Listener from a single BIG-IP® system.
Updates virtual servers that represents a Listener object.
:param service: Dictionary which contains a both a listener
and load balancer definition.
:param bigips: Array of BigIP class instances to update.
"""
vip = self.service_adapter.get_virtual(service)
for bigip in bigips:
self.vs_helper.update(bigip, vip)
def update_listener_pool(self, service, name, bigips):
"""Update virtual server's default pool attribute.
Sets the virutal server's pool attribute to the name of the
pool (or empty when deleting pool). For LBaaS, this should be
call when the pool is created.
:param service: Dictionary which contains a listener, pool,
and load balancer definition.
:param name: Name of pool (empty string to unset).
:param bigips: Array of BigIP class instances to update.
"""
vip = self.service_adapter.get_virtual_name(service)
if vip:
vip["pool"] = name
for bigip in bigips:
v = bigip.tm.ltm.virtuals.virtual
if v.exists(name=vip["name"], partition=vip["partition"]):
obj = v.load(name=vip["name"], partition=vip["partition"])
obj.modify(**vip)
    def update_session_persistence(self, service, bigips):
        """Update session persistence for virtual server.

        Handles setting persistence type and creating associated
        profiles if necessary.  This should be called when the pool is
        created, because LBaaS pools define persistence types, not
        listener objects.

        :param service: Dictionary which contains a listener, pool,
            and load balancer definition.
        :param bigips: Array of BigIP class instances to update.
        """
        pool = service["pool"]
        if "session_persistence" in pool and pool['session_persistence']:
            vip = self.service_adapter.get_virtual_name(service)
            persistence = pool['session_persistence']
            persistence_type = persistence['type']
            vip_persist = self.service_adapter.get_session_persistence(service)
            listener = service['listener']
            for bigip in bigips:
                # For TCP listeners, the fastL4 profile must be removed
                # before http/oneconnect profiles can be added.
                if listener['protocol'] == 'TCP':
                    self._remove_profile(vip, 'fastL4', bigip)
                # Standard virtual servers should already have these profiles,
                # but make sure they are in place for all virtual server types.
                self._add_profile(vip, 'http', bigip)
                self._add_profile(vip, 'oneconnect', bigip)
                # APP_COOKIE persistence needs an iRule keyed on the cookie.
                if persistence_type == 'APP_COOKIE' and \
                        'cookie_name' in persistence:
                    self._add_cookie_persist_rule(vip, persistence, bigip)
                # NOTE: profiles must be attached before persistence is set,
                # hence the update happens last in this loop iteration.
                self.vs_helper.update(bigip, vip_persist)
                LOG.debug("Set persist %s" % vip["name"])
def _add_profile(self, vip, profile_name, bigip, context='all'):
"""Add profile to virtual server instance. Assumes Common.
:param vip: Dictionary which contains name and partition of
virtual server.
:param profile_name: Name of profile to add.
:param bigip: Single BigIP instances to update.
"""
v = bigip.tm.ltm.virtuals.virtual
obj = v.load(name=vip["name"], partition=vip["partition"])
p = obj.profiles_s
profiles = p.get_collection()
# see if profile exists
for profile in profiles:
if profile.name == profile_name:
return
# not found -- add profile (assumes Common partition)
p.profiles.create(name=profile_name,
partition='Common',
context=context)
LOG.debug("Created profile %s" % profile_name)
def _add_cookie_persist_rule(self, vip, persistence, bigip):
"""Add cookie persist rules to virtual server instance.
:param vip: Dictionary which contains name and partition of
virtual server.
:param persistence: Persistence definition.
:param bigip: Single BigIP instances to update.
"""
cookie_name = persistence['cookie_name']
rule_def = self._create_app_cookie_persist_rule(cookie_name)
rule_name = 'app_cookie_' + vip['name']
r = bigip.tm.ltm.rules.rule
if not r.exists(name=rule_name, partition=vip["partition"]):
r.create(name=rule_name,
apiAnonymous=rule_def,
partition=vip["partition"])
LOG.debug("Created rule %s" % rule_name)
u = bigip.tm.ltm.persistence.universals.universal
if not u.exists(name=rule_name, partition=vip["partition"]):
u.create(name=rule_name,
rule=rule_name,
partition=vip["partition"])
LOG.debug("Created persistence universal %s" % rule_name)
def _create_app_cookie_persist_rule(self, cookiename):
"""Create cookie persistence rule.
:param cookiename: Name to substitute in rule.
"""
rule_text = "when HTTP_REQUEST {\n"
rule_text += " if { [HTTP::cookie " + str(cookiename)
rule_text += "] ne \"\" }{\n"
rule_text += " persist uie [string tolower [HTTP::cookie \""
rule_text += cookiename + "\"]] 3600\n"
rule_text += " }\n"
rule_text += "}\n\n"
rule_text += "when HTTP_RESPONSE {\n"
rule_text += " if { [HTTP::cookie \"" + str(cookiename)
rule_text += "\"] ne \"\" }{\n"
rule_text += " persist add uie [string tolower [HTTP::cookie \""
rule_text += cookiename + "\"]] 3600\n"
rule_text += " }\n"
rule_text += "}\n\n"
return rule_text
    def remove_session_persistence(self, service, bigips):
        """Reset persistence for virtual server instance.

        Clears persistence and deletes profiles.

        :param service: Dictionary which contains a listener, pool
        and load balancer definition.
        :param bigips: Single BigIP instances to update.
        """
        pool = service["pool"]
        if "session_persistence" in pool and pool['session_persistence']:
            # Blank out persistence settings on the virtual server payload.
            vip = self.service_adapter.get_virtual_name(service)
            vip["persist"] = []
            vip["fallbackPersistence"] = ""
            persistence = pool['session_persistence']
            persistence_type = persistence['type']
            listener = service["listener"]
            if listener['protocol'] == 'TCP':
                # Revert VS back to fastL4. Must do an update to replace
                # profiles instead of using add/remove profile. Leave http
                # profiles in place for non-TCP listeners.
                vip['profiles'] = ['/Common/fastL4']
            for bigip in bigips:
                # remove persistence (and revert profiles if TCP)
                self.vs_helper.update(bigip, vip)
                LOG.debug("Cleared session persistence for %s" % vip["name"])
                # remove profiles and rules
                if persistence_type == 'APP_COOKIE' and \
                        'cookie_name' in persistence:
                    self._remove_cookie_persist_rule(vip, bigip)
    def remove_ssl_profiles(self, tls, bigip):
        """Delete the client SSL profiles referenced by a TLS definition.

        Removes the profile for the default container (if any) and one
        profile per SNI container.

        :param tls: Dictionary with optional 'default_tls_container_id'
        and 'sni_containers' entries.
        :param bigip: Single BigIP instances to update.
        """
        if "default_tls_container_id" in tls and \
                tls["default_tls_container_id"]:
            container_ref = tls["default_tls_container_id"]
            # Profile name = service prefix + last path segment of the ref.
            i = container_ref.rindex("/") + 1
            name = self.service_adapter.prefix + container_ref[i:]
            self._remove_ssl_profile(name, bigip)
        if "sni_containers" in tls and tls["sni_containers"]:
            for container in tls["sni_containers"]:
                container_ref = container["tls_container_id"]
                i = container_ref.rindex("/") + 1
                name = self.service_adapter.prefix + container_ref[i:]
                self._remove_ssl_profile(name, bigip)
def _remove_ssl_profile(self, name, bigip):
"""Delete profile.
:param name: Name of profile to delete.
:param bigip: Single BigIP instances to update.
"""
try:
ssl_client_profile = bigip.tm.ltm.profile.client_ssls.client_ssl
if ssl_client_profile.exists(name=name, partition='Common'):
obj = ssl_client_profile.load(name=name, partition='Common')
obj.delete()
except Exception as err:
# Not necessarily an error -- profile might be referenced
# by another virtual server.
LOG.warn(
"Unable to delete profile %s. "
"Response message: %s." % (name, err.message))
def _remove_profile(self, vip, profile_name, bigip):
"""Delete profile.
:param vip: Dictionary which contains name and partition of
virtual server.
:param profile_name: Name of profile to delete.
:param bigip: Single BigIP instances to update.
"""
try:
v = bigip.tm.ltm.virtuals.virtual
obj = v.load(name=vip["name"], partition=vip["partition"])
p = obj.profiles_s
profiles = p.get_collection()
# see if profile exists
for profile in profiles:
if profile.name == profile_name:
pr = p.profiles.load(name=profile_name, partition='Common')
pr.delete()
LOG.debug("Deleted profile %s" % profile.name)
return
except Exception as err:
# Not necessarily an error -- profile might be referenced
# by another virtual server.
LOG.warn(
"Unable to delete profile %s. "
"Response message: %s." % (profile_name, err.message))
def _remove_cookie_persist_rule(self, vip, bigip):
"""Delete cookie persist rule.
:param vip: Dictionary which contains name and partition of
virtual server.
:param bigip: Single BigIP instances to update.
"""
rule_name = 'app_cookie_' + vip['name']
u = bigip.tm.ltm.persistence.universals.universal
if u.exists(name=rule_name, partition=vip["partition"]):
obj = u.load(name=rule_name, partition=vip["partition"])
obj.delete()
LOG.debug("Deleted persistence universal %s" % rule_name)
r = bigip.tm.ltm.rules.rule
if r.exists(name=rule_name, partition=vip["partition"]):
obj = r.load(name=rule_name, partition=vip["partition"])
obj.delete()
LOG.debug("Deleted rule %s" % rule_name)
def get_stats(self, service, bigips, stat_keys):
"""Return stat values for a single virtual.
Stats to collect are defined as an array of strings in input stats.
Values are summed across one or more BIG-IPs defined in input bigips.
:param service: Has listener name/partition
:param bigips: One or more BIG-IPs to get listener stats from.
:param stat_keys: Array of strings that define which stats to collect.
:return: A dict with key/value pairs for each stat defined in
input stats.
"""
collected_stats = {}
for stat_key in stat_keys:
collected_stats[stat_key] = 0
virtual = self.service_adapter.get_virtual(service)
part = virtual["partition"]
for bigip in bigips:
try:
vs_stats = self.vs_helper.get_stats(
bigip,
name=virtual["name"],
partition=part,
stat_keys=stat_keys)
for stat_key in stat_keys:
if stat_key in vs_stats:
collected_stats[stat_key] += vs_stats[stat_key]
except Exception as e:
# log error but continue on
LOG.error("Error getting virtual server stats: %s", e.message)
return collected_stats
| 41.212903 | 79 | 0.605615 |
from oslo_log import log as logging
from f5_openstack_agent.lbaasv2.drivers.bigip.disconnected_service import \
DisconnectedService
from f5_openstack_agent.lbaasv2.drivers.bigip import resource_helper
from f5_openstack_agent.lbaasv2.drivers.bigip import ssl_profile
from neutron_lbaas.services.loadbalancer import constants as lb_const
from requests import HTTPError
LOG = logging.getLogger(__name__)
class ListenerServiceBuilder(object):
def __init__(self, service_adapter, cert_manager, parent_ssl_profile=None):
self.cert_manager = cert_manager
self.disconnected_service = DisconnectedService()
self.parent_ssl_profile = parent_ssl_profile
self.vs_helper = resource_helper.BigIPResourceHelper(
resource_helper.ResourceType.virtual)
self.service_adapter = service_adapter
LOG.debug("ListenerServiceBuilder: using parent_ssl_profile %s ",
parent_ssl_profile)
def create_listener(self, service, bigips):
vip = self.service_adapter.get_virtual(service)
tls = self.service_adapter.get_tls(service)
if tls:
tls['name'] = vip['name']
tls['partition'] = vip['partition']
service['listener']['operating_status'] = lb_const.ONLINE
if not self.disconnected_service.is_service_connected(service) \
and \
not self.service_adapter.vip_on_common_network(service):
network_name = DisconnectedService.network_name
vip['vlansEnabled'] = True
vip.pop('vlansDisabled', None)
vip['vlans'] = [
'/%s/%s' % (vip['partition'], network_name)
]
vip.pop('sourceAddressTranslation', None)
# the listener is offline until we have a real network
service['listener']['operating_status'] = lb_const.OFFLINE
network_id = service['loadbalancer']['network_id']
for bigip in bigips:
self.service_adapter.get_vlan(vip, bigip, network_id)
try:
self.vs_helper.create(bigip, vip)
except HTTPError as err:
if err.response.status_code == 409:
LOG.debug("Virtual server already exists")
else:
LOG.exception("Virtual server creation error: %s" %
err.message)
raise
if tls:
self.add_ssl_profile(tls, bigip)
def get_listener(self, service, bigip):
vip = self.service_adapter.get_virtual_name(service)
obj = self.vs_helper.load(bigip=bigip,
name=vip["name"],
partition=vip["partition"])
return obj
def delete_listener(self, service, bigips):
vip = self.service_adapter.get_virtual_name(service)
tls = self.service_adapter.get_tls(service)
if tls:
tls['name'] = vip['name']
tls['partition'] = vip['partition']
for bigip in bigips:
self.vs_helper.delete(bigip,
name=vip["name"],
partition=vip["partition"])
# delete ssl profiles
self.remove_ssl_profiles(tls, bigip)
def add_ssl_profile(self, tls, bigip):
# add profile to virtual server
vip = {'name': tls['name'],
'partition': tls['partition']}
if "default_tls_container_id" in tls:
container_ref = tls["default_tls_container_id"]
self.create_ssl_profile(
container_ref, bigip, vip, True)
if "sni_containers" in tls and tls["sni_containers"]:
for container in tls["sni_containers"]:
container_ref = container["tls_container_id"]
self.create_ssl_profile(container_ref, bigip, vip, False)
def create_ssl_profile(self, container_ref, bigip, vip, sni_default=False):
cert = self.cert_manager.get_certificate(container_ref)
key = self.cert_manager.get_private_key(container_ref)
name = self.cert_manager.get_name(container_ref,
self.service_adapter.prefix)
try:
# upload cert/key and create SSL profile
ssl_profile.SSLProfileHelper.create_client_ssl_profile(
bigip,
name,
cert,
key,
sni_default=sni_default,
parent_profile=self.parent_ssl_profile)
finally:
del cert
del key
# add ssl profile to virtual server
self._add_profile(vip, name, bigip, context='clientside')
def update_listener(self, service, bigips):
vip = self.service_adapter.get_virtual(service)
for bigip in bigips:
self.vs_helper.update(bigip, vip)
def update_listener_pool(self, service, name, bigips):
vip = self.service_adapter.get_virtual_name(service)
if vip:
vip["pool"] = name
for bigip in bigips:
v = bigip.tm.ltm.virtuals.virtual
if v.exists(name=vip["name"], partition=vip["partition"]):
obj = v.load(name=vip["name"], partition=vip["partition"])
obj.modify(**vip)
def update_session_persistence(self, service, bigips):
pool = service["pool"]
if "session_persistence" in pool and pool['session_persistence']:
vip = self.service_adapter.get_virtual_name(service)
persistence = pool['session_persistence']
persistence_type = persistence['type']
vip_persist = self.service_adapter.get_session_persistence(service)
listener = service['listener']
for bigip in bigips:
# For TCP listeners, must remove fastL4 profile before adding
# adding http/oneconnect profiles.
if listener['protocol'] == 'TCP':
self._remove_profile(vip, 'fastL4', bigip)
# Standard virtual servers should already have these profiles,
# but make sure profiles in place for all virtual server types.
self._add_profile(vip, 'http', bigip)
self._add_profile(vip, 'oneconnect', bigip)
if persistence_type == 'APP_COOKIE' and \
'cookie_name' in persistence:
self._add_cookie_persist_rule(vip, persistence, bigip)
# profiles must be added before setting persistence
self.vs_helper.update(bigip, vip_persist)
LOG.debug("Set persist %s" % vip["name"])
def _add_profile(self, vip, profile_name, bigip, context='all'):
v = bigip.tm.ltm.virtuals.virtual
obj = v.load(name=vip["name"], partition=vip["partition"])
p = obj.profiles_s
profiles = p.get_collection()
# see if profile exists
for profile in profiles:
if profile.name == profile_name:
return
# not found -- add profile (assumes Common partition)
p.profiles.create(name=profile_name,
partition='Common',
context=context)
LOG.debug("Created profile %s" % profile_name)
def _add_cookie_persist_rule(self, vip, persistence, bigip):
cookie_name = persistence['cookie_name']
rule_def = self._create_app_cookie_persist_rule(cookie_name)
rule_name = 'app_cookie_' + vip['name']
r = bigip.tm.ltm.rules.rule
if not r.exists(name=rule_name, partition=vip["partition"]):
r.create(name=rule_name,
apiAnonymous=rule_def,
partition=vip["partition"])
LOG.debug("Created rule %s" % rule_name)
u = bigip.tm.ltm.persistence.universals.universal
if not u.exists(name=rule_name, partition=vip["partition"]):
u.create(name=rule_name,
rule=rule_name,
partition=vip["partition"])
LOG.debug("Created persistence universal %s" % rule_name)
def _create_app_cookie_persist_rule(self, cookiename):
rule_text = "when HTTP_REQUEST {\n"
rule_text += " if { [HTTP::cookie " + str(cookiename)
rule_text += "] ne \"\" }{\n"
rule_text += " persist uie [string tolower [HTTP::cookie \""
rule_text += cookiename + "\"]] 3600\n"
rule_text += " }\n"
rule_text += "}\n\n"
rule_text += "when HTTP_RESPONSE {\n"
rule_text += " if { [HTTP::cookie \"" + str(cookiename)
rule_text += "\"] ne \"\" }{\n"
rule_text += " persist add uie [string tolower [HTTP::cookie \""
rule_text += cookiename + "\"]] 3600\n"
rule_text += " }\n"
rule_text += "}\n\n"
return rule_text
def remove_session_persistence(self, service, bigips):
pool = service["pool"]
if "session_persistence" in pool and pool['session_persistence']:
vip = self.service_adapter.get_virtual_name(service)
vip["persist"] = []
vip["fallbackPersistence"] = ""
persistence = pool['session_persistence']
persistence_type = persistence['type']
listener = service["listener"]
if listener['protocol'] == 'TCP':
# Revert VS back to fastL4. Must do an update to replace
# profiles instead of using add/remove profile. Leave http
# profiles in place for non-TCP listeners.
vip['profiles'] = ['/Common/fastL4']
for bigip in bigips:
# remove persistence (and revert profiles if TCP)
self.vs_helper.update(bigip, vip)
LOG.debug("Cleared session persistence for %s" % vip["name"])
# remove profiles and rules
if persistence_type == 'APP_COOKIE' and \
'cookie_name' in persistence:
self._remove_cookie_persist_rule(vip, bigip)
def remove_ssl_profiles(self, tls, bigip):
if "default_tls_container_id" in tls and \
tls["default_tls_container_id"]:
container_ref = tls["default_tls_container_id"]
i = container_ref.rindex("/") + 1
name = self.service_adapter.prefix + container_ref[i:]
self._remove_ssl_profile(name, bigip)
if "sni_containers" in tls and tls["sni_containers"]:
for container in tls["sni_containers"]:
container_ref = container["tls_container_id"]
i = container_ref.rindex("/") + 1
name = self.service_adapter.prefix + container_ref[i:]
self._remove_ssl_profile(name, bigip)
def _remove_ssl_profile(self, name, bigip):
try:
ssl_client_profile = bigip.tm.ltm.profile.client_ssls.client_ssl
if ssl_client_profile.exists(name=name, partition='Common'):
obj = ssl_client_profile.load(name=name, partition='Common')
obj.delete()
except Exception as err:
# Not necessarily an error -- profile might be referenced
# by another virtual server.
LOG.warn(
"Unable to delete profile %s. "
"Response message: %s." % (name, err.message))
def _remove_profile(self, vip, profile_name, bigip):
try:
v = bigip.tm.ltm.virtuals.virtual
obj = v.load(name=vip["name"], partition=vip["partition"])
p = obj.profiles_s
profiles = p.get_collection()
# see if profile exists
for profile in profiles:
if profile.name == profile_name:
pr = p.profiles.load(name=profile_name, partition='Common')
pr.delete()
LOG.debug("Deleted profile %s" % profile.name)
return
except Exception as err:
# Not necessarily an error -- profile might be referenced
# by another virtual server.
LOG.warn(
"Unable to delete profile %s. "
"Response message: %s." % (profile_name, err.message))
def _remove_cookie_persist_rule(self, vip, bigip):
rule_name = 'app_cookie_' + vip['name']
u = bigip.tm.ltm.persistence.universals.universal
if u.exists(name=rule_name, partition=vip["partition"]):
obj = u.load(name=rule_name, partition=vip["partition"])
obj.delete()
LOG.debug("Deleted persistence universal %s" % rule_name)
r = bigip.tm.ltm.rules.rule
if r.exists(name=rule_name, partition=vip["partition"]):
obj = r.load(name=rule_name, partition=vip["partition"])
obj.delete()
LOG.debug("Deleted rule %s" % rule_name)
def get_stats(self, service, bigips, stat_keys):
collected_stats = {}
for stat_key in stat_keys:
collected_stats[stat_key] = 0
virtual = self.service_adapter.get_virtual(service)
part = virtual["partition"]
for bigip in bigips:
try:
vs_stats = self.vs_helper.get_stats(
bigip,
name=virtual["name"],
partition=part,
stat_keys=stat_keys)
for stat_key in stat_keys:
if stat_key in vs_stats:
collected_stats[stat_key] += vs_stats[stat_key]
except Exception as e:
# log error but continue on
LOG.error("Error getting virtual server stats: %s", e.message)
return collected_stats
| true | true |
f7243021c6bc2b2b7e4fce423c5f1bc7c762d9c5 | 4,057 | bzl | Python | build_defs/kotlin_native/build_defs.bzl | bayesianmind/arcs | 2808acd1a30b77142c8c75c62880cb36930743c5 | [
"BSD-Source-Code"
] | null | null | null | build_defs/kotlin_native/build_defs.bzl | bayesianmind/arcs | 2808acd1a30b77142c8c75c62880cb36930743c5 | [
"BSD-Source-Code"
] | null | null | null | build_defs/kotlin_native/build_defs.bzl | bayesianmind/arcs | 2808acd1a30b77142c8c75c62880cb36930743c5 | [
"BSD-Source-Code"
] | null | null | null | load("//build_defs/kotlin_native:repo.bzl", "get_dependencies")
# Provider carrying the transitive set of Kotlin/Native libraries (.klib
# files) that a target exposes to its dependents.
KtNativeInfo = provider(
    doc = "The minimum info about a Kotlin/Native dependency",
    fields = dict(
        klibraries = "Depset of klib files to compile against.",
    ),
)
def _common_args(ctx, klibs):
    """Builds the kotlinc argument list shared by the binary and library rules."""
    args = ctx.actions.args()

    # First argument: per-platform dependency names for the wrapper script.
    # Platforms are separated by '|', names within a platform by ','.
    platform_deps = []
    for platform in ["windows", "macos", "linux"]:
        names = [name for name, _ in get_dependencies(platform)]
        platform_deps.append(",".join(names))
    args.add("|".join(platform_deps))

    # kotlinc flags: Wasm target, optimized build, and no implicit linking
    # of the dist/klib default libraries.
    args.add_all(["-target", "wasm32", "-opt", "-nodefaultlibs"])
    args.add_all(klibs, before_each = "-l")
    args.add_all(ctx.attr.kotlincopts)
    args.add_all(ctx.files.srcs)
    return args
def _collect_deps(srcs, deps):
    """Returns (source depset, transitive klib depset) for the given attrs."""
    transitive_klibs = [dep[KtNativeInfo].klibraries for dep in deps]
    return depset(srcs), depset(transitive = transitive_klibs)
def _kt_wasm_binary(ctx):
    """Rule implementation: compile Kotlin sources to a wasm32 binary."""
    srcs_deps, klibs = _collect_deps(
        srcs = ctx.files.srcs,
        deps = ctx.attr.deps,
    )
    args = _common_args(ctx, klibs)
    if ctx.attr.entry_point:
        args.add("-e", ctx.attr.entry_point)
    # kotlinc derives both declared outputs (.wasm and .wasm.js) from this
    # extension-less base path.
    args.add("-o", ctx.outputs.wasm.path.replace(".wasm", ""))
    ctx.actions.run(
        progress_message = "Compiling Kotlin to WebAssembly: %s" % ctx.label.name,
        inputs = depset(transitive = [srcs_deps, klibs]),
        outputs = [ctx.outputs.wasm, ctx.outputs.js],
        arguments = [args],
        executable = ctx.executable.kotlinc_wrapper,
    )
# Compiles Kotlin sources (plus kt_wasm_library deps) into a wasm32 binary.
# Declares two outputs: <name>.wasm and the companion <name>.wasm.js file.
kt_wasm_binary = rule(
    attrs = {
        "srcs": attr.label_list(
            allow_files = True,
            allow_empty = True,
        ),
        "deps": attr.label_list(providers = [KtNativeInfo]),
        "kotlinc_wrapper": attr.label(
            default = Label("//build_defs/kotlin_native:kotlinc_wrapper"),
            executable = True,
            cfg = "host",
        ),
        "entry_point": attr.string(
            default = "arcs.sdk.main",
            doc = "Specify the entrypoint (path to main function) for the binary. For example, `arcs.sdk.main`.",
        ),
        "kotlincopts": attr.string_list(
            doc = """Flags to pass to kotlinc compiler.""",
            default = [],
        ),
    },
    doc = "Builds a Wasm binary from Kotlin",
    outputs = {
        "wasm": "%{name}.wasm",
        "js": "%{name}.wasm.js",
    },
    implementation = _kt_wasm_binary,
)
def _kt_wasm_library(ctx):
    """Rule implementation: compile Kotlin sources into a wasm32 klib."""
    srcs_deps, klibs = _collect_deps(
        srcs = ctx.files.srcs,
        deps = ctx.attr.deps,
    )
    args = _common_args(ctx, klibs)
    args.add("-produce", "library")
    args.add("-o", ctx.outputs.klib.path.replace(".klib", ""))
    ctx.actions.run(
        progress_message = "Building a Kotlin Library with WebAssembly target: %s" % ctx.label.name,
        inputs = depset(transitive = [srcs_deps, klibs]),
        outputs = [ctx.outputs.klib],
        arguments = [args],
        executable = ctx.executable.kotlinc_wrapper,
    )
    # Expose this klib plus all transitive klibs to dependents; preorder so
    # the direct output comes first.
    return [KtNativeInfo(klibraries = depset(order = "preorder", direct = [ctx.outputs.klib], transitive = [klibs]))]
# Compiles Kotlin sources into a wasm32 Kotlin/Native library (<name>.klib)
# and forwards it, together with transitive klibs, via KtNativeInfo.
kt_wasm_library = rule(
    attrs = {
        "srcs": attr.label_list(
            allow_files = True,
            allow_empty = True,
        ),
        "deps": attr.label_list(providers = [KtNativeInfo]),
        "kotlinc_wrapper": attr.label(
            default = Label("//build_defs/kotlin_native:kotlinc_wrapper"),
            executable = True,
            cfg = "host",
        ),
        "kotlincopts": attr.string_list(
            doc = """Flags to pass to kotlinc compiler.""",
            default = [],
        ),
    },
    doc = "Builds a Wasm library (klib) from Kotlin files",
    outputs = {
        "klib": "%{name}.klib",
    },
    implementation = _kt_wasm_library,
)
| 29.398551 | 117 | 0.585408 | load("//build_defs/kotlin_native:repo.bzl", "get_dependencies")
KtNativeInfo = provider(
doc = "The minimum info about a Kotlin/Native dependency",
fields = dict(
klibraries = "Depset of klib files to compile against.",
),
)
def _common_args(ctx, klibs):
args = ctx.actions.args()
args.add("|".join([
",".join([name for name, _ in get_dependencies(target)])
for target in ["windows", "macos", "linux"]
]))
args.add_all([
"-target",
"wasm32",
"-opt",
"-nodefaultlibs",
])
args.add_all(klibs, before_each = "-l")
args.add_all(ctx.attr.kotlincopts)
args.add_all(ctx.files.srcs)
return args
def _collect_deps(srcs, deps):
srcs_depset = depset(srcs)
klib_depset = depset(
transitive = [dep[KtNativeInfo].klibraries for dep in deps],
)
return srcs_depset, klib_depset
def _kt_wasm_binary(ctx):
srcs_deps, klibs = _collect_deps(
srcs = ctx.files.srcs,
deps = ctx.attr.deps,
)
args = _common_args(ctx, klibs)
if ctx.attr.entry_point:
args.add("-e", ctx.attr.entry_point)
args.add("-o", ctx.outputs.wasm.path.replace(".wasm", ""))
ctx.actions.run(
progress_message = "Compiling Kotlin to WebAssembly: %s" % ctx.label.name,
inputs = depset(transitive = [srcs_deps, klibs]),
outputs = [ctx.outputs.wasm, ctx.outputs.js],
arguments = [args],
executable = ctx.executable.kotlinc_wrapper,
)
kt_wasm_binary = rule(
attrs = {
"srcs": attr.label_list(
allow_files = True,
allow_empty = True,
),
"deps": attr.label_list(providers = [KtNativeInfo]),
"kotlinc_wrapper": attr.label(
default = Label("//build_defs/kotlin_native:kotlinc_wrapper"),
executable = True,
cfg = "host",
),
"entry_point": attr.string(
default = "arcs.sdk.main",
doc = "Specify the entrypoint (path to main function) for the binary. For example, `arcs.sdk.main`.",
),
"kotlincopts": attr.string_list(
doc = """Flags to pass to kotlinc compiler.""",
default = [],
),
},
doc = "Builds a Wasm binary from Kotlin",
outputs = {
"wasm": "%{name}.wasm",
"js": "%{name}.wasm.js",
},
implementation = _kt_wasm_binary,
)
def _kt_wasm_library(ctx):
srcs_deps, klibs = _collect_deps(
srcs = ctx.files.srcs,
deps = ctx.attr.deps,
)
args = _common_args(ctx, klibs)
args.add("-produce", "library")
args.add("-o", ctx.outputs.klib.path.replace(".klib", ""))
ctx.actions.run(
progress_message = "Building a Kotlin Library with WebAssembly target: %s" % ctx.label.name,
inputs = depset(transitive = [srcs_deps, klibs]),
outputs = [ctx.outputs.klib],
arguments = [args],
executable = ctx.executable.kotlinc_wrapper,
)
return [KtNativeInfo(klibraries = depset(order = "preorder", direct = [ctx.outputs.klib], transitive = [klibs]))]
kt_wasm_library = rule(
attrs = {
"srcs": attr.label_list(
allow_files = True,
allow_empty = True,
),
"deps": attr.label_list(providers = [KtNativeInfo]),
"kotlinc_wrapper": attr.label(
default = Label("//build_defs/kotlin_native:kotlinc_wrapper"),
executable = True,
cfg = "host",
),
"kotlincopts": attr.string_list(
doc = """Flags to pass to kotlinc compiler.""",
default = [],
),
},
doc = "Builds a Wasm library (klib) from Kotlin files",
outputs = {
"klib": "%{name}.klib",
},
implementation = _kt_wasm_library,
)
| true | true |
f7243050540f676f74a71bf7c7821524843a1e15 | 25,625 | py | Python | experiments/tmhmm3/tm_models.py | PTPeraire/openprotein | 3f6ede8c63d18f14e938bd47935001a82c4d6897 | [
"MIT"
] | null | null | null | experiments/tmhmm3/tm_models.py | PTPeraire/openprotein | 3f6ede8c63d18f14e938bd47935001a82c4d6897 | [
"MIT"
] | null | null | null | experiments/tmhmm3/tm_models.py | PTPeraire/openprotein | 3f6ede8c63d18f14e938bd47935001a82c4d6897 | [
"MIT"
] | null | null | null | """
This file is part of the OpenProtein project.
For license information, please see the LICENSE file in the root directory.
"""
import sys
from enum import Enum
import glob
import pickle
import numpy as np
import torch
import torch.autograd as autograd
import torch.nn as nn
import openprotein
from experiments.tmhmm3.tm_util import label_list_to_topology
from experiments.tmhmm3.tm_util import get_predicted_type_from_labels
from experiments.tmhmm3.tm_util import remapped_labels_hmm_to_orginal_labels
from experiments.tmhmm3.tm_util import is_topologies_equal
from experiments.tmhmm3.tm_util import original_labels_to_fasta
from pytorchcrf.torchcrf import CRF
from util import write_out, get_experiment_id
# seed random generator for reproducibility
torch.manual_seed(1)
class TMHMM3(openprotein.BaseModel):
    def __init__(self,
                 embedding,
                 hidden_size,
                 use_gpu,
                 model_mode,
                 use_marg_prob,
                 type_predictor_model,
                 profile_path):
        """Build the bi-LSTM + CRF topology predictor.

        :param embedding: input encoding scheme ("BLOSUM62", "PROFILE", ...).
        :param hidden_size: LSTM hidden size per direction.
        :param use_gpu: move parameters and tensors to CUDA when True.
        :param model_mode: TMHMM3Mode value selecting the CRF state layout.
        :param use_marg_prob: stored flag for marginal-probability decoding.
        :param type_predictor_model: model used as the type classifier.
        :param profile_path: directory of pickled profiles (PROFILE embedding).
        """
        super(TMHMM3, self).__init__(embedding, use_gpu)
        # initialize model variables
        num_tags = 5
        num_labels = 5
        self.max_signal_length = 67
        if model_mode == TMHMM3Mode.LSTM_CRF_HMM:
            # Expand the tag set with HMM-style sub-states: two blocks of 40
            # plus max_signal_length signal states (their emissions are tiled
            # from columns 0/1/2 in _get_network_emissions).
            num_tags += 2 * 40 + self.max_signal_length
        elif model_mode == TMHMM3Mode.LSTM_CRF_MARG:
            num_tags = num_tags * 4 # 4 different types
            # num_labels = num_tags # 4 different types
        self.hidden_size = hidden_size
        self.use_gpu = use_gpu
        self.use_marg_prob = use_marg_prob
        self.model_mode = model_mode
        self.embedding = embedding
        self.profile_path = profile_path
        self.bi_lstm = nn.LSTM(self.get_embedding_size(),
                               self.hidden_size,
                               num_layers=1,
                               bidirectional=True)
        self.hidden_to_labels = nn.Linear(self.hidden_size * 2, num_labels) # * 2 for bidirectional
        self.hidden_layer = None
        # Mask convention below: 1 = disallowed, 0 = allowed (entries are
        # zeroed for permitted start/end tags and transitions).
        crf_start_mask = torch.ones(num_tags).byte()
        crf_end_mask = torch.ones(num_tags).byte()
        if model_mode == TMHMM3Mode.LSTM_CRF_HMM:
            allowed_transitions = [
                (3, 3), (4, 4),
                (3, 5), (4, 45)]
            # Chains through the expanded sub-state blocks (5..44, 45..84)
            # with shortcut entries from states 8 and 48 respectively.
            for i in range(5, 45 - 1):
                allowed_transitions.append((i, i + 1))
                if 8 < i < 43:
                    allowed_transitions.append((8, i))
            allowed_transitions.append((44, 4))
            for i in range(45, 85 - 1):
                allowed_transitions.append((i, i + 1))
                if 48 < i < 83:
                    allowed_transitions.append((48, i))
            allowed_transitions.append((84, 3))
            # Signal-state chain (85..151), reachable from state 2.
            for i in range(85, 151):
                allowed_transitions.append((i, i + 1))
                allowed_transitions.append((2, i))
            allowed_transitions.append((2, 151))
            allowed_transitions.append((2, 4))
            allowed_transitions.append((151, 4))
            crf_start_mask[2] = 0
            crf_start_mask[3] = 0
            crf_start_mask[4] = 0
            crf_end_mask[3] = 0
            crf_end_mask[4] = 0
        elif model_mode == TMHMM3Mode.LSTM_CRF_MARG:
            # Four disjoint 5-state groups (one per protein type).
            allowed_transitions = [
                (0, 0), (1, 1), (3, 3), (4, 4), (3, 0), (0, 4), (4, 1), (1, 3),
                (5, 5), (6, 6), (7, 7), (8, 8), (9, 9), (8, 5), (5, 9), (9, 6), (6, 8), (7, 9),
                (12, 12), (14, 14), (12, 14),
                (18, 18),
            ]
            crf_start_mask[3] = 0
            crf_start_mask[4] = 0
            crf_start_mask[7] = 0
            crf_start_mask[8] = 0
            crf_start_mask[9] = 0
            crf_start_mask[12] = 0
            crf_start_mask[18] = 0
            crf_end_mask[3] = 0
            crf_end_mask[4] = 0
            crf_end_mask[8] = 0
            crf_end_mask[9] = 0
            crf_end_mask[14] = 0
            crf_end_mask[18] = 0
        else:
            # Plain 5-state CRF.
            allowed_transitions = [
                (0, 0), (1, 1), (2, 2), (3, 3), (4, 4),
                (3, 0), (0, 4), (4, 1), (1, 3), (2, 4)]
            crf_start_mask[2] = 0
            crf_start_mask[3] = 0
            crf_start_mask[4] = 0
            crf_end_mask[3] = 0
            crf_end_mask[4] = 0
        self.allowed_transitions = allowed_transitions
        self.crf_model = CRF(num_tags)
        self.type_classifier = type_predictor_model
        self.type_tm_classier = None
        self.type_sp_classier = None
        crf_transitions_mask = torch.ones((num_tags, num_tags)).byte()
        self.label_01loss_values = []
        self.type_01loss_values = []
        self.topology_01loss_values = []
        # if on GPU, move state to GPU memory
        if self.use_gpu:
            self.crf_model = self.crf_model.cuda()
            self.bi_lstm = self.bi_lstm.cuda()
            self.hidden_to_labels = self.hidden_to_labels.cuda()
            crf_transitions_mask = crf_transitions_mask.cuda()
            crf_start_mask = crf_start_mask.cuda()
            crf_end_mask = crf_end_mask.cuda()
        # compute mask matrix from allow transitions list
        for i in range(num_tags):
            for k in range(num_tags):
                if (i, k) in self.allowed_transitions:
                    crf_transitions_mask[i][k] = 0
        # generate masked transition parameters
        crf_start_transitions, crf_end_transitions, crf_transitions = \
            generate_masked_crf_transitions(
                self.crf_model, (crf_start_mask, crf_transitions_mask, crf_end_mask)
            )
        # initialize CRF
        initialize_crf_parameters(self.crf_model,
                                  start_transitions=crf_start_transitions,
                                  end_transitions=crf_end_transitions,
                                  transitions=crf_transitions)
def get_embedding_size(self):
if self.embedding == "BLOSUM62":
return 24 # bloom matrix has size 24
elif self.embedding == "PROFILE":
return 51 # protein profiles have size 51
    def flatten_parameters(self):
        """Delegate to the bidirectional LSTM's flatten_parameters()."""
        self.bi_lstm.flatten_parameters()
    def encode_amino_acid(self, letter):
        """Encode a single amino-acid letter under the configured embedding.

        Returns a 24-value BLOSUM62 row, a 24-value one-hot vector, or an
        integer vocabulary index for the "PYTORCH" embedding.
        """
        if self.embedding == "BLOSUM62":
            # blosum encoding -- the row lookup table is built lazily once
            # and cached in a module-level global.
            if not globals().get('blosum_encoder'):
                blosum = \
                    """4,-1,-2,-2,0,-1,-1,0,-2,-1,-1,-1,-1,-2,-1,1,0,-3,-2,0,-2,-1,0,-4
                    -1,5,0,-2,-3,1,0,-2,0,-3,-2,2,-1,-3,-2,-1,-1,-3,-2,-3,-1,0,-1,-4
                    -2,0,6,1,-3,0,0,0,1,-3,-3,0,-2,-3,-2,1,0,-4,-2,-3,3,0,-1,-4
                    -2,-2,1,6,-3,0,2,-1,-1,-3,-4,-1,-3,-3,-1,0,-1,-4,-3,-3,4,1,-1,-4
                    0,-3,-3,-3,9,-3,-4,-3,-3,-1,-1,-3,-1,-2,-3,-1,-1,-2,-2,-1,-3,-3,-2,-4
                    -1,1,0,0,-3,5,2,-2,0,-3,-2,1,0,-3,-1,0,-1,-2,-1,-2,0,3,-1,-4
                    -1,0,0,2,-4,2,5,-2,0,-3,-3,1,-2,-3,-1,0,-1,-3,-2,-2,1,4,-1,-4
                    0,-2,0,-1,-3,-2,-2,6,-2,-4,-4,-2,-3,-3,-2,0,-2,-2,-3,-3,-1,-2,-1,-4
                    -2,0,1,-1,-3,0,0,-2,8,-3,-3,-1,-2,-1,-2,-1,-2,-2,2,-3,0,0,-1,-4
                    -1,-3,-3,-3,-1,-3,-3,-4,-3,4,2,-3,1,0,-3,-2,-1,-3,-1,3,-3,-3,-1,-4
                    -1,-2,-3,-4,-1,-2,-3,-4,-3,2,4,-2,2,0,-3,-2,-1,-2,-1,1,-4,-3,-1,-4
                    -1,2,0,-1,-3,1,1,-2,-1,-3,-2,5,-1,-3,-1,0,-1,-3,-2,-2,0,1,-1,-4
                    -1,-1,-2,-3,-1,0,-2,-3,-2,1,2,-1,5,0,-2,-1,-1,-1,-1,1,-3,-1,-1,-4
                    -2,-3,-3,-3,-2,-3,-3,-3,-1,0,0,-3,0,6,-4,-2,-2,1,3,-1,-3,-3,-1,-4
                    -1,-2,-2,-1,-3,-1,-1,-2,-2,-3,-3,-1,-2,-4,7,-1,-1,-4,-3,-2,-2,-1,-2,-4
                    1,-1,1,0,-1,0,0,0,-1,-2,-2,0,-1,-2,-1,4,1,-3,-2,-2,0,0,0,-4
                    0,-1,0,-1,-1,-1,-1,-2,-2,-1,-1,-1,-1,-2,-1,1,5,-2,-2,0,-1,-1,0,-4
                    -3,-3,-4,-4,-2,-2,-3,-2,-2,-3,-2,-3,-1,1,-4,-3,-2,11,2,-3,-4,-3,-2,-4
                    -2,-2,-2,-3,-2,-1,-2,-3,2,-1,-1,-2,-1,3,-3,-2,-2,2,7,-1,-3,-2,-1,-4
                    0,-3,-3,-3,-1,-2,-2,-3,-3,3,1,-2,1,-1,-2,-2,0,-3,-1,4,-3,-2,-1,-4
                    -2,-1,3,4,-3,0,1,-1,0,-3,-4,0,-3,-3,-2,0,-1,-4,-3,-3,4,1,-1,-4
                    -1,0,0,1,-3,3,4,-2,0,-3,-3,1,-1,-3,-1,0,-1,-3,-2,-2,1,4,-1,-4
                    0,-1,-1,-1,-2,-1,-1,-1,-1,-1,-1,-1,-1,-1,-2,0,0,-2,-1,-1,-1,-1,-1,-4
                    -4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,1""" \
                    .replace('\n', ',')
                # np.fromstring(sep=",") tolerates the indentation whitespace
                # embedded in the triple-quoted literal above.
                blosum_matrix = np.fromstring(blosum, sep=",").reshape(24, 24)
                blosum_key = "A,R,N,D,C,Q,E,G,H,I,L,K,M,F,P,S,T,W,Y,V,B,Z,X,U".split(",")
                key_map = {}
                for idx, value in enumerate(blosum_key):
                    key_map[value] = list([int(v) for v in blosum_matrix[idx].astype('int')])
                globals().__setitem__("blosum_encoder", key_map)
            return globals().get('blosum_encoder')[letter]
        elif self.embedding == "ONEHOT":
            # one hot encoding
            one_hot_key = "A,R,N,D,C,Q,E,G,H,I,L,K,M,F,P,S,T,W,Y,V,B,Z,X,U".split(",")
            arr = []
            for idx, k in enumerate(one_hot_key):
                if k == letter:
                    arr.append(1)
                else:
                    arr.append(0)
            return arr
        elif self.embedding == "PYTORCH":
            # vocabulary index (position in the 24-letter alphabet)
            key_id = "A,R,N,D,C,Q,E,G,H,I,L,K,M,F,P,S,T,W,Y,V,B,Z,X,U".split(",")
            for idx, k in enumerate(key_id):
                if k == letter:
                    return idx
def embed(self, prot_aa_list):
embed_list = []
for aa_list in prot_aa_list:
if self.embedding == "PYTORCH":
tensor = torch.LongTensor(tensor)
elif self.embedding == "PROFILE":
if not globals().get('profile_encoder'):
print("Load profiles...")
files = glob.glob(self.profile_path.strip("/") + "/*")
profile_dict = {}
for profile_file in files:
profile = pickle.load(open(profile_file, "rb")).popitem()[1]
profile_dict[profile["seq"]] = torch.from_numpy(profile["profile"]).float()
globals().__setitem__("profile_encoder", profile_dict)
print("Loaded profiles")
tensor = globals().get('profile_encoder')[aa_list]
else:
tensor = list([self.encode_amino_acid(aa) for aa in aa_list])
tensor = torch.FloatTensor(tensor)
if self.use_gpu:
tensor = tensor.cuda()
embed_list.append(tensor)
return embed_list
def init_hidden(self, minibatch_size):
    """Reset the LSTM state to zeros for a new minibatch.

    The state shape is (num_layers * num_directions, batch, hidden) —
    here 1 layer * 2 directions for the bidirectional LSTM.
    """
    state_shape = (1 * 2, minibatch_size, self.hidden_size)
    h0 = torch.zeros(*state_shape)
    c0 = torch.zeros(*state_shape)
    if self.use_gpu:
        h0 = h0.cuda()
        c0 = c0.cuda()
    self.hidden_layer = (autograd.Variable(h0), autograd.Variable(c0))
def _get_network_emissions(self, input_sequences):
    """Run the bi-LSTM over a minibatch and produce per-position emission scores.

    :param input_sequences: list of per-protein embedding tensors of shape
        (length, embed_dim) — lengths may differ; padding is applied here.
    :return: (emissions, batch_sizes) — emissions has shape
        (max_len, batch, num_tags); batch_sizes holds each true length.
    """
    # True (unpadded) length of every sequence in the minibatch.
    batch_sizes = torch.LongTensor(list([i.size(0) for i in input_sequences]))
    pad_seq_embed = torch.nn.utils.rnn.pad_sequence(input_sequences)
    minibatch_size = len(input_sequences)
    self.init_hidden(minibatch_size)
    bi_lstm_out, self.hidden_layer = self.bi_lstm(pad_seq_embed, self.hidden_layer)
    emissions = self.hidden_to_labels(bi_lstm_out)
    if self.model_mode == TMHMM3Mode.LSTM_CRF_HMM:
        # Expand the 5 base-label scores to the enlarged HMM-style tag set:
        # every expanded membrane/signal state reuses the emission of its base
        # label (0 = in->out membrane, 1 = out->in membrane, 2 = signal).
        inout_select = torch.LongTensor([0])
        outin_select = torch.LongTensor([1])
        signal_select = torch.LongTensor([2])
        if self.use_gpu:
            inout_select = inout_select.cuda()
            outin_select = outin_select.cuda()
            signal_select = signal_select.cuda()
        inout = torch.index_select(emissions, 2, autograd.Variable(inout_select))
        outin = torch.index_select(emissions, 2, autograd.Variable(outin_select))
        signal = torch.index_select(emissions, 2, autograd.Variable(signal_select))
        # 40 replicated states per membrane direction plus max_signal_length
        # signal states, appended after the 5 base labels.
        emissions = torch.cat((emissions, inout.expand(-1, len(batch_sizes), 40),
                               outin.expand(-1, len(batch_sizes), 40),
                               signal.expand(-1, len(batch_sizes), self.max_signal_length)), 2)
    elif self.model_mode == TMHMM3Mode.LSTM_CRF_MARG:
        # One copy of the 5 labels per protein type (4 types total).
        emissions = emissions.repeat(1, 1, 4)
    return emissions, batch_sizes
def batch_sizes_to_mask(self, batch_sizes):
    """Build a (max_len, batch) byte mask: 1 inside a sequence, 0 in padding.

    Assumes batch_sizes[0] is the longest sequence (descending sort order).
    """
    max_len = int(batch_sizes[0])
    rows = []
    for batch_size in batch_sizes:
        valid = int(batch_size)
        rows.append([1] * valid + [0] * (max_len - valid))
    mask = torch.autograd.Variable(torch.t(torch.ByteTensor(rows)))
    if self.use_gpu:
        mask = mask.cuda()
    return mask
def compute_loss(self, training_minibatch):
    """Compute the training loss for one minibatch.

    Plain LSTM mode uses masked token-level negative log-likelihood;
    the CRF variants use the (negated, per-protein averaged) CRF likelihood.
    """
    _, labels_list, remapped_labels_list_crf_hmm, remapped_labels_list_crf_marg, \
    _prot_type_list, _prot_topology_list, _prot_name_list, original_aa_string, \
    _original_label_string = training_minibatch
    minibatch_size = len(labels_list)
    # Each model mode trains against its own label remapping.
    if self.model_mode == TMHMM3Mode.LSTM_CRF_MARG:
        labels_to_use = remapped_labels_list_crf_marg
    elif self.model_mode == TMHMM3Mode.LSTM_CRF_HMM:
        labels_to_use = remapped_labels_list_crf_hmm
    else:
        labels_to_use = labels_list
    input_sequences = [autograd.Variable(x) for x in self.embed(original_aa_string)]
    actual_labels = torch.nn.utils.rnn.pad_sequence([autograd.Variable(l)
                                                     for l in labels_to_use])
    emissions, batch_sizes = self._get_network_emissions(input_sequences)
    if self.model_mode == TMHMM3Mode.LSTM:
        # Flatten (len, batch) into one prediction per position, then gather
        # the log-probability of the gold label at each position.
        prediction = emissions.transpose(0, 1).contiguous().view(-1, emissions.size(-1))
        target = actual_labels.transpose(0, 1).contiguous().view(-1, 1)
        losses = -torch.gather(nn.functional.log_softmax(prediction),
                               dim=1, index=target).view(*actual_labels
                                                         .transpose(0, 1).size())
        # NOTE(review): torch.range is deprecated (inclusive endpoint);
        # torch.arange(0, max_len) would be the modern equivalent.
        mask_expand = torch.range(0, batch_sizes.data.max() - 1).long() \
            .unsqueeze(0).expand(batch_sizes.size(0), batch_sizes.data.max())
        if self.use_gpu:
            mask_expand = mask_expand.cuda()
            batch_sizes = batch_sizes.cuda()
        # Zero out positions past each sequence's true length before averaging.
        mask = mask_expand < batch_sizes.unsqueeze(1).expand_as(mask_expand)
        loss = (losses * mask.float()).sum() / batch_sizes.float().sum()
    else:
        mask = (self.batch_sizes_to_mask(batch_sizes))
        loss = -1 * self.crf_model(emissions, actual_labels, mask=mask) / minibatch_size
        if float(loss) > 100000:  # if loss is this large, an invalid tx must have been found
            # Debug aid: dump labels and abort on the first label transition
            # that the CRF transition constraints do not allow.
            for idx, batch_size in enumerate(batch_sizes):
                last_label = None
                for i in range(batch_size):
                    label = int(actual_labels[i][idx])
                    write_out(str(label) + ",", end='')
                    if last_label is not None and (last_label, label) \
                            not in self.allowed_transitions:
                        write_out("Error: invalid transition found")
                        write_out((last_label, label))
                        sys.exit(1)
                    last_label = label
                write_out(" ")
    return loss
def forward(self, input_sequences, forced_types=None):
    """Predict per-residue labels, protein types and topologies.

    :param input_sequences: embedded minibatch (see ``embed``).
    :param forced_types: optional tensor overriding the predicted types.
    :return: (predicted_labels, predicted_types, predicted_topologies).
    """
    emissions, batch_sizes = self._get_network_emissions(input_sequences)
    if self.model_mode == TMHMM3Mode.LSTM:
        # Greedy per-position argmax over the 5 base labels.
        output = torch.nn.functional.log_softmax(emissions, dim=2)
        _, predicted_labels = output[:, :, 0:5].max(dim=2)
        predicted_labels = list(
            [list(map(int, x[:batch_sizes[idx]])) for idx, x in enumerate(predicted_labels
                                                                          .transpose(0, 1))])
        predicted_labels = list(
            torch.cuda.LongTensor(l) if self.use_gpu else torch.LongTensor(l)
            for l in predicted_labels)
        predicted_topologies = list(map(label_list_to_topology, predicted_labels))
        predicted_types = torch.LongTensor(list(map(get_predicted_type_from_labels,
                                                    predicted_labels)))
    else:
        # CRF variants: Viterbi decoding under the transition constraints.
        mask = self.batch_sizes_to_mask(batch_sizes)
        labels_predicted = list(torch.cuda.LongTensor(l) if self.use_gpu
                                else torch.LongTensor(l) for l in
                                self.crf_model.decode(emissions, mask=mask))
        if self.model_mode == TMHMM3Mode.LSTM_CRF_HMM:
            # Collapse the expanded HMM-style tag set back to the 5 base labels.
            predicted_labels = list(map(remapped_labels_hmm_to_orginal_labels,
                                        labels_predicted))
            predicted_types = torch.LongTensor(list(map(get_predicted_type_from_labels,
                                                        predicted_labels)))
        elif self.model_mode == TMHMM3Mode.LSTM_CRF_MARG:
            # Read the protein type off the final forward (alpha) scores of the
            # 4 label groups (num_tags = 4 types x 5 labels).
            alpha = self.crf_model._compute_log_alpha(emissions, mask, run_backwards=False)
            z_value = alpha[alpha.size(0) - 1] + self.crf_model.end_transitions
            types = z_value.view((-1, 4, 5))
            types = logsumexp(types, dim=2)
            _, predicted_types = torch.max(types, dim=1)
            predicted_labels = list([l % 5 for l in labels_predicted])  # remap
        else:
            predicted_labels = labels_predicted
            predicted_types = torch.LongTensor(list(map(get_predicted_type_from_labels,
                                                        predicted_labels)))
        if self.use_gpu:
            predicted_types = predicted_types.cuda()
        predicted_topologies = list(map(label_list_to_topology, predicted_labels))
    # if all O's, change to all I's (by convention)
    for idx, labels in enumerate(predicted_labels):
        if torch.eq(labels, 4).all():
            predicted_labels[idx] = labels - 1
    return predicted_labels, predicted_types if forced_types \
        is None else forced_types, predicted_topologies
def evaluate_model(self, data_loader):
    """Evaluate on a data loader.

    :return: (validation_loss, stats dict, (names, aa_strings, actual labels,
        predicted labels)). validation_loss is the type 0/1 loss when the
        experiment id contains "TYPE", otherwise the topology 0/1 loss.
    """
    validation_loss_tracker = []
    validation_type_loss_tracker = []      # 0/1 loss per protein for type
    validation_topology_loss_tracker = []  # 0/1 loss per protein for topology
    confusion_matrix = np.zeros((5, 5), dtype=np.int64)
    protein_names = []
    protein_aa_strings = []
    protein_label_actual = []
    protein_label_prediction = []
    for _, minibatch in enumerate(data_loader, 0):
        validation_loss_tracker.append(self.compute_loss(minibatch).detach())
        _, _, _, _, prot_type_list, prot_topology_list, \
        prot_name_list, original_aa_string, original_label_string = minibatch
        input_sequences = [x for x in self.embed(original_aa_string)]
        predicted_labels, predicted_types, predicted_topologies = self(input_sequences)
        protein_names.extend(prot_name_list)
        protein_aa_strings.extend(original_aa_string)
        protein_label_actual.extend(original_label_string)
        # if we're using an external type predictor
        if self.type_classifier is not None:
            predicted_labels_type_classifer, \
            predicted_types_type_classifier, \
            predicted_topologies_type_classifier = self.type_classifier(input_sequences)
        for idx, actual_type in enumerate(prot_type_list):
            predicted_type = predicted_types[idx]
            predicted_topology = predicted_topologies[idx]
            predicted_labels_for_protein = predicted_labels[idx]
            if self.type_classifier is not None:
                if predicted_type != predicted_types_type_classifier[idx]:
                    # we must always use the type predicted by the type predictor if available
                    predicted_type = predicted_types_type_classifier[idx]
                    predicted_topology = predicted_topologies_type_classifier[idx]
                    predicted_labels_for_protein = predicted_labels_type_classifer[idx]
            prediction_topology_match = is_topologies_equal(prot_topology_list[idx],
                                                            predicted_topology, 5)
            if actual_type == predicted_type:
                validation_type_loss_tracker.append(0)
                # if we guessed the type right for SP+GLOB or GLOB,
                # count the topology as correct
                if actual_type == 2 or actual_type == 3 or prediction_topology_match:
                    validation_topology_loss_tracker.append(0)
                    # column 4 tallies "type and topology both correct"
                    confusion_matrix[actual_type][4] += 1
                else:
                    validation_topology_loss_tracker.append(1)
                    confusion_matrix[actual_type][predicted_type] += 1
                # if the type was correctly guessed to be 2 or 3 by the type classifier,
                # use its topology prediction
                if (actual_type in (2, 3)) and self.type_classifier is not None:
                    protein_label_prediction.append(predicted_labels_type_classifer[idx])
                else:
                    protein_label_prediction.append(predicted_labels_for_protein)
            else:
                confusion_matrix[actual_type][predicted_type] += 1
                validation_type_loss_tracker.append(1)
                validation_topology_loss_tracker.append(1)
                protein_label_prediction.append(predicted_labels_for_protein)
    write_out(confusion_matrix)
    _loss = float(torch.stack(validation_loss_tracker).mean())
    type_loss = float(torch.FloatTensor(validation_type_loss_tracker).mean().detach())
    topology_loss = float(torch.FloatTensor(validation_topology_loss_tracker).mean().detach())
    self.type_01loss_values.append(type_loss)
    self.topology_01loss_values.append(topology_loss)
    if get_experiment_id() is not None and "TYPE" in get_experiment_id():
        # optimize for type
        validation_loss = type_loss
    else:
        # optimize for topology
        validation_loss = topology_loss
    data = {}
    data['type_01loss_values'] = self.type_01loss_values
    data['topology_01loss_values'] = self.topology_01loss_values
    data['confusion_matrix'] = confusion_matrix.tolist()
    return validation_loss, data, (
        protein_names, protein_aa_strings, protein_label_actual, protein_label_prediction)
def post_process_prediction_data(prediction_data):
    """Render prediction data as a FASTA-like text block.

    Each protein contributes four lines: ">name", the amino-acid string,
    the actual label string, and the predicted labels rendered via
    original_labels_to_fasta. Records are joined by newlines.
    """
    return "\n".join(
        "\n".join((">" + name,
                   aa_string,
                   actual,
                   original_labels_to_fasta(prediction)))
        for name, aa_string, actual, prediction in zip(*prediction_data)
    )
def logsumexp(data, dim):
    """Numerically stable log-sum-exp reduction along ``dim``.

    Shifts by the per-slice maximum before exponentiating so large scores
    do not overflow.
    """
    max_vals = data.max(dim)[0]
    shifted = data - max_vals.unsqueeze(dim)
    return max_vals + shifted.exp().sum(dim).log()
def initialize_crf_parameters(crf_model,
                              start_transitions=None,
                              end_transitions=None,
                              transitions=None) -> None:
    """Initialize the CRF transition parameters.

    Each parameter is drawn uniformly from [-0.1, 0.1] unless an explicit
    tensor is supplied, in which case that tensor is installed as-is.
    """
    parameter_overrides = (
        (crf_model.start_transitions, start_transitions),
        (crf_model.end_transitions, end_transitions),
        (crf_model.transitions, transitions),
    )
    for parameter, override in parameter_overrides:
        if override is None:
            nn.init.uniform(parameter, -0.1, 0.1)
        else:
            parameter.data = override
def generate_masked_crf_transitions(crf_model, transition_mask):
    """Clone the CRF transition tensors and forbid masked entries.

    Masked positions are pushed to -100000000 (effectively -inf) so the CRF
    never selects them. The model's own parameters are left untouched —
    only detached clones are modified and returned.

    :param transition_mask: (start_mask, transitions_mask, end_mask); any
        element may be None to leave that tensor unmasked.
    :return: (start_transitions, end_transitions, transitions) clones.
    """
    start_mask, transitions_mask, end_mask = transition_mask
    forbidden_score = -100000000
    start_transitions = crf_model.start_transitions.data.clone()
    end_transitions = crf_model.end_transitions.data.clone()
    transitions = crf_model.transitions.data.clone()
    for tensor, mask in ((start_transitions, start_mask),
                         (end_transitions, end_mask),
                         (transitions, transitions_mask)):
        if mask is not None:
            tensor.masked_fill_(mask, forbidden_score)
    return start_transitions, end_transitions, transitions
class TMHMM3Mode(Enum):
    """Model variants: plain bi-LSTM, bi-LSTM + CRF, CRF over an expanded
    HMM-style tag set, and CRF with per-type marginalization."""
    LSTM = 1
    LSTM_CRF = 2
    LSTM_CRF_HMM = 3
    LSTM_CRF_MARG = 4
| 48.258004 | 100 | 0.561639 |
import sys
from enum import Enum
import glob
import pickle
import numpy as np
import torch
import torch.autograd as autograd
import torch.nn as nn
import openprotein
from experiments.tmhmm3.tm_util import label_list_to_topology
from experiments.tmhmm3.tm_util import get_predicted_type_from_labels
from experiments.tmhmm3.tm_util import remapped_labels_hmm_to_orginal_labels
from experiments.tmhmm3.tm_util import is_topologies_equal
from experiments.tmhmm3.tm_util import original_labels_to_fasta
from pytorchcrf.torchcrf import CRF
from util import write_out, get_experiment_id
torch.manual_seed(1)
class TMHMM3(openprotein.BaseModel):
def __init__(self,
             embedding,
             hidden_size,
             use_gpu,
             model_mode,
             use_marg_prob,
             type_predictor_model,
             profile_path):
    """Build the TMHMM3 topology model.

    :param embedding: "BLOSUM62", "ONEHOT", "PYTORCH" or "PROFILE".
    :param hidden_size: LSTM hidden size per direction.
    :param use_gpu: move all submodules/tensors to CUDA when True.
    :param model_mode: a TMHMM3Mode value selecting the decoder variant.
    :param use_marg_prob: flag stored for marginal-probability usage.
    :param type_predictor_model: optional external type-classifier model.
    :param profile_path: directory of pickled profiles (PROFILE mode).
    """
    super(TMHMM3, self).__init__(embedding, use_gpu)

    # 5 base labels: membrane in->out, membrane out->in, signal, inside, outside.
    num_tags = 5
    num_labels = 5
    self.max_signal_length = 67
    if model_mode == TMHMM3Mode.LSTM_CRF_HMM:
        # Expanded HMM-style tag set: 40 states per membrane direction
        # plus the signal-peptide chain.
        num_tags += 2 * 40 + self.max_signal_length
    elif model_mode == TMHMM3Mode.LSTM_CRF_MARG:
        num_tags = num_tags * 4  # 4 protein types, 5 labels each
    # Bug fix: this was `n_size = hidden_size`, leaving `self.hidden_size`
    # unassigned even though it is read below and in init_hidden().
    self.hidden_size = hidden_size
    self.use_gpu = use_gpu
    self.use_marg_prob = use_marg_prob
    self.model_mode = model_mode
    self.embedding = embedding
    self.profile_path = profile_path
    self.bi_lstm = nn.LSTM(self.get_embedding_size(),
                           self.hidden_size,
                           num_layers=1,
                           bidirectional=True)
    self.hidden_to_labels = nn.Linear(self.hidden_size * 2, num_labels)
    self.hidden_layer = None
    # Masks start as all-ones ("forbidden"); allowed entries are zeroed below.
    crf_start_mask = torch.ones(num_tags).byte()
    crf_end_mask = torch.ones(num_tags).byte()
    if model_mode == TMHMM3Mode.LSTM_CRF_HMM:
        # Hand-built transition structure over the expanded tag set:
        # states 5-44 model one membrane direction, 45-84 the other,
        # 85-151 the signal-peptide chain.
        allowed_transitions = [
            (3, 3), (4, 4),
            (3, 5), (4, 45)]
        for i in range(5, 45 - 1):
            allowed_transitions.append((i, i + 1))
            if 8 < i < 43:
                allowed_transitions.append((8, i))
        allowed_transitions.append((44, 4))
        for i in range(45, 85 - 1):
            allowed_transitions.append((i, i + 1))
            if 48 < i < 83:
                allowed_transitions.append((48, i))
        allowed_transitions.append((84, 3))
        for i in range(85, 151):
            allowed_transitions.append((i, i + 1))
            allowed_transitions.append((2, i))
        allowed_transitions.append((2, 151))
        allowed_transitions.append((2, 4))
        allowed_transitions.append((151, 4))
        crf_start_mask[2] = 0
        crf_start_mask[3] = 0
        crf_start_mask[4] = 0
        crf_end_mask[3] = 0
        crf_end_mask[4] = 0
    elif model_mode == TMHMM3Mode.LSTM_CRF_MARG:
        # Block-diagonal structure: each protein type owns 5 consecutive tags.
        allowed_transitions = [
            (0, 0), (1, 1), (3, 3), (4, 4), (3, 0), (0, 4), (4, 1), (1, 3),
            (5, 5), (6, 6), (7, 7), (8, 8), (9, 9), (8, 5), (5, 9), (9, 6), (6, 8), (7, 9),
            (12, 12), (14, 14), (12, 14),
            (18, 18),
        ]
        crf_start_mask[3] = 0
        crf_start_mask[4] = 0
        crf_start_mask[7] = 0
        crf_start_mask[8] = 0
        crf_start_mask[9] = 0
        crf_start_mask[12] = 0
        crf_start_mask[18] = 0
        crf_end_mask[3] = 0
        crf_end_mask[4] = 0
        crf_end_mask[8] = 0
        crf_end_mask[9] = 0
        crf_end_mask[14] = 0
        crf_end_mask[18] = 0
    else:
        # Plain 5-label transition structure.
        allowed_transitions = [
            (0, 0), (1, 1), (2, 2), (3, 3), (4, 4),
            (3, 0), (0, 4), (4, 1), (1, 3), (2, 4)]
        crf_start_mask[2] = 0
        crf_start_mask[3] = 0
        crf_start_mask[4] = 0
        crf_end_mask[3] = 0
        crf_end_mask[4] = 0
    self.allowed_transitions = allowed_transitions
    self.crf_model = CRF(num_tags)
    self.type_classifier = type_predictor_model
    self.type_tm_classier = None
    self.type_sp_classier = None
    crf_transitions_mask = torch.ones((num_tags, num_tags)).byte()
    self.label_01loss_values = []
    self.type_01loss_values = []
    self.topology_01loss_values = []
    if self.use_gpu:
        self.crf_model = self.crf_model.cuda()
        self.bi_lstm = self.bi_lstm.cuda()
        self.hidden_to_labels = self.hidden_to_labels.cuda()
        crf_transitions_mask = crf_transitions_mask.cuda()
        crf_start_mask = crf_start_mask.cuda()
        crf_end_mask = crf_end_mask.cuda()
    # Zero (allow) exactly the whitelisted transitions.
    for i in range(num_tags):
        for k in range(num_tags):
            if (i, k) in self.allowed_transitions:
                crf_transitions_mask[i][k] = 0
    crf_start_transitions, crf_end_transitions, crf_transitions = \
        generate_masked_crf_transitions(
            self.crf_model, (crf_start_mask, crf_transitions_mask, crf_end_mask)
        )
    initialize_crf_parameters(self.crf_model,
                              start_transitions=crf_start_transitions,
                              end_transitions=crf_end_transitions,
                              transitions=crf_transitions)
def get_embedding_size(self):
    """Return the per-residue input dimensionality for the configured embedding."""
    if self.embedding == "BLOSUM62":
        return 24  # one BLOSUM62 row (24 symbols) per residue
    elif self.embedding == "ONEHOT":
        # Bug fix: ONEHOT previously fell through and returned None even
        # though encode_amino_acid supports it; one-hot over 24 symbols.
        return 24
    elif self.embedding == "PROFILE":
        return 51
    # NOTE(review): "PYTORCH" mode emits integer indices (see
    # encode_amino_acid) and no fixed size is defined here — previously this
    # returned None for it; preserved for backward compatibility. Confirm.
def flatten_parameters(self):
    """Compact the LSTM weights into one contiguous chunk (cuDNN requirement
    after moving/copying the module)."""
    self.bi_lstm.flatten_parameters()
def encode_amino_acid(self, letter):
    """Encode a single amino-acid letter under the configured embedding.

    BLOSUM62 -> the 24-value BLOSUM62 row for the residue;
    ONEHOT   -> a 24-element 0/1 list;
    PYTORCH  -> the residue's integer index into the 24-symbol vocabulary.
    """
    if self.embedding == "BLOSUM62":
        # Lazily build the BLOSUM62 row lookup once and memoize it in
        # globals() so all instances share it.
        if not globals().get('blosum_encoder'):
            blosum = \
"""4,-1,-2,-2,0,-1,-1,0,-2,-1,-1,-1,-1,-2,-1,1,0,-3,-2,0,-2,-1,0,-4
-1,5,0,-2,-3,1,0,-2,0,-3,-2,2,-1,-3,-2,-1,-1,-3,-2,-3,-1,0,-1,-4
-2,0,6,1,-3,0,0,0,1,-3,-3,0,-2,-3,-2,1,0,-4,-2,-3,3,0,-1,-4
-2,-2,1,6,-3,0,2,-1,-1,-3,-4,-1,-3,-3,-1,0,-1,-4,-3,-3,4,1,-1,-4
0,-3,-3,-3,9,-3,-4,-3,-3,-1,-1,-3,-1,-2,-3,-1,-1,-2,-2,-1,-3,-3,-2,-4
-1,1,0,0,-3,5,2,-2,0,-3,-2,1,0,-3,-1,0,-1,-2,-1,-2,0,3,-1,-4
-1,0,0,2,-4,2,5,-2,0,-3,-3,1,-2,-3,-1,0,-1,-3,-2,-2,1,4,-1,-4
0,-2,0,-1,-3,-2,-2,6,-2,-4,-4,-2,-3,-3,-2,0,-2,-2,-3,-3,-1,-2,-1,-4
-2,0,1,-1,-3,0,0,-2,8,-3,-3,-1,-2,-1,-2,-1,-2,-2,2,-3,0,0,-1,-4
-1,-3,-3,-3,-1,-3,-3,-4,-3,4,2,-3,1,0,-3,-2,-1,-3,-1,3,-3,-3,-1,-4
-1,-2,-3,-4,-1,-2,-3,-4,-3,2,4,-2,2,0,-3,-2,-1,-2,-1,1,-4,-3,-1,-4
-1,2,0,-1,-3,1,1,-2,-1,-3,-2,5,-1,-3,-1,0,-1,-3,-2,-2,0,1,-1,-4
-1,-1,-2,-3,-1,0,-2,-3,-2,1,2,-1,5,0,-2,-1,-1,-1,-1,1,-3,-1,-1,-4
-2,-3,-3,-3,-2,-3,-3,-3,-1,0,0,-3,0,6,-4,-2,-2,1,3,-1,-3,-3,-1,-4
-1,-2,-2,-1,-3,-1,-1,-2,-2,-3,-3,-1,-2,-4,7,-1,-1,-4,-3,-2,-2,-1,-2,-4
1,-1,1,0,-1,0,0,0,-1,-2,-2,0,-1,-2,-1,4,1,-3,-2,-2,0,0,0,-4
0,-1,0,-1,-1,-1,-1,-2,-2,-1,-1,-1,-1,-2,-1,1,5,-2,-2,0,-1,-1,0,-4
-3,-3,-4,-4,-2,-2,-3,-2,-2,-3,-2,-3,-1,1,-4,-3,-2,11,2,-3,-4,-3,-2,-4
-2,-2,-2,-3,-2,-1,-2,-3,2,-1,-1,-2,-1,3,-3,-2,-2,2,7,-1,-3,-2,-1,-4
0,-3,-3,-3,-1,-2,-2,-3,-3,3,1,-2,1,-1,-2,-2,0,-3,-1,4,-3,-2,-1,-4
-2,-1,3,4,-3,0,1,-1,0,-3,-4,0,-3,-3,-2,0,-1,-4,-3,-3,4,1,-1,-4
-1,0,0,1,-3,3,4,-2,0,-3,-3,1,-1,-3,-1,0,-1,-3,-2,-2,1,4,-1,-4
0,-1,-1,-1,-2,-1,-1,-1,-1,-1,-1,-1,-1,-1,-2,0,0,-2,-1,-1,-1,-1,-1,-4
-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,1""" \
                .replace('\n', ',')
            blosum_matrix = np.fromstring(blosum, sep=",").reshape(24, 24)
            blosum_key = "A,R,N,D,C,Q,E,G,H,I,L,K,M,F,P,S,T,W,Y,V,B,Z,X,U".split(",")
            key_map = {}
            for idx, value in enumerate(blosum_key):
                key_map[value] = list([int(v) for v in blosum_matrix[idx].astype('int')])
            globals().__setitem__("blosum_encoder", key_map)
        return globals().get('blosum_encoder')[letter]
    elif self.embedding == "ONEHOT":
        # One-hot over the same 24-symbol alphabet.
        one_hot_key = "A,R,N,D,C,Q,E,G,H,I,L,K,M,F,P,S,T,W,Y,V,B,Z,X,U".split(",")
        arr = []
        for idx, k in enumerate(one_hot_key):
            if k == letter:
                arr.append(1)
            else:
                arr.append(0)
        return arr
    elif self.embedding == "PYTORCH":
        # Integer index for a downstream embedding layer.
        key_id = "A,R,N,D,C,Q,E,G,H,I,L,K,M,F,P,S,T,W,Y,V,B,Z,X,U".split(",")
        for idx, k in enumerate(key_id):
            if k == letter:
                return idx
def embed(self, prot_aa_list):
    """Convert a minibatch of amino-acid strings into embedding tensors.

    :param prot_aa_list: iterable of amino-acid strings, one per protein.
    :return: list of tensors, one per protein. For "PYTORCH" mode each tensor
        is a LongTensor of vocabulary indices; for "PROFILE" it is the
        precomputed profile matrix; otherwise a FloatTensor of per-residue
        encodings (BLOSUM62 rows or one-hot vectors).
    """
    embed_list = []
    for aa_list in prot_aa_list:
        if self.embedding == "PYTORCH":
            # Bug fix: the original did `tensor = torch.LongTensor(tensor)`,
            # referencing `tensor` before assignment (UnboundLocalError).
            # PYTORCH mode encodes each residue as its vocabulary index.
            tensor = torch.LongTensor([self.encode_amino_acid(aa) for aa in aa_list])
        elif self.embedding == "PROFILE":
            # Profiles are loaded lazily once and memoized in globals(),
            # keyed by the raw amino-acid sequence.
            if not globals().get('profile_encoder'):
                print("Load profiles...")
                files = glob.glob(self.profile_path.strip("/") + "/*")
                profile_dict = {}
                for profile_file in files:
                    profile = pickle.load(open(profile_file, "rb")).popitem()[1]
                    profile_dict[profile["seq"]] = torch.from_numpy(profile["profile"]).float()
                globals().__setitem__("profile_encoder", profile_dict)
                print("Loaded profiles")
            tensor = globals().get('profile_encoder')[aa_list]
        else:
            # BLOSUM62 / ONEHOT: encode residue-by-residue into a float matrix.
            tensor = list([self.encode_amino_acid(aa) for aa in aa_list])
            tensor = torch.FloatTensor(tensor)
        if self.use_gpu:
            tensor = tensor.cuda()
        embed_list.append(tensor)
    return embed_list
def init_hidden(self, minibatch_size):
    """Reset the LSTM state to zeros for a new minibatch.

    The state shape is (num_layers * num_directions, batch, hidden) —
    here 1 layer * 2 directions for the bidirectional LSTM.
    """
    state_shape = (1 * 2, minibatch_size, self.hidden_size)
    h0 = torch.zeros(*state_shape)
    c0 = torch.zeros(*state_shape)
    if self.use_gpu:
        h0 = h0.cuda()
        c0 = c0.cuda()
    self.hidden_layer = (autograd.Variable(h0), autograd.Variable(c0))
def _get_network_emissions(self, input_sequences):
    """Run the bi-LSTM over a minibatch and produce per-position emission scores.

    :param input_sequences: list of per-protein embedding tensors of shape
        (length, embed_dim) — lengths may differ; padding is applied here.
    :return: (emissions, batch_sizes) — emissions has shape
        (max_len, batch, num_tags); batch_sizes holds each true length.
    """
    # True (unpadded) length of every sequence in the minibatch.
    batch_sizes = torch.LongTensor(list([i.size(0) for i in input_sequences]))
    pad_seq_embed = torch.nn.utils.rnn.pad_sequence(input_sequences)
    minibatch_size = len(input_sequences)
    self.init_hidden(minibatch_size)
    bi_lstm_out, self.hidden_layer = self.bi_lstm(pad_seq_embed, self.hidden_layer)
    emissions = self.hidden_to_labels(bi_lstm_out)
    if self.model_mode == TMHMM3Mode.LSTM_CRF_HMM:
        # Expand the 5 base-label scores to the enlarged HMM-style tag set:
        # every expanded membrane/signal state reuses the emission of its base
        # label (0 = in->out membrane, 1 = out->in membrane, 2 = signal).
        inout_select = torch.LongTensor([0])
        outin_select = torch.LongTensor([1])
        signal_select = torch.LongTensor([2])
        if self.use_gpu:
            inout_select = inout_select.cuda()
            outin_select = outin_select.cuda()
            signal_select = signal_select.cuda()
        inout = torch.index_select(emissions, 2, autograd.Variable(inout_select))
        outin = torch.index_select(emissions, 2, autograd.Variable(outin_select))
        signal = torch.index_select(emissions, 2, autograd.Variable(signal_select))
        # 40 replicated states per membrane direction plus max_signal_length
        # signal states, appended after the 5 base labels.
        emissions = torch.cat((emissions, inout.expand(-1, len(batch_sizes), 40),
                               outin.expand(-1, len(batch_sizes), 40),
                               signal.expand(-1, len(batch_sizes), self.max_signal_length)), 2)
    elif self.model_mode == TMHMM3Mode.LSTM_CRF_MARG:
        # One copy of the 5 labels per protein type (4 types total).
        emissions = emissions.repeat(1, 1, 4)
    return emissions, batch_sizes
def batch_sizes_to_mask(self, batch_sizes):
    """Build a (max_len, batch) byte mask: 1 inside a sequence, 0 in padding.

    Assumes batch_sizes[0] is the longest sequence (descending sort order).
    """
    max_len = int(batch_sizes[0])
    rows = []
    for batch_size in batch_sizes:
        valid = int(batch_size)
        rows.append([1] * valid + [0] * (max_len - valid))
    mask = torch.autograd.Variable(torch.t(torch.ByteTensor(rows)))
    if self.use_gpu:
        mask = mask.cuda()
    return mask
def compute_loss(self, training_minibatch):
    """Compute the training loss for one minibatch.

    Plain LSTM mode uses masked token-level negative log-likelihood;
    the CRF variants use the (negated, per-protein averaged) CRF likelihood.
    """
    _, labels_list, remapped_labels_list_crf_hmm, remapped_labels_list_crf_marg, \
    _prot_type_list, _prot_topology_list, _prot_name_list, original_aa_string, \
    _original_label_string = training_minibatch
    minibatch_size = len(labels_list)
    # Each model mode trains against its own label remapping.
    if self.model_mode == TMHMM3Mode.LSTM_CRF_MARG:
        labels_to_use = remapped_labels_list_crf_marg
    elif self.model_mode == TMHMM3Mode.LSTM_CRF_HMM:
        labels_to_use = remapped_labels_list_crf_hmm
    else:
        labels_to_use = labels_list
    input_sequences = [autograd.Variable(x) for x in self.embed(original_aa_string)]
    actual_labels = torch.nn.utils.rnn.pad_sequence([autograd.Variable(l)
                                                     for l in labels_to_use])
    emissions, batch_sizes = self._get_network_emissions(input_sequences)
    if self.model_mode == TMHMM3Mode.LSTM:
        # Flatten (len, batch) into one prediction per position, then gather
        # the log-probability of the gold label at each position.
        prediction = emissions.transpose(0, 1).contiguous().view(-1, emissions.size(-1))
        target = actual_labels.transpose(0, 1).contiguous().view(-1, 1)
        losses = -torch.gather(nn.functional.log_softmax(prediction),
                               dim=1, index=target).view(*actual_labels
                                                         .transpose(0, 1).size())
        # NOTE(review): torch.range is deprecated (inclusive endpoint);
        # torch.arange(0, max_len) would be the modern equivalent.
        mask_expand = torch.range(0, batch_sizes.data.max() - 1).long() \
            .unsqueeze(0).expand(batch_sizes.size(0), batch_sizes.data.max())
        if self.use_gpu:
            mask_expand = mask_expand.cuda()
            batch_sizes = batch_sizes.cuda()
        # Zero out positions past each sequence's true length before averaging.
        mask = mask_expand < batch_sizes.unsqueeze(1).expand_as(mask_expand)
        loss = (losses * mask.float()).sum() / batch_sizes.float().sum()
    else:
        mask = (self.batch_sizes_to_mask(batch_sizes))
        loss = -1 * self.crf_model(emissions, actual_labels, mask=mask) / minibatch_size
        # A huge loss means the gold labels contain a transition the CRF
        # structure forbids; dump labels and abort on the first offender.
        if float(loss) > 100000:
            for idx, batch_size in enumerate(batch_sizes):
                last_label = None
                for i in range(batch_size):
                    label = int(actual_labels[i][idx])
                    write_out(str(label) + ",", end='')
                    if last_label is not None and (last_label, label) \
                            not in self.allowed_transitions:
                        write_out("Error: invalid transition found")
                        write_out((last_label, label))
                        sys.exit(1)
                    last_label = label
                write_out(" ")
    return loss
def forward(self, input_sequences, forced_types=None):
    """Predict per-residue labels, protein types and topologies.

    :param input_sequences: embedded minibatch (see ``embed``).
    :param forced_types: optional tensor overriding the predicted types.
    :return: (predicted_labels, predicted_types, predicted_topologies).
    """
    emissions, batch_sizes = self._get_network_emissions(input_sequences)
    if self.model_mode == TMHMM3Mode.LSTM:
        # Greedy per-position argmax over the 5 base labels.
        output = torch.nn.functional.log_softmax(emissions, dim=2)
        _, predicted_labels = output[:, :, 0:5].max(dim=2)
        predicted_labels = list(
            [list(map(int, x[:batch_sizes[idx]])) for idx, x in enumerate(predicted_labels
                                                                          .transpose(0, 1))])
        predicted_labels = list(
            torch.cuda.LongTensor(l) if self.use_gpu else torch.LongTensor(l)
            for l in predicted_labels)
        predicted_topologies = list(map(label_list_to_topology, predicted_labels))
        predicted_types = torch.LongTensor(list(map(get_predicted_type_from_labels,
                                                    predicted_labels)))
    else:
        # CRF variants: Viterbi decoding under the transition constraints.
        mask = self.batch_sizes_to_mask(batch_sizes)
        labels_predicted = list(torch.cuda.LongTensor(l) if self.use_gpu
                                else torch.LongTensor(l) for l in
                                self.crf_model.decode(emissions, mask=mask))
        if self.model_mode == TMHMM3Mode.LSTM_CRF_HMM:
            # Collapse the expanded HMM-style tag set back to the 5 base labels.
            predicted_labels = list(map(remapped_labels_hmm_to_orginal_labels,
                                        labels_predicted))
            predicted_types = torch.LongTensor(list(map(get_predicted_type_from_labels,
                                                        predicted_labels)))
        elif self.model_mode == TMHMM3Mode.LSTM_CRF_MARG:
            # Read the protein type off the final forward (alpha) scores of the
            # 4 label groups (num_tags = 4 types x 5 labels).
            alpha = self.crf_model._compute_log_alpha(emissions, mask, run_backwards=False)
            z_value = alpha[alpha.size(0) - 1] + self.crf_model.end_transitions
            types = z_value.view((-1, 4, 5))
            types = logsumexp(types, dim=2)
            _, predicted_types = torch.max(types, dim=1)
            predicted_labels = list([l % 5 for l in labels_predicted])  # remap
        else:
            predicted_labels = labels_predicted
            predicted_types = torch.LongTensor(list(map(get_predicted_type_from_labels,
                                                        predicted_labels)))
        if self.use_gpu:
            predicted_types = predicted_types.cuda()
        predicted_topologies = list(map(label_list_to_topology, predicted_labels))
    # If a prediction is all outside (4), rewrite it as all inside (3) by convention.
    for idx, labels in enumerate(predicted_labels):
        if torch.eq(labels, 4).all():
            predicted_labels[idx] = labels - 1
    return predicted_labels, predicted_types if forced_types \
        is None else forced_types, predicted_topologies
def evaluate_model(self, data_loader):
    """Evaluate on a data loader.

    :return: (validation_loss, stats dict, (names, aa_strings, actual labels,
        predicted labels)). validation_loss is the type 0/1 loss when the
        experiment id contains "TYPE", otherwise the topology 0/1 loss.
    """
    validation_loss_tracker = []
    validation_type_loss_tracker = []      # 0/1 loss per protein for type
    validation_topology_loss_tracker = []  # 0/1 loss per protein for topology
    confusion_matrix = np.zeros((5, 5), dtype=np.int64)
    protein_names = []
    protein_aa_strings = []
    protein_label_actual = []
    protein_label_prediction = []
    for _, minibatch in enumerate(data_loader, 0):
        validation_loss_tracker.append(self.compute_loss(minibatch).detach())
        _, _, _, _, prot_type_list, prot_topology_list, \
        prot_name_list, original_aa_string, original_label_string = minibatch
        input_sequences = [x for x in self.embed(original_aa_string)]
        predicted_labels, predicted_types, predicted_topologies = self(input_sequences)
        protein_names.extend(prot_name_list)
        protein_aa_strings.extend(original_aa_string)
        protein_label_actual.extend(original_label_string)
        # When an external type predictor is configured, also collect its output.
        if self.type_classifier is not None:
            predicted_labels_type_classifer, \
            predicted_types_type_classifier, \
            predicted_topologies_type_classifier = self.type_classifier(input_sequences)
        for idx, actual_type in enumerate(prot_type_list):
            predicted_type = predicted_types[idx]
            predicted_topology = predicted_topologies[idx]
            predicted_labels_for_protein = predicted_labels[idx]
            if self.type_classifier is not None:
                if predicted_type != predicted_types_type_classifier[idx]:
                    # Always defer to the external type predictor when it disagrees.
                    predicted_type = predicted_types_type_classifier[idx]
                    predicted_topology = predicted_topologies_type_classifier[idx]
                    predicted_labels_for_protein = predicted_labels_type_classifer[idx]
            prediction_topology_match = is_topologies_equal(prot_topology_list[idx],
                                                            predicted_topology, 5)
            if actual_type == predicted_type:
                validation_type_loss_tracker.append(0)
                # For SP+GLOB (2) or GLOB (3) a correct type counts as a
                # correct topology as well.
                if actual_type == 2 or actual_type == 3 or prediction_topology_match:
                    validation_topology_loss_tracker.append(0)
                    # Column 4 tallies "type and topology both correct".
                    confusion_matrix[actual_type][4] += 1
                else:
                    validation_topology_loss_tracker.append(1)
                    confusion_matrix[actual_type][predicted_type] += 1
                # Use the external classifier's labels when it correctly
                # identified a type-2/3 protein.
                if (actual_type in (2, 3)) and self.type_classifier is not None:
                    protein_label_prediction.append(predicted_labels_type_classifer[idx])
                else:
                    protein_label_prediction.append(predicted_labels_for_protein)
            else:
                confusion_matrix[actual_type][predicted_type] += 1
                validation_type_loss_tracker.append(1)
                validation_topology_loss_tracker.append(1)
                protein_label_prediction.append(predicted_labels_for_protein)
    write_out(confusion_matrix)
    _loss = float(torch.stack(validation_loss_tracker).mean())
    type_loss = float(torch.FloatTensor(validation_type_loss_tracker).mean().detach())
    topology_loss = float(torch.FloatTensor(validation_topology_loss_tracker).mean().detach())
    self.type_01loss_values.append(type_loss)
    self.topology_01loss_values.append(topology_loss)
    if get_experiment_id() is not None and "TYPE" in get_experiment_id():
        # optimize for type
        validation_loss = type_loss
    else:
        # optimize for topology
        validation_loss = topology_loss
    data = {}
    data['type_01loss_values'] = self.type_01loss_values
    data['topology_01loss_values'] = self.topology_01loss_values
    data['confusion_matrix'] = confusion_matrix.tolist()
    return validation_loss, data, (
        protein_names, protein_aa_strings, protein_label_actual, protein_label_prediction)
def post_process_prediction_data(prediction_data):
    """Render prediction data as a FASTA-like text block.

    Each protein contributes four lines: ">name", the amino-acid string,
    the actual label string, and the predicted labels rendered via
    original_labels_to_fasta. Records are joined by newlines.
    """
    return "\n".join(
        "\n".join((">" + name,
                   aa_string,
                   actual,
                   original_labels_to_fasta(prediction)))
        for name, aa_string, actual, prediction in zip(*prediction_data)
    )
def logsumexp(data, dim):
    """Numerically stable log-sum-exp reduction along ``dim``.

    Shifts by the per-slice maximum before exponentiating so large scores
    do not overflow.
    """
    max_vals = data.max(dim)[0]
    shifted = data - max_vals.unsqueeze(dim)
    return max_vals + shifted.exp().sum(dim).log()
def initialize_crf_parameters(crf_model,
                              start_transitions=None,
                              end_transitions=None,
                              transitions=None) -> None:
    """Initialize the CRF transition parameters.

    Each parameter is drawn uniformly from [-0.1, 0.1] unless an explicit
    tensor is supplied, in which case that tensor is installed as-is.
    """
    parameter_overrides = (
        (crf_model.start_transitions, start_transitions),
        (crf_model.end_transitions, end_transitions),
        (crf_model.transitions, transitions),
    )
    for parameter, override in parameter_overrides:
        if override is None:
            nn.init.uniform(parameter, -0.1, 0.1)
        else:
            parameter.data = override
def generate_masked_crf_transitions(crf_model, transition_mask):
    """Clone the CRF transition tensors and forbid masked entries.

    Masked positions are pushed to -100000000 (effectively -inf) so the CRF
    never selects them. The model's own parameters are left untouched —
    only detached clones are modified and returned.

    :param transition_mask: (start_mask, transitions_mask, end_mask); any
        element may be None to leave that tensor unmasked.
    :return: (start_transitions, end_transitions, transitions) clones.
    """
    start_mask, transitions_mask, end_mask = transition_mask
    forbidden_score = -100000000
    start_transitions = crf_model.start_transitions.data.clone()
    end_transitions = crf_model.end_transitions.data.clone()
    transitions = crf_model.transitions.data.clone()
    for tensor, mask in ((start_transitions, start_mask),
                         (end_transitions, end_mask),
                         (transitions, transitions_mask)):
        if mask is not None:
            tensor.masked_fill_(mask, forbidden_score)
    return start_transitions, end_transitions, transitions
class TMHMM3Mode(Enum):
    """Model variants: plain bi-LSTM, bi-LSTM + CRF, CRF over an expanded
    HMM-style tag set, and CRF with per-type marginalization."""
    LSTM = 1
    LSTM_CRF = 2
    LSTM_CRF_HMM = 3
    LSTM_CRF_MARG = 4
| true | true |
f724310ed89a048b1602d1084baca21f8eecd141 | 1,427 | py | Python | examples/PsyNeuLink/SimpleLinear-conditional.reconstructed.py | singular-value/MDF | 227216ffb2c9beea8539829b0b891196787d33ee | [
"Apache-2.0"
] | 12 | 2021-01-18T20:38:21.000Z | 2022-03-29T15:01:10.000Z | examples/PsyNeuLink/SimpleLinear-conditional.reconstructed.py | singular-value/MDF | 227216ffb2c9beea8539829b0b891196787d33ee | [
"Apache-2.0"
] | 101 | 2020-12-14T15:23:07.000Z | 2022-03-31T17:06:19.000Z | examples/PsyNeuLink/SimpleLinear-conditional.reconstructed.py | singular-value/MDF | 227216ffb2c9beea8539829b0b891196787d33ee | [
"Apache-2.0"
] | 15 | 2020-12-04T22:37:14.000Z | 2022-03-31T09:48:03.000Z | import psyneulink as pnl
comp = pnl.Composition(name="comp")
A = pnl.TransferMechanism(
name="A",
function=pnl.Linear(default_variable=[[0]]),
termination_measure=pnl.Distance(
metric=pnl.MAX_ABS_DIFF, default_variable=[[[0]], [[0]]]
),
)
B = pnl.TransferMechanism(
name="B",
function=pnl.Linear(default_variable=[[0]]),
termination_measure=pnl.Distance(
metric=pnl.MAX_ABS_DIFF, default_variable=[[[0]], [[0]]]
),
)
C = pnl.TransferMechanism(
name="C",
function=pnl.Linear(default_variable=[[0]]),
termination_measure=pnl.Distance(
metric=pnl.MAX_ABS_DIFF, default_variable=[[[0]], [[0]]]
),
)
comp.add_node(A)
comp.add_node(B)
comp.add_node(C)
comp.add_projection(
projection=pnl.MappingProjection(
name="MappingProjection from A[RESULT] to B[InputPort-0]",
function=pnl.LinearMatrix(matrix=[[1.0]]),
),
sender=A,
receiver=B,
)
comp.add_projection(
projection=pnl.MappingProjection(
name="MappingProjection from B[RESULT] to C[InputPort-0]",
function=pnl.LinearMatrix(matrix=[[1.0]]),
),
sender=B,
receiver=C,
)
comp.scheduler.add_condition(A, pnl.AtNCalls(A, 0))
comp.scheduler.add_condition(B, pnl.Always())
comp.scheduler.add_condition(C, pnl.EveryNCalls(B, 5))
comp.scheduler.termination_conds = {
pnl.TimeScale.RUN: pnl.Never(),
pnl.TimeScale.TRIAL: pnl.AllHaveRun(),
}
| 25.482143 | 66 | 0.669236 | import psyneulink as pnl
comp = pnl.Composition(name="comp")
A = pnl.TransferMechanism(
name="A",
function=pnl.Linear(default_variable=[[0]]),
termination_measure=pnl.Distance(
metric=pnl.MAX_ABS_DIFF, default_variable=[[[0]], [[0]]]
),
)
B = pnl.TransferMechanism(
name="B",
function=pnl.Linear(default_variable=[[0]]),
termination_measure=pnl.Distance(
metric=pnl.MAX_ABS_DIFF, default_variable=[[[0]], [[0]]]
),
)
C = pnl.TransferMechanism(
name="C",
function=pnl.Linear(default_variable=[[0]]),
termination_measure=pnl.Distance(
metric=pnl.MAX_ABS_DIFF, default_variable=[[[0]], [[0]]]
),
)
comp.add_node(A)
comp.add_node(B)
comp.add_node(C)
comp.add_projection(
projection=pnl.MappingProjection(
name="MappingProjection from A[RESULT] to B[InputPort-0]",
function=pnl.LinearMatrix(matrix=[[1.0]]),
),
sender=A,
receiver=B,
)
comp.add_projection(
projection=pnl.MappingProjection(
name="MappingProjection from B[RESULT] to C[InputPort-0]",
function=pnl.LinearMatrix(matrix=[[1.0]]),
),
sender=B,
receiver=C,
)
comp.scheduler.add_condition(A, pnl.AtNCalls(A, 0))
comp.scheduler.add_condition(B, pnl.Always())
comp.scheduler.add_condition(C, pnl.EveryNCalls(B, 5))
comp.scheduler.termination_conds = {
pnl.TimeScale.RUN: pnl.Never(),
pnl.TimeScale.TRIAL: pnl.AllHaveRun(),
}
| true | true |
f724311b8aa29b6d723ea5b6061ce5d9ef2b7e24 | 58,123 | py | Python | sdk/storage/azure-mgmt-storage/azure/mgmt/storage/v2018_07_01/aio/operations/_blob_containers_operations.py | ankitarorabit/azure-sdk-for-python | dd90281cbad9400f8080754a5ef2f56791a5a88f | [
"MIT"
] | 3 | 2020-06-23T02:25:27.000Z | 2021-09-07T18:48:11.000Z | sdk/storage/azure-mgmt-storage/azure/mgmt/storage/v2018_07_01/aio/operations/_blob_containers_operations.py | ankitarorabit/azure-sdk-for-python | dd90281cbad9400f8080754a5ef2f56791a5a88f | [
"MIT"
] | 510 | 2019-07-17T16:11:19.000Z | 2021-08-02T08:38:32.000Z | sdk/storage/azure-mgmt-storage/azure/mgmt/storage/v2018_07_01/aio/operations/_blob_containers_operations.py | ankitarorabit/azure-sdk-for-python | dd90281cbad9400f8080754a5ef2f56791a5a88f | [
"MIT"
] | 5 | 2019-09-04T12:51:37.000Z | 2020-09-16T07:28:40.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
# Generic return type produced by an operation's optional `cls` callback.
T = TypeVar('T')
# Signature of the optional `cls` response transformer accepted (via kwargs)
# by every operation: (pipeline response, deserialized body, response headers).
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class BlobContainersOperations:
    """BlobContainersOperations async operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.storage.v2018_07_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    # Exposed so callers can reach the model classes of this API version.
    models = _models

    def __init__(self, client, config, serializer, deserializer) -> None:
        """Store the pipeline client, configuration and (de)serializers supplied by the service client."""
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    async def list(
        self,
        resource_group_name: str,
        account_name: str,
        **kwargs
    ) -> "_models.ListContainerItems":
        """Lists all containers and does not support a prefix like data plane. Also SRP today does not
        return continuation token.

        :param resource_group_name: The name of the resource group within the user's subscription. The
         name is case insensitive.
        :type resource_group_name: str
        :param account_name: The name of the storage account within the specified resource group.
         Storage account names must be between 3 and 24 characters in length and use numbers and
         lower-case letters only.
        :type account_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ListContainerItems, or the result of cls(response)
        :rtype: ~azure.mgmt.storage.v2018_07_01.models.ListContainerItems
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ListContainerItems"]
        # Map well-known HTTP failures to azure-core exception types; callers may
        # extend or override the mapping via the 'error_map' keyword argument.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-07-01"
        accept = "application/json"

        # Construct URL
        url = self.list.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            # Unexpected status: translate via error_map if mapped, otherwise
            # raise a generic ARM-formatted HTTP error.
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('ListContainerItems', pipeline_response)

        if cls:
            # Hand the raw response and deserialized body to the caller's transformer.
            return cls(pipeline_response, deserialized, {})

        return deserialized
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers'}  # type: ignore
    async def create(
        self,
        resource_group_name: str,
        account_name: str,
        container_name: str,
        blob_container: "_models.BlobContainer",
        **kwargs
    ) -> "_models.BlobContainer":
        """Creates a new container under the specified account as described by request body. The container
        resource includes metadata and properties for that container. It does not include a list of the
        blobs contained by the container.

        :param resource_group_name: The name of the resource group within the user's subscription. The
         name is case insensitive.
        :type resource_group_name: str
        :param account_name: The name of the storage account within the specified resource group.
         Storage account names must be between 3 and 24 characters in length and use numbers and
         lower-case letters only.
        :type account_name: str
        :param container_name: The name of the blob container within the specified storage account.
         Blob container names must be between 3 and 63 characters in length and use numbers, lower-case
         letters and dash (-) only. Every dash (-) character must be immediately preceded and followed
         by a letter or number.
        :type container_name: str
        :param blob_container: Properties of the blob container to create.
        :type blob_container: ~azure.mgmt.storage.v2018_07_01.models.BlobContainer
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: BlobContainer, or the result of cls(response)
        :rtype: ~azure.mgmt.storage.v2018_07_01.models.BlobContainer
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.BlobContainer"]
        # Well-known failure codes -> azure-core exceptions; extendable via kwargs.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-07-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self.create.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
            'containerName': self._serialize.url("container_name", container_name, 'str', max_length=63, min_length=3),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        # Serialize the model into the request body and send a PUT.
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(blob_container, 'BlobContainer')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # Only 201 Created is a success for container creation.
        if response.status_code not in [201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('BlobContainer', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}'}  # type: ignore
    async def update(
        self,
        resource_group_name: str,
        account_name: str,
        container_name: str,
        blob_container: "_models.BlobContainer",
        **kwargs
    ) -> "_models.BlobContainer":
        """Updates container properties as specified in request body. Properties not mentioned in the
        request will be unchanged. Update fails if the specified container doesn't already exist.

        :param resource_group_name: The name of the resource group within the user's subscription. The
         name is case insensitive.
        :type resource_group_name: str
        :param account_name: The name of the storage account within the specified resource group.
         Storage account names must be between 3 and 24 characters in length and use numbers and
         lower-case letters only.
        :type account_name: str
        :param container_name: The name of the blob container within the specified storage account.
         Blob container names must be between 3 and 63 characters in length and use numbers, lower-case
         letters and dash (-) only. Every dash (-) character must be immediately preceded and followed
         by a letter or number.
        :type container_name: str
        :param blob_container: Properties to update for the blob container.
        :type blob_container: ~azure.mgmt.storage.v2018_07_01.models.BlobContainer
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: BlobContainer, or the result of cls(response)
        :rtype: ~azure.mgmt.storage.v2018_07_01.models.BlobContainer
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.BlobContainer"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-07-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self.update.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
            'containerName': self._serialize.url("container_name", container_name, 'str', max_length=63, min_length=3),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        # Partial update semantics: PATCH with only the fields to change serialized.
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(blob_container, 'BlobContainer')
        body_content_kwargs['content'] = body_content
        request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('BlobContainer', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}'}  # type: ignore
    async def get(
        self,
        resource_group_name: str,
        account_name: str,
        container_name: str,
        **kwargs
    ) -> "_models.BlobContainer":
        """Gets properties of a specified container.

        :param resource_group_name: The name of the resource group within the user's subscription. The
         name is case insensitive.
        :type resource_group_name: str
        :param account_name: The name of the storage account within the specified resource group.
         Storage account names must be between 3 and 24 characters in length and use numbers and
         lower-case letters only.
        :type account_name: str
        :param container_name: The name of the blob container within the specified storage account.
         Blob container names must be between 3 and 63 characters in length and use numbers, lower-case
         letters and dash (-) only. Every dash (-) character must be immediately preceded and followed
         by a letter or number.
        :type container_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: BlobContainer, or the result of cls(response)
        :rtype: ~azure.mgmt.storage.v2018_07_01.models.BlobContainer
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.BlobContainer"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-07-01"
        accept = "application/json"

        # Construct URL
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
            'containerName': self._serialize.url("container_name", container_name, 'str', max_length=63, min_length=3),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        # Body-less GET of the container resource.
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('BlobContainer', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}'}  # type: ignore
    async def delete(
        self,
        resource_group_name: str,
        account_name: str,
        container_name: str,
        **kwargs
    ) -> None:
        """Deletes specified container under its account.

        :param resource_group_name: The name of the resource group within the user's subscription. The
         name is case insensitive.
        :type resource_group_name: str
        :param account_name: The name of the storage account within the specified resource group.
         Storage account names must be between 3 and 24 characters in length and use numbers and
         lower-case letters only.
        :type account_name: str
        :param container_name: The name of the blob container within the specified storage account.
         Blob container names must be between 3 and 63 characters in length and use numbers, lower-case
         letters and dash (-) only. Every dash (-) character must be immediately preceded and followed
         by a letter or number.
        :type container_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-07-01"

        # Construct URL
        url = self.delete.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
            'containerName': self._serialize.url("container_name", container_name, 'str', max_length=63, min_length=3),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        # No Accept header: a successful delete carries no response body.
        header_parameters = {}  # type: Dict[str, Any]

        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # Both 200 and 204 count as success for delete.
        if response.status_code not in [200, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        if cls:
            return cls(pipeline_response, None, {})

    delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}'}  # type: ignore
    async def set_legal_hold(
        self,
        resource_group_name: str,
        account_name: str,
        container_name: str,
        legal_hold: "_models.LegalHold",
        **kwargs
    ) -> "_models.LegalHold":
        """Sets legal hold tags. Setting the same tag results in an idempotent operation. SetLegalHold
        follows an append pattern and does not clear out the existing tags that are not specified in
        the request.

        :param resource_group_name: The name of the resource group within the user's subscription. The
         name is case insensitive.
        :type resource_group_name: str
        :param account_name: The name of the storage account within the specified resource group.
         Storage account names must be between 3 and 24 characters in length and use numbers and
         lower-case letters only.
        :type account_name: str
        :param container_name: The name of the blob container within the specified storage account.
         Blob container names must be between 3 and 63 characters in length and use numbers, lower-case
         letters and dash (-) only. Every dash (-) character must be immediately preceded and followed
         by a letter or number.
        :type container_name: str
        :param legal_hold: The LegalHold property that will be set to a blob container.
        :type legal_hold: ~azure.mgmt.storage.v2018_07_01.models.LegalHold
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: LegalHold, or the result of cls(response)
        :rtype: ~azure.mgmt.storage.v2018_07_01.models.LegalHold
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.LegalHold"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-07-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL -- this is a POST action endpoint (/setLegalHold).
        url = self.set_legal_hold.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
            'containerName': self._serialize.url("container_name", container_name, 'str', max_length=63, min_length=3),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(legal_hold, 'LegalHold')
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('LegalHold', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    set_legal_hold.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}/setLegalHold'}  # type: ignore
    async def clear_legal_hold(
        self,
        resource_group_name: str,
        account_name: str,
        container_name: str,
        legal_hold: "_models.LegalHold",
        **kwargs
    ) -> "_models.LegalHold":
        """Clears legal hold tags. Clearing the same or non-existent tag results in an idempotent
        operation. ClearLegalHold clears out only the specified tags in the request.

        :param resource_group_name: The name of the resource group within the user's subscription. The
         name is case insensitive.
        :type resource_group_name: str
        :param account_name: The name of the storage account within the specified resource group.
         Storage account names must be between 3 and 24 characters in length and use numbers and
         lower-case letters only.
        :type account_name: str
        :param container_name: The name of the blob container within the specified storage account.
         Blob container names must be between 3 and 63 characters in length and use numbers, lower-case
         letters and dash (-) only. Every dash (-) character must be immediately preceded and followed
         by a letter or number.
        :type container_name: str
        :param legal_hold: The LegalHold property that will be clear from a blob container.
        :type legal_hold: ~azure.mgmt.storage.v2018_07_01.models.LegalHold
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: LegalHold, or the result of cls(response)
        :rtype: ~azure.mgmt.storage.v2018_07_01.models.LegalHold
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.LegalHold"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-07-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL -- POST action endpoint (/clearLegalHold), mirror of set_legal_hold.
        url = self.clear_legal_hold.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
            'containerName': self._serialize.url("container_name", container_name, 'str', max_length=63, min_length=3),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(legal_hold, 'LegalHold')
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('LegalHold', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    clear_legal_hold.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}/clearLegalHold'}  # type: ignore
    async def create_or_update_immutability_policy(
        self,
        resource_group_name: str,
        account_name: str,
        container_name: str,
        if_match: Optional[str] = None,
        parameters: Optional["_models.ImmutabilityPolicy"] = None,
        **kwargs
    ) -> "_models.ImmutabilityPolicy":
        """Creates or updates an unlocked immutability policy. ETag in If-Match is honored if given but
        not required for this operation.

        :param resource_group_name: The name of the resource group within the user's subscription. The
         name is case insensitive.
        :type resource_group_name: str
        :param account_name: The name of the storage account within the specified resource group.
         Storage account names must be between 3 and 24 characters in length and use numbers and
         lower-case letters only.
        :type account_name: str
        :param container_name: The name of the blob container within the specified storage account.
         Blob container names must be between 3 and 63 characters in length and use numbers, lower-case
         letters and dash (-) only. Every dash (-) character must be immediately preceded and followed
         by a letter or number.
        :type container_name: str
        :param if_match: The entity state (ETag) version of the immutability policy to update. A value
         of "*" can be used to apply the operation only if the immutability policy already exists. If
         omitted, this operation will always be applied.
        :type if_match: str
        :param parameters: The ImmutabilityPolicy Properties that will be created or updated to a blob
         container.
        :type parameters: ~azure.mgmt.storage.v2018_07_01.models.ImmutabilityPolicy
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ImmutabilityPolicy, or the result of cls(response)
        :rtype: ~azure.mgmt.storage.v2018_07_01.models.ImmutabilityPolicy
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ImmutabilityPolicy"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        # The service defines a single policy per container, always named "default".
        immutability_policy_name = "default"
        api_version = "2018-07-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self.create_or_update_immutability_policy.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
            'containerName': self._serialize.url("container_name", container_name, 'str', max_length=63, min_length=3),
            'immutabilityPolicyName': self._serialize.url("immutability_policy_name", immutability_policy_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        # If-Match is optional here: without it the operation is applied unconditionally.
        if if_match is not None:
            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        # Body is optional; an explicit None body is sent when no parameters are given.
        body_content_kwargs = {}  # type: Dict[str, Any]
        if parameters is not None:
            body_content = self._serialize.body(parameters, 'ImmutabilityPolicy')
        else:
            body_content = None
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        # Surface the policy's ETag so callers can do conditional follow-up requests.
        response_headers = {}
        response_headers['ETag'] = self._deserialize('str', response.headers.get('ETag'))
        deserialized = self._deserialize('ImmutabilityPolicy', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, response_headers)

        return deserialized
    create_or_update_immutability_policy.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}/immutabilityPolicies/{immutabilityPolicyName}'}  # type: ignore
    async def get_immutability_policy(
        self,
        resource_group_name: str,
        account_name: str,
        container_name: str,
        if_match: Optional[str] = None,
        **kwargs
    ) -> "_models.ImmutabilityPolicy":
        """Gets the existing immutability policy along with the corresponding ETag in response headers and
        body.

        :param resource_group_name: The name of the resource group within the user's subscription. The
         name is case insensitive.
        :type resource_group_name: str
        :param account_name: The name of the storage account within the specified resource group.
         Storage account names must be between 3 and 24 characters in length and use numbers and
         lower-case letters only.
        :type account_name: str
        :param container_name: The name of the blob container within the specified storage account.
         Blob container names must be between 3 and 63 characters in length and use numbers, lower-case
         letters and dash (-) only. Every dash (-) character must be immediately preceded and followed
         by a letter or number.
        :type container_name: str
        :param if_match: The entity state (ETag) version of the immutability policy to update. A value
         of "*" can be used to apply the operation only if the immutability policy already exists. If
         omitted, this operation will always be applied.
        :type if_match: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ImmutabilityPolicy, or the result of cls(response)
        :rtype: ~azure.mgmt.storage.v2018_07_01.models.ImmutabilityPolicy
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ImmutabilityPolicy"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        # The service defines a single policy per container, always named "default".
        immutability_policy_name = "default"
        api_version = "2018-07-01"
        accept = "application/json"

        # Construct URL
        url = self.get_immutability_policy.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
            'containerName': self._serialize.url("container_name", container_name, 'str', max_length=63, min_length=3),
            'immutabilityPolicyName': self._serialize.url("immutability_policy_name", immutability_policy_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        # Optional conditional read keyed on the policy's ETag.
        if if_match is not None:
            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        # Surface the policy's ETag so callers can do conditional follow-up requests.
        response_headers = {}
        response_headers['ETag'] = self._deserialize('str', response.headers.get('ETag'))
        deserialized = self._deserialize('ImmutabilityPolicy', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, response_headers)

        return deserialized
    get_immutability_policy.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}/immutabilityPolicies/{immutabilityPolicyName}'}  # type: ignore
    async def delete_immutability_policy(
        self,
        resource_group_name: str,
        account_name: str,
        container_name: str,
        if_match: str,
        **kwargs
    ) -> "_models.ImmutabilityPolicy":
        """Aborts an unlocked immutability policy. The response of delete has
        immutabilityPeriodSinceCreationInDays set to 0. ETag in If-Match is required for this
        operation. Deleting a locked immutability policy is not allowed, only way is to delete the
        container after deleting all blobs inside the container.
        :param resource_group_name: The name of the resource group within the user's subscription. The
         name is case insensitive.
        :type resource_group_name: str
        :param account_name: The name of the storage account within the specified resource group.
         Storage account names must be between 3 and 24 characters in length and use numbers and
         lower-case letters only.
        :type account_name: str
        :param container_name: The name of the blob container within the specified storage account.
         Blob container names must be between 3 and 63 characters in length and use numbers, lower-case
         letters and dash (-) only. Every dash (-) character must be immediately preceded and followed
         by a letter or number.
        :type container_name: str
        :param if_match: The entity state (ETag) version of the immutability policy to delete.
         Required for this operation; a value of "*" can be used to apply the operation
         unconditionally.
        :type if_match: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ImmutabilityPolicy, or the result of cls(response)
        :rtype: ~azure.mgmt.storage.v2018_07_01.models.ImmutabilityPolicy
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.ImmutabilityPolicy"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        # The API exposes exactly one policy per container, always named "default".
        immutability_policy_name = "default"
        api_version = "2018-07-01"
        accept = "application/json"
        # Construct URL
        url = self.delete_immutability_policy.metadata['url'] # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
            'containerName': self._serialize.url("container_name", container_name, 'str', max_length=63, min_length=3),
            'immutabilityPolicyName': self._serialize.url("immutability_policy_name", immutability_policy_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # Only 200 is a documented success code for this operation.
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        # Surface the policy's new ETag to a custom `cls` callback, if one was given.
        response_headers = {}
        response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
        deserialized = self._deserialize('ImmutabilityPolicy', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, response_headers)
        return deserialized
    delete_immutability_policy.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}/immutabilityPolicies/{immutabilityPolicyName}'} # type: ignore
    async def lock_immutability_policy(
        self,
        resource_group_name: str,
        account_name: str,
        container_name: str,
        if_match: str,
        **kwargs
    ) -> "_models.ImmutabilityPolicy":
        """Sets the ImmutabilityPolicy to Locked state. The only action allowed on a Locked policy is
        ExtendImmutabilityPolicy action. ETag in If-Match is required for this operation.
        :param resource_group_name: The name of the resource group within the user's subscription. The
         name is case insensitive.
        :type resource_group_name: str
        :param account_name: The name of the storage account within the specified resource group.
         Storage account names must be between 3 and 24 characters in length and use numbers and
         lower-case letters only.
        :type account_name: str
        :param container_name: The name of the blob container within the specified storage account.
         Blob container names must be between 3 and 63 characters in length and use numbers, lower-case
         letters and dash (-) only. Every dash (-) character must be immediately preceded and followed
         by a letter or number.
        :type container_name: str
        :param if_match: The entity state (ETag) version of the immutability policy to lock. Required
         for this operation; a value of "*" can be used to apply the operation unconditionally.
        :type if_match: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ImmutabilityPolicy, or the result of cls(response)
        :rtype: ~azure.mgmt.storage.v2018_07_01.models.ImmutabilityPolicy
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.ImmutabilityPolicy"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-07-01"
        accept = "application/json"
        # Construct URL -- the "default" policy name and "lock" action are baked into the path.
        url = self.lock_immutability_policy.metadata['url'] # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
            'containerName': self._serialize.url("container_name", container_name, 'str', max_length=63, min_length=3),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        # Lock is an action endpoint, hence POST with no request body.
        request = self._client.post(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        # Surface the policy's new ETag to a custom `cls` callback, if one was given.
        response_headers = {}
        response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
        deserialized = self._deserialize('ImmutabilityPolicy', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, response_headers)
        return deserialized
    lock_immutability_policy.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}/immutabilityPolicies/default/lock'} # type: ignore
    async def extend_immutability_policy(
        self,
        resource_group_name: str,
        account_name: str,
        container_name: str,
        if_match: str,
        parameters: Optional["_models.ImmutabilityPolicy"] = None,
        **kwargs
    ) -> "_models.ImmutabilityPolicy":
        """Extends the immutabilityPeriodSinceCreationInDays of a locked immutabilityPolicy. The only
        action allowed on a Locked policy will be this action. ETag in If-Match is required for this
        operation.
        :param resource_group_name: The name of the resource group within the user's subscription. The
         name is case insensitive.
        :type resource_group_name: str
        :param account_name: The name of the storage account within the specified resource group.
         Storage account names must be between 3 and 24 characters in length and use numbers and
         lower-case letters only.
        :type account_name: str
        :param container_name: The name of the blob container within the specified storage account.
         Blob container names must be between 3 and 63 characters in length and use numbers, lower-case
         letters and dash (-) only. Every dash (-) character must be immediately preceded and followed
         by a letter or number.
        :type container_name: str
        :param if_match: The entity state (ETag) version of the immutability policy to extend. Required
         for this operation; a value of "*" can be used to apply the operation unconditionally.
        :type if_match: str
        :param parameters: The ImmutabilityPolicy Properties that will be extended for a blob
         container. Optional; when omitted the request is sent without a body.
        :type parameters: ~azure.mgmt.storage.v2018_07_01.models.ImmutabilityPolicy
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ImmutabilityPolicy, or the result of cls(response)
        :rtype: ~azure.mgmt.storage.v2018_07_01.models.ImmutabilityPolicy
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.ImmutabilityPolicy"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-07-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL -- the "default" policy name and "extend" action are baked into the path.
        url = self.extend_immutability_policy.metadata['url'] # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
            'containerName': self._serialize.url("container_name", container_name, 'str', max_length=63, min_length=3),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        # Construct and send request; body is only serialized when the caller provided one.
        body_content_kwargs = {} # type: Dict[str, Any]
        if parameters is not None:
            body_content = self._serialize.body(parameters, 'ImmutabilityPolicy')
        else:
            body_content = None
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        # Surface the policy's new ETag to a custom `cls` callback, if one was given.
        response_headers = {}
        response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
        deserialized = self._deserialize('ImmutabilityPolicy', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, response_headers)
        return deserialized
    extend_immutability_policy.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}/immutabilityPolicies/default/extend'} # type: ignore
    async def lease(
        self,
        resource_group_name: str,
        account_name: str,
        container_name: str,
        parameters: Optional["_models.LeaseContainerRequest"] = None,
        **kwargs
    ) -> "_models.LeaseContainerResponse":
        """The Lease Container operation establishes and manages a lock on a container for delete
        operations. The lock duration can be 15 to 60 seconds, or can be infinite.
        :param resource_group_name: The name of the resource group within the user's subscription. The
         name is case insensitive.
        :type resource_group_name: str
        :param account_name: The name of the storage account within the specified resource group.
         Storage account names must be between 3 and 24 characters in length and use numbers and
         lower-case letters only.
        :type account_name: str
        :param container_name: The name of the blob container within the specified storage account.
         Blob container names must be between 3 and 63 characters in length and use numbers, lower-case
         letters and dash (-) only. Every dash (-) character must be immediately preceded and followed
         by a letter or number.
        :type container_name: str
        :param parameters: Lease Container request body. Optional; when omitted the request is sent
         without a body.
        :type parameters: ~azure.mgmt.storage.v2018_07_01.models.LeaseContainerRequest
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: LeaseContainerResponse, or the result of cls(response)
        :rtype: ~azure.mgmt.storage.v2018_07_01.models.LeaseContainerResponse
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.LeaseContainerResponse"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-07-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self.lease.metadata['url'] # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
            'containerName': self._serialize.url("container_name", container_name, 'str', max_length=63, min_length=3),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        # Construct and send request; body is only serialized when the caller provided one.
        body_content_kwargs = {} # type: Dict[str, Any]
        if parameters is not None:
            body_content = self._serialize.body(parameters, 'LeaseContainerRequest')
        else:
            body_content = None
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('LeaseContainerResponse', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    # NOTE(review): this generated path uses lowercase 'resourcegroups', unlike the other
    # operations in this class ('resourceGroups'); matches the spec as generated.
    lease.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}/lease'} # type: ignore
| 55.620096 | 297 | 0.687525 |
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
# Signature of the optional `cls` response hook accepted by every operation:
# it receives the raw pipeline response, the deserialized model, and response headers.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class BlobContainersOperations:
    """Async operations for blob containers of a storage account (api-version 2018-07-01).
    Instantiated for you as an attribute of the service client; do not construct directly.
    """
    # Alias to the models module so callers can reach model classes via the operations group.
    models = _models
    def __init__(self, client, config, serializer, deserializer) -> None:
        """Wire up the pipeline client, configuration, and (de)serializers supplied by the service client."""
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    async def list(
        self,
        resource_group_name: str,
        account_name: str,
        **kwargs
    ) -> "_models.ListContainerItems":
        """Lists all containers of the specified storage account (GET, single response, no paging).
        :param resource_group_name: The name of the resource group within the user's subscription.
        :type resource_group_name: str
        :param account_name: The name of the storage account within the specified resource group.
        :type account_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ListContainerItems, or the result of cls(response)
        :rtype: ~azure.mgmt.storage.v2018_07_01.models.ListContainerItems
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-07-01"
        accept = "application/json"
        # Construct URL
        url = self.list.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('ListContainerItems', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers'}
    async def create(
        self,
        resource_group_name: str,
        account_name: str,
        container_name: str,
        blob_container: "_models.BlobContainer",
        **kwargs
    ) -> "_models.BlobContainer":
        """Creates a new blob container under the specified account (PUT; expects 201 Created).
        :param resource_group_name: The name of the resource group within the user's subscription.
        :type resource_group_name: str
        :param account_name: The name of the storage account within the specified resource group.
        :type account_name: str
        :param container_name: The name of the blob container within the specified storage account.
        :type container_name: str
        :param blob_container: Properties of the blob container to create.
        :type blob_container: ~azure.mgmt.storage.v2018_07_01.models.BlobContainer
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: BlobContainer, or the result of cls(response)
        :rtype: ~azure.mgmt.storage.v2018_07_01.models.BlobContainer
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-07-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self.create.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
            'containerName': self._serialize.url("container_name", container_name, 'str', max_length=63, min_length=3),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        # Construct and send request
        body_content_kwargs = {}
        body_content = self._serialize.body(blob_container, 'BlobContainer')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # 201 is the only documented success code for create.
        if response.status_code not in [201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('BlobContainer', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}'}
    async def update(
        self,
        resource_group_name: str,
        account_name: str,
        container_name: str,
        blob_container: "_models.BlobContainer",
        **kwargs
    ) -> "_models.BlobContainer":
        """Updates container properties as specified in request body (PATCH).
        :param resource_group_name: The name of the resource group within the user's subscription.
        :type resource_group_name: str
        :param account_name: The name of the storage account within the specified resource group.
        :type account_name: str
        :param container_name: The name of the blob container within the specified storage account.
        :type container_name: str
        :param blob_container: Properties to update on the blob container.
        :type blob_container: ~azure.mgmt.storage.v2018_07_01.models.BlobContainer
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: BlobContainer, or the result of cls(response)
        :rtype: ~azure.mgmt.storage.v2018_07_01.models.BlobContainer
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-07-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self.update.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
            'containerName': self._serialize.url("container_name", container_name, 'str', max_length=63, min_length=3),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        # Construct and send request
        body_content_kwargs = {}
        body_content = self._serialize.body(blob_container, 'BlobContainer')
        body_content_kwargs['content'] = body_content
        request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('BlobContainer', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}'}
    async def get(
        self,
        resource_group_name: str,
        account_name: str,
        container_name: str,
        **kwargs
    ) -> "_models.BlobContainer":
        """Gets properties of a specified container (GET).
        :param resource_group_name: The name of the resource group within the user's subscription.
        :type resource_group_name: str
        :param account_name: The name of the storage account within the specified resource group.
        :type account_name: str
        :param container_name: The name of the blob container within the specified storage account.
        :type container_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: BlobContainer, or the result of cls(response)
        :rtype: ~azure.mgmt.storage.v2018_07_01.models.BlobContainer
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-07-01"
        accept = "application/json"
        # Construct URL
        url = self.get.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
            'containerName': self._serialize.url("container_name", container_name, 'str', max_length=63, min_length=3),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('BlobContainer', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}'}
    async def delete(
        self,
        resource_group_name: str,
        account_name: str,
        container_name: str,
        **kwargs
    ) -> None:
        """Deletes specified container under its account (DELETE; 200 or 204 are success).
        :param resource_group_name: The name of the resource group within the user's subscription.
        :type resource_group_name: str
        :param account_name: The name of the storage account within the specified resource group.
        :type account_name: str
        :param container_name: The name of the blob container within the specified storage account.
        :type container_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-07-01"
        # Construct URL
        url = self.delete.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
            'containerName': self._serialize.url("container_name", container_name, 'str', max_length=63, min_length=3),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers (no Accept: nothing is deserialized from a successful delete)
        header_parameters = {}
        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}'}
    async def set_legal_hold(
        self,
        resource_group_name: str,
        account_name: str,
        container_name: str,
        legal_hold: "_models.LegalHold",
        **kwargs
    ) -> "_models.LegalHold":
        """Sets legal hold tags on the specified container (POST .../setLegalHold).
        :param resource_group_name: The name of the resource group within the user's subscription.
        :type resource_group_name: str
        :param account_name: The name of the storage account within the specified resource group.
        :type account_name: str
        :param container_name: The name of the blob container within the specified storage account.
        :type container_name: str
        :param legal_hold: The LegalHold property that will be set to a blob container.
        :type legal_hold: ~azure.mgmt.storage.v2018_07_01.models.LegalHold
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: LegalHold, or the result of cls(response)
        :rtype: ~azure.mgmt.storage.v2018_07_01.models.LegalHold
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-07-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self.set_legal_hold.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
            'containerName': self._serialize.url("container_name", container_name, 'str', max_length=63, min_length=3),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        # Construct and send request
        body_content_kwargs = {}
        body_content = self._serialize.body(legal_hold, 'LegalHold')
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('LegalHold', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    set_legal_hold.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}/setLegalHold'}
    async def clear_legal_hold(
        self,
        resource_group_name: str,
        account_name: str,
        container_name: str,
        legal_hold: "_models.LegalHold",
        **kwargs
    ) -> "_models.LegalHold":
        """Clears legal hold tags from the specified container (POST .../clearLegalHold).
        :param resource_group_name: The name of the resource group within the user's subscription.
        :type resource_group_name: str
        :param account_name: The name of the storage account within the specified resource group.
        :type account_name: str
        :param container_name: The name of the blob container within the specified storage account.
        :type container_name: str
        :param legal_hold: The LegalHold property that will be cleared from a blob container.
        :type legal_hold: ~azure.mgmt.storage.v2018_07_01.models.LegalHold
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: LegalHold, or the result of cls(response)
        :rtype: ~azure.mgmt.storage.v2018_07_01.models.LegalHold
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-07-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self.clear_legal_hold.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
            'containerName': self._serialize.url("container_name", container_name, 'str', max_length=63, min_length=3),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        # Construct and send request
        body_content_kwargs = {}
        body_content = self._serialize.body(legal_hold, 'LegalHold')
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('LegalHold', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    clear_legal_hold.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}/clearLegalHold'}
async def create_or_update_immutability_policy(
    self,
    resource_group_name: str,
    account_name: str,
    container_name: str,
    if_match: Optional[str] = None,
    parameters: Optional["_models.ImmutabilityPolicy"] = None,
    **kwargs
) -> "_models.ImmutabilityPolicy":
    """Create or update the immutability policy of a blob container (PUT).

    :param resource_group_name: Resource group name (1-90 chars, limited charset).
    :param account_name: Storage account name (3-24 chars).
    :param container_name: Blob container name (3-63 chars).
    :param if_match: Optional ETag for a conditional update; omitted on create.
    :param parameters: Policy to apply; ``None`` sends an empty request body.
    :return: The deserialized ImmutabilityPolicy (or the result of the optional
        ``cls`` callback passed via ``kwargs``).
    :raises HttpResponseError: For any non-200 response (ARM error format).
    """
    # Optional caller-supplied callback to post-process the raw response.
    cls = kwargs.pop('cls', None)
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    # The service exposes exactly one policy per container, always named "default".
    immutability_policy_name = "default"
    api_version = "2018-07-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"
    # Fill the URL template stored on this method's metadata.
    url = self.create_or_update_immutability_policy.metadata['url']
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
        'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
        'containerName': self._serialize.url("container_name", container_name, 'str', max_length=63, min_length=3),
        'immutabilityPolicyName': self._serialize.url("immutability_policy_name", immutability_policy_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
    }
    url = self._client.format_url(url, **path_format_arguments)
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
    header_parameters = {}
    # If-Match is optional: absent means an unconditional create/replace.
    if if_match is not None:
        header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
    body_content_kwargs = {}
    if parameters is not None:
        body_content = self._serialize.body(parameters, 'ImmutabilityPolicy')
    else:
        body_content = None
    body_content_kwargs['content'] = body_content
    request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response
    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)
    response_headers = {}
    # The returned ETag identifies this policy version for later conditional calls.
    response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
    deserialized = self._deserialize('ImmutabilityPolicy', pipeline_response)
    if cls:
        return cls(pipeline_response, deserialized, response_headers)
    return deserialized
create_or_update_immutability_policy.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}/immutabilityPolicies/{immutabilityPolicyName}'}
async def get_immutability_policy(
    self,
    resource_group_name: str,
    account_name: str,
    container_name: str,
    if_match: Optional[str] = None,
    **kwargs
) -> "_models.ImmutabilityPolicy":
    """Fetch the immutability policy of a blob container (GET).

    :param resource_group_name: Resource group name (1-90 chars, limited charset).
    :param account_name: Storage account name (3-24 chars).
    :param container_name: Blob container name (3-63 chars).
    :param if_match: Optional ETag for a conditional read.
    :return: The deserialized ImmutabilityPolicy (or the result of the optional
        ``cls`` callback passed via ``kwargs``).
    :raises HttpResponseError: For any non-200 response (ARM error format).
    """
    cls = kwargs.pop('cls', None)
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    # The service exposes exactly one policy per container, always named "default".
    immutability_policy_name = "default"
    api_version = "2018-07-01"
    accept = "application/json"
    url = self.get_immutability_policy.metadata['url']
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
        'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
        'containerName': self._serialize.url("container_name", container_name, 'str', max_length=63, min_length=3),
        'immutabilityPolicyName': self._serialize.url("immutability_policy_name", immutability_policy_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
    }
    url = self._client.format_url(url, **path_format_arguments)
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
    header_parameters = {}
    # Conditional GET only when the caller supplied an ETag.
    if if_match is not None:
        header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
    request = self._client.get(url, query_parameters, header_parameters)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response
    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)
    response_headers = {}
    response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
    deserialized = self._deserialize('ImmutabilityPolicy', pipeline_response)
    if cls:
        return cls(pipeline_response, deserialized, response_headers)
    return deserialized
get_immutability_policy.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}/immutabilityPolicies/{immutabilityPolicyName}'}
async def delete_immutability_policy(
    self,
    resource_group_name: str,
    account_name: str,
    container_name: str,
    if_match: str,
    **kwargs
) -> "_models.ImmutabilityPolicy":
    """Delete the (unlocked) immutability policy of a blob container (DELETE).

    :param resource_group_name: Resource group name (1-90 chars, limited charset).
    :param account_name: Storage account name (3-24 chars).
    :param container_name: Blob container name (3-63 chars).
    :param if_match: Required ETag of the policy to delete — the delete is
        always conditional.
    :return: The deserialized ImmutabilityPolicy returned by the service.
    :raises HttpResponseError: For any non-200 response (ARM error format).
    """
    cls = kwargs.pop('cls', None)
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    # The service exposes exactly one policy per container, always named "default".
    immutability_policy_name = "default"
    api_version = "2018-07-01"
    accept = "application/json"
    url = self.delete_immutability_policy.metadata['url']
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
        'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
        'containerName': self._serialize.url("container_name", container_name, 'str', max_length=63, min_length=3),
        'immutabilityPolicyName': self._serialize.url("immutability_policy_name", immutability_policy_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
    }
    url = self._client.format_url(url, **path_format_arguments)
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
    header_parameters = {}
    # Unlike get/create, If-Match is mandatory here.
    header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
    request = self._client.delete(url, query_parameters, header_parameters)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response
    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)
    response_headers = {}
    response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
    deserialized = self._deserialize('ImmutabilityPolicy', pipeline_response)
    if cls:
        return cls(pipeline_response, deserialized, response_headers)
    return deserialized
delete_immutability_policy.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}/immutabilityPolicies/{immutabilityPolicyName}'}
async def lock_immutability_policy(
    self,
    resource_group_name: str,
    account_name: str,
    container_name: str,
    if_match: str,
    **kwargs
) -> "_models.ImmutabilityPolicy":
    """Invoke the container's ``.../immutabilityPolicies/default/lock`` action (POST).

    :param resource_group_name: Resource group name (1-90 chars, limited charset).
    :param account_name: Storage account name (3-24 chars).
    :param container_name: Blob container name (3-63 chars).
    :param if_match: Required ETag of the policy to lock.
    :return: The deserialized ImmutabilityPolicy after the action.
    :raises HttpResponseError: For any non-200 response (ARM error format).
    """
    cls = kwargs.pop('cls', None)
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2018-07-01"
    accept = "application/json"
    # URL template already contains the fixed ".../default/lock" segment.
    url = self.lock_immutability_policy.metadata['url']
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
        'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
        'containerName': self._serialize.url("container_name", container_name, 'str', max_length=63, min_length=3),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
    }
    url = self._client.format_url(url, **path_format_arguments)
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
    header_parameters = {}
    header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
    # POST with no request body.
    request = self._client.post(url, query_parameters, header_parameters)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response
    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)
    response_headers = {}
    response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
    deserialized = self._deserialize('ImmutabilityPolicy', pipeline_response)
    if cls:
        return cls(pipeline_response, deserialized, response_headers)
    return deserialized
lock_immutability_policy.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}/immutabilityPolicies/default/lock'}
async def extend_immutability_policy(
    self,
    resource_group_name: str,
    account_name: str,
    container_name: str,
    if_match: str,
    parameters: Optional["_models.ImmutabilityPolicy"] = None,
    **kwargs
) -> "_models.ImmutabilityPolicy":
    """Invoke the container's ``.../immutabilityPolicies/default/extend`` action (POST).

    :param resource_group_name: Resource group name (1-90 chars, limited charset).
    :param account_name: Storage account name (3-24 chars).
    :param container_name: Blob container name (3-63 chars).
    :param if_match: Required ETag of the policy being extended.
    :param parameters: Policy carrying the new settings; ``None`` sends an
        empty request body.
    :return: The deserialized ImmutabilityPolicy after the action.
    :raises HttpResponseError: For any non-200 response (ARM error format).
    """
    cls = kwargs.pop('cls', None)
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2018-07-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"
    # URL template already contains the fixed ".../default/extend" segment.
    url = self.extend_immutability_policy.metadata['url']
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
        'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
        'containerName': self._serialize.url("container_name", container_name, 'str', max_length=63, min_length=3),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
    }
    url = self._client.format_url(url, **path_format_arguments)
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
    header_parameters = {}
    header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
    body_content_kwargs = {}
    if parameters is not None:
        body_content = self._serialize.body(parameters, 'ImmutabilityPolicy')
    else:
        body_content = None
    body_content_kwargs['content'] = body_content
    request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response
    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)
    response_headers = {}
    response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
    deserialized = self._deserialize('ImmutabilityPolicy', pipeline_response)
    if cls:
        return cls(pipeline_response, deserialized, response_headers)
    return deserialized
extend_immutability_policy.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}/immutabilityPolicies/default/extend'}
async def lease(
    self,
    resource_group_name: str,
    account_name: str,
    container_name: str,
    parameters: Optional["_models.LeaseContainerRequest"] = None,
    **kwargs
) -> "_models.LeaseContainerResponse":
    """Invoke the container's ``.../lease`` action (POST).

    :param resource_group_name: Resource group name (1-90 chars, limited charset).
    :param account_name: Storage account name (3-24 chars).
    :param container_name: Blob container name (3-63 chars).
    :param parameters: Lease request payload; ``None`` sends an empty body.
    :return: The deserialized LeaseContainerResponse.
    :raises HttpResponseError: For any non-200 response (ARM error format).
    """
    cls = kwargs.pop('cls', None)
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2018-07-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"
    url = self.lease.metadata['url']
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
        'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
        'containerName': self._serialize.url("container_name", container_name, 'str', max_length=63, min_length=3),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
    }
    url = self._client.format_url(url, **path_format_arguments)
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
    header_parameters = {}
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
    body_content_kwargs = {}
    if parameters is not None:
        body_content = self._serialize.body(parameters, 'LeaseContainerRequest')
    else:
        body_content = None
    body_content_kwargs['content'] = body_content
    request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response
    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)
    # No response headers are surfaced for this action.
    deserialized = self._deserialize('LeaseContainerResponse', pipeline_response)
    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
lease.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}/lease'}
| true | true |
f72432220e47112d35c216df1584e2f8fc4a4e9c | 2,059 | py | Python | Foundations_of_Private_Computation/Split_Learning/concepts-definitions-code/ite-repo/demos/analytical_values/demo_i_renyi.py | gonzalo-munillag/Private_AI_OpenMined | c23da9cc1c914d10646a0c0bc1a2497fe2cbaaca | [
"MIT"
] | 5 | 2021-01-06T16:49:22.000Z | 2021-02-19T05:34:27.000Z | Foundations_of_Private_Computation/Split_Learning/concepts-definitions-code/ite-repo/demos/analytical_values/demo_i_renyi.py | gonzalo-munillag/Private_AI_OpenMined | c23da9cc1c914d10646a0c0bc1a2497fe2cbaaca | [
"MIT"
] | null | null | null | Foundations_of_Private_Computation/Split_Learning/concepts-definitions-code/ite-repo/demos/analytical_values/demo_i_renyi.py | gonzalo-munillag/Private_AI_OpenMined | c23da9cc1c914d10646a0c0bc1a2497fe2cbaaca | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
""" Demo for Renyi mutual information estimators.
Analytical vs estimated value is illustrated for normal random variables.
"""
from numpy.random import rand, multivariate_normal
from numpy import arange, zeros, dot, ones
import matplotlib.pyplot as plt
from ite.cost.x_factory import co_factory
from ite.cost.x_analytical_values import analytical_value_i_renyi
def main():
    """Plot estimated vs. analytical Renyi mutual information for a normal
    random vector, as a function of sample size."""
    # --- configuration ---------------------------------------------------
    alpha = 0.7                 # Renyi parameter; must not equal 1
    dim = 2                     # dimension of the joint distribution (>= 2)
    num_of_samples_v = arange(100, 10 * 1000 + 1, 500)
    cost_name = 'MIRenyi_DR'    # alternative estimator: 'MIRenyi_HR'

    # --- initialization --------------------------------------------------
    distr = 'normal'            # fixed; only the normal case is implemented
    ds = ones(dim, dtype='int')            # each coordinate is its own 'subspace'
    max_samples = num_of_samples_v[-1]
    num_runs = len(num_of_samples_v)
    estimator = co_factory(cost_name, mult=True, alpha=alpha)  # cost object
    i_hat_v = zeros(num_runs)   # estimated MI, one entry per sample size

    if distr != 'normal':
        raise Exception('Distribution=?')
    # Random mean and SPD covariance; draw all samples up front, y ~ N(mean, cov).
    mean = rand(dim)
    chol_like = rand(dim, dim)
    cov = dot(chol_like, chol_like.T)
    y = multivariate_normal(mean, cov, max_samples)
    par = {"cov": cov}
    i_true = analytical_value_i_renyi(distr, alpha, par)

    # --- estimation ------------------------------------------------------
    for run, sample_count in enumerate(num_of_samples_v):
        i_hat_v[run] = estimator.estimation(y[0:sample_count], ds)  # broadcast
        print("tk={0}/{1}".format(run + 1, num_runs))

    # --- plot ------------------------------------------------------------
    plt.plot(num_of_samples_v, i_hat_v, num_of_samples_v, ones(num_runs) * i_true)
    plt.xlabel('Number of samples')
    plt.ylabel('Renyi mutual information')
    plt.legend(('estimation', 'analytical value'), loc='best')
    plt.title("Estimator: " + cost_name)
    plt.show()
if __name__ == "__main__":
main()
| 30.279412 | 75 | 0.643031 |
from numpy.random import rand, multivariate_normal
from numpy import arange, zeros, dot, ones
import matplotlib.pyplot as plt
from ite.cost.x_factory import co_factory
from ite.cost.x_analytical_values import analytical_value_i_renyi
def main():
    """Compare estimated vs. analytical Renyi mutual information for a
    normal random vector across increasing sample sizes."""
    # Renyi parameter (must differ from 1) and dimension of the distribution.
    alpha = 0.7
    dim = 2
    num_of_samples_v = arange(100, 10*1000+1, 500)
    cost_name = 'MIRenyi_DR'
    # Distribution is fixed; only the normal case is implemented below.
    distr = 'normal'
    # Each one-dimensional coordinate forms its own 'subspace'.
    ds = ones(dim, dtype='int')
    num_of_samples_max = num_of_samples_v[-1]
    length = len(num_of_samples_v)
    co = co_factory(cost_name, mult=True, alpha=alpha)  # estimator (cost) object
    i_hat_v = zeros(length)  # estimated MI, one entry per sample size
    if distr == 'normal':
        # Random mean and SPD covariance (l @ l.T); samples y ~ N(m, c).
        m = rand(dim)
        l = rand(dim, dim)
        c = dot(l, l.T)
        y = multivariate_normal(m, c, num_of_samples_max)
        par = {"cov": c}
    else:
        raise Exception('Distribution=?')
    i = analytical_value_i_renyi(distr, alpha, par)
    # Estimate on growing prefixes of the one sample set drawn above.
    for (tk, num_of_samples) in enumerate(num_of_samples_v):
        i_hat_v[tk] = co.estimation(y[0:num_of_samples], ds)
        print("tk={0}/{1}".format(tk+1, length))
    # Plot estimates against the constant analytical value.
    plt.plot(num_of_samples_v, i_hat_v, num_of_samples_v, ones(length)*i)
    plt.xlabel('Number of samples')
    plt.ylabel('Renyi mutual information')
    plt.legend(('estimation', 'analytical value'), loc='best')
    plt.title("Estimator: " + cost_name)
    plt.show()
if __name__ == "__main__":
main()
| true | true |
f724339a771fde8c1bbb61d0248adbfa3b022d68 | 1,873 | py | Python | Nemea/backend/Reporters.py | Akhady/Nemea-GUI | 34820f13a588ed18529200d31c7d16d3f53f2020 | [
"BSD-3-Clause"
] | null | null | null | Nemea/backend/Reporters.py | Akhady/Nemea-GUI | 34820f13a588ed18529200d31c7d16d3f53f2020 | [
"BSD-3-Clause"
] | null | null | null | Nemea/backend/Reporters.py | Akhady/Nemea-GUI | 34820f13a588ed18529200d31c7d16d3f53f2020 | [
"BSD-3-Clause"
] | 1 | 2019-06-05T08:04:04.000Z | 2019-06-05T08:04:04.000Z | """
Advanced reporter configuration module
This module retrieves configuration in YAML format and converts it to JSON
There is only PUT method for editing the configuration
Path to configuration is specified in config.ini in this folder.
"""
from liberouterapi import config
from liberouterapi.error import ApiException
from bson import json_util as json
import yaml
from flask import request
import os
# Python 2 fix for FileNotFoundError
try:
FileNotFoundError
except NameError:
FileNotFoundError = IOError
class ReporterError(ApiException):
status_code = 500
if 'reporters_config' not in config['nemea']:
raise ReporterError("missing path to reporters configuration file 'reporters_config'")
else:
rc_path = config['nemea']['reporters_config']
def get_nr_config():
    """Read the reporters configuration (YAML) and return it as a JSON string.

    The file path comes from module-level ``rc_path``. Raises ReporterError
    (404) when the file is missing, and a generic ReporterError for parse or
    I/O failures.
    """
    rconf = None
    try:
        with open(rc_path) as f:
            try:
                # safe_load instead of the unsafe/deprecated bare yaml.load():
                # this file is written by edit_nr_config() via yaml.safe_dump,
                # so it is guaranteed to contain plain YAML data only.
                rconf = yaml.safe_load(f)
            except Exception as e:
                raise ReporterError("Error while parsing config file")
    except FileNotFoundError as e:
        # report not found file with absolute path
        raise ReporterError("File %s not found" % os.path.abspath(config['nemea']['reporters_config']),
                status_code = 404)
    except Exception as e:
        raise ReporterError(str(e))
    return(json.dumps(rconf))
def edit_nr_config():
    """
    Receive a JSON-formatted reporter configuration in the request body and
    dump it as YAML to ``rc_path`` (creating the file if needed). Returns the
    stored configuration back as a JSON string.
    """
    new_conf = request.get_json()
    with open(rc_path, 'w') as out_file:
        # safe_dump with explicit encoding keeps Python 2 from emitting
        # undesired unicode markers:
        # https://stackoverflow.com/questions/20352794/pyyaml-is-producing-undesired-python-unicode-output
        yaml.safe_dump(
            new_conf,
            out_file,
            default_flow_style=False,
            indent=4,
            encoding='utf-8',
            allow_unicode=True,
        )
    return json.dumps(new_conf)
| 30.209677 | 106 | 0.698879 | from liberouterapi import config
from liberouterapi.error import ApiException
from bson import json_util as json
import yaml
from flask import request
import os
try:
FileNotFoundError
except NameError:
FileNotFoundError = IOError
class ReporterError(ApiException):
status_code = 500
if 'reporters_config' not in config['nemea']:
raise ReporterError("missing path to reporters configuration file 'reporters_config'")
else:
rc_path = config['nemea']['reporters_config']
def get_nr_config():
    """Read the reporters YAML configuration from ``rc_path`` and return it
    as a JSON string; ReporterError(404) if the file does not exist."""
    rconf = None
    try:
        with open(rc_path) as f:
            try:
                # safe_load instead of the unsafe/deprecated bare yaml.load();
                # the file is produced by edit_nr_config() with yaml.safe_dump.
                rconf = yaml.safe_load(f)
            except Exception as e:
                raise ReporterError("Error while parsing config file")
    except FileNotFoundError as e:
        raise ReporterError("File %s not found" % os.path.abspath(config['nemea']['reporters_config']),
                status_code = 404)
    except Exception as e:
        raise ReporterError(str(e))
    return(json.dumps(rconf))
def edit_nr_config():
    """Store the JSON request body as the reporters YAML config file and
    echo it back as JSON."""
    conf = request.get_json()
    with open(rc_path, 'w') as yf:
        # safe_dump + explicit utf-8 keeps the output unicode-clean.
        yaml.safe_dump(conf, yf,
                default_flow_style=False, indent = 4, encoding='utf-8', allow_unicode=True)
    return json.dumps(conf)
| true | true |
f72433dd66be88aac1eb531a991f3b9501a547f9 | 890 | py | Python | scripts/hash_value.py | kemysr/cloud-run-sample-flask | c9e7c71a2671f06ec9948e43f57aad69d6074f79 | [
"Apache-2.0"
] | 6 | 2020-06-03T19:48:28.000Z | 2022-03-13T13:35:23.000Z | scripts/hash_value.py | kemysr/cloud-run-sample-flask | c9e7c71a2671f06ec9948e43f57aad69d6074f79 | [
"Apache-2.0"
] | null | null | null | scripts/hash_value.py | kemysr/cloud-run-sample-flask | c9e7c71a2671f06ec9948e43f57aad69d6074f79 | [
"Apache-2.0"
] | 21 | 2020-06-03T19:59:37.000Z | 2022-03-29T21:44:19.000Z | #!/usr/bin/env python3
import argparse
import hashlib
# This script reads the --value argument from the command
# line and outputs its SHA 512 hash.
# In this tutorial, we use this for generating the value
# that the application uses for its basic authentication.
# We do this, so the basic auth secret is not stored in
# raw format on Google Cloud Run, which could impose a
# security risk.
if __name__ == '__main__':
    # Read the required --value flag and print the SHA-512 hex digest of its
    # UTF-8 encoding. Used in the tutorial so the basic-auth secret is stored
    # hashed rather than in the clear.
    arg_parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter
    )
    arg_parser.add_argument('--value', help='The value to hash', required=True)
    cli_args = arg_parser.parse_args()
    if not cli_args.value.strip():
        raise ValueError("--value argument can't be blank!")
    digest = hashlib.sha512(cli_args.value.encode('utf8')).hexdigest()
    print(digest)
| 28.709677 | 75 | 0.71573 |
import argparse
import hashlib
if __name__ == '__main__':
    # Parse the mandatory --value flag from the command line.
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter
    )
    parser.add_argument('--value', help='The value to hash', required=True)
    args = parser.parse_args()
    if (not args.value.strip()):
        raise ValueError("--value argument can't be blank!")
    # SHA-512 of the UTF-8 encoding of the value, printed as hex.
    hash_object = hashlib.sha512(bytearray(args.value, encoding='utf8'))
    hashed = hash_object.hexdigest()
    print(hashed)
| true | true |
f72433f9b152958ddcdfd7ecaa36b8bc22aec097 | 71 | py | Python | aiida_orca/workchains/__init__.py | pzarabadip/aiida-orca | 5b2cba2b518837c35179b52ac1141eda27609f4b | [
"MIT"
] | 3 | 2021-03-24T08:29:07.000Z | 2021-06-26T20:53:03.000Z | aiida_orca/workchains/__init__.py | pzarabadip/aiida-orca | 5b2cba2b518837c35179b52ac1141eda27609f4b | [
"MIT"
] | 25 | 2020-03-13T23:18:48.000Z | 2021-12-06T19:07:41.000Z | aiida_orca/workchains/__init__.py | pzarabadip/aiida-orca | 5b2cba2b518837c35179b52ac1141eda27609f4b | [
"MIT"
] | 2 | 2020-03-19T19:54:32.000Z | 2021-07-05T18:41:09.000Z | """Initialize OrcaBaseWorkChain"""
from .base import OrcaBaseWorkChain
| 23.666667 | 35 | 0.816901 | from .base import OrcaBaseWorkChain
| true | true |
f72437e9ba479ba40bff626aedeb17230a9014ce | 247 | py | Python | mongoapi/hotline_database/hotline_model.py | 133794m3r/i_am_not_forgotten | f70d117fa2f36aacd335575c6932840c8a3f8204 | [
"MIT"
] | null | null | null | mongoapi/hotline_database/hotline_model.py | 133794m3r/i_am_not_forgotten | f70d117fa2f36aacd335575c6932840c8a3f8204 | [
"MIT"
] | null | null | null | mongoapi/hotline_database/hotline_model.py | 133794m3r/i_am_not_forgotten | f70d117fa2f36aacd335575c6932840c8a3f8204 | [
"MIT"
] | 2 | 2020-06-14T11:27:04.000Z | 2020-06-15T23:09:29.000Z | from .hotline_db import hotline_db
class crisis_numbers(hotline_db.Document):
    """Document mapping one country to its crisis-hotline contact details.

    NOTE: trailing extraction residue that had been fused onto the last field
    line was removed; the three fields below are the complete definition.
    """
    # Country name; unique so each country has at most one record.
    country = hotline_db.StringField(required=True, unique=True)
    # Hotline phone number(s), stored as a single string.
    numbers = hotline_db.StringField(required=True)
    # Website for the hotline service.
    website = hotline_db.StringField(required=True)
class crisis_numbers(hotline_db.Document):
    """Document mapping one country to its crisis-hotline contact details.

    NOTE: trailing extraction residue that had been fused onto the last field
    line was removed; the three fields below are the complete definition.
    """
    country = hotline_db.StringField(required=True, unique=True)
    numbers = hotline_db.StringField(required=True)
    website = hotline_db.StringField(required=True)
f7243801a587811168765ed7f35e7bc015e97c4f | 621 | py | Python | bag_serdes_ec-master/scripts_test/passives/ctle.py | tinapiao/Software-IC-Automation | 74b23cd94aa6e4658b110e93b5deb635e014f3a6 | [
"BSD-3-Clause"
] | null | null | null | bag_serdes_ec-master/scripts_test/passives/ctle.py | tinapiao/Software-IC-Automation | 74b23cd94aa6e4658b110e93b5deb635e014f3a6 | [
"BSD-3-Clause"
] | null | null | null | bag_serdes_ec-master/scripts_test/passives/ctle.py | tinapiao/Software-IC-Automation | 74b23cd94aa6e4658b110e93b5deb635e014f3a6 | [
"BSD-3-Clause"
] | 1 | 2020-01-07T04:53:53.000Z | 2020-01-07T04:53:53.000Z | # -*- coding: utf-8 -*-
import yaml
from bag.core import BagProject
from serdes_ec.layout.analog.passives import PassiveCTLE
if __name__ == '__main__':
    # Load the CTLE block specification from the test spec file.
    with open('specs_test/serdes_ec/passives/ctle.yaml', 'r') as f:
        # NOTE(review): bare yaml.load() without a Loader is deprecated and
        # can construct arbitrary Python objects; if the spec file is plain
        # data, switch to yaml.safe_load(f) — confirm no custom tags first.
        block_specs = yaml.load(f)
    # Reuse an existing BagProject from the interactive namespace when
    # re-running this script in the same session.
    local_dict = locals()
    if 'bprj' not in local_dict:
        print('creating BAG project')
        bprj = BagProject()
    else:
        print('loading BAG project')
        bprj = local_dict['bprj']
    bprj.generate_cell(block_specs, PassiveCTLE, debug=True)
    # bprj.generate_cell(block_specs, PassiveCTLE, gen_sch=True, run_lvs=True, debug=True)
| 24.84 | 90 | 0.673108 |
import yaml
from bag.core import BagProject
from serdes_ec.layout.analog.passives import PassiveCTLE
if __name__ == '__main__':
    # Load the CTLE block specification from the test spec file.
    with open('specs_test/serdes_ec/passives/ctle.yaml', 'r') as f:
        # NOTE(review): bare yaml.load() without a Loader is deprecated and
        # unsafe on untrusted input; consider yaml.safe_load(f).
        block_specs = yaml.load(f)
    # Reuse an existing BagProject from the interactive namespace if present.
    local_dict = locals()
    if 'bprj' not in local_dict:
        print('creating BAG project')
        bprj = BagProject()
    else:
        print('loading BAG project')
        bprj = local_dict['bprj']
    bprj.generate_cell(block_specs, PassiveCTLE, debug=True)
| true | true |
f72438931987b95f573f3d0f7e0d72e2f7f4286b | 255 | py | Python | config.py | Yupps00/Addarr | 8e5edccbd02484eacadce3c1301199849d5c24e5 | [
"MIT"
] | null | null | null | config.py | Yupps00/Addarr | 8e5edccbd02484eacadce3c1301199849d5c24e5 | [
"MIT"
] | null | null | null | config.py | Yupps00/Addarr | 8e5edccbd02484eacadce3c1301199849d5c24e5 | [
"MIT"
] | null | null | null | import yaml
from definitions import CONFIG_PATH, DEFAULT_SETTINGS
# Load the user's YAML config. The context manager closes the file handle
# (the original bare open() call leaked it). An empty config file parses to
# None, so fall back to an empty dict before merging in defaults.
with open(CONFIG_PATH, encoding="utf8") as _config_file:
    config = yaml.safe_load(_config_file)
if config is None:
    config = {}
# Backfill any settings the user's file omits with their default values.
for setting, default_value in DEFAULT_SETTINGS.items():
    if setting not in config:
        config[setting] = default_value
| 25.5 | 59 | 0.768627 | import yaml
from definitions import CONFIG_PATH, DEFAULT_SETTINGS
# NOTE(review): the open() handle here is never closed — consider wrapping in
# a "with" block. An empty YAML file would also make config None.
config = yaml.safe_load(open(CONFIG_PATH, encoding="utf8"))
# Backfill any settings missing from the user's file with defaults.
for setting, default_value in DEFAULT_SETTINGS.items():
    if setting not in config:
        config[setting] = default_value
| true | true |
f7243924de59d376bd6df8d43b9f799febe44bf6 | 1,861 | py | Python | src/installer/src/tortuga/node/types.py | sutasu/tortuga | 48d7cde4fa652346600b217043b4a734fa2ba455 | [
"Apache-2.0"
] | 33 | 2018-03-02T17:07:39.000Z | 2021-05-21T18:02:51.000Z | src/installer/src/tortuga/node/types.py | sutasu/tortuga | 48d7cde4fa652346600b217043b4a734fa2ba455 | [
"Apache-2.0"
] | 201 | 2018-03-05T14:28:24.000Z | 2020-11-23T19:58:27.000Z | src/installer/src/tortuga/node/types.py | sutasu/tortuga | 48d7cde4fa652346600b217043b4a734fa2ba455 | [
"Apache-2.0"
] | 23 | 2018-03-02T17:21:59.000Z | 2020-11-18T14:52:38.000Z | from typing import Dict, Optional
from marshmallow import fields, validate
from tortuga.node.state import ALLOWED_NODE_STATES
from tortuga.types.base import BaseType, BaseTypeSchema
# Shared marshmallow validator: the node "state" field must be one of the
# states enumerated in ALLOWED_NODE_STATES.
NodeStateValidator = validate.OneOf(
    choices=ALLOWED_NODE_STATES,
    error="Invalid node state '{input}'; must be one of {choices}"
)
class NodeSchema(BaseTypeSchema):
    """Marshmallow schema for the full node resource."""
    name = fields.String()
    public_hostname = fields.String()
    # Validated against ALLOWED_NODE_STATES.
    state = fields.String(validate=NodeStateValidator)
    hardwareprofile_id = fields.String()
    softwareprofile_id = fields.String()
    locked = fields.String()
    tags = fields.Dict()
    # dump_only: serialized on output, ignored when loading input.
    last_update = fields.String(dump_only=True)
class Node(BaseType):
    """Node resource: identity, state, profile references, lock flag, tags."""

    schema_class = NodeSchema
    type = 'node'

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # All attributes are optional; missing keys default to None
        # (tags defaults to an empty dict).
        get = kwargs.get
        self.name: Optional[str] = get('name')
        self.public_hostname: Optional[str] = get('public_hostname')
        self.state: Optional[str] = get('state')
        self.hardwareprofile_id: Optional[str] = get('hardwareprofile_id')
        self.softwareprofile_id: Optional[str] = get('softwareprofile_id')
        self.locked: Optional[str] = get('locked')
        self.tags: Dict[str, str] = get('tags', {})
        self.last_update: Optional[str] = get('last_update')
class NodeStatusSchema(BaseTypeSchema):
    """Marshmallow schema for the trimmed, status-only node resource."""
    # Validated against ALLOWED_NODE_STATES.
    state = fields.String(validate=NodeStateValidator)
    # dump_only: serialized on output, ignored when loading input.
    last_update = fields.String(dump_only=True)
class NodeStatus(BaseType):
    """Trimmed node resource carrying only state information."""

    schema_class = NodeStatusSchema
    type = 'node'

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Both attributes are optional and default to None.
        get = kwargs.get
        self.state: Optional[str] = get('state')
        self.last_update: Optional[str] = get('last_update')
| 32.086207 | 73 | 0.681891 | from typing import Dict, Optional
from marshmallow import fields, validate
from tortuga.node.state import ALLOWED_NODE_STATES
from tortuga.types.base import BaseType, BaseTypeSchema
# Shared validator: node "state" must be one of ALLOWED_NODE_STATES.
NodeStateValidator = validate.OneOf(
    choices=ALLOWED_NODE_STATES,
    error="Invalid node state '{input}'; must be one of {choices}"
)
class NodeSchema(BaseTypeSchema):
    """Marshmallow schema for the full node resource."""
    name = fields.String()
    public_hostname = fields.String()
    state = fields.String(validate=NodeStateValidator)
    hardwareprofile_id = fields.String()
    softwareprofile_id = fields.String()
    locked = fields.String()
    tags = fields.Dict()
    # dump_only: serialized on output, ignored when loading input.
    last_update = fields.String(dump_only=True)
class Node(BaseType):
    """Node resource: identity, state, profile references, lock flag, tags."""
    schema_class = NodeSchema
    type = 'node'
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # All attributes are optional; missing keys default to None
        # (tags defaults to an empty dict).
        self.name: Optional[str] = kwargs.get('name', None)
        self.public_hostname: Optional[str] = \
            kwargs.get('public_hostname', None)
        self.state: Optional[str] = kwargs.get('state', None)
        self.hardwareprofile_id: Optional[str] = \
            kwargs.get('hardwareprofile_id', None)
        self.softwareprofile_id: Optional[str] = \
            kwargs.get('softwareprofile_id', None)
        self.locked: Optional[str] = kwargs.get('locked', None)
        self.tags: Dict[str, str] = kwargs.get('tags', {})
        self.last_update: Optional[str] = kwargs.get('last_update', None)
class NodeStatusSchema(BaseTypeSchema):
    """Marshmallow schema for the status-only node resource."""
    state = fields.String(validate=NodeStateValidator)
    # dump_only: serialized on output, ignored when loading input.
    last_update = fields.String(dump_only=True)
class NodeStatus(BaseType):
    """Status-only view of a node; serialized via NodeStatusSchema."""
    schema_class = NodeStatusSchema
    type = 'node'
    def __init__(self, **kwargs):
        """Populate attributes from keyword arguments; missing keys default to None."""
        super().__init__(**kwargs)
        self.state: Optional[str] = kwargs.get('state', None)
        self.last_update: Optional[str] = kwargs.get('last_update', None)
| true | true |
f72439a2484b51b9b7304a2216a642943331134b | 1,769 | py | Python | tests/util/key_tool.py | HiveProject2021/chives-light-wallet | 0c7c36bfc703b26ce3c938027de643dc90e4191f | [
"Apache-2.0"
] | 7 | 2021-12-26T11:05:19.000Z | 2022-02-24T10:42:45.000Z | tests/util/key_tool.py | HiveProject2021/chives-light-wallet | 0c7c36bfc703b26ce3c938027de643dc90e4191f | [
"Apache-2.0"
] | 8 | 2021-12-14T17:27:29.000Z | 2022-03-29T18:18:22.000Z | tests/util/key_tool.py | HiveProject2021/chives-light-wallet | 0c7c36bfc703b26ce3c938027de643dc90e4191f | [
"Apache-2.0"
] | 1 | 2021-12-09T23:51:12.000Z | 2021-12-09T23:51:12.000Z | from typing import List
from blspy import AugSchemeMPL, G2Element, PrivateKey
from chives.types.blockchain_format.sized_bytes import bytes32
from chives.types.coin_spend import CoinSpend
from chives.util.condition_tools import conditions_by_opcode, conditions_for_solution, pkm_pairs_for_conditions_dict
from tests.core.make_block_generator import GROUP_ORDER, int_to_public_key
from tests.block_tools import test_constants
class KeyTool(dict):
    """Test helper: a dict mapping serialized BLS public keys (bytes) to
    their secret exponents, able to sign and aggregate signatures for the
    conditions of a coin spend."""
    @classmethod
    def __new__(cls, *args):
        # NOTE(review): declaring __new__ as a classmethod is unusual, but
        # KeyTool() relies on this exact call shape — kept as-is.
        return dict.__new__(*args)

    def add_secret_exponents(self, secret_exponents: List[int]) -> None:
        """Register each exponent (reduced mod GROUP_ORDER) under the bytes
        of its corresponding public key."""
        for exponent in secret_exponents:
            self[bytes(int_to_public_key(exponent))] = exponent % GROUP_ORDER

    def sign(self, public_key: bytes, message_hash: bytes32) -> G2Element:
        """Sign *message_hash* with the secret key registered for *public_key*.

        Raises:
            ValueError: if the public key was never registered.
        """
        secret_exponent = self.get(public_key)
        if not secret_exponent:
            raise ValueError("unknown pubkey %s" % public_key.hex())
        bls_private_key = PrivateKey.from_bytes(secret_exponent.to_bytes(32, "big"))
        return AugSchemeMPL.sign(bls_private_key, message_hash)

    def signature_for_solution(self, coin_spend: CoinSpend, additional_data: bytes) -> G2Element:
        """Aggregate one signature per (pubkey, message) pair demanded by the
        spend's conditions.

        The return annotation previously said ``AugSchemeMPL`` (a namespace of
        static methods); ``AugSchemeMPL.aggregate`` actually returns a
        ``G2Element`` signature, so the annotation is corrected here.
        """
        signatures = []
        err, conditions, cost = conditions_for_solution(
            coin_spend.puzzle_reveal, coin_spend.solution, test_constants.MAX_BLOCK_COST_CLVM
        )
        assert conditions is not None
        conditions_dict = conditions_by_opcode(conditions)
        for public_key, message_hash in pkm_pairs_for_conditions_dict(
            conditions_dict, coin_spend.coin.name(), additional_data
        ):
            signatures.append(self.sign(bytes(public_key), message_hash))
        return AugSchemeMPL.aggregate(signatures)
| 43.146341 | 116 | 0.734878 | from typing import List
from blspy import AugSchemeMPL, G2Element, PrivateKey
from chives.types.blockchain_format.sized_bytes import bytes32
from chives.types.coin_spend import CoinSpend
from chives.util.condition_tools import conditions_by_opcode, conditions_for_solution, pkm_pairs_for_conditions_dict
from tests.core.make_block_generator import GROUP_ORDER, int_to_public_key
from tests.block_tools import test_constants
class KeyTool(dict):
    """Test helper mapping serialized BLS public keys to secret exponents;
    signs and aggregates signatures for a coin spend's conditions."""
    @classmethod
    def __new__(cls, *args):
        # NOTE(review): __new__ as a classmethod is unusual; kept as-is.
        return dict.__new__(*args)
    def add_secret_exponents(self, secret_exponents: List[int]) -> None:
        """Register each exponent under its public-key bytes (mod GROUP_ORDER)."""
        for _ in secret_exponents:
            self[bytes(int_to_public_key(_))] = _ % GROUP_ORDER
    def sign(self, public_key: bytes, message_hash: bytes32) -> G2Element:
        """Sign message_hash with the registered key; ValueError if unknown."""
        secret_exponent = self.get(public_key)
        if not secret_exponent:
            raise ValueError("unknown pubkey %s" % public_key.hex())
        bls_private_key = PrivateKey.from_bytes(secret_exponent.to_bytes(32, "big"))
        return AugSchemeMPL.sign(bls_private_key, message_hash)
    def signature_for_solution(self, coin_spend: CoinSpend, additional_data: bytes) -> G2Element:
        """Aggregate signatures for all (pubkey, message) pairs demanded by
        the spend's conditions (AugSchemeMPL.aggregate returns a G2Element)."""
        signatures = []
        err, conditions, cost = conditions_for_solution(
            coin_spend.puzzle_reveal, coin_spend.solution, test_constants.MAX_BLOCK_COST_CLVM
        )
        assert conditions is not None
        conditions_dict = conditions_by_opcode(conditions)
        for public_key, message_hash in pkm_pairs_for_conditions_dict(
            conditions_dict, coin_spend.coin.name(), additional_data
        ):
            signature = self.sign(bytes(public_key), message_hash)
            signatures.append(signature)
        return AugSchemeMPL.aggregate(signatures)
| true | true |
f7243b01372fbdcb620be6192d4501a32b4cd452 | 89 | py | Python | apps/credito/apps.py | andipandiber/CajaAhorros | cb0769fc04529088768ea650f9ee048bd9a55837 | [
"MIT"
] | null | null | null | apps/credito/apps.py | andipandiber/CajaAhorros | cb0769fc04529088768ea650f9ee048bd9a55837 | [
"MIT"
] | 8 | 2021-03-30T13:39:24.000Z | 2022-03-12T00:36:15.000Z | apps/credito/apps.py | andresbermeoq/CajaAhorros | cb0769fc04529088768ea650f9ee048bd9a55837 | [
"MIT"
] | null | null | null | from django.apps import AppConfig
class CreditoConfig(AppConfig):
    """Django application configuration for the 'credito' (credit) app."""
    name = 'credito'
| 14.833333 | 33 | 0.752809 | from django.apps import AppConfig
class CreditoConfig(AppConfig):
    """Django application configuration for the 'credito' (credit) app."""
    name = 'credito'
| true | true |
f7243b66f9c1cefc6724cfff38cd884d864c2db8 | 531 | py | Python | university_entities.py | kozlovsky/ponymodules | 4b123ca31cfe48bdb314e41eb21b842ae0825554 | [
"MIT"
] | 3 | 2015-02-03T12:05:23.000Z | 2018-02-14T12:37:30.000Z | university_entities.py | kozlovsky/ponymodules | 4b123ca31cfe48bdb314e41eb21b842ae0825554 | [
"MIT"
] | null | null | null | university_entities.py | kozlovsky/ponymodules | 4b123ca31cfe48bdb314e41eb21b842ae0825554 | [
"MIT"
] | null | null | null | # This module contains entities for some specific application domain,
# namely - for the university
from pony.orm import *
from base_entities import db
class Teacher(db.User):
    """A teacher; extends the base User with a degree and taught courses."""
    degree = Required(str)
    # Many-to-many with Course (reverse side of Course.teachers).
    courses = Set("Course")
class Student(db.User):
    """A student; belongs to a group and enrolls in courses."""
    group = Required("Group")
    courses = Set("Course")
    # Grade point average.
    gpa = Required(float)
class Group(db.Entity):
    """A study group identified by its number."""
    number = PrimaryKey(int)
    students = Set(Student)
class Course(db.Entity):
    """A course taken by students and taught by teachers."""
    name = Required(str)
    students = Set(Student)
    teachers = Set(Teacher)
| 22.125 | 69 | 0.689266 |
from pony.orm import *
from base_entities import db
class Teacher(db.User):
    """A teacher; extends the base User with a degree and taught courses."""
    degree = Required(str)
    courses = Set("Course")
class Student(db.User):
    """A student; belongs to a group and enrolls in courses."""
    group = Required("Group")
    courses = Set("Course")
    gpa = Required(float)
class Group(db.Entity):
    """A study group identified by its number."""
    number = PrimaryKey(int)
    students = Set(Student)
class Course(db.Entity):
    """A course taken by students and taught by teachers."""
    name = Required(str)
    students = Set(Student)
    teachers = Set(Teacher)
| true | true |
f7243c0f06670ed1457b934c4914d2983fea2179 | 2,951 | py | Python | src/cobra/templatetags/organization_tags.py | lyoniionly/django-cobra | 2427e5cf74b7739115b1224da3306986b3ee345c | [
"Apache-2.0"
] | 1 | 2015-01-27T08:56:46.000Z | 2015-01-27T08:56:46.000Z | src/cobra/templatetags/organization_tags.py | lyoniionly/django-cobra | 2427e5cf74b7739115b1224da3306986b3ee345c | [
"Apache-2.0"
] | null | null | null | src/cobra/templatetags/organization_tags.py | lyoniionly/django-cobra | 2427e5cf74b7739115b1224da3306986b3ee345c | [
"Apache-2.0"
] | null | null | null | from collections import defaultdict
from django import template
from django.db.models import Q
from django.utils.safestring import SafeString
from cobra.apps.accounts.utils import get_user_info
from cobra.core.loading import get_model
from cobra.core.permissions import is_organization_admin, can_manage_org
from cobra.core.configure.user_config import UserConfig
## Django 1.5+ compat
try:
import json
except ImportError: # pragma: no cover
from django.utils import simplejson as json
register = template.Library()
Organization = get_model('organization', 'Organization')
OrganizationMember = get_model('organization', 'OrganizationMember')
@register.filter
def list_organizations(user):
    """Template filter: all organizations the given user belongs to."""
    return Organization.objects.get_for_user(user)
@register.filter
def organization_members(organization):
    """Template filter: the organization's active members that have a user
    account, sorted by display name (falling back to email)."""
    queryset = OrganizationMember.objects.filter(
        Q(user__isnull=False) & Q(user__is_active=True),
        organization=organization,
    ).select_related('user')
    # sorted() evaluates the queryset; the result is a plain list.
    queryset = sorted(queryset, key=lambda x: x.user.get_display_name() or x.email)
    return queryset
@register.assignment_tag
def organization_members_with_filter(organization, with_invited=False, limit=None):
    """Return the organization's members sorted by display name (or email).

    When ``with_invited`` is False only active members with a linked user
    account are included; a truthy ``limit`` caps the number of results.
    """
    manager = OrganizationMember.objects
    if with_invited:
        members = manager.filter(organization=organization).select_related('user')
    else:
        members = manager.filter(
            Q(user__isnull=False) & Q(user__is_active=True),
            organization=organization,
        ).select_related('user')
    members = sorted(members, key=lambda m: m.user.get_display_name() or m.email)
    return members[:limit] if limit else members
@register.filter
def has_subordinates(user, organization):
    '''
    A user is considered to have subordinates when they are an admin of the
    organization and the organization has members other than themselves.
    '''
    # Direct boolean return replaces the redundant if/True/else/False chain.
    return (is_organization_admin(user, organization)
            and len(organization_members(organization)) > 1)
@register.assignment_tag
def organization_current_user(organization, user):
    """Serialize the acting user (plus an 'admin' flag for this
    organization) as a JSON-safe string for templates."""
    current_user = get_user_info(user)
    current_user['admin'] = can_manage_org(user, organization)
    return SafeString(json.dumps(current_user))
@register.assignment_tag
def organization_user_config(organization, user):
    """Return the user's saved UI configuration as a JSON-safe string."""
    user_config = UserConfig(user)
    return SafeString(user_config.to_json())
@register.assignment_tag
def organization_json(organization):
    """Serialize a single organization as a JSON-safe string."""
    return SafeString(json.dumps(organization.to_dict()))
@register.assignment_tag
def organizations_json(organization, user):
    """Serialize every organization the user belongs to, flagging the one
    currently being viewed via 'isCurrent'."""
    orgs_list = []
    orgs = Organization.objects.get_for_user(user=user)
    for org in orgs:
        org_dict = org.to_dict()
        org_dict.update({
            'isCurrent': org==organization
        })
        orgs_list.append(org_dict)
    return SafeString(json.dumps(orgs_list)) | 29.808081 | 97 | 0.73941 | from collections import defaultdict
from django import template
from django.db.models import Q
from django.utils.safestring import SafeString
from cobra.apps.accounts.utils import get_user_info
from cobra.core.loading import get_model
from cobra.core.permissions import is_organization_admin, can_manage_org
from cobra.core.configure.user_config import UserConfig
n
except ImportError:
from django.utils import simplejson as json
register = template.Library()
Organization = get_model('organization', 'Organization')
OrganizationMember = get_model('organization', 'OrganizationMember')
@register.filter
def list_organizations(user):
    """Template filter: all organizations the given user belongs to."""
    return Organization.objects.get_for_user(user)
@register.filter
def organization_members(organization):
    """Active members with user accounts, sorted by display name or email."""
    queryset = OrganizationMember.objects.filter(
        Q(user__isnull=False) & Q(user__is_active=True),
        organization=organization,
    ).select_related('user')
    queryset = sorted(queryset, key=lambda x: x.user.get_display_name() or x.email)
    return queryset
@register.assignment_tag
def organization_members_with_filter(organization, with_invited=False, limit=None):
    """Members of the organization, optionally including invited (userless)
    entries, sorted by display name/email and optionally truncated."""
    if with_invited:
        queryset = OrganizationMember.objects.filter(
            organization=organization,
        ).select_related('user')
    else:
        # Only active members that have a linked user account.
        queryset = OrganizationMember.objects.filter(
            Q(user__isnull=False) & Q(user__is_active=True),
            organization=organization,
        ).select_related('user')
    queryset = sorted(queryset, key=lambda x: x.user.get_display_name() or x.email)
    if limit:
        queryset = queryset[0:limit]
    return queryset
@register.filter
def has_subordinates(user, organization):
    """True when the user administers the organization and it has members
    other than the user."""
    if is_organization_admin(user, organization) and len(organization_members(organization)) > 1:
        return True
    else:
        return False
@register.assignment_tag
def organization_current_user(organization, user):
    """JSON-safe serialization of the acting user with an 'admin' flag."""
    current_user = get_user_info(user)
    current_user['admin'] = can_manage_org(user, organization)
    return SafeString(json.dumps(current_user))
@register.assignment_tag
def organization_user_config(organization, user):
    """Return the user's saved UI configuration as a JSON-safe string."""
    user_config = UserConfig(user)
    return SafeString(user_config.to_json())
@register.assignment_tag
def organization_json(organization):
    """Serialize a single organization as a JSON-safe string."""
    return SafeString(json.dumps(organization.to_dict()))
@register.assignment_tag
def organizations_json(organization, user):
    """JSON-safe list of the user's organizations, each flagged with
    'isCurrent' relative to *organization*."""
    orgs_list = []
    orgs = Organization.objects.get_for_user(user=user)
    for org in orgs:
        org_dict = org.to_dict()
        org_dict.update({
            'isCurrent': org==organization
        })
        orgs_list.append(org_dict)
    return SafeString(json.dumps(orgs_list)) | true | true |
f7243c65f64be321b1681b267b99023e62712fa4 | 2,300 | py | Python | app/main/forms/ab_1.py | spetrovic450/ksvotes.org | 1fa25a4098657b5f2f89e345332a26b92b993ecd | [
"MIT"
] | 10 | 2018-08-28T13:35:27.000Z | 2021-07-17T18:01:04.000Z | app/main/forms/ab_1.py | spetrovic450/ksvotes.org | 1fa25a4098657b5f2f89e345332a26b92b993ecd | [
"MIT"
] | 253 | 2018-05-14T14:51:35.000Z | 2021-07-23T00:49:04.000Z | app/main/forms/ab_1.py | lukecivantos/flvotes | ace6fbee9d6cfaa9e4e69e266e321d041ad65da4 | [
"MIT"
] | 5 | 2019-09-05T15:10:32.000Z | 2021-09-30T23:37:04.000Z | import datetime
from flask_wtf import FlaskForm
from wtforms import SelectField, SelectMultipleField, widgets, StringField
from wtforms.validators import DataRequired
from flask_babel import lazy_gettext
from app.main.helpers import is_even_year
class MultiCheckboxField(SelectMultipleField):
    """Multi-select rendered as a list of checkboxes; the selected values
    are stored as a single pipe-delimited string."""
    widget = widgets.ListWidget(prefix_label=False)
    option_widget = widgets.CheckboxInput()
    def pre_validate(self, form):
        # Prevent "not a valid choice" error
        # (choices are populated at runtime, so the default check would fail).
        pass
    def process_formdata(self, valuelist):
        """Join the submitted values with '|'; empty submissions become ''."""
        if valuelist:
            self.data = "|".join(valuelist)
        else:
            self.data = ""
# based on
# https://stackoverflow.com/questions/8463209/how-to-make-a-field-conditionally-optional-in-wtforms
class RequiredIfFieldContains(DataRequired):
    """Makes a field required only when another field's data contains any
    of the given substrings.

    Args:
        other_field_name: name of the form field whose data is inspected.
        value: iterable of substrings; a match on any one of them triggers
            the inherited DataRequired validation.
    """
    def __init__(self, other_field_name, value, *args, **kwargs):
        self.other_field_name = other_field_name
        self.value = value
        super(RequiredIfFieldContains, self).__init__(*args, **kwargs)

    def __call__(self, form, field):
        other_field = form._fields.get(self.other_field_name)
        if other_field is None:
            raise Exception('no field named "%s" in form' % self.other_field_name)
        # any() short-circuits on the first hit instead of scanning every
        # substring with a boolean flag.
        if any(token in other_field.data for token in self.value):
            super(RequiredIfFieldContains, self).__call__(form, field)
class FormAB1(FlaskForm):
    """Advance-ballot application form (step 1): election selection,
    permanent-status reason, and party choice."""
    elections = MultiCheckboxField(
        lazy_gettext(u'1AB_select_election'),
        choices=[], # defer till runtime
        validators=[DataRequired(message=lazy_gettext(u'Required'))]
    )
    # Required only when a 'permanent' election option is selected.
    perm_reason = StringField(
        lazy_gettext(u'1AB_perm_reason'),
        validators=[RequiredIfFieldContains('elections', ['permanent'])]
    )
    party = SelectField(
        lazy_gettext(u'1AB_party_help'),
        choices=[('', lazy_gettext(u'1AB_select_party')), ('Democratic', 'Democratic'), ('Republican', 'Republican')],
    )
    def validate_party(form, field):
        """ Party is only required on primaries in even numbered years """
        if is_even_year():
            validator = RequiredIfFieldContains('elections', ['Prim'])
            validator(form, field) | 34.848485 | 118 | 0.68087 | import datetime
from flask_wtf import FlaskForm
from wtforms import SelectField, SelectMultipleField, widgets, StringField
from wtforms.validators import DataRequired
from flask_babel import lazy_gettext
from app.main.helpers import is_even_year
class MultiCheckboxField(SelectMultipleField):
    """Checkbox-list multi-select storing values as one pipe-joined string."""
    widget = widgets.ListWidget(prefix_label=False)
    option_widget = widgets.CheckboxInput()
    def pre_validate(self, form):
        # Skip the default "not a valid choice" check.
        pass
    def process_formdata(self, valuelist):
        if valuelist:
            self.data = "|".join(valuelist)
        else:
            self.data = ""
class RequiredIfFieldContains(DataRequired):
    """Field is required only when another field's data contains any of
    the given substrings."""
    def __init__(self, other_field_name, value, *args, **kwargs):
        self.other_field_name = other_field_name
        self.value = value
        super(RequiredIfFieldContains, self).__init__(*args, **kwargs)
    def __call__(self, form, field):
        other_field = form._fields.get(self.other_field_name)
        if other_field is None:
            raise Exception('no field named "%s" in form' % self.other_field_name)
        other_field_contains = False
        for string in self.value:
            if string in other_field.data:
                other_field_contains = True
        if other_field_contains:
            super(RequiredIfFieldContains, self).__call__(form, field)
class FormAB1(FlaskForm):
    """Advance-ballot step-1 form: election choices, permanent-status
    reason, and party selection."""
    elections = MultiCheckboxField(
        lazy_gettext(u'1AB_select_election'),
        choices=[],
        validators=[DataRequired(message=lazy_gettext(u'Required'))]
    )
    perm_reason = StringField(
        lazy_gettext(u'1AB_perm_reason'),
        validators=[RequiredIfFieldContains('elections', ['permanent'])]
    )
    party = SelectField(
        lazy_gettext(u'1AB_party_help'),
        choices=[('', lazy_gettext(u'1AB_select_party')), ('Democratic', 'Democratic'), ('Republican', 'Republican')],
    )
    def validate_party(form, field):
        # Party is required only for primaries in even-numbered years.
        if is_even_year():
            validator = RequiredIfFieldContains('elections', ['Prim'])
            validator(form, field) | true | true |
f7243cbe297538fda22473a8806395c5effa6634 | 255 | py | Python | server/apps/accounts/api/urls.py | supercooledcreations/djangular-seed | 6f73c7d91cd510ff03548a578a06730b4c351274 | [
"MIT"
] | null | null | null | server/apps/accounts/api/urls.py | supercooledcreations/djangular-seed | 6f73c7d91cd510ff03548a578a06730b4c351274 | [
"MIT"
] | 2 | 2020-06-05T18:23:13.000Z | 2021-06-10T20:30:24.000Z | server/apps/accounts/api/urls.py | supercooledcreations/djangular-seed | 6f73c7d91cd510ff03548a578a06730b4c351274 | [
"MIT"
] | null | null | null | from django.urls import path
from rest_framework_jwt.views import obtain_jwt_token, refresh_jwt_token
from .views import RegisterAPIView
urlpatterns = [
    # JWT auth endpoints: account registration and token-based login.
    path('register/', RegisterAPIView.as_view()),
    path('login/', obtain_jwt_token),
] | 23.181818 | 72 | 0.756863 | from django.urls import path
from rest_framework_jwt.views import obtain_jwt_token, refresh_jwt_token
from .views import RegisterAPIView
urlpatterns = [
    # Account registration and JWT token login.
    path('register/', RegisterAPIView.as_view()),
    path('login/', obtain_jwt_token),
] | true | true |
f7243cc1b5a9ed4a6cee6275399bb27e7e0e4cac | 2,145 | py | Python | python/src/nnabla/context.py | syoyo/nnabla | b776b68dcdffe894cac1233dfd07c301415cc0fb | [
"Apache-2.0"
] | 1 | 2020-08-03T12:49:19.000Z | 2020-08-03T12:49:19.000Z | python/src/nnabla/context.py | langbin2014/nnabla | e94bac5bed65337010e2ac07a5937fb862ab2dd8 | [
"Apache-2.0"
] | 1 | 2020-11-09T07:33:29.000Z | 2020-11-09T07:33:29.000Z | python/src/nnabla/context.py | langbin2014/nnabla | e94bac5bed65337010e2ac07a5937fb862ab2dd8 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2017 Sony Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
NNabla Context manager
"""
from __future__ import absolute_import
from contextlib import contextmanager
from .variable import Context
current_ctx = Context()
context_level = 0
@contextmanager
def context_scope(ctx):
    """
    Context as Python context.

    Temporarily makes ``ctx`` the current NNabla context for the duration
    of the ``with`` block, restoring the previously active context on exit.

    .. code-block:: python

        import nnabla as nn
        import nnabla.functions as F
        x = nn.Variable([2, 3 ,4])
        ctx = nnabla_ext.cuda.context('0')
        with context_scope(ctx):
            with parameter_scope('w1'):
                l1 = F.relu(F.affine(x, 64))
            with parameter_scope('w2'):
                l2 = F.relu(F.affine(x, 64))
    """
    global current_ctx
    global context_level
    context_level += 1
    saved_ctx = current_ctx
    current_ctx = ctx
    try:
        yield
    finally:
        # Restore the previous context even if the body raised.
        context_level -= 1
        current_ctx = saved_ctx
def set_default_context(ctx):
    """
    Set the default context.
    Note:
        It cannot be called inside any `context_scope`.
    Args:
        ctx (Context): A Context.
    Raises:
        AssertionError: if called while any `context_scope` is active
        (i.e. the nesting counter is non-zero).
    """
    global context_level
    global current_ctx
    assert context_level == 0, "It cannot be called inside any context_scope."
    current_ctx = ctx
def get_current_context():
    """
    Get the current context.

    It can be set using :meth:`nnabla.context_scope` or
    :meth:`nnabla.set_default_context`.

    Returns:
        Context: a current context.
    """
    # Reading a module-level name needs no ``global`` declaration.
    return current_ctx
| 24.375 | 92 | 0.663403 |
from __future__ import absolute_import
from contextlib import contextmanager
from .variable import Context
current_ctx = Context()
context_level = 0
@contextmanager
def context_scope(ctx):
    """Temporarily install ``ctx`` as the current context; restores the
    previous context and nesting counter on exit."""
    global current_ctx
    global context_level
    context_level += 1
    prev_context = current_ctx
    current_ctx = ctx
    try:
        yield
    finally:
        context_level -= 1
        current_ctx = prev_context
def set_default_context(ctx):
    """Set the default context; must not be called inside a context_scope."""
    global context_level
    global current_ctx
    assert context_level == 0, "It cannot be called inside any context_scope."
    current_ctx = ctx
def get_current_context():
    """Return the context currently in effect."""
    global current_ctx
    return current_ctx
| true | true |
f7243dae5379fe7773261511200764ad59342d37 | 10,478 | py | Python | GTSRB/train_standard_vgg.py | THUYimingLi/Open-sourced_Dataset_Protection | 910962c57e7d132497443b26c8e5da1dcb5ba4eb | [
"Apache-2.0"
] | 14 | 2020-11-16T03:57:19.000Z | 2022-03-30T01:44:53.000Z | GTSRB/train_standard_vgg.py | THUYimingLi/Open-sourced_Dataset_Protection | 910962c57e7d132497443b26c8e5da1dcb5ba4eb | [
"Apache-2.0"
] | null | null | null | GTSRB/train_standard_vgg.py | THUYimingLi/Open-sourced_Dataset_Protection | 910962c57e7d132497443b26c8e5da1dcb5ba4eb | [
"Apache-2.0"
] | 5 | 2020-11-16T03:56:00.000Z | 2022-03-19T06:37:02.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
This is the implement of standard training on GTSRB dataset.
Copyright (c) Yiming Li, 2020
'''
from __future__ import print_function
import argparse
import os
import shutil
import time
import random
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data as data
import torchvision.transforms as transforms
import gtsrb_dataset as dataset
from model import *
from utils import Bar, Logger, AverageMeter, accuracy, mkdir_p, savefig
parser = argparse.ArgumentParser(description='PyTorch GTSRB')
# Datasets
parser.add_argument('-j', '--workers', default=2, type=int, metavar='N',
help='number of data loading workers (default: 2)')
# Optimization options
parser.add_argument('--epochs', default=30, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('--train-batch', default=128, type=int, metavar='N',
help='train batchsize')
parser.add_argument('--test-batch', default=128, type=int, metavar='N',
help='test batchsize')
parser.add_argument('--lr', '--learning-rate', default=0.01, type=float,
metavar='LR', help='initial learning rate')
parser.add_argument('--drop', '--dropout', default=0, type=float,
metavar='Dropout', help='Dropout ratio')
parser.add_argument('--schedule', type=int, nargs='+', default=[20],
help='Decrease learning rate at these epochs.')
parser.add_argument('--gamma', type=float, default=0.1, help='LR is multiplied by gamma on schedule.')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--weight-decay', '--wd', default=5e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)')
# Checkpoints
parser.add_argument('-c', '--checkpoint', default='checkpoint/benign', type=str, metavar='PATH',
help='path to save checkpoint (default: checkpoint)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
# Miscs
parser.add_argument('--manualSeed', type=int, default=1, help='manual seed')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
help='evaluate model on validation set')
#Device options
parser.add_argument('--gpu-id', default='0', type=str,
help='id(s) for CUDA_VISIBLE_DEVICES')
args = parser.parse_args()
state = {k: v for k, v in args._get_kwargs()}
# Use CUDA
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id
use_cuda = torch.cuda.is_available()
# Random seed
if args.manualSeed is None:
args.manualSeed = random.randint(1, 10000)
random.seed(args.manualSeed)
torch.manual_seed(args.manualSeed)
if use_cuda:
torch.cuda.manual_seed_all(args.manualSeed)
best_acc = 0 # best test accuracy
def main():
    """Build data loaders and a VGG19-BN model for GTSRB, optionally resume
    from a checkpoint, then train/evaluate and log per-epoch metrics."""
    global best_acc
    start_epoch = args.start_epoch # start from epoch 0 or last checkpoint epoch
    if not os.path.isdir(args.checkpoint):
        mkdir_p(args.checkpoint)
    # Dataset preprocessing
    title = 'GTSRB'
    print('==> Preparing GTSRB dataset')
    transform = transforms.Compose([
        transforms.Resize((32, 32)),
        transforms.ToTensor()
    ])
    # Create Datasets
    trainset = dataset.GTSRB(
        root_dir='./data', train=True, transform=transform)
    testset = dataset.GTSRB(
        root_dir='./data', train=False, transform=transform)
    # Load Datasets
    trainloader = torch.utils.data.DataLoader(
        trainset, batch_size=args.train_batch, shuffle=True, num_workers=args.workers)
    testloader = torch.utils.data.DataLoader(
        testset, batch_size=args.test_batch, shuffle=False, num_workers=args.workers)
    # Model
    model = vgg19_bn()
    model = torch.nn.DataParallel(model).cuda()
    cudnn.benchmark = True
    print('Total params: %.2fM' % (sum(p.numel() for p in model.parameters())/1000000.0))
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
    # Resume
    if args.resume:
        # Load checkpoint.
        print('==> Resuming from checkpoint..')
        assert os.path.isfile(args.resume), 'Error: no checkpoint directory found!'
        args.checkpoint = os.path.dirname(args.resume)
        checkpoint = torch.load(args.resume)
        best_acc = checkpoint['best_acc']
        start_epoch = checkpoint['epoch']
        model.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        logger = Logger(os.path.join(args.checkpoint, 'log.txt'), title=title, resume=True)
    else:
        logger = Logger(os.path.join(args.checkpoint, 'log.txt'), title=title)
        logger.set_names(['Learning Rate', 'Train Loss', 'Valid Loss', 'Train Acc.', 'Valid Acc.'])
    if args.evaluate:
        print('\nEvaluation only')
        test_loss, test_acc = test(testloader, model, criterion, start_epoch, use_cuda)
        print(' Test Loss: %.8f, Test Acc: %.2f' % (test_loss, test_acc))
        return
    # Train and val
    for epoch in range(start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch)
        print('\nEpoch: [%d | %d] LR: %f' % (epoch + 1, args.epochs, state['lr']))
        train_loss, train_acc = train(args, model, trainloader, criterion, optimizer, epoch, use_cuda)
        test_loss, test_acc = test(testloader, model, criterion, epoch, use_cuda)
        # append logger file
        logger.append([state['lr'], train_loss, test_loss, train_acc, test_acc])
        # save model (keep a copy of the best-on-test weights)
        is_best = test_acc > best_acc
        best_acc = max(test_acc, best_acc)
        save_checkpoint({
            'epoch': epoch + 1,
            'state_dict': model.state_dict(),
            'acc': test_acc,
            'best_acc': best_acc,
            'optimizer' : optimizer.state_dict(),
        }, is_best, checkpoint=args.checkpoint)
    logger.close()
    logger.plot()
    savefig(os.path.join(args.checkpoint, 'log.eps'))
    print('Best acc:')
    print(best_acc)
def train(args, model, trainloader, criterion, optimizer, epoch, use_cuda):
    """Run one training epoch; returns (average loss, top-1 accuracy)."""
    # switch to train mode
    model.train()
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    end = time.time()
    bar = Bar('Processing', max=len(trainloader))
    for batch_idx, (image, target) in enumerate(trainloader):
        # measure data loading time
        data_time.update(time.time() - end)
        if use_cuda:
            image, target = image.cuda(), target.cuda()
        # compute loss and do SGD step
        outputs = model(image)
        loss = criterion(outputs, target)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # measure train accuracy and record loss
        prec1, prec5 = accuracy(outputs.data, target.data, topk=(1, 5))
        losses.update(loss.item(), image.size(0))
        top1.update(prec1.item(), image.size(0))
        top5.update(prec5.item(), image.size(0))
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        # plot progress
        bar.suffix  = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | top1: {top1: .4f} | top5: {top5: .4f}'.format(
                    batch=batch_idx + 1,
                    size=len(trainloader),
                    data=data_time.avg,
                    bt=batch_time.avg,
                    total=bar.elapsed_td,
                    eta=bar.eta_td,
                    loss=losses.avg,
                    top1=top1.avg,
                    top5=top5.avg,
                    )
        bar.next()
    bar.finish()
    return (losses.avg, top1.avg)
def test(testloader, model, criterion, epoch, use_cuda):
    """Evaluate the model on the test set; returns (average loss, top-1
    accuracy)."""
    global best_acc
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    # switch to evaluate mode
    model.eval()
    end = time.time()
    bar = Bar('Processing', max=len(testloader))
    for batch_idx, (inputs, targets) in enumerate(testloader):
        # measure data loading time
        data_time.update(time.time() - end)
        if use_cuda:
            inputs, targets = inputs.cuda(), targets.cuda()
        # compute output; no_grad avoids building the autograd graph during
        # evaluation, saving memory (outputs are numerically identical)
        with torch.no_grad():
            outputs = model(inputs)
            loss = criterion(outputs, targets)
        # measure accuracy and record standard loss
        prec1, prec5 = accuracy(outputs.data, targets.data, topk=(1, 5))
        losses.update(loss.item(), inputs.size(0))
        top1.update(prec1.item(), inputs.size(0))
        top5.update(prec5.item(), inputs.size(0))
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        # plot progress
        bar.suffix  = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | top1: {top1: .4f} | top5: {top5: .4f}'.format(
                    batch=batch_idx + 1,
                    size=len(testloader),
                    data=data_time.avg,
                    bt=batch_time.avg,
                    total=bar.elapsed_td,
                    eta=bar.eta_td,
                    loss=losses.avg,
                    top1=top1.avg,
                    top5=top5.avg,
                    )
        bar.next()
    bar.finish()
    return (losses.avg, top1.avg)
def save_checkpoint(state, is_best, checkpoint='checkpoint', filename='checkpoint.pth.tar'):
    """Serialize *state* into the checkpoint directory; when *is_best* is
    True, also duplicate it as 'model_best.pth.tar'."""
    target = os.path.join(checkpoint, filename)
    torch.save(state, target)
    if is_best:
        shutil.copyfile(target, os.path.join(checkpoint, 'model_best.pth.tar'))
def adjust_learning_rate(optimizer, epoch):
    """Decay the learning rate by args.gamma at each scheduled epoch,
    updating both the module-level `state` dict and the optimizer in place."""
    global state
    if epoch in args.schedule:
        state['lr'] *= args.gamma
        for param_group in optimizer.param_groups:
            param_group['lr'] = state['lr']
if __name__ == '__main__':
main()
| 33.691318 | 176 | 0.617293 |
from __future__ import print_function
import argparse
import os
import shutil
import time
import random
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data as data
import torchvision.transforms as transforms
import gtsrb_dataset as dataset
from model import *
from utils import Bar, Logger, AverageMeter, accuracy, mkdir_p, savefig
parser = argparse.ArgumentParser(description='PyTorch GTSRB')
parser.add_argument('-j', '--workers', default=2, type=int, metavar='N',
help='number of data loading workers (default: 2)')
parser.add_argument('--epochs', default=30, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('--train-batch', default=128, type=int, metavar='N',
help='train batchsize')
parser.add_argument('--test-batch', default=128, type=int, metavar='N',
help='test batchsize')
parser.add_argument('--lr', '--learning-rate', default=0.01, type=float,
metavar='LR', help='initial learning rate')
parser.add_argument('--drop', '--dropout', default=0, type=float,
metavar='Dropout', help='Dropout ratio')
parser.add_argument('--schedule', type=int, nargs='+', default=[20],
help='Decrease learning rate at these epochs.')
parser.add_argument('--gamma', type=float, default=0.1, help='LR is multiplied by gamma on schedule.')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--weight-decay', '--wd', default=5e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)')
parser.add_argument('-c', '--checkpoint', default='checkpoint/benign', type=str, metavar='PATH',
help='path to save checkpoint (default: checkpoint)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('--manualSeed', type=int, default=1, help='manual seed')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
help='evaluate model on validation set')
parser.add_argument('--gpu-id', default='0', type=str,
help='id(s) for CUDA_VISIBLE_DEVICES')
args = parser.parse_args()
state = {k: v for k, v in args._get_kwargs()}
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id
use_cuda = torch.cuda.is_available()
if args.manualSeed is None:
args.manualSeed = random.randint(1, 10000)
random.seed(args.manualSeed)
torch.manual_seed(args.manualSeed)
if use_cuda:
torch.cuda.manual_seed_all(args.manualSeed)
best_acc = 0
def main():
    """Train (or, with --evaluate, only test) VGG19-BN on GTSRB per the CLI args.

    Side effects: creates args.checkpoint, writes log.txt / checkpoint files /
    log.eps there, and updates the module-global best_acc.
    """
    global best_acc
    start_epoch = args.start_epoch
    if not os.path.isdir(args.checkpoint):
        mkdir_p(args.checkpoint)
    title = 'GTSRB'
    print('==> Preparing GTSRB dataset')
    # Resize to 32x32 and convert to tensor; no normalization is applied here.
    transform = transforms.Compose([
        transforms.Resize((32, 32)),
        transforms.ToTensor()
    ])
    trainset = dataset.GTSRB(
        root_dir='./data', train=True, transform=transform)
    testset = dataset.GTSRB(
        root_dir='./data', train=False, transform=transform)
    trainloader = torch.utils.data.DataLoader(
        trainset, batch_size=args.train_batch, shuffle=True, num_workers=args.workers)
    testloader = torch.utils.data.DataLoader(
        testset, batch_size=args.test_batch, shuffle=False, num_workers=args.workers)
    model = vgg19_bn()
    # NOTE(review): .cuda() is unconditional -- this script assumes a GPU even
    # though use_cuda is probed above.
    model = torch.nn.DataParallel(model).cuda()
    cudnn.benchmark = True
    print('Total params: %.2fM' % (sum(p.numel() for p in model.parameters())/1000000.0))
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
    if args.resume:
        # Restore model/optimizer state and keep appending to the old log.
        print('==> Resuming from checkpoint..')
        assert os.path.isfile(args.resume), 'Error: no checkpoint directory found!'
        args.checkpoint = os.path.dirname(args.resume)
        checkpoint = torch.load(args.resume)
        best_acc = checkpoint['best_acc']
        start_epoch = checkpoint['epoch']
        model.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        logger = Logger(os.path.join(args.checkpoint, 'log.txt'), title=title, resume=True)
    else:
        logger = Logger(os.path.join(args.checkpoint, 'log.txt'), title=title)
        logger.set_names(['Learning Rate', 'Train Loss', 'Valid Loss', 'Train Acc.', 'Valid Acc.'])
    if args.evaluate:
        print('\nEvaluation only')
        test_loss, test_acc = test(testloader, model, criterion, start_epoch, use_cuda)
        print(' Test Loss: %.8f, Test Acc: %.2f' % (test_loss, test_acc))
        return
    for epoch in range(start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch)
        print('\nEpoch: [%d | %d] LR: %f' % (epoch + 1, args.epochs, state['lr']))
        train_loss, train_acc = train(args, model, trainloader, criterion, optimizer, epoch, use_cuda)
        test_loss, test_acc = test(testloader, model, criterion, epoch, use_cuda)
        logger.append([state['lr'], train_loss, test_loss, train_acc, test_acc])
        # Track the best top-1 test accuracy; save_checkpoint copies the
        # snapshot to model_best.pth.tar when it improves.
        is_best = test_acc > best_acc
        best_acc = max(test_acc, best_acc)
        save_checkpoint({
            'epoch': epoch + 1,
            'state_dict': model.state_dict(),
            'acc': test_acc,
            'best_acc': best_acc,
            'optimizer' : optimizer.state_dict(),
        }, is_best, checkpoint=args.checkpoint)
    logger.close()
    logger.plot()
    savefig(os.path.join(args.checkpoint, 'log.eps'))
    print('Best acc:')
    print(best_acc)
def train(args, model, trainloader, criterion, optimizer, epoch, use_cuda):
    """Run one optimization epoch over *trainloader*.

    Returns a tuple ``(average loss, average top-1 accuracy)`` for the epoch.
    """
    model.train()  # enable dropout / batch-norm training behaviour
    batch_time = AverageMeter()
    data_time = AverageMeter()
    loss_meter = AverageMeter()
    acc1_meter = AverageMeter()
    acc5_meter = AverageMeter()
    tick = time.time()
    progress = Bar('Processing', max=len(trainloader))
    for step, (images, labels) in enumerate(trainloader):
        # Time spent waiting on the data loader.
        data_time.update(time.time() - tick)
        if use_cuda:
            images, labels = images.cuda(), labels.cuda()
        # Forward pass, backward pass, parameter update.
        logits = model(images)
        loss = criterion(logits, labels)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # Batch-weighted running metrics.
        prec1, prec5 = accuracy(logits.data, labels.data, topk=(1, 5))
        n = images.size(0)
        loss_meter.update(loss.item(), n)
        acc1_meter.update(prec1.item(), n)
        acc5_meter.update(prec5.item(), n)
        batch_time.update(time.time() - tick)
        tick = time.time()
        progress.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | top1: {top1: .4f} | top5: {top5: .4f}'.format(
            batch=step + 1,
            size=len(trainloader),
            data=data_time.avg,
            bt=batch_time.avg,
            total=progress.elapsed_td,
            eta=progress.eta_td,
            loss=loss_meter.avg,
            top1=acc1_meter.avg,
            top5=acc5_meter.avg,
        )
        progress.next()
    progress.finish()
    return (loss_meter.avg, acc1_meter.avg)
def test(testloader, model, criterion, epoch, use_cuda):
    """Evaluate *model* on *testloader*.

    Returns:
        (average loss, average top-1 accuracy) over the whole test set.

    Fixes vs. the original:
      * inference now runs under torch.no_grad(), so no autograd graph is
        built during evaluation (saves memory and time, results unchanged);
      * dropped the stale ``global best_acc`` declaration -- the global was
        declared but never assigned in this function.
    ``epoch`` is unused but kept for interface compatibility with callers.
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    model.eval()  # disable dropout / use running batch-norm statistics
    end = time.time()
    bar = Bar('Processing', max=len(testloader))
    with torch.no_grad():
        for batch_idx, (inputs, targets) in enumerate(testloader):
            data_time.update(time.time() - end)
            if use_cuda:
                inputs, targets = inputs.cuda(), targets.cuda()
            outputs = model(inputs)
            loss = criterion(outputs, targets)
            prec1, prec5 = accuracy(outputs.data, targets.data, topk=(1, 5))
            losses.update(loss.item(), inputs.size(0))
            top1.update(prec1.item(), inputs.size(0))
            top5.update(prec5.item(), inputs.size(0))
            batch_time.update(time.time() - end)
            end = time.time()
            bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | top1: {top1: .4f} | top5: {top5: .4f}'.format(
                batch=batch_idx + 1,
                size=len(testloader),
                data=data_time.avg,
                bt=batch_time.avg,
                total=bar.elapsed_td,
                eta=bar.eta_td,
                loss=losses.avg,
                top1=top1.avg,
                top5=top5.avg,
            )
            bar.next()
    bar.finish()
    return (losses.avg, top1.avg)
def save_checkpoint(state, is_best, checkpoint='checkpoint', filename='checkpoint.pth.tar'):
    """Serialize *state* under *checkpoint*/*filename*.

    When *is_best* is true, the snapshot is additionally copied to
    ``model_best.pth.tar`` in the same directory.
    """
    target = os.path.join(checkpoint, filename)
    torch.save(state, target)
    if not is_best:
        return
    shutil.copyfile(target, os.path.join(checkpoint, 'model_best.pth.tar'))
def adjust_learning_rate(optimizer, epoch):
    """Decay the shared learning rate when *epoch* hits the CLI schedule.

    Multiplies the module-global ``state['lr']`` by ``args.gamma`` and pushes
    the new value into every parameter group of *optimizer*.
    """
    global state
    if epoch not in args.schedule:
        return
    state['lr'] *= args.gamma
    for group in optimizer.param_groups:
        group['lr'] = state['lr']
# Script entry point: train/evaluate according to the CLI flags parsed above.
if __name__ == '__main__':
    main()
| true | true |
f7243ece8fee42902951221c8ca46e1a47204344 | 1,686 | py | Python | examples/certificate_authority.py | PragadeeswaranS/oneview-python | 3acc113b8dd30029beb7c228c3bc2bbe67d3485b | [
"Apache-2.0"
] | null | null | null | examples/certificate_authority.py | PragadeeswaranS/oneview-python | 3acc113b8dd30029beb7c228c3bc2bbe67d3485b | [
"Apache-2.0"
] | null | null | null | examples/certificate_authority.py | PragadeeswaranS/oneview-python | 3acc113b8dd30029beb7c228c3bc2bbe67d3485b | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
###
# (C) Copyright [2019] Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
from pprint import pprint
from config_loader import try_load_from_file
from hpOneView.oneview_client import OneViewClient

# Connection defaults; normally overridden by the config file below.
config = {
    "ip": "",
    "credentials": {
        "userName": "",
        "password": ""
    }
}

# Overlay the hard-coded defaults with a config file, when one is present.
config = try_load_from_file(config)
client = OneViewClient(config)

# Fetch the internal CA certificate.
print('Get the internal Certificate Authority:')
ca_certificate = client.certificate_authority.get()
pprint(ca_certificate)

# Fetch the Certificate Revocation List (CRL).
print("Getting the Certificate Revocation List")
crl = client.certificate_authority.get_crl()
pprint(crl)

# Revoke the internally signed certificate.
print("Revoking Internal CA Signed Certificate")
outcome = client.certificate_authority.delete("default")
print(outcome)

# Trigger certificate regeneration.
print("Regenerating Certificates")
outcome = client.certificate_authority.delete("rabbitmq_readonly")
print(outcome)
| 30.654545 | 75 | 0.771649 |
rom pprint import pprint
from config_loader import try_load_from_file
from hpOneView.oneview_client import OneViewClient
config = {
"ip": "",
"credentials": {
"userName": "",
"password": ""
}
}
config = try_load_from_file(config)
oneview_client = OneViewClient(config)
print('Get the internal Certificate Authority:')
certificate = oneview_client.certificate_authority.get()
pprint(certificate)
print("Getting the Certificate Revocation List")
certificate_visual_content = oneview_client.certificate_authority.get_crl()
pprint(certificate_visual_content)
print("Revoking Internal CA Signed Certificate")
success = oneview_client.certificate_authority.delete("default")
print(success)
print("Regenerating Certificates")
success = oneview_client.certificate_authority.delete("rabbitmq_readonly")
print(success)
| true | true |
f7243f26df4b059080a9f14f425e1616dfe8271f | 8,598 | py | Python | tests/broker/test_publish_sandbox.py | ned21/aquilon | 6562ea0f224cda33b72a6f7664f48d65f96bd41a | [
"Apache-2.0"
] | 7 | 2015-07-31T05:57:30.000Z | 2021-09-07T15:18:56.000Z | tests/broker/test_publish_sandbox.py | ned21/aquilon | 6562ea0f224cda33b72a6f7664f48d65f96bd41a | [
"Apache-2.0"
] | 115 | 2015-03-03T13:11:46.000Z | 2021-09-20T12:42:24.000Z | tests/broker/test_publish_sandbox.py | ned21/aquilon | 6562ea0f224cda33b72a6f7664f48d65f96bd41a | [
"Apache-2.0"
] | 13 | 2015-03-03T11:17:59.000Z | 2021-09-09T09:16:41.000Z | #!/usr/bin/env python
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2008,2009,2010,2011,2012,2013,2014,2015,2016 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for testing the publish command."""
import os
from shutil import copy, rmtree
from subprocess import Popen, PIPE
import unittest
from brokertest import TestBrokerCommand
# When run directly, bootstrap the test-suite dependencies before the
# unittest runner below is set up.
if __name__ == "__main__":
    import utils
    utils.import_depends()
class TestPublishSandbox(TestBrokerCommand):
    """End-to-end tests for `aq publish` against sandboxes.

    Exercises publishing template sandboxes to the broker, including
    fast-forward pushes, rebased (rewritten-history) pushes, and cleanup.
    """
    @classmethod
    def setUpClass(cls):
        """Reset the changetest1 sandbox templates before any test runs."""
        super(TestPublishSandbox, cls).setUpClass()
        # Run "make clean" on templates before anything else.
        testdir = os.path.join(cls.sandboxdir, "changetest1", "t")
        if os.path.exists(os.path.join(testdir, "Makefile")):
            p = Popen(('/usr/bin/make', 'clean'),
                      cwd=testdir, env=cls.gitenv(
                          env={'PATH': '/bin:/usr/bin'}),
                      stdout=PIPE, stderr=PIPE)
            (out, err) = p.communicate()
            # NOTE(review): assertEqual is reached through cls, so
            # p.returncode fills the method's `self` slot -- confirm
            # TestBrokerCommand really supports this calling style.
            cls.assertEqual(p.returncode, 0,
                            "Non-zero return code running "
                            "make clean in sandbox, "
                            "STDOUT:\n@@@'{}'\n@@@\nSTDERR:\n@@@'{}'@@@\n"
                            .format(out, err))
    def test_100_add_ut_files(self):
        """Copy the canned unittest templates into utsandbox and commit them."""
        src_dir = os.path.join(self.config.get("unittest", "datadir"),
                               "utsandbox")
        sandboxdir = os.path.join(self.sandboxdir, "utsandbox")
        for root, _, files in os.walk(src_dir):
            relpath = root[len(src_dir) + 1:]
            dst_dir = os.path.join(sandboxdir, relpath)
            if not os.path.exists(dst_dir):
                os.makedirs(dst_dir)
            for file in files:
                copy(os.path.join(root, file), os.path.join(dst_dir, file))
                self.gitcommand(["add", os.path.join(relpath, file)],
                                cwd=sandboxdir)
        self.gitcommand(["commit", "-a", "-m", "Added unittest files"],
                        cwd=sandboxdir)
    def test_110_make_change(self):
        """Append a marker line to a template in changetest1 and commit it."""
        sandboxdir = os.path.join(self.sandboxdir, "changetest1")
        template = self.find_template("aquilon", "archetype", "base",
                                      sandbox="changetest1")
        with open(template) as f:
            contents = f.readlines()
        contents.append("#Added by unittest\n")
        with open(template, 'w') as f:
            f.writelines(contents)
        self.gitcommand(["commit", "-a", "-m", "added unittest comment"],
                        cwd=sandboxdir)
    def test_120_publish_changetest1_sandbox(self):
        """Publish changetest1 via the deprecated --branch spelling."""
        sandboxdir = os.path.join(self.sandboxdir, "changetest1")
        self.successtest(["publish", "--branch", "changetest1"],
                         env=self.gitenv(), cwd=sandboxdir)
        # FIXME: Check the branch on the broker directly?
    def test_130_publish_changetest1_sandbox_no_review_created(self):
        """Publishing a sandbox must not create a review towards prod."""
        command = ["show_review",
                   "--source", "changetest1",
                   "--target", "prod"]
        self.notfoundtest(command)
    def test_140_publish_ut_sandbox(self):
        """Publish utsandbox and verify tracking domains get updated."""
        sandboxdir = os.path.join(self.sandboxdir, "utsandbox")
        command = ["publish", "--sandbox", "utsandbox"]
        _, err = self.successtest(command, env=self.gitenv(), cwd=sandboxdir)
        self.matchoutput(err, "Updating the checked out copy of domain "
                         "unittest...", command)
        self.matchoutput(err, "Updating the checked out copy of domain "
                         "unittest-json...", command)
        self.matchoutput(err, "Updating the checked out copy of domain "
                         "unittest-xml...", command)
        # FIXME: verify that changes made it to unittest
    def test_150_publish_ut_sandbox_user(self):
        """Publish again using the user-qualified user/sandbox form."""
        sandboxdir = os.path.join(self.sandboxdir, "utsandbox")
        self.ignoreoutputtest(["publish",
                               "--sandbox", "{}/utsandbox".format(self.user)],
                              env=self.gitenv(), cwd=sandboxdir)
    def test_160_rebase(self):
        """An amended commit needs --rebase to publish; plain publish fails."""
        utsandboxdir = os.path.join(self.sandboxdir, "utsandbox")
        self.gitcommand(["rev-list", "--skip=1", "--max-count=1", "HEAD"],
                        cwd=utsandboxdir)
        self.ignoreoutputtest(["add", "sandbox", "--sandbox", "rebasetest",
                               "--start", "utsandbox"])
        sandboxdir = os.path.join(self.sandboxdir, "rebasetest")
        # Add some new content
        with open(os.path.join(sandboxdir, "TEST"), "w") as f:
            f.writelines(["Added test file"])
        self.gitcommand(["add", "TEST"], cwd=sandboxdir)
        self.gitcommand(["commit", "-m", "Added test file"], cwd=sandboxdir)
        # First publish - no problem, it's a fast-forward
        self.successtest(["publish", "--sandbox", "rebasetest"],
                         env=self.gitenv(), cwd=sandboxdir)
        # Rewrite the last commit
        with open(os.path.join(sandboxdir, "TEST"), "w") as f:
            f.writelines(["Changed test file"])
        self.gitcommand(["add", "TEST"], cwd=sandboxdir)
        self.gitcommand(["commit", "--amend", "--no-edit"], cwd=sandboxdir)
        # Try to publish it
        command = ["publish", "--sandbox", "rebasetest"]
        out = self.badrequesttest(command, env=self.gitenv(), cwd=sandboxdir,
                                  ignoreout=True)
        # This string comes from git, so it may change if git is upgraded
        self.matchoutput(out, "non-fast-forward", command)
        # Publish with rebasing enabled
        command.append("--rebase")
        self.ignoreoutputtest(command, env=self.gitenv(), cwd=sandboxdir)
    def test_170_rebase_too_much(self):
        """Rebasing past the sandbox's start point must be rejected."""
        utsandboxdir = os.path.join(self.sandboxdir, "utsandbox")
        prod_head, _ = self.gitcommand(["rev-parse", "origin/prod"],
                                       cwd=utsandboxdir)
        self.ignoreoutputtest(["add", "sandbox", "--sandbox", "rebasetest2",
                               "--start", "utsandbox"])
        sandboxdir = os.path.join(self.sandboxdir, "rebasetest2")
        # Rewrite history going beyond the starting point of the sandbox
        self.gitcommand(["filter-branch", "--msg-filter", "tr a-z A-Z",
                         "--force", prod_head.strip() + "^..HEAD"],
                        cwd=sandboxdir)
        # Try to publish it
        command = ["publish", "--sandbox", "rebasetest2", "--rebase"]
        out = self.badrequesttest(command, env=self.gitenv(), cwd=sandboxdir,
                                  ignoreout=True)
        self.matchoutput(out, "The published branch no longer contains",
                         command)
    def test_180_verify_changetest1(self):
        """Re-fetch changetest1 and confirm the published change survived."""
        sandboxdir = os.path.join(self.sandboxdir, "changetest1")
        p = Popen(["/bin/rm", "-rf", sandboxdir], stdout=1, stderr=2)
        p.wait()
        self.successtest(["get", "--sandbox", "changetest1"])
        self.assertTrue(os.path.exists(sandboxdir))
        template = self.find_template("aquilon", "archetype", "base",
                                      sandbox="changetest1")
        # NOTE(review): "retrive" typo lives in the assertion message only.
        self.assertTrue(os.path.exists(template),
                        "aq get did not retrive '%s'" % template)
        with open(template) as f:
            contents = f.readlines()
        self.assertEqual(contents[-1], "#Added by unittest\n")
    def test_190_cleanup(self):
        """Delete the rebase sandboxes on the broker and on disk."""
        self.statustest(["del_sandbox", "--sandbox", "rebasetest"])
        sandboxdir = os.path.join(self.sandboxdir, "rebasetest")
        rmtree(sandboxdir, ignore_errors=True)
        self.statustest(["del_sandbox", "--sandbox", "rebasetest2"])
        sandboxdir = os.path.join(self.sandboxdir, "rebasetest2")
        rmtree(sandboxdir, ignore_errors=True)
# Allow running this module directly: build and run just this test case.
if __name__ == '__main__':
    suite = unittest.TestLoader().loadTestsFromTestCase(TestPublishSandbox)
    unittest.TextTestRunner(verbosity=2).run(suite)
| 43.867347 | 78 | 0.585485 |
import os
from shutil import copy, rmtree
from subprocess import Popen, PIPE
import unittest
from brokertest import TestBrokerCommand
if __name__ == "__main__":
import utils
utils.import_depends()
class TestPublishSandbox(TestBrokerCommand):
@classmethod
def setUpClass(cls):
super(TestPublishSandbox, cls).setUpClass()
testdir = os.path.join(cls.sandboxdir, "changetest1", "t")
if os.path.exists(os.path.join(testdir, "Makefile")):
p = Popen(('/usr/bin/make', 'clean'),
cwd=testdir, env=cls.gitenv(
env={'PATH': '/bin:/usr/bin'}),
stdout=PIPE, stderr=PIPE)
(out, err) = p.communicate()
cls.assertEqual(p.returncode, 0,
"Non-zero return code running "
"make clean in sandbox, "
"STDOUT:\n@@@'{}'\n@@@\nSTDERR:\n@@@'{}'@@@\n"
.format(out, err))
def test_100_add_ut_files(self):
src_dir = os.path.join(self.config.get("unittest", "datadir"),
"utsandbox")
sandboxdir = os.path.join(self.sandboxdir, "utsandbox")
for root, _, files in os.walk(src_dir):
relpath = root[len(src_dir) + 1:]
dst_dir = os.path.join(sandboxdir, relpath)
if not os.path.exists(dst_dir):
os.makedirs(dst_dir)
for file in files:
copy(os.path.join(root, file), os.path.join(dst_dir, file))
self.gitcommand(["add", os.path.join(relpath, file)],
cwd=sandboxdir)
self.gitcommand(["commit", "-a", "-m", "Added unittest files"],
cwd=sandboxdir)
def test_110_make_change(self):
sandboxdir = os.path.join(self.sandboxdir, "changetest1")
template = self.find_template("aquilon", "archetype", "base",
sandbox="changetest1")
with open(template) as f:
contents = f.readlines()
contents.append("#Added by unittest\n")
with open(template, 'w') as f:
f.writelines(contents)
self.gitcommand(["commit", "-a", "-m", "added unittest comment"],
cwd=sandboxdir)
def test_120_publish_changetest1_sandbox(self):
sandboxdir = os.path.join(self.sandboxdir, "changetest1")
self.successtest(["publish", "--branch", "changetest1"],
env=self.gitenv(), cwd=sandboxdir)
def test_130_publish_changetest1_sandbox_no_review_created(self):
command = ["show_review",
"--source", "changetest1",
"--target", "prod"]
self.notfoundtest(command)
def test_140_publish_ut_sandbox(self):
sandboxdir = os.path.join(self.sandboxdir, "utsandbox")
command = ["publish", "--sandbox", "utsandbox"]
_, err = self.successtest(command, env=self.gitenv(), cwd=sandboxdir)
self.matchoutput(err, "Updating the checked out copy of domain "
"unittest...", command)
self.matchoutput(err, "Updating the checked out copy of domain "
"unittest-json...", command)
self.matchoutput(err, "Updating the checked out copy of domain "
"unittest-xml...", command)
def test_150_publish_ut_sandbox_user(self):
sandboxdir = os.path.join(self.sandboxdir, "utsandbox")
self.ignoreoutputtest(["publish",
"--sandbox", "{}/utsandbox".format(self.user)],
env=self.gitenv(), cwd=sandboxdir)
def test_160_rebase(self):
utsandboxdir = os.path.join(self.sandboxdir, "utsandbox")
self.gitcommand(["rev-list", "--skip=1", "--max-count=1", "HEAD"],
cwd=utsandboxdir)
self.ignoreoutputtest(["add", "sandbox", "--sandbox", "rebasetest",
"--start", "utsandbox"])
sandboxdir = os.path.join(self.sandboxdir, "rebasetest")
with open(os.path.join(sandboxdir, "TEST"), "w") as f:
f.writelines(["Added test file"])
self.gitcommand(["add", "TEST"], cwd=sandboxdir)
self.gitcommand(["commit", "-m", "Added test file"], cwd=sandboxdir)
self.successtest(["publish", "--sandbox", "rebasetest"],
env=self.gitenv(), cwd=sandboxdir)
# Rewrite the last commit
with open(os.path.join(sandboxdir, "TEST"), "w") as f:
f.writelines(["Changed test file"])
self.gitcommand(["add", "TEST"], cwd=sandboxdir)
self.gitcommand(["commit", "--amend", "--no-edit"], cwd=sandboxdir)
# Try to publish it
command = ["publish", "--sandbox", "rebasetest"]
out = self.badrequesttest(command, env=self.gitenv(), cwd=sandboxdir,
ignoreout=True)
# This string comes from git, so it may change if git is upgraded
self.matchoutput(out, "non-fast-forward", command)
# Publish with rebasing enabled
command.append("--rebase")
self.ignoreoutputtest(command, env=self.gitenv(), cwd=sandboxdir)
def test_170_rebase_too_much(self):
utsandboxdir = os.path.join(self.sandboxdir, "utsandbox")
prod_head, _ = self.gitcommand(["rev-parse", "origin/prod"],
cwd=utsandboxdir)
self.ignoreoutputtest(["add", "sandbox", "--sandbox", "rebasetest2",
"--start", "utsandbox"])
sandboxdir = os.path.join(self.sandboxdir, "rebasetest2")
# Rewrite history going beyond the starting point of the sandbox
self.gitcommand(["filter-branch", "--msg-filter", "tr a-z A-Z",
"--force", prod_head.strip() + "^..HEAD"],
cwd=sandboxdir)
# Try to publish it
command = ["publish", "--sandbox", "rebasetest2", "--rebase"]
out = self.badrequesttest(command, env=self.gitenv(), cwd=sandboxdir,
ignoreout=True)
self.matchoutput(out, "The published branch no longer contains",
command)
def test_180_verify_changetest1(self):
sandboxdir = os.path.join(self.sandboxdir, "changetest1")
p = Popen(["/bin/rm", "-rf", sandboxdir], stdout=1, stderr=2)
p.wait()
self.successtest(["get", "--sandbox", "changetest1"])
self.assertTrue(os.path.exists(sandboxdir))
template = self.find_template("aquilon", "archetype", "base",
sandbox="changetest1")
self.assertTrue(os.path.exists(template),
"aq get did not retrive '%s'" % template)
with open(template) as f:
contents = f.readlines()
self.assertEqual(contents[-1], "#Added by unittest\n")
def test_190_cleanup(self):
self.statustest(["del_sandbox", "--sandbox", "rebasetest"])
sandboxdir = os.path.join(self.sandboxdir, "rebasetest")
rmtree(sandboxdir, ignore_errors=True)
self.statustest(["del_sandbox", "--sandbox", "rebasetest2"])
sandboxdir = os.path.join(self.sandboxdir, "rebasetest2")
rmtree(sandboxdir, ignore_errors=True)
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestPublishSandbox)
unittest.TextTestRunner(verbosity=2).run(suite)
| true | true |
f72442631484d92c7dcede95d1d98e464d00507c | 3,779 | py | Python | Fancy_aggregations/moderate_deviations.py | iosurodri/Fancy_aggregations | 647019452a074767706893ecdd431a3ee503b554 | [
"MIT"
] | 1 | 2021-03-25T11:48:20.000Z | 2021-03-25T11:48:20.000Z | Fancy_aggregations/moderate_deviations.py | iosurodri/Fancy_aggregations | 647019452a074767706893ecdd431a3ee503b554 | [
"MIT"
] | null | null | null | Fancy_aggregations/moderate_deviations.py | iosurodri/Fancy_aggregations | 647019452a074767706893ecdd431a3ee503b554 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
File containing different functions to aggregate data using Moderate Deviations. The expressions have been obtained from the following paper:
A.H. Altalhi, J.I. Forcén, M. Pagola, E. Barrenechea, H. Bustince, Zdenko Takáč,
Moderate deviation and restricted equivalence functions for measuring similarity between data,
Information Sciences,
Volume 501,
2019,
Pages 19-29,
ISSN 0020-0255,
https://doi.org/10.1016/j.ins.2019.05.078.
(http://www.sciencedirect.com/science/article/pii/S0020025519305031)
Please, cite accordingly.
@author: Javier Fumanal Idocin (UPNA).
To suggest changes or submit new code please use the github page.
"""
import numpy as np
# =============================================================================
# ~ MODERATE DEVIATIONS
# =============================================================================
def custom_distance(x, y, Mp, Mn, R1, R2):
    """Moderate deviation D(x, y) built from restricted equivalence functions.

    On the non-decreasing side (x <= y) the value is Mp scaled down by R1;
    on the decreasing side (x > y) it is Mn scaled by R2, shifted negative.
    """
    if x > y:
        return Mn*R2(x,y) - Mn
    return Mp - Mp*R1(x, y)
def custom_distance_morphs(x, y, Mp, Mn, F1, F2, T1, T2):
    '''
    Placeholder for a moderate deviation built from fusion functions
    (F1, F2) and further morphisms (T1, T2).

    Not implemented -- and, per the original author's note, likely to stay
    that way for a long time.

    :param x: first input value
    :param y: second input value
    :param Mp: positive-side scaling constant
    :param Mn: negative-side scaling constant
    :param F1:
    :param F2:
    :param T1:
    :param T2:
    :return: nothing (stub)
    '''
    pass
def distance_f1(x, y, Mp, Mn):
    """Squared-difference moderate deviation.

    Mp-weighted squared gap when x <= y; Mn-weighted difference of squares
    (negative) when x > y.
    """
    if x > y:
        return Mn*(y*y - x*x)
    return Mp*(y - x)*(y - x)
def distance_f2(x, y, Mp, Mn):
    """Linear moderate deviation: (y - x) weighted by Mp below y, Mn above."""
    weight = Mp if x <= y else Mn
    return weight*(y - x)
def cut_point(D, x_sigma, Mp, Mn):
    """Return the last index k of the sorted sample *x_sigma* where the
    total moderate deviation changes sign (<= 0 at x_sigma[k], >= 0 at
    x_sigma[k+1]); -1 when no index qualifies."""
    n = len(x_sigma)
    k = -1
    for ix in range(n - 1):
        below = np.sum([D(x_sigma[i], x_sigma[ix], Mp, Mn) for i in range(n)])
        above = np.sum([D(x_sigma[i], x_sigma[ix + 1], Mp, Mn) for i in range(n)])
        if below <= 0 and above >= 0:
            k = ix
    return k
def moderate_deviation_f(X, D=distance_f2, Mp=1, Mn=1, axis=0):
    '''
    Aggregate the sample X by solving sum_i D(x_i, f) = 0 for the
    piecewise-linear moderate deviation D (weights Mp below f, Mn above).

    Fix: the denominator previously read ``k*Mp + (n - k)*Mn``, which does
    not match the numerator's split into k+1 elements (weighted Mp) and
    n-k-1 elements (weighted Mn), nor the coefficient ``a`` used in
    moderate_deviation_eq; it broke idempotency whenever Mp != Mn.
    The closed-form solution uses ``(k+1)*Mp + (n - k - 1)*Mn``.

    :param X: 1-D sample to aggregate
    :param D: moderate deviation function D(x, y, Mp, Mn)
    :param Mp: weight on the side x <= f
    :param Mn: weight on the side x > f
    :param axis: kept for interface compatibility (sorting axis)
    :return: the aggregated value f
    '''
    n = len(X)
    x_sigma = np.sort(X, axis=0)
    k = cut_point(D, x_sigma, Mp, Mn)
    # Solve Mp*sum_{i<=k}(f - x_i) + Mn*sum_{i>k}(f - x_i) = 0 for f.
    f = (Mp * np.sum(x_sigma[0:k+1]) + Mn*np.sum(x_sigma[k+1:])) / ((k+1)*Mp + (n - k - 1)*Mn)
    return f
def moderate_deviation_eq(X, D=distance_f1, Mp=1, Mn=1):
    '''
    Aggregate X under the squared moderate deviation D by solving the
    quadratic a*y^2 + b*y + c = 0 and returning both roots (y1, y2).
    '''
    n = len(X)
    x_sigma = np.sort(X)
    k = cut_point(D, x_sigma, Mp, Mn)
    squared = np.power(x_sigma, 2)
    # Quadratic coefficients from sum_i D(x_i, y) = 0.
    a = (k+1)*Mp + (n - k-1)*Mn
    b = -2*Mp*np.sum(x_sigma[0:k+1])
    c = Mp*np.sum(squared[0:k+1]) - Mn*np.sum(squared[k+1:])
    discriminant_root = np.sqrt(b*b - 4*a*c)
    root_plus = (-b + discriminant_root) / (2*a)
    root_minus = (-b - discriminant_root) / (2*a)
    return root_plus, root_minus
def md_aggregation(X, axis=0, keepdims=True, md_function=moderate_deviation_f, Mp=1, Mn=10):
    '''
    Apply a moderate-deviation aggregation over one axis of a 3-D array
    (classifiers, samples, classes), mimicking the numpy aggregation
    interface. IT ONLY WORKS ON 3-DIMENSIONAL ARRAYS.

    Fixes vs. the original:
      * Mp/Mn are now forwarded to md_function (they were accepted but the
        inner calls hardcoded Mp=1, Mn=10 -- the previous defaults, so
        default callers see no change);
      * axis != 0 previously called np.transpose(X, (0, axis)), which raises
        on 3-D input (transpose needs a full permutation); np.moveaxis is
        used instead, and the pointless re-transpose of the local X at the
        end is gone.

    :param X: 3-D array (classifiers, samples, classes)
    :param axis: axis to aggregate over (moved to the front)
    :param keepdims: keep the aggregated axis with size 1
    :param md_function: callable f(values, Mp=..., Mn=...) -> scalar
    :param Mp: positive-side weight passed to md_function
    :param Mn: negative-side weight passed to md_function
    :return: aggregated array of shape (1, samples, classes) or (samples, classes)
    '''
    if axis != 0:
        X = np.moveaxis(X, axis, 0)
    clasificadores, muestras, clases = X.shape
    if keepdims:
        result = np.zeros([1] + list(X.shape[1:]))
    else:
        result = np.zeros(X.shape[1:])
    for m in range(muestras):
        for clase in range(clases):
            value = md_function(X[:, m, clase], Mp=Mp, Mn=Mn)
            if keepdims:
                result[0, m, clase] = value
            else:
                result[m, clase] = value
    return result
| 24.861842 | 141 | 0.546705 |
import numpy as np
def custom_distance(x, y, Mp, Mn, R1, R2):
if x <= y:
return Mp - Mp*R1(x, y)
else:
return Mn*R2(x,y) - Mn
def custom_distance_morphs(x, y, Mp, Mn, F1, F2, T1, T2):
pass
def distance_f1(x, y, Mp, Mn):
if x <= y:
return Mp*(y - x)*(y - x)
else:
return Mn*(y*y - x*x)
def distance_f2(x, y, Mp, Mn):
if x <= y:
return Mp*(y - x)
else:
return Mn*(y - x)
def cut_point(D, x_sigma, Mp, Mn):
k = -1
for ix, element in enumerate(x_sigma):
if ix < len(x_sigma) - 1:
con1 = np.sum([D(x_sigma[i], element, Mp, Mn) for i in range(len(x_sigma))]) <= 0
cond2 = np.sum([D(x_sigma[i], x_sigma[ix + 1], Mp, Mn) for i in range(len(x_sigma))]) >= 0
if con1 and cond2:
k = ix
return k
def moderate_deviation_f(X, D=distance_f2, Mp=1, Mn=1, axis=0):
n = len(X)
x_sigma = np.sort(X, axis=0)
k = cut_point(D, x_sigma, Mp, Mn)
f = (Mp * np.sum(x_sigma[0:k+1]) + Mn*np.sum(x_sigma[k+1:])) / (k*Mp + (n - k)*Mn)
return f
def moderate_deviation_eq(X, D=distance_f1, Mp=1, Mn=1):
n = len(X)
x_sigma = np.sort(X)
k = cut_point(D, x_sigma, Mp ,Mn)
a = (k+1)*Mp + (n - k-1)*Mn
b = -2*Mp*np.sum(x_sigma[0:k+1])
x_sigma_squared = np.power(x_sigma, 2)
c = Mp*np.sum(x_sigma_squared[0:k+1]) - Mn*np.sum(x_sigma_squared[k+1:])
sqr_term = np.sqrt(b*b - 4*a*c)
y1 = (-b + sqr_term) / (2*a)
y2 = (-b - sqr_term) / (2*a)
return y1, y2
def md_aggregation(X, axis=0, keepdims=True, md_function=moderate_deviation_f, Mp=1, Mn=10):
if axis != 0:
X = np.transpose(X, (0, axis))
clasificadores, muestras, clases = X.shape
if keepdims:
result = np.zeros([1] +list(X.shape[1:]))
else:
result = np.zeros(X.shape[1:])
for m in range(muestras):
if keepdims:
for clase in range(clases):
result[0, m, clase] = md_function(X[:, m, clase], Mp=1, Mn=10)
else:
for clase in range(clases):
result[m, clase] = md_function(X[:, m, clase], Mp=1, Mn=10)
if axis != 0:
X = np.transpose(X, (0, axis))
return result
| true | true |
f724427f59fd29bc59e03a5db29564b0e3328b9b | 175 | py | Python | Python/tryexcept.py | Brabec/Hacktoberfest2020 | d3a85850a462ab24abf59d68b5142e0b61b5ce37 | [
"MIT"
] | null | null | null | Python/tryexcept.py | Brabec/Hacktoberfest2020 | d3a85850a462ab24abf59d68b5142e0b61b5ce37 | [
"MIT"
] | null | null | null | Python/tryexcept.py | Brabec/Hacktoberfest2020 | d3a85850a462ab24abf59d68b5142e0b61b5ce37 | [
"MIT"
] | null | null | null | astr = 'HelloThere'
try:
istr = int(astr)
except:
istr = -1
print('First', istr)
astr = '123'
try:
istr = int(astr)
except:
istr = -1
print('Second', istr)
| 10.9375 | 21 | 0.571429 | astr = 'HelloThere'
try:
istr = int(astr)
except:
istr = -1
print('First', istr)
astr = '123'
try:
istr = int(astr)
except:
istr = -1
print('Second', istr)
| true | true |
f72443010052cb77f3bdfe74d89bbae4433a5376 | 1,195 | py | Python | hrflow/hrflow/profile/embedding.py | Riminder/python-hrflow-api | 5457c138c12689a1af08b243c15f3cbe898edf65 | [
"MIT"
] | 4 | 2020-04-01T15:16:04.000Z | 2021-01-18T03:52:39.000Z | hrflow/hrflow/profile/embedding.py | Riminder/python-hrflow-api | 5457c138c12689a1af08b243c15f3cbe898edf65 | [
"MIT"
] | null | null | null | hrflow/hrflow/profile/embedding.py | Riminder/python-hrflow-api | 5457c138c12689a1af08b243c15f3cbe898edf65 | [
"MIT"
] | null | null | null | import json
from ..utils import format_item_payload, validate_response
class ProfileEmbedding():
"""Manage embedding related profile calls."""
def __init__(self, api):
"""Init."""
self.client = api
def get(self, source_key, key=None, reference=None, email=None, fields={}):
"""
Retrieve the interpretability information.
Args:
source_key: <string>
source_key
key: <string>
key
reference: <string>
profile_reference
email: <string>
profile_email
fields: json object
fields
Returns
interpretability information
"""
query_params = format_item_payload("profile", source_key, key, reference, email)
if fields:
query_params["fields"] = json.dumps(fields)
response = self.client.get('profile/embedding', query_params)
return validate_response(response)
| 31.447368 | 88 | 0.488703 | import json
from ..utils import format_item_payload, validate_response
class ProfileEmbedding():
def __init__(self, api):
self.client = api
def get(self, source_key, key=None, reference=None, email=None, fields={}):
query_params = format_item_payload("profile", source_key, key, reference, email)
if fields:
query_params["fields"] = json.dumps(fields)
response = self.client.get('profile/embedding', query_params)
return validate_response(response)
| true | true |
f72445e12e327cd9cce5c3c3f16c18095280f308 | 206 | py | Python | damstagram/notifications/urls.py | DaMacho/damstagram | 9c3a7074df82ecc76ef035b9ac52b464fc60734b | [
"MIT"
] | 1 | 2019-05-01T04:22:38.000Z | 2019-05-01T04:22:38.000Z | damstagram/notifications/urls.py | DaMacho/damstagram | 9c3a7074df82ecc76ef035b9ac52b464fc60734b | [
"MIT"
] | null | null | null | damstagram/notifications/urls.py | DaMacho/damstagram | 9c3a7074df82ecc76ef035b9ac52b464fc60734b | [
"MIT"
] | 1 | 2019-10-16T13:24:23.000Z | 2019-10-16T13:24:23.000Z | from django.conf.urls import url
from . import views
app_name = "users"
urlpatterns = [
url(
regex=r'^$',
view=views.Notifications.as_view(),
name='notifications'
),
] | 15.846154 | 43 | 0.587379 | from django.conf.urls import url
from . import views
app_name = "users"
urlpatterns = [
url(
regex=r'^$',
view=views.Notifications.as_view(),
name='notifications'
),
] | true | true |
f72445f45a378bb85e5fd435692975c53e5449b6 | 766 | py | Python | examples/FreeCAD/Ex001_Simple_Block.py | asukiaaa/cadquery | 1b5d8d91fdae54aadf92af82de935e34a0cc062e | [
"Apache-2.0"
] | 403 | 2015-01-20T07:55:43.000Z | 2022-02-06T03:47:56.000Z | examples/FreeCAD/Ex001_Simple_Block.py | asukiaaa/cadquery | 1b5d8d91fdae54aadf92af82de935e34a0cc062e | [
"Apache-2.0"
] | 242 | 2015-01-01T00:37:27.000Z | 2021-02-08T20:25:27.000Z | examples/FreeCAD/Ex001_Simple_Block.py | asukiaaa/cadquery | 1b5d8d91fdae54aadf92af82de935e34a0cc062e | [
"Apache-2.0"
] | 65 | 2015-01-01T00:24:54.000Z | 2021-12-10T15:42:29.000Z | import cadquery as cq
# These can be modified rather than hardcoding values for each dimension.
length = 80.0 # Length of the block
height = 60.0 # Height of the block
thickness = 10.0 # Thickness of the block
# Create a 3D block based on the dimension variables above.
# 1. Establishes a workplane that an object can be built on.
# 1a. Uses the X and Y origins to define the workplane, meaning that the
# positive Z direction is "up", and the negative Z direction is "down".
result = cq.Workplane("XY").box(length, height, thickness)
# The following method is now outdated, but can still be used to display the
# results of the script if you want
# from Helpers import show
# show(result) # Render the result of this script
show_object(result)
| 38.3 | 76 | 0.736292 | import cadquery as cq
length = 80.0
height = 60.0
thickness = 10.0
result = cq.Workplane("XY").box(length, height, thickness)
| true | true |
f724461d9b5f371de5e8cc03ae8c7e10e9f3cfa1 | 2,813 | py | Python | task_manager/tasks/migrations/0001_initial.py | rabilrbl/task_manager | 10c96df8f41caf2db6a0ec2aa7cb961135412843 | [
"BSD-3-Clause"
] | null | null | null | task_manager/tasks/migrations/0001_initial.py | rabilrbl/task_manager | 10c96df8f41caf2db6a0ec2aa7cb961135412843 | [
"BSD-3-Clause"
] | null | null | null | task_manager/tasks/migrations/0001_initial.py | rabilrbl/task_manager | 10c96df8f41caf2db6a0ec2aa7cb961135412843 | [
"BSD-3-Clause"
] | null | null | null | # Generated by Django 3.2.12 on 2022-02-16 17:52
import datetime
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Task',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('external_id', models.UUIDField(db_index=True, default=uuid.uuid4, editable=False, unique=True)),
('title', models.CharField(max_length=100)),
('priority', models.IntegerField(default=0)),
('description', models.TextField(blank=True, max_length=500)),
('completed', models.BooleanField(default=False)),
('date_created', models.DateTimeField(auto_now_add=True)),
('deleted', models.BooleanField(default=False)),
('status', models.CharField(choices=[('pending', 'Pending'), ('in_progress', 'In Progress'), ('completed', 'Completed'), ('cancelled', 'Cancelled')], default='pending', max_length=100)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Report',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('consent', models.BooleanField(default=False, help_text='Uncheck to stop receiving reports')),
('time', models.TimeField(default=datetime.time(0, 0), help_text='All times are in UTC format.')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='History',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('old_status', models.CharField(choices=[('pending', 'Pending'), ('in_progress', 'In Progress'), ('completed', 'Completed'), ('cancelled', 'Cancelled')], default='n/a', max_length=100)),
('new_status', models.CharField(choices=[('pending', 'Pending'), ('in_progress', 'In Progress'), ('completed', 'Completed'), ('cancelled', 'Cancelled')], default='n/a', max_length=100)),
('change_date', models.DateTimeField(auto_now=True)),
('task', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tasks.task')),
],
),
]
| 52.092593 | 202 | 0.614646 |
import datetime
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Task',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('external_id', models.UUIDField(db_index=True, default=uuid.uuid4, editable=False, unique=True)),
('title', models.CharField(max_length=100)),
('priority', models.IntegerField(default=0)),
('description', models.TextField(blank=True, max_length=500)),
('completed', models.BooleanField(default=False)),
('date_created', models.DateTimeField(auto_now_add=True)),
('deleted', models.BooleanField(default=False)),
('status', models.CharField(choices=[('pending', 'Pending'), ('in_progress', 'In Progress'), ('completed', 'Completed'), ('cancelled', 'Cancelled')], default='pending', max_length=100)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Report',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('consent', models.BooleanField(default=False, help_text='Uncheck to stop receiving reports')),
('time', models.TimeField(default=datetime.time(0, 0), help_text='All times are in UTC format.')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='History',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('old_status', models.CharField(choices=[('pending', 'Pending'), ('in_progress', 'In Progress'), ('completed', 'Completed'), ('cancelled', 'Cancelled')], default='n/a', max_length=100)),
('new_status', models.CharField(choices=[('pending', 'Pending'), ('in_progress', 'In Progress'), ('completed', 'Completed'), ('cancelled', 'Cancelled')], default='n/a', max_length=100)),
('change_date', models.DateTimeField(auto_now=True)),
('task', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tasks.task')),
],
),
]
| true | true |
f724467171d010ee0e4c6085d11642893a2e975d | 430 | py | Python | packages/python/plotly/plotly/validators/barpolar/marker/colorbar/title/_text.py | mastermind88/plotly.py | efa70710df1af22958e1be080e105130042f1839 | [
"MIT"
] | null | null | null | packages/python/plotly/plotly/validators/barpolar/marker/colorbar/title/_text.py | mastermind88/plotly.py | efa70710df1af22958e1be080e105130042f1839 | [
"MIT"
] | null | null | null | packages/python/plotly/plotly/validators/barpolar/marker/colorbar/title/_text.py | mastermind88/plotly.py | efa70710df1af22958e1be080e105130042f1839 | [
"MIT"
] | null | null | null | import _plotly_utils.basevalidators
class TextValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(
self, plotly_name="text", parent_name="barpolar.marker.colorbar.title", **kwargs
):
super(TextValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
**kwargs,
)
| 30.714286 | 88 | 0.65814 | import _plotly_utils.basevalidators
class TextValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(
self, plotly_name="text", parent_name="barpolar.marker.colorbar.title", **kwargs
):
super(TextValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
**kwargs,
)
| true | true |
f7244861300db686a544ca4278551c0acac34d9c | 2,864 | py | Python | code/week 6/rat_in_a_maze.py | c235gsy/Sustech_Data-Structure-and-Algorithm-Analysis | fcbd450216e9e62cd3365ad2a8ccab00b9eb679f | [
"MIT"
] | 1 | 2020-01-04T13:35:29.000Z | 2020-01-04T13:35:29.000Z | code/week 6/rat_in_a_maze.py | c235gsy/Sustech_Data-Structure-and-Algorithm-Analysis | fcbd450216e9e62cd3365ad2a8ccab00b9eb679f | [
"MIT"
] | null | null | null | code/week 6/rat_in_a_maze.py | c235gsy/Sustech_Data-Structure-and-Algorithm-Analysis | fcbd450216e9e62cd3365ad2a8ccab00b9eb679f | [
"MIT"
] | null | null | null |
class Queue:
# A container with a first-in-first-out (FIFO) queuing policy.
def __init__(self):
self.list = []
def push(self,item):
# Enqueue the 'item' into the queue
self.list.insert(0, item)
def pop(self):
# Dequeue the earliest enqueued item still in the queue. This operation removes the item from the queue.
return self.list.pop()
def is_empty(self):
# Returns true if the queue is empty
return len(self.list) == 0
global maze
maze = [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
[0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0],
[0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1],
[0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0],
[0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0],
[0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0]]
def get_choices(stage):
# Move order is: right, down, left, up
choices = []
x = stage[0]
y = stage[1]
if y != 14 and maze[x][y+1] == 0:
choices.append([(x, y+1), "right"])
if x != 13 and maze[x+1][y] == 0:
choices.append([(x+1, y), "down"])
if y != 0 and maze[x][y-1] == 0:
choices.append([(x, y-1), "left"])
if x != 0 and maze[x-1][y] == 0:
choices.append([(x-1, y), "up"])
return choices
def get_start_stage():
return 0, 0
def get_goal_stage():
return 13, 14
def is_goal_stage(stage):
return stage == (13, 14)
def breadth_first_search():
states_to_expand = Queue ()
states_to_expand.push (get_start_stage())
visited_states = []
path_to_goal = []
path_to_current_state = Queue ()
current_state = states_to_expand.pop ()
flag = 1
while flag == 1:
if is_goal_stage(current_state):
break
elif current_state not in visited_states:
visited_states.append(current_state)
choices_of_move = get_choices(current_state)
for p in range(0, len(choices_of_move)):
choice = choices_of_move[p]
new_position = choice[0]
direction = choice[1]
states_to_expand.push(new_position)
path_to_current_state.push(path_to_goal + [direction])
current_state = states_to_expand.pop()
path_to_goal = path_to_current_state.pop()
return path_to_goal
result = breadth_first_search()
for step in result:
print(step)
| 29.525773 | 112 | 0.511522 |
class Queue:
def __init__(self):
self.list = []
def push(self,item):
self.list.insert(0, item)
def pop(self):
return self.list.pop()
def is_empty(self):
return len(self.list) == 0
global maze
maze = [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
[0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0],
[0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1],
[0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0],
[0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0],
[0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0]]
def get_choices(stage):
choices = []
x = stage[0]
y = stage[1]
if y != 14 and maze[x][y+1] == 0:
choices.append([(x, y+1), "right"])
if x != 13 and maze[x+1][y] == 0:
choices.append([(x+1, y), "down"])
if y != 0 and maze[x][y-1] == 0:
choices.append([(x, y-1), "left"])
if x != 0 and maze[x-1][y] == 0:
choices.append([(x-1, y), "up"])
return choices
def get_start_stage():
return 0, 0
def get_goal_stage():
return 13, 14
def is_goal_stage(stage):
return stage == (13, 14)
def breadth_first_search():
states_to_expand = Queue ()
states_to_expand.push (get_start_stage())
visited_states = []
path_to_goal = []
path_to_current_state = Queue ()
current_state = states_to_expand.pop ()
flag = 1
while flag == 1:
if is_goal_stage(current_state):
break
elif current_state not in visited_states:
visited_states.append(current_state)
choices_of_move = get_choices(current_state)
for p in range(0, len(choices_of_move)):
choice = choices_of_move[p]
new_position = choice[0]
direction = choice[1]
states_to_expand.push(new_position)
path_to_current_state.push(path_to_goal + [direction])
current_state = states_to_expand.pop()
path_to_goal = path_to_current_state.pop()
return path_to_goal
result = breadth_first_search()
for step in result:
print(step)
| true | true |
f72448dcbaaf3b3cb88177c88c779358a33a7210 | 545 | py | Python | lib/models/__init__.py | ablattmann/pose_estimation_hrnet | 67d5a3446979c2abe54578ee4bba3787862d4077 | [
"MIT"
] | null | null | null | lib/models/__init__.py | ablattmann/pose_estimation_hrnet | 67d5a3446979c2abe54578ee4bba3787862d4077 | [
"MIT"
] | null | null | null | lib/models/__init__.py | ablattmann/pose_estimation_hrnet | 67d5a3446979c2abe54578ee4bba3787862d4077 | [
"MIT"
] | null | null | null | # ------------------------------------------------------------------------------
# Copyright (c) Microsoft
# Licensed under the MIT License.
# Written by Bin Xiao (Bin.Xiao@microsoft.com)
# ------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# import models.pose_resnet
# import models.pose_hrnet
| 32.058824 | 80 | 0.585321 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| true | true |
f72449514abd4834918b9dc32df86e5d6d182d1c | 2,299 | py | Python | magnum/tests/functional/tempest_tests/config.py | mail2nsrajesh/magnum | 2e7e5a77967028c961337177ce577eb936c3845c | [
"Apache-2.0"
] | null | null | null | magnum/tests/functional/tempest_tests/config.py | mail2nsrajesh/magnum | 2e7e5a77967028c961337177ce577eb936c3845c | [
"Apache-2.0"
] | null | null | null | magnum/tests/functional/tempest_tests/config.py | mail2nsrajesh/magnum | 2e7e5a77967028c961337177ce577eb936c3845c | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function
from oslo_config import cfg
from tempest import config # noqa
service_available_group = cfg.OptGroup(name="service_available",
title="Available OpenStack Services")
ServiceAvailableGroup = [
cfg.BoolOpt("magnum",
default=True,
help="Whether or not magnum is expected to be available"),
]
magnum_group = cfg.OptGroup(name="magnum", title="Magnum Options")
MagnumGroup = [
cfg.StrOpt("image_id",
default="fedora-atomic-latest",
help="Image id to be used for ClusterTemplate."),
cfg.StrOpt("nic_id",
default="public",
help="NIC id."),
cfg.StrOpt("keypair_id",
default="default",
help="Keypair id to use to log into nova instances."),
cfg.StrOpt("flavor_id",
default="s1.magnum",
help="Flavor id to use for ClusterTemplate."),
cfg.StrOpt("magnum_url",
help="Bypass URL for Magnum to skip service catalog lookup"),
cfg.StrOpt("master_flavor_id",
default="m1.magnum",
help="Master flavor id to use for ClusterTemplate."),
cfg.StrOpt("csr_location",
default="/opt/stack/new/magnum/default.csr",
deprecated_for_removal=True,
help="CSR location for certificates. This option is no "
"longer used for anything."),
cfg.StrOpt("dns_nameserver",
default="8.8.8.8",
help="DNS nameserver to use for ClusterTemplate."),
cfg.BoolOpt("copy_logs",
default=True,
help="Specify whether to copy nova server logs on failure."),
]
| 33.808824 | 77 | 0.628099 |
from __future__ import print_function
from oslo_config import cfg
from tempest import config
service_available_group = cfg.OptGroup(name="service_available",
title="Available OpenStack Services")
ServiceAvailableGroup = [
cfg.BoolOpt("magnum",
default=True,
help="Whether or not magnum is expected to be available"),
]
magnum_group = cfg.OptGroup(name="magnum", title="Magnum Options")
MagnumGroup = [
cfg.StrOpt("image_id",
default="fedora-atomic-latest",
help="Image id to be used for ClusterTemplate."),
cfg.StrOpt("nic_id",
default="public",
help="NIC id."),
cfg.StrOpt("keypair_id",
default="default",
help="Keypair id to use to log into nova instances."),
cfg.StrOpt("flavor_id",
default="s1.magnum",
help="Flavor id to use for ClusterTemplate."),
cfg.StrOpt("magnum_url",
help="Bypass URL for Magnum to skip service catalog lookup"),
cfg.StrOpt("master_flavor_id",
default="m1.magnum",
help="Master flavor id to use for ClusterTemplate."),
cfg.StrOpt("csr_location",
default="/opt/stack/new/magnum/default.csr",
deprecated_for_removal=True,
help="CSR location for certificates. This option is no "
"longer used for anything."),
cfg.StrOpt("dns_nameserver",
default="8.8.8.8",
help="DNS nameserver to use for ClusterTemplate."),
cfg.BoolOpt("copy_logs",
default=True,
help="Specify whether to copy nova server logs on failure."),
]
| true | true |
f7244962c5bfd8e1aa5c08eb18369e6d57850268 | 1,912 | py | Python | EnvironmentAM2315MuxSensor.py | MBI-Div-B/pytango-EnvironmentAM2315Mux | ebbea69d2c954b6d1e59e49a31eb36ec60929bad | [
"MIT"
] | null | null | null | EnvironmentAM2315MuxSensor.py | MBI-Div-B/pytango-EnvironmentAM2315Mux | ebbea69d2c954b6d1e59e49a31eb36ec60929bad | [
"MIT"
] | null | null | null | EnvironmentAM2315MuxSensor.py | MBI-Div-B/pytango-EnvironmentAM2315Mux | ebbea69d2c954b6d1e59e49a31eb36ec60929bad | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (C) 2020 MBI-Division-B
# MIT License, refer to LICENSE file
# Author: Luca Barbera / Email: barbera@mbi-berlin.de
from tango import AttrWriteType, DevState, DebugIt, ErrorIt, InfoIt, DeviceProxy
from tango.server import Device, attribute, command, device_property
class EnvironmentAM2315MuxSensor(Device):
CtrlDevice = device_property(
dtype="str",
default_value="domain/family/memeber",
)
Channel = device_property(
dtype="int",
default_value=0,
)
temperature = attribute(label='Temperature',
access=AttrWriteType.READ,
dtype=float,
format='3.1f',
unit='C')
humidity = attribute(label='Humidity',
access=AttrWriteType.READ,
dtype=float,
format='3.1f',
unit='%')
def init_device(self):
Device.init_device(self)
self.set_state(DevState.INIT)
try:
self.ctrl = DeviceProxy(self.CtrlDevice)
self.info_stream("Connection established.")
self.set_state(DevState.ON)
except Exception:
self.error_stream('Connection could not be established.')
self.set_state(DevState.OFF)
self._temp = 0
self._humid = 0
def always_executed_hook(self):
try:
# _read_data measures both humidity and temperature
self._temp, self._humid = self.ctrl.read_data(self.Channel)
except Exception:
self.error_stream('Data could not be read')
def read_temperature(self):
return self._temp
def read_humidity(self):
return self._humid
if __name__ == "__main__":
EnvironmentAM2315MuxSensor.run_server()
| 28.537313 | 80 | 0.582636 |
from tango import AttrWriteType, DevState, DebugIt, ErrorIt, InfoIt, DeviceProxy
from tango.server import Device, attribute, command, device_property
class EnvironmentAM2315MuxSensor(Device):
CtrlDevice = device_property(
dtype="str",
default_value="domain/family/memeber",
)
Channel = device_property(
dtype="int",
default_value=0,
)
temperature = attribute(label='Temperature',
access=AttrWriteType.READ,
dtype=float,
format='3.1f',
unit='C')
humidity = attribute(label='Humidity',
access=AttrWriteType.READ,
dtype=float,
format='3.1f',
unit='%')
def init_device(self):
Device.init_device(self)
self.set_state(DevState.INIT)
try:
self.ctrl = DeviceProxy(self.CtrlDevice)
self.info_stream("Connection established.")
self.set_state(DevState.ON)
except Exception:
self.error_stream('Connection could not be established.')
self.set_state(DevState.OFF)
self._temp = 0
self._humid = 0
def always_executed_hook(self):
try:
self._temp, self._humid = self.ctrl.read_data(self.Channel)
except Exception:
self.error_stream('Data could not be read')
def read_temperature(self):
return self._temp
def read_humidity(self):
return self._humid
if __name__ == "__main__":
EnvironmentAM2315MuxSensor.run_server()
| true | true |
f7244a801802bebba70ac938a121a24fcb049c4f | 1,578 | py | Python | nssrc/com/citrix/netscaler/nitro/resource/config/router/routerdynamicrouting_args.py | guardicore/nitro-python | 5346a5086134aead80968f15a41ff527adaa0ec1 | [
"Apache-2.0"
] | null | null | null | nssrc/com/citrix/netscaler/nitro/resource/config/router/routerdynamicrouting_args.py | guardicore/nitro-python | 5346a5086134aead80968f15a41ff527adaa0ec1 | [
"Apache-2.0"
] | null | null | null | nssrc/com/citrix/netscaler/nitro/resource/config/router/routerdynamicrouting_args.py | guardicore/nitro-python | 5346a5086134aead80968f15a41ff527adaa0ec1 | [
"Apache-2.0"
] | null | null | null | #
# Copyright (c) 2021 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
class routerdynamicrouting_args :
r""" Provides additional arguments required for fetching the routerdynamicrouting resource.
"""
def __init__(self) :
self._commandstring = None
self._nodeid = None
@property
def commandstring(self) :
r"""command to be executed.
"""
try :
return self._commandstring
except Exception as e:
raise e
@commandstring.setter
def commandstring(self, commandstring) :
r"""command to be executed.
"""
try :
self._commandstring = commandstring
except Exception as e:
raise e
@property
def nodeid(self) :
r"""Unique number that identifies the cluster node.<br/>Minimum value = 0<br/>Maximum value = 31.
"""
try :
return self._nodeid
except Exception as e:
raise e
@nodeid.setter
def nodeid(self, nodeid) :
r"""Unique number that identifies the cluster node.<br/>Minimum value = 0<br/>Maximum value = 31
"""
try :
self._nodeid = nodeid
except Exception as e:
raise e
| 25.868852 | 101 | 0.709125 |
class routerdynamicrouting_args :
def __init__(self) :
self._commandstring = None
self._nodeid = None
@property
def commandstring(self) :
try :
return self._commandstring
except Exception as e:
raise e
@commandstring.setter
def commandstring(self, commandstring) :
try :
self._commandstring = commandstring
except Exception as e:
raise e
@property
def nodeid(self) :
try :
return self._nodeid
except Exception as e:
raise e
@nodeid.setter
def nodeid(self, nodeid) :
try :
self._nodeid = nodeid
except Exception as e:
raise e
| true | true |
f7244c2c22cf9787986fc05f8c297d20c042b807 | 14,326 | py | Python | aries_cloudagent/wallet/tests/test_routes.py | jcourt562/aries-cloudagent-python | de291184c59006391a76317826983dd1eb0ada5d | [
"Apache-2.0"
] | 1 | 2020-11-30T05:47:54.000Z | 2020-11-30T05:47:54.000Z | aries_cloudagent/wallet/tests/test_routes.py | jcourt562/aries-cloudagent-python | de291184c59006391a76317826983dd1eb0ada5d | [
"Apache-2.0"
] | 1 | 2020-02-25T19:01:16.000Z | 2020-02-25T19:01:16.000Z | aries_cloudagent/wallet/tests/test_routes.py | jcourt562/aries-cloudagent-python | de291184c59006391a76317826983dd1eb0ada5d | [
"Apache-2.0"
] | 2 | 2020-02-18T20:34:01.000Z | 2021-03-12T16:18:30.000Z | from asynctest import TestCase as AsyncTestCase
from asynctest import mock as async_mock
import pytest
from aiohttp.web import HTTPForbidden
from ...config.injection_context import InjectionContext
from ...ledger.base import BaseLedger
from ...wallet.base import BaseWallet, DIDInfo
from .. import routes as test_module
class TestWalletRoutes(AsyncTestCase):
def setUp(self):
self.context = InjectionContext(enforce_typing=False)
self.wallet = async_mock.create_autospec(BaseWallet)
self.context.injector.bind_instance(BaseWallet, self.wallet)
self.app = {
"outbound_message_router": async_mock.CoroutineMock(),
"request_context": self.context,
}
self.test_did = "did"
self.test_verkey = "verkey"
async def test_missing_wallet(self):
request = async_mock.MagicMock()
request.app = self.app
self.context.injector.clear_binding(BaseWallet)
with self.assertRaises(HTTPForbidden):
await test_module.wallet_create_did(request)
with self.assertRaises(HTTPForbidden):
await test_module.wallet_did_list(request)
with self.assertRaises(HTTPForbidden):
await test_module.wallet_get_public_did(request)
with self.assertRaises(HTTPForbidden):
await test_module.wallet_set_public_did(request)
def test_format_did_info(self):
did_info = DIDInfo(self.test_did, self.test_verkey, {})
result = test_module.format_did_info(did_info)
assert (
result["did"] == self.test_did
and result["verkey"] == self.test_verkey
and result["public"] == "false"
)
did_info = DIDInfo(self.test_did, self.test_verkey, {"public": True})
result = test_module.format_did_info(did_info)
assert result["public"] == "true"
async def test_create_did(self):
request = async_mock.MagicMock()
request.app = self.app
with async_mock.patch.object(
test_module.web, "json_response", async_mock.Mock()
) as json_response, async_mock.patch.object(
test_module, "format_did_info", async_mock.Mock()
) as format_did_info:
self.wallet.create_local_did.return_value = DIDInfo(
self.test_did, self.test_verkey, {}
)
result = await test_module.wallet_create_did(request)
format_did_info.assert_called_once_with(
self.wallet.create_local_did.return_value
)
json_response.assert_called_once_with(
{"result": format_did_info.return_value}
)
assert result is json_response.return_value
async def test_did_list(self):
request = async_mock.MagicMock()
request.app = self.app
request.query = {}
with async_mock.patch.object(
test_module.web, "json_response", async_mock.Mock()
) as json_response, async_mock.patch.object(
test_module, "format_did_info", async_mock.Mock()
) as format_did_info:
self.wallet.get_local_dids.return_value = [
DIDInfo(self.test_did, self.test_verkey, {})
]
format_did_info.return_value = {"did": self.test_did}
result = await test_module.wallet_did_list(request)
format_did_info.assert_called_once_with(
self.wallet.get_local_dids.return_value[0]
)
json_response.assert_called_once_with(
{"results": [format_did_info.return_value]}
)
assert json_response.return_value is json_response()
assert result is json_response.return_value
async def test_did_list_filter_public(self):
request = async_mock.MagicMock()
request.app = self.app
request.query = {"public": "true"}
with async_mock.patch.object(
test_module.web, "json_response", async_mock.Mock()
) as json_response, async_mock.patch.object(
test_module, "format_did_info", async_mock.Mock()
) as format_did_info:
self.wallet.get_public_did.return_value = DIDInfo(
self.test_did, self.test_verkey, {}
)
format_did_info.return_value = {"did": self.test_did}
result = await test_module.wallet_did_list(request)
format_did_info.assert_called_once_with(
self.wallet.get_public_did.return_value
)
json_response.assert_called_once_with(
{"results": [format_did_info.return_value]}
)
assert json_response.return_value is json_response()
assert result is json_response.return_value
async def test_did_list_filter_did(self):
request = async_mock.MagicMock()
request.app = self.app
request.query = {"did": self.test_did}
with async_mock.patch.object(
test_module.web, "json_response", async_mock.Mock()
) as json_response, async_mock.patch.object(
test_module, "format_did_info", async_mock.Mock()
) as format_did_info:
self.wallet.get_local_did.return_value = DIDInfo(
self.test_did, self.test_verkey, {}
)
format_did_info.return_value = {"did": self.test_did}
result = await test_module.wallet_did_list(request)
format_did_info.assert_called_once_with(
self.wallet.get_local_did.return_value
)
json_response.assert_called_once_with(
{"results": [format_did_info.return_value]}
)
assert json_response.return_value is json_response()
assert result is json_response.return_value
async def test_did_list_filter_did_x(self):
request = async_mock.MagicMock()
request.app = self.app
request.query = {"did": self.test_did}
with async_mock.patch.object(
test_module.web, "json_response", async_mock.Mock()
) as json_response:
self.wallet.get_local_did.side_effect = test_module.WalletError()
result = await test_module.wallet_did_list(request)
json_response.assert_called_once_with({"results": []})
assert json_response.return_value is json_response()
assert result is json_response.return_value
async def test_did_list_filter_verkey(self):
request = async_mock.MagicMock()
request.app = self.app
request.query = {"verkey": self.test_verkey}
with async_mock.patch.object(
test_module.web, "json_response", async_mock.Mock()
) as json_response, async_mock.patch.object(
test_module, "format_did_info", async_mock.Mock()
) as format_did_info:
self.wallet.get_local_did_for_verkey.return_value = DIDInfo(
self.test_did, self.test_verkey, {}
)
format_did_info.return_value = {"did": self.test_did}
result = await test_module.wallet_did_list(request)
format_did_info.assert_called_once_with(
self.wallet.get_local_did_for_verkey.return_value
)
json_response.assert_called_once_with(
{"results": [format_did_info.return_value]}
)
assert json_response.return_value is json_response()
assert result is json_response.return_value
async def test_did_list_filter_verkey_x(self):
request = async_mock.MagicMock()
request.app = self.app
request.query = {"verkey": self.test_verkey}
with async_mock.patch.object(
test_module.web, "json_response", async_mock.Mock()
) as json_response:
self.wallet.get_local_did_for_verkey.side_effect = test_module.WalletError()
result = await test_module.wallet_did_list(request)
json_response.assert_called_once_with({"results": []})
assert json_response.return_value is json_response()
assert result is json_response.return_value
async def test_get_public_did(self):
request = async_mock.MagicMock()
request.app = self.app
with async_mock.patch.object(
test_module.web, "json_response", async_mock.Mock()
) as json_response, async_mock.patch.object(
test_module, "format_did_info", async_mock.Mock()
) as format_did_info:
self.wallet.get_public_did.return_value = DIDInfo(
self.test_did, self.test_verkey, {}
)
result = await test_module.wallet_get_public_did(request)
format_did_info.assert_called_once_with(
self.wallet.get_public_did.return_value
)
json_response.assert_called_once_with(
{"result": format_did_info.return_value}
)
assert result is json_response.return_value
async def test_set_public_did(self):
request = async_mock.MagicMock()
request.app = self.app
request.query = {"did": self.test_did}
with async_mock.patch.object(
test_module.web, "json_response", async_mock.Mock()
) as json_response, async_mock.patch.object(
test_module, "format_did_info", async_mock.Mock()
) as format_did_info:
self.wallet.get_public_did.return_value = DIDInfo(
self.test_did, self.test_verkey, {}
)
result = await test_module.wallet_set_public_did(request)
self.wallet.set_public_did.assert_awaited_once_with(request.query["did"])
format_did_info.assert_called_once_with(
self.wallet.set_public_did.return_value
)
json_response.assert_called_once_with(
{"result": format_did_info.return_value}
)
assert result is json_response.return_value
async def test_set_public_did_no_did(self):
request = async_mock.MagicMock()
request.app = self.app
request.query = {}
with self.assertRaises(test_module.web.HTTPBadRequest):
await test_module.wallet_set_public_did(request)
async def test_set_public_did_not_found(self):
request = async_mock.MagicMock()
request.app = self.app
request.query = {"did": self.test_did}
self.wallet.get_local_did.side_effect = test_module.WalletError()
with self.assertRaises(test_module.web.HTTPBadRequest):
await test_module.wallet_set_public_did(request)
async def test_set_public_did_update_endpoint(self):
request = async_mock.MagicMock()
request.app = self.app
request.query = {"did": self.test_did}
Ledger = async_mock.MagicMock()
self.ledger = Ledger()
self.ledger.update_endpoint_for_did = async_mock.CoroutineMock()
self.ledger.__aenter__ = async_mock.CoroutineMock(return_value=self.ledger)
self.context.injector.bind_instance(BaseLedger, self.ledger)
with async_mock.patch.object(
test_module.web, "json_response", async_mock.Mock()
) as json_response, async_mock.patch.object(
test_module, "format_did_info", async_mock.Mock()
) as format_did_info:
self.wallet.get_public_did.return_value = DIDInfo(
self.test_did, self.test_verkey, {}
)
result = await test_module.wallet_set_public_did(request)
self.wallet.set_public_did.assert_awaited_once_with(request.query["did"])
format_did_info.assert_called_once_with(
self.wallet.set_public_did.return_value
)
json_response.assert_called_once_with(
{"result": format_did_info.return_value}
)
assert result is json_response.return_value
async def test_get_catpol(self):
request = async_mock.MagicMock()
request.app = self.app
with async_mock.patch.object(
test_module.web, "json_response", async_mock.Mock()
) as json_response:
self.wallet.WALLET_TYPE = "indy"
self.wallet.get_credential_definition_tag_policy = async_mock.CoroutineMock(
return_value=["a", "b", "c"]
)
result = await test_module.wallet_get_tagging_policy(request)
json_response.assert_called_once_with({"taggables": ["a", "b", "c"]})
assert result is json_response.return_value
async def test_get_catpol_not_indy_x(self):
request = async_mock.MagicMock()
request.app = self.app
self.wallet.WALLET_TYPE = "rich-corinthian-leather"
with self.assertRaises(test_module.web.HTTPForbidden):
await test_module.wallet_get_tagging_policy(request)
async def test_set_catpol(self):
request = async_mock.MagicMock()
request.app = self.app
request.json = async_mock.CoroutineMock(
return_value={"taggables": ["a", "b", "c"]}
)
with async_mock.patch.object(
test_module.web, "json_response", async_mock.Mock()
) as json_response:
self.wallet.WALLET_TYPE = "indy"
self.wallet.set_credential_definition_tag_policy = async_mock.CoroutineMock(
return_value=["a", "b", "c"]
)
result = await test_module.wallet_set_tagging_policy(request)
json_response.assert_called_once_with({})
assert result is json_response.return_value
async def test_set_catpol_not_indy_x(self):
request = async_mock.MagicMock()
request.app = self.app
request.json = async_mock.CoroutineMock(
return_value={"taggables": ["a", "b", "c"]}
)
self.wallet.WALLET_TYPE = "rich-corinthian-leather"
with self.assertRaises(test_module.web.HTTPForbidden):
await test_module.wallet_set_tagging_policy(request)
async def test_register(self):
mock_app = async_mock.MagicMock()
mock_app.add_routes = async_mock.MagicMock()
await test_module.register(mock_app)
mock_app.add_routes.assert_called_once()
| 42.259587 | 88 | 0.644144 | from asynctest import TestCase as AsyncTestCase
from asynctest import mock as async_mock
import pytest
from aiohttp.web import HTTPForbidden
from ...config.injection_context import InjectionContext
from ...ledger.base import BaseLedger
from ...wallet.base import BaseWallet, DIDInfo
from .. import routes as test_module
class TestWalletRoutes(AsyncTestCase):
def setUp(self):
self.context = InjectionContext(enforce_typing=False)
self.wallet = async_mock.create_autospec(BaseWallet)
self.context.injector.bind_instance(BaseWallet, self.wallet)
self.app = {
"outbound_message_router": async_mock.CoroutineMock(),
"request_context": self.context,
}
self.test_did = "did"
self.test_verkey = "verkey"
async def test_missing_wallet(self):
request = async_mock.MagicMock()
request.app = self.app
self.context.injector.clear_binding(BaseWallet)
with self.assertRaises(HTTPForbidden):
await test_module.wallet_create_did(request)
with self.assertRaises(HTTPForbidden):
await test_module.wallet_did_list(request)
with self.assertRaises(HTTPForbidden):
await test_module.wallet_get_public_did(request)
with self.assertRaises(HTTPForbidden):
await test_module.wallet_set_public_did(request)
def test_format_did_info(self):
did_info = DIDInfo(self.test_did, self.test_verkey, {})
result = test_module.format_did_info(did_info)
assert (
result["did"] == self.test_did
and result["verkey"] == self.test_verkey
and result["public"] == "false"
)
did_info = DIDInfo(self.test_did, self.test_verkey, {"public": True})
result = test_module.format_did_info(did_info)
assert result["public"] == "true"
async def test_create_did(self):
request = async_mock.MagicMock()
request.app = self.app
with async_mock.patch.object(
test_module.web, "json_response", async_mock.Mock()
) as json_response, async_mock.patch.object(
test_module, "format_did_info", async_mock.Mock()
) as format_did_info:
self.wallet.create_local_did.return_value = DIDInfo(
self.test_did, self.test_verkey, {}
)
result = await test_module.wallet_create_did(request)
format_did_info.assert_called_once_with(
self.wallet.create_local_did.return_value
)
json_response.assert_called_once_with(
{"result": format_did_info.return_value}
)
assert result is json_response.return_value
async def test_did_list(self):
request = async_mock.MagicMock()
request.app = self.app
request.query = {}
with async_mock.patch.object(
test_module.web, "json_response", async_mock.Mock()
) as json_response, async_mock.patch.object(
test_module, "format_did_info", async_mock.Mock()
) as format_did_info:
self.wallet.get_local_dids.return_value = [
DIDInfo(self.test_did, self.test_verkey, {})
]
format_did_info.return_value = {"did": self.test_did}
result = await test_module.wallet_did_list(request)
format_did_info.assert_called_once_with(
self.wallet.get_local_dids.return_value[0]
)
json_response.assert_called_once_with(
{"results": [format_did_info.return_value]}
)
assert json_response.return_value is json_response()
assert result is json_response.return_value
async def test_did_list_filter_public(self):
request = async_mock.MagicMock()
request.app = self.app
request.query = {"public": "true"}
with async_mock.patch.object(
test_module.web, "json_response", async_mock.Mock()
) as json_response, async_mock.patch.object(
test_module, "format_did_info", async_mock.Mock()
) as format_did_info:
self.wallet.get_public_did.return_value = DIDInfo(
self.test_did, self.test_verkey, {}
)
format_did_info.return_value = {"did": self.test_did}
result = await test_module.wallet_did_list(request)
format_did_info.assert_called_once_with(
self.wallet.get_public_did.return_value
)
json_response.assert_called_once_with(
{"results": [format_did_info.return_value]}
)
assert json_response.return_value is json_response()
assert result is json_response.return_value
async def test_did_list_filter_did(self):
request = async_mock.MagicMock()
request.app = self.app
request.query = {"did": self.test_did}
with async_mock.patch.object(
test_module.web, "json_response", async_mock.Mock()
) as json_response, async_mock.patch.object(
test_module, "format_did_info", async_mock.Mock()
) as format_did_info:
self.wallet.get_local_did.return_value = DIDInfo(
self.test_did, self.test_verkey, {}
)
format_did_info.return_value = {"did": self.test_did}
result = await test_module.wallet_did_list(request)
format_did_info.assert_called_once_with(
self.wallet.get_local_did.return_value
)
json_response.assert_called_once_with(
{"results": [format_did_info.return_value]}
)
assert json_response.return_value is json_response()
assert result is json_response.return_value
async def test_did_list_filter_did_x(self):
request = async_mock.MagicMock()
request.app = self.app
request.query = {"did": self.test_did}
with async_mock.patch.object(
test_module.web, "json_response", async_mock.Mock()
) as json_response:
self.wallet.get_local_did.side_effect = test_module.WalletError()
result = await test_module.wallet_did_list(request)
json_response.assert_called_once_with({"results": []})
assert json_response.return_value is json_response()
assert result is json_response.return_value
async def test_did_list_filter_verkey(self):
request = async_mock.MagicMock()
request.app = self.app
request.query = {"verkey": self.test_verkey}
with async_mock.patch.object(
test_module.web, "json_response", async_mock.Mock()
) as json_response, async_mock.patch.object(
test_module, "format_did_info", async_mock.Mock()
) as format_did_info:
self.wallet.get_local_did_for_verkey.return_value = DIDInfo(
self.test_did, self.test_verkey, {}
)
format_did_info.return_value = {"did": self.test_did}
result = await test_module.wallet_did_list(request)
format_did_info.assert_called_once_with(
self.wallet.get_local_did_for_verkey.return_value
)
json_response.assert_called_once_with(
{"results": [format_did_info.return_value]}
)
assert json_response.return_value is json_response()
assert result is json_response.return_value
async def test_did_list_filter_verkey_x(self):
request = async_mock.MagicMock()
request.app = self.app
request.query = {"verkey": self.test_verkey}
with async_mock.patch.object(
test_module.web, "json_response", async_mock.Mock()
) as json_response:
self.wallet.get_local_did_for_verkey.side_effect = test_module.WalletError()
result = await test_module.wallet_did_list(request)
json_response.assert_called_once_with({"results": []})
assert json_response.return_value is json_response()
assert result is json_response.return_value
async def test_get_public_did(self):
request = async_mock.MagicMock()
request.app = self.app
with async_mock.patch.object(
test_module.web, "json_response", async_mock.Mock()
) as json_response, async_mock.patch.object(
test_module, "format_did_info", async_mock.Mock()
) as format_did_info:
self.wallet.get_public_did.return_value = DIDInfo(
self.test_did, self.test_verkey, {}
)
result = await test_module.wallet_get_public_did(request)
format_did_info.assert_called_once_with(
self.wallet.get_public_did.return_value
)
json_response.assert_called_once_with(
{"result": format_did_info.return_value}
)
assert result is json_response.return_value
async def test_set_public_did(self):
request = async_mock.MagicMock()
request.app = self.app
request.query = {"did": self.test_did}
with async_mock.patch.object(
test_module.web, "json_response", async_mock.Mock()
) as json_response, async_mock.patch.object(
test_module, "format_did_info", async_mock.Mock()
) as format_did_info:
self.wallet.get_public_did.return_value = DIDInfo(
self.test_did, self.test_verkey, {}
)
result = await test_module.wallet_set_public_did(request)
self.wallet.set_public_did.assert_awaited_once_with(request.query["did"])
format_did_info.assert_called_once_with(
self.wallet.set_public_did.return_value
)
json_response.assert_called_once_with(
{"result": format_did_info.return_value}
)
assert result is json_response.return_value
async def test_set_public_did_no_did(self):
request = async_mock.MagicMock()
request.app = self.app
request.query = {}
with self.assertRaises(test_module.web.HTTPBadRequest):
await test_module.wallet_set_public_did(request)
async def test_set_public_did_not_found(self):
request = async_mock.MagicMock()
request.app = self.app
request.query = {"did": self.test_did}
self.wallet.get_local_did.side_effect = test_module.WalletError()
with self.assertRaises(test_module.web.HTTPBadRequest):
await test_module.wallet_set_public_did(request)
async def test_set_public_did_update_endpoint(self):
request = async_mock.MagicMock()
request.app = self.app
request.query = {"did": self.test_did}
Ledger = async_mock.MagicMock()
self.ledger = Ledger()
self.ledger.update_endpoint_for_did = async_mock.CoroutineMock()
self.ledger.__aenter__ = async_mock.CoroutineMock(return_value=self.ledger)
self.context.injector.bind_instance(BaseLedger, self.ledger)
with async_mock.patch.object(
test_module.web, "json_response", async_mock.Mock()
) as json_response, async_mock.patch.object(
test_module, "format_did_info", async_mock.Mock()
) as format_did_info:
self.wallet.get_public_did.return_value = DIDInfo(
self.test_did, self.test_verkey, {}
)
result = await test_module.wallet_set_public_did(request)
self.wallet.set_public_did.assert_awaited_once_with(request.query["did"])
format_did_info.assert_called_once_with(
self.wallet.set_public_did.return_value
)
json_response.assert_called_once_with(
{"result": format_did_info.return_value}
)
assert result is json_response.return_value
async def test_get_catpol(self):
request = async_mock.MagicMock()
request.app = self.app
with async_mock.patch.object(
test_module.web, "json_response", async_mock.Mock()
) as json_response:
self.wallet.WALLET_TYPE = "indy"
self.wallet.get_credential_definition_tag_policy = async_mock.CoroutineMock(
return_value=["a", "b", "c"]
)
result = await test_module.wallet_get_tagging_policy(request)
json_response.assert_called_once_with({"taggables": ["a", "b", "c"]})
assert result is json_response.return_value
async def test_get_catpol_not_indy_x(self):
request = async_mock.MagicMock()
request.app = self.app
self.wallet.WALLET_TYPE = "rich-corinthian-leather"
with self.assertRaises(test_module.web.HTTPForbidden):
await test_module.wallet_get_tagging_policy(request)
async def test_set_catpol(self):
request = async_mock.MagicMock()
request.app = self.app
request.json = async_mock.CoroutineMock(
return_value={"taggables": ["a", "b", "c"]}
)
with async_mock.patch.object(
test_module.web, "json_response", async_mock.Mock()
) as json_response:
self.wallet.WALLET_TYPE = "indy"
self.wallet.set_credential_definition_tag_policy = async_mock.CoroutineMock(
return_value=["a", "b", "c"]
)
result = await test_module.wallet_set_tagging_policy(request)
json_response.assert_called_once_with({})
assert result is json_response.return_value
async def test_set_catpol_not_indy_x(self):
request = async_mock.MagicMock()
request.app = self.app
request.json = async_mock.CoroutineMock(
return_value={"taggables": ["a", "b", "c"]}
)
self.wallet.WALLET_TYPE = "rich-corinthian-leather"
with self.assertRaises(test_module.web.HTTPForbidden):
await test_module.wallet_set_tagging_policy(request)
async def test_register(self):
mock_app = async_mock.MagicMock()
mock_app.add_routes = async_mock.MagicMock()
await test_module.register(mock_app)
mock_app.add_routes.assert_called_once()
| true | true |
f7244d3b8570046485a9f0792f05527b6f08760b | 62 | py | Python | app/db/repos/base/protocols/statements/__init__.py | maxzhenzhera/my_vocab_backend | 2e9f968374e0bc2fcc0ae40830ca40f3cf5754d1 | [
"MIT"
] | null | null | null | app/db/repos/base/protocols/statements/__init__.py | maxzhenzhera/my_vocab_backend | 2e9f968374e0bc2fcc0ae40830ca40f3cf5754d1 | [
"MIT"
] | null | null | null | app/db/repos/base/protocols/statements/__init__.py | maxzhenzhera/my_vocab_backend | 2e9f968374e0bc2fcc0ae40830ca40f3cf5754d1 | [
"MIT"
] | null | null | null | from .returnable import Returnable
__all__ = ['Returnable']
| 12.4 | 34 | 0.758065 | from .returnable import Returnable
__all__ = ['Returnable']
| true | true |
f7244e8ebbd013097871e683eb9e56711f004cd7 | 461 | py | Python | EDUREKA/Course.3/Case.Study.1.Programs/distanceCalculation.py | linkeshkanna/ProblemSolving | 8286ce66fbe82a78e1a19396da2d888d755d4cf4 | [
"Apache-2.0"
] | null | null | null | EDUREKA/Course.3/Case.Study.1.Programs/distanceCalculation.py | linkeshkanna/ProblemSolving | 8286ce66fbe82a78e1a19396da2d888d755d4cf4 | [
"Apache-2.0"
] | null | null | null | EDUREKA/Course.3/Case.Study.1.Programs/distanceCalculation.py | linkeshkanna/ProblemSolving | 8286ce66fbe82a78e1a19396da2d888d755d4cf4 | [
"Apache-2.0"
] | null | null | null | import math
from math import radians
lat1 = 13.0827
lat2 = 9.4533
long1 = 80.2707
long2 = 77.8024
R = 6371
teta1 = radians(lat1)
teta2 = radians(lat2)
teta = radians(lat2 - lat1)
landa = radians(long2 - long1)
a = math.sin(teta/2) * math.sin(teta/2) + math.cos(teta1) * math.cos(teta2) * math.sin(landa/2) * math.sin(landa/2)
c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
d = R * c
print("Distance between two Latitude, Longtitude is : " + str(d))
| 19.208333 | 115 | 0.661605 | import math
from math import radians
lat1 = 13.0827
lat2 = 9.4533
long1 = 80.2707
long2 = 77.8024
R = 6371
teta1 = radians(lat1)
teta2 = radians(lat2)
teta = radians(lat2 - lat1)
landa = radians(long2 - long1)
a = math.sin(teta/2) * math.sin(teta/2) + math.cos(teta1) * math.cos(teta2) * math.sin(landa/2) * math.sin(landa/2)
c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
d = R * c
print("Distance between two Latitude, Longtitude is : " + str(d))
| true | true |
f7244e9d2237fe01ddaeb6e8e95ca04552be563e | 3,688 | py | Python | airiam/terraform/entity_terraformers/IAMPolicyDocumentTransformer.py | metahertz/AirIAM | 212f84e1b1a51c7a614384f91b220e7f2a57a079 | [
"Apache-2.0"
] | 501 | 2020-03-04T16:00:54.000Z | 2022-03-30T17:31:10.000Z | airiam/terraform/entity_terraformers/IAMPolicyDocumentTransformer.py | rckasa/AirIAM | 5a99dc25354c1bc6525dbaf25a3afcd472f71b2f | [
"Apache-2.0"
] | 34 | 2020-03-23T08:12:18.000Z | 2022-02-13T08:50:39.000Z | airiam/terraform/entity_terraformers/IAMPolicyDocumentTransformer.py | rckasa/AirIAM | 5a99dc25354c1bc6525dbaf25a3afcd472f71b2f | [
"Apache-2.0"
] | 51 | 2020-04-16T06:43:29.000Z | 2022-03-20T14:20:24.000Z | import json
from airiam.terraform.entity_terraformers.BaseEntityTransformer import BaseEntityTransformer
class IAMPolicyDocumentTransformer(BaseEntityTransformer):
def __init__(self, entity_json: dict, policy_name, principal_name=None):
policy_document_name = f"{policy_name}_document"
if principal_name:
policy_document_name = f"{principal_name}_{policy_document_name}"
super().__init__('data.aws_iam_policy_document', policy_document_name, entity_json)
def _generate_hcl2_code(self, entity_json) -> str:
statements = IAMPolicyDocumentTransformer.force_list(entity_json['Statement'])
if 'Principal' in statements[0]:
statements = self.transform_assume_policy_statements(statements)
else:
statements = self.transform_execution_policy(statements)
code = f"""data "aws_iam_policy_document" "{self._safe_name}" {{
version = "{entity_json.get('Version', '2012-10-17')}"
{statements}}}"""
return code
@staticmethod
def transform_execution_policy(statements):
statement_block = ""
for statement in statements:
sid_string = ""
if statement.get('Sid', '') != '':
sid_string = f"sid = \"{statement['Sid']}\"\n "
actions = IAMPolicyDocumentTransformer.force_list(statement.get('Action'))
if 'Action' in statement:
action_str = f"actions = {json.dumps(actions)}"
else:
actions = IAMPolicyDocumentTransformer.force_list(statement.get('NotAction'))
action_str = f"not_actions = {json.dumps(actions)}"
condition_block = IAMPolicyDocumentTransformer.transform_conditions(statement)
resources_list_str = json.dumps(IAMPolicyDocumentTransformer.force_list(statement.get('Resource'))).replace('${', '$\\u0024{')
statement_block += f""" statement {{
{sid_string}effect = "{statement['Effect']}"
{action_str}
resources = {resources_list_str}
{condition_block}
}}
"""
return statement_block
@staticmethod
def transform_assume_policy_statements(statements):
statement_block = ""
for statement in statements:
sid_string = ""
if statement.get('Sid', '') != '':
sid_string = f"sid = \"{statement['Sid']}\"\n "
condition_block = IAMPolicyDocumentTransformer.transform_conditions(statement)
statement_block += f""" statement {{
{sid_string}effect = "{statement['Effect']}"
actions = {json.dumps(IAMPolicyDocumentTransformer.force_list(statement['Action']))}
principals {{
type = "{list(statement['Principal'].keys())[0]}"
identifiers = {json.dumps(IAMPolicyDocumentTransformer.force_list(statement['Principal'][list(statement['Principal'].keys())[0]]))}
}}
{condition_block}}}
"""
return statement_block
@staticmethod
def transform_conditions(statement):
condition_block = ""
if 'Condition' in statement:
for test, items in statement['Condition'].items():
for variable, values in items.items():
values_str = json.dumps(IAMPolicyDocumentTransformer.force_list(values)).replace('${', '$\\u0024{')
condition_block += f"""
condition {{
test = "{test}"
variable = "{variable}"
values = {values_str}
}}
"""
return condition_block
@staticmethod
def force_list(x):
if isinstance(x, list):
return x
return [x]
def entities_to_import(self) -> list:
return []
| 38.821053 | 138 | 0.632863 | import json
from airiam.terraform.entity_terraformers.BaseEntityTransformer import BaseEntityTransformer
class IAMPolicyDocumentTransformer(BaseEntityTransformer):
def __init__(self, entity_json: dict, policy_name, principal_name=None):
policy_document_name = f"{policy_name}_document"
if principal_name:
policy_document_name = f"{principal_name}_{policy_document_name}"
super().__init__('data.aws_iam_policy_document', policy_document_name, entity_json)
def _generate_hcl2_code(self, entity_json) -> str:
statements = IAMPolicyDocumentTransformer.force_list(entity_json['Statement'])
if 'Principal' in statements[0]:
statements = self.transform_assume_policy_statements(statements)
else:
statements = self.transform_execution_policy(statements)
code = f"""data "aws_iam_policy_document" "{self._safe_name}" {{
version = "{entity_json.get('Version', '2012-10-17')}"
{statements}}}"""
return code
@staticmethod
def transform_execution_policy(statements):
statement_block = ""
for statement in statements:
sid_string = ""
if statement.get('Sid', '') != '':
sid_string = f"sid = \"{statement['Sid']}\"\n "
actions = IAMPolicyDocumentTransformer.force_list(statement.get('Action'))
if 'Action' in statement:
action_str = f"actions = {json.dumps(actions)}"
else:
actions = IAMPolicyDocumentTransformer.force_list(statement.get('NotAction'))
action_str = f"not_actions = {json.dumps(actions)}"
condition_block = IAMPolicyDocumentTransformer.transform_conditions(statement)
resources_list_str = json.dumps(IAMPolicyDocumentTransformer.force_list(statement.get('Resource'))).replace('${', '$\\u0024{')
statement_block += f""" statement {{
{sid_string}effect = "{statement['Effect']}"
{action_str}
resources = {resources_list_str}
{condition_block}
}}
"""
return statement_block
@staticmethod
def transform_assume_policy_statements(statements):
statement_block = ""
for statement in statements:
sid_string = ""
if statement.get('Sid', '') != '':
sid_string = f"sid = \"{statement['Sid']}\"\n "
condition_block = IAMPolicyDocumentTransformer.transform_conditions(statement)
statement_block += f""" statement {{
{sid_string}effect = "{statement['Effect']}"
actions = {json.dumps(IAMPolicyDocumentTransformer.force_list(statement['Action']))}
principals {{
type = "{list(statement['Principal'].keys())[0]}"
identifiers = {json.dumps(IAMPolicyDocumentTransformer.force_list(statement['Principal'][list(statement['Principal'].keys())[0]]))}
}}
{condition_block}}}
"""
return statement_block
@staticmethod
def transform_conditions(statement):
condition_block = ""
if 'Condition' in statement:
for test, items in statement['Condition'].items():
for variable, values in items.items():
values_str = json.dumps(IAMPolicyDocumentTransformer.force_list(values)).replace('${', '$\\u0024{')
condition_block += f"""
condition {{
test = "{test}"
variable = "{variable}"
values = {values_str}
}}
"""
return condition_block
@staticmethod
def force_list(x):
if isinstance(x, list):
return x
return [x]
def entities_to_import(self) -> list:
return []
| true | true |
f7244effc59fcc20c066ca606a185366478f6693 | 620 | py | Python | smpc_demo_platform/benchmarking/views.py | Safe-DEED/mpc-mock-up | 7c12b94d50bcde8480da8a7abf93c32b2708e2aa | [
"MIT"
] | null | null | null | smpc_demo_platform/benchmarking/views.py | Safe-DEED/mpc-mock-up | 7c12b94d50bcde8480da8a7abf93c32b2708e2aa | [
"MIT"
] | null | null | null | smpc_demo_platform/benchmarking/views.py | Safe-DEED/mpc-mock-up | 7c12b94d50bcde8480da8a7abf93c32b2708e2aa | [
"MIT"
] | null | null | null | from django.views.generic.base import TemplateView
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
# decorators = [login_required, ]
# @method_decorator(decorators, name='dispatch')
class BenchmarkViewAppCGOne(TemplateView):
template_name = "benchmarking/app-x3-Z63/benchmarking-home.html"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
return context
# @method_decorator(decorators, name='dispatch')
class BenchmarkEndView(TemplateView):
template_name = "benchmarking/end-of-demo.html"
| 31 | 68 | 0.774194 | from django.views.generic.base import TemplateView
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
class BenchmarkViewAppCGOne(TemplateView):
template_name = "benchmarking/app-x3-Z63/benchmarking-home.html"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
return context
class BenchmarkEndView(TemplateView):
template_name = "benchmarking/end-of-demo.html"
| true | true |
f72450ed27bed040eb378e87323d130b0d5efc33 | 143 | py | Python | utests/test_parsers.py | Zephor5/zspider | 49178415137d67d7c88f2904bcb204df32082204 | [
"MIT"
] | 12 | 2015-12-23T10:13:31.000Z | 2021-04-25T17:12:39.000Z | utests/test_parsers.py | Zephor5/zspider | 49178415137d67d7c88f2904bcb204df32082204 | [
"MIT"
] | 1 | 2022-03-02T14:53:18.000Z | 2022-03-02T14:53:18.000Z | utests/test_parsers.py | Zephor5/zspider | 49178415137d67d7c88f2904bcb204df32082204 | [
"MIT"
] | 1 | 2017-09-18T08:51:51.000Z | 2017-09-18T08:51:51.000Z | # coding=utf-8
import unittest
__author__ = "zephor"
# noinspection PyUnresolvedReferences
class TestNewspaper(unittest.TestCase):
pass
| 14.3 | 39 | 0.783217 |
import unittest
__author__ = "zephor"
class TestNewspaper(unittest.TestCase):
pass
| true | true |
f7245165fdf913d8a10e8d0815498a4fecb991a0 | 2,721 | py | Python | lumbermill/modifier/Permutate.py | dstore-dbap/LumberMill | b7cbadc209a83386871735b8ad88b61da917a6ab | [
"Apache-2.0"
] | 15 | 2015-12-14T19:07:28.000Z | 2022-02-28T13:32:11.000Z | lumbermill/modifier/Permutate.py | dstore-dbap/LumberMill | b7cbadc209a83386871735b8ad88b61da917a6ab | [
"Apache-2.0"
] | null | null | null | lumbermill/modifier/Permutate.py | dstore-dbap/LumberMill | b7cbadc209a83386871735b8ad88b61da917a6ab | [
"Apache-2.0"
] | 4 | 2017-02-08T10:49:55.000Z | 2019-03-19T18:47:46.000Z | # -*- coding: utf-8 -*-
import itertools
import sys
from lumbermill.BaseThreadedModule import BaseThreadedModule
from lumbermill.utils.Decorators import ModuleDocstringParser
@ModuleDocstringParser
class Permutate(BaseThreadedModule):
"""
Creates successive len('target_fields') length permutations of elements in 'source_field'.
To add some context data to each emitted event 'context_data_field' can specify a field
containing a dictionary with the values of 'source_field' as keys.
Configuration template:
- Permutate:
source_field: # <type: string; is: required>
target_fields: # <type: list; is: required>
context_data_field: # <default: ""; type:string; is: optional>
context_target_mapping: # <default: {}; type: dict; is: optional if context_data_field == "" else required>
receivers:
- NextModule
"""
module_type = "modifier"
"""Set module type"""
def handleEvent(self, event):
"""
Process the event.
@param event: dictionary
@return data: dictionary
"""
try:
context_data = event[self.getConfigurationValue('context_data_field')]
except KeyError:
context_data = False
try:
permutation_data = event[self.getConfigurationValue('source_field')]
except KeyError:
yield event
return
if type(permutation_data) is not list:
yield event
return
target_field_names = self.getConfigurationValue('target_fields')
context_target_mapping = self.getConfigurationValue('context_target_mapping')
for permutation in itertools.permutations(permutation_data, r=len(target_field_names)):
event_copy = event.copy()
if context_data:
try:
# Rewrite the context data keys to new keys in context_target_mapping
ctx_data = {}
for idx, dct in enumerate([context_data[key] for key in permutation if key in context_data]):
for mapping_key, newkeys in context_target_mapping.items():
if mapping_key in dct:
ctx_data[newkeys[idx]] = dct[mapping_key]
event_copy.update(ctx_data)
except:
etype, evalue, etb = sys.exc_info()
self.logger.warning("Could not add context data. Exception: %s, Error: %s." % (etype, evalue))
perm = dict(zip(target_field_names, permutation))
event_copy.update(perm)
yield event_copy
| 38.871429 | 123 | 0.606027 |
import itertools
import sys
from lumbermill.BaseThreadedModule import BaseThreadedModule
from lumbermill.utils.Decorators import ModuleDocstringParser
@ModuleDocstringParser
class Permutate(BaseThreadedModule):
module_type = "modifier"
def handleEvent(self, event):
try:
context_data = event[self.getConfigurationValue('context_data_field')]
except KeyError:
context_data = False
try:
permutation_data = event[self.getConfigurationValue('source_field')]
except KeyError:
yield event
return
if type(permutation_data) is not list:
yield event
return
target_field_names = self.getConfigurationValue('target_fields')
context_target_mapping = self.getConfigurationValue('context_target_mapping')
for permutation in itertools.permutations(permutation_data, r=len(target_field_names)):
event_copy = event.copy()
if context_data:
try:
ctx_data = {}
for idx, dct in enumerate([context_data[key] for key in permutation if key in context_data]):
for mapping_key, newkeys in context_target_mapping.items():
if mapping_key in dct:
ctx_data[newkeys[idx]] = dct[mapping_key]
event_copy.update(ctx_data)
except:
etype, evalue, etb = sys.exc_info()
self.logger.warning("Could not add context data. Exception: %s, Error: %s." % (etype, evalue))
perm = dict(zip(target_field_names, permutation))
event_copy.update(perm)
yield event_copy
| true | true |
f72451c6dd941af7a8fa151c731d1b07245f0b6c | 8,252 | py | Python | tests/test_utils.py | austinjp/textacy | dddfdbf0e0ab3bf756bc4eda042eab1001aac709 | [
"Apache-2.0"
] | 1,929 | 2016-02-14T08:30:38.000Z | 2022-03-31T03:00:35.000Z | tests/test_utils.py | austinjp/textacy | dddfdbf0e0ab3bf756bc4eda042eab1001aac709 | [
"Apache-2.0"
] | 304 | 2016-02-18T15:52:22.000Z | 2022-03-31T18:06:54.000Z | tests/test_utils.py | austinjp/textacy | dddfdbf0e0ab3bf756bc4eda042eab1001aac709 | [
"Apache-2.0"
] | 285 | 2016-03-20T04:25:08.000Z | 2022-03-24T11:31:17.000Z | import datetime
import pathlib
import pytest
from textacy import utils
@pytest.mark.parametrize(
"val,val_type,col_type,expected",
[
(None, int, list, None),
(1, int, list, [1]),
([1, 2], int, tuple, (1, 2)),
((1, 1.0), (int, float), set, {1, 1.0}),
],
)
def test_to_collection(val, val_type, col_type, expected):
assert utils.to_collection(val, val_type, col_type) == expected
class TestToUnicode:
@pytest.mark.parametrize("s", [b"bytes", "unicode", "úñîçødé"])
def test_valid(self, s):
assert isinstance(utils.to_unicode(s), str)
@pytest.mark.parametrize("s", [1, 2.0, ["foo", "bar"], {"foo": "bar"}])
def test_invalid(self, s):
with pytest.raises(TypeError):
_ = utils.to_unicode(s)
class TestToBytes:
@pytest.mark.parametrize("s", [b"bytes", "unicode", "úñîçødé"])
def test_valid(self, s):
assert isinstance(utils.to_bytes(s), bytes)
@pytest.mark.parametrize("s", [1, 2.0, ["foo", "bar"], {"foo": "bar"}])
def test_invalid(self, s):
with pytest.raises(TypeError):
_ = utils.to_bytes(s)
class TestToPath:
@pytest.mark.parametrize("path", [pathlib.Path("."), pathlib.Path.home()])
def test_path_input(self, path):
assert isinstance(utils.to_path(path), pathlib.Path)
@pytest.mark.parametrize("path", ["unicode", "úñîçødé"])
def test_str_input(self, path):
assert isinstance(utils.to_path(path), pathlib.Path)
@pytest.mark.parametrize("path", [1, 2.0, ["foo", "bar"], {"foo": "bar"}])
def test_invalid_input(self, path):
with pytest.raises(TypeError):
_ = utils.to_path(path)
class TestValidateAndClipRange:
    """Tests for ``utils.validate_and_clip_range``."""
    @pytest.mark.parametrize(
        "range_vals,full_range,val_type",
        [
            [("2001-01", "2002-01"), ("2000-01", "2003-01"), None],
            [["2001-01", "2004-01"], ("2000-01", "2003-01"), None],
            [("2001-01", "2002-01"), ["2000-01", "2003-01"], (str, bytes)],
            [[-5, 5], [-10, 10], None],
            [(-5, 5), (0, 10), None],
            [(-5, 5), (-10, 10), int],
            [(-5, 5), (-10, 10), (int, float)],
            [(0, None), (-5, 5), None],
            [(None, 0), (-5, 5), None],
        ],
    )
    def test_valid_inputs(self, range_vals, full_range, val_type):
        """A valid range comes back as a 2-tuple clipped to ``full_range``;
        a ``None`` endpoint falls back to the corresponding full-range bound.
        """
        output = utils.validate_and_clip_range(range_vals, full_range, val_type)
        assert isinstance(output, tuple)
        assert len(output) == 2
        if range_vals[0] is None:
            # Open lower bound: expect the full range's lower bound.
            assert output[0] == full_range[0]
        else:
            assert output[0] == max(range_vals[0], full_range[0])
        if range_vals[1] is None:
            # Open upper bound: expect the full range's upper bound.
            assert output[1] == full_range[1]
        else:
            assert output[1] == min(range_vals[1], full_range[1])
    @pytest.mark.parametrize(
        "range_vals,full_range,val_type,error",
        [
            ["2001-01", ("2000-01", "2003-01"), None, pytest.raises(TypeError)],
            [("2001-01", "2002-01"), "2000-01", None, pytest.raises(TypeError)],
            [
                {"2001-01", "2002-01"},
                ("2000-01", "2003-01"),
                None,
                pytest.raises(TypeError),
            ],
            [
                ("2001-01", "2002-01"),
                ("2000-01", "2003-01"),
                datetime.date,
                pytest.raises(TypeError),
            ],
            [0, [-10, 10], None, pytest.raises(TypeError)],
            [(-5, 5), 0, None, pytest.raises(TypeError)],
            [[-5, 5], [-10, 10], (str, bytes), pytest.raises(TypeError)],
            [
                ("2001-01", "2002-01", "2003-01"),
                ("2000-01", "2003-01"),
                None,
                pytest.raises(ValueError),
            ],
            [
                ("2001-01", "2002-01"),
                ["2000-01", "2002-01", "2004-01"],
                None,
                pytest.raises(ValueError),
            ],
            [[0, 5, 10], (-10, 10), None, pytest.raises(ValueError)],
            [(-5, 5), [-10, 0, 10], None, pytest.raises(ValueError)],
            [(-5, 5), [-10, 0, 10], (str, bytes), pytest.raises(ValueError)],
        ],
    )
    def test_invalid_inputs(self, range_vals, full_range, val_type, error):
        """Non-sequence inputs / wrong value types raise TypeError; ranges of
        length != 2 raise ValueError (per the parametrized ``error`` context).
        """
        with error:
            _ = utils.validate_and_clip_range(range_vals, full_range, val_type)
class TestValidateSetMembers:
    """Tests for ``utils.validate_set_members``."""
    @pytest.mark.parametrize(
        "vals,val_type,valid_vals",
        [
            [{"a", "b"}, (str, bytes), {"a", "b", "c"}],
            ["a", (str, bytes), {"a", "b", "c"}],
            [("a", "b"), (str, bytes), {"a", "b", "c"}],
            [["a", "b"], (str, bytes), None],
            [{1, 2}, int, {1, 2, 3}],
            [{1, 2}, (int, float), {1, 2, 3}],
            [1, int, {1: "a", 2: "b", 3: "c"}],
            [{3.14, 42.0}, float, None],
            [3.14, (int, float), None],
        ]
    )
    def test_valid_inputs(self, vals, val_type, valid_vals):
        """Scalars and collections of the right type are normalized to a set
        whose members are all instances of ``val_type``.
        """
        output = utils.validate_set_members(vals, val_type, valid_vals)
        assert isinstance(output, set)
        assert all(isinstance(val, val_type) for val in output)
    @pytest.mark.parametrize(
        "vals,val_type,valid_vals,error",
        [
            [{"a", "b"}, int, None, pytest.raises(TypeError)],
            ["a", int, None, pytest.raises(TypeError)],
            [("a", "b"), (int, float), None, pytest.raises(TypeError)],
            [{"a", "b"}, (str, bytes), {"x", "y", "z"}, pytest.raises(ValueError)],
            [{"a", "x"}, (str, bytes), {"x", "y", "z"}, pytest.raises(ValueError)],
            ["a", (str, bytes), {"x", "y", "z"}, pytest.raises(ValueError)],
            ["a", (str, bytes), {"x": 24, "y": 25, "z": 26}, pytest.raises(ValueError)],
        ]
    )
    def test_invalid_inputs(self, vals, val_type, valid_vals, error):
        """Wrong member types raise TypeError; members outside ``valid_vals``
        raise ValueError (per the parametrized ``error`` context).
        """
        with error:
            _ = utils.validate_set_members(vals, val_type, valid_vals)
# TODO: uncomment this when we're only supporting PY3.8+
# def _func_pos_only_args(parg1, parg2, /):
# return (parg1, parg2)
# TODO: uncomment this when we're only supporting PY3.8+
# def _func_mix_args(parg, /, arg, *, kwarg):
# return (parg, arg, kwarg)
def _func_mix_args(arg, *, kwarg):
return (arg, kwarg)
def _func_kw_only_args(*, kwarg1, kwarg2):
return (kwarg1, kwarg2)
@pytest.mark.parametrize(
    "func,kwargs,expected",
    [
        # (_func_pos_only_args, {"kwarg": "kwargval"}, {}),
        (_func_mix_args, {"arg": "argval"}, {"arg": "argval"}),
        (
            _func_mix_args,
            {"arg": "argval", "kwarg": "kwarval"},
            {"arg": "argval", "kwarg": "kwarval"},
        ),
        (
            _func_mix_args,
            {"arg": "argval", "kwarg": "kwargval", "foo": "bar"},
            {"arg": "argval", "kwarg": "kwargval"},
        ),
        (
            _func_kw_only_args,
            {"kwarg1": "kwarg1val", "kwarg2": "kwarg2val"},
            {"kwarg1": "kwarg1val", "kwarg2": "kwarg2val"},
        ),
        (
            _func_kw_only_args,
            {"kwarg1": "kwarg1val", "kwarg3": "kwarg3val"},
            {"kwarg1": "kwarg1val"},
        ),
        (_func_kw_only_args, {}, {}),
    ],
)
def test_get_kwargs_for_func(func, kwargs, expected):
    """``utils.get_kwargs_for_func`` keeps only the kwargs that appear in
    ``func``'s signature, silently dropping any extras (e.g. "foo"/"kwarg3").
    """
    assert utils.get_kwargs_for_func(func, kwargs) == expected
@pytest.mark.parametrize(
    "text, n, pad, exp",
    [
        (
            "testing 123",
            1,
            False,
            ('t', 'e', 's', 't', 'i', 'n', 'g', ' ', '1', '2', '3'),
        ),
        (
            "testing 123",
            1,
            True,
            ('t', 'e', 's', 't', 'i', 'n', 'g', ' ', '1', '2', '3'),
        ),
        (
            "testing 123",
            2,
            False,
            ('te', 'es', 'st', 'ti', 'in', 'ng', 'g ', ' 1', '12', '23'),
        ),
        (
            "testing 123",
            2,
            True,
            ('_t', 'te', 'es', 'st', 'ti', 'in', 'ng', 'g ', ' 1', '12', '23', '3_'),
        ),
    ]
)
def test_text_to_char_ngrams(text, n, pad, exp):
    """``utils.text_to_char_ngrams`` yields all length-``n`` character windows;
    with ``pad=True`` the text is edge-padded with '_' for n > 1 (padding is a
    no-op for n == 1, per the second case above).
    """
    obs = utils.text_to_char_ngrams(text, n, pad=pad)
    assert all(isinstance(cng, str) and len(cng) == n for cng in obs)
    assert obs == exp
| 32.234375 | 88 | 0.491032 | import datetime
import pathlib
import pytest
from textacy import utils
@pytest.mark.parametrize(
"val,val_type,col_type,expected",
[
(None, int, list, None),
(1, int, list, [1]),
([1, 2], int, tuple, (1, 2)),
((1, 1.0), (int, float), set, {1, 1.0}),
],
)
def test_to_collection(val, val_type, col_type, expected):
assert utils.to_collection(val, val_type, col_type) == expected
class TestToUnicode:
@pytest.mark.parametrize("s", [b"bytes", "unicode", "úñîçødé"])
def test_valid(self, s):
assert isinstance(utils.to_unicode(s), str)
@pytest.mark.parametrize("s", [1, 2.0, ["foo", "bar"], {"foo": "bar"}])
def test_invalid(self, s):
with pytest.raises(TypeError):
_ = utils.to_unicode(s)
class TestToBytes:
@pytest.mark.parametrize("s", [b"bytes", "unicode", "úñîçødé"])
def test_valid(self, s):
assert isinstance(utils.to_bytes(s), bytes)
@pytest.mark.parametrize("s", [1, 2.0, ["foo", "bar"], {"foo": "bar"}])
def test_invalid(self, s):
with pytest.raises(TypeError):
_ = utils.to_bytes(s)
class TestToPath:
@pytest.mark.parametrize("path", [pathlib.Path("."), pathlib.Path.home()])
def test_path_input(self, path):
assert isinstance(utils.to_path(path), pathlib.Path)
@pytest.mark.parametrize("path", ["unicode", "úñîçødé"])
def test_str_input(self, path):
assert isinstance(utils.to_path(path), pathlib.Path)
@pytest.mark.parametrize("path", [1, 2.0, ["foo", "bar"], {"foo": "bar"}])
def test_invalid_input(self, path):
with pytest.raises(TypeError):
_ = utils.to_path(path)
class TestValidateAndClipRange:
@pytest.mark.parametrize(
"range_vals,full_range,val_type",
[
[("2001-01", "2002-01"), ("2000-01", "2003-01"), None],
[["2001-01", "2004-01"], ("2000-01", "2003-01"), None],
[("2001-01", "2002-01"), ["2000-01", "2003-01"], (str, bytes)],
[[-5, 5], [-10, 10], None],
[(-5, 5), (0, 10), None],
[(-5, 5), (-10, 10), int],
[(-5, 5), (-10, 10), (int, float)],
[(0, None), (-5, 5), None],
[(None, 0), (-5, 5), None],
],
)
def test_valid_inputs(self, range_vals, full_range, val_type):
output = utils.validate_and_clip_range(range_vals, full_range, val_type)
assert isinstance(output, tuple)
assert len(output) == 2
if range_vals[0] is None:
assert output[0] == full_range[0]
else:
assert output[0] == max(range_vals[0], full_range[0])
if range_vals[1] is None:
assert output[1] == full_range[1]
else:
assert output[1] == min(range_vals[1], full_range[1])
@pytest.mark.parametrize(
"range_vals,full_range,val_type,error",
[
["2001-01", ("2000-01", "2003-01"), None, pytest.raises(TypeError)],
[("2001-01", "2002-01"), "2000-01", None, pytest.raises(TypeError)],
[
{"2001-01", "2002-01"},
("2000-01", "2003-01"),
None,
pytest.raises(TypeError),
],
[
("2001-01", "2002-01"),
("2000-01", "2003-01"),
datetime.date,
pytest.raises(TypeError),
],
[0, [-10, 10], None, pytest.raises(TypeError)],
[(-5, 5), 0, None, pytest.raises(TypeError)],
[[-5, 5], [-10, 10], (str, bytes), pytest.raises(TypeError)],
[
("2001-01", "2002-01", "2003-01"),
("2000-01", "2003-01"),
None,
pytest.raises(ValueError),
],
[
("2001-01", "2002-01"),
["2000-01", "2002-01", "2004-01"],
None,
pytest.raises(ValueError),
],
[[0, 5, 10], (-10, 10), None, pytest.raises(ValueError)],
[(-5, 5), [-10, 0, 10], None, pytest.raises(ValueError)],
[(-5, 5), [-10, 0, 10], (str, bytes), pytest.raises(ValueError)],
],
)
def test_invalid_inputs(self, range_vals, full_range, val_type, error):
with error:
_ = utils.validate_and_clip_range(range_vals, full_range, val_type)
class TestValidateSetMembers:
@pytest.mark.parametrize(
"vals,val_type,valid_vals",
[
[{"a", "b"}, (str, bytes), {"a", "b", "c"}],
["a", (str, bytes), {"a", "b", "c"}],
[("a", "b"), (str, bytes), {"a", "b", "c"}],
[["a", "b"], (str, bytes), None],
[{1, 2}, int, {1, 2, 3}],
[{1, 2}, (int, float), {1, 2, 3}],
[1, int, {1: "a", 2: "b", 3: "c"}],
[{3.14, 42.0}, float, None],
[3.14, (int, float), None],
]
)
def test_valid_inputs(self, vals, val_type, valid_vals):
output = utils.validate_set_members(vals, val_type, valid_vals)
assert isinstance(output, set)
assert all(isinstance(val, val_type) for val in output)
@pytest.mark.parametrize(
"vals,val_type,valid_vals,error",
[
[{"a", "b"}, int, None, pytest.raises(TypeError)],
["a", int, None, pytest.raises(TypeError)],
[("a", "b"), (int, float), None, pytest.raises(TypeError)],
[{"a", "b"}, (str, bytes), {"x", "y", "z"}, pytest.raises(ValueError)],
[{"a", "x"}, (str, bytes), {"x", "y", "z"}, pytest.raises(ValueError)],
["a", (str, bytes), {"x", "y", "z"}, pytest.raises(ValueError)],
["a", (str, bytes), {"x": 24, "y": 25, "z": 26}, pytest.raises(ValueError)],
]
)
def test_invalid_inputs(self, vals, val_type, valid_vals, error):
with error:
_ = utils.validate_set_members(vals, val_type, valid_vals)
# def _func_pos_only_args(parg1, parg2, /):
# return (parg1, parg2)
# TODO: uncomment this when we're only supporting PY3.8+
def _func_mix_args(arg, *, kwarg):
return (arg, kwarg)
def _func_kw_only_args(*, kwarg1, kwarg2):
return (kwarg1, kwarg2)
@pytest.mark.parametrize(
"func,kwargs,expected",
[
(_func_mix_args, {"arg": "argval"}, {"arg": "argval"}),
(
_func_mix_args,
{"arg": "argval", "kwarg": "kwarval"},
{"arg": "argval", "kwarg": "kwarval"},
),
(
_func_mix_args,
{"arg": "argval", "kwarg": "kwargval", "foo": "bar"},
{"arg": "argval", "kwarg": "kwargval"},
),
(
_func_kw_only_args,
{"kwarg1": "kwarg1val", "kwarg2": "kwarg2val"},
{"kwarg1": "kwarg1val", "kwarg2": "kwarg2val"},
),
(
_func_kw_only_args,
{"kwarg1": "kwarg1val", "kwarg3": "kwarg3val"},
{"kwarg1": "kwarg1val"},
),
(_func_kw_only_args, {}, {}),
],
)
def test_get_kwargs_for_func(func, kwargs, expected):
assert utils.get_kwargs_for_func(func, kwargs) == expected
@pytest.mark.parametrize(
"text, n, pad, exp",
[
(
"testing 123",
1,
False,
('t', 'e', 's', 't', 'i', 'n', 'g', ' ', '1', '2', '3'),
),
(
"testing 123",
1,
True,
('t', 'e', 's', 't', 'i', 'n', 'g', ' ', '1', '2', '3'),
),
(
"testing 123",
2,
False,
('te', 'es', 'st', 'ti', 'in', 'ng', 'g ', ' 1', '12', '23'),
),
(
"testing 123",
2,
True,
('_t', 'te', 'es', 'st', 'ti', 'in', 'ng', 'g ', ' 1', '12', '23', '3_'),
),
]
)
def test_text_to_char_ngrams(text, n, pad, exp):
obs = utils.text_to_char_ngrams(text, n, pad=pad)
assert all(isinstance(cng, str) and len(cng) == n for cng in obs)
assert obs == exp
| true | true |
f72453a57dbfc168b184e62aede21ddeafb3650f | 2,970 | py | Python | cerebro/storage/local.py | Abhishek2304/Cerebro-System-Ray | 1e2f2ae291cd449573f87bb83fb2bda12e606b3a | [
"Apache-2.0"
] | 16 | 2020-05-09T03:55:38.000Z | 2022-02-27T01:06:09.000Z | cerebro/storage/local.py | Abhishek2304/Cerebro-System-Ray | 1e2f2ae291cd449573f87bb83fb2bda12e606b3a | [
"Apache-2.0"
] | 16 | 2020-04-20T20:47:10.000Z | 2021-12-02T05:11:09.000Z | cerebro/storage/local.py | Abhishek2304/Cerebro-System-Ray | 1e2f2ae291cd449573f87bb83fb2bda12e606b3a | [
"Apache-2.0"
] | 6 | 2020-06-08T01:27:03.000Z | 2021-12-02T12:06:44.000Z | # Copyright 2020 Supun Nakandala, Yuhao Zhang, and Arun Kumar. All Rights Reserved.
# Copyright 2019 Uber Technologies, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import print_function
import contextlib
import errno
import os
import pyarrow as pa
from .base import FilesystemStore
class LocalStore(FilesystemStore):
    """Uses the local filesystem as a store of intermediate data and training artifacts (also works with NFS mounted
    remote storage).
    :param prefix_path: Prefix path of the local directory (e.g., /user/test/cerebro).
    :param train_path: (Optional) Path of the directory to store training data. If not specified will default to
        <prefix_path>/train_data
    :param val_path: (Optional) Path of the directory to store validation data. If not specified will default to
        <prefix_path>/val_data
    :param runs_path: (Optional) Path of the directory to store model checkpoints and log. If not specified will default
        to <prefix_path>/runs
    """
    # URL scheme prepended to localized paths by this store.
    FS_PREFIX = 'file://'
    def __init__(self, prefix_path, train_path=None, val_path=None, runs_path=None):
        # pyarrow handle to the local filesystem; exposed via get_filesystem().
        self._fs = pa.LocalFileSystem()
        super(LocalStore, self).__init__(prefix_path, train_path=train_path, val_path=val_path, runs_path=runs_path)
    def path_prefix(self):
        """Return the ``file://`` URL prefix used to qualify this store's paths."""
        return self.FS_PREFIX
    def get_filesystem(self):
        """Return the pyarrow local-filesystem object backing this store."""
        return self._fs
    def get_local_output_dir_fn(self, run_id):
        """Return a context-manager factory that yields a local output directory
        for ``run_id``, creating it (mode 0o755) on first use.
        """
        run_path = self.get_localized_path(self.get_run_path(run_id))
        @contextlib.contextmanager
        def local_run_path():
            if not os.path.exists(run_path):
                try:
                    os.makedirs(run_path, mode=0o755)
                except OSError as e:
                    # Race condition from workers on the same host: ignore
                    if e.errno != errno.EEXIST:
                        raise
            yield run_path
        return local_run_path
    def sync_fn(self, run_id):
        """Return a function that syncs a local run directory to the store.
        For a local store this is a no-op sanity check, since the local output
        directory and the store's run directory are the same path.
        """
        run_path = self.get_localized_path(self.get_run_path(run_id))
        def fn(local_run_path):
            # No-op for LocalStore since the `local_run_path` will be the same as the run path
            assert run_path == local_run_path
        return fn
    @classmethod
    def filesystem_prefix(cls):
        """Return the filesystem URL prefix handled by this store class."""
        return cls.FS_PREFIX
from __future__ import absolute_import
from __future__ import print_function
import contextlib
import errno
import os
import pyarrow as pa
from .base import FilesystemStore
class LocalStore(FilesystemStore):
FS_PREFIX = 'file://'
def __init__(self, prefix_path, train_path=None, val_path=None, runs_path=None):
self._fs = pa.LocalFileSystem()
super(LocalStore, self).__init__(prefix_path, train_path=train_path, val_path=val_path, runs_path=runs_path)
def path_prefix(self):
return self.FS_PREFIX
def get_filesystem(self):
return self._fs
def get_local_output_dir_fn(self, run_id):
run_path = self.get_localized_path(self.get_run_path(run_id))
@contextlib.contextmanager
def local_run_path():
if not os.path.exists(run_path):
try:
os.makedirs(run_path, mode=0o755)
except OSError as e:
if e.errno != errno.EEXIST:
raise
yield run_path
return local_run_path
def sync_fn(self, run_id):
run_path = self.get_localized_path(self.get_run_path(run_id))
def fn(local_run_path):
assert run_path == local_run_path
return fn
@classmethod
def filesystem_prefix(cls):
return cls.FS_PREFIX
| true | true |
f7245409b3ef11681b481c7e78e544f12653dcc6 | 714 | py | Python | pyhfo/ui/adjust_spines.py | andersonbrisil/pyhfo | 0fdbe834442550117dc9d9c8f611989bb600db62 | [
"MIT"
] | null | null | null | pyhfo/ui/adjust_spines.py | andersonbrisil/pyhfo | 0fdbe834442550117dc9d9c8f611989bb600db62 | [
"MIT"
] | null | null | null | pyhfo/ui/adjust_spines.py | andersonbrisil/pyhfo | 0fdbe834442550117dc9d9c8f611989bb600db62 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Function adjust spines
Created on Fri Apr 17 10:18:30 2015
@author: anderson
"""
def adjust_spines(ax, spines):
    """Show only the named spines on *ax*, offset outward; hide the rest.

    Each spine whose name appears in *spines* is moved 2 points away from
    the plot area (with smart bounds disabled); every other spine is made
    invisible. Tick marks are drawn only on the left/bottom axes when the
    corresponding spine is kept, and removed otherwise.
    """
    for name, spine in ax.spines.items():
        if name in spines:
            # Visible spine: push it 2 points outward from the axes frame.
            spine.set_position(('outward', 2))
            spine.set_smart_bounds(False)
        else:
            # Hidden spine: draw nothing for it.
            spine.set_color('none')
    # Keep ticks only where the matching spine is drawn.
    for axis, side in ((ax.yaxis, 'left'), (ax.xaxis, 'bottom')):
        if side in spines:
            axis.set_ticks_position(side)
        else:
            axis.set_ticks([])
def adjust_spines(ax, spines):
for loc, spine in ax.spines.items():
if loc in spines:
spine.set_position(('outward', 2))
spine.set_smart_bounds(False)
else:
spine.set_color('none')
# turn off ticks where there is no spine
if 'left' in spines:
ax.yaxis.set_ticks_position('left')
else:
# no yaxis ticks
ax.yaxis.set_ticks([])
if 'bottom' in spines:
ax.xaxis.set_ticks_position('bottom')
else:
# no xaxis ticks
ax.xaxis.set_ticks([]) | true | true |
f7245440fe7dba32ffeb3a85b4b83af243aba25b | 290 | py | Python | ceuclid.py | jprzywoski/faster-python | 44252bf0a746dd862d752efbe2012a8a404ec7bf | [
"MIT"
] | null | null | null | ceuclid.py | jprzywoski/faster-python | 44252bf0a746dd862d752efbe2012a8a404ec7bf | [
"MIT"
] | null | null | null | ceuclid.py | jprzywoski/faster-python | 44252bf0a746dd862d752efbe2012a8a404ec7bf | [
"MIT"
] | null | null | null | import ctypes
from numpy.ctypeslib import ndpointer
lib = ctypes.cdll.LoadLibrary('./libdist.so')
fn = lib.dist
fn.restype = ctypes.c_double
fn.argtypes = [
ndpointer(ctypes.c_double),
ndpointer(ctypes.c_double),
ctypes.c_size_t
]
def dist(x, y):
return fn(x, y, len(x))
| 18.125 | 45 | 0.7 | import ctypes
from numpy.ctypeslib import ndpointer
lib = ctypes.cdll.LoadLibrary('./libdist.so')
fn = lib.dist
fn.restype = ctypes.c_double
fn.argtypes = [
ndpointer(ctypes.c_double),
ndpointer(ctypes.c_double),
ctypes.c_size_t
]
def dist(x, y):
return fn(x, y, len(x))
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.