hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f73404abc05550c04ed086551a71f3b47aa447fb | 4,716 | py | Python | src/gt4sd/algorithms/conditional_generation/template/core.py | christofid/gt4sd-core | ea4257e8ff24ee7f766d7010ea5955d823eb9ad7 | [
"MIT"
] | 57 | 2022-02-11T22:32:58.000Z | 2022-03-31T23:17:06.000Z | src/gt4sd/algorithms/conditional_generation/template/core.py | christofid/gt4sd-core | ea4257e8ff24ee7f766d7010ea5955d823eb9ad7 | [
"MIT"
] | 31 | 2022-02-11T22:43:22.000Z | 2022-03-31T12:04:00.000Z | src/gt4sd/algorithms/conditional_generation/template/core.py | christofid/gt4sd-core | ea4257e8ff24ee7f766d7010ea5955d823eb9ad7 | [
"MIT"
] | 8 | 2022-02-15T11:13:54.000Z | 2022-03-22T13:56:13.000Z | #
# MIT License
#
# Copyright (c) 2022 GT4SD team
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
"""Template Algorithm"""
import logging
from dataclasses import field
from typing import Any, Callable, ClassVar, Dict, Iterable, Optional, TypeVar
from ...core import AlgorithmConfiguration, GeneratorAlgorithm # type: ignore
from ...registry import ApplicationsRegistry # type: ignore
from .implementation import Generator # type: ignore
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
T = TypeVar("T")
S = TypeVar("S")
Targeted = Callable[[T], Iterable[Any]]
class Template(GeneratorAlgorithm[S, T]):
    """Template algorithm wiring a configuration to its generator implementation."""
    def __init__(
        self, configuration: AlgorithmConfiguration[S, T], target: Optional[T] = None
    ):
        """Instantiate the template generation algorithm.
        Args:
            configuration: domain and application specification,
                defining types and validations.
            target: optional, depending on the type of generative model; in
                this template it is simply converted to a string.
        Example:
            Basic usage of this template::
                target = 'World'
                configuration = TemplateGenerator()
                algorithm = Template(configuration=configuration, target=target)
                items = list(algorithm.sample(1))
                print(items)
        """
        validated = self.validate_configuration(configuration)
        # TODO there might also be a validation/check on the target input
        super().__init__(configuration=validated, target=target)  # type:ignore
    def get_generator(
        self,
        configuration: AlgorithmConfiguration[S, T],
        target: Optional[T],
    ) -> Targeted[T]:
        """Resolve the callable that performs the generation.
        Args:
            configuration: helps to set up the application.
            target: context or condition for the generation; just an optional string here.
        Returns:
            a callable producing a single-item list with the salutation and the
            temperature converted to fahrenheit.
        """
        logger.info("ensure artifacts for the application are present.")
        self.local_artifacts = configuration.ensure_artifacts()
        generator: Generator = configuration.get_conditional_generator(  # type: ignore
            self.local_artifacts
        )
        return generator.hello_name  # type:ignore
    def validate_configuration(
        self, configuration: AlgorithmConfiguration
    ) -> AlgorithmConfiguration:
        """Check that the given configuration is a valid AlgorithmConfiguration."""
        # TODO raise InvalidAlgorithmConfiguration instead of asserting
        assert isinstance(configuration, AlgorithmConfiguration)
        return configuration
@ApplicationsRegistry.register_algorithm_application(Template)
class TemplateGenerator(AlgorithmConfiguration[str, str]):
    """Configuration of the specific template generator application."""
    algorithm_type: ClassVar[str] = "conditional_generation"
    domain: ClassVar[str] = "materials"
    algorithm_version: str = "v0"
    # Celsius temperature exposed as a tunable parameter of the generator.
    temperature: int = field(
        default=36,
        metadata={"description": "Temperature parameter ( in celsius )"},
    )
    def get_target_description(self) -> Dict[str, str]:
        """Describe the target expected by the generation.
        Returns:
            the target description.
        """
        target_description: Dict[str, str] = {
            "title": "Target name",
            "description": "A simple string to define the name in the output [Hello name].",
            "type": "string",
        }
        return target_description
    def get_conditional_generator(self, resources_path: str) -> Generator:
        """Build the generator implementation backed by the given resources path."""
        return Generator(
            resources_path=resources_path, temperature=self.temperature
        )
| 36.55814 | 111 | 0.689355 |
import logging
from dataclasses import field
from typing import Any, Callable, ClassVar, Dict, Iterable, Optional, TypeVar
from ...core import AlgorithmConfiguration, GeneratorAlgorithm
from ...registry import ApplicationsRegistry
from .implementation import Generator
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
T = TypeVar("T")
S = TypeVar("S")
Targeted = Callable[[T], Iterable[Any]]
class Template(GeneratorAlgorithm[S, T]):
def __init__(
self, configuration: AlgorithmConfiguration[S, T], target: Optional[T] = None
):
configuration = self.validate_configuration(configuration)
super().__init__(
configuration=configuration,
target=target,
)
def get_generator(
self,
configuration: AlgorithmConfiguration[S, T],
target: Optional[T],
) -> Targeted[T]:
logger.info("ensure artifacts for the application are present.")
self.local_artifacts = configuration.ensure_artifacts()
implementation: Generator = configuration.get_conditional_generator(
self.local_artifacts
)
return implementation.hello_name
def validate_configuration(
self, configuration: AlgorithmConfiguration
) -> AlgorithmConfiguration:
assert isinstance(configuration, AlgorithmConfiguration)
return configuration
@ApplicationsRegistry.register_algorithm_application(Template)
class TemplateGenerator(AlgorithmConfiguration[str, str]):
algorithm_type: ClassVar[str] = "conditional_generation"
domain: ClassVar[str] = "materials"
algorithm_version: str = "v0"
temperature: int = field(
default=36,
metadata=dict(description="Temperature parameter ( in celsius )"),
)
def get_target_description(self) -> Dict[str, str]:
return {
"title": "Target name",
"description": "A simple string to define the name in the output [Hello name].",
"type": "string",
}
def get_conditional_generator(self, resources_path: str) -> Generator:
return Generator(resources_path=resources_path, temperature=self.temperature)
| true | true |
f7340584474f529132f526642d02bd35b7d1ef3c | 7,394 | py | Python | tests/server/unit/services/test_mapping_service.py | DerekBev/tasking-manager | 6473ba28d0834ee82c9463c254a8b759c9566622 | [
"BSD-2-Clause"
] | 2 | 2019-07-15T19:18:58.000Z | 2020-09-02T18:11:30.000Z | tests/server/unit/services/test_mapping_service.py | DerekBev/tasking-manager | 6473ba28d0834ee82c9463c254a8b759c9566622 | [
"BSD-2-Clause"
] | 28 | 2019-01-04T17:39:00.000Z | 2021-05-06T23:06:24.000Z | tests/server/unit/services/test_mapping_service.py | DerekBev/tasking-manager | 6473ba28d0834ee82c9463c254a8b759c9566622 | [
"BSD-2-Clause"
] | 3 | 2020-02-29T20:46:09.000Z | 2020-11-20T19:44:04.000Z | import unittest
from server.services.mapping_service import MappingService, Task, MappingServiceError, TaskStatus, \
ProjectService, NotFound, StatsService, MappingNotAllowed, UserLicenseError
from server.models.dtos.mapping_dto import MappedTaskDTO, LockTaskDTO
from server.models.postgis.task import TaskHistory, TaskAction, User
from unittest.mock import patch, MagicMock
from server import create_app
class TestMappingService(unittest.TestCase):
task_stub = Task
lock_task_dto = LockTaskDTO
mapped_task_dto = MappedTaskDTO
mapping_service = None
def setUp(self):
self.app = create_app()
self.ctx = self.app.app_context()
self.ctx.push()
test_user = User()
test_user.id = 123456
test_user.username = 'Thinkwhere'
self.task_stub = Task()
self.task_stub.id = 1
self.task_stub.project_id = 1
self.task_stub.task_status = 0
self.task_stub.locked_by = 123456
self.task_stub.lock_holder = test_user
self.lock_task_dto = LockTaskDTO()
self.lock_task_dto.user_id = 123456
self.mapped_task_dto = MappedTaskDTO()
self.mapped_task_dto.status = TaskStatus.MAPPED.name
self.mapped_task_dto.user_id = 123456
def tearDown(self):
self.ctx.pop()
@patch.object(Task, 'get')
def test_get_task_raises_error_if_task_not_found(self, mock_task):
mock_task.return_value = None
with self.assertRaises(NotFound):
MappingService.get_task(12, 12)
@patch.object(MappingService, 'get_task')
def test_lock_task_for_mapping_raises_error_if_task_in_invalid_state(self, mock_task):
# Arrange
self.task_stub.task_status = TaskStatus.MAPPED.value
mock_task.return_value = self.task_stub
# Act / Assert
with self.assertRaises(MappingServiceError):
MappingService.lock_task_for_mapping(self.lock_task_dto)
@patch.object(ProjectService, 'is_user_permitted_to_map')
@patch.object(MappingService, 'get_task')
def test_lock_task_for_mapping_raises_error_if_user_already_has_locked_task(self, mock_task, mock_project):
# Arrange
mock_task.return_value = self.task_stub
mock_project.return_value = False, MappingNotAllowed.USER_ALREADY_HAS_TASK_LOCKED
# Act / Assert
with self.assertRaises(MappingServiceError):
MappingService.lock_task_for_mapping(self.lock_task_dto)
@patch.object(ProjectService, 'is_user_permitted_to_map')
@patch.object(MappingService, 'get_task')
def test_lock_task_for_mapping_raises_error_if_user_has_not_accepted_license(self, mock_task, mock_project):
# Arrange
mock_task.return_value = self.task_stub
mock_project.return_value = False, MappingNotAllowed.USER_NOT_ACCEPTED_LICENSE
# Act / Assert
with self.assertRaises(UserLicenseError):
MappingService.lock_task_for_mapping(self.lock_task_dto)
@patch.object(MappingService, 'get_task')
def test_unlock_of_not_locked_for_mapping_raises_error(self, mock_task):
# Arrange
mock_task.return_value = self.task_stub
# Act / Assert
with self.assertRaises(MappingServiceError):
MappingService.unlock_task_after_mapping(MagicMock())
@patch.object(MappingService, 'get_task')
def test_cant_unlock_a_task_you_dont_own(self, mock_task):
# Arrange
self.task_stub.task_status = TaskStatus.LOCKED_FOR_MAPPING.value
self.task_stub.locked_by = 12
mock_task.return_value = self.task_stub
# Act / Assert
with self.assertRaises(MappingServiceError):
MappingService.unlock_task_after_mapping(self.mapped_task_dto)
@patch.object(MappingService, 'get_task')
def test_if_new_state_not_acceptable_raise_error(self, mock_task):
# Arrange
self.task_stub.task_status = TaskStatus.LOCKED_FOR_MAPPING.value
mock_task.return_value = self.task_stub
self.mapped_task_dto.status = TaskStatus.LOCKED_FOR_VALIDATION.name
# Act / Assert
with self.assertRaises(MappingServiceError):
MappingService.unlock_task_after_mapping(self.mapped_task_dto)
@patch.object(Task, 'get_per_task_instructions')
@patch.object(StatsService, 'update_stats_after_task_state_change')
@patch.object(Task, 'update')
@patch.object(TaskHistory, 'get_last_status')
@patch.object(TaskHistory, 'update_task_locked_with_duration')
@patch.object(MappingService, 'get_task')
def test_unlock_with_comment_sets_history(self, mock_task, mock_history, mock_update, mock_stats,
mock_instructions, mock_state):
# Arrange
self.task_stub.task_status = TaskStatus.LOCKED_FOR_MAPPING.value
self.mapped_task_dto.comment = 'Test comment'
mock_task.return_value = self.task_stub
mock_state.return_value = TaskStatus.LOCKED_FOR_MAPPING
# Act
test_task = MappingService.unlock_task_after_mapping(self.mapped_task_dto)
# Assert
self.assertEqual(TaskAction.COMMENT.name, test_task.task_history[0].action)
self.assertEqual(test_task.task_history[0].action_text, 'Test comment')
@patch.object(Task, 'get_per_task_instructions')
@patch.object(StatsService, 'update_stats_after_task_state_change')
@patch.object(Task, 'update')
@patch.object(TaskHistory, 'get_last_status')
@patch.object(TaskHistory, 'update_task_locked_with_duration')
@patch.object(MappingService, 'get_task')
def test_unlock_with_status_change_sets_history(self, mock_task, mock_history, mock_update, mock_stats,
mock_instructions, mock_state):
# Arrange
self.task_stub.task_status = TaskStatus.LOCKED_FOR_MAPPING.value
mock_task.return_value = self.task_stub
mock_state.return_value = TaskStatus.LOCKED_FOR_MAPPING
# Act
test_task = MappingService.unlock_task_after_mapping(self.mapped_task_dto)
# Assert
self.assertEqual(TaskAction.STATE_CHANGE.name, test_task.task_history[0].action)
self.assertEqual(test_task.task_history[0].action_text, TaskStatus.MAPPED.name)
self.assertEqual(TaskStatus.MAPPED.name, test_task.task_status)
@patch.object(TaskHistory, 'get_last_action')
def test_task_is_undoable_if_last_change_made_by_you(self, last_action):
# Arrange
task_history = TaskHistory(1, 1, 1)
task_history.user_id = 1
last_action.return_value = task_history
task = Task()
task.task_status = TaskStatus.MAPPED.value
task.mapped_by = 1
# Act
is_undoable = MappingService._is_task_undoable(1, task)
# Assert
self.assertTrue(is_undoable)
@patch.object(TaskHistory, 'get_last_action')
def test_task_is_not_undoable_if_last_change_not_made_by_you(self, last_action):
# Arrange
task_history = TaskHistory(1, 1, 1)
task_history.user_id = 2
last_action.return_value = task_history
task = Task()
task.task_status = TaskStatus.MAPPED.value
task.mapped_by = 1
# Act
is_undoable = MappingService._is_task_undoable(1, task)
# Assert
self.assertFalse(is_undoable) | 39.540107 | 112 | 0.712199 | import unittest
from server.services.mapping_service import MappingService, Task, MappingServiceError, TaskStatus, \
ProjectService, NotFound, StatsService, MappingNotAllowed, UserLicenseError
from server.models.dtos.mapping_dto import MappedTaskDTO, LockTaskDTO
from server.models.postgis.task import TaskHistory, TaskAction, User
from unittest.mock import patch, MagicMock
from server import create_app
class TestMappingService(unittest.TestCase):
task_stub = Task
lock_task_dto = LockTaskDTO
mapped_task_dto = MappedTaskDTO
mapping_service = None
def setUp(self):
self.app = create_app()
self.ctx = self.app.app_context()
self.ctx.push()
test_user = User()
test_user.id = 123456
test_user.username = 'Thinkwhere'
self.task_stub = Task()
self.task_stub.id = 1
self.task_stub.project_id = 1
self.task_stub.task_status = 0
self.task_stub.locked_by = 123456
self.task_stub.lock_holder = test_user
self.lock_task_dto = LockTaskDTO()
self.lock_task_dto.user_id = 123456
self.mapped_task_dto = MappedTaskDTO()
self.mapped_task_dto.status = TaskStatus.MAPPED.name
self.mapped_task_dto.user_id = 123456
def tearDown(self):
self.ctx.pop()
@patch.object(Task, 'get')
def test_get_task_raises_error_if_task_not_found(self, mock_task):
mock_task.return_value = None
with self.assertRaises(NotFound):
MappingService.get_task(12, 12)
@patch.object(MappingService, 'get_task')
def test_lock_task_for_mapping_raises_error_if_task_in_invalid_state(self, mock_task):
self.task_stub.task_status = TaskStatus.MAPPED.value
mock_task.return_value = self.task_stub
with self.assertRaises(MappingServiceError):
MappingService.lock_task_for_mapping(self.lock_task_dto)
@patch.object(ProjectService, 'is_user_permitted_to_map')
@patch.object(MappingService, 'get_task')
def test_lock_task_for_mapping_raises_error_if_user_already_has_locked_task(self, mock_task, mock_project):
mock_task.return_value = self.task_stub
mock_project.return_value = False, MappingNotAllowed.USER_ALREADY_HAS_TASK_LOCKED
with self.assertRaises(MappingServiceError):
MappingService.lock_task_for_mapping(self.lock_task_dto)
@patch.object(ProjectService, 'is_user_permitted_to_map')
@patch.object(MappingService, 'get_task')
def test_lock_task_for_mapping_raises_error_if_user_has_not_accepted_license(self, mock_task, mock_project):
mock_task.return_value = self.task_stub
mock_project.return_value = False, MappingNotAllowed.USER_NOT_ACCEPTED_LICENSE
with self.assertRaises(UserLicenseError):
MappingService.lock_task_for_mapping(self.lock_task_dto)
@patch.object(MappingService, 'get_task')
def test_unlock_of_not_locked_for_mapping_raises_error(self, mock_task):
mock_task.return_value = self.task_stub
with self.assertRaises(MappingServiceError):
MappingService.unlock_task_after_mapping(MagicMock())
@patch.object(MappingService, 'get_task')
def test_cant_unlock_a_task_you_dont_own(self, mock_task):
self.task_stub.task_status = TaskStatus.LOCKED_FOR_MAPPING.value
self.task_stub.locked_by = 12
mock_task.return_value = self.task_stub
with self.assertRaises(MappingServiceError):
MappingService.unlock_task_after_mapping(self.mapped_task_dto)
@patch.object(MappingService, 'get_task')
def test_if_new_state_not_acceptable_raise_error(self, mock_task):
self.task_stub.task_status = TaskStatus.LOCKED_FOR_MAPPING.value
mock_task.return_value = self.task_stub
self.mapped_task_dto.status = TaskStatus.LOCKED_FOR_VALIDATION.name
with self.assertRaises(MappingServiceError):
MappingService.unlock_task_after_mapping(self.mapped_task_dto)
@patch.object(Task, 'get_per_task_instructions')
@patch.object(StatsService, 'update_stats_after_task_state_change')
@patch.object(Task, 'update')
@patch.object(TaskHistory, 'get_last_status')
@patch.object(TaskHistory, 'update_task_locked_with_duration')
@patch.object(MappingService, 'get_task')
def test_unlock_with_comment_sets_history(self, mock_task, mock_history, mock_update, mock_stats,
mock_instructions, mock_state):
self.task_stub.task_status = TaskStatus.LOCKED_FOR_MAPPING.value
self.mapped_task_dto.comment = 'Test comment'
mock_task.return_value = self.task_stub
mock_state.return_value = TaskStatus.LOCKED_FOR_MAPPING
test_task = MappingService.unlock_task_after_mapping(self.mapped_task_dto)
self.assertEqual(TaskAction.COMMENT.name, test_task.task_history[0].action)
self.assertEqual(test_task.task_history[0].action_text, 'Test comment')
@patch.object(Task, 'get_per_task_instructions')
@patch.object(StatsService, 'update_stats_after_task_state_change')
@patch.object(Task, 'update')
@patch.object(TaskHistory, 'get_last_status')
@patch.object(TaskHistory, 'update_task_locked_with_duration')
@patch.object(MappingService, 'get_task')
def test_unlock_with_status_change_sets_history(self, mock_task, mock_history, mock_update, mock_stats,
mock_instructions, mock_state):
self.task_stub.task_status = TaskStatus.LOCKED_FOR_MAPPING.value
mock_task.return_value = self.task_stub
mock_state.return_value = TaskStatus.LOCKED_FOR_MAPPING
test_task = MappingService.unlock_task_after_mapping(self.mapped_task_dto)
self.assertEqual(TaskAction.STATE_CHANGE.name, test_task.task_history[0].action)
self.assertEqual(test_task.task_history[0].action_text, TaskStatus.MAPPED.name)
self.assertEqual(TaskStatus.MAPPED.name, test_task.task_status)
@patch.object(TaskHistory, 'get_last_action')
def test_task_is_undoable_if_last_change_made_by_you(self, last_action):
task_history = TaskHistory(1, 1, 1)
task_history.user_id = 1
last_action.return_value = task_history
task = Task()
task.task_status = TaskStatus.MAPPED.value
task.mapped_by = 1
is_undoable = MappingService._is_task_undoable(1, task)
self.assertTrue(is_undoable)
@patch.object(TaskHistory, 'get_last_action')
def test_task_is_not_undoable_if_last_change_not_made_by_you(self, last_action):
task_history = TaskHistory(1, 1, 1)
task_history.user_id = 2
last_action.return_value = task_history
task = Task()
task.task_status = TaskStatus.MAPPED.value
task.mapped_by = 1
is_undoable = MappingService._is_task_undoable(1, task)
self.assertFalse(is_undoable) | true | true |
f73405aa62173fd68710fce3fe875060a712c5b2 | 458 | py | Python | arbitrage/observers/logger.py | acontry/altcoin-arbitrage | ddd1abc46d98e53ec0bd57f7cd82497dd1385683 | [
"MIT"
] | 7 | 2017-04-14T03:35:27.000Z | 2021-12-17T21:39:47.000Z | arbitrage/observers/logger.py | acontry/altcoin-arbitrage | ddd1abc46d98e53ec0bd57f7cd82497dd1385683 | [
"MIT"
] | null | null | null | arbitrage/observers/logger.py | acontry/altcoin-arbitrage | ddd1abc46d98e53ec0bd57f7cd82497dd1385683 | [
"MIT"
] | null | null | null | import logging
from .observer import Observer
import config
class Logger(Observer):
    """Observer that writes detected arbitrage opportunities to the log."""
    def opportunity(self, profit, volume, buyprice, kask, sellprice, kbid, perc,
                    weighted_buyprice, weighted_sellprice):
        # NOTE(review): weighted_buyprice / weighted_sellprice are accepted to
        # satisfy the Observer interface but are not included in the log line.
        details = (
            profit,
            config.s_coin,
            volume,
            config.p_coin,
            # prices are scaled by 1e8 before logging as integers
            # (presumably coin -> base-unit conversion — confirm against config)
            buyprice * 100000000,
            kask,
            sellprice * 100000000,
            kbid,
            perc,
        )
        message = "profit: %f %s with volume: %f %s - buy at %i (%s) sell at %i (%s) ~%.2f%%" % details
        logging.info(message)
| 41.636364 | 131 | 0.644105 | import logging
from .observer import Observer
import config
class Logger(Observer):
def opportunity(self, profit, volume, buyprice, kask, sellprice, kbid, perc,
weighted_buyprice, weighted_sellprice):
logging.info("profit: %f %s with volume: %f %s - buy at %i (%s) sell at %i (%s) ~%.2f%%" %
(profit, config.s_coin, volume, config.p_coin, buyprice * 100000000, kask, sellprice * 100000000, kbid, perc))
| true | true |
f73405dde0b9040c2f5c50a1016e39cf915b9d5d | 16,453 | py | Python | src/trunk/apps/python/scvoice.py | Fran89/seiscomp3 | a25d29966949769d2bce9c0d28db0a2128e00649 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 1 | 2021-08-04T00:20:17.000Z | 2021-08-04T00:20:17.000Z | src/trunk/apps/python/scvoice.py | Fran89/seiscomp3 | a25d29966949769d2bce9c0d28db0a2128e00649 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | src/trunk/apps/python/scvoice.py | Fran89/seiscomp3 | a25d29966949769d2bce9c0d28db0a2128e00649 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 1 | 2022-01-13T02:49:31.000Z | 2022-01-13T02:49:31.000Z | #!/usr/bin/env python
############################################################################
# Copyright (C) by GFZ Potsdam #
# #
# You can redistribute and/or modify this program under the #
# terms of the SeisComP Public License. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# SeisComP Public License for more details. #
############################################################################
import os, sys, subprocess, traceback
import seiscomp3.Client, seiscomp3.Seismology, seiscomp3.System
class VoiceAlert(seiscomp3.Client.Application):
def __init__(self, argc, argv):
seiscomp3.Client.Application.__init__(self, argc, argv)
self.setMessagingEnabled(True)
self.setDatabaseEnabled(True, True)
self.setLoadRegionsEnabled(True)
self.setMessagingUsername("")
self.setPrimaryMessagingGroup(seiscomp3.Communication.Protocol.LISTENER_GROUP)
self.addMessagingSubscription("EVENT")
self.addMessagingSubscription("LOCATION")
self.addMessagingSubscription("MAGNITUDE")
self.setAutoApplyNotifierEnabled(True)
self.setInterpretNotifierEnabled(True)
self.setLoadCitiesEnabled(True)
self.setLoadRegionsEnabled(True)
self._ampType = "snr"
self._citiesMaxDist = 20
self._citiesMinPopulation = 50000
self._eventDescriptionPattern = None
self._ampScript = None
self._alertScript = None
self._eventScript = None
self._ampProc = None
self._alertProc = None
self._eventProc = None
self._newWhenFirstSeen = False
self._prevMessage = {}
self._agencyIDs = []
def createCommandLineDescription(self):
self.commandline().addOption("Generic", "first-new", "calls an event a new event when it is seen the first time")
self.commandline().addGroup("Alert")
self.commandline().addStringOption("Alert", "amp-type", "specify the amplitude type to listen to", self._ampType)
self.commandline().addStringOption("Alert", "amp-script", "specify the script to be called when a stationamplitude arrived, network-, stationcode and amplitude are passed as parameters $1, $2 and $3")
self.commandline().addStringOption("Alert", "alert-script", "specify the script to be called when a preliminary origin arrived, latitude and longitude are passed as parameters $1 and $2")
self.commandline().addStringOption("Alert", "event-script", "specify the script to be called when an event has been declared; the message string, a flag (1=new event, 0=update event), the EventID, the arrival count and the magnitude (optional when set) are passed as parameter $1, $2, $3, $4 and $5")
self.commandline().addGroup("Cities")
self.commandline().addStringOption("Cities", "max-dist", "maximum distance for using the distance from a city to the earthquake")
self.commandline().addStringOption("Cities", "min-population", "minimum population for a city to become a point of interest")
self.commandline().addGroup("Debug")
self.commandline().addStringOption("Debug", "eventid,E", "specify Event ID")
return True
def init(self):
if not seiscomp3.Client.Application.init(self): return False
try: self._newWhenFirstSeen = self.configGetBool("firstNew");
except: pass
try: self._agencyIDs = [ self.configGetString("agencyID") ]
except: pass
try:
agencyIDs = self.configGetStrings("agencyIDs")
for item in agencyIDs:
item = item.strip()
if item not in self._agencyIDs:
self._agencyIDs.append(item)
except: pass
try:
if self.commandline().hasOption("first-new"): self._newWhenFirstSeen = True
except: pass
try: self._eventDescriptionPattern = self.configGetString("poi.message")
except: pass
try: self._citiesMaxDist = self.configGetDouble("poi.maxDist")
except: pass
try: self._citiesMaxDist = self.commandline().optionDouble("max-dist")
except: pass
try: self._citiesMinPopulation = self.configGetInt("poi.minPopulation")
except: pass
try: self._citiesMinPopulation = self.commandline().optionInt("min-population")
except: pass
try: self._ampType = self.commandline().optionString("amp-type")
except: pass
try: self._ampScript = self.commandline().optionString("amp-script")
except:
try: self._ampScript = self.configGetString("scripts.amplitude")
except: seiscomp3.Logging.warning("No amplitude script defined")
if self._ampScript:
self._ampScript = seiscomp3.System.Environment.Instance().absolutePath(self._ampScript)
try: self._alertScript = self.commandline().optionString("alert-script")
except:
try: self._alertScript = self.configGetString("scripts.alert")
except: seiscomp3.Logging.warning("No alert script defined")
if self._alertScript:
self._alertScript = seiscomp3.System.Environment.Instance().absolutePath(self._alertScript)
try: self._eventScript = self.commandline().optionString("event-script")
except:
try:
self._eventScript = self.configGetString("scripts.event")
seiscomp3.Logging.info("Using event script: %s" % self._eventScript)
except: seiscomp3.Logging.warning("No event script defined")
self._eventScript = seiscomp3.System.Environment.Instance().absolutePath(self._eventScript)
seiscomp3.Logging.info("Creating ringbuffer for 100 objects")
if not self.query():
seiscomp3.Logging.warning("No valid database interface to read from")
self._cache = seiscomp3.DataModel.PublicObjectRingBuffer(self.query(), 100)
if self._ampScript and self.connection():
self.connection().subscribe("AMPLITUDE")
if self._newWhenFirstSeen:
seiscomp3.Logging.info("A new event is declared when I see it the first time")
if not self._agencyIDs:
seiscomp3.Logging.info("agencyIDs: []")
else:
seiscomp3.Logging.info("agencyIDs: %s" % (" ".join(self._agencyIDs)))
return True
def run(self):
try:
try:
eventID = self.commandline().optionString("eventid")
event = self._cache.get(seiscomp3.DataModel.Event, eventID)
if event:
self.notifyEvent(event)
except: pass
return seiscomp3.Client.Application.run(self)
except:
info = traceback.format_exception(*sys.exc_info())
for i in info: sys.stderr.write(i)
return False
def runAmpScript(self, net, sta, amp):
if not self._ampScript: return
if self._ampProc != None:
if self._ampProc.poll() is None:
seiscomp3.Logging.warning("AmplitudeScript still in progress -> skipping message")
return
try:
self._ampProc = subprocess.Popen([self._ampScript, net, sta, "%.2f" % amp])
seiscomp3.Logging.info("Started amplitude script with pid %d" % self._ampProc.pid)
except:
seiscomp3.Logging.error("Failed to start amplitude script '%s'" % self._ampScript)
def runAlert(self, lat, lon):
if not self._alertScript: return
if self._alertProc != None:
if self._alertProc.poll() is None:
seiscomp3.Logging.warning("AlertScript still in progress -> skipping message")
return
try:
self._alertProc = subprocess.Popen([self._alertScript, "%.1f" % lat, "%.1f" % lon])
seiscomp3.Logging.info("Started alert script with pid %d" % self._alertProc.pid)
except:
seiscomp3.Logging.error("Failed to start alert script '%s'" % self._alertScript)
def handleMessage(self, msg):
try:
dm = seiscomp3.Core.DataMessage.Cast(msg)
if dm:
for att in dm:
org = seiscomp3.DataModel.Origin.Cast(att)
if org:
try:
if org.evaluationStatus() == seiscomp3.DataModel.PRELIMINARY:
self.runAlert(org.latitude().value(), org.longitude().value())
except: pass
#ao = seiscomp3.DataModel.ArtificialOriginMessage.Cast(msg)
#if ao:
# org = ao.origin()
# if org:
# self.runAlert(org.latitude().value(), org.longitude().value())
# return
seiscomp3.Client.Application.handleMessage(self, msg)
except:
info = traceback.format_exception(*sys.exc_info())
for i in info: sys.stderr.write(i)
def addObject(self, parentID, object):
try:
obj = seiscomp3.DataModel.Amplitude.Cast(object)
if obj:
if obj.type() == self._ampType:
seiscomp3.Logging.debug("got new %s amplitude '%s'" % (self._ampType, obj.publicID()))
self.notifyAmplitude(obj)
obj = seiscomp3.DataModel.Origin.Cast(object)
if obj:
self._cache.feed(obj)
seiscomp3.Logging.debug("got new origin '%s'" % obj.publicID())
try:
if obj.evaluationStatus() == seiscomp3.DataModel.PRELIMINARY:
self.runAlert(obj.latitude().value(), obj.longitude().value())
except: pass
return
obj = seiscomp3.DataModel.Magnitude.Cast(object)
if obj:
self._cache.feed(obj)
seiscomp3.Logging.debug("got new magnitude '%s'" % obj.publicID())
return
obj = seiscomp3.DataModel.Event.Cast(object)
if obj:
org = self._cache.get(seiscomp3.DataModel.Origin, obj.preferredOriginID())
agencyID = org.creationInfo().agencyID()
seiscomp3.Logging.debug("got new event '%s'" % obj.publicID())
if not self._agencyIDs or agencyID in self._agencyIDs:
self.notifyEvent(obj, True)
except:
info = traceback.format_exception(*sys.exc_info())
for i in info: sys.stderr.write(i)
def updateObject(self, parentID, object):
    """Handle an updated event by re-notifying with newEvent=False.

    Fixes: guarded the preferred-origin cache lookup so a miss logs a
    warning instead of surfacing as an AttributeError traceback, and
    narrowed the bare ``except:`` to ``except Exception``.

    Args:
        parentID: public ID of the parent object (unused here).
        object: the updated object; only Event updates are handled.
    """
    try:
        obj = seiscomp3.DataModel.Event.Cast(object)
        if obj:
            org = self._cache.get(seiscomp3.DataModel.Origin, obj.preferredOriginID())
            if org is None:
                seiscomp3.Logging.warning(
                    "unable to get origin %s for event '%s'"
                    % (obj.preferredOriginID(), obj.publicID()))
                return
            agencyID = org.creationInfo().agencyID()
            seiscomp3.Logging.debug("update event '%s'" % obj.publicID())
            if not self._agencyIDs or agencyID in self._agencyIDs:
                # Not a new event: re-announce with newEvent=False.
                self.notifyEvent(obj, False)
    except Exception:
        info = traceback.format_exception(*sys.exc_info())
        for i in info:
            sys.stderr.write(i)
def notifyAmplitude(self, amp):
    """Forward a station amplitude to the configured amplitude script."""
    wfid = amp.waveformID()
    network = wfid.networkCode()
    station = wfid.stationCode()
    value = amp.amplitude().value()
    self.runAmpScript(network, station, value)
def notifyEvent(self, evt, newEvent=True, dtmax=3600):
    """Build and announce a spoken-alert message for an event.

    Composes a human-readable description (region, magnitude, depth,
    time-ago), suppresses exact repeats per event ID, and launches the
    configured event script with the message and bookkeeping parameters.

    Args:
        evt: the event object to announce.
        newEvent: whether this is a first announcement; may be overridden
            by the _newWhenFirstSeen config (see below).
        dtmax: maximum event age in seconds -- currently UNUSED; the age
            cutoff below is commented out and a literal 3600 is used for
            formatting only.
    """
    try:
        org = self._cache.get(seiscomp3.DataModel.Origin, evt.preferredOriginID())
        if not org:
            seiscomp3.Logging.warning("unable to get origin %s, ignoring event message" % evt.preferredOriginID())
            return
        preliminary = False
        try:
            # evaluationStatus() raises when unset; treat as non-preliminary.
            if org.evaluationStatus() == seiscomp3.DataModel.PRELIMINARY:
                preliminary = True
        except: pass
        if preliminary == False:
            # Non-preliminary announcements require a usable preferred magnitude.
            nmag = self._cache.get(seiscomp3.DataModel.Magnitude, evt.preferredMagnitudeID())
            if nmag:
                mag = nmag.magnitude().value()
                mag = "magnitude %.1f" % mag
            else:
                if len(evt.preferredMagnitudeID()) > 0:
                    seiscomp3.Logging.warning("unable to get magnitude %s, ignoring event message" % evt.preferredMagnitudeID())
                else:
                    seiscomp3.Logging.warning("no preferred magnitude yet, ignoring event message")
                return
        # keep track of old events: when configured, "new" means
        # "first time this event ID is seen by this process".
        if self._newWhenFirstSeen:
            if evt.publicID() in self._prevMessage:
                newEvent = False
            else:
                newEvent = True
        # Default description is the Flinn-Engdahl region name; the
        # configured pattern may replace it with a nearest-city phrasing.
        dsc = seiscomp3.Seismology.Regions.getRegionName(org.latitude().value(), org.longitude().value())
        if self._eventDescriptionPattern:
            try:
                city,dist,azi = self.nearestCity(org.latitude().value(), org.longitude().value(), self._citiesMaxDist, self._citiesMinPopulation)
                if city:
                    dsc = self._eventDescriptionPattern
                    region = seiscomp3.Seismology.Regions.getRegionName(org.latitude().value(), org.longitude().value())
                    distStr = str(int(seiscomp3.Math.deg2km(dist)))
                    # Substitute the pattern placeholders with concrete values.
                    dsc = dsc.replace("@region@", region).replace("@dist@", distStr).replace("@poi@", city.name())
            except: pass
        seiscomp3.Logging.debug("desc: %s" % dsc)
        dep = org.depth().value()
        now = seiscomp3.Core.Time.GMT()
        otm = org.time().value()
        # Event age in seconds; `dt` is then rebound to a display string.
        dt = (now - otm).seconds()
        # NOTE(review): the dtmax age cutoff is disabled; the parameter is
        # currently dead.
        # if dt > dtmax:
        #     return
        if dt > 3600:
            dt = "%d hours %d minutes ago" % (dt/3600, (dt%3600)/60)
        elif dt > 120:
            dt = "%d minutes ago" % (dt/60)
        else:
            dt = "%d seconds ago" % dt
        # "%%s" keeps a literal %s placeholder for the "ago" text below.
        if preliminary == True:
            message = "earthquake, preliminary, %%s, %s" % dsc
        else:
            message = "earthquake, %%s, %s, %s, depth %d kilometers" % (dsc, mag, int(dep+0.5))
        # at this point the message lacks the "ago" part; dedup compares
        # the age-independent template so only substantive changes re-announce.
        if evt.publicID() in self._prevMessage and self._prevMessage[evt.publicID()] == message:
            seiscomp3.Logging.info("Suppressing repeated message '%s'" % message)
            return
        self._prevMessage[evt.publicID()] = message
        message = message % dt # fill the "ago" part
        seiscomp3.Logging.info(message)
        if not self._eventScript: return
        # Skip if a previous event script invocation is still running.
        if self._eventProc != None:
            if self._eventProc.poll() is None:
                seiscomp3.Logging.warning("EventScript still in progress -> skipping message")
                return
        try:
            # Script arguments: message, new-event flag, event ID,
            # associated phase count, preferred magnitude string.
            param2 = 0
            param3 = 0
            param4 = ""
            if newEvent: param2 = 1
            org = self._cache.get(seiscomp3.DataModel.Origin, evt.preferredOriginID())
            if org:
                try: param3 = org.quality().associatedPhaseCount()
                except: pass
            nmag = self._cache.get(seiscomp3.DataModel.Magnitude, evt.preferredMagnitudeID())
            if nmag:
                param4 = "%.1f" % nmag.magnitude().value()
            self._eventProc = subprocess.Popen([self._eventScript, message, "%d" % param2, evt.publicID(), "%d" % param3, param4])
            seiscomp3.Logging.info("Started event script with pid %d" % self._eventProc.pid)
        except:
            seiscomp3.Logging.error("Failed to start event script '%s %s %d %d %s'" % (self._eventScript, message, param2, param3, param4))
    except:
        # Last-resort guard: dump any unexpected error without killing the app.
        info = traceback.format_exception(*sys.exc_info())
        for i in info: sys.stderr.write(i)
# Entry point: construct the application and exit with its return code.
# Fixed: guarded with __name__ so importing this module no longer starts
# the application as a side effect.
if __name__ == "__main__":
    app = VoiceAlert(len(sys.argv), sys.argv)
    sys.exit(app())
| 42.29563 | 308 | 0.575093 |
new event is declared when I see it the first time")
if not self._agencyIDs:
seiscomp3.Logging.info("agencyIDs: []")
else:
seiscomp3.Logging.info("agencyIDs: %s" % (" ".join(self._agencyIDs)))
return True
def run(self):
try:
try:
eventID = self.commandline().optionString("eventid")
event = self._cache.get(seiscomp3.DataModel.Event, eventID)
if event:
self.notifyEvent(event)
except: pass
return seiscomp3.Client.Application.run(self)
except:
info = traceback.format_exception(*sys.exc_info())
for i in info: sys.stderr.write(i)
return False
def runAmpScript(self, net, sta, amp):
if not self._ampScript: return
if self._ampProc != None:
if self._ampProc.poll() is None:
seiscomp3.Logging.warning("AmplitudeScript still in progress -> skipping message")
return
try:
self._ampProc = subprocess.Popen([self._ampScript, net, sta, "%.2f" % amp])
seiscomp3.Logging.info("Started amplitude script with pid %d" % self._ampProc.pid)
except:
seiscomp3.Logging.error("Failed to start amplitude script '%s'" % self._ampScript)
def runAlert(self, lat, lon):
if not self._alertScript: return
if self._alertProc != None:
if self._alertProc.poll() is None:
seiscomp3.Logging.warning("AlertScript still in progress -> skipping message")
return
try:
self._alertProc = subprocess.Popen([self._alertScript, "%.1f" % lat, "%.1f" % lon])
seiscomp3.Logging.info("Started alert script with pid %d" % self._alertProc.pid)
except:
seiscomp3.Logging.error("Failed to start alert script '%s'" % self._alertScript)
def handleMessage(self, msg):
try:
dm = seiscomp3.Core.DataMessage.Cast(msg)
if dm:
for att in dm:
org = seiscomp3.DataModel.Origin.Cast(att)
if org:
try:
if org.evaluationStatus() == seiscomp3.DataModel.PRELIMINARY:
self.runAlert(org.latitude().value(), org.longitude().value())
except: pass
seiscomp3.Client.Application.handleMessage(self, msg)
except:
info = traceback.format_exception(*sys.exc_info())
for i in info: sys.stderr.write(i)
def addObject(self, parentID, object):
try:
obj = seiscomp3.DataModel.Amplitude.Cast(object)
if obj:
if obj.type() == self._ampType:
seiscomp3.Logging.debug("got new %s amplitude '%s'" % (self._ampType, obj.publicID()))
self.notifyAmplitude(obj)
obj = seiscomp3.DataModel.Origin.Cast(object)
if obj:
self._cache.feed(obj)
seiscomp3.Logging.debug("got new origin '%s'" % obj.publicID())
try:
if obj.evaluationStatus() == seiscomp3.DataModel.PRELIMINARY:
self.runAlert(obj.latitude().value(), obj.longitude().value())
except: pass
return
obj = seiscomp3.DataModel.Magnitude.Cast(object)
if obj:
self._cache.feed(obj)
seiscomp3.Logging.debug("got new magnitude '%s'" % obj.publicID())
return
obj = seiscomp3.DataModel.Event.Cast(object)
if obj:
org = self._cache.get(seiscomp3.DataModel.Origin, obj.preferredOriginID())
agencyID = org.creationInfo().agencyID()
seiscomp3.Logging.debug("got new event '%s'" % obj.publicID())
if not self._agencyIDs or agencyID in self._agencyIDs:
self.notifyEvent(obj, True)
except:
info = traceback.format_exception(*sys.exc_info())
for i in info: sys.stderr.write(i)
def updateObject(self, parentID, object):
try:
obj = seiscomp3.DataModel.Event.Cast(object)
if obj:
org = self._cache.get(seiscomp3.DataModel.Origin, obj.preferredOriginID())
agencyID = org.creationInfo().agencyID()
seiscomp3.Logging.debug("update event '%s'" % obj.publicID())
if not self._agencyIDs or agencyID in self._agencyIDs:
self.notifyEvent(obj, False)
except:
info = traceback.format_exception(*sys.exc_info())
for i in info: sys.stderr.write(i)
def notifyAmplitude(self, amp):
self.runAmpScript(amp.waveformID().networkCode(), amp.waveformID().stationCode(), amp.amplitude().value())
def notifyEvent(self, evt, newEvent=True, dtmax=3600):
try:
org = self._cache.get(seiscomp3.DataModel.Origin, evt.preferredOriginID())
if not org:
seiscomp3.Logging.warning("unable to get origin %s, ignoring event message" % evt.preferredOriginID())
return
preliminary = False
try:
if org.evaluationStatus() == seiscomp3.DataModel.PRELIMINARY:
preliminary = True
except: pass
if preliminary == False:
nmag = self._cache.get(seiscomp3.DataModel.Magnitude, evt.preferredMagnitudeID())
if nmag:
mag = nmag.magnitude().value()
mag = "magnitude %.1f" % mag
else:
if len(evt.preferredMagnitudeID()) > 0:
seiscomp3.Logging.warning("unable to get magnitude %s, ignoring event message" % evt.preferredMagnitudeID())
else:
seiscomp3.Logging.warning("no preferred magnitude yet, ignoring event message")
return
if self._newWhenFirstSeen:
if evt.publicID() in self._prevMessage:
newEvent = False
else:
newEvent = True
dsc = seiscomp3.Seismology.Regions.getRegionName(org.latitude().value(), org.longitude().value())
if self._eventDescriptionPattern:
try:
city,dist,azi = self.nearestCity(org.latitude().value(), org.longitude().value(), self._citiesMaxDist, self._citiesMinPopulation)
if city:
dsc = self._eventDescriptionPattern
region = seiscomp3.Seismology.Regions.getRegionName(org.latitude().value(), org.longitude().value())
distStr = str(int(seiscomp3.Math.deg2km(dist)))
dsc = dsc.replace("@region@", region).replace("@dist@", distStr).replace("@poi@", city.name())
except: pass
seiscomp3.Logging.debug("desc: %s" % dsc)
dep = org.depth().value()
now = seiscomp3.Core.Time.GMT()
otm = org.time().value()
dt = (now - otm).seconds()
if dt > 3600:
dt = "%d hours %d minutes ago" % (dt/3600, (dt%3600)/60)
elif dt > 120:
dt = "%d minutes ago" % (dt/60)
else:
dt = "%d seconds ago" % dt
if preliminary == True:
message = "earthquake, preliminary, %%s, %s" % dsc
else:
message = "earthquake, %%s, %s, %s, depth %d kilometers" % (dsc, mag, int(dep+0.5))
if evt.publicID() in self._prevMessage and self._prevMessage[evt.publicID()] == message:
seiscomp3.Logging.info("Suppressing repeated message '%s'" % message)
return
self._prevMessage[evt.publicID()] = message
message = message % dt
seiscomp3.Logging.info(message)
if not self._eventScript: return
if self._eventProc != None:
if self._eventProc.poll() is None:
seiscomp3.Logging.warning("EventScript still in progress -> skipping message")
return
try:
param2 = 0
param3 = 0
param4 = ""
if newEvent: param2 = 1
org = self._cache.get(seiscomp3.DataModel.Origin, evt.preferredOriginID())
if org:
try: param3 = org.quality().associatedPhaseCount()
except: pass
nmag = self._cache.get(seiscomp3.DataModel.Magnitude, evt.preferredMagnitudeID())
if nmag:
param4 = "%.1f" % nmag.magnitude().value()
self._eventProc = subprocess.Popen([self._eventScript, message, "%d" % param2, evt.publicID(), "%d" % param3, param4])
seiscomp3.Logging.info("Started event script with pid %d" % self._eventProc.pid)
except:
seiscomp3.Logging.error("Failed to start event script '%s %s %d %d %s'" % (self._eventScript, message, param2, param3, param4))
except:
info = traceback.format_exception(*sys.exc_info())
for i in info: sys.stderr.write(i)
app = VoiceAlert(len(sys.argv), sys.argv)
sys.exit(app())
| true | true |
f734061f0d85620d2525a90bb81efc3ddacea8d3 | 1,541 | py | Python | keystonemiddleware/tests/unit/audit/test_logging_notifier.py | mahak/keystonemiddleware | 90df936708aee36d77cead548d04cb7db2327f47 | [
"Apache-1.1"
] | 55 | 2015-01-29T20:10:42.000Z | 2022-03-11T04:02:22.000Z | keystonemiddleware/tests/unit/audit/test_logging_notifier.py | mahak/keystonemiddleware | 90df936708aee36d77cead548d04cb7db2327f47 | [
"Apache-1.1"
] | 1 | 2016-08-25T01:01:57.000Z | 2016-08-25T01:01:57.000Z | keystonemiddleware/tests/unit/audit/test_logging_notifier.py | mahak/keystonemiddleware | 90df936708aee36d77cead548d04cb7db2327f47 | [
"Apache-1.1"
] | 49 | 2015-02-02T23:57:09.000Z | 2021-12-17T19:01:53.000Z | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
import fixtures
from keystonemiddleware.tests.unit.audit import base
class TestLoggingNotifier(base.BaseAuditMiddlewareTest):
    """Audit middleware behaviour when oslo.messaging is unavailable.

    With the messaging library patched out, audit notifications must fall
    back to the plain Python logger.
    """

    def setUp(self):
        # Pretend oslo_messaging is not importable so the notifier falls
        # back to logging.
        target = 'keystonemiddleware.audit._notifier.oslo_messaging'
        patch = fixtures.MockPatch(target, None)
        self.messaging_fixture = self.useFixture(patch)
        super(TestLoggingNotifier, self).setUp()

    def test_api_request_no_messaging(self):
        app = self.create_simple_app()
        with mock.patch('keystonemiddleware.audit._LOG.info') as log:
            app.get('/foo/bar', extra_environ=self.get_environ_header())
            # One request notification followed by one response notification.
            expected_events = ['audit.http.request', 'audit.http.response']
            for index, event_type in enumerate(expected_events):
                call_args = log.call_args_list[index][0]
                self.assertEqual(event_type, call_args[1]['event_type'])
| 36.690476 | 79 | 0.709929 |
from unittest import mock
import fixtures
from keystonemiddleware.tests.unit.audit import base
class TestLoggingNotifier(base.BaseAuditMiddlewareTest):
def setUp(self):
p = 'keystonemiddleware.audit._notifier.oslo_messaging'
f = fixtures.MockPatch(p, None)
self.messaging_fixture = self.useFixture(f)
super(TestLoggingNotifier, self).setUp()
def test_api_request_no_messaging(self):
app = self.create_simple_app()
with mock.patch('keystonemiddleware.audit._LOG.info') as log:
app.get('/foo/bar', extra_environ=self.get_environ_header())
call_args = log.call_args_list[0][0]
self.assertEqual('audit.http.request', call_args[1]['event_type'])
call_args = log.call_args_list[1][0]
self.assertEqual('audit.http.response', call_args[1]['event_type'])
| true | true |
f73406859b04a2189c0bef19acf3f961e5fb1bd8 | 10,164 | py | Python | examples/benchmark/utils/recommendation/movielens.py | Ezra-H/autodist | b5ab28d0d867c22742daa3c1d324fe20c1852bd7 | [
"Apache-2.0"
] | 127 | 2020-07-16T16:33:10.000Z | 2022-03-25T09:58:50.000Z | examples/benchmark/utils/recommendation/movielens.py | Ezra-H/autodist | b5ab28d0d867c22742daa3c1d324fe20c1852bd7 | [
"Apache-2.0"
] | 17 | 2020-07-16T20:03:44.000Z | 2021-02-24T19:53:12.000Z | examples/benchmark/utils/recommendation/movielens.py | Ezra-H/autodist | b5ab28d0d867c22742daa3c1d324fe20c1852bd7 | [
"Apache-2.0"
] | 26 | 2020-07-21T01:23:55.000Z | 2022-02-24T03:43:08.000Z | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Download and extract the MovieLens dataset from GroupLens website.
Download the dataset, and perform basic preprocessing.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import tempfile
import zipfile
# pylint: disable=g-bad-import-order
import numpy as np
import pandas as pd
import six
from six.moves import urllib # pylint: disable=redefined-builtin
from absl import app as absl_app
from absl import flags
from absl import logging
import tensorflow as tf
# pylint: enable=g-bad-import-order
from utils.flags import core as flags_core
# Dataset identifiers as used in the GroupLens download URLs.
ML_1M = "ml-1m"
ML_20M = "ml-20m"
DATASETS = [ML_1M, ML_20M]

# File names of the cleaned, regularized per-dataset output.
RATINGS_FILE = "ratings.csv"
MOVIES_FILE = "movies.csv"

# URL to download dataset
_DATA_URL = "http://files.grouplens.org/datasets/movielens/"

# Canonical column names shared by both datasets after cleaning.
GENRE_COLUMN = "genres"
ITEM_COLUMN = "item_id"  # movies
RATING_COLUMN = "rating"
TIMESTAMP_COLUMN = "timestamp"
TITLE_COLUMN = "titles"
USER_COLUMN = "user_id"

# Fixed genre vocabulary; integerize_genres() emits a binary vector with
# one slot per entry, in this order.
GENRES = [
    'Action',
    'Adventure',
    'Animation',
    "Children",  # the raw data spells this "Children's"; see integerize_genres
    'Comedy',
    'Crime',
    'Documentary',
    'Drama',
    'Fantasy',
    'Film-Noir',
    'Horror',
    "IMAX",
    'Musical',
    'Mystery',
    'Romance',
    'Sci-Fi',
    'Thriller',
    'War',
    'Western']
N_GENRE = len(GENRES)

# Column orderings for the two cleaned csv files.
RATING_COLUMNS = [USER_COLUMN, ITEM_COLUMN, RATING_COLUMN, TIMESTAMP_COLUMN]
MOVIE_COLUMNS = [ITEM_COLUMN, TITLE_COLUMN, GENRE_COLUMN]

# Note: Users are indexed [1, k], not [0, k-1]
NUM_USER_IDS = {
    ML_1M: 6040,
    ML_20M: 138493,
}

# Note: Movies are indexed [1, k], not [0, k-1]
# Both the 1m and 20m datasets use the same movie set.
NUM_ITEM_IDS = 3952

MAX_RATING = 5

NUM_RATINGS = {
    ML_1M: 1000209,
    ML_20M: 20000263
}
def _download_and_clean(dataset, data_dir):
  """Download MovieLens dataset in a standard format.

  This function downloads the specified MovieLens format and coerces it into a
  standard format. The only difference between the ml-1m and ml-20m datasets
  after this point (other than size, of course) is that the 1m dataset uses
  whole number ratings while the 20m dataset allows half integer ratings.

  Args:
    dataset: One of DATASETS ("ml-1m" or "ml-20m").
    data_dir: Root directory; the cleaned files land in data_dir/<dataset>.

  Raises:
    ValueError: If `dataset` is not a known MovieLens dataset name.
  """
  if dataset not in DATASETS:
    raise ValueError("dataset {} is not in {{{}}}".format(
        dataset, ",".join(DATASETS)))
  data_subdir = os.path.join(data_dir, dataset)
  expected_files = ["{}.zip".format(dataset), RATINGS_FILE, MOVIES_FILE]

  tf.io.gfile.makedirs(data_subdir)
  # Skip the download when all expected outputs are already present.
  if set(expected_files).intersection(
      tf.io.gfile.listdir(data_subdir)) == set(expected_files):
    logging.info("Dataset {} has already been downloaded".format(dataset))
    return

  url = "{}{}.zip".format(_DATA_URL, dataset)

  temp_dir = tempfile.mkdtemp()
  try:
    zip_path = os.path.join(temp_dir, "{}.zip".format(dataset))
    zip_path, _ = urllib.request.urlretrieve(url, zip_path)
    statinfo = os.stat(zip_path)
    # A new line to clear the carriage return from download progress
    # logging.info is not applicable here
    print()
    logging.info(
        "Successfully downloaded {} {} bytes".format(
            zip_path, statinfo.st_size))

    # Fixed: use a context manager so the zip file handle is closed
    # deterministically instead of leaking until garbage collection.
    with zipfile.ZipFile(zip_path, "r") as zip_file:
      zip_file.extractall(temp_dir)

    if dataset == ML_1M:
      _regularize_1m_dataset(temp_dir)
    else:
      _regularize_20m_dataset(temp_dir)

    # Copy cleaned files into place, leaving any pre-existing ones alone.
    for fname in tf.io.gfile.listdir(temp_dir):
      if not tf.io.gfile.exists(os.path.join(data_subdir, fname)):
        tf.io.gfile.copy(os.path.join(temp_dir, fname),
                         os.path.join(data_subdir, fname))
      else:
        logging.info(
            "Skipping copy of {}, as it already exists in the "
            "destination folder.".format(fname))
  finally:
    tf.io.gfile.rmtree(temp_dir)
def _transform_csv(input_path, output_path, names, skip_first, separator=","):
  """Rewrite a raw MovieLens file as a comma-separated csv with a header row.

  Args:
    input_path: The path of the raw csv.
    output_path: The path of the cleaned csv.
    names: The csv column names.
    skip_first: Boolean of whether to skip the first line of the raw csv.
    separator: Character used to separate fields in the raw csv.
  """
  if six.PY2:
    names = [six.ensure_text(n, "utf-8") for n in names]
  with tf.io.gfile.GFile(output_path, "wb") as out_file, \
      tf.io.gfile.GFile(input_path, "rb") as in_file:
    # Emit the column-name header first.
    out_file.write(",".join(names).encode("utf-8"))
    out_file.write(b"\n")
    for line_num, raw_line in enumerate(in_file):
      if skip_first and line_num == 0:
        continue  # the raw file carried its own header; drop it
      text = six.ensure_text(raw_line, "utf-8", errors="ignore")
      fields = text.split(separator)
      if separator != ",":
        # Quote any field that itself contains a comma so the output
        # stays a valid comma-separated file.
        fields = ['"{}"'.format(f) if "," in f else f
                  for f in fields]
      out_file.write(",".join(fields).encode("utf-8"))
def _regularize_1m_dataset(temp_dir):
  """Convert the extracted ml-1m files into the standard csv layout.

  The raw ml-1m files use "::" separators and have no header row:
    ratings.dat lines are UserID::MovieID::Rating::Timestamp
      (UserIDs 1..6040, MovieIDs 1..3952, whole-star ratings, UTC epoch
      timestamps, each user has at least 20 ratings).
    movies.dat lines are MovieID::Title::Genres (MovieIDs 1..3952).
  """
  working_dir = os.path.join(temp_dir, ML_1M)
  conversions = (
      ("ratings.dat", RATINGS_FILE, RATING_COLUMNS),
      ("movies.dat", MOVIES_FILE, MOVIE_COLUMNS),
  )
  for src_name, dst_name, columns in conversions:
    _transform_csv(
        input_path=os.path.join(working_dir, src_name),
        output_path=os.path.join(temp_dir, dst_name),
        names=columns, skip_first=False, separator="::")
  tf.io.gfile.rmtree(working_dir)
def _regularize_20m_dataset(temp_dir):
  """Convert the extracted ml-20m files into the standard csv layout.

  The raw ml-20m files are already comma-separated with a header row:
    ratings.csv lines are userId,movieId,rating,timestamp, ordered by
      userId then movieId; ratings are half-star increments 0.5..5.0;
      timestamps are UTC epoch seconds; every user rated >= 20 movies.
    movies.csv lines are MovieID,Title,Genres (MovieIDs 1..3952).
  """
  working_dir = os.path.join(temp_dir, ML_20M)
  conversions = (
      ("ratings.csv", RATINGS_FILE, RATING_COLUMNS),
      ("movies.csv", MOVIES_FILE, MOVIE_COLUMNS),
  )
  for src_name, dst_name, columns in conversions:
    _transform_csv(
        input_path=os.path.join(working_dir, src_name),
        output_path=os.path.join(temp_dir, dst_name),
        names=columns, skip_first=True, separator=",")
  tf.io.gfile.rmtree(working_dir)
def download(dataset, data_dir):
  """Download and clean one dataset, or all of them when `dataset` is falsy.

  Args:
    dataset: A name from DATASETS, or None/"" to fetch every dataset.
    data_dir: Root directory for the cleaned output.
  """
  if dataset:
    _download_and_clean(dataset, data_dir)
  else:
    # Fixed idiom: a plain loop instead of a list comprehension built
    # solely for its side effects.
    for d in DATASETS:
      _download_and_clean(d, data_dir)
def ratings_csv_to_dataframe(data_dir, dataset):
  """Load the cleaned ratings csv of `dataset` into a pandas DataFrame."""
  ratings_path = os.path.join(data_dir, dataset, RATINGS_FILE)
  with tf.io.gfile.GFile(ratings_path) as ratings_file:
    return pd.read_csv(ratings_file, encoding="utf-8")
def csv_to_joint_dataframe(data_dir, dataset):
  """Join ratings with movie metadata on the item id.

  Returns a DataFrame with one row per rating, carrying the movie's title
  and genres, with the rating column coerced to float32.
  """
  ratings = ratings_csv_to_dataframe(data_dir, dataset)
  movies_path = os.path.join(data_dir, dataset, MOVIES_FILE)
  with tf.io.gfile.GFile(movies_path) as movies_file:
    movies = pd.read_csv(movies_file, encoding="utf-8")
  joined = ratings.merge(movies, on=ITEM_COLUMN)
  joined[RATING_COLUMN] = joined[RATING_COLUMN].astype(np.float32)
  return joined
def integerize_genres(dataframe):
  """Replace genre string with a binary vector.

  Args:
    dataframe: a pandas dataframe of movie data.

  Returns:
    The transformed dataframe.
  """
  def _map_fn(entry):
    # Bug fix: str.replace returns a new string and the previous code
    # discarded it, so the raw "Children's" genre never matched the
    # "Children" entry in GENRES and that bit was never set.
    entry = entry.replace("Children's", "Children")  # naming difference.
    movie_genres = entry.split("|")
    output = np.zeros((len(GENRES),), dtype=np.int64)
    for i, genre in enumerate(GENRES):
      if genre in movie_genres:
        output[i] = 1
    return output

  dataframe[GENRE_COLUMN] = dataframe[GENRE_COLUMN].apply(_map_fn)
  return dataframe
def define_data_download_flags():
  """Register the command-line flags understood by this download script."""
  data_dir_help = flags_core.help_wrap(
      "Directory to download and extract data.")
  dataset_help = flags_core.help_wrap("Dataset to be trained and evaluated.")
  flags.DEFINE_string(name="data_dir", default="/tmp/movielens-data/",
                      help=data_dir_help)
  flags.DEFINE_enum(name="dataset", default=None, enum_values=DATASETS,
                    case_sensitive=False, help=dataset_help)
def main(_):
  """Download and extract the data from the GroupLens website."""
  opts = flags.FLAGS
  download(opts.dataset, opts.data_dir)
if __name__ == "__main__":
  # Register flags, then hand control to absl, which parses argv and
  # invokes main().
  define_data_download_flags()
  FLAGS = flags.FLAGS
  absl_app.run(main)
| 31.273846 | 82 | 0.657222 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import tempfile
import zipfile
import numpy as np
import pandas as pd
import six
from six.moves import urllib
from absl import app as absl_app
from absl import flags
from absl import logging
import tensorflow as tf
from utils.flags import core as flags_core
ML_1M = "ml-1m"
ML_20M = "ml-20m"
DATASETS = [ML_1M, ML_20M]
RATINGS_FILE = "ratings.csv"
MOVIES_FILE = "movies.csv"
_DATA_URL = "http://files.grouplens.org/datasets/movielens/"
GENRE_COLUMN = "genres"
ITEM_COLUMN = "item_id"
RATING_COLUMN = "rating"
TIMESTAMP_COLUMN = "timestamp"
TITLE_COLUMN = "titles"
USER_COLUMN = "user_id"
GENRES = [
'Action',
'Adventure',
'Animation',
"Children",
'Comedy',
'Crime',
'Documentary',
'Drama',
'Fantasy',
'Film-Noir',
'Horror',
"IMAX",
'Musical',
'Mystery',
'Romance',
'Sci-Fi',
'Thriller',
'War',
'Western']
N_GENRE = len(GENRES)
RATING_COLUMNS = [USER_COLUMN, ITEM_COLUMN, RATING_COLUMN, TIMESTAMP_COLUMN]
MOVIE_COLUMNS = [ITEM_COLUMN, TITLE_COLUMN, GENRE_COLUMN]
NUM_USER_IDS = {
ML_1M: 6040,
ML_20M: 138493,
}
NUM_ITEM_IDS = 3952
MAX_RATING = 5
NUM_RATINGS = {
ML_1M: 1000209,
ML_20M: 20000263
}
def _download_and_clean(dataset, data_dir):
if dataset not in DATASETS:
raise ValueError("dataset {} is not in {{{}}}".format(
dataset, ",".join(DATASETS)))
data_subdir = os.path.join(data_dir, dataset)
expected_files = ["{}.zip".format(dataset), RATINGS_FILE, MOVIES_FILE]
tf.io.gfile.makedirs(data_subdir)
if set(expected_files).intersection(
tf.io.gfile.listdir(data_subdir)) == set(expected_files):
logging.info("Dataset {} has already been downloaded".format(dataset))
return
url = "{}{}.zip".format(_DATA_URL, dataset)
temp_dir = tempfile.mkdtemp()
try:
zip_path = os.path.join(temp_dir, "{}.zip".format(dataset))
zip_path, _ = urllib.request.urlretrieve(url, zip_path)
statinfo = os.stat(zip_path)
print()
logging.info(
"Successfully downloaded {} {} bytes".format(
zip_path, statinfo.st_size))
zipfile.ZipFile(zip_path, "r").extractall(temp_dir)
if dataset == ML_1M:
_regularize_1m_dataset(temp_dir)
else:
_regularize_20m_dataset(temp_dir)
for fname in tf.io.gfile.listdir(temp_dir):
if not tf.io.gfile.exists(os.path.join(data_subdir, fname)):
tf.io.gfile.copy(os.path.join(temp_dir, fname),
os.path.join(data_subdir, fname))
else:
logging.info(
"Skipping copy of {}, as it already exists in the "
"destination folder.".format(fname))
finally:
tf.io.gfile.rmtree(temp_dir)
def _transform_csv(input_path, output_path, names, skip_first, separator=","):
if six.PY2:
names = [six.ensure_text(n, "utf-8") for n in names]
with tf.io.gfile.GFile(output_path, "wb") as f_out, \
tf.io.gfile.GFile(input_path, "rb") as f_in:
f_out.write(",".join(names).encode("utf-8"))
f_out.write(b"\n")
for i, line in enumerate(f_in):
if i == 0 and skip_first:
continue
line = six.ensure_text(line, "utf-8", errors="ignore")
fields = line.split(separator)
if separator != ",":
fields = ['"{}"'.format(field) if "," in field else field
for field in fields]
f_out.write(",".join(fields).encode("utf-8"))
def _regularize_1m_dataset(temp_dir):
working_dir = os.path.join(temp_dir, ML_1M)
_transform_csv(
input_path=os.path.join(working_dir, "ratings.dat"),
output_path=os.path.join(temp_dir, RATINGS_FILE),
names=RATING_COLUMNS, skip_first=False, separator="::")
_transform_csv(
input_path=os.path.join(working_dir, "movies.dat"),
output_path=os.path.join(temp_dir, MOVIES_FILE),
names=MOVIE_COLUMNS, skip_first=False, separator="::")
tf.io.gfile.rmtree(working_dir)
def _regularize_20m_dataset(temp_dir):
working_dir = os.path.join(temp_dir, ML_20M)
_transform_csv(
input_path=os.path.join(working_dir, "ratings.csv"),
output_path=os.path.join(temp_dir, RATINGS_FILE),
names=RATING_COLUMNS, skip_first=True, separator=",")
_transform_csv(
input_path=os.path.join(working_dir, "movies.csv"),
output_path=os.path.join(temp_dir, MOVIES_FILE),
names=MOVIE_COLUMNS, skip_first=True, separator=",")
tf.io.gfile.rmtree(working_dir)
def download(dataset, data_dir):
if dataset:
_download_and_clean(dataset, data_dir)
else:
_ = [_download_and_clean(d, data_dir) for d in DATASETS]
def ratings_csv_to_dataframe(data_dir, dataset):
with tf.io.gfile.GFile(os.path.join(data_dir, dataset, RATINGS_FILE)) as f:
return pd.read_csv(f, encoding="utf-8")
def csv_to_joint_dataframe(data_dir, dataset):
ratings = ratings_csv_to_dataframe(data_dir, dataset)
with tf.io.gfile.GFile(os.path.join(data_dir, dataset, MOVIES_FILE)) as f:
movies = pd.read_csv(f, encoding="utf-8")
df = ratings.merge(movies, on=ITEM_COLUMN)
df[RATING_COLUMN] = df[RATING_COLUMN].astype(np.float32)
return df
def integerize_genres(dataframe):
def _map_fn(entry):
entry.replace("Children's", "Children") # naming difference.
movie_genres = entry.split("|")
output = np.zeros((len(GENRES),), dtype=np.int64)
for i, genre in enumerate(GENRES):
if genre in movie_genres:
output[i] = 1
return output
dataframe[GENRE_COLUMN] = dataframe[GENRE_COLUMN].apply(_map_fn)
return dataframe
def define_data_download_flags():
flags.DEFINE_string(
name="data_dir", default="/tmp/movielens-data/",
help=flags_core.help_wrap(
"Directory to download and extract data."))
flags.DEFINE_enum(
name="dataset", default=None,
enum_values=DATASETS, case_sensitive=False,
help=flags_core.help_wrap("Dataset to be trained and evaluated."))
def main(_):
download(flags.FLAGS.dataset, flags.FLAGS.data_dir)
if __name__ == "__main__":
define_data_download_flags()
FLAGS = flags.FLAGS
absl_app.run(main)
| true | true |
f73406bb107336874310c23479d76d9679ca7a7d | 3,159 | py | Python | qa/rpc-tests/auxpow.py | fastcoin-project/fastcoin | 3fbf26b4ff0d216d11aa59ae204e8be9a5337883 | [
"MIT"
] | 6 | 2021-01-04T22:22:57.000Z | 2021-07-19T10:45:51.000Z | qa/rpc-tests/auxpow.py | fastcoin-project/fastcoin | 3fbf26b4ff0d216d11aa59ae204e8be9a5337883 | [
"MIT"
] | null | null | null | qa/rpc-tests/auxpow.py | fastcoin-project/fastcoin | 3fbf26b4ff0d216d11aa59ae204e8be9a5337883 | [
"MIT"
] | 9 | 2021-01-04T00:01:28.000Z | 2021-11-03T05:30:10.000Z | #!/usr/bin/env python3
# Copyright (c) 2013-2021 The Fastcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test AuxPOW RPC interface and constraints
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework import scrypt_auxpow
class AuxPOWTest (BitcoinTestFramework):
    """Exercise the AuxPOW RPC interface and its activation constraints."""

    REWARD = 500000          # reward per block
    CHAIN_ID = "62"          # our own chain id (must not be the parent chain)
    DIGISHIELD_START = 10    # nHeight when digishield starts
    AUXPOW_START = 20        # nHeight when auxpow starts
    MATURITY_HEIGHT = 60     # number of blocks for mined transactions to mature

    def setup_chain(self):
        print("Initializing test directory " + self.options.tmpdir)
        initialize_chain_clean(self.options.tmpdir, 2)

    def setup_network(self, split=False):
        self.nodes = start_nodes(2, self.options.tmpdir)
        connect_nodes_bi(self.nodes, 0, 1)
        self.is_network_split = False
        self.sync_all()

    def _expect_getauxblock_unavailable(self):
        """Attempt aux mining and require the 'not yet available' RPC error.

        Factored out because the pre-activation check was duplicated
        verbatim (steps 1 and 4). Any other RPC error is re-raised.
        """
        try:
            scrypt_auxpow.mineScryptAux(self.nodes[0], "00", True)
        except JSONRPCException as ex:
            if ex.error['message'] == "getauxblock method is not yet available":
                pass
            else:
                raise ex

    def run_test(self):
        print("Mining blocks...")

        # 1. mine an auxpow block before auxpow is allowed, expect: fail
        self._expect_getauxblock_unavailable()
        self.sync_all()

        # 2. mine a non-auxpow block, just to ensure that this node
        #    can mine at all, expect: success
        self.nodes[0].generate(1)
        self.sync_all()

        # 3. mine blocks until we're in the digishield era
        self.nodes[1].generate(self.DIGISHIELD_START - 1 - 1)
        self.sync_all()

        # 4. mine an auxpow block before auxpow is allowed, attempt 2
        #    expect: fail
        self._expect_getauxblock_unavailable()
        self.sync_all()

        # 5. mine blocks until we're in the auxpow era
        self.nodes[1].generate(self.AUXPOW_START - self.DIGISHIELD_START)
        self.sync_all()

        # 6. mine a valid auxpow block, expect: success
        assert scrypt_auxpow.mineScryptAux(self.nodes[0], "00", True) is True

        # 7. mine an auxpow block with high pow, expect: fail
        assert scrypt_auxpow.mineScryptAux(self.nodes[0], "00", False) is False

        # 8. mine a valid auxpow block with the parent chain being us
        #    expect: fail
        assert scrypt_auxpow.mineScryptAux(self.nodes[0], self.CHAIN_ID, True) is False
        self.sync_all()

        # 9. mine enough blocks to mature all node 0 rewards
        self.nodes[1].generate(self.MATURITY_HEIGHT)
        self.sync_all()

        # node 0 should have block rewards for 2 blocks:
        # one from step 2 and one from step 6.
        assert_equal(self.nodes[0].getbalance(), self.REWARD * 2)
# Entry point: run the auxpow activation/constraint checks.
if __name__ == '__main__':
    AuxPOWTest ().main ()
| 35.494382 | 87 | 0.646407 |
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework import scrypt_auxpow
class AuxPOWTest (BitcoinTestFramework):
    """Functional test of merged-mining (auxpow) activation and validity rules."""
    # Block subsidy per mined block, in base units.
    REWARD = 500000
    # This chain's own chain ID; an auxpow whose parent chain carries it must be rejected.
    CHAIN_ID = "62"
    # Height at which the DigiShield difficulty era begins.
    DIGISHIELD_START = 10
    # Height from which auxpow (merged-mined) blocks are accepted.
    AUXPOW_START = 20
    # Extra blocks mined at the end so node 0's coinbase rewards mature.
    MATURITY_HEIGHT = 60
    def setup_chain(self):
        """Create a fresh 2-node chain in the test's temp directory."""
        print("Initializing test directory " + self.options.tmpdir)
        initialize_chain_clean(self.options.tmpdir, 2)
    def setup_network(self, split=False):
        """Start both nodes fully connected; `split` is accepted but ignored."""
        self.nodes = start_nodes(2, self.options.tmpdir)
        connect_nodes_bi(self.nodes,0,1)
        self.is_network_split=False
        self.sync_all()
    def run_test(self):
        """Drive the chain through each era and check auxpow acceptance rules."""
        print("Mining blocks...")
        # Mining an auxpow block before activation must fail.
        try:
            scrypt_auxpow.mineScryptAux(self.nodes[0], "00", True)
        except JSONRPCException as ex:
            if ex.error['message'] == "getauxblock method is not yet available":
                pass
            else:
                raise ex
        self.sync_all()
        # Sanity check: plain (non-auxpow) mining still works.
        self.nodes[0].generate(1)
        self.sync_all()
        # Advance to just before the DigiShield era.
        self.nodes[1].generate(self.DIGISHIELD_START - 1 - 1)
        self.sync_all()
        # 4. mine an auxpow block before auxpow is allowed, attempt 2
        # expect: fail
        try:
            scrypt_auxpow.mineScryptAux(self.nodes[0], "00", True)
        except JSONRPCException as ex:
            if ex.error['message'] == "getauxblock method is not yet available":
                pass
            else:
                raise ex
        self.sync_all()
        # 5. mine blocks until we're in in auxpow era
        self.nodes[1].generate(self.AUXPOW_START - self.DIGISHIELD_START)
        self.sync_all()
        # A valid auxpow block must now be accepted.
        assert scrypt_auxpow.mineScryptAux(self.nodes[0], "00", True) is True
        # An auxpow block with high pow must be rejected.
        assert scrypt_auxpow.mineScryptAux(self.nodes[0], "00", False) is False
        # An auxpow whose parent chain is ourselves must be rejected.
        assert scrypt_auxpow.mineScryptAux(self.nodes[0], self.CHAIN_ID, True) is False
        self.sync_all()
        # Mature node 0's coinbase rewards.
        self.nodes[1].generate(self.MATURITY_HEIGHT)
        self.sync_all()
        # Node 0 mined exactly two blocks (one plain, one auxpow).
        assert_equal(self.nodes[0].getbalance(), self.REWARD * 2)
if __name__ == '__main__':
    # Run the auxpow functional test when executed directly.
    AuxPOWTest ().main ()
| true | true |
f73406c35707349614a6342df1ff196a5574c21f | 2,572 | py | Python | tools/ipc_fuzzer/scripts/remove_close_messages.py | zealoussnow/chromium | fd8a8914ca0183f0add65ae55f04e287543c7d4a | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 14,668 | 2015-01-01T01:57:10.000Z | 2022-03-31T23:33:32.000Z | tools/ipc_fuzzer/scripts/remove_close_messages.py | zealoussnow/chromium | fd8a8914ca0183f0add65ae55f04e287543c7d4a | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 113 | 2015-05-04T09:58:14.000Z | 2022-01-31T19:35:03.000Z | tools/ipc_fuzzer/scripts/remove_close_messages.py | zealoussnow/chromium | fd8a8914ca0183f0add65ae55f04e287543c7d4a | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 5,941 | 2015-01-02T11:32:21.000Z | 2022-03-31T16:35:46.000Z | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Removes WidgetHostMsg_Close and alike from testcases. These messages are an
annoyance for corpus distillation. They cause the browser to exit, so no
further messages are processed. On the other hand, WidgetHostMsg_Close is useful
for fuzzing - many found bugs are related to a renderer disappearing. So the
fuzzer should be crafting random WidgetHostMsg_Close messages.
"""
from __future__ import print_function
import argparse
import os
import platform
import shutil
import subprocess
import sys
import tempfile
def create_temp_file():
  """Create an empty temporary file on disk and return its path.

  The caller owns the file and is responsible for removing it.
  """
  handle, path = tempfile.mkstemp()
  os.close(handle)
  return path
def main():
  """Strip WidgetHostMsg_Close/ClosePage_ACK messages from every testcase.

  Locates the ipc_message_util binary under the build output directory,
  then rewrites each file in testcase_dir in place via a temporary file.
  Returns 0 on success, 1 if the binary is missing.
  """
  desc = 'Remove WidgetHostMsg_Close and alike from the testcases.'
  parser = argparse.ArgumentParser(description=desc)
  parser.add_argument(
      '--out-dir',
      dest='out_dir',
      default='out',
      help='ouput directory under src/ directory')
  parser.add_argument(
      '--build-type',
      dest='build_type',
      default='Release',
      help='Debug vs. Release build')
  parser.add_argument('testcase_dir', help='Directory containing testcases')
  parsed = parser.parse_args()
  message_util_binary = 'ipc_message_util'
  # Derive the src/ checkout root from this script's own location.
  script_path = os.path.realpath(__file__)
  ipc_fuzzer_dir = os.path.join(os.path.dirname(script_path), os.pardir)
  src_dir = os.path.abspath(os.path.join(ipc_fuzzer_dir, os.pardir, os.pardir))
  out_dir = os.path.join(src_dir, parsed.out_dir)
  build_dir = os.path.join(out_dir, parsed.build_type)
  message_util_path = os.path.join(build_dir, message_util_binary)
  if not os.path.exists(message_util_path):
    print('ipc_message_util executable not found at ', message_util_path)
    return 1
  # The last two slots are placeholders replaced per testcase below.
  filter_command = [
      message_util_path,
      '--invert',
      '--regexp=WidgetHostMsg_Close|WidgetHostMsg_ClosePage_ACK',
      'input',
      'output',
  ]
  testcase_list = os.listdir(parsed.testcase_dir)
  testcase_count = len(testcase_list)
  index = 0
  for testcase in testcase_list:
    index += 1
    print('[%d/%d] Processing %s' % (index, testcase_count, testcase))
    testcase_path = os.path.join(parsed.testcase_dir, testcase)
    filtered_path = create_temp_file()
    filter_command[-2] = testcase_path
    filter_command[-1] = filtered_path
    subprocess.call(filter_command)
    # Replace the original testcase with the filtered output.
    shutil.move(filtered_path, testcase_path)
  return 0
if __name__ == '__main__':
  # Propagate main()'s return code to the shell.
  sys.exit(main())
| 30.987952 | 80 | 0.739114 |
from __future__ import print_function
import argparse
import os
import platform
import shutil
import subprocess
import sys
import tempfile
def create_temp_file():
  """Return the path of a newly created, empty temporary file."""
  fd, file_path = tempfile.mkstemp()
  os.close(fd)
  return file_path
def main():
  """Strip WidgetHostMsg_Close/ClosePage_ACK messages from every testcase.

  Returns 0 on success, 1 if the ipc_message_util binary is missing.
  """
  desc = 'Remove WidgetHostMsg_Close and alike from the testcases.'
  parser = argparse.ArgumentParser(description=desc)
  parser.add_argument(
      '--out-dir',
      dest='out_dir',
      default='out',
      help='ouput directory under src/ directory')
  parser.add_argument(
      '--build-type',
      dest='build_type',
      default='Release',
      help='Debug vs. Release build')
  parser.add_argument('testcase_dir', help='Directory containing testcases')
  parsed = parser.parse_args()
  message_util_binary = 'ipc_message_util'
  # Derive the src/ checkout root from this script's own location.
  script_path = os.path.realpath(__file__)
  ipc_fuzzer_dir = os.path.join(os.path.dirname(script_path), os.pardir)
  src_dir = os.path.abspath(os.path.join(ipc_fuzzer_dir, os.pardir, os.pardir))
  out_dir = os.path.join(src_dir, parsed.out_dir)
  build_dir = os.path.join(out_dir, parsed.build_type)
  message_util_path = os.path.join(build_dir, message_util_binary)
  if not os.path.exists(message_util_path):
    print('ipc_message_util executable not found at ', message_util_path)
    return 1
  # The last two slots are placeholders replaced per testcase below.
  filter_command = [
      message_util_path,
      '--invert',
      '--regexp=WidgetHostMsg_Close|WidgetHostMsg_ClosePage_ACK',
      'input',
      'output',
  ]
  testcase_list = os.listdir(parsed.testcase_dir)
  testcase_count = len(testcase_list)
  index = 0
  for testcase in testcase_list:
    index += 1
    print('[%d/%d] Processing %s' % (index, testcase_count, testcase))
    testcase_path = os.path.join(parsed.testcase_dir, testcase)
    filtered_path = create_temp_file()
    filter_command[-2] = testcase_path
    filter_command[-1] = filtered_path
    subprocess.call(filter_command)
    # Replace the original testcase with the filtered output.
    shutil.move(filtered_path, testcase_path)
  return 0
if __name__ == '__main__':
  # Propagate main()'s return code to the shell.
  sys.exit(main())
| true | true |
f734072f1032589ab024016a3f18c4100381457c | 2,995 | py | Python | earlier-2020/graphs-paper1/print_line_chart.py | transcendentsky/py_tutorials | fed8e6c8d79f854a1cebcfd5c37297a163846208 | [
"Apache-2.0"
] | 1 | 2018-06-18T12:09:33.000Z | 2018-06-18T12:09:33.000Z | earlier-2020/graphs-paper1/print_line_chart.py | transcendentsky/py_tutorials | fed8e6c8d79f854a1cebcfd5c37297a163846208 | [
"Apache-2.0"
] | null | null | null | earlier-2020/graphs-paper1/print_line_chart.py | transcendentsky/py_tutorials | fed8e6c8d79f854a1cebcfd5c37297a163846208 | [
"Apache-2.0"
] | 1 | 2018-06-18T12:13:21.000Z | 2018-06-18T12:13:21.000Z | import csv
# import matplotlib.pyplot as plt
import pylab as plt
import numpy as np
def show_plot(times, epochs, data):
    """Draw a simple red line chart of *data* against *epochs*.

    Args:
        times: unused; kept so existing callers keep working.
        epochs: x-axis values.
        data: y-axis values, same length as *epochs*.
    """
    plt.figure(figsize=(8, 5))
    plt.plot(epochs, data, color='red', label='0')
    plt.title('Test')
    plt.show()
# with open('run_nomix_cifar100_mute_with_xavier_logs-tag-Test_1001_val_acc.csv') as f:
# f_csv = csv.reader(f)
# headers = next(f_csv)
# # print(headers)
# for row in f_csv:
# print(row)
# x-axis sample points: 400 epochs for most runs, 351 for the shorter run.
y = plt.linspace(0, 399, 400)
y2 = plt.linspace(0, 350, 351)
# Per-schedule conf-loss buffers, filled from the TensorBoard CSV exports below.
vconf1 = plt.linspace(0, 399, 400)
vconf2 = plt.linspace(0, 399, 400)
vconf3 = plt.linspace(0, 399, 400)
vconf4 = plt.linspace(0, 350, 351)
lconf1 = plt.linspace(0, 399, 400)
lconf2 = plt.linspace(0, 399, 400)
lconf3 = plt.linspace(0, 399, 400)
# print(y)
conf1 = open("paper-1-compare-schedules/run_ssd_vgg16_voc_linearmix-tag-Train_conf_loss.csv")
f_csv = csv.reader(conf1)
headers = next(f_csv)
for i, row in enumerate(f_csv):
    vconf1[i] = row[2]
    # NOTE(review): vconf3 is rescaled here, inside the vconf1 loop, and is
    # overwritten again in its own loop below — confirm this is intentional.
    vconf3[i] *= 1.8
conf2 = open("paper-1-compare-schedules/run_ssd_vgg16_voc_scratch-tag-Train_conf_loss.csv")
f_csv = csv.reader(conf2)
headers = next(f_csv)
for i, row in enumerate(f_csv):
    vconf2[i] = row[2]
conf3 = open("paper-1-compare-schedules/run_ssd_vgg16_voc_sigmoid-tag-Train_conf_loss.csv")
f_csv = csv.reader(conf3)
headers = next(f_csv)
for i, row in enumerate(f_csv):
    vconf3[i] = row[2]
    vconf3[i] *= 0.97
# Small multiplicative jitter (+/-0.5%) and a slow linear ramp used to
# synthesize the two "fixed ratio" curves from the scratch run.
randr = (np.random.rand(400)-0.5) * 0.01 + 1
randr2 = (np.random.rand(400)-0.5) * 0.01 + 1
line = np.linspace(1,1.12,400)
lconf1 = vconf2.copy() * randr * 1.06
lconf2 = vconf2.copy() * randr2 * 1.08
lconf2 = line * lconf2
conf4 = open("paper-1-compare-schedules/run_exp2-tag-Train_conf_loss.csv")
f_csv = csv.reader(conf4)
headers = next(f_csv)
for i, row in enumerate(f_csv):
    vconf4[i] = row[2]
    vconf4[i] *= 1.035
    # print(row)
# plt.figure(figsize=(8, 5))
fig, ax = plt.subplots(figsize=(8, 5))
# plt.plot(y[:351], vconf1[:351], color='red', label='linear')
plt.plot(y[:351], lconf2[:351], color='red', label='fixed ratio(0.1)')
plt.plot(y[:351], lconf1[:351], color='green', label='fixed ratio(0.05)')
plt.plot(y[:351], vconf2[:351], color='orange', label='fixed ratio(0.02)')
plt.plot(y[:351], vconf3[:351], color='blue', label='sigmoid')
# plt.plot(y2, vconf4, color="green", label="exp")
plt.ylim(1.5,4)
plt.xlabel('epochs')
plt.ylabel('conf loss')
plt.legend()
plt.title('Conf Loss')
plt.show()
fig.savefig('./conf-loss.eps', dpi=600, format='eps')
import pylab as plt
import numpy as np
def show_plot(times, epochs, data):
    """Display *data* versus *epochs* as a red line chart titled 'Test'."""
    figure_size = (8, 5)
    plt.figure(figsize=figure_size)
    plt.plot(epochs, data, color='red', label='0')
    plt.title('Test')
    plt.show()
# Restored: the first assignment was truncated to "pace(0, 399, 400)" by the
# comment-stripping extraction; it must define `y` (used for plotting below).
y = plt.linspace(0, 399, 400)
y2 = plt.linspace(0, 350, 351)
vconf1 = plt.linspace(0, 399, 400)
vconf2 = plt.linspace(0, 399, 400)
vconf3 = plt.linspace(0, 399, 400)
vconf4 = plt.linspace(0, 350, 351)
lconf1 = plt.linspace(0, 399, 400)
lconf2 = plt.linspace(0, 399, 400)
lconf3 = plt.linspace(0, 399, 400)
conf1 = open("paper-1-compare-schedules/run_ssd_vgg16_voc_linearmix-tag-Train_conf_loss.csv")
f_csv = csv.reader(conf1)
headers = next(f_csv)
for i, row in enumerate(f_csv):
vconf1[i] = row[2]
vconf3[i] *= 1.8
conf2 = open("paper-1-compare-schedules/run_ssd_vgg16_voc_scratch-tag-Train_conf_loss.csv")
f_csv = csv.reader(conf2)
headers = next(f_csv)
for i, row in enumerate(f_csv):
vconf2[i] = row[2]
conf3 = open("paper-1-compare-schedules/run_ssd_vgg16_voc_sigmoid-tag-Train_conf_loss.csv")
f_csv = csv.reader(conf3)
headers = next(f_csv)
for i, row in enumerate(f_csv):
vconf3[i] = row[2]
vconf3[i] *= 0.97
randr = (np.random.rand(400)-0.5) * 0.01 + 1
randr2 = (np.random.rand(400)-0.5) * 0.01 + 1
line = np.linspace(1,1.12,400)
lconf1 = vconf2.copy() * randr * 1.06
lconf2 = vconf2.copy() * randr2 * 1.08
lconf2 = line * lconf2
conf4 = open("paper-1-compare-schedules/run_exp2-tag-Train_conf_loss.csv")
f_csv = csv.reader(conf4)
headers = next(f_csv)
for i, row in enumerate(f_csv):
vconf4[i] = row[2]
vconf4[i] *= 1.035
fig, ax = plt.subplots(figsize=(8, 5))
plt.plot(y[:351], lconf2[:351], color='red', label='fixed ratio(0.1)')
plt.plot(y[:351], lconf1[:351], color='green', label='fixed ratio(0.05)')
plt.plot(y[:351], vconf2[:351], color='orange', label='fixed ratio(0.02)')
plt.plot(y[:351], vconf3[:351], color='blue', label='sigmoid')
plt.ylim(1.5,4)
plt.xlabel('epochs')
plt.ylabel('conf loss')
plt.legend()
plt.title('Conf Loss')
plt.show()
fig.savefig('./conf-loss.eps', dpi=600, format='eps') | true | true |
f734073bb39c45a6569fc573a4b5a957b4ceda16 | 10,054 | py | Python | Samples/MISP/RequestManager.py | collinhunter/security-api-solutions | c42a7a70d07a871aff187ca8580f2bf0b4886713 | [
"MIT"
] | null | null | null | Samples/MISP/RequestManager.py | collinhunter/security-api-solutions | c42a7a70d07a871aff187ca8580f2bf0b4886713 | [
"MIT"
] | null | null | null | Samples/MISP/RequestManager.py | collinhunter/security-api-solutions | c42a7a70d07a871aff187ca8580f2bf0b4886713 | [
"MIT"
] | null | null | null | import requests
import config
import datetime
import os
import json
import copy
from constants import *
class RequestManager:
    """Handles submitting TiIndicators to the Microsoft Graph Security API.

    Usage:
        with RequestManager(total_indicators) as request_manager:
            request_manager.handle_indicator(tiindicator)

    On entry, previously-sent indicator hashes and the shared expiration date
    are loaded from disk; on exit, pending indicators are flushed, indicators
    that disappeared from the feed are deleted, and both state files are
    written back.
    """
    # Field width used to right-justify counters in console output.
    RJUST = 5

    def __init__(self, total_indicators):
        # Number of indicators parsed upstream; used only for progress output.
        self.total_indicators = total_indicators

    def __enter__(self):
        """Load persisted state, authenticate, and prepare the send buffer."""
        try:
            self.existing_indicators_hash_fd = open(EXISTING_INDICATORS_HASH_FILE_NAME, 'r+')
            self.existing_indicators_hash = json.load(self.existing_indicators_hash_fd)
        except (FileNotFoundError, json.decoder.JSONDecodeError):
            # First run or corrupted state file: start from an empty hash map.
            self.existing_indicators_hash_fd = open(EXISTING_INDICATORS_HASH_FILE_NAME, 'w')
            self.existing_indicators_hash = {}
        try:
            self.expiration_date_fd = open(EXPIRATION_DATE_FILE_NAME, 'r+')
            self.expiration_date = self.expiration_date_fd.read()
        except FileNotFoundError:
            self.expiration_date_fd = open(EXPIRATION_DATE_FILE_NAME, 'w')
            self.expiration_date = self._get_expiration_date_from_config()
        if self.expiration_date <= datetime.datetime.utcnow().strftime('%Y-%m-%d'):
            # Everything previously sent has expired server-side; resend all.
            self.existing_indicators_hash = {}
            self.expiration_date = self._get_expiration_date_from_config()
        # Start by assuming every previously-sent indicator should be deleted;
        # handle_indicator() removes entries that are still present in the feed.
        self.hash_of_indicators_to_delete = copy.deepcopy(self.existing_indicators_hash)
        print(f"hash of indicators to delete {self.hash_of_indicators_to_delete}")
        access_token = self._get_access_token(
            config.graph_auth[TENANT],
            config.graph_auth[CLIENT_ID],
            config.graph_auth[CLIENT_SECRET])
        self.headers = {"Authorization": f"Bearer {access_token}", 'user-agent': 'MISP/1.0'}
        # Tokens last about 3600 s; refresh slightly early.
        self.headers_expiration_time = self._get_timestamp() + 3500
        self.success_count = 0
        self.error_count = 0
        self.del_count = 0
        self.indicators_to_be_sent = []
        self.indicators_to_be_sent_size = 0
        self.start_time = self.last_batch_done_timestamp = self._get_timestamp()
        if not os.path.exists(LOG_DIRECTORY_NAME):
            os.makedirs(LOG_DIRECTORY_NAME)
        return self

    @staticmethod
    def _get_expiration_date_from_config():
        """UTC date string 'YYYY-MM-DD', config.days_to_expire days from now."""
        return (datetime.datetime.utcnow() + datetime.timedelta(config.days_to_expire)).strftime('%Y-%m-%d')

    @staticmethod
    def _get_access_token(tenant, client_id, client_secret):
        """Acquire an OAuth2 client-credentials access token for Microsoft Graph."""
        data = {
            CLIENT_ID: client_id,
            'scope': 'https://graph.microsoft.com/.default',
            CLIENT_SECRET: client_secret,
            'grant_type': 'client_credentials'
        }
        access_token = requests.post(
            f'https://login.microsoftonline.com/{tenant}/oauth2/v2.0/token',
            data=data
        ).json()[ACCESS_TOKEN]
        return access_token

    @staticmethod
    def read_tiindicators():
        """Fetch and pretty-print all TiIndicators currently stored in Graph."""
        access_token = RequestManager._get_access_token(
            config.graph_auth[TENANT],
            config.graph_auth[CLIENT_ID],
            config.graph_auth[CLIENT_SECRET])
        print(json.dumps(requests.get(
            GRAPH_TI_INDICATORS_URL,
            headers={"Authorization": f"Bearer {access_token}"}
        ).json(), indent=2))

    @staticmethod
    def _get_request_hash(request):
        """Per-process hash of an indicator that ignores the volatile
        expiration/last-reported timestamps, so resubmissions deduplicate."""
        return str(hash(frozenset({
            k: str(v) for k, v in request.items()
            if k != 'expirationDateTime' and k != 'lastReportedDateTime'
        }.items())))

    def _log_post(self, response):
        """Record a batch response: update counters, remember sent hashes,
        write one JSON log file per indicator, and print progress."""
        self._clear_screen()
        cur_batch_success_count = cur_batch_error_count = 0
        print(f"response: {response}")
        if len(response['value']) > 0:
            for value in response['value']:
                if "Error" in value:
                    self.error_count += 1
                    cur_batch_error_count += 1
                    log_file_name = f"{self._get_datetime_now()}_error_{value[INDICATOR_REQUEST_HASH]}.json"
                else:
                    self.success_count += 1
                    cur_batch_success_count += 1
                    self.existing_indicators_hash[value[INDICATOR_REQUEST_HASH]] = value['id']
                    log_file_name = f"{self._get_datetime_now()}_{value[INDICATOR_REQUEST_HASH]}.json"
                # Close the log file deterministically instead of leaking the handle.
                with open(f'{LOG_DIRECTORY_NAME}/{log_file_name}', 'w') as log_fd:
                    json.dump(value, log_fd, indent=2)
        print('sending security indicators to Microsoft Graph Security\n')
        print(f'{self.total_indicators} indicators are parsed from misp events. Only those that do not exist in Microsoft Graph Security will be sent.\n')
        print(f"current batch indicators sent: {str(cur_batch_success_count + cur_batch_error_count).rjust(self.RJUST)}")
        print(f"current batch response success: {str(cur_batch_success_count).rjust(self.RJUST)}")
        print(f"current batch response error: {str(cur_batch_error_count).rjust(self.RJUST)}\n")

    @staticmethod
    def _get_datetime_now():
        """Current local timestamp with spaces replaced, safe for file names."""
        return str(datetime.datetime.now()).replace(' ', '_')

    def __exit__(self, exc_type, exc_val, exc_tb):
        """Flush pending indicators, delete stale ones, persist state, summarize."""
        if config.targetProduct in TARGET_PRODUCT_BULK_SUPPORT:
            self._post_to_graph()
        else:
            self._post_one_to_graph()
        self._del_indicators_no_longer_exist()
        self.expiration_date_fd.seek(0)
        self.expiration_date_fd.write(self.expiration_date)
        self.expiration_date_fd.truncate()
        self.existing_indicators_hash_fd.seek(0)
        json.dump(self.existing_indicators_hash, self.existing_indicators_hash_fd, indent=2)
        self.existing_indicators_hash_fd.truncate()
        self._print_summary()

    def _del_indicators_no_longer_exist(self):
        """Bulk-delete (batches of 100) indicators sent previously but absent now."""
        indicators = list(self.hash_of_indicators_to_delete.values())
        self.del_count = len(indicators)
        for i in range(0, len(indicators), 100):
            request_body = {'value': indicators[i: i+100]}
            response = requests.post(GRAPH_BULK_DEL_URL, headers=self.headers, json=request_body).json()
            log_file_name = f"del_{self._get_datetime_now()}.json"
            print(log_file_name)
            print(json.dumps(response, indent=2))
            print()
            # Close the log file deterministically instead of leaking the handle.
            with open(f'{LOG_DIRECTORY_NAME}/{log_file_name}', 'w') as log_fd:
                json.dump(response, log_fd, indent=2)
        for hash_of_indicator_to_delete in self.hash_of_indicators_to_delete.keys():
            self.existing_indicators_hash.pop(hash_of_indicator_to_delete, None)

    def _print_summary(self):
        """Print the final counters after the run completes."""
        self._clear_screen()
        print('script finished running\n')
        print(f"total indicators deleted: {str(self.del_count).rjust(self.RJUST)}")

    def _post_one_to_graph(self):
        """Send buffered indicators one at a time (targets without bulk support)."""
        for indicator in self.indicators_to_be_sent:
            request_body = indicator
            response = requests.post(GRAPH_TI_INDICATORS_URL, headers=self.headers, json=request_body).json()
        self.indicators_to_be_sent = []
        # NOTE(review): only the final response is logged, and _log_post expects a
        # bulk-style {'value': [...]} payload — confirm single-post responses match.
        self._log_post(response)

    def _post_to_graph(self):
        """Send all buffered indicators in one bulk request."""
        request_body = {'value': self.indicators_to_be_sent}
        response = requests.post(GRAPH_BULK_POST_URL, headers=self.headers, json=request_body).json()
        self.indicators_to_be_sent = []
        self._log_post(response)

    def handle_indicator(self, indicator):
        """Queue *indicator* unless an identical one was already sent.

        Flushes the queue in batches of 100.
        """
        self._update_headers_if_expired()
        indicator[EXPIRATION_DATE_TIME] = self.expiration_date
        indicator_hash = self._get_request_hash(indicator)
        indicator[INDICATOR_REQUEST_HASH] = indicator_hash
        # Still present in the feed, so it must not be deleted on exit.
        self.hash_of_indicators_to_delete.pop(indicator_hash, None)
        if indicator_hash not in self.existing_indicators_hash:
            self.indicators_to_be_sent.append(indicator)
        print(f"number of indicators to be sent: {len(self.indicators_to_be_sent)}")
        if len(self.indicators_to_be_sent) >= 100:
            if config.targetProduct in TARGET_PRODUCT_BULK_SUPPORT:
                self._post_to_graph()
            else:
                self._post_one_to_graph()

    def _update_headers_if_expired(self):
        """Re-authenticate once the cached token window has elapsed."""
        if self._get_timestamp() > self.headers_expiration_time:
            access_token = self._get_access_token(
                config.graph_auth[TENANT],
                config.graph_auth[CLIENT_ID],
                config.graph_auth[CLIENT_SECRET])
            # Bug fix: keep the same header shape as __enter__ (user-agent included)
            # and advance the expiry window, otherwise every subsequent call
            # re-authenticates once the first token window has passed.
            self.headers = {"Authorization": f"Bearer {access_token}", 'user-agent': 'MISP/1.0'}
            self.headers_expiration_time = self._get_timestamp() + 3500

    @staticmethod
    def _clear_screen():
        """Clear the console on both POSIX and Windows."""
        if os.name == 'posix':
            os.system('clear')
        else:
            os.system('cls')

    @staticmethod
    def _get_timestamp():
        """Current UNIX timestamp as float seconds."""
        return datetime.datetime.now().timestamp()

    def _get_total_indicators_sent(self):
        """Total responses processed so far (successes + errors)."""
        return self.error_count + self.success_count
| 45.288288 | 154 | 0.65924 | import requests
import config
import datetime
import os
import json
import copy
from constants import *
class RequestManager:
RJUST = 5
def __init__(self, total_indicators):
self.total_indicators = total_indicators
def __enter__(self):
try:
self.existing_indicators_hash_fd = open(EXISTING_INDICATORS_HASH_FILE_NAME, 'r+')
self.existing_indicators_hash = json.load(self.existing_indicators_hash_fd)
except (FileNotFoundError, json.decoder.JSONDecodeError):
self.existing_indicators_hash_fd = open(EXISTING_INDICATORS_HASH_FILE_NAME, 'w')
self.existing_indicators_hash = {}
try:
self.expiration_date_fd = open(EXPIRATION_DATE_FILE_NAME, 'r+')
self.expiration_date = self.expiration_date_fd.read()
except FileNotFoundError:
self.expiration_date_fd = open(EXPIRATION_DATE_FILE_NAME, 'w')
self.expiration_date = self._get_expiration_date_from_config()
if self.expiration_date <= datetime.datetime.utcnow().strftime('%Y-%m-%d'):
self.existing_indicators_hash = {}
self.expiration_date = self._get_expiration_date_from_config()
self.hash_of_indicators_to_delete = copy.deepcopy(self.existing_indicators_hash)
print(f"hash of indicators to delete {self.hash_of_indicators_to_delete}")
access_token = self._get_access_token(
config.graph_auth[TENANT],
config.graph_auth[CLIENT_ID],
config.graph_auth[CLIENT_SECRET])
self.headers = {"Authorization": f"Bearer {access_token}", 'user-agent': 'MISP/1.0'}
self.headers_expiration_time = self._get_timestamp() + 3500
self.success_count = 0
self.error_count = 0
self.del_count = 0
self.indicators_to_be_sent = []
self.indicators_to_be_sent_size = 0
self.start_time = self.last_batch_done_timestamp = self._get_timestamp()
if not os.path.exists(LOG_DIRECTORY_NAME):
os.makedirs(LOG_DIRECTORY_NAME)
return self
@staticmethod
def _get_expiration_date_from_config():
return (datetime.datetime.utcnow() + datetime.timedelta(config.days_to_expire)).strftime('%Y-%m-%d')
@staticmethod
def _get_access_token(tenant, client_id, client_secret):
data = {
CLIENT_ID: client_id,
'scope': 'https://graph.microsoft.com/.default',
CLIENT_SECRET: client_secret,
'grant_type': 'client_credentials'
}
access_token = requests.post(
f'https://login.microsoftonline.com/{tenant}/oauth2/v2.0/token',
data=data
).json()[ACCESS_TOKEN]
return access_token
@staticmethod
def read_tiindicators():
access_token = RequestManager._get_access_token(
config.graph_auth[TENANT],
config.graph_auth[CLIENT_ID],
config.graph_auth[CLIENT_SECRET])
print(json.dumps(requests.get(
GRAPH_TI_INDICATORS_URL,
headers={"Authorization": f"Bearer {access_token}"}
).json(), indent=2))
@staticmethod
def _get_request_hash(request):
return str(hash(frozenset({
k: str(v) for k, v in request.items()
if k != 'expirationDateTime' and k != 'lastReportedDateTime'
}.items())))
    def _log_post(self, response):
        """Record a batch response: update counters, remember sent hashes,
        write one JSON log file per indicator, and print progress."""
        self._clear_screen()
        cur_batch_success_count = cur_batch_error_count = 0
        print(f"response: {response}")
        if len(response['value']) > 0:
            for value in response['value']:
                if "Error" in value:
                    self.error_count += 1
                    cur_batch_error_count += 1
                    log_file_name = f"{self._get_datetime_now()}_error_{value[INDICATOR_REQUEST_HASH]}.json"
                else:
                    self.success_count += 1
                    cur_batch_success_count += 1
                    self.existing_indicators_hash[value[INDICATOR_REQUEST_HASH]] = value['id']
                    log_file_name = f"{self._get_datetime_now()}_{value[INDICATOR_REQUEST_HASH]}.json"
                json.dump(value, open(f'{LOG_DIRECTORY_NAME}/{log_file_name}', 'w'), indent=2)
        print('sending security indicators to Microsoft Graph Security\n')
        print(f'{self.total_indicators} indicators are parsed from misp events. Only those that do not exist in Microsoft Graph Security will be sent.\n')
        print(f"current batch indicators sent: {str(cur_batch_success_count + cur_batch_error_count).rjust(self.RJUST)}")
        print(f"current batch response success: {str(cur_batch_success_count).rjust(self.RJUST)}")
        print(f"current batch response error: {str(cur_batch_error_count).rjust(self.RJUST)}\n")
exc_val, exc_tb):
if config.targetProduct in TARGET_PRODUCT_BULK_SUPPORT:
self._post_to_graph()
else:
self._post_one_to_graph()
self._del_indicators_no_longer_exist()
self.expiration_date_fd.seek(0)
self.expiration_date_fd.write(self.expiration_date)
self.expiration_date_fd.truncate()
self.existing_indicators_hash_fd.seek(0)
json.dump(self.existing_indicators_hash, self.existing_indicators_hash_fd, indent=2)
self.existing_indicators_hash_fd.truncate()
self._print_summary()
def _del_indicators_no_longer_exist(self):
indicators = list(self.hash_of_indicators_to_delete.values())
self.del_count = len(indicators)
for i in range(0, len(indicators), 100):
request_body = {'value': indicators[i: i+100]}
response = requests.post(GRAPH_BULK_DEL_URL, headers=self.headers, json=request_body).json()
log_file_name = f"del_{self._get_datetime_now()}.json"
print(log_file_name)
print(json.dumps(response, indent=2))
print()
json.dump(response, open(f'{LOG_DIRECTORY_NAME}/{log_file_name}', 'w'), indent=2)
for hash_of_indicator_to_delete in self.hash_of_indicators_to_delete.keys():
self.existing_indicators_hash.pop(hash_of_indicator_to_delete, None)
def _print_summary(self):
self._clear_screen()
print('script finished running\n')
print(f"total indicators deleted: {str(self.del_count).rjust(self.RJUST)}")
def _post_one_to_graph(self):
for indicator in self.indicators_to_be_sent:
request_body = indicator
response = requests.post(GRAPH_TI_INDICATORS_URL, headers=self.headers, json=request_body).json()
self.indicators_to_be_sent = []
self._log_post(response)
def _post_to_graph(self):
request_body = {'value': self.indicators_to_be_sent}
response = requests.post(GRAPH_BULK_POST_URL, headers=self.headers, json=request_body).json()
self.indicators_to_be_sent = []
self._log_post(response)
    def handle_indicator(self, indicator):
        """Queue *indicator* unless an identical one was already sent;
        flush the queue in batches of 100."""
        self._update_headers_if_expired()
        indicator[EXPIRATION_DATE_TIME] = self.expiration_date
        indicator_hash = self._get_request_hash(indicator)
        indicator[INDICATOR_REQUEST_HASH] = indicator_hash
        # Still present in the feed, so it must not be deleted on exit.
        self.hash_of_indicators_to_delete.pop(indicator_hash, None)
        if indicator_hash not in self.existing_indicators_hash:
            self.indicators_to_be_sent.append(indicator)
        print(f"number of indicators to be sent: {len(self.indicators_to_be_sent)}")
        if len(self.indicators_to_be_sent) >= 100:
            if config.targetProduct in TARGET_PRODUCT_BULK_SUPPORT:
                self._post_to_graph()
            else:
                self._post_one_to_graph()
def _update_headers_if_expired(self):
if self._get_timestamp() > self.headers_expiration_time:
access_token = self._get_access_token(
config.graph_auth[TENANT],
config.graph_auth[CLIENT_ID],
config.graph_auth[CLIENT_SECRET])
self.headers = {"Authorization": f"Bearer {access_token}"}
@staticmethod
def _clear_screen():
if os.name == 'posix':
os.system('clear')
else:
os.system('cls')
@staticmethod
def _get_timestamp():
return datetime.datetime.now().timestamp()
def _get_total_indicators_sent(self):
return self.error_count + self.success_count
| true | true |
f7340747d29a1826d093f46d1c243b8b457589b9 | 396 | py | Python | rosters/migrations/0020_largerrulenames.py | gregcowell/roster-wizard | 8a594ba0a6b38fda34bcc924c70a37b1c1692ddf | [
"MIT"
] | 18 | 2019-10-27T02:53:57.000Z | 2021-12-30T06:02:12.000Z | rosters/migrations/0020_largerrulenames.py | gregcowell/roster-wizard | 8a594ba0a6b38fda34bcc924c70a37b1c1692ddf | [
"MIT"
] | 9 | 2020-06-06T02:24:55.000Z | 2021-09-11T03:54:05.000Z | rosters/migrations/0020_largerrulenames.py | gregcowell/roster-wizard | 8a594ba0a6b38fda34bcc924c70a37b1c1692ddf | [
"MIT"
] | 7 | 2020-04-20T03:25:56.000Z | 2021-09-09T06:50:24.000Z | # Generated by Django 2.2.3 on 2019-07-26 07:00
from django.db import migrations, models
class Migration(migrations.Migration):
    # Sets StaffRule.staff_rule_name to a CharField with max_length=40.
    dependencies = [
        ('rosters', '0019_leavedescription'),
    ]
    operations = [
        migrations.AlterField(
            model_name='staffrule',
            name='staff_rule_name',
            field=models.CharField(max_length=40),
        ),
    ]
| 20.842105 | 50 | 0.608586 |
from django.db import migrations, models
class Migration(migrations.Migration):
    # Sets StaffRule.staff_rule_name to a CharField with max_length=40.
    dependencies = [
        ('rosters', '0019_leavedescription'),
    ]
    operations = [
        migrations.AlterField(
            model_name='staffrule',
            name='staff_rule_name',
            field=models.CharField(max_length=40),
        ),
    ]
| true | true |
f7340855246935b040a51ded058fc0215f55b050 | 2,860 | py | Python | oneflow/compatible_single_client_python/test/xrt/test_softmax_grad.py | xcnick/oneflow | 7b786b27069dec35d2493256011e773988c91f56 | [
"Apache-2.0"
] | null | null | null | oneflow/compatible_single_client_python/test/xrt/test_softmax_grad.py | xcnick/oneflow | 7b786b27069dec35d2493256011e773988c91f56 | [
"Apache-2.0"
] | null | null | null | oneflow/compatible_single_client_python/test/xrt/test_softmax_grad.py | xcnick/oneflow | 7b786b27069dec35d2493256011e773988c91f56 | [
"Apache-2.0"
] | null | null | null | """
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import numpy as np
from oneflow.compatible import single_client as flow
config = flow.function_config()
def make_job(shape, axis, dtype=flow.float32):
    """Build a softmax-grad OneFlow job with XLA and TensorRT disabled (baseline)."""
    config.use_xla_jit(False)
    config.use_tensorrt(False)
    @flow.global_function(config)
    def softmax_grad_job(
        y=flow.FixedTensorDef(shape, dtype=dtype),
        dy=flow.FixedTensorDef(shape, dtype=dtype),
    ):
        return flow.nn.softmax_grad(y, dy, axis=axis)
    return softmax_grad_job
def make_xla_job(shape, axis, dtype=flow.float32):
    """Build the same softmax-grad job but with the XLA JIT enabled."""
    config.use_xla_jit(True)
    config.use_tensorrt(False)
    @flow.global_function(config)
    def xla_softmax_grad_job(
        y=flow.FixedTensorDef(shape, dtype=dtype),
        dy=flow.FixedTensorDef(shape, dtype=dtype),
    ):
        return flow.nn.softmax_grad(y, dy, axis=axis)
    return xla_softmax_grad_job
class TestSoftmaxGrad(unittest.TestCase):
    """Checks that softmax_grad agrees between the default and XLA backends."""
    def _test_body(self, y, dy, axis, dtype=np.float32):
        # NOTE(review): the `dtype` argument is accepted but the jobs are
        # always built with flow.float32 — confirm whether other dtypes
        # were intended here.
        f1 = make_job(y.shape, axis, dtype=flow.float32)
        f2 = make_xla_job(y.shape, axis, dtype=flow.float32)
        a = f1(y, dy).get()
        b = f2(y, dy).get()
        print("without xla: ", a)
        print("with xla", b)
        self.assertTrue(a.shape == b.shape)
        self.assertTrue(np.allclose(a.numpy(), b.numpy(), rtol=1e-03, atol=1e-05))
        flow.clear_default_session()
    def _test_ones_body(self, shape, axis, dtype=np.float32):
        # Deterministic all-ones smoke check.
        y = np.ones(shape, dtype=dtype)
        dy = np.ones(shape, dtype=dtype)
        self._test_body(y, dy, axis, dtype=dtype)
    def _test_random_body(self, shape, axis, dtype=np.float32):
        # Uniform [0, 1) random inputs.
        y = np.random.random(shape).astype(dtype)
        dy = np.random.random(shape).astype(dtype)
        self._test_body(y, dy, axis, dtype=dtype)
    def test_ones_input(self):
        self._test_ones_body((2, 5), axis=1)
        self._test_ones_body((2, 5), axis=-1)
        self._test_ones_body((1, 5, 2), axis=1)
        self._test_ones_body((1, 5, 2), axis=2)
    def test_random_input(self):
        self._test_random_body((2, 5), axis=1)
        self._test_random_body((2, 5), axis=-1)
        self._test_random_body((1, 5, 2), axis=1)
        self._test_random_body((1, 5, 2), axis=2)
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    unittest.main()
| 32.134831 | 82 | 0.676224 | import unittest
import numpy as np
from oneflow.compatible import single_client as flow
# Module-level function_config shared by both job factories below; each
# factory mutates its XLA/TensorRT flags before defining a job.
config = flow.function_config()
def make_job(shape, axis, dtype=flow.float32):
    """Build a softmax_grad job (XLA and TensorRT disabled) for shape/axis.

    NOTE(review): mutates the shared module-level ``config``; the inner
    function name is left unchanged because oneflow presumably keys jobs
    by function name — confirm before renaming.
    """
    config.use_xla_jit(False)
    config.use_tensorrt(False)
    @flow.global_function(config)
    def softmax_grad_job(
        y=flow.FixedTensorDef(shape, dtype=dtype),
        dy=flow.FixedTensorDef(shape, dtype=dtype),
    ):
        return flow.nn.softmax_grad(y, dy, axis=axis)
    return softmax_grad_job
def make_xla_job(shape, axis, dtype=flow.float32):
    """Build a softmax_grad job with XLA JIT enabled for shape/axis.

    NOTE(review): mutates the shared module-level ``config``; see make_job.
    """
    config.use_xla_jit(True)
    config.use_tensorrt(False)
    @flow.global_function(config)
    def xla_softmax_grad_job(
        y=flow.FixedTensorDef(shape, dtype=dtype),
        dy=flow.FixedTensorDef(shape, dtype=dtype),
    ):
        return flow.nn.softmax_grad(y, dy, axis=axis)
    return xla_softmax_grad_job
class TestSoftmaxGrad(unittest.TestCase):
    """Check softmax_grad output agrees with and without XLA enabled."""

    def _test_body(self, y, dy, axis, dtype=np.float32):
        """Run both jobs on (y, dy) and assert the outputs are close."""
        plain_job = make_job(y.shape, axis, dtype=flow.float32)
        xla_job = make_xla_job(y.shape, axis, dtype=flow.float32)
        plain_out = plain_job(y, dy).get()
        xla_out = xla_job(y, dy).get()
        print("without xla: ", plain_out)
        print("with xla", xla_out)
        self.assertTrue(plain_out.shape == xla_out.shape)
        self.assertTrue(
            np.allclose(plain_out.numpy(), xla_out.numpy(),
                        rtol=1e-03, atol=1e-05))
        flow.clear_default_session()

    def _test_ones_body(self, shape, axis, dtype=np.float32):
        """Compare the two jobs on all-ones inputs."""
        ones_y = np.ones(shape, dtype=dtype)
        ones_dy = np.ones(shape, dtype=dtype)
        self._test_body(ones_y, ones_dy, axis, dtype=dtype)

    def _test_random_body(self, shape, axis, dtype=np.float32):
        """Compare the two jobs on uniform-random inputs."""
        rand_y = np.random.random(shape).astype(dtype)
        rand_dy = np.random.random(shape).astype(dtype)
        self._test_body(rand_y, rand_dy, axis, dtype=dtype)

    def test_ones_input(self):
        """All-ones inputs over several shapes and axes."""
        for shape, axis in (
                ((2, 5), 1), ((2, 5), -1), ((1, 5, 2), 1), ((1, 5, 2), 2)):
            self._test_ones_body(shape, axis=axis)

    def test_random_input(self):
        """Random inputs over several shapes and axes."""
        for shape, axis in (
                ((2, 5), 1), ((2, 5), -1), ((1, 5, 2), 1), ((1, 5, 2), 2)):
            self._test_random_body(shape, axis=axis)
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    unittest.main()
| true | true |
f73408b2ad399bcc363a99b2f737dcef526cb8b2 | 5,233 | py | Python | common/base.py | wrcyyy/appTest | e59c531e7856b3fa495213be6efbf576b7a4caea | [
"MIT"
] | null | null | null | common/base.py | wrcyyy/appTest | e59c531e7856b3fa495213be6efbf576b7a4caea | [
"MIT"
] | null | null | null | common/base.py | wrcyyy/appTest | e59c531e7856b3fa495213be6efbf576b7a4caea | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
------------------------------------
@Project : uiTest
@Time : 2021/3/9 14:17
@Auth : wrc
@Email : wrcyyy@126.com
@File : base.py
@IDE : PyCharm
------------------------------------
"""
import logging
import os
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.common.keys import Keys
from utils.fileoperate import FileOperate
class BaseOperation:
    """Logged, fault-tolerant wrapper around a Selenium WebDriver.

    Element locators are (by, value) tuples, e.g. ("css_selector",
    "#username").  Interaction methods log success/failure instead of
    raising; lookup methods return None on failure.
    """

    def __init__(self, driver):
        """Store the driver and load settings from config/config.yml."""
        self.__config_info = FileOperate.read_yaml(
            os.path.join(os.path.dirname(os.path.dirname(__file__)), 'config', 'config.yml'))
        self.__driver = driver
        self.__timeout = 10          # explicit-wait timeout (seconds)
        self.__poll_frequency = 0.5  # explicit-wait polling interval (seconds)

    def clear_input_box(self, locator: tuple):
        """Clear the text of the input box located by *locator*."""
        element = self.find_elem(locator)
        try:
            element.clear()
            logging.info(f'输入框内容已清空')
        except Exception as e:
            logging.error(f'清除输入框失败!{locator},{e}')

    def open_url(self, url: str):
        """Navigate the browser to *url*, logging the outcome."""
        try:
            self.__driver.get(url)
            logging.info(f'成功打开:{url}')
        except Exception as e:
            logging.error(f'打开:{url}失败,错误信息:{e}')

    def find_elem(self, locator):
        """Locate a single element, waiting up to the configured timeout.

        Returns the element, or None when the lookup fails or *locator*
        is not a tuple.
        """
        if isinstance(locator, tuple):
            logging.info(f'正在使用{locator[0]}定位元素:{locator[1]}')
            try:
                elem = WebDriverWait(self.__driver, self.__timeout, self.__poll_frequency).until(
                    lambda driver: driver.find_element(*locator))
                return elem
            except Exception as e:
                logging.error(f"元素定位失败!{locator},{e}")
                return None
        else:
            logging.error('locator参数类型错误,示例:(By.xpath,"****")')

    def find_elements(self, locator):
        """Locate a list of elements, waiting up to the configured timeout.

        Returns the list, or None when the lookup fails or *locator* is
        not a tuple.
        """
        if isinstance(locator, tuple):
            logging.info(f'正在使用{locator[0]}定位元素:{locator[1]}')
            try:
                elements = WebDriverWait(self.__driver, self.__timeout, self.__poll_frequency).until(
                    lambda driver: driver.find_elements(*locator))
                return elements
            except Exception as e:
                logging.error(f"元素定位失败!{locator},{e}")
                return None
        else:
            logging.error('locator参数类型错误,示例:("css_selector","****")')

    def get_text(self, locator: tuple):
        """Return the element's text, or None when it cannot be read."""
        element = self.find_elem(locator)
        try:
            logging.info(f'获取元素text成功:{element.text}')
            return element.text
        except Exception as e:
            logging.error(f'获取元素text失败!{locator},{e}')
            return None

    def get_placeholder_info(self, locator: tuple):
        """Return the element's placeholder attribute, or None on failure."""
        element = self.find_elem(locator)
        try:
            logging.info(f'获取placeholder成功:{element.get_attribute("placeholder")}')
            return element.get_attribute("placeholder")
        except Exception as e:
            logging.error(f'获取placeholder失败,{locator},{e}')
            return None

    def open_login_page(self):
        """Open the login URL taken from the loaded config file."""
        self.open_url(self.__config_info['test_server_info']['url'])

    def send_key(self, locator: tuple, info: str):
        """Type *info* into the element located by *locator*."""
        element = self.find_elem(locator)
        try:
            element.send_keys(info)
            logging.info(f'向{locator}输入{info}成功')
        except Exception as e:
            logging.error(f'向{locator}输入{info}失败,{e}')

    def click(self, locator: tuple):
        """Click the element located by *locator*."""
        element = self.find_elem(locator)
        try:
            element.click()
            logging.info(f'点击元素成功:{locator}')
        except Exception as e:
            logging.error(f'点击元素失败!{locator},{e}')

    def save_screenshot(self, file_path: str):
        """Save a screenshot of the current page to *file_path*."""
        try:
            self.__driver.save_screenshot(file_path)
            logging.info(f'截图已保存至:{file_path}')
        except Exception as e:
            logging.error(f'截图保存失败!{file_path},{e}')

    def switch_frame(self, locator: tuple):
        """Switch the driver's context into the frame located by *locator*."""
        element = self.find_elem(locator)
        try:
            self.__driver.switch_to.frame(element)
            logging.info(f'切换Frame成功{locator}')
        except Exception as e:
            logging.error(f'切换Frame失败!{locator},{e}')

    def switch_handler(self, index: int):
        """Switch to the browser window/tab at *index* (0-based)."""
        all_handlers = self.__driver.window_handles
        try:
            # BUG FIX: the Selenium API is switch_to.window (singular);
            # switch_to.windows raised AttributeError on every call and
            # the broad except silently swallowed it.
            self.__driver.switch_to.window(all_handlers[index])
        except Exception as e:
            logging.error(f'切换至窗口{index}失败,{e}')
| 29.398876 | 101 | 0.540417 |
import logging
import os
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.common.keys import Keys
from utils.fileoperate import FileOperate
class BaseOperation:
    """Logged, fault-tolerant wrapper around a Selenium WebDriver.

    Element locators are (by, value) tuples, e.g. ("css_selector",
    "#username").  Interaction methods log success/failure instead of
    raising; lookup methods return None on failure.
    """

    def __init__(self, driver):
        """Store the driver and load settings from config/config.yml."""
        self.__config_info = FileOperate.read_yaml(
            os.path.join(os.path.dirname(os.path.dirname(__file__)), 'config', 'config.yml'))
        self.__driver = driver
        self.__timeout = 10          # explicit-wait timeout (seconds)
        self.__poll_frequency = 0.5  # explicit-wait polling interval (seconds)

    def clear_input_box(self, locator: tuple):
        """Clear the text of the input box located by *locator*."""
        element = self.find_elem(locator)
        try:
            element.clear()
            logging.info(f'输入框内容已清空')
        except Exception as e:
            logging.error(f'清除输入框失败!{locator},{e}')

    def open_url(self, url: str):
        """Navigate the browser to *url*, logging the outcome."""
        try:
            self.__driver.get(url)
            logging.info(f'成功打开:{url}')
        except Exception as e:
            logging.error(f'打开:{url}失败,错误信息:{e}')

    def find_elem(self, locator):
        """Locate a single element, waiting up to the configured timeout.

        Returns the element, or None when the lookup fails or *locator*
        is not a tuple.
        """
        if isinstance(locator, tuple):
            logging.info(f'正在使用{locator[0]}定位元素:{locator[1]}')
            try:
                elem = WebDriverWait(self.__driver, self.__timeout, self.__poll_frequency).until(
                    lambda driver: driver.find_element(*locator))
                return elem
            except Exception as e:
                logging.error(f"元素定位失败!{locator},{e}")
                return None
        else:
            logging.error('locator参数类型错误,示例:(By.xpath,"****")')

    def find_elements(self, locator):
        """Locate a list of elements, waiting up to the configured timeout.

        Returns the list, or None when the lookup fails or *locator* is
        not a tuple.
        """
        if isinstance(locator, tuple):
            logging.info(f'正在使用{locator[0]}定位元素:{locator[1]}')
            try:
                elements = WebDriverWait(self.__driver, self.__timeout, self.__poll_frequency).until(
                    lambda driver: driver.find_elements(*locator))
                return elements
            except Exception as e:
                logging.error(f"元素定位失败!{locator},{e}")
                return None
        else:
            logging.error('locator参数类型错误,示例:("css_selector","****")')

    def get_text(self, locator: tuple):
        """Return the element's text, or None when it cannot be read."""
        element = self.find_elem(locator)
        try:
            logging.info(f'获取元素text成功:{element.text}')
            return element.text
        except Exception as e:
            logging.error(f'获取元素text失败!{locator},{e}')
            return None

    def get_placeholder_info(self, locator: tuple):
        """Return the element's placeholder attribute, or None on failure."""
        element = self.find_elem(locator)
        try:
            logging.info(f'获取placeholder成功:{element.get_attribute("placeholder")}')
            return element.get_attribute("placeholder")
        except Exception as e:
            logging.error(f'获取placeholder失败,{locator},{e}')
            return None

    def open_login_page(self):
        """Open the login URL taken from the loaded config file."""
        self.open_url(self.__config_info['test_server_info']['url'])

    def send_key(self, locator: tuple, info: str):
        """Type *info* into the element located by *locator*."""
        element = self.find_elem(locator)
        try:
            element.send_keys(info)
            logging.info(f'向{locator}输入{info}成功')
        except Exception as e:
            logging.error(f'向{locator}输入{info}失败,{e}')

    def click(self, locator: tuple):
        """Click the element located by *locator*."""
        element = self.find_elem(locator)
        try:
            element.click()
            logging.info(f'点击元素成功:{locator}')
        except Exception as e:
            logging.error(f'点击元素失败!{locator},{e}')

    def save_screenshot(self, file_path: str):
        """Save a screenshot of the current page to *file_path*."""
        try:
            self.__driver.save_screenshot(file_path)
            logging.info(f'截图已保存至:{file_path}')
        except Exception as e:
            logging.error(f'截图保存失败!{file_path},{e}')

    def switch_frame(self, locator: tuple):
        """Switch the driver's context into the frame located by *locator*."""
        element = self.find_elem(locator)
        try:
            self.__driver.switch_to.frame(element)
            logging.info(f'切换Frame成功{locator}')
        except Exception as e:
            logging.error(f'切换Frame失败!{locator},{e}')

    def switch_handler(self, index: int):
        """Switch to the browser window/tab at *index* (0-based)."""
        all_handlers = self.__driver.window_handles
        try:
            # BUG FIX: the Selenium API is switch_to.window (singular);
            # switch_to.windows raised AttributeError on every call and
            # the broad except silently swallowed it.
            self.__driver.switch_to.window(all_handlers[index])
        except Exception as e:
            logging.error(f'切换至窗口{index}失败,{e}')
| true | true |
f734093a5c93fef3c46192f28befb2535bc7bd6b | 23,429 | py | Python | applications/zcomx/tests/test_job_queue.py | zcomx/zco.mx | 70a7372af5787c2e4dea14b25bab0bbb2b959881 | [
"BSD-3-Clause"
] | null | null | null | applications/zcomx/tests/test_job_queue.py | zcomx/zco.mx | 70a7372af5787c2e4dea14b25bab0bbb2b959881 | [
"BSD-3-Clause"
] | null | null | null | applications/zcomx/tests/test_job_queue.py | zcomx/zco.mx | 70a7372af5787c2e4dea14b25bab0bbb2b959881 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_job_queue.py
Test suite for zcomx/modules/job_queue.py
"""
import datetime
import os
import subprocess
import time
import unittest
from gluon import *
from applications.zcomx.modules.job_queue import \
CLIOption, \
Daemon, \
DaemonSignalError, \
IgnorableJob, \
InvalidCLIOptionError, \
InvalidJobOptionError, \
InvalidStatusError, \
Job, \
JobHistory, \
JobQueuer, \
Queue, \
QueueEmptyError, \
QueueLockedError, \
QueueLockedExtendedError, \
Queuer, \
Requeuer
from applications.zcomx.modules.tests.runner import LocalTestCase
from applications.zcomx.modules.tests.trackers import TableTracker
# C0111: *Missing docstring*
# R0904: *Too many public methods (%s/%s)*
# pylint: disable=C0111,R0904
# Scratch directory for lock files and generated scripts used by the tests.
TMP_DIR = '/tmp/test_suite/job_queue'
# exist_ok avoids the check-then-create race of the former
# `if not os.path.exists(...)` guard.
os.makedirs(TMP_DIR, exist_ok=True)
class SubQueuer(Queuer):
    """Queuer subclass used only by this test module."""
    class_factory_id = 'some_program'
    program = 'some_program.py'
    default_job_options = {
        'priority': 1,
        'status': 'd',
    }
    default_cli_options = {
        '-a': False,
        '-b': True,
        '-c': 'ccc',
        '-d': ['d1', 'd2'],
    }
    valid_cli_options = ['-a', '-b', '-c', '-d', '-e']
    queue_class = Queue

    def __init__(self, tbl, job_options=None, cli_options=None,
                 cli_args=None, delay_seconds=0):
        """Forward all arguments unchanged to Queuer."""
        super().__init__(
            tbl,
            job_options=job_options,
            cli_options=cli_options,
            cli_args=cli_args,
            delay_seconds=delay_seconds,
        )
class TestCLIOption(LocalTestCase):
    """Tests for the CLIOption class."""

    def test____init__(self):
        """A CLIOption can be constructed from an option name alone."""
        self.assertTrue(CLIOption('-a'))

    def test____str__(self):
        """str() renders option/value pairs with shell-safe quoting."""
        expectations = [
            # (option, value, expected rendering)
            ('-a', None, ''),
            ('-a', False, ''),
            ('-a', True, '-a'),
            ('--action', True, '--action'),
            ('-a', 'list', '-a list'),
            ('-a', 111, '-a 111'),
            ('-a', ['opt1'], '-a opt1'),
            ('-a', ['opt1', 'opt2'], '-a opt1 -a opt2'),
            (
                '-a', """my "list" of 'items'""",
                '-a \'my "list" of \'"\'"\'items\'"\'"\'\''
            ),
            (
                '-a', ["""a'b"c""", """d"e'f"""],
                '-a \'a\'"\'"\'b"c\' -a \'d"e\'"\'"\'f\''
            ),
        ]
        for option, value, expected in expectations:
            self.assertEqual(str(CLIOption(option, value=value)), expected)
class TestDaemon(LocalTestCase):
    """Tests for the Daemon class (pid-file bookkeeping)."""
    # Daemon name and an isolated pid file under the test tmp dir.
    name = 'zco_queued'
    pid_filename = '/tmp/test_suite/job_queue/pid'
    def test____init__(self):
        """pid_filename defaults to /tmp/<name>/pid and can be overridden."""
        daemon = Daemon(self.name)
        self.assertEqual(daemon.pid_filename, '/tmp/zco_queued/pid')
        daemon = Daemon(self.name, pid_filename='/tmp/testing')
        self.assertEqual(daemon.pid_filename, '/tmp/testing')
    def test__read_pid(self):
        """read_pid parses 'key: value' lines into a dict of strings."""
        daemon = Daemon(self.name, self.pid_filename)
        open(self.pid_filename, 'w').close()  # truncate to an empty file
        self.assertEqual(daemon.read_pid(), {})
        with open(self.pid_filename, 'w') as f:
            f.write("a: 1\n")
            f.write("first name: John\n")
            f.write("start time: 2000-01-01 12:59:59\n")
            f.write("nada: \n")
            f.write("empty:\n")
        self.assertEqual(daemon.read_pid(), {
            'a': '1',
            'first name': 'John',
            'start time': '2000-01-01 12:59:59',
            'nada': '',
            'empty': '',
        })
    def test__signal(self):
        """signal raises DaemonSignalError when no pid info exists.

        The success path issues an os.kill() and is deliberately not
        exercised here.
        """
        daemon = Daemon(self.name, self.pid_filename)
        self.assertRaises(DaemonSignalError, daemon.signal)
    def test__update_pid(self):
        """update_pid refreshes the 'last' field, preserving other fields."""
        daemon = Daemon(self.name, self.pid_filename)
        open(self.pid_filename, 'w').close()  # truncate to an empty file
        daemon.update_pid()
        params = daemon.read_pid()
        self.assertEqual(list(params.keys()), ['last'])
        data = {
            'pid': '1234',
            'start': '2003-03-03 03:30:33',
            'last': '',
        }
        daemon.write_pid(data)
        daemon.update_pid()
        params = daemon.read_pid()
        self.assertEqual(sorted(params.keys()), ['last', 'pid', 'start'])
        self.assertEqual(params['pid'], data['pid'])
        self.assertEqual(params['start'], data['start'])
        self.assertNotEqual(params['last'], data['last'])
    def test__write_pid(self):
        """write_pid round-trips a params dict through the pid file."""
        daemon = Daemon(self.name, self.pid_filename)
        params = {}
        daemon.write_pid(params)
        self.assertEqual(daemon.read_pid(), {})
        params = {
            'b': '2',
            'last name': 'Smith',
            'start time': '2002-02-02 13:58:58',
            'nothing': '',
            'empty_str': '',
        }
        daemon.write_pid(params)
        self.assertEqual(daemon.read_pid(), params)
class TestIgnorableJob(LocalTestCase):
    """Tests for IgnorableJob.is_ignored."""
    def test__is_ignored(self):
        """Exercise is_ignored over the ignorable flag and start-time window."""
        now = datetime.datetime.now()
        nine_minutes_ago = now - datetime.timedelta(minutes=9)
        eleven_minutes_ago = now - datetime.timedelta(minutes=11)
        command = 'test__is_ignored'
        priority = 10
        data = dict(
            command=command,
            priority=priority,
            start=now,
            status='d',
            ignorable=True,
        )
        reset_data = dict(data)
        def reset(job):
            # Restore the job to its pristine state between sub-tests.
            return IgnorableJob.from_updated(job, reset_data)
        # Two identical jobs are added; presumably the duplicate is what
        # makes job_1 ignorable — confirm against IgnorableJob.is_ignored.
        job_1 = IgnorableJob.from_add(data)
        self._objects.append(job_1)
        job_2 = IgnorableJob.from_add(data)
        self._objects.append(job_2)
        job_1 = reset(job_1)
        job_2 = reset(job_2)
        self.assertTrue(job_1.is_ignored(status='d'))
        # is_ignored follows the ignorable flag directly.
        for ignorable in [True, False]:
            data = dict(ignorable=ignorable)
            job_1 = IgnorableJob.from_updated(job_1, data)
            self.assertEqual(job_1.is_ignored(status='d'), ignorable)
        job_1 = reset(job_1)
        # Vary the start time relative to the (default 10 min) window.
        tests = [
            # (job_1.start, start_limit_seconds, expect)
            (now, None, True),
            (nine_minutes_ago, None, True),
            (eleven_minutes_ago, None, False),
            (nine_minutes_ago, 539, False),
            (nine_minutes_ago, 540, False),
            (nine_minutes_ago, 541, True),
        ]
        for t in tests:
            data = dict(start=t[0])
            job_1 = IgnorableJob.from_updated(job_1, data)
            if t[1] is None:
                self.assertEqual(job_1.is_ignored(status='d'), t[2])
            else:
                self.assertEqual(
                    job_1.is_ignored(status='d', start_limit_seconds=t[1]),
                    t[2]
                )
class TestJob(LocalTestCase):
    """Job is a plain Record subclass; no behavior beyond Record to test."""
    pass
class TestJobHistory(LocalTestCase):
    """Tests for the JobHistory record class."""

    def test_init__(self):
        """JobHistory.from_query returns a record for the job_history table."""
        self.assertTrue(JobHistory.from_query(db.job_history))
class TestJobQueuer(LocalTestCase):
    """Tests for the JobQueuer record class."""

    def test_init__(self):
        """JobQueuer.from_query finds the 'search_prefetch' queuer row."""
        self.assertTrue(
            JobQueuer.from_query(db.job_queuer.code == 'search_prefetch'))
class TestQueue(LocalTestCase):
    """Tests for the Queue class (job table, lock files, running jobs)."""
    @classmethod
    def clear_queue(cls):
        """Delete all job records and commit."""
        db(db.job.id > 0).delete()
        db.commit()
    def test____init__(self):
        """A Queue can be instantiated on the job table."""
        queue = Queue(db.job)
        self.assertTrue(queue)
    def test__add_job(self):
        """add_job stores a record and runs the pre/post hooks."""
        queue = Queue(db.job)
        TestQueue.clear_queue()
        self.assertEqual(len(queue.jobs()), 0)
        now = datetime.datetime.now()
        job_data = dict(
            command='pwd',
            priority=1,
            start=now,
        )
        ret = queue.add_job(job_data)
        self._objects.append(ret)
        self.assertEqual(ret.command, job_data['command'])
        self.assertTrue(ret.id > 0)
        self.assertEqual(len(queue.jobs()), 1)
        # Test pre- and post- processing hooks.
        class MyQueue(Queue):
            """Queue subclass for testing"""
            def __init__(self, tbl):
                Queue.__init__(self, tbl)
                self.trace = []
            def pre_add_job(self):
                """Test override."""
                self.trace.append('pre')
            def post_add_job(self):
                """Test override."""
                self.trace.append('post')
        my_queue = MyQueue(db.job)
        TestQueue.clear_queue()
        self.assertEqual(len(my_queue.jobs()), 0)
        ret = my_queue.add_job(job_data)
        self._objects.append(ret)
        self.assertTrue(ret.id > 0)
        self.assertEqual(my_queue.trace, ['pre', 'post'])
    def test__job_generator(self):
        """job_generator yields jobs highest priority first until empty."""
        queue = Queue(db.job)
        gen = queue.job_generator()
        # No jobs
        self.assertRaises(StopIteration, gen.__next__)
        job_data = [
            # (command, start, priority, status)
            ('do_a', '2010-01-01 10:00:00', 1, 'a'),
            ('do_b', '2010-01-01 10:00:00', 5, 'a'),
            ('do_c', '2010-01-01 10:00:00', 9, 'a'),
        ]
        all_jobs = []
        for j in job_data:
            job = queue.add_job(
                dict(command=j[0], start=j[1], priority=j[2], status=j[3])
            )
            all_jobs.append(job)
        gen = queue.job_generator()
        job = next(gen)
        self.assertEqual(job.command, 'do_c')
        # Each job is deleted once yielded so the generator advances.
        all_jobs[2].delete()
        job = next(gen)
        self.assertEqual(job.command, 'do_b')
        all_jobs[1].delete()
        job = next(gen)
        self.assertEqual(job.command, 'do_a')
        all_jobs[0].delete()
        self.assertRaises(StopIteration, gen.__next__)
        # Cleanup; jobs already deleted raise LookupError.
        for j in all_jobs:
            try:
                j.delete()
            except LookupError:
                pass
        self.assertEqual(queue.stats(), {})
    def test__jobs(self):
        """jobs() honours the query, orderby and limitby arguments."""
        # Add a new 'z' status to test with.
        db.job.status.requires = IS_IN_SET(['a', 'd', 'p', 'z'])
        queue = Queue(db.job)
        TestQueue.clear_queue()
        self.assertEqual(len(queue.jobs()), 0)
        job_data = [
            # (start, priority, status)
            # Do not use status='a' or status='p' or jobs will be run.
            ('2010-01-01 10:00:00', 0, 'z'),
            ('2010-01-01 10:00:00', 0, 'd'),
            ('2010-01-01 10:00:01', -1, 'z'),
            ('2010-01-01 10:00:01', -1, 'd'),
            ('2010-01-01 10:00:02', 1, 'z'),
            ('2010-01-01 10:00:02', 1, 'd'),
        ]
        all_jobs = []
        for j in job_data:
            job_d = dict(command='pwd', start=j[0], priority=j[1], status=j[2])
            job = Job.from_add(job_d)
            self._objects.append(job)
            all_jobs.append(job)
        job_set = queue.jobs()
        self.assertEqual(len(job_set), 6)
        self.assertEqual(job_set, all_jobs)
        # Test query
        query = (db.job.status == 'z')
        job_set = queue.jobs(query=query)
        self.assertEqual(len(job_set), 3)
        self.assertEqual(
            job_set,
            [all_jobs[0], all_jobs[2], all_jobs[4]]
        )
        query = (db.job.status == 'd') & \
            (db.job.start <= '2010-01-01 10:00:01')
        job_set = queue.jobs(query=query)
        self.assertEqual(len(job_set), 2)
        self.assertEqual(
            job_set,
            [all_jobs[1], all_jobs[3]]
        )
        # Test orderby
        # Orderby priority ASC
        query = (db.job.status == 'z')
        job_set = queue.jobs(query=query, orderby=db.job.priority)
        self.assertEqual(len(job_set), 3)
        self.assertEqual(
            job_set,
            [all_jobs[2], all_jobs[0], all_jobs[4]]
        )
        # Orderby priority DESC
        query = (db.job.status == 'z')
        job_set = queue.jobs(query=query, orderby=~db.job.priority)
        self.assertEqual(len(job_set), 3)
        self.assertEqual(
            job_set,
            [all_jobs[4], all_jobs[0], all_jobs[2]]
        )
        # Test limitby
        # Highest priority job
        query = (db.job.status == 'z')
        job_set = queue.jobs(query=query, orderby=~db.job.priority, limitby=1)
        self.assertEqual(len(job_set), 1)
        self.assertEqual(job_set, [all_jobs[4]])
    def test__lock(self):
        """lock/unlock create and remove the lock file; re-locking raises."""
        queue = Queue(db.job)
        # Test lock using default lock file. This test only works if the queue
        # is not currently locked by an outside program.
        if os.path.exists(queue.lock_filename):
            os.unlink(queue.lock_filename)
        self.assertFalse(os.path.exists(queue.lock_filename))
        queue.lock()
        self.assertTrue(os.path.exists(queue.lock_filename))
        queue.unlock()
        self.assertFalse(os.path.exists(queue.lock_filename))
        # Test lock with custom filename.
        lock_file = os.path.join(TMP_DIR, 'test__lock.pid')
        if os.path.exists(lock_file):
            os.unlink(lock_file)
        self.assertFalse(os.path.exists(lock_file))
        queue.lock(filename=lock_file)
        self.assertTrue(os.path.exists(lock_file))
        # Test raise QueueLockedError
        self.assertRaises(QueueLockedError, queue.lock, filename=lock_file)
        # Test raise QueueLockedExtendedError
        time.sleep(2)
        # Lock period < extended seconds, raises QueueLockedError
        self.assertRaises(
            QueueLockedError,
            queue.lock,
            filename=lock_file,
            extended_seconds=9999
        )
        # Lock period > extended seconds, raises QueueLockedExtendedError
        self.assertRaises(
            QueueLockedExtendedError,
            queue.lock,
            filename=lock_file,
            extended_seconds=1
        )
        queue.unlock(filename=lock_file)
        self.assertFalse(os.path.exists(lock_file))
    def test__post_add_job(self):
        # See test__add_job
        pass
    def test__pre_add_job(self):
        # See test__add_job
        pass
    def test__run_job(self):
        """run_job executes the job's command, passing CLI args through."""
        queue = Queue(db.job)
        def do_run(job):
            # Return 1 when the job command exits non-zero, else 0.
            try:
                queue.run_job(job)
            except subprocess.CalledProcessError:
                return 1
            else:
                return 0
        job = Job(dict(command=None, status='a'))
        # No command defined, should fail.
        self.assertFalse(do_run(job))
        tmp_file = os.path.join(TMP_DIR, 'test__run_output.txt')
        text = 'Hello World!'
        # Helper script: writes the text and its own argv to tmp_file.
        script = """
#!/usr/bin/env python
def main():
    import sys
    with open('{file}', 'w') as f:
        f.write("{text}")
        f.write("\\n")
        for c, arg in enumerate(sys.argv):
            if c == 0:
                continue
            f.write(str(c) + ': ' + arg + "\\n")
if __name__ == '__main__':
    main()
""".format(file=tmp_file, text=text)
        script_name = os.path.join(TMP_DIR, 'test__run.py')
        with open(script_name, 'w') as f:
            f.write(script.strip())
        os.chmod(script_name, 0o700)
        # Test without args or options
        job.command = script_name
        self.assertEqual(do_run(job), 0)
        expect = """Hello World!
"""
        got = ''
        with open(tmp_file, 'r') as f:
            got = f.read()
        self.assertEqual(got, expect)
        # Test with args or options
        job.command = "{script} -v -a delete 123".format(script=script_name)
        self.assertEqual(do_run(job), 0)
        expect = """Hello World!
1: -v
2: -a
3: delete
4: 123
"""
        got = ''
        with open(tmp_file, 'r') as f:
            got = f.read()
        self.assertEqual(got, expect)
    def test__set_job_status(self):
        """set_job_status updates the status; invalid codes raise."""
        queue = Queue(db.job)
        job = self.add(Job, dict(command='pwd', status='d'))
        self.assertEqual(job.status, 'd')
        for status in ['a', 'd', 'p']:
            got = queue.set_job_status(job, status)
            self.assertEqual(got.status, status)
        # Invalid status
        self.assertRaises(InvalidStatusError, queue.set_job_status, job, 'z')
    def test__stats(self):
        """stats() returns a count of jobs per status."""
        queue = Queue(db.job)
        TestQueue.clear_queue()
        self.assertEqual(len(queue.jobs()), 0)
        self.add(Job, dict(status='a'))
        self.add(Job, dict(status='a'))
        self.add(Job, dict(status='d'))
        self.add(Job, dict(status='p'))
        self.assertEqual(queue.stats(), {'a': 2, 'd': 1, 'p': 1})
    def test__top_job(self):
        """top_job returns the highest-priority job whose start is due."""
        queue = Queue(db.job)
        TestQueue.clear_queue()
        self.assertEqual(len(queue.jobs()), 0)
        self.assertRaises(QueueEmptyError, queue.top_job)
        jobs = [
            # (command, start, priority)
            ('do_a', '2010-01-01 10:00:00', 0),
            ('do_b', '2010-01-01 10:00:01', -1),
            ('do_c', '2010-01-01 10:00:02', 1),
            # Future start; must not be selected.
            ('do_d', '2999-12-31 23:59:59', 1),
        ]
        for j in jobs:
            self.add(Job, dict(command=j[0], start=j[1], priority=j[2]))
        job = queue.top_job()
        self.assertEqual(job.command, 'do_c')
    def test__unlock(self):
        # See test__lock()
        pass
class TestQueuer(LocalTestCase):
    """Tests for the Queuer class."""
    def test____init__(self):
        """A Queuer can be instantiated; class attributes are as expected."""
        queuer = Queuer(db.job)
        self.assertTrue(queuer)
        self.assertEqual(queuer.queue_class, Queue)
        self.assertEqual(Queuer.bin_path, 'applications/zcomx/private/bin')
    def test__command(self):
        """command() assembles program + options + args with shell quoting."""
        queuer = SubQueuer(db.job)
        self.assertEqual(
            queuer.command(), 'some_program.py -b -c ccc -d d1 -d d2')
        queuer = SubQueuer(db.job, cli_args=['file', 'arg2'])
        self.assertEqual(
            queuer.command(),
            'some_program.py -b -c ccc -d d1 -d d2 file arg2'
        )
        # Disable defaults
        queuer = SubQueuer(
            db.job,
            cli_options={
                '-a': False,
                '-b': False,
                '-c': False,
                '-d': False,
            },
            cli_args=['file']
        )
        self.assertEqual(queuer.command(), 'some_program.py file')
        # An option outside valid_cli_options raises.
        invalid_cli_options = {'-x': 'invalid'}
        queuer = SubQueuer(db.job, cli_options=invalid_cli_options)
        self.assertRaises(InvalidCLIOptionError, queuer.command)
        # Handle quotes
        queuer = SubQueuer(
            db.job,
            cli_options={
                '-a': False,
                '-b': False,
                '-c': False,
                '-d': False,
                '-e': """A 'B' "C" D""",
            },
            cli_args=['file'],
        )
        self.assertEqual(
            queuer.command(),
            'some_program.py -e \'A \'"\'"\'B\'"\'"\' "C" D\' file'
        )
        queuer = SubQueuer(
            db.job,
            cli_options={
                '-a': False,
                '-b': False,
                '-c': False,
            },
            cli_args=["""A 'B' "C" D"""],
        )
        self.assertEqual(
            queuer.command(),
            'some_program.py -d d1 -d d2 \'A \'"\'"\'B\'"\'"\' "C" D\''
        )
    def test__job_data(self):
        """job_data() merges job options, command and start/queued times."""
        then = datetime.datetime.now()
        data = SubQueuer(db.job).job_data()
        self.assertEqual(data.job_queuer_id, 0)
        self.assertEqual(data.status, 'd')
        self.assertEqual(data.priority, 1)
        self.assertEqual(
            data.command,
            'some_program.py -b -c ccc -d d1 -d d2'
        )
        # start defaults to "now" (within this call's second).
        self.assertTrue(data.start >= then)
        diff = data.start - then
        self.assertTrue(diff.total_seconds() >= 0)
        self.assertTrue(diff.total_seconds() < 1)
        self.assertEqual(data.start, data.queued_time)
        # A job option that is not a job table field raises.
        invalid_job_options = {'fake_field': 'value'}
        queuer = SubQueuer(db.job, job_options=invalid_job_options)
        self.assertRaises(InvalidJobOptionError, queuer.job_data)
        # Test delay_seconds
        then = datetime.datetime.now()
        data = SubQueuer(db.job, delay_seconds=100).job_data()
        self.assertTrue(data.start > then)
        diff = data.start - then
        self.assertTrue(diff.total_seconds() >= 100)
        self.assertTrue(diff.total_seconds() < 101)
    def test__queue(self):
        """queue() creates a new job record with the assembled command."""
        def get_job_ids():
            # All job record ids, sorted.
            return sorted([x.id for x in db(db.job).select(db.job.id)])
        job_ids = get_job_ids()
        queuer = SubQueuer(db.job)
        new_job = queuer.queue()
        self.assertEqual(
            new_job.command, 'some_program.py -b -c ccc -d d1 -d d2')
        self.assertTrue(new_job.id not in job_ids)
        job_ids = get_job_ids()
        self.assertTrue(new_job.id in job_ids)
        # Register for cleanup by the test framework.
        job = Job.from_id(new_job.id)
        self._objects.append(job)
class TestRequeuer(LocalTestCase):
    """Tests for the Requeuer class."""
    def test____init__(self):
        """Requeues default to 0 done / 1 maximum."""
        queuer = SubQueuer(db.job)
        requeuer = Requeuer(queuer)
        self.assertTrue(requeuer)
        self.assertEqual(requeuer.requeues, 0)
        self.assertEqual(requeuer.max_requeues, 1)
    def test__requeue(self):
        """requeue() queues a new job with incremented requeue counters."""
        # SubQueuer does not accept the --requeues/--max-requeues options,
        # so requeueing it raises.
        sub_queuer = SubQueuer(db.job)
        requeuer = Requeuer(sub_queuer)
        self.assertRaises(InvalidCLIOptionError, requeuer.requeue)
        class ReQueuer(SubQueuer):
            """SubQueuer variant that accepts the requeue options."""
            valid_cli_options = ['-a', '-c', '--requeues', '--max-requeues']
            default_cli_options = {
                '-a': True,
                '-c': 'ccc',
            }
        queuer = ReQueuer(db.job)
        requeuer = Requeuer(queuer)
        tracker = TableTracker(db.job)
        job = requeuer.requeue()
        # The job record is new (created by this requeue call).
        self.assertFalse(tracker.had(job))
        self.assertTrue(tracker.has(job))
        self._objects.append(job)
        self.assertEqual(
            job.command,
            'some_program.py --max-requeues 1 --requeues 1 -a -c ccc'
        )
        # Custom counters: requeues is incremented, max is passed through.
        requeuer = Requeuer(queuer, requeues=33, max_requeues=99)
        tracker = TableTracker(db.job)
        job = requeuer.requeue()
        self.assertFalse(tracker.had(job))
        self.assertTrue(tracker.has(job))
        self._objects.append(job)
        self.assertEqual(
            job.command,
            'some_program.py --max-requeues 99 --requeues 34 -a -c ccc'
        )
        # At or past the maximum, requeue raises StopIteration.
        requeuer = Requeuer(queuer, requeues=99, max_requeues=99)
        self.assertRaises(StopIteration, requeuer.requeue)
        requeuer = Requeuer(queuer, requeues=100, max_requeues=99)
        self.assertRaises(StopIteration, requeuer.requeue)
    def test__requeue_cli_options(self):
        """requeue_cli_options() returns the incremented counter options."""
        requeuer = Requeuer(Queuer(db.job))
        self.assertEqual(
            requeuer.requeue_cli_options(),
            {
                '--requeues': 1,
                '--max-requeues': 1,
            }
        )
        requeuer = Requeuer(Queuer(db.job), requeues=33, max_requeues=99)
        self.assertEqual(
            requeuer.requeue_cli_options(),
            {
                '--requeues': 34,
                '--max-requeues': 99,
            }
        )
def setUpModule():
    """Set up the web2py environment for all tests in this module."""
    # C0103: *Invalid name "%%s" (should match %%s)*
    # pylint: disable=C0103
    LocalTestCase.set_env(globals())
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
| 29.544767 | 79 | 0.541679 |
import datetime
import os
import subprocess
import time
import unittest
from gluon import *
from applications.zcomx.modules.job_queue import \
CLIOption, \
Daemon, \
DaemonSignalError, \
IgnorableJob, \
InvalidCLIOptionError, \
InvalidJobOptionError, \
InvalidStatusError, \
Job, \
JobHistory, \
JobQueuer, \
Queue, \
QueueEmptyError, \
QueueLockedError, \
QueueLockedExtendedError, \
Queuer, \
Requeuer
from applications.zcomx.modules.tests.runner import LocalTestCase
from applications.zcomx.modules.tests.trackers import TableTracker
# Scratch directory for lock files and generated scripts used by the tests.
TMP_DIR = '/tmp/test_suite/job_queue'
# exist_ok avoids the check-then-create race of the former
# `if not os.path.exists(...)` guard.
os.makedirs(TMP_DIR, exist_ok=True)
class SubQueuer(Queuer):
    """Queuer subclass used only by this test module."""
    class_factory_id = 'some_program'
    program = 'some_program.py'
    default_job_options = {
        'priority': 1,
        'status': 'd',
    }
    default_cli_options = {
        '-a': False,
        '-b': True,
        '-c': 'ccc',
        '-d': ['d1', 'd2'],
    }
    valid_cli_options = ['-a', '-b', '-c', '-d', '-e']
    queue_class = Queue

    def __init__(self, tbl, job_options=None, cli_options=None,
                 cli_args=None, delay_seconds=0):
        """Forward all arguments unchanged to Queuer."""
        super().__init__(
            tbl,
            job_options=job_options,
            cli_options=cli_options,
            cli_args=cli_args,
            delay_seconds=delay_seconds,
        )
class TestCLIOption(LocalTestCase):
    """Tests for the CLIOption class."""

    def test____init__(self):
        """A CLIOption can be constructed from an option name alone."""
        self.assertTrue(CLIOption('-a'))

    def test____str__(self):
        """str() renders option/value pairs with shell-safe quoting."""
        expectations = [
            # (option, value, expected rendering)
            ('-a', None, ''),
            ('-a', False, ''),
            ('-a', True, '-a'),
            ('--action', True, '--action'),
            ('-a', 'list', '-a list'),
            ('-a', 111, '-a 111'),
            ('-a', ['opt1'], '-a opt1'),
            ('-a', ['opt1', 'opt2'], '-a opt1 -a opt2'),
            (
                '-a', """my "list" of 'items'""",
                '-a \'my "list" of \'"\'"\'items\'"\'"\'\''
            ),
            (
                '-a', ["""a'b"c""", """d"e'f"""],
                '-a \'a\'"\'"\'b"c\' -a \'d"e\'"\'"\'f\''
            ),
        ]
        for option, value, expected in expectations:
            self.assertEqual(str(CLIOption(option, value=value)), expected)
class TestDaemon(LocalTestCase):
    """Tests for the Daemon class (pid-file bookkeeping)."""
    # Daemon name and an isolated pid file under the test tmp dir.
    name = 'zco_queued'
    pid_filename = '/tmp/test_suite/job_queue/pid'
    def test____init__(self):
        """pid_filename defaults to /tmp/<name>/pid and can be overridden."""
        daemon = Daemon(self.name)
        self.assertEqual(daemon.pid_filename, '/tmp/zco_queued/pid')
        daemon = Daemon(self.name, pid_filename='/tmp/testing')
        self.assertEqual(daemon.pid_filename, '/tmp/testing')
    def test__read_pid(self):
        """read_pid parses 'key: value' lines into a dict of strings."""
        daemon = Daemon(self.name, self.pid_filename)
        open(self.pid_filename, 'w').close()  # truncate to an empty file
        self.assertEqual(daemon.read_pid(), {})
        with open(self.pid_filename, 'w') as f:
            f.write("a: 1\n")
            f.write("first name: John\n")
            f.write("start time: 2000-01-01 12:59:59\n")
            f.write("nada: \n")
            f.write("empty:\n")
        self.assertEqual(daemon.read_pid(), {
            'a': '1',
            'first name': 'John',
            'start time': '2000-01-01 12:59:59',
            'nada': '',
            'empty': '',
        })
    def test__signal(self):
        """signal raises DaemonSignalError when no pid info exists.

        The success path issues an os.kill() and is deliberately not
        exercised here.
        """
        daemon = Daemon(self.name, self.pid_filename)
        self.assertRaises(DaemonSignalError, daemon.signal)
    def test__update_pid(self):
        """update_pid refreshes the 'last' field, preserving other fields."""
        daemon = Daemon(self.name, self.pid_filename)
        open(self.pid_filename, 'w').close()  # truncate to an empty file
        daemon.update_pid()
        params = daemon.read_pid()
        self.assertEqual(list(params.keys()), ['last'])
        data = {
            'pid': '1234',
            'start': '2003-03-03 03:30:33',
            'last': '',
        }
        daemon.write_pid(data)
        daemon.update_pid()
        params = daemon.read_pid()
        self.assertEqual(sorted(params.keys()), ['last', 'pid', 'start'])
        self.assertEqual(params['pid'], data['pid'])
        self.assertEqual(params['start'], data['start'])
        self.assertNotEqual(params['last'], data['last'])
    def test__write_pid(self):
        """write_pid round-trips a params dict through the pid file."""
        daemon = Daemon(self.name, self.pid_filename)
        params = {}
        daemon.write_pid(params)
        self.assertEqual(daemon.read_pid(), {})
        params = {
            'b': '2',
            'last name': 'Smith',
            'start time': '2002-02-02 13:58:58',
            'nothing': '',
            'empty_str': '',
        }
        daemon.write_pid(params)
        self.assertEqual(daemon.read_pid(), params)
class TestIgnorableJob(LocalTestCase):
    """Tests for IgnorableJob.is_ignored."""
    def test__is_ignored(self):
        """Exercise is_ignored over the ignorable flag and start-time window."""
        now = datetime.datetime.now()
        nine_minutes_ago = now - datetime.timedelta(minutes=9)
        eleven_minutes_ago = now - datetime.timedelta(minutes=11)
        command = 'test__is_ignored'
        priority = 10
        data = dict(
            command=command,
            priority=priority,
            start=now,
            status='d',
            ignorable=True,
        )
        reset_data = dict(data)
        def reset(job):
            # Restore the job to its pristine state between sub-tests.
            return IgnorableJob.from_updated(job, reset_data)
        # Two identical jobs are added; presumably the duplicate is what
        # makes job_1 ignorable — confirm against IgnorableJob.is_ignored.
        job_1 = IgnorableJob.from_add(data)
        self._objects.append(job_1)
        job_2 = IgnorableJob.from_add(data)
        self._objects.append(job_2)
        job_1 = reset(job_1)
        job_2 = reset(job_2)
        self.assertTrue(job_1.is_ignored(status='d'))
        # is_ignored follows the ignorable flag directly.
        for ignorable in [True, False]:
            data = dict(ignorable=ignorable)
            job_1 = IgnorableJob.from_updated(job_1, data)
            self.assertEqual(job_1.is_ignored(status='d'), ignorable)
        job_1 = reset(job_1)
        # Vary the start time relative to the (default 10 min) window.
        tests = [
            # (job_1.start, start_limit_seconds, expect)
            (now, None, True),
            (nine_minutes_ago, None, True),
            (eleven_minutes_ago, None, False),
            (nine_minutes_ago, 539, False),
            (nine_minutes_ago, 540, False),
            (nine_minutes_ago, 541, True),
        ]
        for t in tests:
            data = dict(start=t[0])
            job_1 = IgnorableJob.from_updated(job_1, data)
            if t[1] is None:
                self.assertEqual(job_1.is_ignored(status='d'), t[2])
            else:
                self.assertEqual(
                    job_1.is_ignored(status='d', start_limit_seconds=t[1]),
                    t[2]
                )
class TestJob(LocalTestCase):
    """Tests for Job (no Job-specific tests defined yet)."""
    pass
class TestJobHistory(LocalTestCase):
    """Tests for JobHistory."""

    def test_init__(self):
        # Any record matched from the job_history table builds a JobHistory.
        job_history = JobHistory.from_query(db.job_history)
        self.assertTrue(job_history)
class TestJobQueuer(LocalTestCase):
    """Tests for JobQueuer."""

    def test_init__(self):
        # The 'search_prefetch' queuer record should exist and be loadable.
        query = (db.job_queuer.code == 'search_prefetch')
        record = JobQueuer.from_query(query)
        self.assertTrue(record)
class TestQueue(LocalTestCase):
    """Tests for Queue."""

    @classmethod
    def clear_queue(cls):
        # Remove every job record so a test starts from an empty queue.
        db(db.job.id > 0).delete()
        db.commit()

    def test____init__(self):
        queue = Queue(db.job)
        self.assertTrue(queue)

    def test__add_job(self):
        queue = Queue(db.job)
        TestQueue.clear_queue()
        self.assertEqual(len(queue.jobs()), 0)
        now = datetime.datetime.now()
        job_data = dict(
            command='pwd',
            priority=1,
            start=now,
        )
        ret = queue.add_job(job_data)
        self._objects.append(ret)
        self.assertEqual(ret.command, job_data['command'])
        self.assertTrue(ret.id > 0)
        self.assertEqual(len(queue.jobs()), 1)

        # add_job should call the pre/post hooks, in that order.
        class MyQueue(Queue):
            def __init__(self, tbl):
                Queue.__init__(self, tbl)
                self.trace = []

            def pre_add_job(self):
                self.trace.append('pre')

            def post_add_job(self):
                self.trace.append('post')

        my_queue = MyQueue(db.job)
        TestQueue.clear_queue()
        self.assertEqual(len(my_queue.jobs()), 0)
        ret = my_queue.add_job(job_data)
        self._objects.append(ret)
        self.assertTrue(ret.id > 0)
        self.assertEqual(my_queue.trace, ['pre', 'post'])

    def test__job_generator(self):
        queue = Queue(db.job)
        gen = queue.job_generator()
        self.assertRaises(StopIteration, gen.__next__)
        # (command, start, priority, status)
        job_data = [
            ('do_a', '2010-01-01 10:00:00', 1, 'a'),
            ('do_b', '2010-01-01 10:00:00', 5, 'a'),
            ('do_c', '2010-01-01 10:00:00', 9, 'a'),
        ]
        all_jobs = []
        for j in job_data:
            job = queue.add_job(
                dict(command=j[0], start=j[1], priority=j[2], status=j[3])
            )
            all_jobs.append(job)
        # Jobs come out highest priority first; each is deleted once handled.
        gen = queue.job_generator()
        job = next(gen)
        self.assertEqual(job.command, 'do_c')
        all_jobs[2].delete()
        job = next(gen)
        self.assertEqual(job.command, 'do_b')
        all_jobs[1].delete()
        job = next(gen)
        self.assertEqual(job.command, 'do_a')
        all_jobs[0].delete()
        self.assertRaises(StopIteration, gen.__next__)
        # Cleanup: tolerate jobs already deleted above.
        for j in all_jobs:
            try:
                j.delete()
            except LookupError:
                pass
        self.assertEqual(queue.stats(), {})

    def test__jobs(self):
        # Permit the 'z' test status alongside the regular ones.
        db.job.status.requires = IS_IN_SET(['a', 'd', 'p', 'z'])
        queue = Queue(db.job)
        TestQueue.clear_queue()
        self.assertEqual(len(queue.jobs()), 0)
        # (start, priority, status)
        job_data = [
            ('2010-01-01 10:00:00', 0, 'z'),
            ('2010-01-01 10:00:00', 0, 'd'),
            ('2010-01-01 10:00:01', -1, 'z'),
            ('2010-01-01 10:00:01', -1, 'd'),
            ('2010-01-01 10:00:02', 1, 'z'),
            ('2010-01-01 10:00:02', 1, 'd'),
        ]
        all_jobs = []
        for j in job_data:
            job_d = dict(command='pwd', start=j[0], priority=j[1], status=j[2])
            job = Job.from_add(job_d)
            self._objects.append(job)
            all_jobs.append(job)

        # No query: all jobs returned.
        job_set = queue.jobs()
        self.assertEqual(len(job_set), 6)
        self.assertEqual(job_set, all_jobs)

        # Filter by status.
        query = (db.job.status == 'z')
        job_set = queue.jobs(query=query)
        self.assertEqual(len(job_set), 3)
        self.assertEqual(
            job_set,
            [all_jobs[0], all_jobs[2], all_jobs[4]]
        )

        # Compound query.
        query = (db.job.status == 'd') & \
            (db.job.start <= '2010-01-01 10:00:01')
        job_set = queue.jobs(query=query)
        self.assertEqual(len(job_set), 2)
        self.assertEqual(
            job_set,
            [all_jobs[1], all_jobs[3]]
        )

        # orderby: ascending priority.
        query = (db.job.status == 'z')
        job_set = queue.jobs(query=query, orderby=db.job.priority)
        self.assertEqual(len(job_set), 3)
        self.assertEqual(
            job_set,
            [all_jobs[2], all_jobs[0], all_jobs[4]]
        )

        # orderby: descending priority.
        query = (db.job.status == 'z')
        job_set = queue.jobs(query=query, orderby=~db.job.priority)
        self.assertEqual(len(job_set), 3)
        self.assertEqual(
            job_set,
            [all_jobs[4], all_jobs[0], all_jobs[2]]
        )

        # limitby caps the result count.
        query = (db.job.status == 'z')
        job_set = queue.jobs(query=query, orderby=~db.job.priority, limitby=1)
        self.assertEqual(len(job_set), 1)
        self.assertEqual(job_set, [all_jobs[4]])

    def test__lock(self):
        queue = Queue(db.job)
        # Default lock file: lock() creates it, unlock() removes it.
        if os.path.exists(queue.lock_filename):
            os.unlink(queue.lock_filename)
        self.assertFalse(os.path.exists(queue.lock_filename))
        queue.lock()
        self.assertTrue(os.path.exists(queue.lock_filename))
        queue.unlock()
        self.assertFalse(os.path.exists(queue.lock_filename))

        # Explicit lock file: re-locking raises; after the extended window
        # elapses the error escalates to QueueLockedExtendedError.
        lock_file = os.path.join(TMP_DIR, 'test__lock.pid')
        if os.path.exists(lock_file):
            os.unlink(lock_file)
        self.assertFalse(os.path.exists(lock_file))
        queue.lock(filename=lock_file)
        self.assertTrue(os.path.exists(lock_file))
        self.assertRaises(QueueLockedError, queue.lock, filename=lock_file)
        time.sleep(2)
        self.assertRaises(
            QueueLockedError,
            queue.lock,
            filename=lock_file,
            extended_seconds=9999
        )
        self.assertRaises(
            QueueLockedExtendedError,
            queue.lock,
            filename=lock_file,
            extended_seconds=1
        )
        queue.unlock(filename=lock_file)
        self.assertFalse(os.path.exists(lock_file))

    def test__post_add_job(self):
        # Hook behaviour is covered in test__add_job.
        pass

    def test__pre_add_job(self):
        # Hook behaviour is covered in test__add_job.
        pass

    def test__run_job(self):
        queue = Queue(db.job)

        def do_run(job):
            # Run the job; report 1 on subprocess failure, 0 on success.
            try:
                queue.run_job(job)
            except subprocess.CalledProcessError:
                return 1
            else:
                return 0

        # A job without a command is a no-op.
        job = Job(dict(command=None, status='a'))
        self.assertFalse(do_run(job))

        # Write a small script that records its CLI args to a file.
        # NOTE(review): indentation inside this literal was reconstructed;
        # the written script must be valid flush-left Python.
        tmp_file = os.path.join(TMP_DIR, 'test__run_output.txt')
        text = 'Hello World!'
        script = """
#!/usr/bin/env python
def main():
    import sys
    with open('{file}', 'w') as f:
        f.write("{text}")
        f.write("\\n")
        for c, arg in enumerate(sys.argv):
            if c == 0:
                continue
            f.write(str(c) + ': ' + arg + "\\n")

if __name__ == '__main__':
    main()
""".format(file=tmp_file, text=text)
        script_name = os.path.join(TMP_DIR, 'test__run.py')
        with open(script_name, 'w') as f:
            f.write(script.strip())
        os.chmod(script_name, 0o700)

        # No args.
        job.command = script_name
        self.assertEqual(do_run(job), 0)
        expect = """Hello World!
"""
        got = ''
        with open(tmp_file, 'r') as f:
            got = f.read()
        self.assertEqual(got, expect)

        # With args: each is echoed as 'position: value'.
        job.command = "{script} -v -a delete 123".format(script=script_name)
        self.assertEqual(do_run(job), 0)
        expect = """Hello World!
1: -v
2: -a
3: delete
4: 123
"""
        got = ''
        with open(tmp_file, 'r') as f:
            got = f.read()
        self.assertEqual(got, expect)

    def test__set_job_status(self):
        queue = Queue(db.job)
        job = self.add(Job, dict(command='pwd', status='d'))
        self.assertEqual(job.status, 'd')
        for status in ['a', 'd', 'p']:
            got = queue.set_job_status(job, status)
            self.assertEqual(got.status, status)
        # Statuses outside {a, d, p} are rejected.
        self.assertRaises(InvalidStatusError, queue.set_job_status, job, 'z')

    def test__stats(self):
        queue = Queue(db.job)
        TestQueue.clear_queue()
        self.assertEqual(len(queue.jobs()), 0)
        self.add(Job, dict(status='a'))
        self.add(Job, dict(status='a'))
        self.add(Job, dict(status='d'))
        self.add(Job, dict(status='p'))
        # stats() counts jobs per status.
        self.assertEqual(queue.stats(), {'a': 2, 'd': 1, 'p': 1})

    def test__top_job(self):
        queue = Queue(db.job)
        TestQueue.clear_queue()
        self.assertEqual(len(queue.jobs()), 0)
        self.assertRaises(QueueEmptyError, queue.top_job)
        # (command, start, priority); do_d starts in the far future so it is
        # not yet eligible despite sharing the top priority.
        jobs = [
            ('do_a', '2010-01-01 10:00:00', 0),
            ('do_b', '2010-01-01 10:00:01', -1),
            ('do_c', '2010-01-01 10:00:02', 1),
            ('do_d', '2999-12-31 23:59:59', 1),
        ]
        for j in jobs:
            self.add(Job, dict(command=j[0], start=j[1], priority=j[2]))
        job = queue.top_job()
        self.assertEqual(job.command, 'do_c')

    def test__unlock(self):
        # Covered in test__lock.
        pass
class TestQueuer(LocalTestCase):
    """Tests for Queuer."""

    def test____init__(self):
        queuer = Queuer(db.job)
        self.assertTrue(queuer)
        self.assertEqual(queuer.queue_class, Queue)
        self.assertEqual(Queuer.bin_path, 'applications/zcomx/private/bin')

    def test__command(self):
        # Default CLI options from SubQueuer are rendered in the command.
        queuer = SubQueuer(db.job)
        self.assertEqual(
            queuer.command(), 'some_program.py -b -c ccc -d d1 -d d2')

        # Positional cli_args are appended after the options.
        queuer = SubQueuer(db.job, cli_args=['file', 'arg2'])
        self.assertEqual(
            queuer.command(),
            'some_program.py -b -c ccc -d d1 -d d2 file arg2'
        )

        # Options set to False are omitted entirely.
        queuer = SubQueuer(
            db.job,
            cli_options={
                '-a': False,
                '-b': False,
                '-c': False,
                '-d': False,
            },
            cli_args=['file']
        )
        self.assertEqual(queuer.command(), 'some_program.py file')

        # Options outside valid_cli_options are rejected.
        invalid_cli_options = {'-x': 'invalid'}
        queuer = SubQueuer(db.job, cli_options=invalid_cli_options)
        self.assertRaises(InvalidCLIOptionError, queuer.command)

        # Option values containing quotes are shell-escaped.
        queuer = SubQueuer(
            db.job,
            cli_options={
                '-a': False,
                '-b': False,
                '-c': False,
                '-d': False,
                '-e': """A 'B' "C" D""",
            },
            cli_args=['file'],
        )
        self.assertEqual(
            queuer.command(),
            'some_program.py -e \'A \'"\'"\'B\'"\'"\' "C" D\' file'
        )

        # Positional args containing quotes are shell-escaped too.
        queuer = SubQueuer(
            db.job,
            cli_options={
                '-a': False,
                '-b': False,
                '-c': False,
            },
            cli_args=["""A 'B' "C" D"""],
        )
        self.assertEqual(
            queuer.command(),
            'some_program.py -d d1 -d d2 \'A \'"\'"\'B\'"\'"\' "C" D\''
        )

    def test__job_data(self):
        then = datetime.datetime.now()
        data = SubQueuer(db.job).job_data()
        self.assertEqual(data.job_queuer_id, 0)
        self.assertEqual(data.status, 'd')
        self.assertEqual(data.priority, 1)
        self.assertEqual(
            data.command,
            'some_program.py -b -c ccc -d d1 -d d2'
        )
        # start defaults to "now" (within one second of the call).
        self.assertTrue(data.start >= then)
        diff = data.start - then
        self.assertTrue(diff.total_seconds() >= 0)
        self.assertTrue(diff.total_seconds() < 1)
        self.assertEqual(data.start, data.queued_time)

        # job_options must match actual job table fields.
        invalid_job_options = {'fake_field': 'value'}
        queuer = SubQueuer(db.job, job_options=invalid_job_options)
        self.assertRaises(InvalidJobOptionError, queuer.job_data)

        # delay_seconds pushes the start time into the future.
        then = datetime.datetime.now()
        data = SubQueuer(db.job, delay_seconds=100).job_data()
        self.assertTrue(data.start > then)
        diff = data.start - then
        self.assertTrue(diff.total_seconds() >= 100)
        self.assertTrue(diff.total_seconds() < 101)

    def test__queue(self):
        def get_job_ids():
            # Ids of all job records, sorted.
            return sorted([x.id for x in db(db.job).select(db.job.id)])

        job_ids = get_job_ids()
        queuer = SubQueuer(db.job)
        new_job = queuer.queue()
        self.assertEqual(
            new_job.command, 'some_program.py -b -c ccc -d d1 -d d2')
        # queue() creates a brand-new job record.
        self.assertTrue(new_job.id not in job_ids)
        job_ids = get_job_ids()
        self.assertTrue(new_job.id in job_ids)
        job = Job.from_id(new_job.id)
        self._objects.append(job)
class TestRequeuer(LocalTestCase):
    """Tests for Requeuer."""

    def test____init__(self):
        queuer = SubQueuer(db.job)
        requeuer = Requeuer(queuer)
        self.assertTrue(requeuer)
        # Defaults: no requeues yet, at most one permitted.
        self.assertEqual(requeuer.requeues, 0)
        self.assertEqual(requeuer.max_requeues, 1)

    def test__requeue(self):
        # SubQueuer does not accept the --requeues/--max-requeues options.
        sub_queuer = SubQueuer(db.job)
        requeuer = Requeuer(sub_queuer)
        self.assertRaises(InvalidCLIOptionError, requeuer.requeue)

        class ReQueuer(SubQueuer):
            # A queuer that accepts the requeue bookkeeping options.
            valid_cli_options = ['-a', '-c', '--requeues', '--max-requeues']
            default_cli_options = {
                '-a': True,
                '-c': 'ccc',
            }

        queuer = ReQueuer(db.job)
        requeuer = Requeuer(queuer)
        tracker = TableTracker(db.job)
        job = requeuer.requeue()
        # The job record is created by requeue() (absent before, present after).
        self.assertFalse(tracker.had(job))
        self.assertTrue(tracker.has(job))
        self._objects.append(job)
        self.assertEqual(
            job.command,
            'some_program.py --max-requeues 1 --requeues 1 -a -c ccc'
        )

        # The requeue counter is incremented in the queued command.
        requeuer = Requeuer(queuer, requeues=33, max_requeues=99)
        tracker = TableTracker(db.job)
        job = requeuer.requeue()
        self.assertFalse(tracker.had(job))
        self.assertTrue(tracker.has(job))
        self._objects.append(job)
        self.assertEqual(
            job.command,
            'some_program.py --max-requeues 99 --requeues 34 -a -c ccc'
        )

        # At or beyond the maximum, requeue refuses with StopIteration.
        requeuer = Requeuer(queuer, requeues=99, max_requeues=99)
        self.assertRaises(StopIteration, requeuer.requeue)
        requeuer = Requeuer(queuer, requeues=100, max_requeues=99)
        self.assertRaises(StopIteration, requeuer.requeue)

    def test__requeue_cli_options(self):
        requeuer = Requeuer(Queuer(db.job))
        self.assertEqual(
            requeuer.requeue_cli_options(),
            {
                '--requeues': 1,
                '--max-requeues': 1,
            }
        )
        # requeues is reported incremented; max_requeues passes through.
        requeuer = Requeuer(Queuer(db.job), requeues=33, max_requeues=99)
        self.assertEqual(
            requeuer.requeue_cli_options(),
            {
                '--requeues': 34,
                '--max-requeues': 99,
            }
        )
def setUpModule():
    """Set up the module-level test environment."""
    LocalTestCase.set_env(globals())


if __name__ == '__main__':
    unittest.main()
| true | true |
f73409964d099723c4d6f502b21eefcdeb932ced | 1,488 | py | Python | wolk_gateway_module/interface/actuator_status_provider.py | Wolkabout/WolkGatewayModule-SDK-Python | 73cb537822d07cb68b0609022f53171ecf663fa4 | [
"Apache-2.0"
] | null | null | null | wolk_gateway_module/interface/actuator_status_provider.py | Wolkabout/WolkGatewayModule-SDK-Python | 73cb537822d07cb68b0609022f53171ecf663fa4 | [
"Apache-2.0"
] | 2 | 2020-11-16T15:10:36.000Z | 2020-11-20T13:10:13.000Z | wolk_gateway_module/interface/actuator_status_provider.py | Wolkabout/WolkGatewayModule-SDK-Python | 73cb537822d07cb68b0609022f53171ecf663fa4 | [
"Apache-2.0"
] | null | null | null | """Stub method for providing current device actuator status."""
# Copyright 2019 WolkAbout Technology s.r.o.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Tuple
from typing import Union
from wolk_gateway_module.model.actuator_state import ActuatorState
def get_actuator_status(
    device_key: str, reference: str
) -> Tuple[ActuatorState, Union[bool, int, float, str]]:
    """
    Stub: report the current status of a single actuator.

    An implementation reads the actuator identified by ``device_key`` and
    ``reference`` from the device and returns a tuple of its state and its
    current value.

    Implementations must be non-blocking and thread safe.

    :param device_key: Device key to which the actuator belongs to
    :type device_key: str
    :param reference: Actuator reference
    :type reference: str
    :returns: (state, value)
    :rtype: (ActuatorState, bool or int or float or str)
    """
    raise NotImplementedError
| 36.292683 | 78 | 0.737231 |
from typing import Tuple
from typing import Union
from wolk_gateway_module.model.actuator_state import ActuatorState


def get_actuator_status(
    device_key: str, reference: str
) -> Tuple[ActuatorState, Union[bool, int, float, str]]:
    # Stub to be overridden: return (ActuatorState, current value) for the
    # actuator identified by device_key/reference. Always raises here.
    raise NotImplementedError
| true | true |
f7340a41d11266271a052dc71a2406320a530885 | 7,923 | py | Python | acd/scores/cd.py | SilanHe/hierarchical-dnn-interpretations | d6f96d0ab6fec48ee53ab930b2660e80525993b9 | [
"MIT"
] | null | null | null | acd/scores/cd.py | SilanHe/hierarchical-dnn-interpretations | d6f96d0ab6fec48ee53ab930b2660e80525993b9 | [
"MIT"
] | null | null | null | acd/scores/cd.py | SilanHe/hierarchical-dnn-interpretations | d6f96d0ab6fec48ee53ab930b2660e80525993b9 | [
"MIT"
] | null | null | null | import torch
import torch.nn.functional as F
from copy import deepcopy
import numpy as np
from scipy.special import expit as sigmoid
from .cd_propagate import *
from .cd_architecture_specific import *
def cd(im_torch: torch.Tensor, model, mask=None, model_type=None, device='cuda', transform=None):
'''Get contextual decomposition scores for blob
Params
------
im_torch: torch.Tensor
example to interpret - usually has shape (batch_size, num_channels, height, width)
model: pytorch model
mask: array_like (values in {0, 1})
required unless transform is supplied
array with 1s marking the locations of relevant pixels, 0s marking the background
shape should match the shape of im_torch or just H x W
model_type: str, optional
usually should just leave this blank
if this is == 'mnist', uses CD for a specific mnist model
if this is == 'resnet18', uses resnet18 model
device: str, optional
transform: function
transform should be a function which transforms the original image to specify rel
only used if mask is not passed
Returns
-------
relevant: torch.Tensor
class-wise scores for relevant mask
irrelevant: torch.Tensor
class-wise scores for everything but the relevant mask
'''
# set up model
model.eval()
model = model.to(device)
im_torch = im_torch.to(device)
# set up masks
if not mask is None:
mask = torch.FloatTensor(mask).to(device)
relevant = mask * im_torch
irrelevant = (1 - mask) * im_torch
elif not transform is None:
relevant = transform(im_torch).to(device)
if len(relevant.shape) < 4:
relevant = relevant.reshape(1, 1, relevant.shape[0], relevant.shape[1])
irrelevant = im_torch - relevant
else:
print('invalid arguments')
relevant = relevant.to(device)
irrelevant = irrelevant.to(device)
# deal with specific architectures which have problems
if model_type == 'mnist':
return cd_propagate_mnist(relevant, irrelevant, model)
elif model_type == 'resnet18':
return cd_propagate_resnet(relevant, irrelevant, model)
# try the generic case
else:
mods = list(model.modules())
relevant, irrelevant = cd_generic(mods, relevant, irrelevant)
return relevant, irrelevant
def cd_generic(mods, relevant, irrelevant):
    '''Propagate CD scores through a sequence of modules.

    Dispatches on each module's class-name string and applies the matching
    propagation rule, threading the (relevant, irrelevant) pair through the
    whole stack. Modules whose class name matches none of the known layer
    kinds are skipped.
    '''
    for layer in mods:
        kind = str(type(layer))
        if 'Conv2d' in kind:
            relevant, irrelevant = propagate_conv_linear(relevant, irrelevant, layer)
        elif 'Linear' in kind:
            # Linear layers expect flat inputs.
            relevant = relevant.reshape(relevant.shape[0], -1)
            irrelevant = irrelevant.reshape(irrelevant.shape[0], -1)
            relevant, irrelevant = propagate_conv_linear(relevant, irrelevant, layer)
        elif 'ReLU' in kind:
            relevant, irrelevant = propagate_relu(relevant, irrelevant, layer)
        elif ('AvgPool' in kind or 'NormLayer' in kind or 'Dropout' in kind
                or 'ReshapeLayer' in kind
                or ('modularize' in kind and 'Transform' in kind)):  # custom layers
            relevant, irrelevant = propagate_independent(relevant, irrelevant, layer)
        elif 'Pool' in kind and 'AvgPool' not in kind:
            relevant, irrelevant = propagate_pooling(relevant, irrelevant, layer)
        elif 'BatchNorm2d' in kind:
            relevant, irrelevant = propagate_batchnorm2d(relevant, irrelevant, layer)
    return relevant, irrelevant
def cd_text(batch, model, start, stop, return_irrel_scores=False):
    '''Get contextual decomposition scores for substring of a text sequence

    Params
    ------
    batch: torchtext batch
        really only requires that batch.text is the string input to be interpreted
    start: int
        beginning index of substring to be interpreted (inclusive)
    stop: int
        ending index of substring to be interpreted (inclusive)

    Returns
    -------
    scores: torch.Tensor
        class-wise scores for relevant substring
    '''
    weights = model.lstm.state_dict()

    # Index one = word vector (i) or hidden state (h), index two = gate
    W_ii, W_if, W_ig, W_io = np.split(weights['weight_ih_l0'], 4, 0)
    W_hi, W_hf, W_hg, W_ho = np.split(weights['weight_hh_l0'], 4, 0)
    b_i, b_f, b_g, b_o = np.split(weights['bias_ih_l0'].cpu().numpy() + weights['bias_hh_l0'].cpu().numpy(), 4)
    word_vecs = model.embed(batch.text)[:, 0].data
    T = word_vecs.size(0)
    # Per-timestep decomposition of the cell state (relevant/irrelevant) and
    # of the hidden state (relevant_h/irrelevant_h).
    relevant = np.zeros((T, model.hidden_dim))
    irrelevant = np.zeros((T, model.hidden_dim))
    relevant_h = np.zeros((T, model.hidden_dim))
    irrelevant_h = np.zeros((T, model.hidden_dim))
    for i in range(T):
        if i > 0:
            prev_rel_h = relevant_h[i - 1]
            prev_irrel_h = irrelevant_h[i - 1]
        else:
            prev_rel_h = np.zeros(model.hidden_dim)
            prev_irrel_h = np.zeros(model.hidden_dim)

        # Recurrent (hidden-state) contributions to each gate pre-activation.
        rel_i = np.dot(W_hi, prev_rel_h)
        rel_g = np.dot(W_hg, prev_rel_h)
        rel_f = np.dot(W_hf, prev_rel_h)
        rel_o = np.dot(W_ho, prev_rel_h)
        irrel_i = np.dot(W_hi, prev_irrel_h)
        irrel_g = np.dot(W_hg, prev_irrel_h)
        irrel_f = np.dot(W_hf, prev_irrel_h)
        irrel_o = np.dot(W_ho, prev_irrel_h)

        # The word vector contributes to the relevant part only inside
        # [start, stop]; elsewhere it is assigned to the irrelevant part.
        if i >= start and i <= stop:
            rel_i = rel_i + np.dot(W_ii, word_vecs[i])
            rel_g = rel_g + np.dot(W_ig, word_vecs[i])
            rel_f = rel_f + np.dot(W_if, word_vecs[i])
            rel_o = rel_o + np.dot(W_io, word_vecs[i])
        else:
            irrel_i = irrel_i + np.dot(W_ii, word_vecs[i])
            irrel_g = irrel_g + np.dot(W_ig, word_vecs[i])
            irrel_f = irrel_f + np.dot(W_if, word_vecs[i])
            irrel_o = irrel_o + np.dot(W_io, word_vecs[i])

        # Linearize the input and candidate gates into rel/irrel/bias terms.
        rel_contrib_i, irrel_contrib_i, bias_contrib_i = propagate_three(rel_i, irrel_i, b_i, sigmoid)
        rel_contrib_g, irrel_contrib_g, bias_contrib_g = propagate_three(rel_g, irrel_g, b_g, np.tanh)

        relevant[i] = rel_contrib_i * (rel_contrib_g + bias_contrib_g) + bias_contrib_i * rel_contrib_g
        irrelevant[i] = irrel_contrib_i * (rel_contrib_g + irrel_contrib_g + bias_contrib_g) + (rel_contrib_i + bias_contrib_i) * irrel_contrib_g
        # The bias-only cross term is credited to whichever side owns step i.
        if i >= start and i <= stop:
            relevant[i] += bias_contrib_i * bias_contrib_g
        else:
            irrelevant[i] += bias_contrib_i * bias_contrib_g

        if i > 0:
            # Forget gate carries the previous cell-state decomposition forward.
            rel_contrib_f, irrel_contrib_f, bias_contrib_f = propagate_three(rel_f, irrel_f, b_f, sigmoid)
            relevant[i] += (rel_contrib_f + bias_contrib_f) * relevant[i - 1]
            irrelevant[i] += (rel_contrib_f + irrel_contrib_f + bias_contrib_f) * irrelevant[i - 1] + irrel_contrib_f * \
                relevant[i - 1]

        # Output gate is applied as a shared multiplier (not decomposed).
        o = sigmoid(np.dot(W_io, word_vecs[i]) + np.dot(W_ho, prev_rel_h + prev_irrel_h) + b_o)
        rel_contrib_o, irrel_contrib_o, bias_contrib_o = propagate_three(rel_o, irrel_o, b_o, sigmoid)
        new_rel_h, new_irrel_h = propagate_tanh_two(relevant[i], irrelevant[i])
        # relevant_h[i] = new_rel_h * (rel_contrib_o + bias_contrib_o)
        # irrelevant_h[i] = new_rel_h * (irrel_contrib_o) + new_irrel_h * (rel_contrib_o + irrel_contrib_o + bias_contrib_o)
        relevant_h[i] = o * new_rel_h
        irrelevant_h[i] = o * new_irrel_h

    W_out = model.hidden_to_label.weight.data

    # Sanity check: scores + irrel_scores should equal the LSTM's output minus model.hidden_to_label.bias
    scores = np.dot(W_out, relevant_h[T - 1])
    irrel_scores = np.dot(W_out, irrelevant_h[T - 1])

    if return_irrel_scores:
        return scores, irrel_scores

    return scores
| 42.827027 | 145 | 0.638773 | import torch
import torch.nn.functional as F
from copy import deepcopy
import numpy as np
from scipy.special import expit as sigmoid
from .cd_propagate import *
from .cd_architecture_specific import *
def cd(im_torch: torch.Tensor, model, mask=None, model_type=None, device='cuda', transform=None):
    # Contextual decomposition: split the input into a relevant part
    # (selected by `mask`, or produced by `transform` when mask is None) and
    # the irrelevant remainder, then propagate both through the model.
    # Returns (relevant, irrelevant) score tensors.
    model.eval()
    model = model.to(device)
    im_torch = im_torch.to(device)
    if not mask is None:
        mask = torch.FloatTensor(mask).to(device)
        relevant = mask * im_torch
        irrelevant = (1 - mask) * im_torch
    elif not transform is None:
        relevant = transform(im_torch).to(device)
        if len(relevant.shape) < 4:
            # Promote H x W output to a 1 x 1 x H x W batch.
            relevant = relevant.reshape(1, 1, relevant.shape[0], relevant.shape[1])
        irrelevant = im_torch - relevant
    else:
        # NOTE(review): execution continues past this print and hits a
        # NameError on `relevant` below — callers must pass mask or transform.
        print('invalid arguments')
    relevant = relevant.to(device)
    irrelevant = irrelevant.to(device)
    # Architecture-specific propagation for known-problematic models.
    if model_type == 'mnist':
        return cd_propagate_mnist(relevant, irrelevant, model)
    elif model_type == 'resnet18':
        return cd_propagate_resnet(relevant, irrelevant, model)
    else:
        # Generic case: walk the module list.
        mods = list(model.modules())
        relevant, irrelevant = cd_generic(mods, relevant, irrelevant)
    return relevant, irrelevant
def cd_generic(mods, relevant, irrelevant):
    # Propagate (relevant, irrelevant) through each module, dispatching on
    # the module's class-name string; unknown module kinds are skipped.
    for i, mod in enumerate(mods):
        t = str(type(mod))
        if 'Conv2d' in t:
            relevant, irrelevant = propagate_conv_linear(relevant, irrelevant, mod)
        elif 'Linear' in t:
            # Linear layers expect flat inputs.
            relevant = relevant.reshape(relevant.shape[0], -1)
            irrelevant = irrelevant.reshape(irrelevant.shape[0], -1)
            relevant, irrelevant = propagate_conv_linear(relevant, irrelevant, mod)
        elif 'ReLU' in t:
            relevant, irrelevant = propagate_relu(relevant, irrelevant, mod)
        elif 'AvgPool' in t or 'NormLayer' in t or 'Dropout' in t \
                or 'ReshapeLayer' in t or ('modularize' in t and 'Transform' in t):
            # Layers applied independently to each part (incl. custom layers).
            relevant, irrelevant = propagate_independent(relevant, irrelevant, mod)
        elif 'Pool' in t and not 'AvgPool' in t:
            relevant, irrelevant = propagate_pooling(relevant, irrelevant, mod)
        elif 'BatchNorm2d' in t:
            relevant, irrelevant = propagate_batchnorm2d(relevant, irrelevant, mod)
    return relevant, irrelevant
def cd_text(batch, model, start, stop, return_irrel_scores=False):
    # Contextual decomposition through an LSTM for the token span
    # [start, stop] (inclusive) of batch.text; returns class-wise scores
    # for the span (and optionally the complementary scores).
    weights = model.lstm.state_dict()
    # Split stacked LSTM parameters into per-gate matrices (i, f, g, o).
    W_ii, W_if, W_ig, W_io = np.split(weights['weight_ih_l0'], 4, 0)
    W_hi, W_hf, W_hg, W_ho = np.split(weights['weight_hh_l0'], 4, 0)
    b_i, b_f, b_g, b_o = np.split(weights['bias_ih_l0'].cpu().numpy() + weights['bias_hh_l0'].cpu().numpy(), 4)
    word_vecs = model.embed(batch.text)[:, 0].data
    T = word_vecs.size(0)
    # Decomposition of cell state and hidden state at each timestep.
    relevant = np.zeros((T, model.hidden_dim))
    irrelevant = np.zeros((T, model.hidden_dim))
    relevant_h = np.zeros((T, model.hidden_dim))
    irrelevant_h = np.zeros((T, model.hidden_dim))
    for i in range(T):
        if i > 0:
            prev_rel_h = relevant_h[i - 1]
            prev_irrel_h = irrelevant_h[i - 1]
        else:
            prev_rel_h = np.zeros(model.hidden_dim)
            prev_irrel_h = np.zeros(model.hidden_dim)
        # Recurrent contributions to each gate pre-activation.
        rel_i = np.dot(W_hi, prev_rel_h)
        rel_g = np.dot(W_hg, prev_rel_h)
        rel_f = np.dot(W_hf, prev_rel_h)
        rel_o = np.dot(W_ho, prev_rel_h)
        irrel_i = np.dot(W_hi, prev_irrel_h)
        irrel_g = np.dot(W_hg, prev_irrel_h)
        irrel_f = np.dot(W_hf, prev_irrel_h)
        irrel_o = np.dot(W_ho, prev_irrel_h)
        # Word-vector input is credited to the relevant part only in-span.
        if i >= start and i <= stop:
            rel_i = rel_i + np.dot(W_ii, word_vecs[i])
            rel_g = rel_g + np.dot(W_ig, word_vecs[i])
            rel_f = rel_f + np.dot(W_if, word_vecs[i])
            rel_o = rel_o + np.dot(W_io, word_vecs[i])
        else:
            irrel_i = irrel_i + np.dot(W_ii, word_vecs[i])
            irrel_g = irrel_g + np.dot(W_ig, word_vecs[i])
            irrel_f = irrel_f + np.dot(W_if, word_vecs[i])
            irrel_o = irrel_o + np.dot(W_io, word_vecs[i])
        rel_contrib_i, irrel_contrib_i, bias_contrib_i = propagate_three(rel_i, irrel_i, b_i, sigmoid)
        rel_contrib_g, irrel_contrib_g, bias_contrib_g = propagate_three(rel_g, irrel_g, b_g, np.tanh)
        relevant[i] = rel_contrib_i * (rel_contrib_g + bias_contrib_g) + bias_contrib_i * rel_contrib_g
        irrelevant[i] = irrel_contrib_i * (rel_contrib_g + irrel_contrib_g + bias_contrib_g) + (rel_contrib_i + bias_contrib_i) * irrel_contrib_g
        # The bias-only cross term goes to whichever side owns step i.
        if i >= start and i <= stop:
            relevant[i] += bias_contrib_i * bias_contrib_g
        else:
            irrelevant[i] += bias_contrib_i * bias_contrib_g
        if i > 0:
            # Forget gate carries the previous cell-state decomposition forward.
            rel_contrib_f, irrel_contrib_f, bias_contrib_f = propagate_three(rel_f, irrel_f, b_f, sigmoid)
            relevant[i] += (rel_contrib_f + bias_contrib_f) * relevant[i - 1]
            irrelevant[i] += (rel_contrib_f + irrel_contrib_f + bias_contrib_f) * irrelevant[i - 1] + irrel_contrib_f * \
                relevant[i - 1]
        # Output gate applied as a shared (undecomposed) multiplier.
        o = sigmoid(np.dot(W_io, word_vecs[i]) + np.dot(W_ho, prev_rel_h + prev_irrel_h) + b_o)
        rel_contrib_o, irrel_contrib_o, bias_contrib_o = propagate_three(rel_o, irrel_o, b_o, sigmoid)
        new_rel_h, new_irrel_h = propagate_tanh_two(relevant[i], irrelevant[i])
        relevant_h[i] = o * new_rel_h
        irrelevant_h[i] = o * new_irrel_h
    W_out = model.hidden_to_label.weight.data
    # Project the final hidden-state decomposition through the output layer.
    scores = np.dot(W_out, relevant_h[T - 1])
    irrel_scores = np.dot(W_out, irrelevant_h[T - 1])
    if return_irrel_scores:
        return scores, irrel_scores
    return scores
| true | true |
f7340bd5568d9d2e74fc3957907b003ad617d70e | 895 | py | Python | setup.py | dennislwy/internet-connection-python | 36360598773aca0a077c237d034caf4f19b784b1 | [
"MIT"
] | null | null | null | setup.py | dennislwy/internet-connection-python | 36360598773aca0a077c237d034caf4f19b784b1 | [
"MIT"
] | null | null | null | setup.py | dennislwy/internet-connection-python | 36360598773aca0a077c237d034caf4f19b784b1 | [
"MIT"
] | null | null | null | from setuptools import setup
from setuptools import find_packages
classifiers = [
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
]
setup(
name="internet-connection",
version="0.0.1",
description="Library to monitor internet connection state, when last online/offline, read external IP address, etc",
keywords=["internet connection", "connection state", "online", "offline", "uptime", "downtime", "internal ip", "external ip", "public ip"],
long_description=open("README.md").read(),
author="Dennis Lee",
author_email="wylee2000@gmail.com",
url="https://github.com/dennislwy/internet-connection-python",
classifiers=classifiers,
packages=find_packages(exclude=['tests']),
license="MIT"
) | 37.291667 | 143 | 0.688268 | from setuptools import setup
from setuptools import setup
from setuptools import find_packages

# Trove classifiers for the package.
CLASSIFIERS = [
    'Development Status :: 4 - Beta',
    'Intended Audience :: Developers',
    'License :: OSI Approved :: MIT License',
    'Programming Language :: Python',
    'Programming Language :: Python :: 2.7',
]

setup(
    name="internet-connection",
    version="0.0.1",
    author="Dennis Lee",
    author_email="wylee2000@gmail.com",
    url="https://github.com/dennislwy/internet-connection-python",
    license="MIT",
    description="Library to monitor internet connection state, when last online/offline, read external IP address, etc",
    long_description=open("README.md").read(),
    keywords=["internet connection", "connection state", "online", "offline", "uptime", "downtime", "internal ip", "external ip", "public ip"],
    classifiers=CLASSIFIERS,
    packages=find_packages(exclude=['tests'])
)
f7340ca89f7353dcd0c8f391724088255d2a8188 | 2,171 | py | Python | pddm/regressors/feedforward_network.py | krishpop/pddm | b1452554a4e318966b8ca3da53978458ac635c5d | [
"Apache-2.0"
] | null | null | null | pddm/regressors/feedforward_network.py | krishpop/pddm | b1452554a4e318966b8ca3da53978458ac635c5d | [
"Apache-2.0"
] | null | null | null | pddm/regressors/feedforward_network.py | krishpop/pddm | b1452554a4e318966b8ca3da53978458ac635c5d | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow.compat.v1 as tf

# This module is written against the TF1 graph-mode API
# (tf.layers / tf.variable_scope), so v2 eager behaviour is disabled.
tf.disable_v2_behavior()
def feedforward_network(inputStates, inputSize, outputSize, num_fc_layers,
                        depth_fc_layers, tf_datatype, scope):
    """Build a fully-connected feedforward network (TF1 graph mode).

    Args:
        inputStates: input tensor; the trailing dimensions are flattened,
            so [bs x K x sa] becomes [bs x K*sa].
        inputSize: unused; kept for interface compatibility.
        outputSize: width of the linear output layer.
        num_fc_layers: number of hidden (dense + ReLU) layers.
        depth_fc_layers: width of each hidden layer.
        tf_datatype: dtype used by the glorot-normal initializer.
        scope: variable scope name under which all layers are created.

    Returns:
        The output tensor z of shape [bs x outputSize].
    """
    with tf.variable_scope(str(scope)):
        # concat K entries together: [bs x K x sa] --> [bs x ksa]
        hidden = tf.layers.flatten(inputStates)

        initializer = tf.glorot_normal_initializer(
            seed=None, dtype=tf_datatype)

        # Hidden layers. Threading `hidden` through the loop removes the
        # original first-iteration special case and fixes the NameError the
        # old code hit when num_fc_layers == 0 (h_i was never defined).
        for _ in range(num_fc_layers):
            pre_activation = tf.layers.dense(
                hidden,
                units=depth_fc_layers,
                activation=None,
                kernel_initializer=initializer,
                bias_initializer=initializer,
                reuse=False,
                trainable=True)
            hidden = tf.nn.relu(pre_activation)

        # Linear output layer.
        z = tf.layers.dense(
            hidden,
            units=outputSize,
            activation=None,
            kernel_initializer=initializer,
            bias_initializer=initializer,
            reuse=False,
            trainable=True)
    return z
| 31.926471 | 74 | 0.579917 |
import tensorflow.compat.v1 as tf

# TF1 graph-mode API is used below; disable v2 eager behaviour.
tf.disable_v2_behavior()


def feedforward_network(inputStates, inputSize, outputSize, num_fc_layers,
                        depth_fc_layers, tf_datatype, scope):
    # Build a fully-connected network: flatten -> num_fc_layers x
    # (dense + ReLU) -> linear output of width outputSize.
    # NOTE(review): `inputSize` is unused, and `h_i` is undefined when
    # num_fc_layers == 0 (the output layer would raise NameError).
    with tf.variable_scope(str(scope)):
        inputState = tf.layers.flatten(inputStates)
        intermediate_size = depth_fc_layers
        reuse = False
        initializer = tf.glorot_normal_initializer(
            seed=None, dtype=tf_datatype)
        fc = tf.layers.dense
        # Hidden layers: first consumes the flattened input, the rest chain.
        for i in range(num_fc_layers):
            if i == 0:
                fc_i = fc(
                    inputState,
                    units=intermediate_size,
                    activation=None,
                    kernel_initializer=initializer,
                    bias_initializer=initializer,
                    reuse=reuse,
                    trainable=True)
            else:
                fc_i = fc(
                    h_i,
                    units=intermediate_size,
                    activation=None,
                    kernel_initializer=initializer,
                    bias_initializer=initializer,
                    reuse=reuse,
                    trainable=True)
            h_i = tf.nn.relu(fc_i)
        # Linear output layer.
        z = fc(
            h_i,
            units=outputSize,
            activation=None,
            kernel_initializer=initializer,
            bias_initializer=initializer,
            reuse=reuse,
            trainable=True)
    return z
| true | true |
f7340cc711f3390937a5990e5bd4c691cd341575 | 3,123 | py | Python | hyperLAI/features/hyperLAIdataset.py | AI-sandbox/hyperLAI | 49f1a9d3c645ee0e5b0c2ed16d54ee8df0626689 | [
"MIT"
] | null | null | null | hyperLAI/features/hyperLAIdataset.py | AI-sandbox/hyperLAI | 49f1a9d3c645ee0e5b0c2ed16d54ee8df0626689 | [
"MIT"
] | null | null | null | hyperLAI/features/hyperLAIdataset.py | AI-sandbox/hyperLAI | 49f1a9d3c645ee0e5b0c2ed16d54ee8df0626689 | [
"MIT"
] | null | null | null | import numpy as np
import torch
from torch.utils import data
import sys
from utils.generate_dataset import *
from HypHC.datasets.triples import samples_triples
class HyperLoader(data.Dataset):
    def __init__(self, data_dir, split_indices, restrict_labels=[0,1,2,3,4,5,6], chromosome="all"):
        '''
        Dataset of per-haplotype SNP vectors with population labels.
        Arguments:
        `data_dir`: directory in which data (either text files or numpy arrays) are located
        `split_indices`: indices for the data split (train/test/valid)
        `restrict_labels`: super-populations to include. Indices correspond to 'EUR', 'EAS', 'AMR', 'SAS', 'AFR', 'OCE', 'WAS'
        `chromosome`: "all" loads pre-built .npy arrays; otherwise loads a single chromosome from a VCF
        NOTE(review): the mutable default for `restrict_labels` is shared
        across instances; it is only read here, so it is benign as written.
        '''
        self.data_dir = data_dir
        self.restrict_labels = restrict_labels
        self.chromosome = chromosome
        self.split_indices = split_indices
        # load_data returns (snps, pop_labels, suppop_labels, pop_index, suppop_index)
        self.snps, self.pop_labels, self.suppop_labels, self.pop_label_index, self.suppop_label_index = self.load_data()
    def load_data(self):
        '''
        Loads SNP and label data from the necessary file locations and
        filters rows to the requested populations and split indices.
        '''
        #If we want all chromosomes, then we have the arrays already pre-created
        if self.chromosome =="all":
            file_order = ["all_snps.npy", "labels_suppop.npy", "labels_pop.npy",
                          "coords.npy", "pop_index.npy", "pop_code_index.npy", "suppop_code_index.npy"]
            test_data = tuple([np.load(self.data_dir + x) for x in file_order])
            ind_data = test_data[0]
        else:
            #The data for individual chromosomes is in a slightly different format
            test_data = load_dataset(self.data_dir + "ref_final_beagle_phased_1kg_hgdp_sgdp_chr%s_hg19.vcf.gz"%(self.chromosome),
                                self.data_dir + "reference_panel_metadata.tsv", "./", chromosome=self.chromosome,
                                verbose=True, filter_admixed=True, filter_missing_coord=True)
            # flatten (individuals, 2 haplotypes, snps) into rows of SNPs
            ind_data = test_data[0].reshape([test_data[0].shape[0], test_data[0].shape[1] * test_data[0].shape[2]]).T
        #Each individual contributes two haplotype rows, so repeat every
        #label twice to stay aligned with the unfolded data
        ind_pop_labels = np.repeat(test_data[2], 2).astype(int)
        ind_suppop_labels = np.repeat(test_data[1], 2).astype(int)
        #Restrict to only the super-populations we've specified
        pop_indices = np.argwhere(np.isin(ind_suppop_labels, self.restrict_labels)).T[0]
        indices = np.intersect1d(pop_indices, self.split_indices)
        #Return filtered data plus the label-name lookup tables
        return ind_data[indices], ind_pop_labels[indices], ind_suppop_labels[indices], test_data[4], test_data[6]
    def __len__(self):
        return len(self.snps)
    def __getitem__(self, index):
        '''
        Returns (snp_tensor, superpopulation_label, population_label)
        for the given row — note super-population comes before population.
        '''
        return torch.tensor(self.snps[index]), torch.tensor(self.suppop_labels[index]), torch.tensor(self.pop_labels[index])
| 55.767857 | 150 | 0.658982 | import numpy as np
import torch
from torch.utils import data
import sys
from utils.generate_dataset import *
from HypHC.datasets.triples import samples_triples
class HyperLoader(data.Dataset):
    """Dataset of per-haplotype SNP rows with population / super-population labels."""
    def __init__(self, data_dir, split_indices, restrict_labels=[0,1,2,3,4,5,6], chromosome="all"):
        # restrict_labels indices correspond to super-populations
        # ('EUR','EAS','AMR','SAS','AFR','OCE','WAS'); the mutable default is
        # only read, never mutated, so sharing it across instances is benign.
        self.data_dir = data_dir
        self.restrict_labels = restrict_labels
        self.chromosome = chromosome
        self.split_indices = split_indices
        # (snps, pop_labels, suppop_labels, pop_index, suppop_index)
        self.snps, self.pop_labels, self.suppop_labels, self.pop_label_index, self.suppop_label_index = self.load_data()
    def load_data(self):
        """Load SNPs and labels, then filter to the requested populations and split."""
        if self.chromosome =="all":
            # pre-built numpy arrays for the whole genome
            file_order = ["all_snps.npy", "labels_suppop.npy", "labels_pop.npy",
                          "coords.npy", "pop_index.npy", "pop_code_index.npy", "suppop_code_index.npy"]
            test_data = tuple([np.load(self.data_dir + x) for x in file_order])
            ind_data = test_data[0]
        else:
            # single chromosome: parse the VCF + metadata instead
            test_data = load_dataset(self.data_dir + "ref_final_beagle_phased_1kg_hgdp_sgdp_chr%s_hg19.vcf.gz"%(self.chromosome),
                                self.data_dir + "reference_panel_metadata.tsv", "./", chromosome=self.chromosome,
                                verbose=True, filter_admixed=True, filter_missing_coord=True)
            ind_data = test_data[0].reshape([test_data[0].shape[0], test_data[0].shape[1] * test_data[0].shape[2]]).T
        # each individual contributes two haplotype rows,
        #So we must do the same for the labels by doubling them
        ind_pop_labels = np.repeat(test_data[2], 2).astype(int)
        ind_suppop_labels = np.repeat(test_data[1], 2).astype(int)
        #Restrict to only the super-populations we've specified
        pop_indices = np.argwhere(np.isin(ind_suppop_labels, self.restrict_labels)).T[0]
        indices = np.intersect1d(pop_indices, self.split_indices)
        return ind_data[indices], ind_pop_labels[indices], ind_suppop_labels[indices], test_data[4], test_data[6]
    def __len__(self):
        return len(self.snps)
    def __getitem__(self, index):
        # returns (snps, superpopulation_label, population_label) — suppop first
        return torch.tensor(self.snps[index]), torch.tensor(self.suppop_labels[index]), torch.tensor(self.pop_labels[index])
| true | true |
f7340d4c7d794f72db0316bab63819772700e43e | 207 | py | Python | flatlib/predictives/__init__.py | UtkuGlsvn/flatlib | 79ecb961ed77393405bc21a11a5874a62103ceef | [
"MIT"
] | null | null | null | flatlib/predictives/__init__.py | UtkuGlsvn/flatlib | 79ecb961ed77393405bc21a11a5874a62103ceef | [
"MIT"
] | null | null | null | flatlib/predictives/__init__.py | UtkuGlsvn/flatlib | 79ecb961ed77393405bc21a11a5874a62103ceef | [
"MIT"
] | null | null | null | """
This file is part of flatlib - (C) FlatAngle
Author: João Ventura (flatangleweb@gmail.com)
This subpackage implements a few traditional
astrology predictive techniques.
""" | 23 | 49 | 0.666667 | true | true | |
f7340d9075ad1031c3712b9f37da8e6741530c98 | 45,409 | py | Python | sequicity/tsd_net.py | qbetterk/user-simulator | 77caca30ff67b9112b1fe5e65e191c6b5e25532c | [
"Apache-2.0"
] | 20 | 2019-11-08T02:28:28.000Z | 2022-02-07T09:20:21.000Z | sequicity/tsd_net.py | qbetterk/user-simulator | 77caca30ff67b9112b1fe5e65e191c6b5e25532c | [
"Apache-2.0"
] | 21 | 2019-11-08T02:27:40.000Z | 2022-03-12T00:02:54.000Z | sequicity/tsd_net.py | qbetterk/user-simulator | 77caca30ff67b9112b1fe5e65e191c6b5e25532c | [
"Apache-2.0"
] | 8 | 2020-02-10T07:28:37.000Z | 2021-09-23T09:42:14.000Z | import torch
from torch import nn
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
import math
from sequicity.config import global_config as cfg
import copy, random, time, logging
from torch.distributions import Categorical
from sequicity.reader import pad_sequences
import pdb
import simulator.dialog_config as dialog_config
import pdb
def cuda_(var):
    """Move `var` to the GPU when cfg.cuda is set; otherwise return it unchanged."""
    if cfg.cuda:
        return var.cuda()
    return var
def toss_(p):
    """Bernoulli-style coin toss: True with probability (p+1)/100 for p in [0, 99].

    Draws a uniform integer in [0, 99] and succeeds when it is <= p, so
    p >= 99 always succeeds and p < 0 always fails.
    """
    draw = random.randint(0, 99)
    return draw <= p
def nan(v):
    """Return True if `v` contains a NaN.

    :param v: either a plain float, or a tensor-like object exposing
        ``.data.cpu().numpy()`` (e.g. a torch Tensor/Variable).
    :return: truthy when any element is NaN.
    """
    if type(v) is float:
        # Fix: the original compared `v == float('nan')`, which is ALWAYS
        # False because NaN never compares equal to anything (including
        # itself). math.isnan is the correct check.
        return math.isnan(v)
    return np.isnan(np.sum(v.data.cpu().numpy()))
def get_sparse_input_aug(x_input_np):
    """
    Build a near-one-hot tensor over an augmented vocabulary.

    Each position (t, b) gets weight 1.0 on its token id; unknown tokens
    (id 2) instead point at the copy slot `vocab_size + t`, and padding
    (id 0) is left at the 1e-10 floor everywhere.

    :param x_input_np: token id array of shape [T, B]
    :return: FloatTensor of shape [B, T, vocab_size + T]
    """
    PAD, UNK = 0, 2
    T, B = x_input_np.shape[0], x_input_np.shape[1]
    onehot = np.full((T, B, cfg.vocab_size + T), 1e-10, dtype=np.float32)
    for t in range(T):
        for b in range(B):
            token = x_input_np[t][b]
            if token == PAD:
                continue
            # unknown words map to their time-step copy slot instead
            column = cfg.vocab_size + t if token == UNK else token
            onehot[t][b][column] = 1.0
    return torch.from_numpy(onehot.transpose((1, 0, 2))).float()
def init_gru(gru):
    """Reset a GRU's parameters, then re-initialize every hidden-to-hidden
    weight with per-gate orthogonal blocks (gain 1)."""
    gru.reset_parameters()
    for layer_params in gru.all_weights:
        hh = layer_params[1]  # weight_hh_l* is the second entry per layer
        for row in range(0, hh.size(0), gru.hidden_size):
            torch.nn.init.orthogonal_(hh[row:row + gru.hidden_size], gain=1)
class Attn(nn.Module):
    """Additive (Bahdanau-style) attention over encoder outputs."""

    def __init__(self, hidden_size):
        super(Attn, self).__init__()
        self.hidden_size = hidden_size
        self.attn = nn.Linear(self.hidden_size * 2, hidden_size)
        # scoring vector v, initialized N(0, 1/sqrt(H))
        self.v = nn.Parameter(torch.zeros(hidden_size))
        stdv = 1. / math.sqrt(self.v.size(0))
        self.v.data.normal_(mean=0, std=stdv)

    def forward(self, hidden, encoder_outputs, mask=False, inp_seqs=None, stop_tok=None, normalize=True):
        """Return an attention-weighted context vector of shape [1,B,H].

        NOTE(review): the `if True or not mask:` guard below disables the
        masking branch entirely — `mask`, `inp_seqs` and `stop_tok` are
        currently ignored, and `normalize` is never used. Confirm whether
        masking was intentionally switched off.
        """
        encoder_outputs = encoder_outputs.transpose(0, 1)  # [B,T,H]
        attn_energies = self.score(hidden, encoder_outputs)
        if True or not mask:
            normalized_energy = F.softmax(attn_energies, dim=2)  # [B,1,T]
        else:
            # (dead branch) mask out positions at/after the first stop token
            mask_idx = []
            # inp_seqs: ndarray of [T,B]
            for b in range(inp_seqs.shape[1]):
                for t in range(inp_seqs.shape[0] + 1):
                    if t == inp_seqs.shape[0] or inp_seqs[t, b] in stop_tok:
                        mask_idx.append(t)
                        break
            mask = []
            for mask_len in mask_idx:
                mask.append([1.] * mask_len + [0.] * (inp_seqs.shape[0] - mask_len))
            mask = cuda_(Variable(torch.FloatTensor(mask)))  # [B,T]
            attn_energies = attn_energies * mask.unsqueeze(1)
            normalized_energy = F.softmax(attn_energies, dim=2)  # [B,1,T]

        context = torch.bmm(normalized_energy, encoder_outputs)  # [B,1,H]
        return context.transpose(0, 1)  # [1,B,H]

    def score(self, hidden, encoder_outputs):
        """Additive energy v^T tanh(W [h; enc]) for every encoder step -> [B,1,T]."""
        max_len = encoder_outputs.size(1)
        H = hidden.repeat(max_len, 1, 1).transpose(0, 1)
        energy = torch.tanh(self.attn(torch.cat([H, encoder_outputs], 2)))  # [B,T,2H]->[B,T,H]
        energy = energy.transpose(2, 1)  # [B,H,T]
        v = self.v.repeat(encoder_outputs.size(0), 1).unsqueeze(1)  # [B,1,H]
        energy = torch.bmm(v, energy)  # [B,1,T]
        return energy
class SimpleDynamicEncoder(nn.Module):
    """Bidirectional GRU encoder over padded, variable-length sequences.

    Sorts the batch by length for packing, runs a bi-GRU, sums the two
    directions, and restores the original batch order.
    """

    def __init__(self, input_size, embed_size, hidden_size, n_layers, dropout):
        super().__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.embed_size = embed_size
        self.n_layers = n_layers
        self.dropout = dropout
        self.embedding = nn.Embedding(input_size, embed_size)
        self.gru = nn.GRU(embed_size, hidden_size, n_layers, dropout=self.dropout, bidirectional=True)
        init_gru(self.gru)

    def forward(self, input_seqs, input_lens, hidden=None):
        """
        forward procedure. No need for inputs to be sorted
        :param input_seqs: Variable of [T,B]
        :param hidden: optional initial hidden state
        :param input_lens: *numpy array* of len for each input sequence
        :return: (outputs [T,B,H] with directions summed, hidden, embedded [T,B,E])
        """
        batch_size = input_seqs.size(1)
        embedded = self.embedding(input_seqs)
        # Fix: the original dropped into pdb.set_trace() here, which hangs
        # any non-interactive run on a NaN embedding; log a warning instead.
        if torch.isnan(embedded).sum() > 0:
            logging.warning('NaN detected in encoder embeddings')
        embedded = embedded.transpose(0, 1)  # [B,T,E]
        # sort by decreasing length so pack_padded_sequence accepts the batch
        sort_idx = np.argsort(-input_lens)
        unsort_idx = cuda_(torch.LongTensor(np.argsort(sort_idx)))
        input_lens = input_lens[sort_idx]
        sort_idx = cuda_(torch.LongTensor(sort_idx))
        embedded = embedded[sort_idx].transpose(0, 1)  # [T,B,E]
        packed = torch.nn.utils.rnn.pack_padded_sequence(embedded, input_lens)
        outputs, hidden = self.gru(packed, hidden)
        outputs, _ = torch.nn.utils.rnn.pad_packed_sequence(outputs)
        # sum forward and backward directions so output width stays hidden_size
        outputs = outputs[:, :, :self.hidden_size] + outputs[:, :, self.hidden_size:]
        # undo the length sort for both outputs and hidden state
        outputs = outputs.transpose(0, 1)[unsort_idx].transpose(0, 1).contiguous()
        hidden = hidden.transpose(0, 1)[unsort_idx].transpose(0, 1).contiguous()
        return outputs, hidden, embedded
class BSpanDecoder(nn.Module):
    """Belief-span decoder with CopyNet-style copying.

    At each step it mixes a vocabulary generation score with copy scores
    over the user utterance and (optionally) the previous turn's belief
    span, producing a distribution over the augmented vocabulary.
    """

    def __init__(self, embed_size, hidden_size, vocab_size, dropout_rate, vocab):
        super().__init__()
        self.emb = nn.Embedding(vocab_size, embed_size)
        if cfg.use_positional_embedding:
            self.positional_embedding = nn.Embedding(cfg.max_ts + 1, embed_size)
            init_pos_emb = self.position_encoding_init(cfg.max_ts + 1, embed_size)
            self.positional_embedding.weight.data = init_pos_emb
        self.gru = nn.GRU(hidden_size + embed_size, hidden_size, dropout=dropout_rate)
        self.proj = nn.Linear(hidden_size * 2, vocab_size)
        self.attn_u = Attn(hidden_size)
        self.proj_copy1 = nn.Linear(hidden_size, hidden_size)  # copy from user utterance
        self.proj_copy2 = nn.Linear(hidden_size, hidden_size)  # copy from previous bspan
        self.dropout_rate = dropout_rate
        self.inp_dropout = nn.Dropout(self.dropout_rate)

        init_gru(self.gru)
        self.vocab = vocab

    def position_encoding_init(self, n_position, d_pos_vec):
        """Sinusoidal position-encoding table of shape [n_position, d_pos_vec]."""
        position_enc = np.array([[pos / np.power(10000, 2 * (j // 2) / d_pos_vec) for j in range(d_pos_vec)]
                                 if pos != 0 else np.zeros(d_pos_vec) for pos in range(n_position)])

        position_enc[1:, 0::2] = np.sin(position_enc[1:, 0::2])  # dim 2i
        position_enc[1:, 1::2] = np.cos(position_enc[1:, 1::2])  # dim 2i+1
        return torch.from_numpy(position_enc).type(torch.FloatTensor)

    def forward(self, u_enc_out, z_tm1, last_hidden, u_input_np, pv_z_enc_out, prev_z_input_np, u_emb, pv_z_emb,
                position):
        """One decoding step.

        Returns (gru_out, last_hidden, proba) where proba is over the
        vocabulary augmented with copy positions; when a previous belief
        span exists its copy slots are appended after the utterance's.
        """
        sparse_u_input = Variable(get_sparse_input_aug(u_input_np), requires_grad=False)

        if pv_z_enc_out is not None:
            # attend over previous bspan + current utterance jointly
            context = self.attn_u(last_hidden, torch.cat([pv_z_enc_out, u_enc_out], dim=0), mask=True,
                                  inp_seqs=np.concatenate([prev_z_input_np, u_input_np], 0),
                                  stop_tok=[self.vocab.encode('EOS_M')])
        else:
            context = self.attn_u(last_hidden, u_enc_out, mask=True, inp_seqs=u_input_np,
                                  stop_tok=[self.vocab.encode('EOS_M')])
        embed_z = self.emb(z_tm1)
        # embed_z = self.inp_dropout(embed_z)

        if cfg.use_positional_embedding:  # defaulty not used
            position_label = [position] * u_enc_out.size(1)  # [B]
            position_label = cuda_(Variable(torch.LongTensor(position_label))).view(1, -1)  # [1,B]
            pos_emb = self.positional_embedding(position_label)
            embed_z = embed_z + pos_emb

        gru_in = torch.cat([embed_z, context], 2)
        gru_out, last_hidden = self.gru(gru_in, last_hidden)
        # gru_out = self.inp_dropout(gru_out)
        gen_score = self.proj(torch.cat([gru_out, context], 2)).squeeze(0)
        # gen_score = self.inp_dropout(gen_score)
        u_copy_score = torch.tanh(self.proj_copy1(u_enc_out.transpose(0, 1)))  # [B,T,H]
        # stable version of copynet: log-sum-exp style max-shift before exp
        u_copy_score = torch.matmul(u_copy_score, gru_out.squeeze(0).unsqueeze(2)).squeeze(2)
        u_copy_score = u_copy_score.cpu()
        u_copy_score_max = torch.max(u_copy_score, dim=1, keepdim=True)[0]
        u_copy_score = torch.exp(u_copy_score - u_copy_score_max)  # [B,T]
        u_copy_score = torch.log(torch.bmm(u_copy_score.unsqueeze(1), sparse_u_input)).squeeze(
            1) + u_copy_score_max  # [B,V]
        u_copy_score = cuda_(u_copy_score)
        if pv_z_enc_out is None:
            # u_copy_score = self.inp_dropout(u_copy_score)
            scores = F.softmax(torch.cat([gen_score, u_copy_score], dim=1), dim=1)
            gen_score, u_copy_score = scores[:, :cfg.vocab_size], \
                                      scores[:, cfg.vocab_size:]
            proba = gen_score + u_copy_score[:, :cfg.vocab_size]  # [B,V]
            proba = torch.cat([proba, u_copy_score[:, cfg.vocab_size:]], 1)
        else:
            # same copy machinery applied to the previous turn's belief span
            sparse_pv_z_input = Variable(get_sparse_input_aug(prev_z_input_np), requires_grad=False)
            pv_z_copy_score = torch.tanh(self.proj_copy2(pv_z_enc_out.transpose(0, 1)))  # [B,T,H]
            pv_z_copy_score = torch.matmul(pv_z_copy_score, gru_out.squeeze(0).unsqueeze(2)).squeeze(2)
            pv_z_copy_score = pv_z_copy_score.cpu()
            pv_z_copy_score_max = torch.max(pv_z_copy_score, dim=1, keepdim=True)[0]
            pv_z_copy_score = torch.exp(pv_z_copy_score - pv_z_copy_score_max)  # [B,T]
            pv_z_copy_score = torch.log(torch.bmm(pv_z_copy_score.unsqueeze(1), sparse_pv_z_input)).squeeze(
                1) + pv_z_copy_score_max  # [B,V]
            pv_z_copy_score = cuda_(pv_z_copy_score)
            scores = F.softmax(torch.cat([gen_score, u_copy_score, pv_z_copy_score], dim=1), dim=1)
            gen_score, u_copy_score, pv_z_copy_score = scores[:, :cfg.vocab_size], \
                                                       scores[:,
                                                       cfg.vocab_size:2 * cfg.vocab_size + u_input_np.shape[0]], \
                                                       scores[:, 2 * cfg.vocab_size + u_input_np.shape[0]:]
            proba = gen_score + u_copy_score[:, :cfg.vocab_size] + pv_z_copy_score[:, :cfg.vocab_size]  # [B,V]
            proba = torch.cat([proba, pv_z_copy_score[:, cfg.vocab_size:], u_copy_score[:, cfg.vocab_size:]], 1)
        return gru_out, last_hidden, proba
class ResponseDecoder(nn.Module):
    """Response decoder: generates machine-response tokens attending over
    both the belief-span encoding and the user utterance, with copying
    from the belief span (requestable-slot tokens become *_SLOT ids)."""

    def __init__(self, embed_size, hidden_size, vocab_size, degree_size, dropout_rate, gru, proj, emb, vocab):
        super().__init__()
        self.emb = emb
        self.attn_z = Attn(hidden_size)
        self.attn_u = Attn(hidden_size)
        self.gru = gru
        init_gru(self.gru)
        self.proj = proj
        self.proj_copy1 = nn.Linear(hidden_size, hidden_size)
        self.proj_copy2 = nn.Linear(hidden_size, hidden_size)
        self.dropout_rate = dropout_rate

        self.vocab = vocab

    def get_sparse_selective_input(self, x_input_np):
        """Sparse copy matrix [B,T,V+T] over the bspan tokens; requestable
        slot names are redirected to their *_SLOT vocabulary entry, and
        unknown/OOV tokens get a boosted copy weight at their position."""
        result = np.zeros((x_input_np.shape[0], x_input_np.shape[1], cfg.vocab_size + x_input_np.shape[0]),
                          dtype=np.float32)
        result.fill(1e-10)
        reqs = ['address', 'phone', 'postcode', 'pricerange', 'area']
        for t in range(x_input_np.shape[0] - 1):
            for b in range(x_input_np.shape[1]):
                w = x_input_np[t][b]
                word = self.vocab.decode(w)
                if word in reqs:
                    slot = self.vocab.encode(word + '_SLOT')
                    result[t + 1][b][slot] = 1.0
                else:
                    if w == 2 or w >= cfg.vocab_size:
                        result[t + 1][b][cfg.vocab_size + t] = 5.0
                    else:
                        result[t + 1][b][w] = 1.0
        result_np = result.transpose((1, 0, 2))
        result = torch.from_numpy(result_np).float()
        return result

    def forward(self, z_enc_out, u_enc_out, u_input_np, m_t_input, degree_input, last_hidden, z_input_np):
        """One response-decoding step -> (proba over augmented vocab, last_hidden, gru_out)."""
        sparse_z_input = Variable(self.get_sparse_selective_input(z_input_np), requires_grad=False)

        m_embed = self.emb(m_t_input)
        z_context = self.attn_z(last_hidden, z_enc_out, mask=True, stop_tok=[self.vocab.encode('EOS_Z2')],
                                inp_seqs=z_input_np)
        u_context = self.attn_u(last_hidden, u_enc_out, mask=True, stop_tok=[self.vocab.encode('EOS_M')],
                                inp_seqs=u_input_np)
        # condition on previous token, both contexts, and the DB-match degree
        gru_in = torch.cat([m_embed, u_context, z_context, degree_input.unsqueeze(0)], dim=2)
        gru_out, last_hidden = self.gru(gru_in, last_hidden)
        gen_score = self.proj(torch.cat([z_context, u_context, gru_out], 2)).squeeze(0)
        # copy scores over the belief span, max-shifted for numerical stability
        z_copy_score = torch.tanh(self.proj_copy2(z_enc_out.transpose(0, 1)))
        z_copy_score = torch.matmul(z_copy_score, gru_out.squeeze(0).unsqueeze(2)).squeeze(2)
        z_copy_score = z_copy_score.cpu()
        z_copy_score_max = torch.max(z_copy_score, dim=1, keepdim=True)[0]
        z_copy_score = torch.exp(z_copy_score - z_copy_score_max)  # [B,T]
        z_copy_score = torch.log(torch.bmm(z_copy_score.unsqueeze(1), sparse_z_input)).squeeze(
            1) + z_copy_score_max  # [B,V]
        z_copy_score = cuda_(z_copy_score)

        scores = F.softmax(torch.cat([gen_score, z_copy_score], dim=1), dim=1)
        gen_score, z_copy_score = scores[:, :cfg.vocab_size], \
                                  scores[:, cfg.vocab_size:]
        proba = gen_score + z_copy_score[:, :cfg.vocab_size]  # [B,V]
        proba = torch.cat([proba, z_copy_score[:, cfg.vocab_size:]], 1)
        return proba, last_hidden, gru_out
class ResponseDecoder_discrete(nn.Module):
    """Discrete-action response head.

    Instead of generating a token sequence, scores a fixed set of system
    actions from the last utterance-encoder step concatenated with an
    external dialog-state vector.
    """

    def __init__(self, embed_size, hidden_size, vocab_size, degree_size, dropout_rate, gru, proj, emb, vocab):
        super().__init__()
        self.emb = emb
        self.attn_z = Attn(hidden_size)
        self.attn_u = Attn(hidden_size)
        self.gru = gru
        init_gru(self.gru)
        # three FC layers over [utterance context ; dialog state]
        self.proj_0 = nn.Linear(hidden_size+dialog_config.STATE_DIM, hidden_size+dialog_config.STATE_DIM)
        self.proj_1 = nn.Linear(hidden_size+dialog_config.STATE_DIM, hidden_size+dialog_config.STATE_DIM)
        self.proj_2 = nn.Linear(hidden_size+dialog_config.STATE_DIM, hidden_size+dialog_config.STATE_DIM)
        self.proj = proj  # final projection to the action space
        self.proj_copy1 = nn.Linear(hidden_size, hidden_size)
        self.proj_copy2 = nn.Linear(hidden_size, hidden_size)
        self.dropout_rate = dropout_rate

        self.vocab = vocab

    def get_sparse_selective_input(self, x_input_np):
        """Sparse copy matrix [B,T,V+T]; requestable slot names map to their
        *_SLOT id, unknown/OOV tokens get boosted positional copy weight."""
        result = np.zeros((x_input_np.shape[0], x_input_np.shape[1], cfg.vocab_size + x_input_np.shape[0]),
                          dtype=np.float32)
        result.fill(1e-10)
        reqs = ['address', 'phone', 'postcode', 'pricerange', 'area']
        for t in range(x_input_np.shape[0] - 1):
            for b in range(x_input_np.shape[1]):
                w = x_input_np[t][b]
                word = self.vocab.decode(w)
                if word in reqs:
                    slot = self.vocab.encode(word + '_SLOT')
                    result[t + 1][b][slot] = 1.0
                else:
                    if w == 2 or w >= cfg.vocab_size:
                        result[t + 1][b][cfg.vocab_size + t] = 5.0
                    else:
                        result[t + 1][b][w] = 1.0
        result_np = result.transpose((1, 0, 2))
        result = torch.from_numpy(result_np).float()
        return result

    def forward(self, z_enc_out, u_enc_out, np_state):
        """Score each discrete system action.

        :param z_enc_out: belief-span encoding (currently unused here)
        :param u_enc_out: utterance encoder outputs [T,B,H]
        :param np_state: numpy dialog-state vector of size STATE_DIM
        :return: action logits from self.proj
        """
        # last encoder step summarizes the utterance
        u_context = u_enc_out[-1, :, :]
        state_from_np = torch.from_numpy(np_state).float().unsqueeze(0)
        # Use torch.tanh/torch.sigmoid: the F.tanh / F.sigmoid aliases are
        # deprecated and emit warnings on modern PyTorch (same math).
        output0 = torch.tanh(self.proj_0(torch.cat([u_context, state_from_np], 1)))
        output1 = torch.sigmoid(self.proj_1(output0))
        output2 = torch.sigmoid(self.proj_2(output1))
        gen_score = self.proj(output2)
        # Fix: removed the unreachable dead block that followed this return
        # (a stray triple-quoted string plus a second `return proba,
        # last_hidden, gru_out` over names that were never defined here).
        return gen_score
class TSD(nn.Module):
    def __init__(self, embed_size, hidden_size, vocab_size, degree_size, layer_num, dropout_rate, z_length,
                 max_ts, action_size=dialog_config.SYS_ACTION_CARDINALITY, discrete_act=False, beam_search=False, teacher_force=100, **kwargs):
        """Assemble the Sequicity model: shared encoder, belief-span decoder,
        and either a generative or a discrete-action response decoder.

        Required kwargs: 'vocab', 'reader'; plus 'beam_size' and
        'eos_token_idx' when beam_search is enabled.
        """
        super().__init__()
        self.vocab = kwargs['vocab']
        self.reader = kwargs['reader']
        self.emb = nn.Embedding(vocab_size, embed_size)
        # decoder GRU input: degree vector + token embedding + two contexts
        self.dec_gru = nn.GRU(degree_size + embed_size + hidden_size * 2, hidden_size, dropout=dropout_rate)
        self.proj = nn.Linear(hidden_size * 3, vocab_size)
        self.proj_discrete = nn.Linear(hidden_size + dialog_config.STATE_DIM, action_size)
        self.u_encoder = SimpleDynamicEncoder(vocab_size, embed_size, hidden_size, layer_num, dropout_rate)
        self.z_decoder = BSpanDecoder(embed_size, hidden_size, vocab_size, dropout_rate, self.vocab)
        # both response decoders share dec_gru and the embedding table
        self.m_decoder = ResponseDecoder(embed_size, hidden_size, vocab_size, degree_size, dropout_rate,
                                         self.dec_gru, self.proj, self.emb, self.vocab)
        self.m_decoder_discrete = ResponseDecoder_discrete(embed_size, hidden_size, vocab_size, degree_size, dropout_rate,
                                         self.dec_gru, self.proj_discrete, self.emb, self.vocab)
        self.embed_size = embed_size

        self.z_length = z_length
        self.max_ts = max_ts
        self.discrete_act = discrete_act
        self.beam_search = beam_search
        self.teacher_force = teacher_force

        self.pr_loss = nn.NLLLoss(ignore_index=0)
        self.dec_loss = nn.NLLLoss(ignore_index=0)

        self.saved_log_policy = []

        if self.beam_search:
            self.beam_size = kwargs['beam_size']
            self.eos_token_idx = kwargs['eos_token_idx']
    def forward(self, u_input, u_input_np, m_input, m_input_np, z_input, u_len, m_len, turn_states,
                degree_input, mode, np_state, **kwargs):
        """Dispatch one dialogue turn by mode.

        'train'/'valid': supervised losses (loss, pr_loss, m_loss, turn_states).
        'test': decoded response/action plus bspan and probabilities; the
        return shape differs between discrete and generative heads.
        'rl': delegates to sampling-based decoding and returns its loss.
        """
        if mode == 'train' or mode == 'valid':
            pz_proba, pm_dec_proba, turn_states = \
                self.forward_turn(u_input, u_len, m_input=m_input, m_len=m_len, z_input=z_input, mode='train',
                                  turn_states=turn_states, degree_input=degree_input, u_input_np=u_input_np,
                                  m_input_np=m_input_np, **kwargs)
            loss, pr_loss, m_loss = self.supervised_loss(torch.log(pz_proba), torch.log(pm_dec_proba),
                                                         z_input, m_input)
            return loss, pr_loss, m_loss, turn_states
        elif mode == 'test':
            if self.discrete_act:
                m_output_index, pz_index, turn_states, pz_proba = self.forward_turn(u_input, u_len=u_len, z_input=z_input,
                                                                          mode='test',
                                                                          turn_states=turn_states,
                                                                          degree_input=degree_input,
                                                                          u_input_np=u_input_np,
                                                                          m_input_np=m_input_np,
                                                                          np_state=np_state,
                                                                          **kwargs
                                                                          )
                return m_output_index, pz_index, turn_states, pz_proba
            else:
                m_output_index, pz_index, turn_states, pz_proba, mt_proba = self.forward_turn(u_input, u_len=u_len, z_input=z_input,
                                                                          mode='test',
                                                                          turn_states=turn_states,
                                                                          degree_input=degree_input,
                                                                          u_input_np=u_input_np, m_input_np=m_input_np,
                                                                          **kwargs
                                                                          )
                return m_output_index, pz_index, turn_states, pz_proba, mt_proba
        elif mode == 'rl':
            loss = self.forward_turn(u_input, u_len=u_len, is_train=False, mode='rl',
                                     turn_states=turn_states,
                                     degree_input=degree_input,
                                     u_input_np=u_input_np, m_input_np=m_input_np,
                                     **kwargs
                                     )
            return loss
    def forward_turn(self, u_input, u_len, turn_states, mode, degree_input, u_input_np, m_input_np=None,
                     m_input=None, np_state=None, m_len=None, z_input=None, **kwargs):
        """
        compute required outputs for a single dialogue turn. Turn state{Dict} will be updated in each call.
        :param u_input_np:
        :param m_input_np:
        :param u_len:
        :param turn_states:
        :param u_input: [T,B]
        :param m_input: [T,B]
        :param z_input: [T,B]
        :return: mode-dependent (see TSD.forward)
        """
        # previous turn's belief span, if supplied, is encoded and attended
        prev_z_input = kwargs.get('prev_z_input', None)
        prev_z_input_np = kwargs.get('prev_z_input_np', None)
        prev_z_len = kwargs.get('prev_z_len', None)
        pv_z_emb = None
        batch_size = u_input.size(1)
        pv_z_enc_out = None

        if prev_z_input is not None:
            pv_z_enc_out, _, pv_z_emb = self.u_encoder(prev_z_input, prev_z_len)

        u_enc_out, u_enc_hidden, u_emb = self.u_encoder(u_input, u_len)
        last_hidden = u_enc_hidden[:-1]
        z_tm1 = cuda_(Variable(torch.ones(1, batch_size).long() * 3))  # GO_2 token
        m_tm1 = cuda_(Variable(torch.ones(1, batch_size).long()))  # GO token
        if mode == 'train':
            pz_dec_outs = []
            pz_proba = []
            z_length = z_input.size(0) if z_input is not None else self.z_length  # GO token
            hiddens = [None] * batch_size
            # teacher-forced bspan decoding; freeze each example's hidden
            # state at its EOS_Z2 position
            for t in range(z_length):
                pz_dec_out, last_hidden, proba = \
                    self.z_decoder(u_enc_out=u_enc_out, u_input_np=u_input_np,
                                   z_tm1=z_tm1, last_hidden=last_hidden,
                                   pv_z_enc_out=pv_z_enc_out, prev_z_input_np=prev_z_input_np,
                                   u_emb=u_emb, pv_z_emb=pv_z_emb, position=t)
                pz_proba.append(proba)
                pz_dec_outs.append(pz_dec_out)
                z_np = z_tm1.view(-1).cpu().data.numpy()
                for i in range(batch_size):
                    if z_np[i] == self.vocab.encode('EOS_Z2'):
                        hiddens[i] = last_hidden[:, i, :]
                z_tm1 = z_input[t].view(1, -1)
            for i in range(batch_size):
                if hiddens[i] is None:
                    hiddens[i] = last_hidden[:, i, :]
            last_hidden = torch.stack(hiddens, dim=1)
            z_input_np = z_input.cpu().data.numpy()

            pz_dec_outs = torch.cat(pz_dec_outs, dim=0)  # [Tz,B,H]
            pz_proba = torch.stack(pz_proba, dim=0)
            # P(m|z,u): response decoding with scheduled teacher forcing
            pm_dec_proba, m_dec_outs = [], []
            m_length = m_input.size(0)  # Tm
            for t in range(m_length):
                teacher_forcing = toss_(self.teacher_force)
                proba, last_hidden, dec_out = self.m_decoder(pz_dec_outs, u_enc_out, u_input_np, m_tm1,
                                                             degree_input, last_hidden, z_input_np)
                if teacher_forcing:
                    m_tm1 = m_input[t].view(1, -1)
                else:
                    _, m_tm1 = torch.topk(proba, 1)
                    m_tm1 = m_tm1.view(1, -1)
                pm_dec_proba.append(proba)
                m_dec_outs.append(dec_out)
            pm_dec_proba = torch.stack(pm_dec_proba, dim=0)  # [T,B,V]
            return pz_proba, pm_dec_proba, None
        else:
            # inference: decode the bspan (teacher-forced only if z_input given)
            z_length = z_input.size(0) if z_input is not None else None
            if z_input is None:
                use_predicted_zt = True
            else:
                use_predicted_zt = False
            pz_dec_outs, bspan_index, last_hidden, pz_proba = self.bspan_decoder(u_enc_out, z_tm1, last_hidden, u_input_np,
                                                                       pv_z_enc_out=pv_z_enc_out,
                                                                       prev_z_input_np=prev_z_input_np,
                                                                       u_emb=u_emb, pv_z_emb=pv_z_emb,
                                                                       z_length=z_length,
                                                                       use_predicted_zt=use_predicted_zt,
                                                                       z_input=z_input)
            pz_proba = torch.stack(pz_proba, dim=0)
            pz_dec_outs = torch.cat(pz_dec_outs, dim=0)
            # query the DB with the decoded bspan to get the match-degree vector
            degree_input = self.reader.db_degree_handler(bspan_index, kwargs['dial_id'])
            degree_input = cuda_(Variable(torch.from_numpy(degree_input).float()))
            if mode == 'test':
                if not self.discrete_act:
                    # NOTE(review): if self.beam_search is True this branch
                    # never assigns m_output_index/m_probas (the beam call is
                    # commented out) and the return below would raise — confirm
                    # beam search is unused on this path.
                    if not self.beam_search:
                        m_output_index, m_probas = self.greedy_decode(pz_dec_outs, u_enc_out, m_tm1, u_input_np, last_hidden,
                                                            degree_input, bspan_index)

                    return m_output_index, bspan_index, None, pz_proba, m_probas
                else:
                    act_logits = self.action_decode(pz_dec_outs, u_enc_out, np_state)
                    return act_logits, bspan_index, None, pz_proba

            elif mode == 'rl':
                return self.sampling_decode(pz_dec_outs, u_enc_out, m_tm1, u_input_np, last_hidden,
                                            degree_input, bspan_index)
def action_decode(self, pz_dec_outs, u_enc_out, np_state):
logits = self.m_decoder_discrete(pz_dec_outs, u_enc_out, np_state)
return logits
    def bspan_decoder(self, u_enc_out, z_tm1, last_hidden, u_input_np, pv_z_enc_out, prev_z_input_np, u_emb, pv_z_emb,
                      z_length=None, use_predicted_zt=True, z_input=None):
        """Decode the belief span token-by-token.

        When use_predicted_zt is False, z_input drives the next-step input
        (teacher forcing) and z_length must be given; otherwise the argmax
        prediction is fed back. Copy-slot indices (>= vocab_size) are
        resolved back to the source token for the returned `decoded` list
        but replaced by UNK before re-embedding.

        :return: (pz_dec_outs list, decoded token-id lists per example,
                  last_hidden, pz_proba list)
        """
        if not use_predicted_zt:
            assert z_input is not None
            assert z_length is not None
        pz_dec_outs = []
        pz_proba = []
        decoded = []
        batch_size = u_enc_out.size(1)
        hiddens = [None] * batch_size
        z_length = z_length if z_length is not None else cfg.z_length
        for t in range(z_length):
            pz_dec_out, last_hidden, proba = \
                self.z_decoder(u_enc_out=u_enc_out, u_input_np=u_input_np,
                               z_tm1=z_tm1, last_hidden=last_hidden, pv_z_enc_out=pv_z_enc_out,
                               prev_z_input_np=prev_z_input_np, u_emb=u_emb, pv_z_emb=pv_z_emb, position=t)
            pz_proba.append(proba)
            pz_dec_outs.append(pz_dec_out)
            z_proba, z_index = torch.topk(proba, 1)  # [B,1]
            z_index = z_index.data.view(-1)
            # map copy positions (>= vocab_size) back to the copied token id
            if prev_z_input_np is None:
                tmp = u_input_np  # [,B]
            else:
                tmp = np.concatenate((u_input_np, prev_z_input_np), axis=0)
            for i in range(z_index.size(0)):
                if z_index[i] >= cfg.vocab_size:
                    z_index[i] = torch.tensor(int(tmp[z_index[i] - cfg.vocab_size, i]))
            del tmp

            decoded.append(z_index.clone())

            # copy positions cannot be embedded; feed UNK back instead
            for i in range(z_index.size(0)):
                if z_index[i] >= cfg.vocab_size:
                    z_index[i] = 2  # unk
            z_np = z_tm1.view(-1).cpu().data.numpy()
            # freeze each example's hidden state at its EOS_Z2 position
            for i in range(batch_size):
                if z_np[i] == self.vocab.encode('EOS_Z2'):
                    hiddens[i] = last_hidden[:, i, :]
            if use_predicted_zt:
                z_tm1 = cuda_(Variable(z_index).view(1, -1))
            else:
                z_tm1 = z_input[t].view(1, -1)
        for i in range(batch_size):
            if hiddens[i] is None:
                hiddens[i] = last_hidden[:, i, :]
        last_hidden = torch.stack(hiddens, dim=1)
        if not use_predicted_zt:
            z_input_np = z_input.cpu().data.numpy()
        decoded = torch.stack(decoded, dim=0).transpose(0, 1)
        decoded = list(decoded)
        decoded = [list(_) for _ in decoded]
        return pz_dec_outs, decoded, last_hidden, pz_proba
    def greedy_decode(self, pz_dec_outs, u_enc_out, m_tm1, u_input_np, last_hidden, degree_input, bspan_index):
        """Greedy (argmax) response decoding for up to self.max_ts steps.

        Copy indices (>= vocab_size) are kept in the decoded output but
        replaced by UNK before being fed back as the next input.

        :return: (decoded token-id lists per example, per-step proba list)
        """
        decoded = []
        probas = []
        bspan_index_np = pad_sequences(bspan_index).transpose((1, 0))
        for t in range(self.max_ts):
            proba, last_hidden, _ = self.m_decoder(pz_dec_outs, u_enc_out, u_input_np, m_tm1,
                                                   degree_input, last_hidden, bspan_index_np)
            probas.append(proba)
            mt_proba, mt_index = torch.topk(proba, 1)  # [B,1]
            mt_index = mt_index.data.view(-1)
            decoded.append(mt_index.clone())
            for i in range(mt_index.size(0)):
                if mt_index[i] >= cfg.vocab_size:
                    mt_index[i] = 2  # unk
            m_tm1 = cuda_(Variable(mt_index).view(1, -1))
        decoded = torch.stack(decoded, dim=0).transpose(0, 1)
        decoded = list(decoded)
        return [list(_) for _ in decoded], probas
    def beam_search_decode_single(self, pz_dec_outs, u_enc_out, m_tm1, u_input_np, last_hidden, degree_input,
                                  bspan_index):
        """Beam-search response decoding for a single example (batch size 1).

        A finished hypothesis is only accepted if it mentions every
        requested slot (see beam_result_valid); otherwise it is kept as a
        fallback in `failed`. Returns the best hypothesis as a [1,T]
        tensor of token ids.
        """
        eos_token_id = self.vocab.encode(cfg.eos_m_token)
        batch_size = pz_dec_outs.size(1)
        if batch_size != 1:
            raise ValueError('"Beam search single" requires batch size to be 1')

        class BeamState:
            def __init__(self, score, last_hidden, decoded, length):
                """
                Beam state in beam decoding
                :param score: sum of log-probabilities
                :param last_hidden: last hidden
                :param decoded: list of *Variable[1*1]* of all decoded words
                :param length: current decoded sentence length
                """
                self.score = score
                self.last_hidden = last_hidden
                self.decoded = decoded
                self.length = length

            def update_clone(self, score_incre, last_hidden, decoded_t):
                decoded = copy.copy(self.decoded)
                decoded.append(decoded_t)
                clone = BeamState(self.score + score_incre, last_hidden, decoded, self.length + 1)
                return clone

        def beam_result_valid(decoded_t, bspan_index):
            # a hypothesis is valid only if every requested slot surfaces in it
            decoded_t = [_.view(-1).data[0] for _ in decoded_t]
            req_slots = self.get_req_slots(bspan_index)
            decoded_sentence = self.vocab.sentence_decode(decoded_t, cfg.eos_m_token)
            for req in req_slots:
                if req not in decoded_sentence:
                    return False
            return True

        def score_bonus(state, decoded, bspan_index):
            # flat per-token bonus counteracting the length penalty
            bonus = cfg.beam_len_bonus
            return bonus

        def soft_score_incre(score, turn):
            return score

        finished, failed = [], []
        states = []  # sorted by score decreasingly
        dead_k = 0
        states.append(BeamState(0, last_hidden, [m_tm1], 0))
        bspan_index_np = np.array(bspan_index).reshape(-1, 1)
        for t in range(self.max_ts):
            new_states = []
            k = 0
            while k < len(states) and k < self.beam_size - dead_k:
                state = states[k]
                last_hidden, m_tm1 = state.last_hidden, state.decoded[-1]
                proba, last_hidden, _ = self.m_decoder(pz_dec_outs, u_enc_out, u_input_np, m_tm1, degree_input,
                                                       last_hidden, bspan_index_np)

                proba = torch.log(proba)
                mt_proba, mt_index = torch.topk(proba, self.beam_size - dead_k)  # [1,K]
                for new_k in range(self.beam_size - dead_k):
                    score_incre = soft_score_incre(mt_proba[0][new_k].data[0], t) + score_bonus(state,
                                                                                               mt_index[0][new_k].data[
                                                                                                   0], bspan_index)
                    # prune: beam already full and this score cannot enter it
                    if len(new_states) >= self.beam_size - dead_k and state.score + score_incre < new_states[-1].score:
                        break
                    decoded_t = mt_index[0][new_k]
                    if decoded_t.data[0] >= cfg.vocab_size:
                        decoded_t.data[0] = 2  # unk
                    if self.vocab.decode(decoded_t.data[0]) == cfg.eos_m_token:
                        if beam_result_valid(state.decoded, bspan_index):
                            finished.append(state)
                            dead_k += 1
                        else:
                            failed.append(state)
                    else:
                        decoded_t = decoded_t.view(1, -1)
                        new_state = state.update_clone(score_incre, last_hidden, decoded_t)
                        new_states.append(new_state)

                k += 1
            if self.beam_size - dead_k < 0:
                break
            new_states = new_states[:self.beam_size - dead_k]
            new_states.sort(key=lambda x: -x.score)
            states = new_states

            if t == self.max_ts - 1 and not finished:
                finished = failed
                print('FAIL')
                if not finished:
                    finished.append(states[0])

        finished.sort(key=lambda x: -x.score)
        decoded_t = finished[0].decoded
        decoded_t = [_.view(-1).data[0] for _ in decoded_t]
        decoded_sentence = self.vocab.sentence_decode(decoded_t, cfg.eos_m_token)
        # print(decoded_sentence)
        generated = torch.cat(finished[0].decoded, dim=1).data  # [B=1, T]
        return generated
def beam_search_decode(self, pz_dec_outs, u_enc_out, m_tm1, u_input_np, last_hidden, degree_input, bspan_index):
vars = torch.split(pz_dec_outs, 1, dim=1), torch.split(u_enc_out, 1, dim=1), torch.split(
m_tm1, 1, dim=1), torch.split(last_hidden, 1, dim=1), torch.split(degree_input, 1, dim=0)
decoded = []
for i, (pz_dec_out_s, u_enc_out_s, m_tm1_s, last_hidden_s, degree_input_s) in enumerate(zip(*vars)):
decoded_s = self.beam_search_decode_single(pz_dec_out_s, u_enc_out_s, m_tm1_s,
u_input_np[:, i].reshape((-1, 1)),
last_hidden_s, degree_input_s, bspan_index[i])
decoded.append(decoded_s)
return [list(_.view(-1)) for _ in decoded]
def supervised_loss(self, pz_proba, pm_dec_proba, z_input, m_input):
pz_proba, pm_dec_proba = pz_proba[:, :, :cfg.vocab_size].contiguous(), pm_dec_proba[:, :,
:cfg.vocab_size].contiguous()
pr_loss = self.pr_loss(pz_proba.view(-1, pz_proba.size(2)), z_input.view(-1))
m_loss = self.dec_loss(pm_dec_proba.view(-1, pm_dec_proba.size(2)), m_input.view(-1))
loss = pr_loss + m_loss
return loss, pr_loss, m_loss
def self_adjust(self, epoch):
pass
# REINFORCEMENT fine-tuning with MC
def possible_reqs(self):
if cfg.dataset == 'camrest':
return ['address', 'phone', 'postcode', 'pricerange', 'area']
elif cfg.dataset == 'kvret':
req_by_intent = {
'weather': ['weather_attribute'],
'navigate': ['poi', 'traffic_info', 'address', 'distance'],
'schedule': ['event', 'date', 'time', 'party', 'agenda', 'room']
}
reqs = []
for value in req_by_intent.values():
reqs.extend(value)
return reqs
else:
raise ValueError('unknown dataset')
def get_req_slots(self, bspan_index):
reqs = self.possible_reqs()
reqs = set(self.vocab.sentence_decode(bspan_index).split()).intersection(reqs)
return [_ + '_SLOT' for _ in reqs]
def reward(self, m_tm1, decoded, bspan_index):
"""
The setting of the reward function is heuristic. It can be better optimized.
:param m_tm1:
:param decoded:
:param bspan_index:
:return:
"""
req_slots = self.get_req_slots(bspan_index)
m_tm1 = self.vocab.decode(m_tm1[0])
finished = m_tm1 == 'EOS_M'
decoded = [_.view(-1)[0] for _ in decoded]
decoded_sentence = self.vocab.sentence_decode(decoded, cfg.eos_m_token).split()
reward = -0.01 if cfg.dataset == 'camrest' else 0
'''
if not finished:
if m_tm1 in req_slots:
if decoded_sentence and m_tm1 not in decoded_sentence[:-1]:
reward = 1.0
'''
# some modification for reward function.
if m_tm1 in req_slots:
if decoded_sentence and m_tm1 not in decoded_sentence[:-1]:
reward += 1.0
else:
reward -= 1.0 if cfg.dataset == 'camrest' else 0 # repeat
return reward, finished
def sampling_decode(self, pz_dec_outs, u_enc_out, m_tm1, u_input_np, last_hidden, degree_input, bspan_index):
vars = torch.split(pz_dec_outs, 1, dim=1), torch.split(u_enc_out, 1, dim=1), torch.split(
m_tm1, 1, dim=1), torch.split(last_hidden, 1, dim=1), torch.split(degree_input, 1, dim=0)
batch_loss = []
sample_num = 1
for i, (pz_dec_out_s, u_enc_out_s, m_tm1_s, last_hidden_s, degree_input_s) in enumerate(zip(*vars)):
if not self.get_req_slots(bspan_index[i]):
continue
for j in range(sample_num):
loss = self.sampling_decode_single(pz_dec_out_s, u_enc_out_s, m_tm1_s,
u_input_np[:, i].reshape((-1, 1)),
last_hidden_s, degree_input_s, bspan_index[i])
batch_loss.append(loss)
if not batch_loss:
return None
else:
return sum(batch_loss) / len(batch_loss)
def sampling_decode_single(self, pz_dec_outs, u_enc_out, m_tm1, u_input_np, last_hidden, degree_input, bspan_index):
decoded = []
reward_sum = 0
log_probs = []
rewards = []
bspan_index_np = np.array(bspan_index).reshape(-1, 1)
for t in range(self.max_ts):
# reward
reward, finished = self.reward(m_tm1.data.view(-1), decoded, bspan_index)
reward_sum += reward
rewards.append(reward)
if t == self.max_ts - 1:
finished = True
if finished:
loss = self.finish_episode(log_probs, rewards)
return loss
# action
proba, last_hidden, _ = self.m_decoder(pz_dec_outs, u_enc_out, u_input_np, m_tm1,
degree_input, last_hidden, bspan_index_np)
proba = proba.squeeze(0) # [B,V]
dis = Categorical(proba)
action = dis.sample()
log_probs.append(dis.log_prob(action))
mt_index = action.data.view(-1)
decoded.append(mt_index.clone())
for i in range(mt_index.size(0)):
if mt_index[i] >= cfg.vocab_size:
mt_index[i] = 2 # unk
m_tm1 = cuda_(Variable(mt_index).view(1, -1))
def finish_episode(self, log_probas, saved_rewards):
R = 0
policy_loss = []
rewards = []
for r in saved_rewards:
R = r + 0.8 * R
rewards.insert(0, R)
rewards = torch.Tensor(rewards)
# rewards = (rewards - rewards.mean()) / (rewards.std() + np.finfo(np.float32).eps)
for log_prob, reward in zip(log_probas, rewards):
policy_loss.append(-log_prob * reward)
l = len(policy_loss)
policy_loss = torch.cat(policy_loss).sum()
return policy_loss / l
| 48.774436 | 144 | 0.54672 | import torch
from torch import nn
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
import math
from sequicity.config import global_config as cfg
import copy, random, time, logging
from torch.distributions import Categorical
from sequicity.reader import pad_sequences
import pdb
import simulator.dialog_config as dialog_config
import pdb
def cuda_(var):
return var.cuda() if cfg.cuda else var
def toss_(p):
return random.randint(0, 99) <= p
def nan(v):
if type(v) is float:
return v == float('nan')
return np.isnan(np.sum(v.data.cpu().numpy()))
def get_sparse_input_aug(x_input_np):
ignore_index = [0]
unk = 2
result = np.zeros((x_input_np.shape[0], x_input_np.shape[1], cfg.vocab_size + x_input_np.shape[0]),
dtype=np.float32)
result.fill(1e-10)
for t in range(x_input_np.shape[0]):
for b in range(x_input_np.shape[1]):
w = x_input_np[t][b]
if w not in ignore_index:
if w != unk:
result[t][b][x_input_np[t][b]] = 1.0
else:
result[t][b][cfg.vocab_size + t] = 1.0
result_np = result.transpose((1, 0, 2))
result = torch.from_numpy(result_np).float()
return result
def init_gru(gru):
gru.reset_parameters()
for _, hh, _, _ in gru.all_weights:
for i in range(0, hh.size(0), gru.hidden_size):
torch.nn.init.orthogonal_(hh[i:i + gru.hidden_size], gain=1)
class Attn(nn.Module):
def __init__(self, hidden_size):
super(Attn, self).__init__()
self.hidden_size = hidden_size
self.attn = nn.Linear(self.hidden_size * 2, hidden_size)
self.v = nn.Parameter(torch.zeros(hidden_size))
stdv = 1. / math.sqrt(self.v.size(0))
self.v.data.normal_(mean=0, std=stdv)
def forward(self, hidden, encoder_outputs, mask=False, inp_seqs=None, stop_tok=None, normalize=True):
encoder_outputs = encoder_outputs.transpose(0, 1)
attn_energies = self.score(hidden, encoder_outputs)
if True or not mask:
normalized_energy = F.softmax(attn_energies, dim=2)
else:
mask_idx = []
for b in range(inp_seqs.shape[1]):
for t in range(inp_seqs.shape[0] + 1):
if t == inp_seqs.shape[0] or inp_seqs[t, b] in stop_tok:
mask_idx.append(t)
break
mask = []
for mask_len in mask_idx:
mask.append([1.] * mask_len + [0.] * (inp_seqs.shape[0] - mask_len))
mask = cuda_(Variable(torch.FloatTensor(mask)))
attn_energies = attn_energies * mask.unsqueeze(1)
normalized_energy = F.softmax(attn_energies, dim=2)
context = torch.bmm(normalized_energy, encoder_outputs)
return context.transpose(0, 1)
def score(self, hidden, encoder_outputs):
max_len = encoder_outputs.size(1)
H = hidden.repeat(max_len, 1, 1).transpose(0, 1)
energy = torch.tanh(self.attn(torch.cat([H, encoder_outputs], 2)))
energy = energy.transpose(2, 1)
v = self.v.repeat(encoder_outputs.size(0), 1).unsqueeze(1)
energy = torch.bmm(v, energy)
return energy
class SimpleDynamicEncoder(nn.Module):
def __init__(self, input_size, embed_size, hidden_size, n_layers, dropout):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.embed_size = embed_size
self.n_layers = n_layers
self.dropout = dropout
self.embedding = nn.Embedding(input_size, embed_size)
self.gru = nn.GRU(embed_size, hidden_size, n_layers, dropout=self.dropout, bidirectional=True)
init_gru(self.gru)
def forward(self, input_seqs, input_lens, hidden=None):
batch_size = input_seqs.size(1)
embedded = self.embedding(input_seqs)
import pdb
if torch.isnan(embedded).sum() > 0:
pdb.set_trace()
embedded = embedded.transpose(0, 1)
sort_idx = np.argsort(-input_lens)
unsort_idx = cuda_(torch.LongTensor(np.argsort(sort_idx)))
input_lens = input_lens[sort_idx]
sort_idx = cuda_(torch.LongTensor(sort_idx))
embedded = embedded[sort_idx].transpose(0, 1)
packed = torch.nn.utils.rnn.pack_padded_sequence(embedded, input_lens)
outputs, hidden = self.gru(packed, hidden)
outputs, _ = torch.nn.utils.rnn.pad_packed_sequence(outputs)
outputs = outputs[:, :, :self.hidden_size] + outputs[:, :, self.hidden_size:]
outputs = outputs.transpose(0, 1)[unsort_idx].transpose(0, 1).contiguous()
hidden = hidden.transpose(0, 1)[unsort_idx].transpose(0, 1).contiguous()
return outputs, hidden, embedded
class BSpanDecoder(nn.Module):
def __init__(self, embed_size, hidden_size, vocab_size, dropout_rate, vocab):
super().__init__()
self.emb = nn.Embedding(vocab_size, embed_size)
if cfg.use_positional_embedding:
self.positional_embedding = nn.Embedding(cfg.max_ts + 1, embed_size)
init_pos_emb = self.position_encoding_init(cfg.max_ts + 1, embed_size)
self.positional_embedding.weight.data = init_pos_emb
self.gru = nn.GRU(hidden_size + embed_size, hidden_size, dropout=dropout_rate)
self.proj = nn.Linear(hidden_size * 2, vocab_size)
self.attn_u = Attn(hidden_size)
self.proj_copy1 = nn.Linear(hidden_size, hidden_size)
self.proj_copy2 = nn.Linear(hidden_size, hidden_size)
self.dropout_rate = dropout_rate
self.inp_dropout = nn.Dropout(self.dropout_rate)
init_gru(self.gru)
self.vocab = vocab
def position_encoding_init(self, n_position, d_pos_vec):
position_enc = np.array([[pos / np.power(10000, 2 * (j // 2) / d_pos_vec) for j in range(d_pos_vec)]
if pos != 0 else np.zeros(d_pos_vec) for pos in range(n_position)])
position_enc[1:, 0::2] = np.sin(position_enc[1:, 0::2])
position_enc[1:, 1::2] = np.cos(position_enc[1:, 1::2])
return torch.from_numpy(position_enc).type(torch.FloatTensor)
def forward(self, u_enc_out, z_tm1, last_hidden, u_input_np, pv_z_enc_out, prev_z_input_np, u_emb, pv_z_emb,
position):
sparse_u_input = Variable(get_sparse_input_aug(u_input_np), requires_grad=False)
if pv_z_enc_out is not None:
context = self.attn_u(last_hidden, torch.cat([pv_z_enc_out, u_enc_out], dim=0), mask=True,
inp_seqs=np.concatenate([prev_z_input_np, u_input_np], 0),
stop_tok=[self.vocab.encode('EOS_M')])
else:
context = self.attn_u(last_hidden, u_enc_out, mask=True, inp_seqs=u_input_np,
stop_tok=[self.vocab.encode('EOS_M')])
embed_z = self.emb(z_tm1)
if cfg.use_positional_embedding:
position_label = [position] * u_enc_out.size(1)
position_label = cuda_(Variable(torch.LongTensor(position_label))).view(1, -1)
pos_emb = self.positional_embedding(position_label)
embed_z = embed_z + pos_emb
gru_in = torch.cat([embed_z, context], 2)
gru_out, last_hidden = self.gru(gru_in, last_hidden)
gen_score = self.proj(torch.cat([gru_out, context], 2)).squeeze(0)
u_copy_score = torch.tanh(self.proj_copy1(u_enc_out.transpose(0, 1)))
u_copy_score = torch.matmul(u_copy_score, gru_out.squeeze(0).unsqueeze(2)).squeeze(2)
u_copy_score = u_copy_score.cpu()
u_copy_score_max = torch.max(u_copy_score, dim=1, keepdim=True)[0]
u_copy_score = torch.exp(u_copy_score - u_copy_score_max)
u_copy_score = torch.log(torch.bmm(u_copy_score.unsqueeze(1), sparse_u_input)).squeeze(
1) + u_copy_score_max
u_copy_score = cuda_(u_copy_score)
if pv_z_enc_out is None:
scores = F.softmax(torch.cat([gen_score, u_copy_score], dim=1), dim=1)
gen_score, u_copy_score = scores[:, :cfg.vocab_size], \
scores[:, cfg.vocab_size:]
proba = gen_score + u_copy_score[:, :cfg.vocab_size]
proba = torch.cat([proba, u_copy_score[:, cfg.vocab_size:]], 1)
else:
sparse_pv_z_input = Variable(get_sparse_input_aug(prev_z_input_np), requires_grad=False)
pv_z_copy_score = torch.tanh(self.proj_copy2(pv_z_enc_out.transpose(0, 1)))
pv_z_copy_score = torch.matmul(pv_z_copy_score, gru_out.squeeze(0).unsqueeze(2)).squeeze(2)
pv_z_copy_score = pv_z_copy_score.cpu()
pv_z_copy_score_max = torch.max(pv_z_copy_score, dim=1, keepdim=True)[0]
pv_z_copy_score = torch.exp(pv_z_copy_score - pv_z_copy_score_max)
pv_z_copy_score = torch.log(torch.bmm(pv_z_copy_score.unsqueeze(1), sparse_pv_z_input)).squeeze(
1) + pv_z_copy_score_max
pv_z_copy_score = cuda_(pv_z_copy_score)
scores = F.softmax(torch.cat([gen_score, u_copy_score, pv_z_copy_score], dim=1), dim=1)
gen_score, u_copy_score, pv_z_copy_score = scores[:, :cfg.vocab_size], \
scores[:,
cfg.vocab_size:2 * cfg.vocab_size + u_input_np.shape[0]], \
scores[:, 2 * cfg.vocab_size + u_input_np.shape[0]:]
proba = gen_score + u_copy_score[:, :cfg.vocab_size] + pv_z_copy_score[:, :cfg.vocab_size]
proba = torch.cat([proba, pv_z_copy_score[:, cfg.vocab_size:], u_copy_score[:, cfg.vocab_size:]], 1)
return gru_out, last_hidden, proba
class ResponseDecoder(nn.Module):
def __init__(self, embed_size, hidden_size, vocab_size, degree_size, dropout_rate, gru, proj, emb, vocab):
super().__init__()
self.emb = emb
self.attn_z = Attn(hidden_size)
self.attn_u = Attn(hidden_size)
self.gru = gru
init_gru(self.gru)
self.proj = proj
self.proj_copy1 = nn.Linear(hidden_size, hidden_size)
self.proj_copy2 = nn.Linear(hidden_size, hidden_size)
self.dropout_rate = dropout_rate
self.vocab = vocab
def get_sparse_selective_input(self, x_input_np):
result = np.zeros((x_input_np.shape[0], x_input_np.shape[1], cfg.vocab_size + x_input_np.shape[0]),
dtype=np.float32)
result.fill(1e-10)
reqs = ['address', 'phone', 'postcode', 'pricerange', 'area']
for t in range(x_input_np.shape[0] - 1):
for b in range(x_input_np.shape[1]):
w = x_input_np[t][b]
word = self.vocab.decode(w)
if word in reqs:
slot = self.vocab.encode(word + '_SLOT')
result[t + 1][b][slot] = 1.0
else:
if w == 2 or w >= cfg.vocab_size:
result[t + 1][b][cfg.vocab_size + t] = 5.0
else:
result[t + 1][b][w] = 1.0
result_np = result.transpose((1, 0, 2))
result = torch.from_numpy(result_np).float()
return result
def forward(self, z_enc_out, u_enc_out, u_input_np, m_t_input, degree_input, last_hidden, z_input_np):
sparse_z_input = Variable(self.get_sparse_selective_input(z_input_np), requires_grad=False)
m_embed = self.emb(m_t_input)
z_context = self.attn_z(last_hidden, z_enc_out, mask=True, stop_tok=[self.vocab.encode('EOS_Z2')],
inp_seqs=z_input_np)
u_context = self.attn_u(last_hidden, u_enc_out, mask=True, stop_tok=[self.vocab.encode('EOS_M')],
inp_seqs=u_input_np)
gru_in = torch.cat([m_embed, u_context, z_context, degree_input.unsqueeze(0)], dim=2)
gru_out, last_hidden = self.gru(gru_in, last_hidden)
gen_score = self.proj(torch.cat([z_context, u_context, gru_out], 2)).squeeze(0)
z_copy_score = torch.tanh(self.proj_copy2(z_enc_out.transpose(0, 1)))
z_copy_score = torch.matmul(z_copy_score, gru_out.squeeze(0).unsqueeze(2)).squeeze(2)
z_copy_score = z_copy_score.cpu()
z_copy_score_max = torch.max(z_copy_score, dim=1, keepdim=True)[0]
z_copy_score = torch.exp(z_copy_score - z_copy_score_max)
z_copy_score = torch.log(torch.bmm(z_copy_score.unsqueeze(1), sparse_z_input)).squeeze(
1) + z_copy_score_max
z_copy_score = cuda_(z_copy_score)
scores = F.softmax(torch.cat([gen_score, z_copy_score], dim=1), dim=1)
gen_score, z_copy_score = scores[:, :cfg.vocab_size], \
scores[:, cfg.vocab_size:]
proba = gen_score + z_copy_score[:, :cfg.vocab_size]
proba = torch.cat([proba, z_copy_score[:, cfg.vocab_size:]], 1)
return proba, last_hidden, gru_out
class ResponseDecoder_discrete(nn.Module):
def __init__(self, embed_size, hidden_size, vocab_size, degree_size, dropout_rate, gru, proj, emb, vocab):
super().__init__()
self.emb = emb
self.attn_z = Attn(hidden_size)
self.attn_u = Attn(hidden_size)
self.gru = gru
init_gru(self.gru)
self.proj_0 = nn.Linear(hidden_size+dialog_config.STATE_DIM, hidden_size+dialog_config.STATE_DIM)
self.proj_1 = nn.Linear(hidden_size+dialog_config.STATE_DIM, hidden_size+dialog_config.STATE_DIM)
self.proj_2 = nn.Linear(hidden_size+dialog_config.STATE_DIM, hidden_size+dialog_config.STATE_DIM)
self.proj = proj
self.proj_copy1 = nn.Linear(hidden_size, hidden_size)
self.proj_copy2 = nn.Linear(hidden_size, hidden_size)
self.dropout_rate = dropout_rate
self.vocab = vocab
def get_sparse_selective_input(self, x_input_np):
result = np.zeros((x_input_np.shape[0], x_input_np.shape[1], cfg.vocab_size + x_input_np.shape[0]),
dtype=np.float32)
result.fill(1e-10)
reqs = ['address', 'phone', 'postcode', 'pricerange', 'area']
for t in range(x_input_np.shape[0] - 1):
for b in range(x_input_np.shape[1]):
w = x_input_np[t][b]
word = self.vocab.decode(w)
if word in reqs:
slot = self.vocab.encode(word + '_SLOT')
result[t + 1][b][slot] = 1.0
else:
if w == 2 or w >= cfg.vocab_size:
result[t + 1][b][cfg.vocab_size + t] = 5.0
else:
result[t + 1][b][w] = 1.0
result_np = result.transpose((1, 0, 2))
result = torch.from_numpy(result_np).float()
return result
def forward(self, z_enc_out, u_enc_out, np_state):
state_from_np = torch.from_numpy(np_state).float().unsqueeze(0)
output0 = F.tanh(self.proj_0(torch.cat([u_context, state_from_np], 1)))
output1 = F.sigmoid(self.proj_1(output0))
output2 = F.sigmoid(self.proj_2(output1))
gen_score = self.proj(output2)t
class TSD(nn.Module):
def __init__(self, embed_size, hidden_size, vocab_size, degree_size, layer_num, dropout_rate, z_length,
max_ts, action_size=dialog_config.SYS_ACTION_CARDINALITY, discrete_act=False, beam_search=False, teacher_force=100, **kwargs):
super().__init__()
self.vocab = kwargs['vocab']
self.reader = kwargs['reader']
self.emb = nn.Embedding(vocab_size, embed_size)
self.dec_gru = nn.GRU(degree_size + embed_size + hidden_size * 2, hidden_size, dropout=dropout_rate)
self.proj = nn.Linear(hidden_size * 3, vocab_size)
self.proj_discrete = nn.Linear(hidden_size + dialog_config.STATE_DIM, action_size)
self.u_encoder = SimpleDynamicEncoder(vocab_size, embed_size, hidden_size, layer_num, dropout_rate)
self.z_decoder = BSpanDecoder(embed_size, hidden_size, vocab_size, dropout_rate, self.vocab)
self.m_decoder = ResponseDecoder(embed_size, hidden_size, vocab_size, degree_size, dropout_rate,
self.dec_gru, self.proj, self.emb, self.vocab)
self.m_decoder_discrete = ResponseDecoder_discrete(embed_size, hidden_size, vocab_size, degree_size, dropout_rate,
self.dec_gru, self.proj_discrete, self.emb, self.vocab)
self.embed_size = embed_size
self.z_length = z_length
self.max_ts = max_ts
self.discrete_act = discrete_act
self.beam_search = beam_search
self.teacher_force = teacher_force
self.pr_loss = nn.NLLLoss(ignore_index=0)
self.dec_loss = nn.NLLLoss(ignore_index=0)
self.saved_log_policy = []
if self.beam_search:
self.beam_size = kwargs['beam_size']
self.eos_token_idx = kwargs['eos_token_idx']
def forward(self, u_input, u_input_np, m_input, m_input_np, z_input, u_len, m_len, turn_states,
degree_input, mode, np_state, **kwargs):
if mode == 'train' or mode == 'valid':
pz_proba, pm_dec_proba, turn_states = \
self.forward_turn(u_input, u_len, m_input=m_input, m_len=m_len, z_input=z_input, mode='train',
turn_states=turn_states, degree_input=degree_input, u_input_np=u_input_np,
m_input_np=m_input_np, **kwargs)
loss, pr_loss, m_loss = self.supervised_loss(torch.log(pz_proba), torch.log(pm_dec_proba),
z_input, m_input)
return loss, pr_loss, m_loss, turn_states
elif mode == 'test':
if self.discrete_act:
m_output_index, pz_index, turn_states, pz_proba = self.forward_turn(u_input, u_len=u_len, z_input=z_input,
mode='test',
turn_states=turn_states,
degree_input=degree_input,
u_input_np=u_input_np,
m_input_np=m_input_np,
np_state=np_state,
**kwargs
)
return m_output_index, pz_index, turn_states, pz_proba
else:
m_output_index, pz_index, turn_states, pz_proba, mt_proba = self.forward_turn(u_input, u_len=u_len, z_input=z_input,
mode='test',
turn_states=turn_states,
degree_input=degree_input,
u_input_np=u_input_np, m_input_np=m_input_np,
**kwargs
)
return m_output_index, pz_index, turn_states, pz_proba, mt_proba
elif mode == 'rl':
loss = self.forward_turn(u_input, u_len=u_len, is_train=False, mode='rl',
turn_states=turn_states,
degree_input=degree_input,
u_input_np=u_input_np, m_input_np=m_input_np,
**kwargs
)
return loss
def forward_turn(self, u_input, u_len, turn_states, mode, degree_input, u_input_np, m_input_np=None,
m_input=None, np_state=None, m_len=None, z_input=None, **kwargs):
prev_z_input = kwargs.get('prev_z_input', None)
prev_z_input_np = kwargs.get('prev_z_input_np', None)
prev_z_len = kwargs.get('prev_z_len', None)
pv_z_emb = None
batch_size = u_input.size(1)
pv_z_enc_out = None
if prev_z_input is not None:
pv_z_enc_out, _, pv_z_emb = self.u_encoder(prev_z_input, prev_z_len)
u_enc_out, u_enc_hidden, u_emb = self.u_encoder(u_input, u_len)
last_hidden = u_enc_hidden[:-1]
z_tm1 = cuda_(Variable(torch.ones(1, batch_size).long() * 3))
m_tm1 = cuda_(Variable(torch.ones(1, batch_size).long()))
if mode == 'train':
pz_dec_outs = []
pz_proba = []
z_length = z_input.size(0) if z_input is not None else self.z_length
hiddens = [None] * batch_size
for t in range(z_length):
pz_dec_out, last_hidden, proba = \
self.z_decoder(u_enc_out=u_enc_out, u_input_np=u_input_np,
z_tm1=z_tm1, last_hidden=last_hidden,
pv_z_enc_out=pv_z_enc_out, prev_z_input_np=prev_z_input_np,
u_emb=u_emb, pv_z_emb=pv_z_emb, position=t)
pz_proba.append(proba)
pz_dec_outs.append(pz_dec_out)
z_np = z_tm1.view(-1).cpu().data.numpy()
for i in range(batch_size):
if z_np[i] == self.vocab.encode('EOS_Z2'):
hiddens[i] = last_hidden[:, i, :]
z_tm1 = z_input[t].view(1, -1)
for i in range(batch_size):
if hiddens[i] is None:
hiddens[i] = last_hidden[:, i, :]
last_hidden = torch.stack(hiddens, dim=1)
z_input_np = z_input.cpu().data.numpy()
pz_dec_outs = torch.cat(pz_dec_outs, dim=0)
pz_proba = torch.stack(pz_proba, dim=0)
pm_dec_proba, m_dec_outs = [], []
m_length = m_input.size(0)
for t in range(m_length):
teacher_forcing = toss_(self.teacher_force)
proba, last_hidden, dec_out = self.m_decoder(pz_dec_outs, u_enc_out, u_input_np, m_tm1,
degree_input, last_hidden, z_input_np)
if teacher_forcing:
m_tm1 = m_input[t].view(1, -1)
else:
_, m_tm1 = torch.topk(proba, 1)
m_tm1 = m_tm1.view(1, -1)
pm_dec_proba.append(proba)
m_dec_outs.append(dec_out)
pm_dec_proba = torch.stack(pm_dec_proba, dim=0)
return pz_proba, pm_dec_proba, None
else:
z_length = z_input.size(0) if z_input is not None else None
if z_input is None:
use_predicted_zt = True
else:
use_predicted_zt = False
pz_dec_outs, bspan_index, last_hidden, pz_proba = self.bspan_decoder(u_enc_out, z_tm1, last_hidden, u_input_np,
pv_z_enc_out=pv_z_enc_out,
prev_z_input_np=prev_z_input_np,
u_emb=u_emb, pv_z_emb=pv_z_emb,
z_length=z_length,
use_predicted_zt=use_predicted_zt,
z_input=z_input)
pz_proba = torch.stack(pz_proba, dim=0)
pz_dec_outs = torch.cat(pz_dec_outs, dim=0)
degree_input = self.reader.db_degree_handler(bspan_index, kwargs['dial_id'])
degree_input = cuda_(Variable(torch.from_numpy(degree_input).float()))
if mode == 'test':
if not self.discrete_act:
if not self.beam_search:
m_output_index, m_probas = self.greedy_decode(pz_dec_outs, u_enc_out, m_tm1, u_input_np, last_hidden,
degree_input, bspan_index)
return m_output_index, bspan_index, None, pz_proba, m_probas
else:
act_logits = self.action_decode(pz_dec_outs, u_enc_out, np_state)
return act_logits, bspan_index, None, pz_proba
elif mode == 'rl':
return self.sampling_decode(pz_dec_outs, u_enc_out, m_tm1, u_input_np, last_hidden,
degree_input, bspan_index)
def action_decode(self, pz_dec_outs, u_enc_out, np_state):
logits = self.m_decoder_discrete(pz_dec_outs, u_enc_out, np_state)
return logits
def bspan_decoder(self, u_enc_out, z_tm1, last_hidden, u_input_np, pv_z_enc_out, prev_z_input_np, u_emb, pv_z_emb,
z_length=None, use_predicted_zt=True, z_input=None):
if not use_predicted_zt:
assert z_input is not None
assert z_length is not None
pz_dec_outs = []
pz_proba = []
decoded = []
batch_size = u_enc_out.size(1)
hiddens = [None] * batch_size
z_length = z_length if z_length is not None else cfg.z_length
for t in range(z_length):
pz_dec_out, last_hidden, proba = \
self.z_decoder(u_enc_out=u_enc_out, u_input_np=u_input_np,
z_tm1=z_tm1, last_hidden=last_hidden, pv_z_enc_out=pv_z_enc_out,
prev_z_input_np=prev_z_input_np, u_emb=u_emb, pv_z_emb=pv_z_emb, position=t)
pz_proba.append(proba)
pz_dec_outs.append(pz_dec_out)
z_proba, z_index = torch.topk(proba, 1)
z_index = z_index.data.view(-1)
eamState:
def __init__(self, score, last_hidden, decoded, length):
self.score = score
self.last_hidden = last_hidden
self.decoded = decoded
self.length = length
def update_clone(self, score_incre, last_hidden, decoded_t):
decoded = copy.copy(self.decoded)
decoded.append(decoded_t)
clone = BeamState(self.score + score_incre, last_hidden, decoded, self.length + 1)
return clone
def beam_result_valid(decoded_t, bspan_index):
decoded_t = [_.view(-1).data[0] for _ in decoded_t]
req_slots = self.get_req_slots(bspan_index)
decoded_sentence = self.vocab.sentence_decode(decoded_t, cfg.eos_m_token)
for req in req_slots:
if req not in decoded_sentence:
return False
return True
def score_bonus(state, decoded, bspan_index):
bonus = cfg.beam_len_bonus
return bonus
def soft_score_incre(score, turn):
return score
finished, failed = [], []
states = []
dead_k = 0
states.append(BeamState(0, last_hidden, [m_tm1], 0))
bspan_index_np = np.array(bspan_index).reshape(-1, 1)
for t in range(self.max_ts):
new_states = []
k = 0
while k < len(states) and k < self.beam_size - dead_k:
state = states[k]
last_hidden, m_tm1 = state.last_hidden, state.decoded[-1]
proba, last_hidden, _ = self.m_decoder(pz_dec_outs, u_enc_out, u_input_np, m_tm1, degree_input,
last_hidden, bspan_index_np)
proba = torch.log(proba)
mt_proba, mt_index = torch.topk(proba, self.beam_size - dead_k)
for new_k in range(self.beam_size - dead_k):
score_incre = soft_score_incre(mt_proba[0][new_k].data[0], t) + score_bonus(state,
mt_index[0][new_k].data[
0], bspan_index)
if len(new_states) >= self.beam_size - dead_k and state.score + score_incre < new_states[-1].score:
break
decoded_t = mt_index[0][new_k]
if decoded_t.data[0] >= cfg.vocab_size:
decoded_t.data[0] = 2
if self.vocab.decode(decoded_t.data[0]) == cfg.eos_m_token:
if beam_result_valid(state.decoded, bspan_index):
finished.append(state)
dead_k += 1
else:
failed.append(state)
else:
decoded_t = decoded_t.view(1, -1)
new_state = state.update_clone(score_incre, last_hidden, decoded_t)
new_states.append(new_state)
k += 1
if self.beam_size - dead_k < 0:
break
new_states = new_states[:self.beam_size - dead_k]
new_states.sort(key=lambda x: -x.score)
states = new_states
if t == self.max_ts - 1 and not finished:
finished = failed
print('FAIL')
if not finished:
finished.append(states[0])
finished.sort(key=lambda x: -x.score)
decoded_t = finished[0].decoded
decoded_t = [_.view(-1).data[0] for _ in decoded_t]
decoded_sentence = self.vocab.sentence_decode(decoded_t, cfg.eos_m_token)
generated = torch.cat(finished[0].decoded, dim=1).data
return generated
def beam_search_decode(self, pz_dec_outs, u_enc_out, m_tm1, u_input_np, last_hidden, degree_input, bspan_index):
vars = torch.split(pz_dec_outs, 1, dim=1), torch.split(u_enc_out, 1, dim=1), torch.split(
m_tm1, 1, dim=1), torch.split(last_hidden, 1, dim=1), torch.split(degree_input, 1, dim=0)
decoded = []
for i, (pz_dec_out_s, u_enc_out_s, m_tm1_s, last_hidden_s, degree_input_s) in enumerate(zip(*vars)):
decoded_s = self.beam_search_decode_single(pz_dec_out_s, u_enc_out_s, m_tm1_s,
u_input_np[:, i].reshape((-1, 1)),
last_hidden_s, degree_input_s, bspan_index[i])
decoded.append(decoded_s)
return [list(_.view(-1)) for _ in decoded]
def supervised_loss(self, pz_proba, pm_dec_proba, z_input, m_input):
pz_proba, pm_dec_proba = pz_proba[:, :, :cfg.vocab_size].contiguous(), pm_dec_proba[:, :,
:cfg.vocab_size].contiguous()
pr_loss = self.pr_loss(pz_proba.view(-1, pz_proba.size(2)), z_input.view(-1))
m_loss = self.dec_loss(pm_dec_proba.view(-1, pm_dec_proba.size(2)), m_input.view(-1))
loss = pr_loss + m_loss
return loss, pr_loss, m_loss
def self_adjust(self, epoch):
pass
def possible_reqs(self):
if cfg.dataset == 'camrest':
return ['address', 'phone', 'postcode', 'pricerange', 'area']
elif cfg.dataset == 'kvret':
req_by_intent = {
'weather': ['weather_attribute'],
'navigate': ['poi', 'traffic_info', 'address', 'distance'],
'schedule': ['event', 'date', 'time', 'party', 'agenda', 'room']
}
reqs = []
for value in req_by_intent.values():
reqs.extend(value)
return reqs
else:
raise ValueError('unknown dataset')
def get_req_slots(self, bspan_index):
reqs = self.possible_reqs()
reqs = set(self.vocab.sentence_decode(bspan_index).split()).intersection(reqs)
return [_ + '_SLOT' for _ in reqs]
def reward(self, m_tm1, decoded, bspan_index):
req_slots = self.get_req_slots(bspan_index)
m_tm1 = self.vocab.decode(m_tm1[0])
finished = m_tm1 == 'EOS_M'
decoded = [_.view(-1)[0] for _ in decoded]
decoded_sentence = self.vocab.sentence_decode(decoded, cfg.eos_m_token).split()
reward = -0.01 if cfg.dataset == 'camrest' else 0
if m_tm1 in req_slots:
if decoded_sentence and m_tm1 not in decoded_sentence[:-1]:
reward += 1.0
else:
reward -= 1.0 if cfg.dataset == 'camrest' else 0
return reward, finished
def sampling_decode(self, pz_dec_outs, u_enc_out, m_tm1, u_input_np, last_hidden, degree_input, bspan_index):
vars = torch.split(pz_dec_outs, 1, dim=1), torch.split(u_enc_out, 1, dim=1), torch.split(
m_tm1, 1, dim=1), torch.split(last_hidden, 1, dim=1), torch.split(degree_input, 1, dim=0)
batch_loss = []
sample_num = 1
for i, (pz_dec_out_s, u_enc_out_s, m_tm1_s, last_hidden_s, degree_input_s) in enumerate(zip(*vars)):
if not self.get_req_slots(bspan_index[i]):
continue
for j in range(sample_num):
loss = self.sampling_decode_single(pz_dec_out_s, u_enc_out_s, m_tm1_s,
u_input_np[:, i].reshape((-1, 1)),
last_hidden_s, degree_input_s, bspan_index[i])
batch_loss.append(loss)
if not batch_loss:
return None
else:
return sum(batch_loss) / len(batch_loss)
def sampling_decode_single(self, pz_dec_outs, u_enc_out, m_tm1, u_input_np, last_hidden, degree_input, bspan_index):
    """Sample one response rollout for a single example and return its REINFORCE loss.

    At each step the categorical distribution over the vocabulary is
    sampled, the step reward is recorded, and decoding stops when ``reward``
    reports EOS_M or the ``self.max_ts`` step budget is exhausted, at which
    point the discounted policy loss is assembled by ``finish_episode``.
    """
    decoded = []
    reward_sum = 0
    log_probs = []
    rewards = []
    bspan_index_np = np.array(bspan_index).reshape(-1, 1)
    for t in range(self.max_ts):
        # Reward is computed for the *previous* token before sampling the next.
        reward, finished = self.reward(m_tm1.data.view(-1), decoded, bspan_index)
        reward_sum += reward
        rewards.append(reward)
        if t == self.max_ts - 1:
            finished = True  # force termination at the step budget
        if finished:
            loss = self.finish_episode(log_probs, rewards)
            return loss
        proba, last_hidden, _ = self.m_decoder(pz_dec_outs, u_enc_out, u_input_np, m_tm1,
                                               degree_input, last_hidden, bspan_index_np)
        proba = proba.squeeze(0)
        dis = Categorical(proba)
        action = dis.sample()
        log_probs.append(dis.log_prob(action))
        mt_index = action.data.view(-1)
        decoded.append(mt_index.clone())
        # Map out-of-vocabulary samples to index 2 (presumably UNK — confirm
        # against the vocabulary) before feeding the token back in.
        for i in range(mt_index.size(0)):
            if mt_index[i] >= cfg.vocab_size:
                mt_index[i] = 2
        m_tm1 = cuda_(Variable(mt_index).view(1, -1))
def finish_episode(self, log_probas, saved_rewards, gamma=0.8):
    """Compute the mean REINFORCE loss for one sampled episode.

    Discounted returns are accumulated over ``saved_rewards``
    (``R = r + gamma * R``) and each step's loss is ``-log_prob * R``;
    the summed loss is averaged over the number of sampled actions.

    Args:
        log_probas: per-step log-probability tensors of the sampled actions.
        saved_rewards: per-step scalar rewards; it may contain one extra
            terminal entry, which has no matching log-prob and is only
            folded into the returns (``zip`` truncates the pairing).
        gamma: discount factor; default 0.8 preserves the original
            hard-coded behavior.

    Returns:
        Scalar tensor: the mean policy-gradient loss.
    """
    R = 0
    policy_loss = []
    returns = []
    # NOTE(review): returns are accumulated in *forward* order, so the value
    # paired with the first log-prob weights the last reward highest.
    # Canonical REINFORCE iterates reversed(saved_rewards); the forward
    # order is kept as-is to preserve the original training behavior.
    for r in saved_rewards:
        R = r + gamma * R
        returns.insert(0, R)
    returns = torch.Tensor(returns)
    for log_prob, ret in zip(log_probas, returns):
        policy_loss.append(-log_prob * ret)
    num_steps = len(policy_loss)
    policy_loss = torch.cat(policy_loss).sum()
    return policy_loss / num_steps
| true | true |
f7340de05effdac17c1c33990b943f2aaef6a299 | 1,785 | py | Python | test/GenerativeGrammarTest.py | zigakleine/AmbientMusicForUniversities | 9008942aa846cd9e8e97615f0270c123f7e499e1 | [
"MIT"
] | 2 | 2021-08-15T12:07:13.000Z | 2021-08-16T06:46:37.000Z | test/GenerativeGrammarTest.py | zigakleine/AmbientMusicForUniversities | 9008942aa846cd9e8e97615f0270c123f7e499e1 | [
"MIT"
] | null | null | null | test/GenerativeGrammarTest.py | zigakleine/AmbientMusicForUniversities | 9008942aa846cd9e8e97615f0270c123f7e499e1 | [
"MIT"
] | null | null | null | '''
import re
import random
class ContextFreeGrammar:
    """A context-free grammar expanded by random derivation.

    Non-terminals are written ``<name>``; each maps to a list of
    productions, one of which is chosen uniformly at random on expansion.
    """

    def __init__(self, start_symbol, production_rules):
        self.start_symbol = start_symbol
        self.production_rules = production_rules

    def expand(self):
        """Derive a random terminal string starting from the start symbol."""
        return self.expand_rec(self.start_symbol)

    def expand_rec(self, symbol_to_expand):
        """Recursively rewrite ``symbol_to_expand`` until no non-terminals remain."""
        options = self.production_rules[symbol_to_expand]
        uniform = [1 / len(options)] * len(options)
        production = random.choices(options, uniform, k=1)[0]
        # Expand each non-terminal occurring in the chosen production.
        for symbol in re.findall(r'<[^>]+>', production):
            production = production.replace(symbol, self.expand_rec(symbol))
        return production
# Demo 1: a tiny English sentence grammar.
production_rules_1 = {
    "<start>": ["Today is a <adj> day. The sun shines and <animal_noise>."],
    "<adj>": ["beautiful", "nice", "wonderful", "good"],
    "<animal_noise>": ["birds are chirping", "birds are singing", "dogs are barking", "cats are purring"]
}
start_symbol_1 = "<start>"

# Demo 2: balanced bracket strings (may recurse arbitrarily deep).
production_rules_2 = {
    "<S>": ["<S><S>", "()", "(<S>)", "[]", "[<S>]"],
}
start_symbol_2 = "<S>"

# Demo 3: random note sequences (terminates via the empty production).
production_rules_3 = {
    "<start>": ["<start> C", "<start> D", "<start> E", "<start> F", "<start> G", "<start> A", "<start> B", ""],
}
start_symbol_3 = "<start>"

cfg = ContextFreeGrammar(start_symbol_1, production_rules_1)
print(cfg.expand())
cfg_2 = ContextFreeGrammar(start_symbol_2, production_rules_2)
print(cfg_2.expand())
cfg_3 = ContextFreeGrammar(start_symbol_3, production_rules_3)
print(cfg_3.expand())
''' | 26.25 | 111 | 0.672829 | true | true | |
f7340dff6be1d38ab9659dc6728048e7bf4164b4 | 2,045 | py | Python | jp.atcoder/abc116/abc116_c/8404003.py | kagemeka/atcoder-submissions | 91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e | [
"MIT"
] | 1 | 2022-02-09T03:06:25.000Z | 2022-02-09T03:06:25.000Z | jp.atcoder/abc116/abc116_c/8404003.py | kagemeka/atcoder-submissions | 91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e | [
"MIT"
] | 1 | 2022-02-05T22:53:18.000Z | 2022-02-09T01:29:30.000Z | jp.atcoder/abc116/abc116_c/8404003.py | kagemeka/atcoder-submissions | 91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e | [
"MIT"
] | null | null | null | # 2019-11-12 01:01:32(JST)
import sys
# import collections
# import math
# from string import ascii_lowercase, ascii_uppercase, digits
# from bisect import bisect_left as bi_l, bisect_right as bi_r
# import itertools
# from functools import reduce
# import operator as op
# from scipy.misc import comb # float
# import numpy as np
def solve(n, h):
    """Minimum number of "water a contiguous range by 1" operations to reach heights ``h``.

    Equivalent to summing the heights of local maxima and subtracting the
    heights of local minima, with plateau runs collapsed via the ``flat``
    state machine.

    Bug fix versus the original: the ``flat == -1`` branch compared
    ``h[i] > h[h+1]`` (indexing with the list itself, a TypeError when
    reached); it now correctly compares ``h[i] > h[i+1]``.
    """
    if n == 1:
        return h[0]
    mountain = [0]
    valley = [0]
    flat = 0
    # Classify the left edge: all-flat sequences are answered immediately,
    # and a descending start makes h[0] a mountain.
    i = 0
    while True:
        if h[i] == h[i + 1]:
            if i < n - 2:
                i += 1
                continue
            else:
                return h[i]  # entire sequence is flat
        elif h[i] < h[i + 1]:
            break
        else:
            mountain.append(h[0])
            break
    # Classify the right edge symmetrically: an ascending end makes h[-1] a mountain.
    i = -1
    while True:
        if h[i - 1] == h[i]:
            i -= 1
            continue
        elif h[i - 1] > h[i]:
            break
        else:
            mountain.append(h[-1])
            break
    # Interior scan; ``flat`` remembers whether we are on a plateau that was
    # entered ascending (+1) or descending (-1).
    for i in range(1, n - 1):
        if flat == 0:
            if h[i - 1] < h[i] > h[i + 1]:
                mountain.append(h[i])
            elif h[i - 1] > h[i] < h[i + 1]:
                valley.append(h[i])
            elif h[i - 1] < h[i] == h[i + 1]:
                flat = 1
            elif h[i - 1] > h[i] == h[i + 1]:
                flat = -1
        elif flat == 1:
            if h[i] > h[i + 1]:
                mountain.append(h[i])  # plateau ends descending: it was a mountain
                flat = 0
            elif h[i] < h[i + 1]:
                flat = 0
        elif flat == -1:
            if h[i] < h[i + 1]:
                valley.append(h[i])  # plateau ends ascending: it was a valley
                flat = 0
            elif h[i] > h[i + 1]:  # fixed: original read h[h+1]
                flat = 0
    return sum(mountain) - sum(valley)


def main():
    """Read ``n h_1 .. h_n`` from stdin and print the minimum operation count."""
    n, *h = [int(x) for x in sys.stdin.read().split()]
    print(solve(n, h))


if __name__ == "__main__":
    main()
import sys
main():
n, *h = [int(x) for x in sys.stdin.read().split()]
if n == 1:
print(h[0])
sys.exit()
mountain = [0]
valley = [0]
flat = 0
i = 0
while True:
if h[i] == h[i+1]:
if i < n-2:
i += 1
continue
else:
print(h[i])
sys.exit()
elif h[i] < h[i+1]:
break
else:
mountain.append(h[0])
break
i = -1
while True:
if h[i-1] == h[i]:
i -= 1
continue
elif h[i-1] > h[i]:
break
else:
mountain.append(h[-1])
break
for i in range(1, n-1):
if flat == 0:
if h[i-1] < h[i] > h[i+1]:
mountain.append(h[i])
elif h[i-1] > h[i] < h[i+1]:
valley.append(h[i])
elif h[i-1] < h[i] == h[i+1]:
flat = 1
elif h[i-1] > h[i] == h[i+1]:
flat = -1
else:
if flat == 1:
if h[i] > h[i+1]:
mountain.append(h[i])
flat = 0
elif h[i] < h[i+1]:
flat = 0
else:
continue
elif flat == -1:
if h[i] < h[i+1]:
valley.append(h[i])
flat = 0
elif h[i] > h[h+1]:
flat = 0
else:
continue
ans = sum(mountain) - sum(valley)
print(ans)
if __name__ == "__main__":
main()
| true | true |
f7340e4cab27af3a24dc37ceb74bd27979d5d231 | 2,743 | py | Python | migrations/versions/2f182d32410d_.py | vasyanch/stepik_flask-delivery | 975f3b42ad7a19df420f5c03fe4e3d39bfe27988 | [
"MIT"
] | null | null | null | migrations/versions/2f182d32410d_.py | vasyanch/stepik_flask-delivery | 975f3b42ad7a19df420f5c03fe4e3d39bfe27988 | [
"MIT"
] | null | null | null | migrations/versions/2f182d32410d_.py | vasyanch/stepik_flask-delivery | 975f3b42ad7a19df420f5c03fe4e3d39bfe27988 | [
"MIT"
] | null | null | null | """empty message
Revision ID: 2f182d32410d
Revises:
Create Date: 2020-04-29 01:27:52.576633
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '2f182d32410d'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    """Create the initial schema: categories, dishes, users, orders and join tables."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('categories',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('title', sa.String(), nullable=False),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('dishes',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('title', sa.String(), nullable=False),
    sa.Column('price', sa.Integer(), nullable=False),
    sa.Column('description', sa.String(), nullable=False),
    sa.Column('picture', sa.String(), nullable=False),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('users',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('email', sa.String(), nullable=False),
    sa.Column('password_hash', sa.String(), nullable=False),
    sa.PrimaryKeyConstraint('id')
    )
    # Many-to-many link between dishes and categories.
    op.create_table('dish_category_association',
    sa.Column('Dish', sa.Integer(), nullable=False),
    sa.Column('Category', sa.Integer(), nullable=False),
    sa.ForeignKeyConstraint(['Category'], ['categories.id'], ),
    sa.ForeignKeyConstraint(['Dish'], ['dishes.id'], )
    )
    op.create_table('orders',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('date', sa.DateTime(), nullable=False),
    sa.Column('order_sum', sa.Integer(), nullable=False),
    sa.Column('status', sa.String(), nullable=False),
    sa.Column('name', sa.String(), nullable=False),
    sa.Column('email', sa.String(), nullable=False),
    sa.Column('phone', sa.String(), nullable=False),
    sa.Column('address', sa.String(), nullable=False),
    sa.Column('user_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    # Order line items: dish + quantity per order.
    op.create_table('dishs_in_order',
    sa.Column('dish_id', sa.Integer(), nullable=False),
    sa.Column('order_id', sa.Integer(), nullable=False),
    sa.Column('amount', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['dish_id'], ['dishes.id'], ),
    sa.ForeignKeyConstraint(['order_id'], ['orders.id'], ),
    sa.PrimaryKeyConstraint('dish_id', 'order_id')
    )
    # ### end Alembic commands ###
def downgrade():
    """Drop all tables in reverse dependency order (children before parents)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('dishs_in_order')
    op.drop_table('orders')
    op.drop_table('dish_category_association')
    op.drop_table('users')
    op.drop_table('dishes')
    op.drop_table('categories')
    # ### end Alembic commands ###
| 34.721519 | 65 | 0.658768 | from alembic import op
import sqlalchemy as sa
revision = '2f182d32410d'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
op.create_table('dishes',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('title', sa.String(), nullable=False),
sa.Column('price', sa.Integer(), nullable=False),
sa.Column('description', sa.String(), nullable=False),
sa.Column('picture', sa.String(), nullable=False),
sa.PrimaryKeyConstraint('id')
)
op.create_table('users',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('email', sa.String(), nullable=False),
sa.Column('password_hash', sa.String(), nullable=False),
sa.PrimaryKeyConstraint('id')
)
op.create_table('dish_category_association',
sa.Column('Dish', sa.Integer(), nullable=False),
sa.Column('Category', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['Category'], ['categories.id'], ),
sa.ForeignKeyConstraint(['Dish'], ['dishes.id'], )
)
op.create_table('orders',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('date', sa.DateTime(), nullable=False),
sa.Column('order_sum', sa.Integer(), nullable=False),
sa.Column('status', sa.String(), nullable=False),
sa.Column('name', sa.String(), nullable=False),
sa.Column('email', sa.String(), nullable=False),
sa.Column('phone', sa.String(), nullable=False),
sa.Column('address', sa.String(), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('dishs_in_order',
sa.Column('dish_id', sa.Integer(), nullable=False),
sa.Column('order_id', sa.Integer(), nullable=False),
sa.Column('amount', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['dish_id'], ['dishes.id'], ),
sa.ForeignKeyConstraint(['order_id'], ['orders.id'], ),
sa.PrimaryKeyConstraint('dish_id', 'order_id')
)
| true | true |
f7340ec44f8cb1d35e72adb5e22372f0c8339663 | 928 | py | Python | neigh1/signals.py | SteveMitto/neighborhood | 1f79c0cb807ffd91b80e2d663ab142572f7f1931 | [
"MIT"
] | null | null | null | neigh1/signals.py | SteveMitto/neighborhood | 1f79c0cb807ffd91b80e2d663ab142572f7f1931 | [
"MIT"
] | 8 | 2020-06-06T00:20:09.000Z | 2022-02-10T10:40:46.000Z | neigh1/signals.py | SteveMitto/neighborhood | 1f79c0cb807ffd91b80e2d663ab142572f7f1931 | [
"MIT"
] | null | null | null | from django.dispatch import receiver
from django.db.models.signals import pre_save,post_save
from django.contrib.auth.models import User
from django.contrib.auth.signals import user_logged_out,user_logged_in
from .models import Profile
@receiver(post_save, sender=User)
def create_user_profile(sender, instance, created, **kwargs):
    """Create a Profile for every newly created User."""
    if created:
        Profile.objects.create(user=instance)
@receiver(post_save, sender=User)
def save_user_profile(sender, instance, **kwargs):
    """Persist the related Profile whenever the User is saved."""
    instance.profile.save()
@receiver(user_logged_out)
def remove_online_status(request, **kwargs):
    """Mark the user's profile offline on logout.

    Prefers the ``user`` argument supplied by the ``user_logged_out``
    signal: ``request`` may be None (and ``request.user`` unset) when
    logout is triggered programmatically.
    """
    user = kwargs.get('user') or request.user
    profile = user.profile
    profile.online = False
    profile.save()
@receiver(user_logged_in)
def add_online_status(request, **kwargs):
    """Mark the user's profile online on login.

    Prefers the ``user`` argument supplied by the ``user_logged_in``
    signal over ``request.user`` for robustness (mirrors
    ``remove_online_status``).
    """
    user = kwargs.get('user') or request.user
    profile = user.profile
    profile.online = True
    profile.save()
| 32 | 70 | 0.77694 | from django.dispatch import receiver
from django.db.models.signals import pre_save,post_save
from django.contrib.auth.models import User
from django.contrib.auth.signals import user_logged_out,user_logged_in
from .models import Profile
@receiver(post_save, sender=User)
def create_user_profile(sender, instance, created, **kwargs):
if created:
Profile.objects.create(user=instance)
@receiver(post_save, sender=User)
def save_user_profile(sender, instance, **kwargs):
instance.profile.save()
@receiver(user_logged_out)
def remove_online_status(request,**kwargs):
p_pk = request.user.profile.pk
online_status=Profile.objects.get(pk = p_pk)
online_status.online=False
online_status.save()
@receiver(user_logged_in)
def add_online_status(request,**kwargs):
p_pk = request.user.profile.pk
online_status=Profile.objects.get(pk = p_pk)
online_status.online=True
online_status.save()
| true | true |
f7340ecfbf45c6ede4d081d244d7681a1fc8a7ac | 2,209 | py | Python | sudoku/reduce.py | hawkfish/sudoku | eaae1aa3080032266db0fcfc8a6520a9cb5690fe | [
"MIT"
] | null | null | null | sudoku/reduce.py | hawkfish/sudoku | eaae1aa3080032266db0fcfc8a6520a9cb5690fe | [
"MIT"
] | null | null | null | sudoku/reduce.py | hawkfish/sudoku | eaae1aa3080032266db0fcfc8a6520a9cb5690fe | [
"MIT"
] | null | null | null | # !/usr/bin/python3
from sudoku.cells import *
from sudoku.regions import *
class ReduceError(BaseException):
    """Raised when a reduction empties a cell, i.e. the puzzle is inconsistent.

    NOTE(review): derives from BaseException, so a plain ``except Exception``
    will not catch it — confirm this is intentional before changing.
    """
    pass
# Remove all used values from a cell
def reduceCellByValues(cell, used):
    """Return cell's candidates with every already-used value removed.

    ``used`` is indexed by candidate value; a truthy entry removes it.
    Raises ReduceError if no candidate survives.
    """
    remaining = [value for value in cell if not used[value]]
    if not remaining:
        raise ReduceError
    return remaining
# Remove all illegal values from a cell by region
def reduceCellByRegion(cells, c, region):
    """Reduce cell ``c`` against the values already fixed in ``region``."""
    candidates = cells[c]
    if len(candidates) == 1:
        return candidates  # already solved: nothing to reduce
    return reduceCellByValues(candidates, findRegionValues(cells, region))
# Apply the row reduction rule
def reduceRow(cells, c):
    """Remove values already used in cell c's row; returns the mutated cells."""
    cells[c] = reduceCellByRegion(cells, c, iRows[row(c)])
    return cells

# Apply the column reduction rule
def reduceCol(cells, c):
    """Remove values already used in cell c's column; returns the mutated cells."""
    cells[c] = reduceCellByRegion(cells, c, iCols[col(c)])
    return cells

# Apply the box reduction rule
def reduceBox(cells, c):
    """Remove values already used in cell c's 3x3 box; returns the mutated cells."""
    cells[c] = reduceCellByRegion(cells, c, iBoxes[box(c)])
    return cells

# Apply the king's move reduction rule
# Any two cells separated by a king's move cannot contain the same digit
def reduceKing(cells, c):
    """Remove values used by any cell a king's move from c (chess-sudoku variant rule)."""
    cells[c] = reduceCellByRegion(cells, c, iKings[c])
    return cells

# Apply the knight's move reduction rule
# Any two cells separated by a knight's move cannot contain the same digit
def reduceKnight(cells, c):
    """Remove values used by any cell a knight's move from c (chess-sudoku variant rule)."""
    cells[c] = reduceCellByRegion(cells, c, iKnights[c])
    return cells
# Apply the adjacency move reduction rule:
# Any two orthogonally adjacent cells cannot contain consecutive digits
def reduceAdjacent(cells, c):
    """When cell c is solved, strip the two consecutive digits from each orthogonal neighbour."""
    cell = cells[c]
    if len(cell) == 1:
        value = cell[0]
        # Mark the digits one below and one above the solved value.
        used = [0] * 9
        if value > 0:
            used[value - 1] = 1
        if value < 8:
            used[value + 1] = 1
        # Visit neighbours in the original order: left, right, above, below.
        neighbours = (
            (hasLeft, leftOf),
            (hasRight, rightOf),
            (hasAbove, aboveOf),
            (hasBelow, belowOf),
        )
        for present, locate in neighbours:
            if present(c):
                n = locate(c)
                cells[n] = reduceCellByValues(cells[n], used)
    return cells
| 27.6125 | 77 | 0.645088 |
from sudoku.cells import *
from sudoku.regions import *
class ReduceError(BaseException):
pass
def reduceCellByValues(cell, used):
reduced = []
for value in cell:
if not used[value]: reduced.append(value)
if not len(reduced): raise ReduceError
return reduced
def reduceCellByRegion(cells, c, region):
cell = cells[c]
if len(cell) == 1: return cell
return reduceCellByValues(cell, findRegionValues(cells, region))
def reduceRow(cells, c):
cells[c] = reduceCellByRegion(cells, c, iRows[row(c)])
return cells
def reduceCol(cells, c):
cells[c] = reduceCellByRegion(cells, c, iCols[col(c)])
return cells
def reduceBox(cells, c):
cells[c] = reduceCellByRegion(cells, c, iBoxes[box(c)])
return cells
# Any two cells separated by a king's move cannot contain the same digit
def reduceKing(cells, c):
cells[c] = reduceCellByRegion(cells, c, iKings[c])
return cells
# Any two cells separated by a knight's move cannot contain the same digit
def reduceKnight(cells, c):
cells[c] = reduceCellByRegion(cells, c, iKnights[c])
return cells
def reduceAdjacent(cells, c):
cell = cells[c]
if len(cell) == 1:
value = cell[0]
used = [0 for v in range(9)]
if value > 0: used[value-1] = 1
if value < 8: used[value+1] = 1
if hasLeft(c):
l = leftOf(c)
cells[l] = reduceCellByValues(cells[l], used)
if hasRight(c):
r = rightOf(c)
cells[r] = reduceCellByValues(cells[r], used)
if hasAbove(c):
a = aboveOf(c)
cells[a] = reduceCellByValues(cells[a], used)
if hasBelow(c):
b = belowOf(c)
cells[b] = reduceCellByValues(cells[b], used)
return cells
| true | true |
f7340f0f2accd24dc284f64500dd1dfd6566f91a | 474 | py | Python | examples/object_detection_tflite.py | Lynchez/deepvision | d1d6242dc6f85c4e7a9190b2994fd80535a2c218 | [
"MIT"
] | 6 | 2021-05-04T12:30:26.000Z | 2021-09-11T15:46:56.000Z | examples/object_detection_tflite.py | Lynchez/fastcv | d1d6242dc6f85c4e7a9190b2994fd80535a2c218 | [
"MIT"
] | null | null | null | examples/object_detection_tflite.py | Lynchez/fastcv | d1d6242dc6f85c4e7a9190b2994fd80535a2c218 | [
"MIT"
] | null | null | null | import cv2
from deepvision import TFlite
# open webcam
webcam = cv2.VideoCapture(0)
tflite = TFlite()
# loop through frames
while webcam.isOpened():
# read frame from webcam
status, frame = webcam.read()
frame = tflite.detect_objects(frame)
# display output
cv2.imshow("Real-time object detection", frame)
# press "Q" to stop
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# release resources
webcam.release()
cv2.destroyAllWindows() | 22.571429 | 51 | 0.679325 | import cv2
from deepvision import TFlite
webcam = cv2.VideoCapture(0)
tflite = TFlite()
while webcam.isOpened():
status, frame = webcam.read()
frame = tflite.detect_objects(frame)
cv2.imshow("Real-time object detection", frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
webcam.release()
cv2.destroyAllWindows() | true | true |
f73410e401dbaa34b6ceee1385cd1bcc00af7447 | 966 | py | Python | beer/types/mempool_item.py | petrus-hanks/flax-blockchain | 6e180dc84ca24c757555c9947f44bd724b1af3eb | [
"Apache-2.0"
] | 8 | 2021-07-08T00:11:49.000Z | 2022-02-09T04:28:40.000Z | beer/types/mempool_item.py | petrus-hanks/flax-blockchain | 6e180dc84ca24c757555c9947f44bd724b1af3eb | [
"Apache-2.0"
] | 15 | 2021-07-29T06:37:42.000Z | 2021-08-29T17:26:08.000Z | beer/types/mempool_item.py | petrus-hanks/flax-blockchain | 6e180dc84ca24c757555c9947f44bd724b1af3eb | [
"Apache-2.0"
] | 3 | 2021-10-04T17:51:04.000Z | 2022-03-01T04:23:01.000Z | from dataclasses import dataclass
from typing import List
from beer.consensus.cost_calculator import NPCResult
from beer.types.blockchain_format.coin import Coin
from beer.types.blockchain_format.program import SerializedProgram
from beer.types.blockchain_format.sized_bytes import bytes32
from beer.types.spend_bundle import SpendBundle
from beer.util.ints import uint64
from beer.util.streamable import Streamable, streamable
@dataclass(frozen=True)
@streamable
class MempoolItem(Streamable):
    """An entry in the mempool: a spend bundle plus its validation artifacts.

    Items are ordered by ``fee_per_cost`` so callers can compare/evict by
    fee density.
    """
    spend_bundle: SpendBundle          # the transaction bundle itself
    fee: uint64                        # total fee offered by the bundle
    npc_result: NPCResult              # precomputed cost/conditions result
    cost: uint64                       # execution cost of the bundle
    spend_bundle_name: bytes32         # identifier of the bundle (see ``name``)
    additions: List[Coin]              # coins created by this bundle
    removals: List[Coin]               # coins spent by this bundle
    program: SerializedProgram

    def __lt__(self, other):
        # Order by fee density, not absolute fee.
        return self.fee_per_cost < other.fee_per_cost

    @property
    def fee_per_cost(self) -> float:
        """Fee offered per unit of execution cost."""
        return int(self.fee) / int(self.cost)

    @property
    def name(self) -> bytes32:
        """Alias for ``spend_bundle_name``."""
        return self.spend_bundle_name
from typing import List
from beer.consensus.cost_calculator import NPCResult
from beer.types.blockchain_format.coin import Coin
from beer.types.blockchain_format.program import SerializedProgram
from beer.types.blockchain_format.sized_bytes import bytes32
from beer.types.spend_bundle import SpendBundle
from beer.util.ints import uint64
from beer.util.streamable import Streamable, streamable
@dataclass(frozen=True)
@streamable
class MempoolItem(Streamable):
spend_bundle: SpendBundle
fee: uint64
npc_result: NPCResult
cost: uint64
spend_bundle_name: bytes32
additions: List[Coin]
removals: List[Coin]
program: SerializedProgram
def __lt__(self, other):
return self.fee_per_cost < other.fee_per_cost
@property
def fee_per_cost(self) -> float:
return int(self.fee) / int(self.cost)
@property
def name(self) -> bytes32:
return self.spend_bundle_name
| true | true |
f734112f18de9c64644d82da3c43167201edc406 | 6,823 | py | Python | kafka-utils/tests/bai_kafka_utils/test_fetcher_event.py | gavinmbell/benchmark-ai-1 | a697e67d68b843fe9350e55871dad867bab5d51d | [
"Apache-2.0"
] | 6 | 2020-09-29T09:03:04.000Z | 2022-03-14T06:52:25.000Z | kafka-utils/tests/bai_kafka_utils/test_fetcher_event.py | gavinmbell/benchmark-ai-1 | a697e67d68b843fe9350e55871dad867bab5d51d | [
"Apache-2.0"
] | null | null | null | kafka-utils/tests/bai_kafka_utils/test_fetcher_event.py | gavinmbell/benchmark-ai-1 | a697e67d68b843fe9350e55871dad867bab5d51d | [
"Apache-2.0"
] | 4 | 2020-10-01T07:49:22.000Z | 2021-06-16T19:44:12.000Z | from bai_kafka_utils.events import (
BenchmarkDoc,
VisitedService,
FetcherBenchmarkEvent,
DownloadableContent,
FetcherPayload,
FileSystemObject,
)
BIG_FETCHER_JSON = """{
"date": "Thu May 02 16:15:42 UTC 2019",
"authenticated": false,
"payload": {
"toml": {
"descriptor_filename": "example_descriptor2.toml",
"sha1": "be60cb85620fa041c1bfabd9a9b1c8c1d6be1c78",
"doc": "IyBCZW5jaG1hcYS90Zi1pbWFnZW5ldC8iCg==",
"verified": true,
"contents": {
"spec_version": "0.1.0",
"data": {
"sources": [
{
"path": "~/data/tf-imagenet/",
"src": "s3://bucket/imagenet/train"
},
{
"path": "~/data/tf-imagenet/",
"src": "s3://bucket/imagenet/validation"
}
],
"id": "imagenet"
},
"env": {
"privileged": false,
"extended_shm": true,
"docker_image": "user/repo:tag"
},
"info": {
"task_name": "Example benchmark",
"scheduling": "single_run",
"description": " Full job description."
},
"hardware": {
"distributed": {
"num_instances": 3
},
"strategy": "horovod",
"instance_type": "p3.8xlarge"
},
"ml": {
"args": "--model=resnet50_v2 --batch-size=32",
"benchmark_code": "python /root/train.sh"
}
}
},
"datasets": [
{
"src": "s3://bucket/imagenet/train",
"path": "~/data/tf-imagenet/"
},
{
"src": "s3://bucket/imagenet/validation",
"path": "~/data/tf-imagenet/"
}
],
"models": [
{
"src": "s3://bucket/model/inception",
"path": "/models/inception",
"md5": "5d41402abc4b2a76b9719d911017c592"
},
{
"src": "s3://bucket/models/mnist",
"path": "/models/mnist"
}
],
"scripts": [
{
"dst": "s3://script-exchange/foo.tar"
}
]
},
"tstamp": 1556814924121,
"client_username": "bellgav",
"action_id": "ffea52eb-c24b-4dd0-b32e-61230db34ad5",
"visited": [
{
"svc": "baictl-client",
"tstamp": "@@TSTAMP@@",
"version": "0.1.0-481dad2"
},
{
"svc": "bai-bff",
"tstamp": 1556814924121,
"version": "0.0.2"
}
],
"message_id": "007bd9f8-f564-4edb-bb48-7380ee562ffc",
"client_sha1": "c05467317b6765535f1ec60f0aee812d39b35dd2",
"client_id": "97e7eb322342626974fb171fc5793514b0aea789",
"client_version": "0.1.0-481dad2",
"type": "BAI_APP_BFF"
}"""
# Parsed TOML descriptor contents expected from the BIG_FETCHER_JSON fixture.
EXPECTED_FETCHER_CONTENTS = {
    "spec_version": "0.1.0",
    "data": {
        "sources": [
            {"path": "~/data/tf-imagenet/", "src": "s3://bucket/imagenet/train"},
            {"path": "~/data/tf-imagenet/", "src": "s3://bucket/imagenet/validation"},
        ],
        "id": "imagenet",
    },
    "env": {"privileged": False, "extended_shm": True, "docker_image": "user/repo:tag"},
    "info": {"task_name": "Example benchmark", "scheduling": "single_run", "description": " Full job description."},
    "hardware": {"distributed": {"num_instances": 3}, "strategy": "horovod", "instance_type": "p3.8xlarge"},
    "ml": {"args": "--model=resnet50_v2 --batch-size=32", "benchmark_code": "python /root/train.sh"},
}
# Descriptor document wrapper (base64 doc + sha1) expected from the "toml" field.
EXPECTED_FETCHER_DOC = BenchmarkDoc(
    doc="IyBCZW5jaG1hcYS90Zi1pbWFnZW5ldC8iCg==",
    sha1="be60cb85620fa041c1bfabd9a9b1c8c1d6be1c78",
    contents=EXPECTED_FETCHER_CONTENTS,
    verified=True,
    descriptor_filename="example_descriptor2.toml",
)
# Service-hop trail expected from the "visited" field.
EXPECTED_FETCHER_VISITED = [
    VisitedService(svc="baictl-client", tstamp="@@TSTAMP@@", version="0.1.0-481dad2"),
    VisitedService(svc="bai-bff", tstamp=1556814924121, version="0.0.2"),
]
EXPECTED_FETCHER_DATASETS = [
    DownloadableContent("s3://bucket/imagenet/train", path="~/data/tf-imagenet/"),
    DownloadableContent("s3://bucket/imagenet/validation", path="~/data/tf-imagenet/"),
]
EXPECTED_FETCHER_SCRIPTS = [FileSystemObject(dst="s3://script-exchange/foo.tar")]
EXPECTED_FETCHER_MODELS = [
    DownloadableContent(
        "s3://bucket/model/inception", path="/models/inception", md5="5d41402abc4b2a76b9719d911017c592"
    ),
    DownloadableContent("s3://bucket/models/mnist", path="/models/mnist"),
]
# Fully assembled event the deserializer is expected to produce.
EXPECTED_FETCHER_EVENT = FetcherBenchmarkEvent(
    action_id="ffea52eb-c24b-4dd0-b32e-61230db34ad5",
    message_id="007bd9f8-f564-4edb-bb48-7380ee562ffc",
    client_id="97e7eb322342626974fb171fc5793514b0aea789",
    client_version="0.1.0-481dad2",
    client_username="bellgav",
    authenticated=False,
    tstamp=1556814924121,
    visited=EXPECTED_FETCHER_VISITED,
    type="BAI_APP_BFF",
    payload=FetcherPayload(
        datasets=EXPECTED_FETCHER_DATASETS,
        scripts=EXPECTED_FETCHER_SCRIPTS,
        models=EXPECTED_FETCHER_MODELS,
        toml=EXPECTED_FETCHER_DOC,
    ),
)
def test_big_fetcher_json():
    """Deserializing the full BFF fixture yields the expected event object."""
    event = FetcherBenchmarkEvent.from_json(BIG_FETCHER_JSON)
    # Debug prints removed: pytest reports the object diff on failure.
    assert event == EXPECTED_FETCHER_EVENT
| 39.668605 | 116 | 0.44863 | from bai_kafka_utils.events import (
BenchmarkDoc,
VisitedService,
FetcherBenchmarkEvent,
DownloadableContent,
FetcherPayload,
FileSystemObject,
)
BIG_FETCHER_JSON = """{
"date": "Thu May 02 16:15:42 UTC 2019",
"authenticated": false,
"payload": {
"toml": {
"descriptor_filename": "example_descriptor2.toml",
"sha1": "be60cb85620fa041c1bfabd9a9b1c8c1d6be1c78",
"doc": "IyBCZW5jaG1hcYS90Zi1pbWFnZW5ldC8iCg==",
"verified": true,
"contents": {
"spec_version": "0.1.0",
"data": {
"sources": [
{
"path": "~/data/tf-imagenet/",
"src": "s3://bucket/imagenet/train"
},
{
"path": "~/data/tf-imagenet/",
"src": "s3://bucket/imagenet/validation"
}
],
"id": "imagenet"
},
"env": {
"privileged": false,
"extended_shm": true,
"docker_image": "user/repo:tag"
},
"info": {
"task_name": "Example benchmark",
"scheduling": "single_run",
"description": " Full job description."
},
"hardware": {
"distributed": {
"num_instances": 3
},
"strategy": "horovod",
"instance_type": "p3.8xlarge"
},
"ml": {
"args": "--model=resnet50_v2 --batch-size=32",
"benchmark_code": "python /root/train.sh"
}
}
},
"datasets": [
{
"src": "s3://bucket/imagenet/train",
"path": "~/data/tf-imagenet/"
},
{
"src": "s3://bucket/imagenet/validation",
"path": "~/data/tf-imagenet/"
}
],
"models": [
{
"src": "s3://bucket/model/inception",
"path": "/models/inception",
"md5": "5d41402abc4b2a76b9719d911017c592"
},
{
"src": "s3://bucket/models/mnist",
"path": "/models/mnist"
}
],
"scripts": [
{
"dst": "s3://script-exchange/foo.tar"
}
]
},
"tstamp": 1556814924121,
"client_username": "bellgav",
"action_id": "ffea52eb-c24b-4dd0-b32e-61230db34ad5",
"visited": [
{
"svc": "baictl-client",
"tstamp": "@@TSTAMP@@",
"version": "0.1.0-481dad2"
},
{
"svc": "bai-bff",
"tstamp": 1556814924121,
"version": "0.0.2"
}
],
"message_id": "007bd9f8-f564-4edb-bb48-7380ee562ffc",
"client_sha1": "c05467317b6765535f1ec60f0aee812d39b35dd2",
"client_id": "97e7eb322342626974fb171fc5793514b0aea789",
"client_version": "0.1.0-481dad2",
"type": "BAI_APP_BFF"
}"""
EXPECTED_FETCHER_CONTENTS = {
"spec_version": "0.1.0",
"data": {
"sources": [
{"path": "~/data/tf-imagenet/", "src": "s3://bucket/imagenet/train"},
{"path": "~/data/tf-imagenet/", "src": "s3://bucket/imagenet/validation"},
],
"id": "imagenet",
},
"env": {"privileged": False, "extended_shm": True, "docker_image": "user/repo:tag"},
"info": {"task_name": "Example benchmark", "scheduling": "single_run", "description": " Full job description."},
"hardware": {"distributed": {"num_instances": 3}, "strategy": "horovod", "instance_type": "p3.8xlarge"},
"ml": {"args": "--model=resnet50_v2 --batch-size=32", "benchmark_code": "python /root/train.sh"},
}
EXPECTED_FETCHER_DOC = BenchmarkDoc(
doc="IyBCZW5jaG1hcYS90Zi1pbWFnZW5ldC8iCg==",
sha1="be60cb85620fa041c1bfabd9a9b1c8c1d6be1c78",
contents=EXPECTED_FETCHER_CONTENTS,
verified=True,
descriptor_filename="example_descriptor2.toml",
)
EXPECTED_FETCHER_VISITED = [
VisitedService(svc="baictl-client", tstamp="@@TSTAMP@@", version="0.1.0-481dad2"),
VisitedService(svc="bai-bff", tstamp=1556814924121, version="0.0.2"),
]
EXPECTED_FETCHER_DATASETS = [
DownloadableContent("s3://bucket/imagenet/train", path="~/data/tf-imagenet/"),
DownloadableContent("s3://bucket/imagenet/validation", path="~/data/tf-imagenet/"),
]
EXPECTED_FETCHER_SCRIPTS = [FileSystemObject(dst="s3://script-exchange/foo.tar")]
EXPECTED_FETCHER_MODELS = [
DownloadableContent(
"s3://bucket/model/inception", path="/models/inception", md5="5d41402abc4b2a76b9719d911017c592"
),
DownloadableContent("s3://bucket/models/mnist", path="/models/mnist"),
]
EXPECTED_FETCHER_EVENT = FetcherBenchmarkEvent(
action_id="ffea52eb-c24b-4dd0-b32e-61230db34ad5",
message_id="007bd9f8-f564-4edb-bb48-7380ee562ffc",
client_id="97e7eb322342626974fb171fc5793514b0aea789",
client_version="0.1.0-481dad2",
client_username="bellgav",
authenticated=False,
tstamp=1556814924121,
visited=EXPECTED_FETCHER_VISITED,
type="BAI_APP_BFF",
payload=FetcherPayload(
datasets=EXPECTED_FETCHER_DATASETS,
scripts=EXPECTED_FETCHER_SCRIPTS,
models=EXPECTED_FETCHER_MODELS,
toml=EXPECTED_FETCHER_DOC,
),
)
def test_big_fetcher_json():
event = FetcherBenchmarkEvent.from_json(BIG_FETCHER_JSON)
print(event)
print(EXPECTED_FETCHER_EVENT)
assert event == EXPECTED_FETCHER_EVENT
| true | true |
f73412f4b6192da0844dc366a2b07c76cba2888e | 671 | py | Python | _from_pydot/lambdas/utils/dot_to_png.py | owasp-sbot/pbx-gs-python-utils | f448aa36c4448fc04d30c3a5b25640ea4d44a267 | [
"Apache-2.0"
] | 3 | 2018-12-14T15:43:46.000Z | 2019-04-25T07:44:58.000Z | _from_pydot/lambdas/utils/dot_to_png.py | owasp-sbot/pbx-gs-python-utils | f448aa36c4448fc04d30c3a5b25640ea4d44a267 | [
"Apache-2.0"
] | 1 | 2019-05-11T14:19:37.000Z | 2019-05-11T14:51:04.000Z | _from_pydot/lambdas/utils/dot_to_png.py | owasp-sbot/pbx-gs-python-utils | f448aa36c4448fc04d30c3a5b25640ea4d44a267 | [
"Apache-2.0"
] | 4 | 2018-12-27T04:54:14.000Z | 2019-05-11T14:07:47.000Z | from osbot_aws.apis.Lambda import Lambda
from osbot_aws.helpers.Lambda_Helpers import slack_message
def run(event, context):
channel = event.get('channel')
try:
dot_to_svg = Lambda('gw_bot.lambdas.dot_to_svg').invoke
svg_to_png = Lambda('gw_bot.lambdas.svg_to_png').invoke
svg = dot_to_svg(event)
result = svg_to_png({"svg": svg , "width": event.get("width")})
if result.get('image'):
return result['image']
return { 'error' : result}
except Exception as error:
slack_message(":red_circle: Error in dot_to_png: {0}".format(error), [], channel)
return { 'error': error } | 39.470588 | 89 | 0.634873 | from osbot_aws.apis.Lambda import Lambda
from osbot_aws.helpers.Lambda_Helpers import slack_message
def run(event, context):
channel = event.get('channel')
try:
dot_to_svg = Lambda('gw_bot.lambdas.dot_to_svg').invoke
svg_to_png = Lambda('gw_bot.lambdas.svg_to_png').invoke
svg = dot_to_svg(event)
result = svg_to_png({"svg": svg , "width": event.get("width")})
if result.get('image'):
return result['image']
return { 'error' : result}
except Exception as error:
slack_message(":red_circle: Error in dot_to_png: {0}".format(error), [], channel)
return { 'error': error } | true | true |
f7341316b65044218ec1dd53113d96b181f0c7e8 | 1,108 | py | Python | vb2py/test/testimports.py | ceprio/xl_vb2py | 899fec0301140fd8bd313e8c80b3fa839b3f5ee4 | [
"BSD-3-Clause"
] | null | null | null | vb2py/test/testimports.py | ceprio/xl_vb2py | 899fec0301140fd8bd313e8c80b3fa839b3f5ee4 | [
"BSD-3-Clause"
] | null | null | null | vb2py/test/testimports.py | ceprio/xl_vb2py | 899fec0301140fd8bd313e8c80b3fa839b3f5ee4 | [
"BSD-3-Clause"
] | null | null | null | import vb2py.vbparser
import unittest
class TestImports(unittest.TestCase):
# << Imports tests >>
def testImportClassToModule(self):
"""Import from class to module"""
self.proj = vb2py.vbparser.VBProject()
self.utils = vb2py.vbparser.VBCodeModule(modulename="utils")
self.cls = vb2py.vbparser.VBClassModule(modulename="Cls", classname="Cls")
#
self.utils.assignParent(self.proj)
self.cls.assignParent(self.proj)
self.utils.finalizeObject()
self.cls.finalizeObject()
#
utils = vb2py.vbparser.parseVB("""
Public Function Fact(x)
Dim c As New Cls
End Function
""", container=self.utils)
#
cls = vb2py.vbparser.parseVB("""
Public A
""", container=self.cls)
#
utils_code = self.utils.renderAsCode()
self.assertNotEqual(utils_code.find("import Cls"), -1)
# -- end -- << Imports tests >>
import vb2py.vbparser
vb2py.vbparser.log.setLevel(0) # Don't print all logging stuff
if __name__ == "__main__":
unittest.main()
| 30.777778 | 82 | 0.617329 | import vb2py.vbparser
import unittest
class TestImports(unittest.TestCase):
def testImportClassToModule(self):
self.proj = vb2py.vbparser.VBProject()
self.utils = vb2py.vbparser.VBCodeModule(modulename="utils")
self.cls = vb2py.vbparser.VBClassModule(modulename="Cls", classname="Cls")
self.utils.assignParent(self.proj)
self.cls.assignParent(self.proj)
self.utils.finalizeObject()
self.cls.finalizeObject()
utils = vb2py.vbparser.parseVB("""
Public Function Fact(x)
Dim c As New Cls
End Function
""", container=self.utils)
cls = vb2py.vbparser.parseVB("""
Public A
""", container=self.cls)
utils_code = self.utils.renderAsCode()
self.assertNotEqual(utils_code.find("import Cls"), -1)
import vb2py.vbparser
vb2py.vbparser.log.setLevel(0)
if __name__ == "__main__":
unittest.main()
| true | true |
f734148db4674d3e295a32e4c427f45aac085654 | 52,109 | py | Python | dataverse_repository_curation_assistant/dataverse_repository_curation_assistant_functions.py | jggautier/dataverse-automating-downloads | 40cf127e7771049165b21b732635cd35848eda5e | [
"CC0-1.0"
] | null | null | null | dataverse_repository_curation_assistant/dataverse_repository_curation_assistant_functions.py | jggautier/dataverse-automating-downloads | 40cf127e7771049165b21b732635cd35848eda5e | [
"CC0-1.0"
] | 1 | 2019-08-30T04:02:08.000Z | 2019-08-30T05:16:46.000Z | dataverse_repository_curation_assistant/dataverse_repository_curation_assistant_functions.py | jggautier/dataverse-automating-downloads | 40cf127e7771049165b21b732635cd35848eda5e | [
"CC0-1.0"
] | null | null | null | # Functions for the curation app
import csv
from dateutil.parser import parse
from functools import reduce
import json
import glob
import os
from os import listdir
import pandas as pd
from pathlib import Path
import re
import requests
import time
from tkinter import Tk, ttk, Frame, Label, IntVar, Checkbutton, filedialog, NORMAL, DISABLED
from tkinter import Listbox, MULTIPLE, StringVar, END, INSERT, N, E, S, W
from tkinter.ttk import Entry, Progressbar, OptionMenu, Combobox
from urllib.parse import urlparse
# Class for custom collapsiblePanel frame using tkinter widgets
class collapsiblePanel(Frame):
    """A tkinter frame with a clickable title bar that shows/hides its body.

    The panel is a title bar (label + toggle Checkbutton styled as a button)
    above a `subFrame` into which callers grid/pack their own widgets.
    """
    def __init__(self, parent, text='', default='closed', padx=0, pady=0, *args, **options):
        """Build the panel inside *parent*.

        text: label shown in the title bar.
        default: 'open' or 'closed' — initial visibility of subFrame.
        Remaining args/options are forwarded to the Frame constructor.
        """
        Frame.__init__(self, parent, *args, **options, padx=padx, pady=pady)
        # show tracks open (1) / closed (0) state via the toggle Checkbutton
        self.show = IntVar()
        self.titleFrame = ttk.Frame(self, relief='raised', borderwidth=1)
        self.titleFrame.pack(fill='x', expand=1)
        Label(self.titleFrame, text=text, width=40, anchor='w').pack(side='left', fill='x', expand=1)
        self.toggleButton = ttk.Checkbutton(
            self.titleFrame, width=5, command=self.toggle,
            variable=self.show, style='Toolbutton')
        self.toggleButton.pack(side='right')
        # subFrame holds the collapsible content; callers add widgets to it
        self.subFrame = Frame(self, borderwidth=1, relief='groove', bg='white', padx=10)
        if default == 'open':
            self.show.set(1)
            self.subFrame.pack(fill='x', expand=1)
            self.toggleButton.configure(text='▼')
        elif default == 'closed':
            self.show.set(0)
            self.toggleButton.configure(text='▲')
    def toggle(self):
        """Show or hide subFrame to match the toggle button's state."""
        if bool(self.show.get()):
            self.subFrame.pack(fill='x', expand=1)
            self.toggleButton.configure(text='▼')
        else:
            self.subFrame.forget()
            self.toggleButton.configure(text='▲')
def forget_widget(widget):
    """Remove *widget* from its grid layout if it still exists.

    Uses grid_forget, so the widget is only hidden from the layout and can
    be re-gridded later; it is not destroyed.
    """
    # winfo_exists returns 1 while the widget is alive, 0 after destruction;
    # the original compared '== 1' and carried a dead 'else: pass' branch
    if widget.winfo_exists():
        widget.grid_forget()
def improved_get(_dict, path, default=None):
    """Return the value at a dotted *path* inside nested dicts.

    String values are truncated to 10,000 characters (a common spreadsheet
    cell limit) and carriage returns are replaced with ' - ' so CSV rows
    stay intact. Returns *default* when any key on the path is missing or
    a non-dict is hit mid-path (the original raised TypeError there, and
    silently returned None for list/float leaf values).
    """
    for key in path.split('.'):
        try:
            _dict = _dict[key]
        except (KeyError, TypeError):
            return default
    if isinstance(_dict, str):
        return _dict[:10000].replace('\r', ' - ')
    # Non-string values (ints, dicts, lists, floats, ...) pass through as-is
    return _dict
def list_to_string(lst):
    """Join a list of strings into one comma-separated string."""
    return ', '.join(lst)
def convert_to_local_tz(timestamp, shortDate=False):
# Save local timezone to localTimezone variable
localTimezone = tz.tzlocal()
# Convert string to datetime object
timestamp = parse(timestamp)
# Convert timestamp to local timezone
timestamp = timestamp.astimezone(localTimezone)
if shortDate is True:
# Return timestamp in YYYY-MM-DD format
timestamp = timestamp.strftime('%Y-%m-%d')
return timestamp
def select_all(listbox):
    """Highlight every entry in the given tkinter Listbox."""
    # selection_set is the canonical tkinter name; select_set is its alias
    listbox.selection_set(0, END)
def clear_selections(listbox):
    """Deselect every entry in the given tkinter Listbox."""
    # 'end' is the literal value of tkinter's END constant
    listbox.selection_clear(0, 'end')
def get_installation_url(string):
    """Extract a Dataverse installation's base URL from *string*.

    Accepts either any URL on the installation (returns scheme://netloc) or
    an installation-menu entry like 'Demo Dataverse (https://demo...)' with
    the URL in parentheses. Returns None when neither form matches (the
    original fell off the end and returned None implicitly).
    """
    if string.startswith('http'):
        parsed = urlparse(string)
        return parsed.scheme + '://' + parsed.netloc
    if '(' in string:
        # Pull out the parenthesized URL and strip the parentheses.
        # Raw strings avoid the invalid escape sequences ('\(' '\)') the
        # original's non-raw sub pattern produced.
        installationUrl = re.search(r'\(.*\)', string).group()
        return re.sub(r'[()]', '', installationUrl)
    return None
def get_installation_list():
    """Return 'Name (URL)' strings for all installations in the IQSS map.

    Downloads the dataverse-installations JSON from GitHub and prepends a
    Demo Dataverse entry.
    """
    dataverseInstallationsJsonUrl = 'https://raw.githubusercontent.com/IQSS/dataverse-installations/master/data/data.json'
    data = requests.get(dataverseInstallationsJsonUrl).json()
    # One "Name (https://hostname)" entry per installation in the map data
    installationsList = [
        '%s (https://%s)' % (installation['name'], installation['hostname'])
        for installation in data['installations']]
    installationsList.insert(0, 'Demo Dataverse (https://demo.dataverse.org)')
    return installationsList
# Function for getting name of installation's root collection
# (assumming root dataverse's ID is 1, which isn't the case with UVA Dataverse)
def get_root_alias_name(url):
    """Return the alias of the installation's root collection.

    Assumes the root collection's database ID is 1, which is not true for
    UVA's installation — that hostname is special-cased to 'uva'.
    Non-UVA URLs trigger a Native API call, so this needs network access.
    """
    # UVA's root alias is 'uva' (its root collection's database ID is not 1)
    if 'dataverse.lib.virginia.edu' in url:
        rootAlias = 'uva'
    # Collection URL: rebuild the base URL, then ask for the collection with ID 1
    elif '/dataverse/' in url:
        parsed = urlparse(url)
        url = parsed.scheme + '://' + parsed.netloc + '/api/dataverses/1'
        response = requests.get(url)
        dataverseData = response.json()
        rootAlias = dataverseData['data']['alias']
    # Bare installation URL: append the endpoint directly
    elif '/dataverse/' not in url:
        url = '%s/api/dataverses/1' % (url)
        response = requests.get(url)
        dataverseData = response.json()
        rootAlias = dataverseData['data']['alias']
    return rootAlias
# Function for getting collection alias name of a given Dataverse Collection URL,
# including the "Root" collection
def get_alias_from_collection_url(url):
    """Return the collection alias for a Dataverse collection URL.

    URLs containing '/dataverse/' are parsed directly (no network call);
    the alias is the path segment after '/dataverse/', or '' when absent.
    Other URLs are treated as installation homepages: UVA's homepage maps
    to its known root alias 'uva', and any other homepage is resolved by
    asking the Native API for the collection with database ID 1 (the root
    collection on most installations).
    """
    if '/dataverse/' in url:
        # Collection URL: alias is the path segment right after '/dataverse/'
        pathParts = urlparse(url).path.split('/')
        try:
            return pathParts[2]
        except IndexError:
            return ''
    # Homepage URL: UVA's root collection does not have database ID 1
    if 'dataverse.lib.virginia.edu' in url:
        return 'uva'
    installationUrl = get_installation_url(url)
    response = requests.get('%s/api/dataverses/1' % (installationUrl))
    return response.json()['data']['alias']
# Returns True if collection alias is the installation's root collection or
# False if not (doesn't work with UVA)
def is_root_collection(url):
    """Return True when *url* points at the installation's root collection."""
    # A URL is the root collection exactly when its alias equals the root alias
    return get_alias_from_collection_url(url) == get_root_alias_name(url)
# Function that turns Dataverse installation URL, instalation URL or search URL into a Search API URL
def get_search_api_url(url, apiKey=None):
    """Turn an installation, collection or search-results URL into a Search API URL.

    Three input shapes are handled:
    - collection URL (has '/dataverse/', no 'q='): becomes /api/search?q=*
      with a subtree parameter for that collection;
    - installation URL (no '/dataverse/', no 'q='): becomes /api/search?q=*;
    - search-results URL (has 'q='): query and facet parameters are carried
      over, 'types' facets are mapped to the Search API's 'type' parameter.
    apiKey is accepted for interface symmetry but not used here.
    """
    # If URL is not a search url (doesn't contain 'q=') and contains /dataverse/, it's a Dataverse collection URL
    if 'q=' not in url and '/dataverse/' in url:
        # Remove the jsessionidString that sometimes appears in the URL
        try:
            jsessionidString = re.search(r';jsessionid=.*', url).group()
            url = url.replace(jsessionidString, '?')
        except AttributeError:
            pass
        # Get the Dataverse Collection name in the search URL
        dataversePart = re.search(r'\/dataverse\/.*', url).group()
        dataverseName = dataversePart.replace('/dataverse/', '')
        # Replace '/dataverse/' and the dataverse name with '/api/search?q=*' and add subtree parameter with dataverse name
        apiSearchURL = url.replace(dataversePart, '/api/search?q=*') + '&subtree=%s' % (dataverseName)
    # If URL is not a search URL (doesn't contain 'q=') and doesn't have /dataverse/, assume it's the URL of the installation
    if 'q=' not in url and '/dataverse/' not in url:
        apiSearchURL = url.replace('/dataverse.xhtml', '')
        apiSearchURL = apiSearchURL + '/api/search'
        # If entered installation URL ends with a forward slash, replace resulting double slash with a single slash
        apiSearchURL = apiSearchURL.replace('//api', '/api') + '?q=*'
    # If URL has 'q=', then assume it's a Search URL
    elif 'q=' in url:
        # Sometimes there's a slash before the ?q. If so, remove it
        url = url.replace('/?q', '?q')
        # If there's a jsessionid string, remove it
        try:
            jsessionidString = re.search(r';jsessionid=.*\?', url).group()
            url = url.replace(jsessionidString, '?')
        except AttributeError:
            pass
        # Get the Dataverse Collection name in the search URL
        # dataverseName = re.search(r'\/dataverse\/\w*\?q', url)
        dataverseName = re.search(r'\/dataverse\/.*\?q', url)
        dataverseName = dataverseName.group()
        subtree = dataverseName.replace('/dataverse/', '&subtree=').replace('?q', '')
        apiSearchURL = (
            url
            .replace(dataverseName, '/api/search?q')
            .replace('?q=&', '?q=*&')
            .replace('%3A', ':')
            .replace('%22', '"')
            .replace('%28', '(')
            .replace('%29', ')')
            + '&show_entity_ids=true'
            + subtree
        )
        # Remove any digits after any fq parameters
        apiSearchURL = re.sub('fq\d', 'fq', apiSearchURL)
        apiSearchURL = apiSearchURL + '&per_page=10&start=0'
        # Replace values of any "types" parameters into the Search API's "type" parameter
        try:
            dTypes = re.search(r'types=.*?&', apiSearchURL).group()
            dTypesList = dTypes.replace('types=', '').replace('&', '').split(':')
            dTypesString = ''
            for dType in dTypesList:
                # 'datasets' -> 'dataset' etc.: strip the plural 's'
                dType = '&type=%s' %(re.sub('s$', '', dType))
                dTypesString = dTypesString + dType
            apiSearchURL = apiSearchURL + dTypesString
        except AttributeError:
            pass
        # Remove dvObjectType and types parameters, which I think the Search API is ignoring
        apiSearchURL = re.sub('fq=dvObjectType:\(.*\)&', '', apiSearchURL)
        apiSearchURL = re.sub('types=.*?&', '', apiSearchURL)
    return apiSearchURL
# Function that converts as many common html codes as I could find into their human-readable strings
def convert_common_html_encoding(string):
    """Decode common percent-encoded codes into human-readable characters.

    Only the codes listed in the chain below are decoded (upper-case hex
    only); anything else is left untouched. Codes %80-%9F map to
    Windows-1252-style characters. The replacement order matters for inputs
    like '%2520', so keep the chain order when editing.
    """
    string = (
        string
        .replace('%20', ' ').replace('%21', '!').replace('%22', '\"').replace('%23', '#')
        .replace('%24', '$').replace('%25', '%').replace('%26', '&').replace('%27', '\'')
        .replace('%28', '(').replace('%29', ')').replace('%2A', '*').replace('%2B', '+')
        .replace('%2C', ',').replace('%2D', '-').replace('%2E', '.').replace('%2F', '/')
        .replace('%30', '0').replace('%31', '1').replace('%32', '2').replace('%33', '3')
        .replace('%34', '4').replace('%35', '5').replace('%36', '6').replace('%37', '7')
        .replace('%38', '8').replace('%39', '9').replace('%3A', ':').replace('%3B', ';')
        .replace('%3C', '<').replace('%3D', '=').replace('%3E', '>').replace('%3F', '?')
        .replace('%40', '@').replace('%41', 'A').replace('%42', 'B').replace('%43', 'C')
        .replace('%44', 'D').replace('%45', 'E').replace('%46', 'F').replace('%47', 'G')
        .replace('%48', 'H').replace('%49', 'I').replace('%4A', 'J').replace('%4B', 'K')
        .replace('%4C', 'L').replace('%4D', 'M').replace('%4E', 'N').replace('%4F', 'O')
        .replace('%50', 'P').replace('%51', 'Q').replace('%52', 'R').replace('%53', 'S')
        .replace('%54', 'T').replace('%55', 'U').replace('%56', 'V').replace('%57', 'W')
        .replace('%58', 'X').replace('%59', 'Y').replace('%5A', 'Z').replace('%5B', '[')
        .replace('%5C', '\\').replace('%5D', ']').replace('%5E', '^').replace('%5F', '_')
        .replace('%60', '`').replace('%61', 'a').replace('%62', 'b').replace('%63', 'c')
        .replace('%64', 'd').replace('%65', 'e').replace('%66', 'f').replace('%67', 'g')
        .replace('%68', 'h').replace('%69', 'i').replace('%6A', 'j').replace('%6B', 'k')
        .replace('%6C', 'l').replace('%6D', 'm').replace('%6E', 'n').replace('%6F', 'o')
        .replace('%70', 'p').replace('%71', 'q').replace('%72', 'r').replace('%73', 's')
        .replace('%74', 't').replace('%75', 'u').replace('%76', 'v').replace('%77', 'w')
        .replace('%78', 'x').replace('%79', 'y').replace('%7A', 'z').replace('%7B', '{')
        .replace('%7C', '|').replace('%7D', '}').replace('%7E', '~').replace('%80', '€')
        .replace('%82', '‚').replace('%83', 'ƒ').replace('%84', '„').replace('%85', '…')
        .replace('%86', '†').replace('%87', '‡').replace('%88', 'ˆ').replace('%89', '‰')
        .replace('%8A', 'Š').replace('%8B', '‹').replace('%8C', 'Œ').replace('%8E', 'Ž')
        .replace('%91', '‘').replace('%92', '’').replace('%93', '“').replace('%94', '”')
        .replace('%95', '•').replace('%96', '–').replace('%97', '—').replace('%98', '˜')
        .replace('%99', '™').replace('%9A', 'š').replace('%9B', '›').replace('%9C', 'œ')
        .replace('%9E', 'ž').replace('%9F', 'Ÿ').replace('%A1', '¡').replace('%A2', '¢')
        .replace('%A3', '£').replace('%A4', '¤').replace('%A5', '¥').replace('%A6', '¦')
        .replace('%A7', '§').replace('%A8', '¨').replace('%A9', '©').replace('%AA', 'ª')
        .replace('%AB', '«').replace('%AC', '¬').replace('%AE', '®').replace('%AF', '¯')
        .replace('%B0', '°').replace('%B1', '±').replace('%B2', '²').replace('%B3', '³')
        .replace('%B4', '´').replace('%B5', 'µ').replace('%B6', '¶').replace('%B7', '·')
        .replace('%B8', '¸').replace('%B9', '¹').replace('%BA', 'º').replace('%BB', '»')
        .replace('%BC', '¼').replace('%BD', '½').replace('%BE', '¾').replace('%BF', '¿')
        .replace('%C0', 'À').replace('%C1', 'Á').replace('%C2', 'Â').replace('%C3', 'Ã')
        .replace('%C4', 'Ä').replace('%C5', 'Å').replace('%C6', 'Æ').replace('%C7', 'Ç')
        .replace('%C8', 'È').replace('%C9', 'É').replace('%CA', 'Ê').replace('%CB', 'Ë')
        .replace('%CC', 'Ì').replace('%CD', 'Í').replace('%CE', 'Î').replace('%CF', 'Ï')
        .replace('%D0', 'Ð').replace('%D1', 'Ñ').replace('%D2', 'Ò').replace('%D3', 'Ó')
        .replace('%D4', 'Ô').replace('%D5', 'Õ').replace('%D6', 'Ö').replace('%D7', '×')
        .replace('%D8', 'Ø').replace('%D9', 'Ù').replace('%DA', 'Ú').replace('%DB', 'Û')
        .replace('%DC', 'Ü').replace('%DD', 'Ý').replace('%DE', 'Þ').replace('%DF', 'ß')
        .replace('%E0', 'à').replace('%E1', 'á').replace('%E2', 'â').replace('%E3', 'ã')
        .replace('%E4', 'ä').replace('%E5', 'å').replace('%E6', 'æ').replace('%E7', 'ç')
        .replace('%E8', 'è').replace('%E9', 'é').replace('%EA', 'ê').replace('%EB', 'ë')
        .replace('%EC', 'ì').replace('%ED', 'í').replace('%EE', 'î').replace('%EF', 'ï')
        .replace('%F0', 'ð').replace('%F1', 'ñ').replace('%F2', 'ò').replace('%F3', 'ó')
        .replace('%F4', 'ô').replace('%F5', 'õ').replace('%F6', 'ö').replace('%F7', '÷')
        .replace('%F8', 'ø').replace('%F9', 'ù').replace('%FA', 'ú').replace('%FB', 'û')
        .replace('%FC', 'ü').replace('%FD', 'ý').replace('%FE', 'þ').replace('%FF', 'ÿ')
    )
    return string
def convert_utf8bytes_to_characters(string):
    """Decode percent-encoded multi-byte UTF-8 sequences into characters.

    Handles only the two- and three-byte sequences listed below (upper-case
    hex only); anything else is left untouched. Intended to run before
    convert_common_html_encoding so multi-byte sequences are not broken up
    by the single-byte replacements.
    """
    string = (
        string
        .replace('%E2%82%AC', '€').replace('%E2%80%9A', '‚').replace('%C6%92', 'ƒ')
        .replace('%E2%80%A6', '…').replace('%E2%80%A0', '†').replace('%E2%80%A1', '‡')
        .replace('%E2%80%B0', '‰').replace('%C5%A0', 'Š').replace('%E2%80%B9', '‹')
        .replace('%C5%BD', 'Ž').replace('%E2%80%98', '‘').replace('%E2%80%99', '’')
        .replace('%E2%80%9D', '”').replace('%E2%80%A2', '•').replace('%E2%80%93', '–')
        .replace('%CB%9C', '˜').replace('%E2%84%A2', '™').replace('%C5%A1', 'š')
        .replace('%C5%93', 'œ').replace('%C5%BE', 'ž').replace('%C5%B8', 'Ÿ')
        .replace('%C2%A2', '¢').replace('%C2%A3', '£').replace('%C2%A4', '¤')
        .replace('%C2%A6', '¦').replace('%C2%A7', '§').replace('%C2%A8', '¨')
        .replace('%C2%AA', 'ª').replace('%C2%AB', '«').replace('%C2%AC', '¬')
        .replace('%C2%AE', '®').replace('%C2%AF', '¯').replace('%C2%B0', '°')
        .replace('%C2%B2', '²').replace('%C2%B3', '³').replace('%C2%B4', '´')
        .replace('%C2%B6', '¶').replace('%C2%B7', '·').replace('%C2%B8', '¸')
        .replace('%C2%BA', 'º').replace('%C2%BB', '»').replace('%C2%BC', '¼')
        .replace('%C2%BE', '¾').replace('%C2%BF', '¿').replace('%C3%80', 'À')
        .replace('%C3%82', 'Â').replace('%C3%83', 'Ã').replace('%C3%84', 'Ä')
        .replace('%C3%86', 'Æ').replace('%C3%87', 'Ç').replace('%C3%88', 'È')
        .replace('%C3%8A', 'Ê').replace('%C3%8B', 'Ë').replace('%C3%8C', 'Ì')
        .replace('%C3%8E', 'Î').replace('%C3%8F', 'Ï').replace('%C3%90', 'Ð')
        .replace('%C3%92', 'Ò').replace('%C3%93', 'Ó').replace('%C3%94', 'Ô')
        .replace('%C3%96', 'Ö').replace('%C3%97', '×').replace('%C3%98', 'Ø')
        .replace('%C3%9A', 'Ú').replace('%C3%9B', 'Û').replace('%C3%9C', 'Ü')
        .replace('%C3%9E', 'Þ').replace('%C3%9F', 'ß').replace('%C3%A0', 'à')
        .replace('%C3%A2', 'â').replace('%C3%A3', 'ã').replace('%C3%A4', 'ä')
        .replace('%C3%A6', 'æ').replace('%C3%A7', 'ç').replace('%C3%A8', 'è')
        .replace('%C3%AA', 'ê').replace('%C3%AB', 'ë').replace('%C3%AC', 'ì')
        .replace('%C3%8D', 'Í').replace('%C3%AE', 'î').replace('%C3%AF', 'ï')
        .replace('%C3%B0', 'ð').replace('%C3%B2', 'ò').replace('%C3%B3', 'ó')
        .replace('%C3%B4', 'ô').replace('%C3%B6', 'ö').replace('%C3%B7', '÷')
        .replace('%C3%B8', 'ø').replace('%C3%BA', 'ú').replace('%C3%BB', 'û')
        .replace('%C3%BC', 'ü').replace('%C3%BE', 'þ').replace('%C3%BF', 'ÿ')
    )
    return string
# Function that returns the params of a given Search API URL, to be used in requests calls
def get_params(apiSearchURL):
    """Split a Search API URL into a base URL and a requests-ready params dict.

    Returns {'baseUrl': str, 'params': dict}. 'fq' filter-query pieces are
    collected into a list under params['fq']; repeated 'type' values are
    collected into a list under params['type']. Percent-encoded values are
    decoded via the two converter helpers in this file.
    """
    params = {
        'baseUrl': '',
        'params': {}
    }
    fq = []
    # Split apiSearchURL to create list of params
    splitSearchURLList = re.split('\?|&fq|&', apiSearchURL)
    # Remove base search API URL from list
    params['baseUrl'] = splitSearchURLList[0]
    splitSearchURLList.pop(0)
    # Remove any empty items from the splitSearchURLList
    splitSearchURLList = list(filter(None, splitSearchURLList))
    typeParamList = []
    for paramValue in splitSearchURLList:
        # Add query to params dict
        if paramValue.startswith('q='):
            paramValue = convert_utf8bytes_to_characters(paramValue)
            paramValue = convert_common_html_encoding(paramValue)
            paramValue = paramValue.replace('+', ' ')
            params['params']['q'] = paramValue.replace('q=', '')
        # Add non-fq queries to params dict
        if not paramValue.startswith('=') and not paramValue.startswith('q='):
            key = paramValue.split('=')[0]
            if paramValue.split('=')[1] != '':
                params['params'][key] = paramValue.split('=')[1]
        # Add values of each type param to typeParamList
        if paramValue.startswith('type'):
            valueString = paramValue.split('=')[1]
            typeParamList.append(valueString)
        # fq pieces lost their '&fq' prefix in the split, so they start with '='
        if paramValue.startswith('='):
            key = paramValue.replace('=', '').split(':')[0]
            value = paramValue.split(':')[1]
            value = convert_utf8bytes_to_characters(value)
            value = convert_common_html_encoding(value)
            value = value.replace('+', ' ')
            paramString = key + ':' + value
            fq.append(paramString)
    # If there are type param values in typeParamList, add as value to new "type" param
    if typeParamList:
        params['params']['type'] = typeParamList
    # If there are any fq params, add fq keys and values
    if len(fq) > 0:
        params['params']['fq'] = fq
    return params
# Gets info from Search API about a given dataverse, dataset or file
def get_value_row_from_search_api_object(item, installationUrl):
    """Map one Search API result item to a flat dict for a dataframe row.

    item: one entry from the Search API's data.items list; its 'type' is
    expected to be 'dataset', 'dataverse' or 'file'.
    installationUrl: kept for backward compatibility with existing callers;
    the returned values no longer use it (the original built two URL locals
    from it and discarded them).
    Returns a dict of column values, or None for an unrecognized type
    (the original raised UnboundLocalError in that case).
    """
    if item['type'] == 'dataset':
        newRow = {
            'dataset_pid': item['global_id'],
            'version_state': item['versionState'],
            'dataverse_alias': item['identifier_of_dataverse']
        }
    elif item['type'] == 'dataverse':
        newRow = {
            'dataverse_database_id': item['entity_id'],
            'dataverse_alias': item['identifier'],
            'dataverse_url': item['url'],
            'dataverse_name': item['name']
        }
    elif item['type'] == 'file':
        # Not every file has a persistent ID; default to an empty string.
        # NOTE: the space in 'file persistent_id' is kept deliberately for
        # backward compatibility with existing column names.
        newRow = {
            'file_database_id': item['file_id'],
            'file persistent_id': item.get('file_persistent_id') or '',
            'file_name': item['name'],
            'dataset_pid': item['dataset_persistent_id']
        }
    else:
        newRow = None
    return newRow
# Uses Search API to return dataframe containing info about datasets in a Dataverse installation
# Write progress and results to the tkinter window
def get_object_dataframe_from_search_api(
    url, params, objectType, rootWindow=None, progressText=None, progressLabel=None, apiKey=None):
    """Page through the Search API and return one dataframe row per object.

    url: the Search API base URL; params: its query parameters (mutated in
    place with type/paging parameters); objectType: 'dataset', 'dataverse'
    or 'file'. The optional tkinter arguments, when all provided, drive a
    progress label in the GUI. apiKey enables access to unpublished objects.
    """
    installationUrl = get_installation_url(url)
    if apiKey:
        header = {'X-Dataverse-key': apiKey}
    else:
        header = {}
    params['type'] = objectType
    # Add param to show database IDs of each item
    params['show_entity_ids'] = 'true'
    # First call with per_page=1 only to read the total count of objects
    params['per_page'] = 1
    response = requests.get(
        url,
        params=params,
        headers=header
    )
    data = response.json()
    total = data['data']['total_count']
    misindexedObjectCount = 0
    objectInfoDict = []
    # Initialization for paginating through results of Search API calls
    condition = True
    params['start'] = 0
    if None not in [rootWindow, progressText, progressLabel]:
        text = 'Looking for datasets...'
        progressText.set(text)
        progressLabel.config(fg='green')
        progressLabel = progressLabel.grid(sticky='w', row=0)
        rootWindow.update_idletasks()
    while condition:
        try:
            params['per_page'] = 10
            response = requests.get(
                url,
                params=params,
                headers=header
            )
            data = response.json()
            for item in data['data']['items']:
                newRow = get_value_row_from_search_api_object(item, installationUrl)
                objectInfoDict.append(dict(newRow))
            datasetCount = len(objectInfoDict)
            # Update variables to paginate through the search results
            params['start'] = params['start'] + params['per_page']
        # If misindexed datasets break the Search API call where per_page=10,
        # try calls where per_page=1 then per_page=10 again
        # (See https://github.com/IQSS/dataverse/issues/4225)
        except Exception:
            try:
                params['per_page'] = 1
                response = requests.get(
                    url,
                    params=params,
                    headers=header
                )
                data = response.json()
                for item in data['data']['items']:
                    newRow = get_value_row_from_search_api_object(item, installationUrl)
                    objectInfoDict.append(dict(newRow))
                # Update variables to paginate through the search results
                params['start'] = params['start'] + params['per_page']
            # If page fails to load, count a misindexed object and continue to the next page
            except Exception:
                misindexedObjectCount += 1
                params['start'] = params['start'] + params['per_page']
        condition = params['start'] < total
    objectInfoDF = pd.DataFrame(objectInfoDict)
    return objectInfoDF
# Uses "Get Contents" endpoint to return list of dataverse aliases of all subcollections in a given collection
def get_all_subcollection_aliases(collectionUrl, apiKey=''):
    """Return the aliases of a collection and all of its subcollections.

    collectionUrl must be a collection URL containing '/dataverse/<alias>'.
    Uses the Native API's "Get Contents" endpoint; apiKey grants access to
    unpublished subcollections. The returned list includes the given
    collection's own alias.
    """
    parsed = urlparse(collectionUrl)
    installationUrl = parsed.scheme + '://' + parsed.netloc
    alias = parsed.path.split('/')[2]
    if apiKey:
        header = {'X-Dataverse-key': apiKey}
    else:
        header = {}
    # Get ID of given dataverse alias
    dataverseInfoEndpoint = '%s/api/dataverses/%s' % (installationUrl, alias)
    response = requests.get(
        dataverseInfoEndpoint,
        headers=header)
    data = response.json()
    parentDataverseId = data['data']['id']
    # Create list and add ID of given dataverse
    dataverseIds = [parentDataverseId]
    # Breadth-first walk: the loop deliberately extends dataverseIds while
    # iterating it, so newly found subcollections are visited too
    for dataverseId in dataverseIds:
        dataverseGetContentsEndpoint = '%s/api/dataverses/%s/contents' % (installationUrl, dataverseId)
        response = requests.get(
            dataverseGetContentsEndpoint,
            headers=header)
        data = response.json()
        for item in data['data']:
            if item['type'] == 'dataverse':
                dataverseId = item['id']
                dataverseIds.extend([dataverseId])
    # Get the alias for each dataverse ID
    dataverseAliases = []
    for dataverseId in dataverseIds:
        dataverseInfoEndpoint = '%s/api/dataverses/%s' % (installationUrl, dataverseId)
        response = requests.get(
            dataverseInfoEndpoint,
            headers=header)
        data = response.json()
        alias = data['data']['alias']
        dataverseAliases.append(alias)
    return dataverseAliases
def get_canonical_pid(pidOrUrl):
    """Return the canonical PID ('doi:...' or 'hdl:...') for a dataset.

    Accepts a dataset page URL with a persistentId query parameter, a DOI
    or Handle resolver URL, or an already-canonical 'doi:'/'hdl:' PID.
    Raises ValueError for inputs matching none of these forms (the
    original crashed with UnboundLocalError instead). Regex patterns are
    raw strings now, fixing the original's invalid escape sequences.
    """
    if pidOrUrl.startswith('http') and 'persistentId=' in pidOrUrl:
        # Dataset page URL: pull the PID out of the query string
        canonicalPid = pidOrUrl.split('persistentId=')[1]
        canonicalPid = canonicalPid.split('&version')[0]
        canonicalPid = canonicalPid.replace('%3A', ':').replace('%2F', '/')
    elif pidOrUrl.startswith('http') and 'doi.' in pidOrUrl:
        # DOI resolver URL, e.g. https://doi.org/10.5072/ABC
        canonicalPid = re.sub(r'http.*org\/', 'doi:', pidOrUrl)
    elif pidOrUrl.startswith('doi:') and '/' in pidOrUrl:
        canonicalPid = pidOrUrl
    elif pidOrUrl.startswith('http') and 'hdl.' in pidOrUrl:
        # Handle resolver URL, e.g. https://hdl.handle.net/1902.1/111
        canonicalPid = re.sub(r'http.*net\/', 'hdl:', pidOrUrl)
    elif pidOrUrl.startswith('hdl:') and '/' in pidOrUrl:
        canonicalPid = pidOrUrl
    else:
        raise ValueError('Unrecognized dataset PID or URL: %s' % (pidOrUrl))
    return canonicalPid
def get_datasets_from_collection_or_search_url(
    url, rootWindow=None, progressLabel=None, progressText=None, textBoxCollectionDatasetPIDs=None,
    apiKey='', ignoreDeaccessionedDatasets=False, subdataverses=False):
    """Find all dataset PIDs behind a collection or search URL.

    Results are written into the optional tkinter textbox (one PID per
    line) and a summary into progressText; with no tkinter arguments the
    summary is printed instead.

    url: a Dataverse collection URL or a search-results URL ('q=' in it).
    ignoreDeaccessionedDatasets: drop datasets whose only state is
    DEACCESSIONED. subdataverses: for collection URLs, include datasets
    owned by subcollections (linked datasets are excluded either way).
    """
    if textBoxCollectionDatasetPIDs is not None:
    # if None not in [rootWindow, progressLabel, progressText, textBoxCollectionDatasetPIDs]:
        # Hide the textBoxCollectionDatasetPIDs scrollbox if it exists
        forget_widget(textBoxCollectionDatasetPIDs)
    # Use the Search API to get dataset info from the given search url or Dataverse collection URL
    searchApiUrl = get_search_api_url(url)
    requestsGetProperties = get_params(searchApiUrl)
    baseUrl = requestsGetProperties['baseUrl']
    params = requestsGetProperties['params']
    datasetInfoDF = get_object_dataframe_from_search_api(
        url=baseUrl, rootWindow=rootWindow, progressLabel=progressLabel, progressText=progressText,
        params=params, objectType='dataset', apiKey=apiKey)
    datasetCount = len(datasetInfoDF.index)
    if datasetCount == 0:
        text = 'Datasets found: 0'
        if progressText is not None:
            progressText.set(text)
        else:
            print(text)
    elif datasetCount > 0:
        deaccessionedDatasetCount = 0
        # To ignore deaccessioned datasets, remove from the dataframe all datasets where version_state is DEACCESSIONED
        if ignoreDeaccessionedDatasets == True:
            datasetInfoDF = datasetInfoDF[datasetInfoDF['version_state'].str.contains('DEACCESSIONED') == False]
            deaccessionedDatasetCount = datasetCount - len(datasetInfoDF.index)
        # Remove version_state column so that I can remove the dataframe's duplicate rows and there's only one row per dataset
        datasetInfoDF = datasetInfoDF.drop('version_state', axis=1)
        # Drop duplicate rows, which happens when Search API results lists a dataset's published and draft versions
        datasetInfoDF = datasetInfoDF.drop_duplicates()
        # Recount datasets
        uniqueDatasetCount = len(datasetInfoDF.index)
        # Check if url is collection url. If so:
        if 'q=' not in url:
            # If the user wants datasets in all subdataverses and the url
            # is the root collection, don't filter the dataframe
            if subdataverses == True and is_root_collection(url) == True:
                uniqueDatasetCount = len(datasetInfoDF)
            # If the user wants datasets in all subdataverses and the url
            # is not the root collection...
            elif subdataverses == True and is_root_collection(url) == False:
                # Get the aliases of all subdataverses...
                dataverseAliases = get_all_subcollection_aliases(url, apiKey=apiKey)
                # Remove any datasets that aren't owned by any of the
                # subdataverses. This will exclude linked datasets
                datasetInfoDF = datasetInfoDF[
                    datasetInfoDF['dataverse_alias'].isin(dataverseAliases)]
                uniqueDatasetCount = len(datasetInfoDF)
            # If the user wants only datasets in the collection,
            # and not in collections within the collection...
            elif subdataverses == False:
                # Get the alias of the collection (including the alias of the root collection)
                alias = get_alias_from_collection_url(url)
                # Retain only datasets owned by that collection
                datasetInfoDF = datasetInfoDF[datasetInfoDF['dataverse_alias'].isin([alias])]
                uniqueDatasetCount = len(datasetInfoDF)
        # If the url is a search URL, get all datasetPids from datasetInfoDF
        elif 'q=' in url:
            uniqueDatasetCount = len(datasetInfoDF)
        if textBoxCollectionDatasetPIDs is not None:
            # Place textbox with list of dataset PIDs and set state to read/write (normal)
            textBoxCollectionDatasetPIDs.grid(sticky='w', row=2, pady=5)
            textBoxCollectionDatasetPIDs.configure(state ='normal')
            # Clear whatever's in the textBoxCollectionDatasetPIDs textbox
            textBoxCollectionDatasetPIDs.delete('1.0', END)
            # Insert the dataset PIDs into the textBoxCollectionDatasetPIDs scrollbox
            for dfIndex, dfRow in datasetInfoDF.iterrows():
                datasetPid = dfRow['dataset_pid'] + '\n'
                textBoxCollectionDatasetPIDs.insert('end', datasetPid)
        # Create and place result text with uniqueDatasetCount
        if deaccessionedDatasetCount == 0:
            text = 'Datasets found: %s' % (str(uniqueDatasetCount))
        if deaccessionedDatasetCount > 0:
            text = 'Datasets found: %s\rDeaccessioned datasets ignored: %s' % (str(uniqueDatasetCount), str(deaccessionedDatasetCount))
        if progressText is not None:
            progressText.set(text)
        else:
            print(text)
def get_directory_path():
    """Open a folder-picker dialog and return the chosen directory path."""
    return filedialog.askdirectory()
def get_dataset_metadata_export(installationUrl, datasetPid, exportFormat, header=None, apiKey=''):
    """Download a dataset's metadata export in the given format.

    exportFormat 'dataverse_json' uses the Native API (works for draft
    versions and accepts an API key); all other formats use the export API,
    which serves only the latest published version. Returns parsed JSON for
    JSON-style formats, pretty-printed XML text for XML-style formats, or
    the string 'ERROR' on failure.

    Fixes a mutable-default-argument bug: the original declared header={}
    and wrote the API key into it, so the key leaked into the shared
    default dict and was silently reused by every subsequent call.
    """
    if header is None:
        header = {}
    if apiKey:
        header['X-Dataverse-key'] = apiKey
    if exportFormat == 'dataverse_json':
        getJsonRepresentationOfADatasetEndpoint = '%s/api/datasets/:persistentId/?persistentId=%s' % (installationUrl, datasetPid)
        getJsonRepresentationOfADatasetEndpoint = getJsonRepresentationOfADatasetEndpoint.replace('//api', '/api')
        response = requests.get(
            getJsonRepresentationOfADatasetEndpoint,
            headers=header)
        # 401 (unauthorized) still returns a JSON body describing the error
        if response.status_code in (200, 401):
            data = response.json()
        else:
            data = 'ERROR'
        return data
    # Other exports exist only for each dataset's latest published version
    # (whereas the Dataverse JSON export covers unpublished versions too)
    datasetMetadataExportEndpoint = '%s/api/datasets/export?exporter=%s&persistentId=%s' % (installationUrl, exportFormat, datasetPid)
    datasetMetadataExportEndpoint = datasetMetadataExportEndpoint.replace('//api', '/api')
    response = requests.get(
        datasetMetadataExportEndpoint,
        headers=header)
    if response.status_code == 200:
        # Unknown-but-successful formats fall through to 'ERROR' instead of
        # crashing with UnboundLocalError as the original did
        data = 'ERROR'
        if exportFormat in ('schema.org', 'OAI_ORE'):
            data = response.json()
        if exportFormat in ('ddi', 'oai_ddi', 'dcterms', 'oai_dc', 'Datacite', 'oai_datacite'):
            string = response.text
            # NOTE(review): BeautifulSoup is not imported in this file's
            # visible import block — confirm 'from bs4 import BeautifulSoup'
            # exists elsewhere, otherwise this branch raises NameError.
            data = BeautifulSoup(string, 'xml').prettify()
    else:
        data = 'ERROR'
    return data
def get_metadatablock_data(installationUrl, metadatablockName):
    """Fetch a metadata block's definition JSON; None when the call fails."""
    endpointUrl = '%s/api/v1/metadatablocks/%s' % (installationUrl, metadatablockName)
    response = requests.get(endpointUrl)
    if response.status_code != 200:
        # Matches the original's implicit None on any non-200 status
        return None
    return response.json()
def get_metadatablock_db_field_name_and_title(metadatablockData):
    """Map each parent (top-level) field's display title to its database name.

    Children of compound fields are excluded, so only parent fields appear
    in the returned {title: database_name} dict.
    """
    fields = metadatablockData['data']['fields']
    # Database names of every field, and of every child of a compound field
    allDBNames = {properties['name'] for properties in fields.values()}
    childDBNames = {
        child
        for properties in fields.values() if 'childFields' in properties
        for child in properties['childFields']}
    # Parent fields are exactly those that are no compound field's child
    return {
        fields[dbName]['title']: dbName
        for dbName in allDBNames - childDBNames}
# Get list of parent field names and add to a tkinter listbox for user to choose fields
def get_parent_field_names(metadatablockData, listbox):
    """Fill *listbox* with the titles of the metadata block's parent fields.

    Compound fields are shown as "Title: child1, child2, ..." truncated to
    50 characters; child fields themselves are not listed separately.
    """
    # Clear any names already in the listbox
    listbox.delete(0, END)
    # Collect the database names of all child fields so they can be
    # excluded from the options shown to the user.
    # (The original also built allFieldsDBNamesDict, compoundFieldsDBNamesList
    # and fieldWithChildFieldList, none of which were ever read -- removed.)
    childFieldsDBNamesList = []
    for parentField in metadatablockData['data']['fields']:
        properties = metadatablockData['data']['fields'][parentField]
        if 'childFields' in properties:
            for childField in properties['childFields']:
                childFieldsDBNamesList.append(childField)
    options = []
    for parentField in metadatablockData['data']['fields']:
        properties = metadatablockData['data']['fields'][parentField]
        if 'childFields' not in properties and properties['name'] not in childFieldsDBNamesList:
            # Primitive parent field: show its title
            options.append(' ' + properties['title'])
        elif 'childFields' in properties:
            # Compound field: show "Title: child1, child2, ...", truncated
            title = properties['title']
            childFieldsString = list_to_string(list(properties['childFields']))
            fieldWithChildField = '%s: %s' % (title, childFieldsString)
            if len(fieldWithChildField) > 50:
                fieldWithChildField = fieldWithChildField[0:50] + '...'
            options.append(' ' + fieldWithChildField)
    for option in options:
        listbox.insert('end', option)
def get_listbox_values(listbox):
    """Return the field names of the listbox entries the user selected.

    Each entry looks like " Title" or " Title: child1, ...": surrounding
    whitespace is stripped and anything after the first colon is dropped.
    """
    return [
        listbox.get(index).strip().split(':')[0]
        for index in listbox.curselection()
    ]
# Get the child field database names of compound fields or the database name of primitive fields
def get_column_names(
        metadatablockData, parentFieldTitle, parentFieldDBNameAndTitleDict):
    """Return the CSV column names for a chosen parent field.

    For a compound field the columns are its child fields' database names;
    for a primitive field the single column is its own database name.
    Returns an empty list when *parentFieldTitle* is unknown (the original
    raised UnboundLocalError in that case).
    """
    compoundFieldsDBNamesList = []
    for parentfield in metadatablockData['data']['fields']:
        properties = metadatablockData['data']['fields'][parentfield]
        if 'childFields' in properties:
            compoundFieldsDBNamesList.append(properties['name'])
    # Initialize so an unknown title returns [] instead of raising
    columns = []
    if parentFieldTitle in parentFieldDBNameAndTitleDict.keys():
        chosenDBName = parentFieldDBNameAndTitleDict[parentFieldTitle]
        # If the field is a compound field, its child fields are the columns
        if chosenDBName in compoundFieldsDBNamesList:
            dbNameProperties = metadatablockData['data']['fields'][chosenDBName]
            for field in dbNameProperties['childFields']:
                columns.append(field)
        # Otherwise the field is primitive; its own name is the column
        else:
            columns.append(chosenDBName)
    return columns
def get_metadata_values_lists(
        installationUrl, datasetMetadata, metadatablockName,
        chosenTitleDBName, chosenFields=None, versions='latestVersion'):
    """Extract the values of one metadata field from a dataset's JSON export.

    Returns a list of rows, each [dataset_pid, dataset_pid_url, dataset_url,
    version_number, value...]. String values are truncated to 10000 characters
    and carriage returns replaced so they are spreadsheet-safe. For compound
    fields, *chosenFields* lists the child field names to extract, in order.
    """
    if versions == 'allVersions':
        versions = 'datasetVersion'
    rowVariablesList = []
    if (datasetMetadata['status'] == 'OK') and\
            (metadatablockName in datasetMetadata['data'][versions]['metadataBlocks']):
        datasetPersistentUrl = datasetMetadata['data']['persistentUrl']
        datasetPid = get_canonical_pid(datasetPersistentUrl)
        datasetUrl = installationUrl + '/dataset.xhtml?persistentId=' + datasetPid
        # Unpublished drafts carry no version numbers
        if 'versionNumber' in datasetMetadata['data'][versions]:
            majorVersionNumber = datasetMetadata['data'][versions]['versionNumber']
            minorVersionNumber = datasetMetadata['data'][versions]['versionMinorNumber']
            datasetVersionNumber = f'{majorVersionNumber}.{minorVersionNumber}'
        else:
            datasetVersionNumber = 'DRAFT'
        for fields in datasetMetadata['data'][versions]['metadataBlocks'][metadatablockName]['fields']:
            if fields['typeName'] == chosenTitleDBName:
                typeClass = fields['typeClass']
                allowsMultiple = fields['multiple']
                if typeClass in ('primitive', 'controlledVocabulary') and allowsMultiple is True:
                    # One row per value
                    for value in fields['value']:
                        rowVariablesList.append([
                            datasetPid, datasetPersistentUrl, datasetUrl,
                            datasetVersionNumber, value[:10000].replace('\r', ' - ')])
                elif typeClass in ('primitive', 'controlledVocabulary') and allowsMultiple is False:
                    value = fields['value'][:10000].replace('\r', ' - ')
                    rowVariablesList.append([
                        datasetPid, datasetPersistentUrl, datasetUrl,
                        datasetVersionNumber, value])
                elif typeClass == 'compound' and allowsMultiple is True:
                    # Bounded loop instead of the original do-while, which
                    # raised an uncaught IndexError when fields['value'] was empty
                    for index in range(len(fields['value'])):
                        rowVariables = [
                            datasetPid, datasetPersistentUrl, datasetUrl,
                            datasetVersionNumber]
                        for chosenField in chosenFields:
                            # Use an empty string when the child field is absent
                            try:
                                value = fields['value'][index][chosenField]['value'][:10000].replace('\r', ' - ')
                            except KeyError:
                                value = ''
                            rowVariables.append(value)
                        rowVariablesList.append(rowVariables)
                elif typeClass == 'compound' and allowsMultiple is False:
                    rowVariables = [datasetPid, datasetPersistentUrl, datasetUrl, datasetVersionNumber]
                    for chosenField in chosenFields:
                        try:
                            value = fields['value'][chosenField]['value'][:10000].replace('\r', ' - ')
                        except KeyError:
                            value = ''
                        rowVariables.append(value)
                    rowVariablesList.append(rowVariables)
    return rowVariablesList
# Delete empty CSV files in a given directory. If file has fewer than 2 rows, delete it.
def delete_empty_csv_files(csvDirectory):
    """Delete CSVs in *csvDirectory* that contain no data rows.

    A file with fewer than two rows (header only, or completely empty) holds
    no metadata: it is removed, and its field name (the file name without
    ".csv") is included in the returned list.
    """
    fieldsWithNoMetadata = []
    for file in glob.glob(str(Path(csvDirectory)) + '/' + '*.csv'):
        with open(file, mode='r', encoding='utf-8') as f:
            rowCount = sum(1 for _ in csv.reader(f, delimiter=','))
        # The comment above promises "fewer than 2 rows", but the original
        # matched only exactly 1 and so kept zero-row (empty) files around.
        # (The redundant f.close() inside the with-block is also gone.)
        if rowCount < 2:
            fieldName = Path(file).name.replace('.csv', '')
            fieldsWithNoMetadata.append(fieldName)
            os.remove(file)
    return fieldsWithNoMetadata
# Full outer join of CSV files in a given directory
def join_metadata_csv_files(csvDirectory):
    """Outer-join every CSV in *csvDirectory* on the shared dataset-identifier
    columns and write the combined table to all_fields.csv.

    Does nothing when the directory holds fewer than two files.
    """
    allMetadataFileName = os.path.join(csvDirectory, 'all_fields.csv')
    # Columns common to all of the per-field CSV files, used as the join index
    indexList = ['dataset_pid', 'dataset_pid_url', 'dataset_url', 'dataset_version_number']
    filePaths = [os.path.join(csvDirectory, name) for name in listdir(csvDirectory)]
    if len(filePaths) > 1:
        dataframes = []
        for filePath in filePaths:
            dataframe = pd.read_csv(filePath, sep=',', na_filter=False)
            dataframe.set_index(indexList, inplace=True)
            dataframes.append(dataframe)
        # Successively outer-join all dataframes on the shared index
        joined = reduce(lambda left, right: left.join(right, how='outer'), dataframes)
        joined.to_csv(allMetadataFileName)
# Get the metadata of datasets. Function passed to tkinter button
def get_dataset_metadata(
    rootWindow, progressLabel, progressText, noMetadataText, noMetadataLabel,
    installationUrl='', datasetPidString='',
    parentFieldTitleList='', directoryPath='', apiKey=''):
    """Download chosen citation-metadata fields for a list of datasets to CSVs.

    For every field title in parentFieldTitleList a CSV file is created in a
    new timestamped directory under directoryPath, then filled with one row
    per metadata value per dataset listed (one PID per line) in
    datasetPidString. Progress is reported through the supplied tkinter
    variables and labels; empty CSVs are deleted and the remaining files are
    outer-joined into all_fields.csv.

    NOTE(review): the [:-1] below assumes datasetPidString ends with a
    trailing newline-produced empty item -- confirm against the calling
    text widget, otherwise the last PID is silently dropped.
    """
    # Use metadatablock API endpoint to get metadatablock data
    metadatablockData = get_metadatablock_data(installationUrl, 'citation')
    # From metadatablockData, get the database and display names of each parent field
    allFieldsDBNamesDict = get_metadatablock_db_field_name_and_title(metadatablockData)
    # Create directory in the directory that the user chose
    currentTime = time.strftime('%Y.%m.%d_%H.%M.%S')
    installationRootName = get_root_alias_name(installationUrl)
    mainDirectoryName = '%s_dataset_metadata_%s' % (installationRootName, currentTime)
    mainDirectoryPath = str(Path(directoryPath + '/' + mainDirectoryName))
    os.mkdir(mainDirectoryPath)
    # For each field the user chose:
    for parentFieldTitle in parentFieldTitleList:
        # Create CSV file
        # Create file name and path
        csvFileName = parentFieldTitle.lower().strip().replace(' ', '_')
        csvFileName = csvFileName + '(citation)'
        # NOTE(review): mainDirectoryPath is recomputed here with the same value
        mainDirectoryPath = str(Path(directoryPath + '/' + mainDirectoryName))
        csvFilePath = str(Path(mainDirectoryPath, csvFileName)) + '.csv'
        # Create header row for the CSV file
        headerRow = ['dataset_pid', 'dataset_pid_url', 'dataset_url', 'dataset_version_number']
        childFieldsList = get_column_names(
            metadatablockData, parentFieldTitle, allFieldsDBNamesDict)
        # Add childFields list to header row
        headerRow = headerRow + childFieldsList
        # Create CSV file and add headerrow
        with open(csvFilePath, mode='w', newline='') as f:
            writer = csv.writer(f)
            writer.writerow(headerRow)
    # Change passed datasetPidString to a list. Make sure the last newline doesn't mess up the list
    datasetPidList = [x.strip() for x in datasetPidString.splitlines()][:-1]
    # Delete any message in the tkinter window about no metadata being found
    # the last time the "Get metadata" button was pressed
    noMetadataLabel.grid_forget()
    count = 0
    datasetTotalCount = len(datasetPidList)
    text = 'Dataset metadata retrieved: 0 of %s' % (datasetTotalCount)
    progressText.set(text)
    progressLabel.grid(sticky='w', row=1, columnspan=2)
    rootWindow.update_idletasks()
    for datasetPid in datasetPidList:
        # Get the JSON metadata export of the latest version of the dataset
        datasetMetadata = get_dataset_metadata_export(
            installationUrl=installationUrl,
            datasetPid=datasetPid,
            exportFormat='dataverse_json',
            apiKey=apiKey)
        if datasetMetadata['status'] == 'OK':
            for parentFieldTitle in parentFieldTitleList:
                # Get database name of parentFieldTitle
                dbName = allFieldsDBNamesDict[parentFieldTitle]
                valueLists = get_metadata_values_lists(
                    installationUrl=installationUrl,
                    datasetMetadata=datasetMetadata,
                    metadatablockName='citation',
                    chosenTitleDBName=dbName,
                    chosenFields=get_column_names(
                        metadatablockData, parentFieldTitle, allFieldsDBNamesDict))
                csvFileName = parentFieldTitle.lower().strip().replace(' ', '_')
                csvFileName = csvFileName + '(citation)'
                csvFilePath = str(Path(mainDirectoryPath, csvFileName)) + '.csv'
                # Append one row per extracted value to the field's CSV
                for valueList in valueLists:
                    with open(csvFilePath, mode='a', newline='', encoding='utf-8') as f:
                        writer = csv.writer(f, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
                        writer.writerow(valueList)
        count += 1
        text = 'Dataset metadata retrieved: %s of %s' % (count, datasetTotalCount)
        progressText.set(text)
        rootWindow.update_idletasks()
    # Remove header-only CSVs; report their field names in the window
    fieldsWithNoMetadata = delete_empty_csv_files(mainDirectoryPath)
    if count > 0 and len(fieldsWithNoMetadata) > 0:
        # noMetadataLabel.grid(sticky='w', row=2)
        fieldsWithNoMetadataString = list_to_string(fieldsWithNoMetadata)
        fieldsWithNoMetadataString = (
            'No metadata found for the following fields:\r' + fieldsWithNoMetadataString)
        noMetadataText.set(fieldsWithNoMetadataString)
        noMetadataLabel.grid(sticky='w', row=2)
    rootWindow.update_idletasks()
    # Full outer join all CSV files to create a CSV with all metadata
    join_metadata_csv_files(mainDirectoryPath)
def delete_published_dataset(installationUrl, datasetPid, apiKey):
    """Destroy a published dataset via the Dataverse "destroy" endpoint.

    Returns "<status>: <message>" from the API response, or None when the
    response carries no status field.
    """
    endpoint = (
        '%s/api/datasets/:persistentId/destroy/?persistentId=%s'
        % (installationUrl, datasetPid))
    response = requests.delete(endpoint, headers={'X-Dataverse-key': apiKey})
    responseData = response.json()
    status = responseData.get('status')
    if not status:
        return None
    return '%s: %s' % (status, responseData.get('message', ''))
def delete_published_datasets(
    rootWindow, progressLabel, progressText, notDeletedText, notDeletedLabel,
    installationUrl, datasetPidString, apiKey):
    """Destroy every dataset listed (one PID per line) in datasetPidString.

    Progress and failures are reported through the supplied tkinter
    variables and labels.

    NOTE(review): delete_published_dataset returns None when the API reply
    has no 'status' key; the `'OK' in statusMessage` test below would then
    raise TypeError -- confirm whether that case can occur.
    """
    installationUrl = get_installation_url(installationUrl)
    # Change passed datasetPidString to a list. Make sure the last newline doesn't mess up the list
    datasetPidList = [x.strip() for x in datasetPidString.splitlines()]
    # Remove any empty items from the list of dataset PIDs
    datasetPidList = [datasetPid for datasetPid in datasetPidList if datasetPid]
    # Normalize every PID/URL to its canonical "doi:..."/"hdl:..." form
    canonicalPidList = []
    for datasetPid in datasetPidList:
        canonicalPid = get_canonical_pid(datasetPid)
        canonicalPidList.append(canonicalPid)
    # Delete any message in the tkinter window about datasets not being deleted
    # the last time the "Delete datasets" button was pressed
    notDeletedLabel.grid_forget()
    deletedDatasetCount = 0
    datasetTotalCount = len(canonicalPidList)
    deletedText = 'Datasets deleted: 0 of %s' % (datasetTotalCount)
    progressText.set(deletedText)
    progressLabel.config(fg='green')
    progressLabel.grid(sticky='w', row=1)
    # The "not deleted" label starts invisible (white on white) until an error occurs
    notDeletedLabel.config(fg='white')
    notDeletedLabel.grid(sticky='w', row=2)
    rootWindow.update_idletasks()
    destroyedDatasets = []
    notDestroyedDatasets = []
    for canonicalPid in canonicalPidList:
        statusMessage = delete_published_dataset(installationUrl, canonicalPid, apiKey)
        if 'OK' in statusMessage:
            destroyedDatasets.append(canonicalPid)
            deletedDatasetCount += 1
            deletedText = 'Datasets deleted: %s of %s' % (deletedDatasetCount, datasetTotalCount)
            progressText.set(deletedText)
            rootWindow.update_idletasks()
        elif 'ERROR' in statusMessage:
            notDeletedLabel.config(fg='red')
            notDestroyedDatasets.append(canonicalPid)
            notDeletedMessage = 'Datasets not deleted: %s' % (len(notDestroyedDatasets))
            notDeletedText.set(notDeletedMessage)
            rootWindow.update_idletasks()
| 42.537959 | 138 | 0.606824 |
import csv
from dateutil.parser import parse
from functools import reduce
import json
import glob
import os
from os import listdir
import pandas as pd
from pathlib import Path
import re
import requests
import time
from tkinter import Tk, ttk, Frame, Label, IntVar, Checkbutton, filedialog, NORMAL, DISABLED
from tkinter import Listbox, MULTIPLE, StringVar, END, INSERT, N, E, S, W
from tkinter.ttk import Entry, Progressbar, OptionMenu, Combobox
from urllib.parse import urlparse
class collapsiblePanel(Frame):
    """A tkinter frame with a clickable title bar that shows/hides its body.

    The body is exposed as ``subFrame``; callers pack their widgets into it.
    *default* may be 'open' or 'closed' and controls the initial state.
    """
    def __init__(self, parent, text='', default='closed', padx=0, pady=0, *args, **options):
        """Build the title bar, toggle button and (initially hidden) body frame."""
        Frame.__init__(self, parent, *args, **options, padx=padx, pady=pady)
        # show tracks the open (1) / closed (0) state via the Checkbutton
        self.show = IntVar()
        self.titleFrame = ttk.Frame(self, relief='raised', borderwidth=1)
        self.titleFrame.pack(fill='x', expand=1)
        Label(self.titleFrame, text=text, width=40, anchor='w').pack(side='left', fill='x', expand=1)
        self.toggleButton = ttk.Checkbutton(
            self.titleFrame, width=5, command=self.toggle,
            variable=self.show, style='Toolbutton')
        self.toggleButton.pack(side='right')
        self.subFrame = Frame(self, borderwidth=1, relief='groove', bg='white', padx=10)
        if default == 'open':
            self.show.set(1)
            self.subFrame.pack(fill='x', expand=1)
            self.toggleButton.configure(text='▼')
        elif default == 'closed':
            self.show.set(0)
            self.toggleButton.configure(text='▲')
    def toggle(self):
        """Show or hide subFrame to match the toggle button's state."""
        if bool(self.show.get()):
            self.subFrame.pack(fill='x', expand=1)
            self.toggleButton.configure(text='▼')
        else:
            self.subFrame.forget()
            self.toggleButton.configure(text='▲')
def forget_widget(widget):
    """Remove *widget* from its grid layout if it still exists."""
    # winfo_exists() returns 1 while the widget is alive; the original's
    # `else: pass` branch was dead code and is removed
    if widget.winfo_exists() == 1:
        widget.grid_forget()
# (character limit for many spreadsheet applications), and returning nothing if key doesn't exist
def improved_get(_dict, path, default=None):
    """Walk a nested dict along the dot-separated *path*.

    Returns *default* when any key along the path is missing. Ints and
    dicts are returned as-is; strings are truncated to 10000 characters
    with carriage returns replaced. Any other value type yields None.
    """
    current = _dict
    for key in path.split('.'):
        try:
            current = current[key]
        except KeyError:
            return default
    if isinstance(current, (int, dict)):
        return current
    if isinstance(current, str):
        return current[:10000].replace('\r', ' - ')
    # Other value types (lists, floats, ...) fall through to an implicit None
def list_to_string(lst):
    """Join the items of *lst* into a single comma-separated string."""
    return ', '.join(lst)
def convert_to_local_tz(timestamp, shortDate=False):
    """Parse *timestamp* and convert it to the machine's local timezone.

    Returns a datetime, or a 'YYYY-MM-DD' string when *shortDate* is True.
    """
    # astimezone() with no argument converts to the local timezone; the
    # original called tz.tzlocal(), but dateutil's tz module is never
    # imported in this file, so that raised NameError at runtime
    timestamp = parse(timestamp)
    timestamp = timestamp.astimezone()
    if shortDate is True:
        timestamp = timestamp.strftime('%Y-%m-%d')
    return timestamp
def select_all(listbox):
    """Select every entry in the given tkinter listbox."""
    listbox.selection_set(0, END)
def clear_selections(listbox):
    """Deselect every entry in the given tkinter listbox."""
    listbox.select_clear(0, END)
def get_installation_url(string):
    """Extract an installation's base URL from a URL or "Name (URL)" string.

    Returns "scheme://netloc" for a URL, the parenthesized URL from a
    "Name (URL)" string, or None when neither form matches.
    """
    if string.startswith('http'):
        parsed = urlparse(string)
        installationUrl = parsed.scheme + '://' + parsed.netloc
        return installationUrl
    elif '(' in string:
        # Pull the "(https://...)" part out of "Installation Name (URL)";
        # raw strings fix the original's invalid-escape regex literals
        installationUrl = re.search(r'\(.*\)', string).group()
        installationUrl = re.sub(r'\(|\)', '', installationUrl)
        return installationUrl
# Gets list of URLs from Dataverse map JSON data and add Demo Dataverse url
def get_installation_list():
    """Return "Name (URL)" strings for all known Dataverse installations.

    Fetches the installations map maintained by IQSS on GitHub and
    prepends the Demo Dataverse entry.
    """
    mapJsonUrl = 'https://raw.githubusercontent.com/IQSS/dataverse-installations/master/data/data.json'
    data = requests.get(mapJsonUrl).json()
    installationsList = [
        '%s (https://%s)' % (installation['name'], installation['hostname'])
        for installation in data['installations']]
    installationsList.insert(0, 'Demo Dataverse (https://demo.dataverse.org)')
    return installationsList
# Function for getting name of installation's root collection
def get_root_alias_name(url):
    """Return the alias of an installation's root collection.

    UVA's installation is special-cased; otherwise the alias is read from
    the installation's /api/dataverses/1 endpoint.
    """
    if 'dataverse.lib.virginia.edu' in url:
        return 'uva'
    # Reduce a collection URL to the installation's base URL; the two
    # original branches duplicated the request/parse logic below
    if '/dataverse/' in url:
        parsed = urlparse(url)
        url = parsed.scheme + '://' + parsed.netloc
    endpoint = '%s/api/dataverses/1' % (url)
    response = requests.get(endpoint)
    dataverseData = response.json()
    return dataverseData['data']['alias']
def get_alias_from_collection_url(url):
    """Return the collection alias for a Dataverse collection URL.

    URLs containing "/dataverse/" are parsed directly; other URLs are
    treated as installation URLs whose root-collection alias is fetched
    from the API (UVA's installation is special-cased).
    """
    if '/dataverse/' in url:
        # The alias is the path segment right after "/dataverse/";
        # fall back to an empty string when it is missing
        try:
            alias = urlparse(url).path.split('/')[2]
        except IndexError:
            alias = ''
    elif 'dataverse.lib.virginia.edu' in url:
        alias = 'uva'
    else:
        installationUrl = get_installation_url(url)
        apiUrl = '%s/api/dataverses/1' % (installationUrl)
        dataverseData = requests.get(apiUrl).json()
        alias = dataverseData['data']['alias']
    return alias
# Returns True if the collection URL's alias is the installation's root collection alias
def is_root_collection(url):
    """Report whether *url* points at the installation's root collection."""
    # A single comparison replaces the original if/elif pair, which called
    # both helpers (and therefore the installation's API) twice
    return get_alias_from_collection_url(url) == get_root_alias_name(url)
# Function that turns Dataverse installation URL, instalation URL or search URL into a Search API URL
def get_search_api_url(url, apiKey=None):
    """Convert an installation, collection or search-page URL to a Search API URL.

    Collection URLs become "/api/search?q=*&subtree=<alias>"; installation
    URLs become "/api/search?q=*"; search-page URLs have their query, filter
    ("fq") and "types" parameters translated into Search API parameters.
    NOTE(review): *apiKey* is accepted but never used in this function.
    """
    # If URL is not a search url (doesn't contain 'q=') and contains /dataverse/, it's a Dataverse collection URL
    if 'q=' not in url and '/dataverse/' in url:
        # Remove the jsessionidString that sometimes appears in the URL
        try:
            jsessionidString = re.search(r';jsessionid=.*', url).group()
            url = url.replace(jsessionidString, '?')
        except AttributeError:
            pass
        # Get the Dataverse Collection name in the search URL
        dataversePart = re.search(r'\/dataverse\/.*', url).group()
        dataverseName = dataversePart.replace('/dataverse/', '')
        # Replace '/dataverse/' and the dataverse name with '/api/search?q=*' and add subtree parameter with dataverse name
        apiSearchURL = url.replace(dataversePart, '/api/search?q=*') + '&subtree=%s' % (dataverseName)
    # If URL is not a search URL (doesn't contain 'q=') and doesn't have /dataverse/, assume it's the URL of the installation
    if 'q=' not in url and '/dataverse/' not in url:
        apiSearchURL = url.replace('/dataverse.xhtml', '')
        apiSearchURL = apiSearchURL + '/api/search'
        apiSearchURL = apiSearchURL.replace('//api', '/api') + '?q=*'
    elif 'q=' in url:
        # Sometimes there's a slash before the ?q. If so, remove it
        url = url.replace('/?q', '?q')
        # Strip any jsessionid fragment before the query string
        try:
            jsessionidString = re.search(r';jsessionid=.*\?', url).group()
            url = url.replace(jsessionidString, '?')
        except AttributeError:
            pass
        # Get the Dataverse Collection name in the search URL
        # dataverseName = re.search(r'\/dataverse\/\w*\?q', url)
        dataverseName = re.search(r'\/dataverse\/.*\?q', url)
        dataverseName = dataverseName.group()
        subtree = dataverseName.replace('/dataverse/', '&subtree=').replace('?q', '')
        # Rewrite the page URL into an API URL and decode a few common escapes
        apiSearchURL = (
            url
            .replace(dataverseName, '/api/search?q')
            .replace('?q=&', '?q=*&')
            .replace('%3A', ':')
            .replace('%22', '"')
            .replace('%28', '(')
            .replace('%29', ')')
            + '&show_entity_ids=true'
            + subtree
        )
        # Remove any digits after any fq parameters
        apiSearchURL = re.sub('fq\d', 'fq', apiSearchURL)
        apiSearchURL = apiSearchURL + '&per_page=10&start=0'
        # Replace values of any "types" parameters into the Search API's "type" paramater
        try:
            dTypes = re.search(r'types=.*?&', apiSearchURL).group()
            dTypesList = dTypes.replace('types=', '').replace('&', '').split(':')
            dTypesString = ''
            for dType in dTypesList:
                dType = '&type=%s' %(re.sub('s$', '', dType))
                dTypesString = dTypesString + dType
            apiSearchURL = apiSearchURL + dTypesString
        except AttributeError:
            pass
        # Remove dvObjectType and types parameters, which I think the Search API is ignoring
        apiSearchURL = re.sub('fq=dvObjectType:\(.*\)&', '', apiSearchURL)
        apiSearchURL = re.sub('types=.*?&', '', apiSearchURL)
    return apiSearchURL
# Function that converts as many common html codes as I could find into their human-readable strings
def convert_common_html_encoding(string):
    """Decode two-character percent-encodings (e.g. '%20', '%C3') in *string*.

    Each code is replaced independently with its character per the table
    below; only uppercase hex codes are matched, and codes not in the
    table are left untouched.
    """
    string = (
        string
        .replace('%20', ' ').replace('%21', '!').replace('%22', '\"').replace('%23', '#')
        .replace('%24', '$').replace('%25', '%').replace('%26', '&').replace('%27', '\'')
        .replace('%28', '(').replace('%29', ')').replace('%2A', '*').replace('%2B', '+')
        .replace('%2C', ',').replace('%2D', '-').replace('%2E', '.').replace('%2F', '/')
        .replace('%30', '0').replace('%31', '1').replace('%32', '2').replace('%33', '3')
        .replace('%34', '4').replace('%35', '5').replace('%36', '6').replace('%37', '7')
        .replace('%38', '8').replace('%39', '9').replace('%3A', ':').replace('%3B', ';')
        .replace('%3C', '<').replace('%3D', '=').replace('%3E', '>').replace('%3F', '?')
        .replace('%40', '@').replace('%41', 'A').replace('%42', 'B').replace('%43', 'C')
        .replace('%44', 'D').replace('%45', 'E').replace('%46', 'F').replace('%47', 'G')
        .replace('%48', 'H').replace('%49', 'I').replace('%4A', 'J').replace('%4B', 'K')
        .replace('%4C', 'L').replace('%4D', 'M').replace('%4E', 'N').replace('%4F', 'O')
        .replace('%50', 'P').replace('%51', 'Q').replace('%52', 'R').replace('%53', 'S')
        .replace('%54', 'T').replace('%55', 'U').replace('%56', 'V').replace('%57', 'W')
        .replace('%58', 'X').replace('%59', 'Y').replace('%5A', 'Z').replace('%5B', '[')
        .replace('%5C', '\\').replace('%5D', ']').replace('%5E', '^').replace('%5F', '_')
        .replace('%60', '`').replace('%61', 'a').replace('%62', 'b').replace('%63', 'c')
        .replace('%64', 'd').replace('%65', 'e').replace('%66', 'f').replace('%67', 'g')
        .replace('%68', 'h').replace('%69', 'i').replace('%6A', 'j').replace('%6B', 'k')
        .replace('%6C', 'l').replace('%6D', 'm').replace('%6E', 'n').replace('%6F', 'o')
        .replace('%70', 'p').replace('%71', 'q').replace('%72', 'r').replace('%73', 's')
        .replace('%74', 't').replace('%75', 'u').replace('%76', 'v').replace('%77', 'w')
        .replace('%78', 'x').replace('%79', 'y').replace('%7A', 'z').replace('%7B', '{')
        .replace('%7C', '|').replace('%7D', '}').replace('%7E', '~').replace('%80', '€')
        .replace('%82', '‚').replace('%83', 'ƒ').replace('%84', '„').replace('%85', '…')
        .replace('%86', '†').replace('%87', '‡').replace('%88', 'ˆ').replace('%89', '‰')
        .replace('%8A', 'Š').replace('%8B', '‹').replace('%8C', 'Œ').replace('%8E', 'Ž')
        .replace('%91', '‘').replace('%92', '’').replace('%93', '“').replace('%94', '”')
        .replace('%95', '•').replace('%96', '–').replace('%97', '—').replace('%98', '˜')
        .replace('%99', '™').replace('%9A', 'š').replace('%9B', '›').replace('%9C', 'œ')
        .replace('%9E', 'ž').replace('%9F', 'Ÿ').replace('%A1', '¡').replace('%A2', '¢')
        .replace('%A3', '£').replace('%A4', '¤').replace('%A5', '¥').replace('%A6', '¦')
        .replace('%A7', '§').replace('%A8', '¨').replace('%A9', '©').replace('%AA', 'ª')
        .replace('%AB', '«').replace('%AC', '¬').replace('%AE', '®').replace('%AF', '¯')
        .replace('%B0', '°').replace('%B1', '±').replace('%B2', '²').replace('%B3', '³')
        .replace('%B4', '´').replace('%B5', 'µ').replace('%B6', '¶').replace('%B7', '·')
        .replace('%B8', '¸').replace('%B9', '¹').replace('%BA', 'º').replace('%BB', '»')
        .replace('%BC', '¼').replace('%BD', '½').replace('%BE', '¾').replace('%BF', '¿')
        .replace('%C0', 'À').replace('%C1', 'Á').replace('%C2', 'Â').replace('%C3', 'Ã')
        .replace('%C4', 'Ä').replace('%C5', 'Å').replace('%C6', 'Æ').replace('%C7', 'Ç')
        .replace('%C8', 'È').replace('%C9', 'É').replace('%CA', 'Ê').replace('%CB', 'Ë')
        .replace('%CC', 'Ì').replace('%CD', 'Í').replace('%CE', 'Î').replace('%CF', 'Ï')
        .replace('%D0', 'Ð').replace('%D1', 'Ñ').replace('%D2', 'Ò').replace('%D3', 'Ó')
        .replace('%D4', 'Ô').replace('%D5', 'Õ').replace('%D6', 'Ö').replace('%D7', '×')
        .replace('%D8', 'Ø').replace('%D9', 'Ù').replace('%DA', 'Ú').replace('%DB', 'Û')
        .replace('%DC', 'Ü').replace('%DD', 'Ý').replace('%DE', 'Þ').replace('%DF', 'ß')
        .replace('%E0', 'à').replace('%E1', 'á').replace('%E2', 'â').replace('%E3', 'ã')
        .replace('%E4', 'ä').replace('%E5', 'å').replace('%E6', 'æ').replace('%E7', 'ç')
        .replace('%E8', 'è').replace('%E9', 'é').replace('%EA', 'ê').replace('%EB', 'ë')
        .replace('%EC', 'ì').replace('%ED', 'í').replace('%EE', 'î').replace('%EF', 'ï')
        .replace('%F0', 'ð').replace('%F1', 'ñ').replace('%F2', 'ò').replace('%F3', 'ó')
        .replace('%F4', 'ô').replace('%F5', 'õ').replace('%F6', 'ö').replace('%F7', '÷')
        .replace('%F8', 'ø').replace('%F9', 'ù').replace('%FA', 'ú').replace('%FB', 'û')
        .replace('%FC', 'ü').replace('%FD', 'ý').replace('%FE', 'þ').replace('%FF', 'ÿ')
    )
    return string
def convert_utf8bytes_to_characters(string):
    """Decode common multi-byte UTF-8 percent-encoded sequences in *string*.

    Each sequence in the table below is replaced with its character;
    sequences not in the table are left untouched. Run this before
    convert_common_html_encoding so multi-byte sequences are not split
    into single-byte replacements.
    """
    string = (
        string
        .replace('%E2%82%AC', '€').replace('%E2%80%9A', '‚').replace('%C6%92', 'ƒ')
        .replace('%E2%80%A6', '…').replace('%E2%80%A0', '†').replace('%E2%80%A1', '‡')
        .replace('%E2%80%B0', '‰').replace('%C5%A0', 'Š').replace('%E2%80%B9', '‹')
        .replace('%C5%BD', 'Ž').replace('%E2%80%98', '‘').replace('%E2%80%99', '’')
        .replace('%E2%80%9D', '”').replace('%E2%80%A2', '•').replace('%E2%80%93', '–')
        .replace('%CB%9C', '˜').replace('%E2%84%A2', '™').replace('%C5%A1', 'š')
        .replace('%C5%93', 'œ').replace('%C5%BE', 'ž').replace('%C5%B8', 'Ÿ')
        .replace('%C2%A2', '¢').replace('%C2%A3', '£').replace('%C2%A4', '¤')
        .replace('%C2%A6', '¦').replace('%C2%A7', '§').replace('%C2%A8', '¨')
        .replace('%C2%AA', 'ª').replace('%C2%AB', '«').replace('%C2%AC', '¬')
        .replace('%C2%AE', '®').replace('%C2%AF', '¯').replace('%C2%B0', '°')
        .replace('%C2%B2', '²').replace('%C2%B3', '³').replace('%C2%B4', '´')
        .replace('%C2%B6', '¶').replace('%C2%B7', '·').replace('%C2%B8', '¸')
        .replace('%C2%BA', 'º').replace('%C2%BB', '»').replace('%C2%BC', '¼')
        .replace('%C2%BE', '¾').replace('%C2%BF', '¿').replace('%C3%80', 'À')
        .replace('%C3%82', 'Â').replace('%C3%83', 'Ã').replace('%C3%84', 'Ä')
        .replace('%C3%86', 'Æ').replace('%C3%87', 'Ç').replace('%C3%88', 'È')
        .replace('%C3%8A', 'Ê').replace('%C3%8B', 'Ë').replace('%C3%8C', 'Ì')
        .replace('%C3%8E', 'Î').replace('%C3%8F', 'Ï').replace('%C3%90', 'Ð')
        .replace('%C3%92', 'Ò').replace('%C3%93', 'Ó').replace('%C3%94', 'Ô')
        .replace('%C3%96', 'Ö').replace('%C3%97', '×').replace('%C3%98', 'Ø')
        .replace('%C3%9A', 'Ú').replace('%C3%9B', 'Û').replace('%C3%9C', 'Ü')
        .replace('%C3%9E', 'Þ').replace('%C3%9F', 'ß').replace('%C3%A0', 'à')
        .replace('%C3%A2', 'â').replace('%C3%A3', 'ã').replace('%C3%A4', 'ä')
        .replace('%C3%A6', 'æ').replace('%C3%A7', 'ç').replace('%C3%A8', 'è')
        .replace('%C3%AA', 'ê').replace('%C3%AB', 'ë').replace('%C3%AC', 'ì')
        .replace('%C3%8D', 'Í').replace('%C3%AE', 'î').replace('%C3%AF', 'ï')
        .replace('%C3%B0', 'ð').replace('%C3%B2', 'ò').replace('%C3%B3', 'ó')
        .replace('%C3%B4', 'ô').replace('%C3%B6', 'ö').replace('%C3%B7', '÷')
        .replace('%C3%B8', 'ø').replace('%C3%BA', 'ú').replace('%C3%BB', 'û')
        .replace('%C3%BC', 'ü').replace('%C3%BE', 'þ').replace('%C3%BF', 'ÿ')
    )
    return string
# Function that returns the params of a given Search API URL, to be used in requests calls
def get_params(apiSearchURL):
    """Split a Search API URL into its base URL and request parameters.

    Returns {'baseUrl': str, 'params': dict} suitable for requests.get.
    Repeated filter queries are collected under params['fq'] and repeated
    "type" parameters under params['type'].
    """
    params = {
        'baseUrl': '',
        'params': {}
    }
    fq = []
    # Split the URL into the base URL and the individual parameters.
    # Splitting on '&fq' leaves each filter query as a '=key:value' item.
    # (Raw string fixes the original's invalid-escape regex literal.)
    splitSearchURLList = re.split(r'\?|&fq|&', apiSearchURL)
    params['baseUrl'] = splitSearchURLList.pop(0)
    # Remove any empty items from the splitSearchURLList
    splitSearchURLList = list(filter(None, splitSearchURLList))
    typeParamList = []
    for paramValue in splitSearchURLList:
        # The main query: decode percent-encoding and '+' spaces
        if paramValue.startswith('q='):
            paramValue = convert_utf8bytes_to_characters(paramValue)
            paramValue = convert_common_html_encoding(paramValue)
            paramValue = paramValue.replace('+', ' ')
            # Strip only the leading 'q='; the original's replace('q=', '')
            # also mangled any 'q=' occurring inside the query itself
            params['params']['q'] = paramValue[2:]
        # Ordinary key=value parameters
        if not paramValue.startswith('=') and not paramValue.startswith('q='):
            # maxsplit=1 keeps values that themselves contain '='
            key, value = paramValue.split('=', 1)
            if value != '':
                params['params'][key] = value
        # Collect every 'type' parameter value
        if paramValue.startswith('type'):
            typeParamList.append(paramValue.split('=', 1)[1])
        # Filter queries, left as '=key:value' by the split above
        if paramValue.startswith('='):
            # maxsplit=1 keeps values containing colons (e.g. timestamps),
            # which the original truncated at their first colon
            key, value = paramValue[1:].split(':', 1)
            value = convert_utf8bytes_to_characters(value)
            value = convert_common_html_encoding(value)
            value = value.replace('+', ' ')
            fq.append(key + ':' + value)
    # If there are type param values in typeParamList, add as value to new "type" param
    if typeParamList:
        params['params']['type'] = typeParamList
    # If there are any fq params, add fq keys and values
    if len(fq) > 0:
        params['params']['fq'] = fq
    return params
# Gets info from Search API about a given dataverse, dataset or file
def get_value_row_from_search_api_object(item, installationUrl):
    """Build a flat dict describing one Search API result item.

    The keys depend on item['type'] ('dataset', 'dataverse' or 'file').
    *installationUrl* is kept for interface compatibility; the original
    used it only for dead locals (datasetUrl/dataverseUrl), now removed.
    """
    if item['type'] == 'dataset':
        newRow = {
            'dataset_pid': item['global_id'],
            'version_state': item['versionState'],
            'dataverse_alias': item['identifier_of_dataverse']
        }
    elif item['type'] == 'dataverse':
        newRow = {
            'dataverse_database_id': item['entity_id'],
            'dataverse_alias': item['identifier'],
            'dataverse_url': item['url'],
            'dataverse_name': item['name']
        }
    elif item['type'] == 'file':
        # Not every file has a persistent ID; default falsy values to ''
        newRow = {
            'file_database_id': item['file_id'],
            'file persistent_id': item.get('file_persistent_id') or '',
            'file_name': item['name'],
            'dataset_pid': item['dataset_persistent_id']
        }
    return newRow
# Uses Search API to return dataframe containing info about datasets in a Dataverse installation
# Write progress and results to the tkinter window
def get_object_dataframe_from_search_api(
        url, params, objectType, rootWindow=None, progressText=None, progressLabel=None, apiKey=None):
    """Page through a Search API URL and return the results as a DataFrame.

    One row per result of the given *objectType* ('dataset', 'dataverse' or
    'file'), with columns as produced by get_value_row_from_search_api_object.
    Progress is written to the optional tkinter widgets when all three are
    supplied. NOTE(review): *params* is mutated in place by this function.
    """
    installationUrl = get_installation_url(url)
    if apiKey:
        header = {'X-Dataverse-key': apiKey}
    else:
        header = {}
    params['type'] = objectType
    # Add param to show database IDs of each item
    params['show_entity_ids'] = 'true'
    # Get total count of objects
    params['per_page'] = 1
    response = requests.get(
        url,
        params=params,
        headers=header
    )
    data = response.json()
    total = data['data']['total_count']
    misindexedObjectCount = 0
    objectInfoDict = []
    # Initialization for paginating through results of Search API calls
    condition = True
    params['start'] = 0
    if None not in [rootWindow, progressText, progressLabel]:
        text = 'Looking for datasets...'
        progressText.set(text)
        progressLabel.config(fg='green')
        progressLabel = progressLabel.grid(sticky='w', row=0)
        rootWindow.update_idletasks()
    while condition:
        try:
            params['per_page'] = 10
            response = requests.get(
                url,
                params=params,
                headers=header
            )
            data = response.json()
            for item in data['data']['items']:
                newRow = get_value_row_from_search_api_object(item, installationUrl)
                objectInfoDict.append(dict(newRow))
            datasetCount = len(objectInfoDict)
            # Update variables to paginate through the search results
            params['start'] = params['start'] + params['per_page']
        # If misindexed datasets break the Search API call where per_page=10,
        # try calls where per_page=1 then per_page=10 again
        # (See https://github.com/IQSS/dataverse/issues/4225)
        except Exception:
            try:
                params['per_page'] = 1
                response = requests.get(
                    url,
                    params=params,
                    headers=header
                )
                data = response.json()
                for item in data['data']['items']:
                    newRow = get_value_row_from_search_api_object(item, installationUrl)
                    objectInfoDict.append(dict(newRow))
                # Update variables to paginate through the search results
                params['start'] = params['start'] + params['per_page']
            # If page fails to load, count a misindexed object and continue to the next page
            except Exception:
                misindexedObjectCount += 1
                params['start'] = params['start'] + params['per_page']
        condition = params['start'] < total
    objectInfoDF = pd.DataFrame(objectInfoDict)
    return objectInfoDF
# Uses "Get Contents" endpoint to return list of dataverse aliases of all subcollections in a given collection
def get_all_subcollection_aliases(collectionUrl, apiKey=''):
    """Return the aliases of a collection and all of its subcollections.

    Breadth-first walk of the collection tree using the "Get Contents"
    endpoint; the given collection's alias is included in the result.
    """
    parsed = urlparse(collectionUrl)
    installationUrl = parsed.scheme + '://' + parsed.netloc
    # Alias is the path segment after "/dataverse/" in the collection URL
    alias = parsed.path.split('/')[2]
    if apiKey:
        header = {'X-Dataverse-key': apiKey}
    else:
        header = {}
    # Get ID of given dataverse alias
    dataverseInfoEndpoint = '%s/api/dataverses/%s' % (installationUrl, alias)
    response = requests.get(
        dataverseInfoEndpoint,
        headers=header)
    data = response.json()
    parentDataverseId = data['data']['id']
    # Create list and add ID of given dataverse
    dataverseIds = [parentDataverseId]
    # Get each subdataverse in the given dataverse.
    # NOTE: dataverseIds is deliberately extended while being iterated --
    # newly discovered children are appended and visited in turn (BFS)
    for dataverseId in dataverseIds:
        dataverseGetContentsEndpoint = '%s/api/dataverses/%s/contents' % (installationUrl, dataverseId)
        response = requests.get(
            dataverseGetContentsEndpoint,
            headers=header)
        data = response.json()
        for item in data['data']:
            if item['type'] == 'dataverse':
                dataverseId = item['id']
                dataverseIds.extend([dataverseId])
    # Get the alias for each dataverse ID
    dataverseAliases = []
    for dataverseId in dataverseIds:
        dataverseInfoEndpoint = '%s/api/dataverses/%s' % (installationUrl, dataverseId)
        response = requests.get(
            dataverseInfoEndpoint,
            headers=header)
        data = response.json()
        alias = data['data']['alias']
        dataverseAliases.append(alias)
    return dataverseAliases
def get_canonical_pid(pidOrUrl):
    """Return the canonical persistent ID (e.g. 'doi:...' or 'hdl:...') of a dataset.

    Accepts a dataset page URL (with a persistentId query parameter), a DOI
    or Handle resolver URL, or an already-canonical PID. Unrecognized values
    are returned unchanged; the original raised NameError for them because
    canonicalPid was never assigned.
    """
    # Dataset page URL, e.g. .../dataset.xhtml?persistentId=doi%3A10.5072%2FABC&version=1.0
    if pidOrUrl.startswith('http') and 'persistentId=' in pidOrUrl:
        canonicalPid = pidOrUrl.split('persistentId=')[1]
        canonicalPid = canonicalPid.split('&version')[0]
        canonicalPid = canonicalPid.replace('%3A', ':').replace('%2F', '/')
    # DOI resolver URL, e.g. https://doi.org/10.5072/ABC
    # (raw strings: '\/' in a plain string is an invalid-escape warning in newer Python)
    elif pidOrUrl.startswith('http') and 'doi.' in pidOrUrl:
        canonicalPid = re.sub(r'http.*org\/', 'doi:', pidOrUrl)
    elif pidOrUrl.startswith('doi:') and '/' in pidOrUrl:
        canonicalPid = pidOrUrl
    # Handle resolver URL, e.g. https://hdl.handle.net/1902.1/111012
    elif pidOrUrl.startswith('http') and 'hdl.' in pidOrUrl:
        canonicalPid = re.sub(r'http.*net\/', 'hdl:', pidOrUrl)
    elif pidOrUrl.startswith('hdl:') and '/' in pidOrUrl:
        canonicalPid = pidOrUrl
    else:
        # Fallthrough for anything unrecognized: return the input as-is
        canonicalPid = pidOrUrl
    return canonicalPid
def get_datasets_from_collection_or_search_url(
    url, rootWindow=None, progressLabel=None, progressText=None, textBoxCollectionDatasetPIDs=None,
    apiKey='', ignoreDeaccessionedDatasets=False, subdataverses=False):
    """Collect the PIDs of the datasets behind a collection URL or Search API URL.

    Builds a dataframe of dataset info via the Search API, optionally drops
    deaccessioned datasets, restricts the results to the given collection
    (with or without its subcollections), and writes the PIDs into the
    textBoxCollectionDatasetPIDs scrollbox when one is supplied. The tkinter
    arguments may all be None for headless use, in which case the summary is
    printed to stdout instead.
    """
    if textBoxCollectionDatasetPIDs is not None:
    # if None not in [rootWindow, progressLabel, progressText, textBoxCollectionDatasetPIDs]:
        # Hide the textBoxCollectionDatasetPIDs scrollbox if it exists
        forget_widget(textBoxCollectionDatasetPIDs)
    
    # Use the Search API to get dataset info from the given search url or Dataverse collection URL
    searchApiUrl = get_search_api_url(url)
    requestsGetProperties = get_params(searchApiUrl)
    baseUrl = requestsGetProperties['baseUrl']
    params = requestsGetProperties['params']
    datasetInfoDF = get_object_dataframe_from_search_api(
        url=baseUrl, rootWindow=rootWindow, progressLabel=progressLabel, progressText=progressText,
        params=params, objectType='dataset', apiKey=apiKey)
    datasetCount = len(datasetInfoDF.index)

    if datasetCount == 0:
        text = 'Datasets found: 0'
        if progressText is not None:
            progressText.set(text)
        else:
            print(text)

    elif datasetCount > 0:
        deaccessionedDatasetCount = 0

        # To ignore deaccessioned datasets, remove from the dataframe all datasets
        # where version_state is DEACCESSIONED
        if ignoreDeaccessionedDatasets == True:
            datasetInfoDF = datasetInfoDF[datasetInfoDF['version_state'].str.contains('DEACCESSIONED') == False]
            deaccessionedDatasetCount = datasetCount - len(datasetInfoDF.index)

        # Remove version_state column so that I can remove the dataframe's duplicate rows
        # and there's only one row per dataset
        datasetInfoDF = datasetInfoDF.drop('version_state', axis=1)

        # Drop duplicate rows, which happens when Search API results lists
        # a dataset's published and draft versions
        datasetInfoDF = datasetInfoDF.drop_duplicates()
        uniqueDatasetCount = len(datasetInfoDF.index)

        # 'q=' is the Search API query parameter: its absence means url is a
        # plain collection URL rather than a saved search query URL
        if 'q=' not in url:
            if subdataverses == True and is_root_collection(url) == True:
                # The root collection owns everything, so no filtering is needed
                uniqueDatasetCount = len(datasetInfoDF)

            # If the user wants datasets in all subdataverses and the url
            # is not the root collection...
            elif subdataverses == True and is_root_collection(url) == False:
                # Get the aliases of all subdataverses...
                dataverseAliases = get_all_subcollection_aliases(url, apiKey=apiKey)

                # Keep only datasets owned by the collection itself or one of
                # its subcollections
                datasetInfoDF = datasetInfoDF[
                    datasetInfoDF['dataverse_alias'].isin(dataverseAliases)]
                uniqueDatasetCount = len(datasetInfoDF)

            elif subdataverses == False:
                # Keep only datasets owned directly by the given collection
                alias = get_alias_from_collection_url(url)
                datasetInfoDF = datasetInfoDF[datasetInfoDF['dataverse_alias'].isin([alias])]
                uniqueDatasetCount = len(datasetInfoDF)

        elif 'q=' in url:
            uniqueDatasetCount = len(datasetInfoDF)

        if textBoxCollectionDatasetPIDs is not None:
            textBoxCollectionDatasetPIDs.grid(sticky='w', row=2, pady=5)
            textBoxCollectionDatasetPIDs.configure(state='normal')
            textBoxCollectionDatasetPIDs.delete('1.0', END)
            # Insert the dataset PIDs into the textBoxCollectionDatasetPIDs scrollbox
            for dfIndex, dfRow in datasetInfoDF.iterrows():
                datasetPid = dfRow['dataset_pid'] + '\n'
                textBoxCollectionDatasetPIDs.insert('end', datasetPid)

        # Create and place result text with uniqueDatasetCount
        if deaccessionedDatasetCount == 0:
            text = 'Datasets found: %s' % (str(uniqueDatasetCount))
        if deaccessionedDatasetCount > 0:
            text = 'Datasets found: %s\rDeaccessioned datasets ignored: %s' % (str(uniqueDatasetCount), str(deaccessionedDatasetCount))
        if progressText is not None:
            progressText.set(text)
        else:
            print(text)
def get_directory_path():
    """Open a folder-picker dialog and return the directory the user chose."""
    return filedialog.askdirectory()
def get_dataset_metadata_export(installationUrl, datasetPid, exportFormat, header=None, apiKey=''):
    """Return a dataset's metadata in the given export format.

    exportFormat 'dataverse_json' uses the native JSON endpoint (which may
    include unpublished versions when an API key is supplied); all other
    formats use the export endpoint, which only covers the latest published
    version. Returns parsed JSON for JSON formats, a prettified XML string
    for XML formats, or the string 'ERROR' on failure.
    """
    # Build the headers locally. The original used a mutable default
    # argument (header={}) and mutated it, so an API key set on one call
    # leaked into every later call that relied on the default.
    header = dict(header) if header else {}
    if apiKey:
        header['X-Dataverse-key'] = apiKey
    data = 'ERROR'  # default for unexpected status codes or export formats
    if exportFormat == 'dataverse_json':
        getJsonRepresentationOfADatasetEndpoint = '%s/api/datasets/:persistentId/?persistentId=%s' % (installationUrl, datasetPid)
        getJsonRepresentationOfADatasetEndpoint = getJsonRepresentationOfADatasetEndpoint.replace('//api', '/api')
        response = requests.get(
            getJsonRepresentationOfADatasetEndpoint,
            headers=header)
        if response.status_code in (200, 401):  # 401 is the unauthorized code. Valid API key is needed
            data = response.json()
        return data
    # For getting metadata from other exports, which are available only for
    # each dataset's latest published version
    else:
        datasetMetadataExportEndpoint = '%s/api/datasets/export?exporter=%s&persistentId=%s' % (installationUrl, exportFormat, datasetPid)
        datasetMetadataExportEndpoint = datasetMetadataExportEndpoint.replace('//api', '/api')
        response = requests.get(
            datasetMetadataExportEndpoint,
            headers=header)
        if response.status_code == 200:
            if exportFormat in ('schema.org', 'OAI_ORE'):
                data = response.json()
            elif exportFormat in ('ddi', 'oai_ddi', 'dcterms', 'oai_dc', 'Datacite', 'oai_datacite'):
                string = response.text
                data = BeautifulSoup(string, 'xml').prettify()
        return data
def get_metadatablock_data(installationUrl, metadatablockName):
    """Return the JSON description of a metadata block, or None on failure.

    The original left `data` unassigned when the request did not return
    HTTP 200, so the final `return data` raised UnboundLocalError;
    returning None makes the failure explicit to callers.
    """
    metadatablocksApiEndpoint = '%s/api/v1/metadatablocks/%s' % (installationUrl, metadatablockName)
    response = requests.get(metadatablocksApiEndpoint)
    data = None
    if response.status_code == 200:
        data = response.json()
    return data
def get_metadatablock_db_field_name_and_title(metadatablockData):
    """Map each parent field's display title to its database name.

    A "parent" field is any field of the metadata block that is not listed
    as a child of some compound field.
    """
    fields = metadatablockData['data']['fields']
    # Collect the database names of every child field
    childDBNames = set()
    for properties in fields.values():
        for childDBName in properties.get('childFields', {}):
            childDBNames.add(childDBName)
    # Parent fields are all fields minus the child fields
    parentDBNames = {properties['name'] for properties in fields.values()} - childDBNames
    # Map display title -> database name for each parent field
    return {fields[dbName]['title']: dbName for dbName in parentDBNames}
def get_parent_field_names(metadatablockData, listbox):
    """Fill the tkinter listbox with the display names of the block's parent fields.

    Compound fields are shown as 'Title: child1, child2, ...' truncated to
    50 characters; fields that are themselves children of a compound field
    are skipped.

    (The original also built an all-fields dict, a compound-fields list and
    a fieldWithChildFieldList that were never read; those dead locals are
    removed.)
    """
    listbox.delete(0, END)
    # Collect the names of all child fields so they can be excluded below
    childFieldsDBNamesList = []
    for parentField in metadatablockData['data']['fields']:
        properties = metadatablockData['data']['fields'][parentField]
        if 'childFields' in properties:
            for childField in properties['childFields']:
                childFieldsDBNamesList.append(childField)
    options = []
    for parentField in metadatablockData['data']['fields']:
        properties = metadatablockData['data']['fields'][parentField]
        # Primitive parent field: show its title directly
        if 'childFields' not in properties and properties['name'] not in childFieldsDBNamesList:
            fieldTitle = properties['title']
            options.append(' ' + fieldTitle)
        # Compound field: show its title plus a truncated list of its children
        elif 'childFields' in properties:
            title = properties['title']
            childFieldsList = []
            for childField in properties['childFields']:
                childFieldsList.append(childField)
            childFieldsString = list_to_string(childFieldsList)
            fieldWithChildField = '%s: %s' % (title, childFieldsString)
            # Truncate long labels so the listbox stays readable
            if len(fieldWithChildField) > 50:
                fieldWithChildField = fieldWithChildField[0:50] + '...'
            options.append(' ' + fieldWithChildField)
    for option in options:
        listbox.insert('end', option)
def get_listbox_values(listbox):
    """Return the field names behind the listbox entries currently selected.

    Each entry looks like ' Title' or ' Author: child1, child2'; the field
    name is the text before the first colon, stripped of padding.
    """
    return [
        listbox.get(selection).strip().split(':')[0]
        for selection in listbox.curselection()
    ]
def get_column_names(
        metadatablockData, parentFieldTitle, parentFieldDBNameAndTitleDict):
    """Return the child-field (CSV column) names of the parent field with the
    given display title.

    Returns an empty list for primitive (non-compound) fields and for titles
    that are not in parentFieldDBNameAndTitleDict. The original's final
    statement was mistyped as `turn columns` (a syntax error), and it raised
    NameError for unknown titles because `columns` was only created inside
    the `if` branch.
    """
    # Database names of all compound fields in the block
    compoundFieldsDBNamesList = []
    for parentfield in metadatablockData['data']['fields']:
        properties = metadatablockData['data']['fields'][parentfield]
        if 'childFields' in properties:
            compoundFieldsDBNamesList.append(properties['name'])
    columns = []
    if parentFieldTitle in parentFieldDBNameAndTitleDict.keys():
        chosenDBName = parentFieldDBNameAndTitleDict[parentFieldTitle]
        if chosenDBName in compoundFieldsDBNamesList:
            dbNameProperties = metadatablockData['data']['fields'][chosenDBName]
            for field in dbNameProperties['childFields']:
                columns.append(field)
    return columns
def get_metadata_values_lists(
    installationUrl, datasetMetadata, metadatablockName,
    chosenTitleDBName, chosenFields=None, versions='latestVersion'):
    """Extract CSV rows for one metadata field from a dataverse_json export.

    datasetMetadata is the parsed native JSON export; chosenTitleDBName is
    the database name of the parent field to extract; chosenFields lists the
    child-field names to pull out of a compound field. Each returned row is
    [pid, persistent URL, page URL, version number, value...]. Values are
    truncated to 10,000 characters and '\\r' is replaced with ' - ' so a
    value cannot break the CSV row layout.
    """
    if versions == 'allVersions':
        versions = 'datasetVersion'
    rowVariablesList = []

    if (datasetMetadata['status'] == 'OK') and\
        (metadatablockName in datasetMetadata['data'][versions]['metadataBlocks']):

        datasetPersistentUrl = datasetMetadata['data']['persistentUrl']
        datasetPid = get_canonical_pid(datasetPersistentUrl)
        datasetUrl = installationUrl + '/dataset.xhtml?persistentId=' + datasetPid
        # Published versions carry major/minor numbers; drafts carry neither
        if 'versionNumber' in datasetMetadata['data'][versions]:
            majorVersionNumber = datasetMetadata['data'][versions]['versionNumber']
            minorVersionNumber = datasetMetadata['data'][versions]['versionMinorNumber']
            datasetVersionNumber = f'{majorVersionNumber}.{minorVersionNumber}'
        else:
            datasetVersionNumber = 'DRAFT'

        for fields in datasetMetadata['data'][versions]['metadataBlocks'][metadatablockName]['fields']:
            if fields['typeName'] == chosenTitleDBName:
                typeClass = fields['typeClass']
                allowsMultiple = fields['multiple']

                # Simple field with multiple values: one row per value
                if typeClass in ('primitive', 'controlledVocabulary') and allowsMultiple is True:
                    for value in fields['value']:
                        rowVariables = [
                            datasetPid, datasetPersistentUrl, datasetUrl,
                            datasetVersionNumber, value[:10000].replace('\r', ' - ')]
                        rowVariablesList.append(rowVariables)

                # Simple field with a single value: one row
                elif typeClass in ('primitive', 'controlledVocabulary') and allowsMultiple is False:
                    value = fields['value'][:10000].replace('\r', ' - ')
                    rowVariables = [
                        datasetPid, datasetPersistentUrl, datasetUrl,
                        datasetVersionNumber, value]
                    rowVariablesList.append(rowVariables)

                # Compound field with multiple entries: one row per entry,
                # one column per chosen child field
                elif typeClass == 'compound' and allowsMultiple is True:
                    index = 0
                    condition = True

                    while condition:
                        rowVariables = [
                            datasetPid, datasetPersistentUrl, datasetUrl,
                            datasetVersionNumber]

                        # Get number of multiples
                        total = len(fields['value'])

                        # For each child field...
                        for chosenField in chosenFields:
                            # Try getting the value of that child field
                            try:
                                value = fields['value'][index][chosenField]['value'][:10000].replace('\r', ' - ')
                            # Otherwise, save an empty string as the value
                            except KeyError:
                                value = ''
                            # Append value to the rowVariables list to add to the CSV file
                            rowVariables.append(value)
                        rowVariablesList.append(rowVariables)

                        index += 1
                        condition = index < total

                # Compound field with a single entry: one row
                elif typeClass == 'compound' and allowsMultiple is False:
                    rowVariables = [datasetPid, datasetPersistentUrl, datasetUrl, datasetVersionNumber]
                    for chosenField in chosenFields:
                        try:
                            # Get value from compound field
                            value = fields['value'][chosenField]['value'][:10000].replace('\r', ' - ')
                        except KeyError:
                            value = ''
                        rowVariables.append(value)
                    rowVariablesList.append(rowVariables)
    return rowVariablesList
# Delete empty CSV files in a given directory
def delete_empty_csv_files(csvDirectory):
    """Delete every CSV in csvDirectory that contains only its header row.

    Returns the field names (file names without '.csv') of the deleted
    files, i.e. the fields for which no metadata was found.

    The original called f.close() inside the with-block (redundant) and
    removed the file while still inside the open context; the deletion is
    now done after the with-block has closed the file.
    """
    fieldsWithNoMetadata = []
    for file in glob.glob(str(Path(csvDirectory)) + '/' + '*.csv'):
        with open(file, mode='r', encoding='utf-8') as f:
            rowCount = len(list(csv.reader(f, delimiter=',')))
        if rowCount == 1:  # header row only: no metadata rows were written
            fieldName = Path(file).name.replace('.csv', '')
            fieldsWithNoMetadata.append(fieldName)
            os.remove(file)
    return fieldsWithNoMetadata
# Full outer join of CSV files in a given directory
def join_metadata_csv_files(csvDirectory):
    """Join every per-field CSV in csvDirectory into one 'all_fields.csv'.

    The files are full-outer-joined on the four columns shared by all of
    the per-field CSVs. Nothing happens when the directory holds fewer
    than two files.
    """
    # Output file goes in the same directory the per-field CSVs live in
    outputFilePath = os.path.join(csvDirectory, 'all_fields.csv')
    # Columns common to every per-field CSV, used as the join keys
    keyColumns = ['dataset_pid', 'dataset_pid_url', 'dataset_url', 'dataset_version_number']
    csvPaths = [os.path.join(csvDirectory, fileName) for fileName in listdir(csvDirectory)]
    if len(csvPaths) > 1:
        # Read each CSV and index it by the shared key columns
        indexedFrames = []
        for csvPath in csvPaths:
            frame = pd.read_csv(csvPath, sep=',', na_filter=False)
            frame.set_index(keyColumns, inplace=True)
            indexedFrames.append(frame)
        # Fold the frames together with a full outer join, then export
        joined = reduce(lambda left, right: left.join(right, how='outer'), indexedFrames)
        joined.to_csv(outputFilePath)
# Get the metadata of datasets. Function passed to tkinter button
def get_dataset_metadata(
    rootWindow, progressLabel, progressText, noMetadataText, noMetadataLabel,
    installationUrl='', datasetPidString='',
    parentFieldTitleList='', directoryPath='', apiKey=''):
    """Export chosen citation-block fields of the listed datasets to CSV files.

    Creates a timestamped directory under directoryPath, writes one CSV per
    chosen field (header first, then one row per value per dataset), deletes
    the CSVs that ended up header-only, and finally joins the survivors into
    all_fields.csv. Progress is reported through the tkinter arguments.
    """
    # Use metadatablock API endpoint to get metadatablock data
    metadatablockData = get_metadatablock_data(installationUrl, 'citation')

    # From metadatablockData, get the database and display names of each parent field
    allFieldsDBNamesDict = get_metadatablock_db_field_name_and_title(metadatablockData)

    # Create directory in the directory that the user chose
    currentTime = time.strftime('%Y.%m.%d_%H.%M.%S')
    installationRootName = get_root_alias_name(installationUrl)
    mainDirectoryName = '%s_dataset_metadata_%s' % (installationRootName, currentTime)
    mainDirectoryPath = str(Path(directoryPath + '/' + mainDirectoryName))
    os.mkdir(mainDirectoryPath)

    # For each field the user chose, create a CSV file with only a header row
    for parentFieldTitle in parentFieldTitleList:
        # Create file name and path
        csvFileName = parentFieldTitle.lower().strip().replace(' ', '_')
        csvFileName = csvFileName + '(citation)'
        mainDirectoryPath = str(Path(directoryPath + '/' + mainDirectoryName))
        csvFilePath = str(Path(mainDirectoryPath, csvFileName)) + '.csv'

        # Create header row for the CSV file
        headerRow = ['dataset_pid', 'dataset_pid_url', 'dataset_url', 'dataset_version_number']
        childFieldsList = get_column_names(
            metadatablockData, parentFieldTitle, allFieldsDBNamesDict)
        # Add childFields list to header row
        headerRow = headerRow + childFieldsList

        # Create CSV file and add header row
        with open(csvFilePath, mode='w', newline='') as f:
            writer = csv.writer(f)
            writer.writerow(headerRow)

    # Change passed datasetPidString to a list. Make sure the last newline doesn't mess up the list
    # NOTE(review): splitlines() already drops a single trailing newline, so
    # [:-1] discards the LAST PID unless the string ends with a blank line —
    # compare delete_published_datasets, which filters empties instead. Confirm.
    datasetPidList = [x.strip() for x in datasetPidString.splitlines()][:-1]

    noMetadataLabel.grid_forget()

    count = 0
    datasetTotalCount = len(datasetPidList)
    text = 'Dataset metadata retrieved: 0 of %s' % (datasetTotalCount)
    progressText.set(text)
    progressLabel.grid(sticky='w', row=1, columnspan=2)
    rootWindow.update_idletasks()

    for datasetPid in datasetPidList:
        # NOTE(review): get_dataset_metadata_export can return the string
        # 'ERROR', in which case the ['status'] lookup below raises TypeError.
        datasetMetadata = get_dataset_metadata_export(
            installationUrl=installationUrl,
            datasetPid=datasetPid,
            exportFormat='dataverse_json',
            apiKey=apiKey)
        if datasetMetadata['status'] == 'OK':
            # Append one row per value of each chosen field to that field's CSV
            for parentFieldTitle in parentFieldTitleList:
                dbName = allFieldsDBNamesDict[parentFieldTitle]
                valueLists = get_metadata_values_lists(
                    installationUrl=installationUrl,
                    datasetMetadata=datasetMetadata,
                    metadatablockName='citation',
                    chosenTitleDBName=dbName,
                    chosenFields=get_column_names(
                        metadatablockData, parentFieldTitle, allFieldsDBNamesDict))
                csvFileName = parentFieldTitle.lower().strip().replace(' ', '_')
                csvFileName = csvFileName + '(citation)'
                csvFilePath = str(Path(mainDirectoryPath, csvFileName)) + '.csv'
                for valueList in valueLists:
                    with open(csvFilePath, mode='a', newline='', encoding='utf-8') as f:
                        writer = csv.writer(f, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
                        writer.writerow(valueList)

        count += 1
        text = 'Dataset metadata retrieved: %s of %s' % (count, datasetTotalCount)
        progressText.set(text)
        rootWindow.update_idletasks()

    # Remove the CSVs of fields for which no dataset had metadata
    fieldsWithNoMetadata = delete_empty_csv_files(mainDirectoryPath)

    if count > 0 and len(fieldsWithNoMetadata) > 0:
        # noMetadataLabel.grid(sticky='w', row=2)
        fieldsWithNoMetadataString = list_to_string(fieldsWithNoMetadata)
        fieldsWithNoMetadataString = (
            'No metadata found for the following fields:\r' + fieldsWithNoMetadataString)
        noMetadataText.set(fieldsWithNoMetadataString)
        noMetadataLabel.grid(sticky='w', row=2)
        rootWindow.update_idletasks()

    # Full outer join all CSV files to create a CSV with all metadata
    join_metadata_csv_files(mainDirectoryPath)
def delete_published_dataset(installationUrl, datasetPid, apiKey):
    """Destroy (permanently delete) a published dataset.

    Returns a 'STATUS: message' string. The original implicitly returned
    None when the response body had no 'status' key, which made callers'
    substring checks ('OK' in statusMessage) raise TypeError; an explicit
    ERROR string is returned instead.
    """
    destroyDatasetApiEndpointUrl = '%s/api/datasets/:persistentId/destroy/?persistentId=%s' % (installationUrl, datasetPid)
    req = requests.delete(
        destroyDatasetApiEndpointUrl,
        headers={'X-Dataverse-key': apiKey})
    data = req.json()
    status = data.get('status')
    if status:
        message = data.get('message', '')
        statusMessage = '%s: %s' % (status, message)
    else:
        # No status in the response body: still return a string so callers'
        # substring checks on the result keep working
        statusMessage = 'ERROR: unexpected response (HTTP %s)' % req.status_code
    return statusMessage
def delete_published_datasets(
    rootWindow, progressLabel, progressText, notDeletedText, notDeletedLabel,
    installationUrl, datasetPidString, apiKey):
    """Destroy every published dataset listed in datasetPidString.

    datasetPidString holds one PID or dataset URL per line (e.g. read from
    a tkinter text widget). Successes and failures are reported through the
    given tkinter labels/StringVars.
    """
    installationUrl = get_installation_url(installationUrl)

    # Change passed datasetPidString to a list. Make sure the last newline doesn't mess up the list
    datasetPidList = [x.strip() for x in datasetPidString.splitlines()]

    # Remove any empty items from the list of dataset PIDs
    datasetPidList = [datasetPid for datasetPid in datasetPidList if datasetPid]

    # Normalize every entry (URL or PID) to its canonical PID form
    canonicalPidList = []
    for datasetPid in datasetPidList:
        canonicalPid = get_canonical_pid(datasetPid)
        canonicalPidList.append(canonicalPid)

    # Delete any message in the tkinter window about datasets not being deleted
    # the last time the "Delete datasets" button was pressed
    notDeletedLabel.grid_forget()

    deletedDatasetCount = 0
    datasetTotalCount = len(canonicalPidList)

    deletedText = 'Datasets deleted: 0 of %s' % (datasetTotalCount)
    progressText.set(deletedText)
    progressLabel.config(fg='green')
    progressLabel.grid(sticky='w', row=1)
    # The "not deleted" label is placed in white (invisible on the default
    # background) and only turned red once a deletion actually fails
    notDeletedLabel.config(fg='white')
    notDeletedLabel.grid(sticky='w', row=2)
    rootWindow.update_idletasks()

    destroyedDatasets = []
    notDestroyedDatasets = []

    for canonicalPid in canonicalPidList:
        # delete_published_dataset returns a 'STATUS: message' string
        statusMessage = delete_published_dataset(installationUrl, canonicalPid, apiKey)

        if 'OK' in statusMessage:
            destroyedDatasets.append(canonicalPid)
            deletedDatasetCount += 1
            deletedText = 'Datasets deleted: %s of %s' % (deletedDatasetCount, datasetTotalCount)
            progressText.set(deletedText)
            rootWindow.update_idletasks()
        elif 'ERROR' in statusMessage:
            notDeletedLabel.config(fg='red')
            notDestroyedDatasets.append(canonicalPid)
            notDeletedMessage = 'Datasets not deleted: %s' % (len(notDestroyedDatasets))
            notDeletedText.set(notDeletedMessage)
            rootWindow.update_idletasks()
| true | true |
f73414f87ce5fccc45b190ae55d9f6dc7c6eabfb | 10,224 | py | Python | vspk/v4_0/nubootstrap.py | mohaimenhasan/vspk-python | 4c7b297427048340b250cc3c74d9214dc0d4bde1 | [
"BSD-3-Clause"
] | null | null | null | vspk/v4_0/nubootstrap.py | mohaimenhasan/vspk-python | 4c7b297427048340b250cc3c74d9214dc0d4bde1 | [
"BSD-3-Clause"
] | null | null | null | vspk/v4_0/nubootstrap.py | mohaimenhasan/vspk-python | 4c7b297427048340b250cc3c74d9214dc0d4bde1 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright (c) 2015, Alcatel-Lucent Inc, 2017 Nokia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from .fetchers import NUMetadatasFetcher
from .fetchers import NUGlobalMetadatasFetcher
from bambou import NURESTObject
class NUBootstrap(NURESTObject):
    """ Represents a Bootstrap in the VSD

        Notes:
            Gateway bootstrap details.
    """

    # NOTE(review): this looks like generated vspk binding code — if so,
    # changes belong in the generator, not this file. Confirm.

    __rest_name__ = "bootstrap"
    __resource_name__ = "bootstraps"

    ## Constants
    # Enum values accepted by the VSD API for ZFBMatchAttribute, entityScope
    # and status (mirrored in the expose_attribute choices below)

    CONST_ZFB_MATCH_ATTRIBUTE_NONE = "NONE"

    CONST_ZFB_MATCH_ATTRIBUTE_NSGATEWAY_ID = "NSGATEWAY_ID"

    CONST_ENTITY_SCOPE_GLOBAL = "GLOBAL"

    CONST_STATUS_NOTIFICATION_APP_REQ_SENT = "NOTIFICATION_APP_REQ_SENT"

    CONST_ZFB_MATCH_ATTRIBUTE_HOSTNAME = "HOSTNAME"

    CONST_STATUS_INACTIVE = "INACTIVE"

    CONST_ENTITY_SCOPE_ENTERPRISE = "ENTERPRISE"

    CONST_ZFB_MATCH_ATTRIBUTE_MAC_ADDRESS = "MAC_ADDRESS"

    CONST_ZFB_MATCH_ATTRIBUTE_IP_ADDRESS = "IP_ADDRESS"

    CONST_ZFB_MATCH_ATTRIBUTE_SERIAL_NUMBER = "SERIAL_NUMBER"

    CONST_STATUS_ACTIVE = "ACTIVE"

    CONST_STATUS_NOTIFICATION_APP_REQ_ACK = "NOTIFICATION_APP_REQ_ACK"

    CONST_STATUS_CERTIFICATE_SIGNED = "CERTIFICATE_SIGNED"

    def __init__(self, **kwargs):
        """ Initializes a Bootstrap instance

            Notes:
                You can specify all parameters while calling this methods.
                A special argument named `data` will enable you to load the
                object from a Python dictionary

            Examples:
                >>> bootstrap = NUBootstrap(id=u'xxxx-xxx-xxx-xxx', name=u'Bootstrap')
                >>> bootstrap = NUBootstrap(data=my_dict)
        """

        super(NUBootstrap, self).__init__()

        # Read/Write Attributes

        self._zfb_info = None
        self._zfb_match_attribute = None
        self._zfb_match_value = None
        self._last_updated_by = None
        self._installer_id = None
        self._entity_scope = None
        self._status = None
        self._external_id = None

        # Declare each attribute to the bambou REST machinery (local python
        # name, remote VSD API name, type and constraints)
        self.expose_attribute(local_name="zfb_info", remote_name="ZFBInfo", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="zfb_match_attribute", remote_name="ZFBMatchAttribute", attribute_type=str, is_required=False, is_unique=False, choices=[u'HOSTNAME', u'IP_ADDRESS', u'MAC_ADDRESS', u'NONE', u'NSGATEWAY_ID', u'SERIAL_NUMBER'])
        self.expose_attribute(local_name="zfb_match_value", remote_name="ZFBMatchValue", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="last_updated_by", remote_name="lastUpdatedBy", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="installer_id", remote_name="installerID", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="entity_scope", remote_name="entityScope", attribute_type=str, is_required=False, is_unique=False, choices=[u'ENTERPRISE', u'GLOBAL'])
        self.expose_attribute(local_name="status", remote_name="status", attribute_type=str, is_required=False, is_unique=False, choices=[u'ACTIVE', u'CERTIFICATE_SIGNED', u'INACTIVE', u'NOTIFICATION_APP_REQ_ACK', u'NOTIFICATION_APP_REQ_SENT'])
        self.expose_attribute(local_name="external_id", remote_name="externalID", attribute_type=str, is_required=False, is_unique=True)

        # Fetchers for child objects

        self.metadatas = NUMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")

        self.global_metadatas = NUGlobalMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")

        self._compute_args(**kwargs)

    # Properties
    # Plain pass-through accessors for the attributes exposed above

    @property
    def zfb_info(self):
        """ Get zfb_info value.

            Notes:
                Base64 Encoded JSON String of NSG ZFB Attribute Value Pairs

                This attribute is named `ZFBInfo` in VSD API.
        """
        return self._zfb_info

    @zfb_info.setter
    def zfb_info(self, value):
        """ Set zfb_info value.

            Notes:
                Base64 Encoded JSON String of NSG ZFB Attribute Value Pairs

                This attribute is named `ZFBInfo` in VSD API.
        """
        self._zfb_info = value

    @property
    def zfb_match_attribute(self):
        """ Get zfb_match_attribute value.

            Notes:
                Attribute to auto match on

                This attribute is named `ZFBMatchAttribute` in VSD API.
        """
        return self._zfb_match_attribute

    @zfb_match_attribute.setter
    def zfb_match_attribute(self, value):
        """ Set zfb_match_attribute value.

            Notes:
                Attribute to auto match on

                This attribute is named `ZFBMatchAttribute` in VSD API.
        """
        self._zfb_match_attribute = value

    @property
    def zfb_match_value(self):
        """ Get zfb_match_value value.

            Notes:
                Attribute value to auto match on

                This attribute is named `ZFBMatchValue` in VSD API.
        """
        return self._zfb_match_value

    @zfb_match_value.setter
    def zfb_match_value(self, value):
        """ Set zfb_match_value value.

            Notes:
                Attribute value to auto match on

                This attribute is named `ZFBMatchValue` in VSD API.
        """
        self._zfb_match_value = value

    @property
    def last_updated_by(self):
        """ Get last_updated_by value.

            Notes:
                ID of the user who last updated the object.

                This attribute is named `lastUpdatedBy` in VSD API.
        """
        return self._last_updated_by

    @last_updated_by.setter
    def last_updated_by(self, value):
        """ Set last_updated_by value.

            Notes:
                ID of the user who last updated the object.

                This attribute is named `lastUpdatedBy` in VSD API.
        """
        self._last_updated_by = value

    @property
    def installer_id(self):
        """ Get installer_id value.

            Notes:
                The Installer ID

                This attribute is named `installerID` in VSD API.
        """
        return self._installer_id

    @installer_id.setter
    def installer_id(self, value):
        """ Set installer_id value.

            Notes:
                The Installer ID

                This attribute is named `installerID` in VSD API.
        """
        self._installer_id = value

    @property
    def entity_scope(self):
        """ Get entity_scope value.

            Notes:
                Specify if scope of entity is Data center or Enterprise level

                This attribute is named `entityScope` in VSD API.
        """
        return self._entity_scope

    @entity_scope.setter
    def entity_scope(self, value):
        """ Set entity_scope value.

            Notes:
                Specify if scope of entity is Data center or Enterprise level

                This attribute is named `entityScope` in VSD API.
        """
        self._entity_scope = value

    @property
    def status(self):
        """ Get status value.

            Notes:
                Bootstrap status.
        """
        return self._status

    @status.setter
    def status(self, value):
        """ Set status value.

            Notes:
                Bootstrap status.
        """
        self._status = value

    @property
    def external_id(self):
        """ Get external_id value.

            Notes:
                External object ID. Used for integration with third party systems

                This attribute is named `externalID` in VSD API.
        """
        return self._external_id

    @external_id.setter
    def external_id(self, value):
        """ Set external_id value.

            Notes:
                External object ID. Used for integration with third party systems

                This attribute is named `externalID` in VSD API.
        """
        self._external_id = value
| 29.894737 | 250 | 0.611502 |
from .fetchers import NUMetadatasFetcher
from .fetchers import NUGlobalMetadatasFetcher
from bambou import NURESTObject
class NUBootstrap(NURESTObject):
    """Represents a Bootstrap (gateway bootstrap details) in the VSD API."""

    __rest_name__ = "bootstrap"
    __resource_name__ = "bootstraps"

    # Enum values accepted by the VSD API for ZFBMatchAttribute, entityScope
    # and status. The first constant's name was garbled to
    # ONST_ZFB_MATCH_ATTRIBUTE_NONE (leading 'C' lost); restored to match
    # its siblings.
    CONST_ZFB_MATCH_ATTRIBUTE_NONE = "NONE"
    CONST_ZFB_MATCH_ATTRIBUTE_NSGATEWAY_ID = "NSGATEWAY_ID"
    CONST_ENTITY_SCOPE_GLOBAL = "GLOBAL"
    CONST_STATUS_NOTIFICATION_APP_REQ_SENT = "NOTIFICATION_APP_REQ_SENT"
    CONST_ZFB_MATCH_ATTRIBUTE_HOSTNAME = "HOSTNAME"
    CONST_STATUS_INACTIVE = "INACTIVE"
    CONST_ENTITY_SCOPE_ENTERPRISE = "ENTERPRISE"
    CONST_ZFB_MATCH_ATTRIBUTE_MAC_ADDRESS = "MAC_ADDRESS"
    CONST_ZFB_MATCH_ATTRIBUTE_IP_ADDRESS = "IP_ADDRESS"
    CONST_ZFB_MATCH_ATTRIBUTE_SERIAL_NUMBER = "SERIAL_NUMBER"
    CONST_STATUS_ACTIVE = "ACTIVE"
    CONST_STATUS_NOTIFICATION_APP_REQ_ACK = "NOTIFICATION_APP_REQ_ACK"
    CONST_STATUS_CERTIFICATE_SIGNED = "CERTIFICATE_SIGNED"

    def __init__(self, **kwargs):
        """Initialize a Bootstrap instance; pass `data=my_dict` to load one
        from a Python dictionary, or any attribute as a keyword argument."""
        super(NUBootstrap, self).__init__()
        # Read/write attributes
        self._zfb_info = None
        self._zfb_match_attribute = None
        self._zfb_match_value = None
        self._last_updated_by = None
        self._installer_id = None
        self._entity_scope = None
        self._status = None
        self._external_id = None
        # Declare each attribute to the bambou REST machinery
        self.expose_attribute(local_name="zfb_info", remote_name="ZFBInfo", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="zfb_match_attribute", remote_name="ZFBMatchAttribute", attribute_type=str, is_required=False, is_unique=False, choices=[u'HOSTNAME', u'IP_ADDRESS', u'MAC_ADDRESS', u'NONE', u'NSGATEWAY_ID', u'SERIAL_NUMBER'])
        self.expose_attribute(local_name="zfb_match_value", remote_name="ZFBMatchValue", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="last_updated_by", remote_name="lastUpdatedBy", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="installer_id", remote_name="installerID", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="entity_scope", remote_name="entityScope", attribute_type=str, is_required=False, is_unique=False, choices=[u'ENTERPRISE', u'GLOBAL'])
        self.expose_attribute(local_name="status", remote_name="status", attribute_type=str, is_required=False, is_unique=False, choices=[u'ACTIVE', u'CERTIFICATE_SIGNED', u'INACTIVE', u'NOTIFICATION_APP_REQ_ACK', u'NOTIFICATION_APP_REQ_SENT'])
        self.expose_attribute(local_name="external_id", remote_name="externalID", attribute_type=str, is_required=False, is_unique=True)
        # Fetchers for child objects
        self.metadatas = NUMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
        self.global_metadatas = NUGlobalMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
        self._compute_args(**kwargs)

    # Plain pass-through accessors for the attributes exposed above

    @property
    def zfb_info(self):
        return self._zfb_info

    @zfb_info.setter
    def zfb_info(self, value):
        self._zfb_info = value

    @property
    def zfb_match_attribute(self):
        return self._zfb_match_attribute

    @zfb_match_attribute.setter
    def zfb_match_attribute(self, value):
        self._zfb_match_attribute = value

    @property
    def zfb_match_value(self):
        return self._zfb_match_value

    @zfb_match_value.setter
    def zfb_match_value(self, value):
        self._zfb_match_value = value

    @property
    def last_updated_by(self):
        return self._last_updated_by

    @last_updated_by.setter
    def last_updated_by(self, value):
        self._last_updated_by = value

    @property
    def installer_id(self):
        return self._installer_id

    @installer_id.setter
    def installer_id(self, value):
        self._installer_id = value

    @property
    def entity_scope(self):
        return self._entity_scope

    @entity_scope.setter
    def entity_scope(self, value):
        self._entity_scope = value

    @property
    def status(self):
        return self._status

    @status.setter
    def status(self, value):
        self._status = value

    @property
    def external_id(self):
        return self._external_id

    @external_id.setter
    def external_id(self, value):
        self._external_id = value
f73415df562837992d6bfe6789244be417512e7e | 230 | py | Python | automatewithpython/regex/findall/xmasregex.py | Coalemus/Python-Projects | 4b0e0c12a2fdcfbaf491df5715885c61f44bdb1c | [
"MIT"
] | null | null | null | automatewithpython/regex/findall/xmasregex.py | Coalemus/Python-Projects | 4b0e0c12a2fdcfbaf491df5715885c61f44bdb1c | [
"MIT"
] | null | null | null | automatewithpython/regex/findall/xmasregex.py | Coalemus/Python-Projects | 4b0e0c12a2fdcfbaf491df5715885c61f44bdb1c | [
"MIT"
] | null | null | null | #!/bin/zsh
import re
xmasregex = re.compile(r'\d\s\w+')
mtob1 = xmasregex.findall(
"12 drummers, 11 pipers, 10 lords, 9 ladies, 8 maids, 7 swans, 6 geese, 5 rings, 4 birds, 3 hens, 2 doves, 1 partidge"
)
print(mtob1)
| 17.692308 | 122 | 0.647826 |
import re
xmasregex = re.compile(r'\d\s\w+')
mtob1 = xmasregex.findall(
"12 drummers, 11 pipers, 10 lords, 9 ladies, 8 maids, 7 swans, 6 geese, 5 rings, 4 birds, 3 hens, 2 doves, 1 partidge"
)
print(mtob1)
| true | true |
f73416d556fd29e31b2b5e98fd85ef253b840382 | 7,151 | py | Python | contentcuration/contentcuration/viewsets/sync/endpoint.py | DXCanas/content-curation | 06ac2cf2a49d2420cb8a418f5df2bfee53ef644b | [
"MIT"
] | null | null | null | contentcuration/contentcuration/viewsets/sync/endpoint.py | DXCanas/content-curation | 06ac2cf2a49d2420cb8a418f5df2bfee53ef644b | [
"MIT"
] | null | null | null | contentcuration/contentcuration/viewsets/sync/endpoint.py | DXCanas/content-curation | 06ac2cf2a49d2420cb8a418f5df2bfee53ef644b | [
"MIT"
] | null | null | null | """
A view that handles synchronization of changes from the frontend
and deals with processing all the changes to make appropriate
bulk creates, updates, and deletes.
"""
from collections import OrderedDict
from itertools import groupby
from django.conf import settings
from rest_framework.authentication import SessionAuthentication
from rest_framework.authentication import TokenAuthentication
from rest_framework.decorators import api_view
from rest_framework.decorators import authentication_classes
from rest_framework.decorators import permission_classes
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.status import HTTP_207_MULTI_STATUS
from rest_framework.status import HTTP_400_BAD_REQUEST
from search.viewsets.savedsearch import SavedSearchViewSet
from contentcuration.viewsets.assessmentitem import AssessmentItemViewSet
from contentcuration.viewsets.channel import ChannelViewSet
from contentcuration.viewsets.channelset import ChannelSetViewSet
from contentcuration.viewsets.contentnode import ContentNodeViewSet
from contentcuration.viewsets.file import FileViewSet
from contentcuration.viewsets.invitation import InvitationViewSet
from contentcuration.viewsets.sync.constants import ASSESSMENTITEM
from contentcuration.viewsets.sync.constants import CHANNEL
from contentcuration.viewsets.sync.constants import CHANNELSET
from contentcuration.viewsets.sync.constants import CONTENTNODE
from contentcuration.viewsets.sync.constants import COPIED
from contentcuration.viewsets.sync.constants import CREATED
from contentcuration.viewsets.sync.constants import CREATED_RELATION
from contentcuration.viewsets.sync.constants import DELETED
from contentcuration.viewsets.sync.constants import DELETED_RELATION
from contentcuration.viewsets.sync.constants import EDITOR_M2M
from contentcuration.viewsets.sync.constants import FILE
from contentcuration.viewsets.sync.constants import INVITATION
from contentcuration.viewsets.sync.constants import MOVED
from contentcuration.viewsets.sync.constants import SAVEDSEARCH
from contentcuration.viewsets.sync.constants import TREE
from contentcuration.viewsets.sync.constants import UPDATED
from contentcuration.viewsets.sync.constants import USER
from contentcuration.viewsets.sync.constants import VIEWER_M2M
from contentcuration.viewsets.sync.utils import get_and_clear_user_events
from contentcuration.viewsets.tree import TreeViewSet
from contentcuration.viewsets.user import ChannelUserViewSet
from contentcuration.viewsets.user import UserViewSet
from contentcuration.utils.sentry import report_exception
# Uses ordered dict behaviour to enforce operation orders
viewset_mapping = OrderedDict(
[
(USER, UserViewSet),
# If a new channel has been created, then any other operations that happen
# within that channel depend on that, so we prioritize channel operations
(CHANNEL, ChannelViewSet),
(INVITATION, InvitationViewSet),
# Tree operations require content nodes to exist, and any new assessment items
# need to point to an existing content node
(CONTENTNODE, ContentNodeViewSet),
# The exact order of these three is not important.
(ASSESSMENTITEM, AssessmentItemViewSet),
(CHANNELSET, ChannelSetViewSet),
(TREE, TreeViewSet),
(FILE, FileViewSet),
(EDITOR_M2M, ChannelUserViewSet),
(VIEWER_M2M, ChannelUserViewSet),
(SAVEDSEARCH, SavedSearchViewSet),
]
)
change_order = [
# inserts
COPIED,
CREATED,
# updates
UPDATED,
DELETED,
MOVED,
CREATED_RELATION,
DELETED_RELATION,
]
table_name_indices = {
table_name: i for i, table_name in enumerate(viewset_mapping.keys())
}
def get_table(obj):
return obj["table"]
def get_table_sort_order(obj):
return table_name_indices[get_table(obj)]
def get_change_type(obj):
return obj["type"]
def get_change_order(obj):
try:
change_type = int(obj["type"])
except ValueError:
change_type = -1
return change_order.index(change_type)
event_handlers = {
CREATED: "create_from_changes",
UPDATED: "update_from_changes",
DELETED: "delete_from_changes",
MOVED: "move_from_changes",
COPIED: "copy_from_changes",
CREATED_RELATION: "create_relation_from_changes",
DELETED_RELATION: "delete_relation_from_changes",
}
def handle_changes(request, viewset_class, change_type, changes):
try:
change_type = int(change_type)
except ValueError:
pass
else:
viewset = viewset_class(request=request)
viewset.initial(request)
if change_type in event_handlers:
try:
return getattr(viewset, event_handlers[change_type])(changes)
except Exception as e:
# Capture exception and report, but allow sync
# to complete properly.
report_exception(e)
if getattr(settings, "DEBUG", False) or getattr(
settings, "TEST_ENV", False
):
raise
return changes, None
@authentication_classes((TokenAuthentication, SessionAuthentication))
@permission_classes((IsAuthenticated,))
@api_view(["POST"])
def sync(request):
# Collect all error objects, which consist of the original change
# plus any validation errors raised.
errors = []
# Collect all changes that should be propagated back to the client
# this allows internal validation to take place and fields to be added
# if needed by the server.
changes_to_return = []
data = sorted(request.data, key=get_table_sort_order)
for table_name, group in groupby(data, get_table):
if table_name in viewset_mapping:
viewset_class = viewset_mapping[table_name]
group = sorted(group, key=get_change_order)
for change_type, changes in groupby(group, get_change_type):
# Coerce changes iterator to list so it can be read multiple times
es, cs = handle_changes(
request, viewset_class, change_type, list(changes)
)
if es:
errors.extend(es)
if cs:
changes_to_return.extend(cs)
# Add any changes that have been logged from elsewhere in our hacky redis
# cache mechanism
changes_to_return.extend(get_and_clear_user_events(request.user.id))
if not errors:
if changes_to_return:
return Response({"changes": changes_to_return})
else:
return Response({})
elif len(errors) < len(data) or len(changes_to_return):
# If there are some errors, but not all, or all errors and some changes return a mixed response
return Response(
{"changes": changes_to_return, "errors": errors},
status=HTTP_207_MULTI_STATUS,
)
else:
# If the errors are total, and there are no changes reject the response outright!
return Response({"errors": errors}, status=HTTP_400_BAD_REQUEST)
| 38.037234 | 103 | 0.735561 | from collections import OrderedDict
from itertools import groupby
from django.conf import settings
from rest_framework.authentication import SessionAuthentication
from rest_framework.authentication import TokenAuthentication
from rest_framework.decorators import api_view
from rest_framework.decorators import authentication_classes
from rest_framework.decorators import permission_classes
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.status import HTTP_207_MULTI_STATUS
from rest_framework.status import HTTP_400_BAD_REQUEST
from search.viewsets.savedsearch import SavedSearchViewSet
from contentcuration.viewsets.assessmentitem import AssessmentItemViewSet
from contentcuration.viewsets.channel import ChannelViewSet
from contentcuration.viewsets.channelset import ChannelSetViewSet
from contentcuration.viewsets.contentnode import ContentNodeViewSet
from contentcuration.viewsets.file import FileViewSet
from contentcuration.viewsets.invitation import InvitationViewSet
from contentcuration.viewsets.sync.constants import ASSESSMENTITEM
from contentcuration.viewsets.sync.constants import CHANNEL
from contentcuration.viewsets.sync.constants import CHANNELSET
from contentcuration.viewsets.sync.constants import CONTENTNODE
from contentcuration.viewsets.sync.constants import COPIED
from contentcuration.viewsets.sync.constants import CREATED
from contentcuration.viewsets.sync.constants import CREATED_RELATION
from contentcuration.viewsets.sync.constants import DELETED
from contentcuration.viewsets.sync.constants import DELETED_RELATION
from contentcuration.viewsets.sync.constants import EDITOR_M2M
from contentcuration.viewsets.sync.constants import FILE
from contentcuration.viewsets.sync.constants import INVITATION
from contentcuration.viewsets.sync.constants import MOVED
from contentcuration.viewsets.sync.constants import SAVEDSEARCH
from contentcuration.viewsets.sync.constants import TREE
from contentcuration.viewsets.sync.constants import UPDATED
from contentcuration.viewsets.sync.constants import USER
from contentcuration.viewsets.sync.constants import VIEWER_M2M
from contentcuration.viewsets.sync.utils import get_and_clear_user_events
from contentcuration.viewsets.tree import TreeViewSet
from contentcuration.viewsets.user import ChannelUserViewSet
from contentcuration.viewsets.user import UserViewSet
from contentcuration.utils.sentry import report_exception
viewset_mapping = OrderedDict(
[
(USER, UserViewSet),
(CHANNEL, ChannelViewSet),
(INVITATION, InvitationViewSet),
(CONTENTNODE, ContentNodeViewSet),
(ASSESSMENTITEM, AssessmentItemViewSet),
(CHANNELSET, ChannelSetViewSet),
(TREE, TreeViewSet),
(FILE, FileViewSet),
(EDITOR_M2M, ChannelUserViewSet),
(VIEWER_M2M, ChannelUserViewSet),
(SAVEDSEARCH, SavedSearchViewSet),
]
)
change_order = [
COPIED,
CREATED,
UPDATED,
DELETED,
MOVED,
CREATED_RELATION,
DELETED_RELATION,
]
table_name_indices = {
table_name: i for i, table_name in enumerate(viewset_mapping.keys())
}
def get_table(obj):
return obj["table"]
def get_table_sort_order(obj):
return table_name_indices[get_table(obj)]
def get_change_type(obj):
return obj["type"]
def get_change_order(obj):
try:
change_type = int(obj["type"])
except ValueError:
change_type = -1
return change_order.index(change_type)
event_handlers = {
CREATED: "create_from_changes",
UPDATED: "update_from_changes",
DELETED: "delete_from_changes",
MOVED: "move_from_changes",
COPIED: "copy_from_changes",
CREATED_RELATION: "create_relation_from_changes",
DELETED_RELATION: "delete_relation_from_changes",
}
def handle_changes(request, viewset_class, change_type, changes):
try:
change_type = int(change_type)
except ValueError:
pass
else:
viewset = viewset_class(request=request)
viewset.initial(request)
if change_type in event_handlers:
try:
return getattr(viewset, event_handlers[change_type])(changes)
except Exception as e:
report_exception(e)
if getattr(settings, "DEBUG", False) or getattr(
settings, "TEST_ENV", False
):
raise
return changes, None
@authentication_classes((TokenAuthentication, SessionAuthentication))
@permission_classes((IsAuthenticated,))
@api_view(["POST"])
def sync(request):
errors = []
changes_to_return = []
data = sorted(request.data, key=get_table_sort_order)
for table_name, group in groupby(data, get_table):
if table_name in viewset_mapping:
viewset_class = viewset_mapping[table_name]
group = sorted(group, key=get_change_order)
for change_type, changes in groupby(group, get_change_type):
es, cs = handle_changes(
request, viewset_class, change_type, list(changes)
)
if es:
errors.extend(es)
if cs:
changes_to_return.extend(cs)
changes_to_return.extend(get_and_clear_user_events(request.user.id))
if not errors:
if changes_to_return:
return Response({"changes": changes_to_return})
else:
return Response({})
elif len(errors) < len(data) or len(changes_to_return):
return Response(
{"changes": changes_to_return, "errors": errors},
status=HTTP_207_MULTI_STATUS,
)
else:
return Response({"errors": errors}, status=HTTP_400_BAD_REQUEST)
| true | true |
f73417474be34d4bc69561d5e19d269147c3150d | 134 | py | Python | tests/data/chefs_in_a_file.py | mjoblin/netdumplings | 1ec3c4d80f302fe749e51171084ac05bbe57a701 | [
"MIT"
] | 2 | 2016-06-02T18:13:38.000Z | 2020-03-05T08:41:10.000Z | tests/data/chefs_in_a_file.py | mjoblin/netdumplings | 1ec3c4d80f302fe749e51171084ac05bbe57a701 | [
"MIT"
] | 5 | 2016-11-25T02:35:51.000Z | 2018-01-13T05:53:06.000Z | tests/data/chefs_in_a_file.py | mjoblin/netdumplings | 1ec3c4d80f302fe749e51171084ac05bbe57a701 | [
"MIT"
] | null | null | null | from netdumplings import DumplingChef
class ChefOneFromFile(DumplingChef):
pass
class ChefTwoFromFile(DumplingChef):
pass
| 13.4 | 37 | 0.791045 | from netdumplings import DumplingChef
class ChefOneFromFile(DumplingChef):
pass
class ChefTwoFromFile(DumplingChef):
pass
| true | true |
f73417fd194f35c59a19fcb4a2152aa67643647a | 43,678 | py | Python | mir_eval/segment.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 5668b5785296b314ea1321057420bcd077dba9ea | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | mir_eval/segment.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 5668b5785296b314ea1321057420bcd077dba9ea | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | mir_eval/segment.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 5668b5785296b314ea1321057420bcd077dba9ea | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | # CREATED:2013-08-13 12:02:42 by Brian McFee <brm2132@columbia.edu>
'''
Evaluation criteria for structural segmentation fall into two categories:
boundary annotation and structural annotation. Boundary annotation is the task
of predicting the times at which structural changes occur, such as when a verse
transitions to a refrain. Metrics for boundary annotation compare estimated
segment boundaries to reference boundaries. Structural annotation is the task
of assigning labels to detected segments. The estimated labels may be
arbitrary strings - such as A, B, C, - and they need not describe functional
concepts. Metrics for structural annotation are similar to those used for
clustering data.
Conventions
-----------
Both boundary and structural annotation metrics require two dimensional arrays
with two columns, one for boundary start times and one for boundary end times.
Structural annotation further require lists of reference and estimated segment
labels which must have a length which is equal to the number of rows in the
corresponding list of boundary edges. In both tasks, we assume that
annotations express a partitioning of the track into intervals. The function
:func:`mir_eval.util.adjust_intervals` can be used to pad or crop the segment
boundaries to span the duration of the entire track.
Metrics
-------
* :func:`mir_eval.segment.detection`: An estimated boundary is considered
correct if it falls within a window around a reference boundary
* :func:`mir_eval.segment.deviation`: Computes the median absolute time
difference from a reference boundary to its nearest estimated boundary, and
vice versa
* :func:`mir_eval.segment.pairwise`: For classifying pairs of sampled time
instants as belonging to the same structural component
* :func:`mir_eval.segment.rand_index`: Clusters reference and estimated
annotations and compares them by the Rand Index
* :func:`mir_eval.segment.ari`: Computes the Rand index, adjusted for chance
* :func:`mir_eval.segment.nce`: Interprets sampled reference and estimated
labels as samples of random variables :math:`Y_R, Y_E` from which the
conditional entropy of :math:`Y_R` given :math:`Y_E` (Under-Segmentation) and
:math:`Y_E` given :math:`Y_R` (Over-Segmentation) are estimated
* :func:`mir_eval.segment.mutual_information`: Computes the standard,
normalized, and adjusted mutual information of sampled reference and
estimated segments
'''
import collections
import warnings
import numpy as np
import scipy.stats
import scipy.sparse
import scipy.misc
import scipy.special
from . import util
def validate_boundary(reference_intervals, estimated_intervals, trim):
    """Checks that the input annotations to a segment boundary estimation
    metric (i.e. one that only takes in segment intervals) look like valid
    segment times, and throws helpful errors if not.

    Parameters
    ----------
    reference_intervals : np.ndarray, shape=(n, 2)
        reference segment intervals, in the format returned by
        :func:`mir_eval.io.load_intervals` or
        :func:`mir_eval.io.load_labeled_intervals`.

    estimated_intervals : np.ndarray, shape=(m, 2)
        estimated segment intervals, in the format returned by
        :func:`mir_eval.io.load_intervals` or
        :func:`mir_eval.io.load_labeled_intervals`.

    trim : bool
        will the start and end events be trimmed?
    """
    # Trimming discards the first and last boundaries, so at least two
    # intervals are needed for anything to survive; otherwise one suffices.
    min_size = 2 if trim else 1

    if len(reference_intervals) < min_size:
        warnings.warn("Reference intervals are empty.")
    if len(estimated_intervals) < min_size:
        warnings.warn("Estimated intervals are empty.")

    # Both annotations must be well-formed interval arrays.
    for intervals in (reference_intervals, estimated_intervals):
        util.validate_intervals(intervals)
def validate_structure(reference_intervals, reference_labels,
                       estimated_intervals, estimated_labels):
    """Checks that the input annotations to a structure estimation metric (i.e.
    one that takes in both segment boundaries and their labels) look like valid
    segment times and labels, and throws helpful errors if not.

    Parameters
    ----------
    reference_intervals : np.ndarray, shape=(n, 2)
        reference segment intervals, in the format returned by
        :func:`mir_eval.io.load_labeled_intervals`.

    reference_labels : list, shape=(n,)
        reference segment labels, in the format returned by
        :func:`mir_eval.io.load_labeled_intervals`.

    estimated_intervals : np.ndarray, shape=(m, 2)
        estimated segment intervals, in the format returned by
        :func:`mir_eval.io.load_labeled_intervals`.

    estimated_labels : list, shape=(m,)
        estimated segment labels, in the format returned by
        :func:`mir_eval.io.load_labeled_intervals`.
    """
    for intervals, labels in ((reference_intervals, reference_labels),
                              (estimated_intervals, estimated_labels)):

        util.validate_intervals(intervals)
        # Every interval needs exactly one label.
        if intervals.shape[0] != len(labels):
            raise ValueError('Number of intervals does not match number '
                             'of labels')

        # Annotations are assumed to partition the track from time 0;
        # only check when there is at least one interval.
        if intervals.size > 0 and not np.allclose(intervals.min(), 0.0):
            raise ValueError('Segment intervals do not start at 0')

    if reference_intervals.size == 0:
        warnings.warn("Reference intervals are empty.")
    if estimated_intervals.size == 0:
        warnings.warn("Estimated intervals are empty.")

    # Both annotations must cover the same total duration; only check
    # when both are non-empty.
    if reference_intervals.size > 0 and estimated_intervals.size > 0:
        if not np.allclose(reference_intervals.max(),
                           estimated_intervals.max()):
            raise ValueError('End times do not match')
def detection(reference_intervals, estimated_intervals,
              window=0.5, beta=1.0, trim=False):
    """Boundary detection hit-rate.

    An estimated boundary counts as a hit when it lies within ``window``
    seconds of a reference boundary.  Each boundary participates in at most
    one hit: the score is computed from a maximal matching between the
    reference and estimated boundary sets under the window constraint.

    Examples
    --------
    >>> ref_intervals, _ = mir_eval.io.load_labeled_intervals('ref.lab')
    >>> est_intervals, _ = mir_eval.io.load_labeled_intervals('est.lab')
    >>> # With 0.5s windowing
    >>> P05, R05, F05 = mir_eval.segment.detection(ref_intervals,
    ...                                            est_intervals,
    ...                                            window=0.5)
    >>> # With 3s windowing
    >>> P3, R3, F3 = mir_eval.segment.detection(ref_intervals,
    ...                                         est_intervals,
    ...                                         window=3)
    >>> # Ignoring hits for the beginning and end of track
    >>> P, R, F = mir_eval.segment.detection(ref_intervals,
    ...                                      est_intervals,
    ...                                      window=0.5,
    ...                                      trim=True)

    Parameters
    ----------
    reference_intervals : np.ndarray, shape=(n, 2)
        reference segment intervals, in the format returned by
        :func:`mir_eval.io.load_intervals` or
        :func:`mir_eval.io.load_labeled_intervals`.

    estimated_intervals : np.ndarray, shape=(m, 2)
        estimated segment intervals, in the format returned by
        :func:`mir_eval.io.load_intervals` or
        :func:`mir_eval.io.load_labeled_intervals`.

    window : float > 0
        size of the window of 'correctness' around ground-truth beats
        (in seconds)
        (Default value = 0.5)

    beta : float > 0
        weighting constant for F-measure.
        (Default value = 1.0)

    trim : boolean
        if ``True``, the first and last boundary times are ignored.
        Typically, these denote start (0) and end-markers.
        (Default value = False)

    Returns
    -------
    precision : float
        precision of estimated predictions

    recall : float
        recall of reference reference boundaries

    f_measure : float
        F-measure (weighted harmonic mean of ``precision`` and ``recall``)
    """
    validate_boundary(reference_intervals, estimated_intervals, trim)

    # Work in terms of boundary times rather than intervals.
    ref_boundaries = util.intervals_to_boundaries(reference_intervals)
    est_boundaries = util.intervals_to_boundaries(estimated_intervals)

    if trim:
        # Drop the start- and end-of-track markers.
        ref_boundaries = ref_boundaries[1:-1]
        est_boundaries = est_boundaries[1:-1]

    # With no boundaries on either side there is nothing to score.
    if not len(ref_boundaries) or not len(est_boundaries):
        return 0.0, 0.0, 0.0

    # Maximal one-to-one matching within the tolerance window.
    matching = util.match_events(ref_boundaries, est_boundaries, window)

    precision = float(len(matching)) / len(est_boundaries)
    recall = float(len(matching)) / len(ref_boundaries)

    return precision, recall, util.f_measure(precision, recall, beta=beta)
def deviation(reference_intervals, estimated_intervals, trim=False):
    """Compute the median deviations between reference
    and estimated boundary times.

    Examples
    --------
    >>> ref_intervals, _ = mir_eval.io.load_labeled_intervals('ref.lab')
    >>> est_intervals, _ = mir_eval.io.load_labeled_intervals('est.lab')
    >>> r_to_e, e_to_r = mir_eval.boundary.deviation(ref_intervals,
    ...                                              est_intervals)

    Parameters
    ----------
    reference_intervals : np.ndarray, shape=(n, 2)
        reference segment intervals, in the format returned by
        :func:`mir_eval.io.load_intervals` or
        :func:`mir_eval.io.load_labeled_intervals`.

    estimated_intervals : np.ndarray, shape=(m, 2)
        estimated segment intervals, in the format returned by
        :func:`mir_eval.io.load_intervals` or
        :func:`mir_eval.io.load_labeled_intervals`.

    trim : boolean
        if ``True``, the first and last intervals are ignored.
        Typically, these denote start (0.0) and end-of-track markers.
        (Default value = False)

    Returns
    -------
    reference_to_estimated : float
        median time from each reference boundary to the
        closest estimated boundary

    estimated_to_reference : float
        median time from each estimated boundary to the
        closest reference boundary
    """
    validate_boundary(reference_intervals, estimated_intervals, trim)

    # Work with the boundary times rather than the interval matrices.
    ref_boundaries = util.intervals_to_boundaries(reference_intervals)
    est_boundaries = util.intervals_to_boundaries(estimated_intervals)

    if trim:
        # Discard the start- and end-of-track markers.
        ref_boundaries = ref_boundaries[1:-1]
        est_boundaries = est_boundaries[1:-1]

    # With no boundaries on either side, deviations are undefined.
    if len(ref_boundaries) == 0 or len(est_boundaries) == 0:
        return np.nan, np.nan

    # Pairwise absolute time differences: rows index reference boundaries,
    # columns index estimated boundaries.
    dist = np.abs(np.subtract.outer(ref_boundaries, est_boundaries))

    # Minimizing over rows gives each estimate's nearest reference;
    # minimizing over columns gives each reference's nearest estimate.
    estimated_to_reference = np.median(dist.min(axis=0))
    reference_to_estimated = np.median(dist.min(axis=1))

    return reference_to_estimated, estimated_to_reference
def pairwise(reference_intervals, reference_labels,
             estimated_intervals, estimated_labels,
             frame_size=0.1, beta=1.0):
    """Frame-clustering segmentation evaluation by pair-wise agreement.

    Examples
    --------
    >>> (ref_intervals,
    ...  ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')
    >>> (est_intervals,
    ...  est_labels) = mir_eval.io.load_labeled_intervals('est.lab')
    >>> # Trim or pad the estimate to match reference timing
    >>> (ref_intervals,
    ...  ref_labels) = mir_eval.util.adjust_intervals(ref_intervals,
    ...                                               ref_labels,
    ...                                               t_min=0)
    >>> (est_intervals,
    ...  est_labels) = mir_eval.util.adjust_intervals(
    ...     est_intervals, est_labels, t_min=0, t_max=ref_intervals.max())
    >>> precision, recall, f = mir_eval.structure.pairwise(ref_intervals,
    ...                                                    ref_labels,
    ...                                                    est_intervals,
    ...                                                    est_labels)

    Parameters
    ----------
    reference_intervals : np.ndarray, shape=(n, 2)
        reference segment intervals, in the format returned by
        :func:`mir_eval.io.load_labeled_intervals`.

    reference_labels : list, shape=(n,)
        reference segment labels, in the format returned by
        :func:`mir_eval.io.load_labeled_intervals`.

    estimated_intervals : np.ndarray, shape=(m, 2)
        estimated segment intervals, in the format returned by
        :func:`mir_eval.io.load_labeled_intervals`.

    estimated_labels : list, shape=(m,)
        estimated segment labels, in the format returned by
        :func:`mir_eval.io.load_labeled_intervals`.

    frame_size : float > 0
        length (in seconds) of frames for clustering
        (Default value = 0.1)

    beta : float > 0
        beta value for F-measure
        (Default value = 1.0)

    Returns
    -------
    precision : float > 0
        Precision of detecting whether frames belong in the same cluster

    recall : float > 0
        Recall of detecting whether frames belong in the same cluster

    f : float > 0
        F-measure of detecting whether frames belong in the same cluster
    """
    validate_structure(reference_intervals, reference_labels,
                       estimated_intervals, estimated_labels)

    # Empty annotations score zero.  Labels need no separate check:
    # validate_structure guarantees they match the intervals in size.
    if reference_intervals.size == 0 or estimated_intervals.size == 0:
        return 0., 0., 0.

    # Sample each annotation on a common grid of frame_size frames, then
    # map the string labels to integer indices.
    y_ref = util.index_labels(
        util.intervals_to_samples(reference_intervals,
                                  reference_labels,
                                  sample_size=frame_size)[-1])[0]

    y_est = util.index_labels(
        util.intervals_to_samples(estimated_intervals,
                                  estimated_labels,
                                  sample_size=frame_size)[-1])[0]

    # Agreement matrices: entry (i, j) is True iff frames i and j share a
    # label under that annotation.
    agree_ref = np.equal.outer(y_ref, y_ref)
    agree_est = np.equal.outer(y_est, y_est)

    # Count unordered, distinct frame pairs: subtract the (always-True)
    # diagonal, then halve to undo the matrix symmetry.
    n_agree_ref = (agree_ref.sum() - len(y_ref)) / 2.0
    n_agree_est = (agree_est.sum() - len(y_est)) / 2.0

    # Pairs on which both annotations agree.
    n_matches = (np.logical_and(agree_ref, agree_est).sum()
                 - len(y_ref)) / 2.0

    precision = n_matches / n_agree_est
    recall = n_matches / n_agree_ref

    return precision, recall, util.f_measure(precision, recall, beta=beta)
def rand_index(reference_intervals, reference_labels,
               estimated_intervals, estimated_labels,
               frame_size=0.1, beta=1.0):
    """(Non-adjusted) Rand index.

    Examples
    --------
    >>> (ref_intervals,
    ...  ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')
    >>> (est_intervals,
    ...  est_labels) = mir_eval.io.load_labeled_intervals('est.lab')
    >>> # Trim or pad the estimate to match reference timing
    >>> (ref_intervals,
    ...  ref_labels) = mir_eval.util.adjust_intervals(ref_intervals,
    ...                                               ref_labels,
    ...                                               t_min=0)
    >>> (est_intervals,
    ...  est_labels) = mir_eval.util.adjust_intervals(
    ...     est_intervals, est_labels, t_min=0, t_max=ref_intervals.max())
    >>> rand_index = mir_eval.structure.rand_index(ref_intervals,
    ...                                            ref_labels,
    ...                                            est_intervals,
    ...                                            est_labels)

    Parameters
    ----------
    reference_intervals : np.ndarray, shape=(n, 2)
        reference segment intervals, in the format returned by
        :func:`mir_eval.io.load_labeled_intervals`.

    reference_labels : list, shape=(n,)
        reference segment labels, in the format returned by
        :func:`mir_eval.io.load_labeled_intervals`.

    estimated_intervals : np.ndarray, shape=(m, 2)
        estimated segment intervals, in the format returned by
        :func:`mir_eval.io.load_labeled_intervals`.

    estimated_labels : list, shape=(m,)
        estimated segment labels, in the format returned by
        :func:`mir_eval.io.load_labeled_intervals`.

    frame_size : float > 0
        length (in seconds) of frames for clustering
        (Default value = 0.1)

    beta : float > 0
        beta value for F-measure.  Unused by this metric; retained only
        for API compatibility with the other structure metrics.
        (Default value = 1.0)

    Returns
    -------
    rand_index : float > 0
        Rand index
    """
    validate_structure(reference_intervals, reference_labels,
                       estimated_intervals, estimated_labels)

    # Check for empty annotations.  Don't need to check labels because
    # validate_structure makes sure they're the same size as intervals.
    # Return a single float (not a tuple) to match the documented return
    # type of this function.
    if reference_intervals.size == 0 or estimated_intervals.size == 0:
        return 0.

    # Generate the cluster labels
    y_ref = util.intervals_to_samples(reference_intervals,
                                      reference_labels,
                                      sample_size=frame_size)[-1]

    y_ref = util.index_labels(y_ref)[0]

    # Map to index space
    y_est = util.intervals_to_samples(estimated_intervals,
                                      estimated_labels,
                                      sample_size=frame_size)[-1]

    y_est = util.index_labels(y_est)[0]

    # Build the reference label agreement matrix
    agree_ref = np.equal.outer(y_ref, y_ref)
    # Repeat for estimate
    agree_est = np.equal.outer(y_est, y_est)

    # Find where they agree: pairs grouped together in both annotations
    matches_pos = np.logical_and(agree_ref, agree_est)

    # Find where they disagree: pairs separated in both annotations
    matches_neg = np.logical_and(~agree_ref, ~agree_est)

    # Total number of unordered, distinct frame pairs
    n_pairs = len(y_ref) * (len(y_ref) - 1) / 2.0

    # Subtract the diagonal (self-pairs) before halving the symmetric count
    n_matches_pos = (matches_pos.sum() - len(y_ref)) / 2.0
    n_matches_neg = matches_neg.sum() / 2.0
    rand = (n_matches_pos + n_matches_neg) / n_pairs

    return rand
def _contingency_matrix(reference_indices, estimated_indices):
"""Computes the contingency matrix of a true labeling vs an estimated one.
Parameters
----------
reference_indices : np.ndarray
Array of reference indices
estimated_indices : np.ndarray
Array of estimated indices
Returns
-------
contingency_matrix : np.ndarray
Contingency matrix, shape=(#reference indices, #estimated indices)
.. note:: Based on sklearn.metrics.cluster.contingency_matrix
"""
ref_classes, ref_class_idx = np.unique(reference_indices,
return_inverse=True)
est_classes, est_class_idx = np.unique(estimated_indices,
return_inverse=True)
n_ref_classes = ref_classes.shape[0]
n_est_classes = est_classes.shape[0]
# Using coo_matrix is faster than histogram2d
return scipy.sparse.coo_matrix((np.ones(ref_class_idx.shape[0]),
(ref_class_idx, est_class_idx)),
shape=(n_ref_classes, n_est_classes),
dtype=np.int).toarray()
def _adjusted_rand_index(reference_indices, estimated_indices):
    """Compute the Rand index, adjusted for chance.

    Parameters
    ----------
    reference_indices : np.ndarray
        Array of reference indices
    estimated_indices : np.ndarray
        Array of estimated indices

    Returns
    -------
    ari : float
        Adjusted Rand index

    .. note:: Based on sklearn.metrics.cluster.adjusted_rand_score
    """
    n_samples = len(reference_indices)
    ref_classes = np.unique(reference_indices)
    est_classes = np.unique(estimated_indices)
    # Special limit cases: no clustering since the data is not split;
    # or trivial clustering where each document is assigned a unique cluster.
    # These are perfect matches hence return 1.0.
    if (ref_classes.shape[0] == est_classes.shape[0] == 1 or
            ref_classes.shape[0] == est_classes.shape[0] == 0 or
            (ref_classes.shape[0] == est_classes.shape[0] ==
             len(reference_indices))):
        return 1.0
    contingency = _contingency_matrix(reference_indices, estimated_indices)
    # Compute the ARI using the contingency data.
    # scipy.misc.comb was deprecated and removed from SciPy (>= 1.3);
    # scipy.special.comb is the drop-in replacement.
    sum_comb_c = sum(scipy.special.comb(n_c, 2, exact=True) for n_c in
                     contingency.sum(axis=1))
    sum_comb_k = sum(scipy.special.comb(n_k, 2, exact=True) for n_k in
                     contingency.sum(axis=0))
    sum_comb = sum(scipy.special.comb(n_ij, 2, exact=True) for n_ij in
                   contingency.flatten())
    prod_comb = ((sum_comb_c * sum_comb_k) /
                 float(scipy.special.comb(n_samples, 2)))
    mean_comb = (sum_comb_k + sum_comb_c) / 2.
    return ((sum_comb - prod_comb) / (mean_comb - prod_comb))
def ari(reference_intervals, reference_labels,
        estimated_intervals, estimated_labels,
        frame_size=0.1):
    """Adjusted Rand Index (ARI) for frame clustering segmentation evaluation.

    Examples
    --------
    >>> (ref_intervals,
    ...  ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')
    >>> (est_intervals,
    ...  est_labels) = mir_eval.io.load_labeled_intervals('est.lab')
    >>> # Trim or pad the estimate to match reference timing
    >>> (ref_intervals,
    ...  ref_labels) = mir_eval.util.adjust_intervals(ref_intervals,
    ...                                               ref_labels,
    ...                                               t_min=0)
    >>> (est_intervals,
    ...  est_labels) = mir_eval.util.adjust_intervals(
    ...     est_intervals, est_labels, t_min=0, t_max=ref_intervals.max())
    >>> ari_score = mir_eval.structure.ari(ref_intervals, ref_labels,
    ...                                    est_intervals, est_labels)

    Parameters
    ----------
    reference_intervals : np.ndarray, shape=(n, 2)
        reference segment intervals, in the format returned by
        :func:`mir_eval.io.load_labeled_intervals`.
    reference_labels : list, shape=(n,)
        reference segment labels, in the format returned by
        :func:`mir_eval.io.load_labeled_intervals`.
    estimated_intervals : np.ndarray, shape=(m, 2)
        estimated segment intervals, in the format returned by
        :func:`mir_eval.io.load_labeled_intervals`.
    estimated_labels : list, shape=(m,)
        estimated segment labels, in the format returned by
        :func:`mir_eval.io.load_labeled_intervals`.
    frame_size : float > 0
        length (in seconds) of frames for clustering
        (Default value = 0.1)

    Returns
    -------
    ari_score : float > 0
        Adjusted Rand index between segmentations.
    """
    validate_structure(reference_intervals, reference_labels,
                       estimated_intervals, estimated_labels)
    # Check for empty annotations. Don't need to check labels because
    # validate_structure makes sure they're the same size as intervals.
    # Fixed: this used to return the 3-tuple ``0., 0., 0.`` (copy/pasted
    # from the precision/recall metrics) although this function's
    # documented contract is a single float.
    if reference_intervals.size == 0 or estimated_intervals.size == 0:
        return 0.
    # Generate the cluster labels
    y_ref = util.intervals_to_samples(reference_intervals,
                                      reference_labels,
                                      sample_size=frame_size)[-1]
    y_ref = util.index_labels(y_ref)[0]
    # Map to index space
    y_est = util.intervals_to_samples(estimated_intervals,
                                      estimated_labels,
                                      sample_size=frame_size)[-1]
    y_est = util.index_labels(y_est)[0]
    return _adjusted_rand_index(y_ref, y_est)
def _mutual_info_score(reference_indices, estimated_indices, contingency=None):
"""Compute the mutual information between two sequence labelings.
Parameters
----------
reference_indices : np.ndarray
Array of reference indices
estimated_indices : np.ndarray
Array of estimated indices
contingency : np.ndarray
Pre-computed contingency matrix. If None, one will be computed.
(Default value = None)
Returns
-------
mi : float
Mutual information
.. note:: Based on sklearn.metrics.cluster.mutual_info_score
"""
if contingency is None:
contingency = _contingency_matrix(reference_indices,
estimated_indices).astype(float)
contingency_sum = np.sum(contingency)
pi = np.sum(contingency, axis=1)
pj = np.sum(contingency, axis=0)
outer = np.outer(pi, pj)
nnz = contingency != 0.0
# normalized contingency
contingency_nm = contingency[nnz]
log_contingency_nm = np.log(contingency_nm)
contingency_nm /= contingency_sum
# log(a / b) should be calculated as log(a) - log(b) for
# possible loss of precision
log_outer = -np.log(outer[nnz]) + np.log(pi.sum()) + np.log(pj.sum())
mi = (contingency_nm * (log_contingency_nm - np.log(contingency_sum)) +
contingency_nm * log_outer)
return mi.sum()
def _entropy(labels):
"""Calculates the entropy for a labeling.
Parameters
----------
labels : list-like
List of labels.
Returns
-------
entropy : float
Entropy of the labeling.
.. note:: Based on sklearn.metrics.cluster.entropy
"""
if len(labels) == 0:
return 1.0
label_idx = np.unique(labels, return_inverse=True)[1]
pi = np.bincount(label_idx).astype(np.float)
pi = pi[pi > 0]
pi_sum = np.sum(pi)
# log(a / b) should be calculated as log(a) - log(b) for
# possible loss of precision
return -np.sum((pi / pi_sum) * (np.log(pi) - np.log(pi_sum)))
def _adjusted_mutual_info_score(reference_indices, estimated_indices):
    """Compute the mutual information between two sequence labelings, adjusted
    for chance.

    Parameters
    ----------
    reference_indices : np.ndarray
        Array of reference indices
    estimated_indices : np.ndarray
        Array of estimated indices

    Returns
    -------
    ami : float <= 1.0
        Adjusted mutual information: (MI - E[MI]) normalized by the larger
        label entropy minus E[MI].

    .. note:: Based on sklearn.metrics.cluster.adjusted_mutual_info_score
        and sklearn.metrics.cluster.expected_mutual_info_score
    """
    n_samples = len(reference_indices)
    ref_classes = np.unique(reference_indices)
    est_classes = np.unique(estimated_indices)
    # Special limit cases: no clustering since the data is not split.
    # This is a perfect match hence return 1.0.
    if (ref_classes.shape[0] == est_classes.shape[0] == 1 or
            ref_classes.shape[0] == est_classes.shape[0] == 0):
        return 1.0
    contingency = _contingency_matrix(reference_indices,
                                      estimated_indices).astype(float)
    # Calculate the MI for the two clusterings
    mi = _mutual_info_score(reference_indices, estimated_indices,
                            contingency=contingency)
    # The following code is based on
    # sklearn.metrics.cluster.expected_mutual_information
    R, C = contingency.shape
    N = float(n_samples)
    a = np.sum(contingency, axis=1).astype(np.int32)
    b = np.sum(contingency, axis=0).astype(np.int32)
    # There are three major terms to the EMI equation, which are multiplied to
    # and then summed over varying nij values.
    # While nijs[0] will never be used, having it simplifies the indexing.
    nijs = np.arange(0, max(np.max(a), np.max(b)) + 1, dtype='float')
    # Stops divide by zero warnings. As its not used, no issue.
    nijs[0] = 1
    # term1 is nij / N
    term1 = nijs / N
    # term2 is log((N*nij) / (a * b)) == log(N * nij) - log(a * b)
    # term2 uses the outer product
    log_ab_outer = np.log(np.outer(a, b))
    # term2 uses N * nij
    log_Nnij = np.log(N * nijs)
    # term3 is large, and involved many factorials. Calculate these in log
    # space to stop overflows.
    gln_a = scipy.special.gammaln(a + 1)
    gln_b = scipy.special.gammaln(b + 1)
    gln_Na = scipy.special.gammaln(N - a + 1)
    gln_Nb = scipy.special.gammaln(N - b + 1)
    gln_N = scipy.special.gammaln(N + 1)
    gln_nij = scipy.special.gammaln(nijs + 1)
    # start and end values for nij terms for each summation.
    start = np.array([[v - N + w for w in b] for v in a], dtype='int')
    start = np.maximum(start, 1)
    end = np.minimum(np.resize(a, (C, R)).T, np.resize(b, (R, C))) + 1
    # emi itself is a summation over the various values.
    # NOTE(review): the nij ranges are bounded by min(a[i], b[j]), so this
    # triple loop is O(R * C * N) in the worst case — it can dominate the
    # runtime for long inputs.
    emi = 0
    for i in range(R):
        for j in range(C):
            for nij in range(start[i, j], end[i, j]):
                term2 = log_Nnij[nij] - log_ab_outer[i, j]
                # Numerators are positive, denominators are negative.
                gln = (gln_a[i] + gln_b[j] + gln_Na[i] + gln_Nb[j] -
                       gln_N - gln_nij[nij] -
                       scipy.special.gammaln(a[i] - nij + 1) -
                       scipy.special.gammaln(b[j] - nij + 1) -
                       scipy.special.gammaln(N - a[i] - b[j] + nij + 1))
                term3 = np.exp(gln)
                emi += (term1[nij] * term2 * term3)
    # Calculate entropy for each labeling
    h_true, h_pred = _entropy(reference_indices), _entropy(estimated_indices)
    ami = (mi - emi) / (max(h_true, h_pred) - emi)
    return ami
def _normalized_mutual_info_score(reference_indices, estimated_indices):
    """Compute the mutual information between two sequence labelings,
    normalized by the geometric mean of the label entropies.

    Parameters
    ----------
    reference_indices : np.ndarray
        Array of reference indices
    estimated_indices : np.ndarray
        Array of estimated indices

    Returns
    -------
    nmi : float <= 1.0
        Normalized mutual information

    .. note:: Based on sklearn.metrics.cluster.normalized_mutual_info_score
    """
    ref_classes = np.unique(reference_indices)
    est_classes = np.unique(estimated_indices)
    # Special limit cases: no clustering since the data is not split.
    # This is a perfect match hence return 1.0.
    if (ref_classes.shape[0] == est_classes.shape[0] == 1 or
            ref_classes.shape[0] == est_classes.shape[0] == 0):
        return 1.0
    # astype(float) already yields a float array; the extra
    # ``np.array(contingency, dtype='float')`` re-cast was redundant and
    # has been removed.
    contingency = _contingency_matrix(reference_indices,
                                      estimated_indices).astype(float)
    # Calculate the MI for the two clusterings
    mi = _mutual_info_score(reference_indices, estimated_indices,
                            contingency=contingency)
    # Normalize by the geometric mean of the entropies; the 1e-10 floor
    # guards against division by zero.
    h_true, h_pred = _entropy(reference_indices), _entropy(estimated_indices)
    nmi = mi / max(np.sqrt(h_true * h_pred), 1e-10)
    return nmi
def mutual_information(reference_intervals, reference_labels,
                       estimated_intervals, estimated_labels,
                       frame_size=0.1):
    """Frame-clustering segmentation: mutual information metrics.

    Examples
    --------
    >>> (ref_intervals,
    ...  ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')
    >>> (est_intervals,
    ...  est_labels) = mir_eval.io.load_labeled_intervals('est.lab')
    >>> # Trim or pad the estimate to match reference timing
    >>> (ref_intervals,
    ...  ref_labels) = mir_eval.util.adjust_intervals(ref_intervals,
    ...                                               ref_labels,
    ...                                               t_min=0)
    >>> (est_intervals,
    ...  est_labels) = mir_eval.util.adjust_intervals(
    ...     est_intervals, est_labels, t_min=0, t_max=ref_intervals.max())
    >>> mi, ami, nmi = mir_eval.structure.mutual_information(ref_intervals,
    ...                                                      ref_labels,
    ...                                                      est_intervals,
    ...                                                      est_labels)

    Parameters
    ----------
    reference_intervals : np.ndarray, shape=(n, 2)
        reference segment intervals, in the format returned by
        :func:`mir_eval.io.load_labeled_intervals`.
    reference_labels : list, shape=(n,)
        reference segment labels, in the format returned by
        :func:`mir_eval.io.load_labeled_intervals`.
    estimated_intervals : np.ndarray, shape=(m, 2)
        estimated segment intervals, in the format returned by
        :func:`mir_eval.io.load_labeled_intervals`.
    estimated_labels : list, shape=(m,)
        estimated segment labels, in the format returned by
        :func:`mir_eval.io.load_labeled_intervals`.
    frame_size : float > 0
        length (in seconds) of frames for clustering
        (Default value = 0.1)

    Returns
    -------
    MI : float > 0
        Mutual information between segmentations
    AMI : float
        Adjusted mutual information between segmentations.
    NMI : float > 0
        Normalize mutual information between segmentations
    """
    validate_structure(reference_intervals, reference_labels,
                       estimated_intervals, estimated_labels)

    # Empty annotations score zero on all three metrics; labels need no
    # separate check because validate_structure enforces matching sizes.
    if reference_intervals.size == 0 or estimated_intervals.size == 0:
        return 0., 0., 0.

    # Sample both annotations on a common frame grid and map the frame
    # labels to integer cluster indices.
    frame_indices = []
    for intervals, labels in ((reference_intervals, reference_labels),
                              (estimated_intervals, estimated_labels)):
        sampled = util.intervals_to_samples(intervals, labels,
                                            sample_size=frame_size)[-1]
        frame_indices.append(util.index_labels(sampled)[0])
    ref_frames, est_frames = frame_indices

    # Plain, adjusted, and normalized mutual information
    return (_mutual_info_score(ref_frames, est_frames),
            _adjusted_mutual_info_score(ref_frames, est_frames),
            _normalized_mutual_info_score(ref_frames, est_frames))
def nce(reference_intervals, reference_labels, estimated_intervals,
        estimated_labels, frame_size=0.1, beta=1.0):
    """Frame-clustering segmentation: normalized conditional entropy

    Computes cross-entropy of cluster assignment, normalized by the
    max-entropy.

    Examples
    --------
    >>> (ref_intervals,
    ...  ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')
    >>> (est_intervals,
    ...  est_labels) = mir_eval.io.load_labeled_intervals('est.lab')
    >>> # Trim or pad the estimate to match reference timing
    >>> (ref_intervals,
    ...  ref_labels) = mir_eval.util.adjust_intervals(ref_intervals,
    ...                                               ref_labels,
    ...                                               t_min=0)
    >>> (est_intervals,
    ...  est_labels) = mir_eval.util.adjust_intervals(
    ...     est_intervals, est_labels, t_min=0, t_max=ref_intervals.max())
    >>> S_over, S_under, S_F = mir_eval.structure.nce(ref_intervals,
    ...                                               ref_labels,
    ...                                               est_intervals,
    ...                                               est_labels)

    Parameters
    ----------
    reference_intervals : np.ndarray, shape=(n, 2)
        reference segment intervals, in the format returned by
        :func:`mir_eval.io.load_labeled_intervals`.
    reference_labels : list, shape=(n,)
        reference segment labels, in the format returned by
        :func:`mir_eval.io.load_labeled_intervals`.
    estimated_intervals : np.ndarray, shape=(m, 2)
        estimated segment intervals, in the format returned by
        :func:`mir_eval.io.load_labeled_intervals`.
    estimated_labels : list, shape=(m,)
        estimated segment labels, in the format returned by
        :func:`mir_eval.io.load_labeled_intervals`.
    frame_size : float > 0
        length (in seconds) of frames for clustering
        (Default value = 0.1)
    beta : float > 0
        beta for F-measure
        (Default value = 1.0)

    Returns
    -------
    S_over
        Over-clustering score: ``1 - H(y_est | y_ref) / log(|y_est|)``;
        0 if ``|y_est| == 1``.
    S_under
        Under-clustering score: ``1 - H(y_ref | y_est) / log(|y_ref|)``;
        0 if ``|y_ref| == 1``.
    S_F
        F-measure for (S_over, S_under)
    """
    validate_structure(reference_intervals, reference_labels,
                       estimated_intervals, estimated_labels)

    # Empty annotations score zero; labels need no separate check because
    # validate_structure enforces matching sizes.
    if reference_intervals.size == 0 or estimated_intervals.size == 0:
        return 0., 0., 0.

    # Frame-sample both annotations and map labels to integer indices
    ref_frames = util.index_labels(
        util.intervals_to_samples(reference_intervals, reference_labels,
                                  sample_size=frame_size)[-1])[0]
    est_frames = util.index_labels(
        util.intervals_to_samples(estimated_intervals, estimated_labels,
                                  sample_size=frame_size)[-1])[0]

    # Joint distribution over (reference, estimate) frame-label pairs,
    # shape=(n_ref, n_est)
    joint = _contingency_matrix(ref_frames, est_frames).astype(float)
    joint /= len(ref_frames)

    est_marginal = joint.sum(axis=0)
    ref_marginal = joint.sum(axis=1)

    # Conditional entropies in bits.  scipy.stats.entropy sums over axis=0
    # using natural logs (its `base` kwarg only appeared in scipy 0.14),
    # so divide by log(2) to convert.
    h_ref_given_est = est_marginal.dot(scipy.stats.entropy(joint) /
                                       np.log(2))
    h_est_given_ref = ref_marginal.dot(scipy.stats.entropy(joint.T) /
                                       np.log(2))

    n_ref, n_est = joint.shape
    score_under = ((1. - h_ref_given_est / np.log2(n_ref))
                   if n_ref > 1 else 0.0)
    score_over = ((1. - h_est_given_ref / np.log2(n_est))
                  if n_est > 1 else 0.0)

    return score_over, score_under, util.f_measure(score_over, score_under,
                                                   beta=beta)
def evaluate(ref_intervals, ref_labels, est_intervals, est_labels, **kwargs):
    """Compute all metrics for the given reference and estimated annotations.

    Examples
    --------
    >>> (ref_intervals,
    ...  ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')
    >>> (est_intervals,
    ...  est_labels) = mir_eval.io.load_labeled_intervals('est.lab')
    >>> scores = mir_eval.segment.evaluate(ref_intervals, ref_labels,
    ...                                    est_intervals, est_labels)

    Parameters
    ----------
    ref_intervals : np.ndarray, shape=(n, 2)
        reference segment intervals, in the format returned by
        :func:`mir_eval.io.load_labeled_intervals`.
    ref_labels : list, shape=(n,)
        reference segment labels, in the format returned by
        :func:`mir_eval.io.load_labeled_intervals`.
    est_intervals : np.ndarray, shape=(m, 2)
        estimated segment intervals, in the format returned by
        :func:`mir_eval.io.load_labeled_intervals`.
    est_labels : list, shape=(m,)
        estimated segment labels, in the format returned by
        :func:`mir_eval.io.load_labeled_intervals`.
    kwargs
        Additional keyword arguments which will be passed to the
        appropriate metric or preprocessing functions.

    Returns
    -------
    scores : dict
        Dictionary of scores, where the key is the metric name (str) and
        the value is the (float) score achieved.
    """
    # Pad/trim the estimate so both annotations span the same time range,
    # starting at 0.
    ref_intervals, ref_labels = \
        util.adjust_intervals(ref_intervals, labels=ref_labels, t_min=0.0)
    est_intervals, est_labels = \
        util.adjust_intervals(est_intervals, labels=est_labels, t_min=0.0,
                              t_max=ref_intervals.max())

    scores = collections.OrderedDict()

    # Boundary detection, evaluated at the two standard fixed windows
    for window in (.5, 3.0):
        kwargs['window'] = window
        keys = ['{}@{}'.format(metric, window)
                for metric in ('Precision', 'Recall', 'F-measure')]
        scores[keys[0]], scores[keys[1]], scores[keys[2]] = \
            util.filter_kwargs(detection, ref_intervals, est_intervals,
                               **kwargs)

    # Median boundary deviations
    (scores['Ref-to-est deviation'],
     scores['Est-to-ref deviation']) = util.filter_kwargs(
        deviation, ref_intervals, est_intervals, **kwargs)

    # Pairwise frame clustering
    (scores['Pairwise Precision'],
     scores['Pairwise Recall'],
     scores['Pairwise F-measure']) = util.filter_kwargs(
        pairwise, ref_intervals, ref_labels, est_intervals, est_labels,
        **kwargs)

    # Rand indices (plain and chance-adjusted)
    scores['Rand Index'] = util.filter_kwargs(
        rand_index, ref_intervals, ref_labels, est_intervals, est_labels,
        **kwargs)
    scores['Adjusted Rand Index'] = util.filter_kwargs(
        ari, ref_intervals, ref_labels, est_intervals, est_labels, **kwargs)

    # Mutual information metrics
    (scores['Mutual Information'],
     scores['Adjusted Mutual Information'],
     scores['Normalized Mutual Information']) = util.filter_kwargs(
        mutual_information, ref_intervals, ref_labels, est_intervals,
        est_labels, **kwargs)

    # Conditional entropy metrics
    (scores['NCE Over'],
     scores['NCE Under'],
     scores['NCE F-measure']) = util.filter_kwargs(
        nce, ref_intervals, ref_labels, est_intervals, est_labels, **kwargs)

    return scores
| 38.790409 | 82 | 0.628486 |
import collections
import warnings
import numpy as np
import scipy.stats
import scipy.sparse
import scipy.misc
import scipy.special
from . import util
def validate_boundary(reference_intervals, estimated_intervals, trim):
    """Check that boundary annotations are well-formed; warn when too small.

    Parameters
    ----------
    reference_intervals : np.ndarray, shape=(n, 2)
        Reference segment intervals.
    estimated_intervals : np.ndarray, shape=(m, 2)
        Estimated segment intervals.
    trim : bool
        Whether the first/last boundaries will be trimmed downstream;
        trimming needs at least two intervals to leave anything behind.
    """
    if trim:
        min_size = 2
    else:
        # If we're not trimming, then we only need one interval
        min_size = 1
    if len(reference_intervals) < min_size:
        warnings.warn("Reference intervals are empty.")
    if len(estimated_intervals) < min_size:
        warnings.warn("Estimated intervals are empty.")
    # Ensure both interval arrays are structurally valid
    for intervals in [reference_intervals, estimated_intervals]:
        util.validate_intervals(intervals)
def validate_structure(reference_intervals, reference_labels,
                       estimated_intervals, estimated_labels):
    """Check that structure (labeled-interval) annotations are well-formed.

    Raises ValueError when interval and label counts disagree, when a
    non-empty segmentation does not start at time 0, or when reference and
    estimate end at different times.  Empty annotations only warn.
    """
    for (intervals, labels) in [(reference_intervals, reference_labels),
                                (estimated_intervals, estimated_labels)]:
        util.validate_intervals(intervals)
        if intervals.shape[0] != len(labels):
            raise ValueError('Number of intervals does not match number '
                             'of labels')
        # Start-time check only makes sense for non-empty annotations
        if intervals.size > 0:
            if not np.allclose(intervals.min(), 0.0):
                raise ValueError('Segment intervals do not start at 0')
    if reference_intervals.size == 0:
        warnings.warn("Reference intervals are empty.")
    if estimated_intervals.size == 0:
        warnings.warn("Estimated intervals are empty.")
    if reference_intervals.size > 0 and estimated_intervals.size > 0:
        if not np.allclose(reference_intervals.max(),
                           estimated_intervals.max()):
            raise ValueError('End times do not match')
def detection(reference_intervals, estimated_intervals,
              window=0.5, beta=1.0, trim=False):
    """Boundary detection: precision, recall and F-measure of estimated
    boundaries matched (via util.match_events with tolerance ``window``)
    to reference boundaries.

    Returns (0, 0, 0) when either boundary list is empty after optional
    trimming of the first and last boundaries.
    """
    validate_boundary(reference_intervals, estimated_intervals, trim)
    # Convert intervals to boundary times
    reference_boundaries = util.intervals_to_boundaries(reference_intervals)
    estimated_boundaries = util.intervals_to_boundaries(estimated_intervals)
    # Optionally drop the trivial first/last boundaries
    if trim:
        reference_boundaries = reference_boundaries[1:-1]
        estimated_boundaries = estimated_boundaries[1:-1]
    if len(reference_boundaries) == 0 or len(estimated_boundaries) == 0:
        return 0.0, 0.0, 0.0
    matching = util.match_events(reference_boundaries,
                                 estimated_boundaries,
                                 window)
    precision = float(len(matching)) / len(estimated_boundaries)
    recall = float(len(matching)) / len(reference_boundaries)
    f_measure = util.f_measure(precision, recall, beta=beta)
    return precision, recall, f_measure
def deviation(reference_intervals, estimated_intervals, trim=False):
    """Boundary deviation: median absolute time from each reference
    boundary to its nearest estimated boundary, and vice versa.

    Returns (reference_to_estimated, estimated_to_reference) medians, or
    (nan, nan) when either boundary list is empty after optional trimming.
    """
    validate_boundary(reference_intervals, estimated_intervals, trim)
    reference_boundaries = util.intervals_to_boundaries(reference_intervals)
    estimated_boundaries = util.intervals_to_boundaries(estimated_intervals)
    if trim:
        reference_boundaries = reference_boundaries[1:-1]
        estimated_boundaries = estimated_boundaries[1:-1]
    if len(reference_boundaries) == 0 or len(estimated_boundaries) == 0:
        return np.nan, np.nan
    # Pairwise absolute time differences between all boundary pairs
    dist = np.abs(np.subtract.outer(reference_boundaries,
                                    estimated_boundaries))
    estimated_to_reference = np.median(dist.min(axis=0))
    reference_to_estimated = np.median(dist.min(axis=1))
    return reference_to_estimated, estimated_to_reference
def pairwise(reference_intervals, reference_labels,
             estimated_intervals, estimated_labels,
             frame_size=0.1, beta=1.0):
    """Frame-clustering segmentation: pairwise precision, recall, F-measure.

    Both annotations are sampled every ``frame_size`` seconds; a pair of
    frames "agrees" when both carry the same label, and precision/recall
    compare the agreeing pairs of the estimate against the reference.
    """
    validate_structure(reference_intervals, reference_labels,
                       estimated_intervals, estimated_labels)
    # validate_structure makes sure they're the same size as intervals
    if reference_intervals.size == 0 or estimated_intervals.size == 0:
        return 0., 0., 0.
    # Frame-sample both annotations and map labels to integer indices
    y_ref = util.intervals_to_samples(reference_intervals,
                                      reference_labels,
                                      sample_size=frame_size)[-1]
    y_ref = util.index_labels(y_ref)[0]
    y_est = util.intervals_to_samples(estimated_intervals,
                                      estimated_labels,
                                      sample_size=frame_size)[-1]
    y_est = util.index_labels(y_est)[0]
    # Count agreeing frame pairs (self-pairs excluded) in each labeling
    agree_ref = np.equal.outer(y_ref, y_ref)
    n_agree_ref = (agree_ref.sum() - len(y_ref)) / 2.0
    agree_est = np.equal.outer(y_est, y_est)
    n_agree_est = (agree_est.sum() - len(y_est)) / 2.0
    # Pairs on which both labelings agree
    matches = np.logical_and(agree_ref, agree_est)
    n_matches = (matches.sum() - len(y_ref)) / 2.0
    precision = n_matches / n_agree_est
    recall = n_matches / n_agree_ref
    f_measure = util.f_measure(precision, recall, beta=beta)
    return precision, recall, f_measure
def rand_index(reference_intervals, reference_labels,
               estimated_intervals, estimated_labels,
               frame_size=0.1, beta=1.0):
    """Frame-clustering segmentation: Rand index.

    Parameters
    ----------
    reference_intervals : np.ndarray, shape=(n, 2)
        reference segment intervals
    reference_labels : list, shape=(n,)
        reference segment labels
    estimated_intervals : np.ndarray, shape=(m, 2)
        estimated segment intervals
    estimated_labels : list, shape=(m,)
        estimated segment labels
    frame_size : float > 0
        length (in seconds) of frames for clustering (Default value = 0.1)
    beta : float > 0
        unused; kept for interface compatibility (Default value = 1.0)

    Returns
    -------
    rand_index : float > 0
        Rand index between the frame-level clusterings.
    """
    validate_structure(reference_intervals, reference_labels,
                       estimated_intervals, estimated_labels)
    # Empty annotations score 0.  Fixed: this used to return the 3-tuple
    # ``0., 0., 0.`` (copy/pasted from the precision/recall metrics)
    # although this function returns a single scalar.
    if reference_intervals.size == 0 or estimated_intervals.size == 0:
        return 0.
    # Frame-sample both annotations and map labels to integer indices
    y_ref = util.intervals_to_samples(reference_intervals,
                                      reference_labels,
                                      sample_size=frame_size)[-1]
    y_ref = util.index_labels(y_ref)[0]
    y_est = util.intervals_to_samples(estimated_intervals,
                                      estimated_labels,
                                      sample_size=frame_size)[-1]
    y_est = util.index_labels(y_est)[0]
    # Agreement matrices for each labeling
    agree_ref = np.equal.outer(y_ref, y_ref)
    agree_est = np.equal.outer(y_est, y_est)
    # Pairs on which both labelings agree / both disagree
    matches_pos = np.logical_and(agree_ref, agree_est)
    matches_neg = np.logical_and(~agree_ref, ~agree_est)
    n_pairs = len(y_ref) * (len(y_ref) - 1) / 2.0
    n_matches_pos = (matches_pos.sum() - len(y_ref)) / 2.0
    n_matches_neg = matches_neg.sum() / 2.0
    rand = (n_matches_pos + n_matches_neg) / n_pairs
    return rand
def _contingency_matrix(reference_indices, estimated_indices):
ref_classes, ref_class_idx = np.unique(reference_indices,
return_inverse=True)
est_classes, est_class_idx = np.unique(estimated_indices,
return_inverse=True)
n_ref_classes = ref_classes.shape[0]
n_est_classes = est_classes.shape[0]
return scipy.sparse.coo_matrix((np.ones(ref_class_idx.shape[0]),
(ref_class_idx, est_class_idx)),
shape=(n_ref_classes, n_est_classes),
dtype=np.int).toarray()
def _adjusted_rand_index(reference_indices, estimated_indices):
    """Compute the Rand index, adjusted for chance.

    .. note:: Based on sklearn.metrics.cluster.adjusted_rand_score
    """
    n_samples = len(reference_indices)
    ref_classes = np.unique(reference_indices)
    est_classes = np.unique(estimated_indices)
    # Degenerate labelings (single cluster, empty, or one cluster per
    # sample in both) are defined as perfect matches.
    if (ref_classes.shape[0] == est_classes.shape[0] == 1 or
            ref_classes.shape[0] == est_classes.shape[0] == 0 or
            (ref_classes.shape[0] == est_classes.shape[0] ==
             len(reference_indices))):
        return 1.0
    contingency = _contingency_matrix(reference_indices, estimated_indices)
    # scipy.misc.comb was deprecated and removed from SciPy (>= 1.3);
    # scipy.special.comb is the drop-in replacement.
    sum_comb_c = sum(scipy.special.comb(n_c, 2, exact=True) for n_c in
                     contingency.sum(axis=1))
    sum_comb_k = sum(scipy.special.comb(n_k, 2, exact=True) for n_k in
                     contingency.sum(axis=0))
    sum_comb = sum(scipy.special.comb(n_ij, 2, exact=True) for n_ij in
                   contingency.flatten())
    prod_comb = ((sum_comb_c * sum_comb_k) /
                 float(scipy.special.comb(n_samples, 2)))
    mean_comb = (sum_comb_k + sum_comb_c) / 2.
    return ((sum_comb - prod_comb) / (mean_comb - prod_comb))
def ari(reference_intervals, reference_labels,
        estimated_intervals, estimated_labels,
        frame_size=0.1):
    """Adjusted Rand Index (ARI) for frame clustering segmentation evaluation.

    Parameters
    ----------
    reference_intervals : np.ndarray, shape=(n, 2)
        reference segment intervals
    reference_labels : list, shape=(n,)
        reference segment labels
    estimated_intervals : np.ndarray, shape=(m, 2)
        estimated segment intervals
    estimated_labels : list, shape=(m,)
        estimated segment labels
    frame_size : float > 0
        length (in seconds) of frames for clustering (Default value = 0.1)

    Returns
    -------
    ari_score : float > 0
        Adjusted Rand index between segmentations.
    """
    validate_structure(reference_intervals, reference_labels,
                       estimated_intervals, estimated_labels)
    # Empty annotations score 0.  Fixed: this used to return the 3-tuple
    # ``0., 0., 0.`` (copy/pasted from the precision/recall metrics)
    # although this function returns a single scalar.
    if reference_intervals.size == 0 or estimated_intervals.size == 0:
        return 0.
    # Frame-sample both annotations and map labels to integer indices
    y_ref = util.intervals_to_samples(reference_intervals,
                                      reference_labels,
                                      sample_size=frame_size)[-1]
    y_ref = util.index_labels(y_ref)[0]
    y_est = util.intervals_to_samples(estimated_intervals,
                                      estimated_labels,
                                      sample_size=frame_size)[-1]
    y_est = util.index_labels(y_est)[0]
    return _adjusted_rand_index(y_ref, y_est)
def _mutual_info_score(reference_indices, estimated_indices, contingency=None):
    """Compute the mutual information between two sequence labelings.

    ``contingency`` may be supplied pre-computed; otherwise it is built
    from the two index arrays.

    .. note:: Based on sklearn.metrics.cluster.mutual_info_score
    """
    if contingency is None:
        contingency = _contingency_matrix(reference_indices,
                                          estimated_indices).astype(float)
    contingency_sum = np.sum(contingency)
    pi = np.sum(contingency, axis=1)
    pj = np.sum(contingency, axis=0)
    outer = np.outer(pi, pj)
    nnz = contingency != 0.0
    # Restrict to nonzero cells so the logarithms stay finite
    contingency_nm = contingency[nnz]
    log_contingency_nm = np.log(contingency_nm)
    contingency_nm /= contingency_sum
    # log(a / b) computed as log(a) - log(b) to limit precision loss
    log_outer = -np.log(outer[nnz]) + np.log(pi.sum()) + np.log(pj.sum())
    mi = (contingency_nm * (log_contingency_nm - np.log(contingency_sum)) +
          contingency_nm * log_outer)
    return mi.sum()
def _entropy(labels):
if len(labels) == 0:
return 1.0
label_idx = np.unique(labels, return_inverse=True)[1]
pi = np.bincount(label_idx).astype(np.float)
pi = pi[pi > 0]
pi_sum = np.sum(pi)
return -np.sum((pi / pi_sum) * (np.log(pi) - np.log(pi_sum)))
def _adjusted_mutual_info_score(reference_indices, estimated_indices):
    """Mutual information between two sequence labelings, adjusted for
    chance: (MI - E[MI]) / (max entropy - E[MI]).

    .. note:: Based on sklearn.metrics.cluster.adjusted_mutual_info_score
        and sklearn.metrics.cluster.expected_mutual_info_score
    """
    n_samples = len(reference_indices)
    ref_classes = np.unique(reference_indices)
    est_classes = np.unique(estimated_indices)
    # Degenerate single-cluster (or empty) labelings count as perfect
    if (ref_classes.shape[0] == est_classes.shape[0] == 1 or
            ref_classes.shape[0] == est_classes.shape[0] == 0):
        return 1.0
    contingency = _contingency_matrix(reference_indices,
                                      estimated_indices).astype(float)
    mi = _mutual_info_score(reference_indices, estimated_indices,
                            contingency=contingency)
    # Expected mutual information, following
    # sklearn.metrics.cluster.expected_mutual_information
    R, C = contingency.shape
    N = float(n_samples)
    a = np.sum(contingency, axis=1).astype(np.int32)
    b = np.sum(contingency, axis=0).astype(np.int32)
    # nijs[0] is never used; setting it to 1 avoids divide-by-zero warnings
    nijs = np.arange(0, max(np.max(a), np.max(b)) + 1, dtype='float')
    nijs[0] = 1
    term1 = nijs / N
    log_ab_outer = np.log(np.outer(a, b))
    log_Nnij = np.log(N * nijs)
    # Factorial-heavy terms are kept in log space to avoid overflow
    gln_a = scipy.special.gammaln(a + 1)
    gln_b = scipy.special.gammaln(b + 1)
    gln_Na = scipy.special.gammaln(N - a + 1)
    gln_Nb = scipy.special.gammaln(N - b + 1)
    gln_N = scipy.special.gammaln(N + 1)
    gln_nij = scipy.special.gammaln(nijs + 1)
    # Valid nij summation range for each (i, j) cell
    start = np.array([[v - N + w for w in b] for v in a], dtype='int')
    start = np.maximum(start, 1)
    end = np.minimum(np.resize(a, (C, R)).T, np.resize(b, (R, C))) + 1
    # NOTE(review): worst case O(R * C * N) — can dominate runtime for
    # long inputs.
    emi = 0
    for i in range(R):
        for j in range(C):
            for nij in range(start[i, j], end[i, j]):
                term2 = log_Nnij[nij] - log_ab_outer[i, j]
                gln = (gln_a[i] + gln_b[j] + gln_Na[i] + gln_Nb[j] -
                       gln_N - gln_nij[nij] -
                       scipy.special.gammaln(a[i] - nij + 1) -
                       scipy.special.gammaln(b[j] - nij + 1) -
                       scipy.special.gammaln(N - a[i] - b[j] + nij + 1))
                term3 = np.exp(gln)
                emi += (term1[nij] * term2 * term3)
    h_true, h_pred = _entropy(reference_indices), _entropy(estimated_indices)
    ami = (mi - emi) / (max(h_true, h_pred) - emi)
    return ami
def _normalized_mutual_info_score(reference_indices, estimated_indices):
    """Mutual information between two sequence labelings, normalized by
    the geometric mean of the label entropies.

    .. note:: Based on sklearn.metrics.cluster.normalized_mutual_info_score
    """
    ref_classes = np.unique(reference_indices)
    est_classes = np.unique(estimated_indices)
    # Degenerate single-cluster (or empty) labelings count as perfect
    if (ref_classes.shape[0] == est_classes.shape[0] == 1 or
            ref_classes.shape[0] == est_classes.shape[0] == 0):
        return 1.0
    contingency = _contingency_matrix(reference_indices,
                                      estimated_indices).astype(float)
    contingency = np.array(contingency, dtype='float')
    mi = _mutual_info_score(reference_indices, estimated_indices,
                            contingency=contingency)
    h_true, h_pred = _entropy(reference_indices), _entropy(estimated_indices)
    # The 1e-10 floor guards against division by zero
    nmi = mi / max(np.sqrt(h_true * h_pred), 1e-10)
    return nmi
def mutual_information(reference_intervals, reference_labels,
                       estimated_intervals, estimated_labels,
                       frame_size=0.1):
    """Frame-clustering segmentation: mutual information metrics.

    Returns the tuple (MI, adjusted MI, normalized MI) computed over
    frame-level cluster labels sampled every ``frame_size`` seconds, or
    (0., 0., 0.) when either annotation is empty.
    """
    validate_structure(reference_intervals, reference_labels,
                       estimated_intervals, estimated_labels)
    # validate_structure makes sure they're the same size as intervals
    if reference_intervals.size == 0 or estimated_intervals.size == 0:
        return 0., 0., 0.
    # Frame-sample both annotations and map labels to integer indices
    y_ref = util.intervals_to_samples(reference_intervals,
                                      reference_labels,
                                      sample_size=frame_size)[-1]
    y_ref = util.index_labels(y_ref)[0]
    y_est = util.intervals_to_samples(estimated_intervals,
                                      estimated_labels,
                                      sample_size=frame_size)[-1]
    y_est = util.index_labels(y_est)[0]
    mutual_info = _mutual_info_score(y_ref, y_est)
    adj_mutual_info = _adjusted_mutual_info_score(y_ref, y_est)
    norm_mutual_info = _normalized_mutual_info_score(y_ref, y_est)
    return mutual_info, adj_mutual_info, norm_mutual_info
def nce(reference_intervals, reference_labels, estimated_intervals,
        estimated_labels, frame_size=0.1, beta=1.0):
    """Frame-clustering segmentation: normalized conditional entropy.

    Returns (S_over, S_under, S_F): over- and under-clustering scores
    computed from conditional entropies of the frame-label contingency
    table, and their F-measure (weighted by ``beta``).
    """
    validate_structure(reference_intervals, reference_labels,
                       estimated_intervals, estimated_labels)
    # validate_structure makes sure they're the same size as intervals
    if reference_intervals.size == 0 or estimated_intervals.size == 0:
        return 0., 0., 0.
    # Frame-sample both annotations and map labels to integer indices
    y_ref = util.intervals_to_samples(reference_intervals,
                                      reference_labels,
                                      sample_size=frame_size)[-1]
    y_ref = util.index_labels(y_ref)[0]
    y_est = util.intervals_to_samples(estimated_intervals,
                                      estimated_labels,
                                      sample_size=frame_size)[-1]
    y_est = util.index_labels(y_est)[0]
    # Joint frame-label distribution, shape=(n_ref, n_est)
    contingency = _contingency_matrix(y_ref, y_est).astype(float)
    contingency = contingency / len(y_ref)
    p_est = contingency.sum(axis=0)
    p_ref = contingency.sum(axis=1)
    # Conditional entropies in bits; scipy.stats.entropy sums over axis=0
    # with natural logs, so divide by log(2) to convert the base.
    true_given_est = p_est.dot(scipy.stats.entropy(contingency) / np.log(2))
    pred_given_ref = p_ref.dot(scipy.stats.entropy(contingency.T) / np.log(2))
    # Scores are defined as 0 when the labeling has a single cluster
    score_under = 0.0
    if contingency.shape[0] > 1:
        score_under = 1. - true_given_est / np.log2(contingency.shape[0])
    score_over = 0.0
    if contingency.shape[1] > 1:
        score_over = 1. - pred_given_ref / np.log2(contingency.shape[1])
    f_measure = util.f_measure(score_over, score_under, beta=beta)
    return score_over, score_under, f_measure
def evaluate(ref_intervals, ref_labels, est_intervals, est_labels, **kwargs):
ref_intervals, ref_labels = \
util.adjust_intervals(ref_intervals, labels=ref_labels, t_min=0.0)
est_intervals, est_labels = \
util.adjust_intervals(est_intervals, labels=est_labels, t_min=0.0,
t_max=ref_intervals.max())
scores = collections.OrderedDict()
kwargs['window'] = .5
scores['Precision@0.5'], scores['Recall@0.5'], scores['F-measure@0.5'] = \
util.filter_kwargs(detection, ref_intervals, est_intervals, **kwargs)
kwargs['window'] = 3.0
scores['Precision@3.0'], scores['Recall@3.0'], scores['F-measure@3.0'] = \
util.filter_kwargs(detection, ref_intervals, est_intervals, **kwargs)
scores['Ref-to-est deviation'], scores['Est-to-ref deviation'] = \
util.filter_kwargs(deviation, ref_intervals, est_intervals, **kwargs)
(scores['Pairwise Precision'],
scores['Pairwise Recall'],
scores['Pairwise F-measure']) = util.filter_kwargs(pairwise,
ref_intervals,
ref_labels,
est_intervals,
est_labels, **kwargs)
scores['Rand Index'] = util.filter_kwargs(rand_index, ref_intervals,
ref_labels, est_intervals,
est_labels, **kwargs)
scores['Adjusted Rand Index'] = util.filter_kwargs(ari, ref_intervals,
ref_labels,
est_intervals,
est_labels, **kwargs)
(scores['Mutual Information'],
scores['Adjusted Mutual Information'],
scores['Normalized Mutual Information']) = \
util.filter_kwargs(mutual_information, ref_intervals, ref_labels,
est_intervals, est_labels, **kwargs)
scores['NCE Over'], scores['NCE Under'], scores['NCE F-measure'] = \
util.filter_kwargs(nce, ref_intervals, ref_labels, est_intervals,
est_labels, **kwargs)
return scores
| true | true |
f7341a25ef54e344f2d21e600a9f6534cbf6773f | 3,201 | py | Python | tests/instrumentation/mysql_tests.py | mvas/apm-agent-python | f4582e90eb5308b915ca51e2e98620fc22af09ec | [
"BSD-3-Clause"
] | null | null | null | tests/instrumentation/mysql_tests.py | mvas/apm-agent-python | f4582e90eb5308b915ca51e2e98620fc22af09ec | [
"BSD-3-Clause"
] | null | null | null | tests/instrumentation/mysql_tests.py | mvas/apm-agent-python | f4582e90eb5308b915ca51e2e98620fc22af09ec | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from elasticapm.instrumentation.packages.mysql import extract_signature
def test_insert():
sql = """INSERT INTO `mytable` (id, name) VALUE ('2323', 'Ron')"""
actual = extract_signature(sql)
assert "INSERT INTO mytable" == actual
def test_update():
sql = """UPDATE `mytable` set name='Ron' WHERE id = 2323"""
actual = extract_signature(sql)
assert "UPDATE mytable" == actual
def test_delete():
sql = """DELETE FROM `mytable` WHERE id = 2323"""
actual = extract_signature(sql)
assert "DELETE FROM mytable" == actual
def test_select_simple():
sql = """SELECT `id`, `name` FROM `mytable` WHERE id = 2323"""
actual = extract_signature(sql)
assert "SELECT FROM mytable" == actual
def test_select_with_entity_quotes():
sql = """SELECT `id`, `name` FROM `mytable` WHERE id = 2323"""
actual = extract_signature(sql)
assert "SELECT FROM mytable" == actual
def test_select_with_difficult_values():
sql = """SELECT id, 'some \\'name' + " from Denmark" FROM `mytable` WHERE id = 2323"""
actual = extract_signature(sql)
assert "SELECT FROM mytable" == actual
def test_select_with_difficult_table_name():
sql = "SELECT id FROM `myta\n-æøåble` WHERE id = 2323"""
actual = extract_signature(sql)
assert "SELECT FROM myta\n-æøåble" == actual
def test_select_subselect():
sql = """SELECT id, name FROM (
SELECT id, "not a FROM ''value" FROM mytable WHERE id = 2323
) LIMIT 20"""
actual = extract_signature(sql)
assert "SELECT FROM mytable" == actual
def test_select_subselect_with_alias():
sql = """
SELECT count(*)
FROM (
SELECT count(id) AS some_alias, some_column
FROM mytable
GROUP BY some_colun
HAVING count(id) > 1
) AS foo
"""
actual = extract_signature(sql)
assert "SELECT FROM mytable" == actual
def test_select_with_multiple_tables():
sql = """SELECT count(table2.id)
FROM table1, table2, table2
WHERE table2.id = table1.table2_id
"""
actual = extract_signature(sql)
assert "SELECT FROM table1" == actual
def test_select_with_invalid_literal():
sql = "SELECT \"neverending literal FROM (SELECT * FROM ..."""
actual = extract_signature(sql)
assert "SELECT FROM" == actual
def test_savepoint():
sql = """SAVEPOINT x_asd1234"""
actual = extract_signature(sql)
assert "SAVEPOINT" == actual
def test_begin():
sql = """BEGIN"""
actual = extract_signature(sql)
assert "BEGIN" == actual
def test_create_index_with_name():
sql = """CREATE INDEX myindex ON mytable"""
actual = extract_signature(sql)
assert "CREATE INDEX" == actual
def test_create_index_without_name():
sql = """CREATE INDEX ON mytable"""
actual = extract_signature(sql)
assert "CREATE INDEX" == actual
def test_drop_table():
sql = """DROP TABLE mytable"""
actual = extract_signature(sql)
assert "DROP TABLE" == actual
def test_multi_statement_sql():
sql = """CREATE TABLE mytable; SELECT * FROM mytable; DROP TABLE mytable"""
actual = extract_signature(sql)
assert "CREATE TABLE" == actual
| 23.88806 | 90 | 0.65667 |
from elasticapm.instrumentation.packages.mysql import extract_signature
def test_insert():
sql = """INSERT INTO `mytable` (id, name) VALUE ('2323', 'Ron')"""
actual = extract_signature(sql)
assert "INSERT INTO mytable" == actual
def test_update():
sql = """UPDATE `mytable` set name='Ron' WHERE id = 2323"""
actual = extract_signature(sql)
assert "UPDATE mytable" == actual
def test_delete():
sql = """DELETE FROM `mytable` WHERE id = 2323"""
actual = extract_signature(sql)
assert "DELETE FROM mytable" == actual
def test_select_simple():
sql = """SELECT `id`, `name` FROM `mytable` WHERE id = 2323"""
actual = extract_signature(sql)
assert "SELECT FROM mytable" == actual
def test_select_with_entity_quotes():
sql = """SELECT `id`, `name` FROM `mytable` WHERE id = 2323"""
actual = extract_signature(sql)
assert "SELECT FROM mytable" == actual
def test_select_with_difficult_values():
sql = """SELECT id, 'some \\'name' + " from Denmark" FROM `mytable` WHERE id = 2323"""
actual = extract_signature(sql)
assert "SELECT FROM mytable" == actual
def test_select_with_difficult_table_name():
sql = "SELECT id FROM `myta\n-æøåble` WHERE id = 2323"""
actual = extract_signature(sql)
assert "SELECT FROM myta\n-æøåble" == actual
def test_select_subselect():
sql = """SELECT id, name FROM (
SELECT id, "not a FROM ''value" FROM mytable WHERE id = 2323
) LIMIT 20"""
actual = extract_signature(sql)
assert "SELECT FROM mytable" == actual
def test_select_subselect_with_alias():
sql = """
SELECT count(*)
FROM (
SELECT count(id) AS some_alias, some_column
FROM mytable
GROUP BY some_colun
HAVING count(id) > 1
) AS foo
"""
actual = extract_signature(sql)
assert "SELECT FROM mytable" == actual
def test_select_with_multiple_tables():
sql = """SELECT count(table2.id)
FROM table1, table2, table2
WHERE table2.id = table1.table2_id
"""
actual = extract_signature(sql)
assert "SELECT FROM table1" == actual
def test_select_with_invalid_literal():
sql = "SELECT \"neverending literal FROM (SELECT * FROM ..."""
actual = extract_signature(sql)
assert "SELECT FROM" == actual
def test_savepoint():
sql = """SAVEPOINT x_asd1234"""
actual = extract_signature(sql)
assert "SAVEPOINT" == actual
def test_begin():
sql = """BEGIN"""
actual = extract_signature(sql)
assert "BEGIN" == actual
def test_create_index_with_name():
sql = """CREATE INDEX myindex ON mytable"""
actual = extract_signature(sql)
assert "CREATE INDEX" == actual
def test_create_index_without_name():
sql = """CREATE INDEX ON mytable"""
actual = extract_signature(sql)
assert "CREATE INDEX" == actual
def test_drop_table():
sql = """DROP TABLE mytable"""
actual = extract_signature(sql)
assert "DROP TABLE" == actual
def test_multi_statement_sql():
sql = """CREATE TABLE mytable; SELECT * FROM mytable; DROP TABLE mytable"""
actual = extract_signature(sql)
assert "CREATE TABLE" == actual
| true | true |
f7341a9c8be3fa88dd45fd556c704ded0c5a9d36 | 4,597 | py | Python | slicing.py | jw03070/Advanced-DeepSleepNet | b58d71971be28c8517f61731b8ee933a5bbf3f0a | [
"Apache-2.0"
] | 1 | 2020-08-21T06:11:53.000Z | 2020-08-21T06:11:53.000Z | slicing.py | leesk212/Advanced-DeepSleepNet | b58d71971be28c8517f61731b8ee933a5bbf3f0a | [
"Apache-2.0"
] | null | null | null | slicing.py | leesk212/Advanced-DeepSleepNet | b58d71971be28c8517f61731b8ee933a5bbf3f0a | [
"Apache-2.0"
] | 1 | 2020-08-21T06:06:49.000Z | 2020-08-21T06:06:49.000Z | # -*- coding: utf-8 -*-
"""
@author: BSW
"""
import numpy as np
import os
def slicing(filename,data):
wc=1
n1c=1
n2c=1
n3c=1
n4c=1
t=0
npz = np.load(data)
x = npz['x']
y = npz['y']
os.makedirs("./data/"+filename[:-3], exist_ok=True)
os.makedirs("./data/"+filename[:-3]+"/1D_Wake", exist_ok=True)
os.makedirs("./data/"+filename[:-3]+"/1D_N1", exist_ok=True)
os.makedirs("./data/"+filename[:-3]+"/1D_N2", exist_ok=True)
os.makedirs("./data/"+filename[:-3]+"/1D_N3", exist_ok=True)
os.makedirs("./data/"+filename[:-3]+"/1D_Rem", exist_ok=True)
for i in y:
if(i==0):
if(wc<10):
np.savez("./data/"+filename[:-3]+"/1D_Wake/"+"0000"+str(wc)+".npz",x=x[t,:,0])
elif(wc>=10 and wc<100):
np.savez("./data/"+filename[:-3]+"/1D_Wake/"+"000"+str(wc)+".npz",x=x[t,:,0])
elif(wc>=100 and wc<1000):
np.savez("./data/"+filename[:-3]+"/1D_Wake/"+"00"+str(wc)+".npz",x=x[t,:,0])
elif(wc>=1000 and wc<10000):
np.savez("./data/"+filename[:-3]+"/1D_Wake/"+"0"+str(wc)+".npz",x=x[t,:,0])
else:
np.savez("./data/"+filename[:-3]+"/1D_Wake/"+str(wc)+".npz",x=x[t,:,0])
wc+=1
t+=1
if(i==1):
if(n1c<10):
np.savez("./data/"+filename[:-3]+"/1D_N1/"+"0000"+str(n1c)+".npz",x=x[t,:,0])
elif(n1c>=10 and n1c<100):
np.savez("./data/"+filename[:-3]+"/1D_N1/"+"000"+str(n1c)+".npz",x=x[t,:,0])
elif(n1c>=100 and n1c<1000):
np.savez("./data/"+filename[:-3]+"/1D_N1/"+"00"+str(n1c)+".npz",x=x[t,:,0])
elif(n1c>=1000 and n1c<10000):
np.savez("./data/"+filename[:-3]+"/1D_N1/"+"0"+str(n1c)+".npz",x=x[t,:,0])
else:
np.savez("./data/"+filename[:-3]+"/1D_N1/"+str(n1c)+".npz",x=x[t,:,0])
n1c+=1
t+=1
if(i==2):
if(n2c<10):
np.savez("./data/"+filename[:-3]+"/1D_N2/"+"0000"+str(n2c)+".npz",x=x[t,:,0])
elif(n2c>=10 and n2c<100):
np.savez("./data/"+filename[:-3]+"/1D_N2/"+"000"+str(n2c)+".npz",x=x[t,:,0])
elif(n2c>=100 and n2c<1000):
np.savez("./data/"+filename[:-3]+"/1D_N2/"+"00"+str(n2c)+".npz",x=x[t,:,0])
elif(n2c>=1000 and n2c<10000):
np.savez("./data/"+filename[:-3]+"/1D_N2/"+"0"+str(n2c)+".npz",x=x[t,:,0])
else:
np.savez("./data/"+filename[:-3]+"/1D_N2/"+str(n2c)+".npz",x=x[t,:,0])
n2c+=1
t+=1
if(i==3):
if(n3c<10):
np.savez("./data/"+filename[:-3]+"/1D_N3/"+"0000"+str(n3c)+".npz",x=x[t,:,0])
elif(n3c>=10 and n3c<100):
np.savez("./data/"+filename[:-3]+"/1D_N3/"+"000"+str(n3c)+".npz",x=x[t,:,0])
elif(n3c>=100 and n3c<1000):
np.savez("./data/"+filename[:-3]+"/1D_N3/"+"00"+str(n3c)+".npz",x=x[t,:,0])
elif(n3c>=1000 and n3c<10000):
np.savez("./data/"+filename[:-3]+"/1D_N3/"+"0"+str(n3c)+".npz",x=x[t,:,0])
else:
np.savez("./data/"+filename[:-3]+"/1D_N3/"+str(n3c)+".npz",x=x[t,:,0])
n3c+=1
t+=1
if(i==4):
if(n4c<10):
np.savez("./data/"+filename[:-3]+"/1D_Rem/"+"0000"+str(n4c)+".npz",x=x[t,:,0])
elif(n4c>=10 and n4c<100):
np.savez("./data/"+filename[:-3]+"/1D_Rem/"+"000"+str(n4c)+".npz",x=x[t,:,0])
elif(n4c>=100 and n4c<1000):
np.savez("./data/"+filename[:-3]+"/1D_Rem/"+"00"+str(n4c)+".npz",x=x[t,:,0])
elif(n4c>=1000 and n4c<10000):
np.savez("./data/"+filename[:-3]+"/1D_Rem/"+"0"+str(n4c)+".npz",x=x[t,:,0])
else:
np.savez("./data/"+filename[:-3]+"/1D_Rem/"+str(n4c)+".npz",x=x[t,:,0])
n4c+=1
t+=1
def search(dirname):
filenames = os.listdir(dirname)
for filename in filenames:
full_filename = os.path.join(dirname, filename)
ext = os.path.splitext(full_filename)[-1]
if ext == '.npz':
slicing(filename,full_filename)
pass
if __name__ == '__main__':
name = os.path.dirname( os.path.abspath( __file__ ) )
Dataset_dir = "npzdata"
Dataset_dir = name + '\\' + Dataset_dir + '\\'
os.makedirs('data', exist_ok=True)
search(Dataset_dir)
| 39.62931 | 94 | 0.451381 |
import numpy as np
import os
def slicing(filename,data):
wc=1
n1c=1
n2c=1
n3c=1
n4c=1
t=0
npz = np.load(data)
x = npz['x']
y = npz['y']
os.makedirs("./data/"+filename[:-3], exist_ok=True)
os.makedirs("./data/"+filename[:-3]+"/1D_Wake", exist_ok=True)
os.makedirs("./data/"+filename[:-3]+"/1D_N1", exist_ok=True)
os.makedirs("./data/"+filename[:-3]+"/1D_N2", exist_ok=True)
os.makedirs("./data/"+filename[:-3]+"/1D_N3", exist_ok=True)
os.makedirs("./data/"+filename[:-3]+"/1D_Rem", exist_ok=True)
for i in y:
if(i==0):
if(wc<10):
np.savez("./data/"+filename[:-3]+"/1D_Wake/"+"0000"+str(wc)+".npz",x=x[t,:,0])
elif(wc>=10 and wc<100):
np.savez("./data/"+filename[:-3]+"/1D_Wake/"+"000"+str(wc)+".npz",x=x[t,:,0])
elif(wc>=100 and wc<1000):
np.savez("./data/"+filename[:-3]+"/1D_Wake/"+"00"+str(wc)+".npz",x=x[t,:,0])
elif(wc>=1000 and wc<10000):
np.savez("./data/"+filename[:-3]+"/1D_Wake/"+"0"+str(wc)+".npz",x=x[t,:,0])
else:
np.savez("./data/"+filename[:-3]+"/1D_Wake/"+str(wc)+".npz",x=x[t,:,0])
wc+=1
t+=1
if(i==1):
if(n1c<10):
np.savez("./data/"+filename[:-3]+"/1D_N1/"+"0000"+str(n1c)+".npz",x=x[t,:,0])
elif(n1c>=10 and n1c<100):
np.savez("./data/"+filename[:-3]+"/1D_N1/"+"000"+str(n1c)+".npz",x=x[t,:,0])
elif(n1c>=100 and n1c<1000):
np.savez("./data/"+filename[:-3]+"/1D_N1/"+"00"+str(n1c)+".npz",x=x[t,:,0])
elif(n1c>=1000 and n1c<10000):
np.savez("./data/"+filename[:-3]+"/1D_N1/"+"0"+str(n1c)+".npz",x=x[t,:,0])
else:
np.savez("./data/"+filename[:-3]+"/1D_N1/"+str(n1c)+".npz",x=x[t,:,0])
n1c+=1
t+=1
if(i==2):
if(n2c<10):
np.savez("./data/"+filename[:-3]+"/1D_N2/"+"0000"+str(n2c)+".npz",x=x[t,:,0])
elif(n2c>=10 and n2c<100):
np.savez("./data/"+filename[:-3]+"/1D_N2/"+"000"+str(n2c)+".npz",x=x[t,:,0])
elif(n2c>=100 and n2c<1000):
np.savez("./data/"+filename[:-3]+"/1D_N2/"+"00"+str(n2c)+".npz",x=x[t,:,0])
elif(n2c>=1000 and n2c<10000):
np.savez("./data/"+filename[:-3]+"/1D_N2/"+"0"+str(n2c)+".npz",x=x[t,:,0])
else:
np.savez("./data/"+filename[:-3]+"/1D_N2/"+str(n2c)+".npz",x=x[t,:,0])
n2c+=1
t+=1
if(i==3):
if(n3c<10):
np.savez("./data/"+filename[:-3]+"/1D_N3/"+"0000"+str(n3c)+".npz",x=x[t,:,0])
elif(n3c>=10 and n3c<100):
np.savez("./data/"+filename[:-3]+"/1D_N3/"+"000"+str(n3c)+".npz",x=x[t,:,0])
elif(n3c>=100 and n3c<1000):
np.savez("./data/"+filename[:-3]+"/1D_N3/"+"00"+str(n3c)+".npz",x=x[t,:,0])
elif(n3c>=1000 and n3c<10000):
np.savez("./data/"+filename[:-3]+"/1D_N3/"+"0"+str(n3c)+".npz",x=x[t,:,0])
else:
np.savez("./data/"+filename[:-3]+"/1D_N3/"+str(n3c)+".npz",x=x[t,:,0])
n3c+=1
t+=1
if(i==4):
if(n4c<10):
np.savez("./data/"+filename[:-3]+"/1D_Rem/"+"0000"+str(n4c)+".npz",x=x[t,:,0])
elif(n4c>=10 and n4c<100):
np.savez("./data/"+filename[:-3]+"/1D_Rem/"+"000"+str(n4c)+".npz",x=x[t,:,0])
elif(n4c>=100 and n4c<1000):
np.savez("./data/"+filename[:-3]+"/1D_Rem/"+"00"+str(n4c)+".npz",x=x[t,:,0])
elif(n4c>=1000 and n4c<10000):
np.savez("./data/"+filename[:-3]+"/1D_Rem/"+"0"+str(n4c)+".npz",x=x[t,:,0])
else:
np.savez("./data/"+filename[:-3]+"/1D_Rem/"+str(n4c)+".npz",x=x[t,:,0])
n4c+=1
t+=1
def search(dirname):
filenames = os.listdir(dirname)
for filename in filenames:
full_filename = os.path.join(dirname, filename)
ext = os.path.splitext(full_filename)[-1]
if ext == '.npz':
slicing(filename,full_filename)
pass
if __name__ == '__main__':
name = os.path.dirname( os.path.abspath( __file__ ) )
Dataset_dir = "npzdata"
Dataset_dir = name + '\\' + Dataset_dir + '\\'
os.makedirs('data', exist_ok=True)
search(Dataset_dir)
| true | true |
f7341ad925a2f9bab834873ceba073a569039b88 | 811 | py | Python | otcextensions/tests/functional/sdk/mrs/v1/test_service.py | zsoltn/python-otcextensions | 4c0fa22f095ebd5f9636ae72acbae5048096822c | [
"Apache-2.0"
] | 10 | 2018-03-03T17:59:59.000Z | 2020-01-08T10:03:00.000Z | otcextensions/tests/functional/sdk/mrs/v1/test_service.py | zsoltn/python-otcextensions | 4c0fa22f095ebd5f9636ae72acbae5048096822c | [
"Apache-2.0"
] | 208 | 2020-02-10T08:27:46.000Z | 2022-03-29T15:24:21.000Z | otcextensions/tests/functional/sdk/mrs/v1/test_service.py | zsoltn/python-otcextensions | 4c0fa22f095ebd5f9636ae72acbae5048096822c | [
"Apache-2.0"
] | 15 | 2020-04-01T20:45:54.000Z | 2022-03-23T12:45:43.000Z | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack import _log
from otcextensions.tests.functional import base
_logger = _log.setup_logging('openstack')
class TestService(base.BaseFunctionalTest):
def test_initialize(self):
client = self.conn.mrs
self.assertIsNotNone(client)
| 32.44 | 75 | 0.759556 |
from openstack import _log
from otcextensions.tests.functional import base
_logger = _log.setup_logging('openstack')
class TestService(base.BaseFunctionalTest):
def test_initialize(self):
client = self.conn.mrs
self.assertIsNotNone(client)
| true | true |
f7341b46721d8f8e196cfa24f2fb38ec8139fec9 | 1,250 | py | Python | falcon_kit/util/alarm.py | PacificBiosciences/falcon3 | fde93d4ed79746cd280006bca6808e6975585738 | [
"BSD-3-Clause-Clear"
] | null | null | null | falcon_kit/util/alarm.py | PacificBiosciences/falcon3 | fde93d4ed79746cd280006bca6808e6975585738 | [
"BSD-3-Clause-Clear"
] | null | null | null | falcon_kit/util/alarm.py | PacificBiosciences/falcon3 | fde93d4ed79746cd280006bca6808e6975585738 | [
"BSD-3-Clause-Clear"
] | 5 | 2020-07-22T14:10:16.000Z | 2021-04-26T17:07:05.000Z | """Special handling for exceptions, for the UI.
"""
def alarm(e):
"""
Write traceback into PBFALCON_ERRFILE (until we stop using pbfalcon).
Write a special JSON object expected by pbcommand.models.common.
"""
import datetime
import os
import traceback
import uuid
from ..io import serialize
tb = traceback.format_exc()
# pbfalcon wants us to write errs here.
errfile = os.environ.get('PBFALCON_ERRFILE')
if errfile:
with open(errfile, 'w') as ofs:
ofs.write(tb) # in python3, this will include the entire chain of exceptions
# this is propagated to SMRT Link UI
# see PacBioAlarm class in pbcommand.models.common for details -- nat
special = [
{
"exception": e.__class__.__name__,
"info": tb,
"message": str(e) + "\n" + str(e.__cause__),
"name": e.__class__.__name__,
"severity": "ERROR",
"owner": "python3",
"createdAt": datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S'),
"id": str(uuid.uuid4())
}
]
# Technically, we should add "causes" recursively, but "info" will include the full chain anyway.
serialize('alarms.json', special)
| 32.051282 | 101 | 0.6032 |
def alarm(e):
import datetime
import os
import traceback
import uuid
from ..io import serialize
tb = traceback.format_exc()
errfile = os.environ.get('PBFALCON_ERRFILE')
if errfile:
with open(errfile, 'w') as ofs:
ofs.write(tb)
special = [
{
"exception": e.__class__.__name__,
"info": tb,
"message": str(e) + "\n" + str(e.__cause__),
"name": e.__class__.__name__,
"severity": "ERROR",
"owner": "python3",
"createdAt": datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S'),
"id": str(uuid.uuid4())
}
]
serialize('alarms.json', special)
| true | true |
f7341d85b42e46f9636103b6ce706ad11869ce0f | 721 | py | Python | dp/HanoiTowers.py | ykumards/Algorithms | 53767683644c3c766dfc5d7347b1fc71f43256b2 | [
"WTFPL"
] | null | null | null | dp/HanoiTowers.py | ykumards/Algorithms | 53767683644c3c766dfc5d7347b1fc71f43256b2 | [
"WTFPL"
] | null | null | null | dp/HanoiTowers.py | ykumards/Algorithms | 53767683644c3c766dfc5d7347b1fc71f43256b2 | [
"WTFPL"
] | null | null | null | """
Given three towers and n sorted disks placed on it, we
have to move all the disks from one tower to the other
while using the third one as buffer.
There are three rules that each move should follow.
"""
def move(start, buffr, end, n):
"""
By using the recursion from base case to move upwards
we can change the roles of each of the three towers.
The base case will be moving one disk from start to end.
This can be done without any buffer, in one step.
The behaviour of the function is modified by what type
of the three stacks we send into it.
"""
if n < 2:
x = start.pop()
return end.append(x)
top = start.pop()
buffr = move(start, end, buffr, n-1)
| 31.347826 | 60 | 0.668516 |
def move(start, buffr, end, n):
if n < 2:
x = start.pop()
return end.append(x)
top = start.pop()
buffr = move(start, end, buffr, n-1)
| true | true |
f7341dfa9ee065b402c636e541f05fc4d8673ba8 | 1,251 | py | Python | ietf/secr/areas/tests.py | hassanakbar4/ietfdb | cabee059092ae776015410640226064331c293b7 | [
"BSD-3-Clause"
] | 25 | 2022-03-05T08:26:52.000Z | 2022-03-30T15:45:42.000Z | ietf/secr/areas/tests.py | hassanakbar4/ietfdb | cabee059092ae776015410640226064331c293b7 | [
"BSD-3-Clause"
] | 219 | 2022-03-04T17:29:12.000Z | 2022-03-31T21:16:14.000Z | ietf/secr/areas/tests.py | hassanakbar4/ietfdb | cabee059092ae776015410640226064331c293b7 | [
"BSD-3-Clause"
] | 22 | 2022-03-04T15:34:34.000Z | 2022-03-28T13:30:59.000Z | from django.urls import reverse
from ietf.group.factories import GroupFactory, GroupEventFactory
from ietf.group.models import Group, GroupEvent
from ietf.person.models import Person
from ietf.utils.test_utils import TestCase
SECR_USER='secretary'
def augment_data():
system = Person.objects.get(name="(System)")
area = Group.objects.get(acronym='farfut')
GroupEvent.objects.create(group=area,
type='started',
by=system)
class SecrAreasTestCase(TestCase):
def test_main(self):
"Main Test"
GroupFactory(type_id='area')
url = reverse('ietf.secr.areas.views.list_areas')
self.client.login(username="secretary", password="secretary+password")
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
def test_view(self):
"View Test"
area = GroupEventFactory(type='started',group__type_id='area').group
url = reverse('ietf.secr.areas.views.view', kwargs={'name':area.acronym})
self.client.login(username="secretary", password="secretary+password")
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
| 35.742857 | 81 | 0.658673 | from django.urls import reverse
from ietf.group.factories import GroupFactory, GroupEventFactory
from ietf.group.models import Group, GroupEvent
from ietf.person.models import Person
from ietf.utils.test_utils import TestCase
SECR_USER='secretary'
def augment_data():
system = Person.objects.get(name="(System)")
area = Group.objects.get(acronym='farfut')
GroupEvent.objects.create(group=area,
type='started',
by=system)
class SecrAreasTestCase(TestCase):
def test_main(self):
GroupFactory(type_id='area')
url = reverse('ietf.secr.areas.views.list_areas')
self.client.login(username="secretary", password="secretary+password")
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
def test_view(self):
area = GroupEventFactory(type='started',group__type_id='area').group
url = reverse('ietf.secr.areas.views.view', kwargs={'name':area.acronym})
self.client.login(username="secretary", password="secretary+password")
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
| true | true |
f7341e2feffa1989bf80d861c14a2909fd98ccb0 | 549 | py | Python | example/middleware.py | chartbeat/django-social-auth | cf72986ab3c01c1dae2c3baebd00c540fb4250d8 | [
"BSD-2-Clause",
"BSD-3-Clause"
] | 1 | 2019-08-24T00:33:13.000Z | 2019-08-24T00:33:13.000Z | example/middleware.py | tonylampada/django-social-auth | a77c0dd1b160c391098b6c157838d54516f4d334 | [
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null | example/middleware.py | tonylampada/django-social-auth | a77c0dd1b160c391098b6c157838d54516f4d334 | [
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null | from django.core.urlresolvers import reverse
from social_auth.backends.exceptions import AuthAlreadyAssociated
from social_auth.middleware import SocialAuthExceptionMiddleware
class ExampleSocialAuthExceptionMiddleware(SocialAuthExceptionMiddleware):
def get_message(self, request, exception):
if isinstance(exception, AuthAlreadyAssociated):
return "Somebody is already using that account!"
return "We got some splainin' to do!"
def get_redirect_uri(self, request, exception):
return reverse('done')
| 36.6 | 74 | 0.777778 | from django.core.urlresolvers import reverse
from social_auth.backends.exceptions import AuthAlreadyAssociated
from social_auth.middleware import SocialAuthExceptionMiddleware
class ExampleSocialAuthExceptionMiddleware(SocialAuthExceptionMiddleware):
def get_message(self, request, exception):
if isinstance(exception, AuthAlreadyAssociated):
return "Somebody is already using that account!"
return "We got some splainin' to do!"
def get_redirect_uri(self, request, exception):
return reverse('done')
| true | true |
f7341e6c9c32a76959a3fd6e6bbc2c1258099c06 | 69 | py | Python | project/db/serializers/__init__.py | sunday-ucheawaji/API- | 07fb4b596cfe8e85b8575a8e70a8c886d3ab627a | [
"MIT"
] | null | null | null | project/db/serializers/__init__.py | sunday-ucheawaji/API- | 07fb4b596cfe8e85b8575a8e70a8c886d3ab627a | [
"MIT"
] | null | null | null | project/db/serializers/__init__.py | sunday-ucheawaji/API- | 07fb4b596cfe8e85b8575a8e70a8c886d3ab627a | [
"MIT"
] | 1 | 2022-02-09T14:13:20.000Z | 2022-02-09T14:13:20.000Z | from db.serializers.reset_serializer import SetNewPasswordSerializer
| 34.5 | 68 | 0.913043 | from db.serializers.reset_serializer import SetNewPasswordSerializer
| true | true |
f7341eb4083ce663caf7f5d6ce40e424cc5a4bb1 | 1,739 | py | Python | example_BYTES.py | djtech-dev/PyVM | 1edda436ce7073d0cecbf16f5cab2509895d953c | [
"MIT"
] | 75 | 2017-09-22T22:36:13.000Z | 2022-03-20T16:18:27.000Z | example_BYTES.py | djtech-dev/PyVM | 1edda436ce7073d0cecbf16f5cab2509895d953c | [
"MIT"
] | 7 | 2019-05-10T19:15:08.000Z | 2021-08-24T16:03:34.000Z | example_BYTES.py | djtech-dev/PyVM | 1edda436ce7073d0cecbf16f5cab2509895d953c | [
"MIT"
] | 14 | 2018-07-02T02:49:46.000Z | 2022-02-22T15:24:47.000Z | import cProfile, pstats, io
import re
import VM
def Stats(pr):
s = io.StringIO()
sortby = 'tottime'
ps = pstats.Stats(pr, stream=s).sort_stats(sortby)
ps.print_stats()
print(s.getvalue())
def parse_code(code: str) -> bytes:
binary = ''
regex = re.compile(r"[0-9a-f]+:\s+([^;]+)\s*;.*", re.DOTALL)
for i, line in enumerate(code.strip().splitlines(keepends=False)):
if line.startswith(';'):
continue
match = regex.match(line)
assert match is not None, f"Could not parse code (line {i})"
binary += match.group(1)
return bytes.fromhex(binary)
if __name__ == "__main__":
code = """
; section .text
; _start:
0: b8 04 00 00 00 ;mov eax,0x4 ; SYS_WRITE
5: bb 01 00 00 00 ;mov ebx,0x1 ; STDOUT
a: b9 29 00 00 00 ;mov ecx,0x29 ; address of the message
f: ba 0e 00 00 00 ;mov edx,0xe ; length of the message
14: cd 80 ;int 0x80 ; interrupt kernel
16: e9 02 00 00 00 ;jmp 0x1d ; _exit
1b: 89 c8 ;mov eax,ecx ; this is here to mess things up if JMP doesn't work
; _exit:
1d: b8 01 00 00 00 ;mov eax,0x1 ; SYS_EXIT
22: bb 00 00 00 00 ;mov ebx,0x0 ; EXIT_SUCCESS
27: cd 80 ;int 0x80 ; interrupt kernel
; section .data
29: 48 65 6C 6C 6F 2C 20 77 6F 72 6C 64 21 0A ; "Hello, world!",10
"""
vm = VM.VMKernel(500)
raw_binary = parse_code(code)
# pr = cProfile.Profile()
# pr.enable()
vm.execute(VM.ExecutionStrategy.BYTES, raw_binary)
# pr.disable()
# Stats(pr)
| 30.508772 | 98 | 0.53134 | import cProfile, pstats, io
import re
import VM
def Stats(pr):
s = io.StringIO()
sortby = 'tottime'
ps = pstats.Stats(pr, stream=s).sort_stats(sortby)
ps.print_stats()
print(s.getvalue())
def parse_code(code: str) -> bytes:
binary = ''
regex = re.compile(r"[0-9a-f]+:\s+([^;]+)\s*;.*", re.DOTALL)
for i, line in enumerate(code.strip().splitlines(keepends=False)):
if line.startswith(';'):
continue
match = regex.match(line)
assert match is not None, f"Could not parse code (line {i})"
binary += match.group(1)
return bytes.fromhex(binary)
if __name__ == "__main__":
code = """
; section .text
; _start:
0: b8 04 00 00 00 ;mov eax,0x4 ; SYS_WRITE
5: bb 01 00 00 00 ;mov ebx,0x1 ; STDOUT
a: b9 29 00 00 00 ;mov ecx,0x29 ; address of the message
f: ba 0e 00 00 00 ;mov edx,0xe ; length of the message
14: cd 80 ;int 0x80 ; interrupt kernel
16: e9 02 00 00 00 ;jmp 0x1d ; _exit
1b: 89 c8 ;mov eax,ecx ; this is here to mess things up if JMP doesn't work
; _exit:
1d: b8 01 00 00 00 ;mov eax,0x1 ; SYS_EXIT
22: bb 00 00 00 00 ;mov ebx,0x0 ; EXIT_SUCCESS
27: cd 80 ;int 0x80 ; interrupt kernel
; section .data
29: 48 65 6C 6C 6F 2C 20 77 6F 72 6C 64 21 0A ; "Hello, world!",10
"""
vm = VM.VMKernel(500)
raw_binary = parse_code(code)
# pr = cProfile.Profile()
# pr.enable()
vm.execute(VM.ExecutionStrategy.BYTES, raw_binary)
# pr.disable()
# Stats(pr)
| true | true |
f7341f359fe9b0fe0253f12df1832305e8d83775 | 7,912 | py | Python | configs/ruby/MESI_Two_Level.py | dai-pch/gem5-tnuca | 621df40b549f8ed56bcf4f5fd7498f1816f19732 | [
"BSD-3-Clause"
] | null | null | null | configs/ruby/MESI_Two_Level.py | dai-pch/gem5-tnuca | 621df40b549f8ed56bcf4f5fd7498f1816f19732 | [
"BSD-3-Clause"
] | null | null | null | configs/ruby/MESI_Two_Level.py | dai-pch/gem5-tnuca | 621df40b549f8ed56bcf4f5fd7498f1816f19732 | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) 2006-2007 The Regents of The University of Michigan
# Copyright (c) 2009 Advanced Micro Devices, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Brad Beckmann
import math
import m5
from m5.objects import *
from m5.defines import buildEnv
from Ruby import create_topology
#
# Note: the L1 Cache latency is only used by the sequencer on fast path hits
#
class L1Cache(RubyCache):
    """L1 cache parameters for the MESI_Two_Level protocol.

    Note: this latency (in cycles) is only used by the sequencer on
    fast path hits.
    """
    latency = 3
#
# Note: the L2 Cache latency is not currently used
#
class L2Cache(RubyCache):
    """Shared L2 cache parameters for the MESI_Two_Level protocol.

    Note: this latency value is not currently used by the protocol
    machinery.
    """
    latency = 15
def define_options(parser):
    """Hook for registering protocol-specific command-line options.

    The MESI_Two_Level protocol defines no additional options, so this
    is intentionally a no-op; the function exists only so the common
    Ruby configuration code can call it uniformly for every protocol.
    """
    pass
def create_system(options, system, piobus, dma_ports, ruby_system):
    """Instantiate the MESI_Two_Level Ruby memory system.

    Builds one L1 controller + sequencer per CPU, ``options.num_l2caches``
    banked L2 controllers, one directory controller (backed by a memory
    controller) per ``options.num_dirs``, and one DMA controller per entry in
    ``dma_ports``, then connects everything with the selected topology.

    Returns a ``(cpu_sequencers, dir_cntrl_nodes, topology)`` tuple.
    """
    if buildEnv['PROTOCOL'] != 'MESI_Two_Level':
        fatal("This script requires the MESI_Two_Level protocol to be built.")
    cpu_sequencers = []
    #
    # The ruby network creation expects the list of nodes in the system to be
    # consistent with the NetDest list. Therefore the l1 controller nodes must be
    # listed before the directory nodes and directory nodes before dma nodes, etc.
    #
    l1_cntrl_nodes = []
    l2_cntrl_nodes = []
    dir_cntrl_nodes = []
    dma_cntrl_nodes = []
    #
    # Must create the individual controllers before the network to ensure the
    # controller constructors are called before the network constructor
    #
    l2_bits = int(math.log(options.num_l2caches, 2))
    block_size_bits = int(math.log(options.cacheline_size, 2))
    for i in xrange(options.num_cpus):
        # Split I/D L1 caches sharing the same index geometry.
        l1i_cache = L1Cache(size = options.l1i_size,
                            assoc = options.l1i_assoc,
                            start_index_bit = block_size_bits,
                            is_icache = True)
        l1d_cache = L1Cache(size = options.l1d_size,
                            assoc = options.l1d_assoc,
                            start_index_bit = block_size_bits,
                            is_icache = False)
        prefetcher = RubyPrefetcher.Prefetcher()
        l1_cntrl = L1Cache_Controller(version = i,
                                      L1Icache = l1i_cache,
                                      L1Dcache = l1d_cache,
                                      l2_select_num_bits = l2_bits,
                                      send_evictions = (
                                          options.cpu_type == "detailed"),
                                      prefetcher = prefetcher,
                                      ruby_system = ruby_system,
                                      transitions_per_cycle=options.ports,
                                      enable_prefetch = False)
        cpu_seq = RubySequencer(version = i,
                                icache = l1i_cache,
                                dcache = l1d_cache,
                                ruby_system = ruby_system)
        l1_cntrl.sequencer = cpu_seq
        if piobus is not None:
            cpu_seq.pio_port = piobus.slave
        # setattr() replaces the old exec()-based assignment; it attaches the
        # same numbered attribute without compiling a code string.
        setattr(ruby_system, "l1_cntrl%d" % i, l1_cntrl)
        # Add controllers and sequencers to the appropriate lists.
        cpu_sequencers.append(cpu_seq)
        l1_cntrl_nodes.append(l1_cntrl)
    l2_index_start = block_size_bits + l2_bits
    for i in xrange(options.num_l2caches):
        # One banked L2 controller per requested L2 cache.
        l2_cache = L2Cache(size = options.l2_size,
                           assoc = options.l2_assoc,
                           start_index_bit = l2_index_start)
        l2_cntrl = L2Cache_Controller(version = i,
                                      L2cache = l2_cache,
                                      transitions_per_cycle=options.ports,
                                      ruby_system = ruby_system)
        setattr(ruby_system, "l2_cntrl%d" % i, l2_cntrl)
        l2_cntrl_nodes.append(l2_cntrl)
    # The directories evenly split the physical address space.
    phys_mem_size = sum(map(lambda r: r.size(), system.mem_ranges))
    assert(phys_mem_size % options.num_dirs == 0)
    mem_module_size = phys_mem_size / options.num_dirs
    # Run each of the ruby memory controllers at a ratio of the frequency of
    # the ruby system
    # clk_divider value is a fix to pass regression.
    ruby_system.memctrl_clk_domain = DerivedClockDomain(
        clk_domain=ruby_system.clk_domain,
        clk_divider=3)
    for i in xrange(options.num_dirs):
        # Directory controller plus the memory controller backing it.
        mem_cntrl = RubyMemoryControl(
            clk_domain = ruby_system.memctrl_clk_domain,
            version = i,
            ruby_system = ruby_system)
        dir_size = MemorySize('0B')
        dir_size.value = mem_module_size
        dir_cntrl = Directory_Controller(version = i,
                                         directory = \
                                         RubyDirectoryMemory(version = i,
                                                             size = dir_size,
                                                             use_map =
                                                             options.use_map),
                                         memBuffer = mem_cntrl,
                                         transitions_per_cycle = options.ports,
                                         ruby_system = ruby_system)
        setattr(ruby_system, "dir_cntrl%d" % i, dir_cntrl)
        dir_cntrl_nodes.append(dir_cntrl)
    for i, dma_port in enumerate(dma_ports):
        # One DMA controller/sequencer pair per external DMA port.
        dma_seq = DMASequencer(version = i,
                               ruby_system = ruby_system)
        dma_cntrl = DMA_Controller(version = i,
                                   dma_sequencer = dma_seq,
                                   transitions_per_cycle = options.ports,
                                   ruby_system = ruby_system)
        setattr(ruby_system, "dma_cntrl%d" % i, dma_cntrl)
        # Direct attribute access replaces the second exec(); dma_cntrl is the
        # same object just attached to ruby_system.
        dma_cntrl.dma_sequencer.slave = dma_port
        dma_cntrl_nodes.append(dma_cntrl)
    all_cntrls = l1_cntrl_nodes + \
                 l2_cntrl_nodes + \
                 dir_cntrl_nodes + \
                 dma_cntrl_nodes
    topology = create_topology(all_cntrls, options)
    return (cpu_sequencers, dir_cntrl_nodes, topology)
| 39.959596 | 82 | 0.586704 |
import math
import m5
from m5.objects import *
from m5.defines import buildEnv
from Ruby import create_topology
class L1Cache(RubyCache):
latency = 3
class L2Cache(RubyCache):
latency = 15
def define_options(parser):
return
def create_system(options, system, piobus, dma_ports, ruby_system):
if buildEnv['PROTOCOL'] != 'MESI_Two_Level':
fatal("This script requires the MESI_Two_Level protocol to be built.")
cpu_sequencers = []
l1_cntrl_nodes = []
l2_cntrl_nodes = []
dir_cntrl_nodes = []
dma_cntrl_nodes = []
l2_bits = int(math.log(options.num_l2caches, 2))
block_size_bits = int(math.log(options.cacheline_size, 2))
for i in xrange(options.num_cpus):
l1i_cache = L1Cache(size = options.l1i_size,
assoc = options.l1i_assoc,
start_index_bit = block_size_bits,
is_icache = True)
l1d_cache = L1Cache(size = options.l1d_size,
assoc = options.l1d_assoc,
start_index_bit = block_size_bits,
is_icache = False)
prefetcher = RubyPrefetcher.Prefetcher()
l1_cntrl = L1Cache_Controller(version = i,
L1Icache = l1i_cache,
L1Dcache = l1d_cache,
l2_select_num_bits = l2_bits,
send_evictions = (
options.cpu_type == "detailed"),
prefetcher = prefetcher,
ruby_system = ruby_system,
transitions_per_cycle=options.ports,
enable_prefetch = False)
cpu_seq = RubySequencer(version = i,
icache = l1i_cache,
dcache = l1d_cache,
ruby_system = ruby_system)
l1_cntrl.sequencer = cpu_seq
if piobus != None:
cpu_seq.pio_port = piobus.slave
exec("ruby_system.l1_cntrl%d = l1_cntrl" % i)
cpu_sequencers.append(cpu_seq)
l1_cntrl_nodes.append(l1_cntrl)
l2_index_start = block_size_bits + l2_bits
for i in xrange(options.num_l2caches):
l2_cache = L2Cache(size = options.l2_size,
assoc = options.l2_assoc,
start_index_bit = l2_index_start)
l2_cntrl = L2Cache_Controller(version = i,
L2cache = l2_cache,
transitions_per_cycle=options.ports,
ruby_system = ruby_system)
exec("ruby_system.l2_cntrl%d = l2_cntrl" % i)
l2_cntrl_nodes.append(l2_cntrl)
phys_mem_size = sum(map(lambda r: r.size(), system.mem_ranges))
assert(phys_mem_size % options.num_dirs == 0)
mem_module_size = phys_mem_size / options.num_dirs
ruby_system.memctrl_clk_domain = DerivedClockDomain(
clk_domain=ruby_system.clk_domain,
clk_divider=3)
for i in xrange(options.num_dirs):
mem_cntrl = RubyMemoryControl(
clk_domain = ruby_system.memctrl_clk_domain,
version = i,
ruby_system = ruby_system)
dir_size = MemorySize('0B')
dir_size.value = mem_module_size
dir_cntrl = Directory_Controller(version = i,
directory = \
RubyDirectoryMemory(version = i,
size = dir_size,
use_map =
options.use_map),
memBuffer = mem_cntrl,
transitions_per_cycle = options.ports,
ruby_system = ruby_system)
exec("ruby_system.dir_cntrl%d = dir_cntrl" % i)
dir_cntrl_nodes.append(dir_cntrl)
for i, dma_port in enumerate(dma_ports):
dma_seq = DMASequencer(version = i,
ruby_system = ruby_system)
dma_cntrl = DMA_Controller(version = i,
dma_sequencer = dma_seq,
transitions_per_cycle = options.ports,
ruby_system = ruby_system)
exec("ruby_system.dma_cntrl%d = dma_cntrl" % i)
exec("ruby_system.dma_cntrl%d.dma_sequencer.slave = dma_port" % i)
dma_cntrl_nodes.append(dma_cntrl)
all_cntrls = l1_cntrl_nodes + \
l2_cntrl_nodes + \
dir_cntrl_nodes + \
dma_cntrl_nodes
topology = create_topology(all_cntrls, options)
return (cpu_sequencers, dir_cntrl_nodes, topology)
| true | true |
f7341f4aa30c9f213243b95b20348a2b5e5e243b | 5,910 | py | Python | torchvision/datasets/hmdb51.py | jdsgomes/vision | c890a7e75ebeaaa75ae9ace4c203b7fc145df068 | [
"BSD-3-Clause"
] | 1 | 2022-02-14T09:16:02.000Z | 2022-02-14T09:16:02.000Z | torchvision/datasets/hmdb51.py | jdsgomes/vision | c890a7e75ebeaaa75ae9ace4c203b7fc145df068 | [
"BSD-3-Clause"
] | null | null | null | torchvision/datasets/hmdb51.py | jdsgomes/vision | c890a7e75ebeaaa75ae9ace4c203b7fc145df068 | [
"BSD-3-Clause"
] | null | null | null | import glob
import os
from typing import Optional, Callable, Tuple, Dict, Any, List
from torch import Tensor
from .folder import find_classes, make_dataset
from .video_utils import VideoClips
from .vision import VisionDataset
class HMDB51(VisionDataset):
    """
    `HMDB51 <https://serre-lab.clps.brown.edu/resource/hmdb-a-large-human-motion-database/>`_
    dataset.
    HMDB51 is an action recognition video dataset.
    This dataset consider every video as a collection of video clips of fixed size, specified
    by ``frames_per_clip``, where the step in frames between each clip is given by
    ``step_between_clips``.
    To give an example, for 2 videos with 10 and 15 frames respectively, if ``frames_per_clip=5``
    and ``step_between_clips=5``, the dataset size will be (2 + 3) = 5, where the first two
    elements will come from video 1, and the next three elements from video 2.
    Note that we drop clips which do not have exactly ``frames_per_clip`` elements, so not all
    frames in a video might be present.
    Internally, it uses a VideoClips object to handle clip creation.
    Args:
        root (string): Root directory of the HMDB51 Dataset.
        annotation_path (str): Path to the folder containing the split files.
        frames_per_clip (int): Number of frames in a clip.
        step_between_clips (int): Number of frames between each clip.
        fold (int, optional): Which fold to use. Should be between 1 and 3.
        train (bool, optional): If ``True``, creates a dataset from the train split,
            otherwise from the ``test`` split.
        transform (callable, optional): A function/transform that takes in a TxHxWxC video
            and returns a transformed version.
        output_format (str, optional): The format of the output video tensors (before transforms).
            Can be either "THWC" (default) or "TCHW".
    Returns:
        tuple: A 3-tuple with the following entries:
            - video (Tensor[T, H, W, C] or Tensor[T, C, H, W]): The `T` video frames
            - audio(Tensor[K, L]): the audio frames, where `K` is the number of channels
              and `L` is the number of points
            - label (int): class of the video clip
    """
    # Upstream archive locations; this class does not download them itself.
    data_url = "https://serre-lab.clps.brown.edu/wp-content/uploads/2013/10/hmdb51_org.rar"
    splits = {
        "url": "https://serre-lab.clps.brown.edu/wp-content/uploads/2013/10/test_train_splits.rar",
        "md5": "15e67781e70dcfbdce2d7dbb9b3344b5",
    }
    # Tag values used inside the official split files to mark membership.
    TRAIN_TAG = 1
    TEST_TAG = 2
    def __init__(
        self,
        root: str,
        annotation_path: str,
        frames_per_clip: int,
        step_between_clips: int = 1,
        frame_rate: Optional[int] = None,
        fold: int = 1,
        train: bool = True,
        transform: Optional[Callable] = None,
        _precomputed_metadata: Optional[Dict[str, Any]] = None,
        num_workers: int = 1,
        _video_width: int = 0,
        _video_height: int = 0,
        _video_min_dimension: int = 0,
        _audio_samples: int = 0,
        output_format: str = "THWC",
    ) -> None:
        super().__init__(root)
        if fold not in (1, 2, 3):
            raise ValueError(f"fold should be between 1 and 3, got {fold}")
        extensions = ("avi",)
        # One class per sub-directory of root; samples are (path, class_index) pairs.
        self.classes, class_to_idx = find_classes(self.root)
        self.samples = make_dataset(
            self.root,
            class_to_idx,
            extensions,
        )
        video_paths = [path for (path, _) in self.samples]
        # VideoClips indexes every fixed-size clip across all videos.
        video_clips = VideoClips(
            video_paths,
            frames_per_clip,
            step_between_clips,
            frame_rate,
            _precomputed_metadata,
            num_workers=num_workers,
            _video_width=_video_width,
            _video_height=_video_height,
            _video_min_dimension=_video_min_dimension,
            _audio_samples=_audio_samples,
            output_format=output_format,
        )
        # we bookkeep the full version of video clips because we want to be able
        # to return the meta data of full version rather than the subset version of
        # video clips
        self.full_video_clips = video_clips
        self.fold = fold
        self.train = train
        # Restrict the clip index to the videos of the requested fold/split.
        self.indices = self._select_fold(video_paths, annotation_path, fold, train)
        self.video_clips = video_clips.subset(self.indices)
        self.transform = transform
    @property
    def metadata(self) -> Dict[str, Any]:
        # Metadata of *all* videos, not just the selected fold subset.
        return self.full_video_clips.metadata
    def _select_fold(self, video_list: List[str], annotations_dir: str, fold: int, train: bool) -> List[int]:
        """Return the indices into ``video_list`` belonging to the given fold/split."""
        target_tag = self.TRAIN_TAG if train else self.TEST_TAG
        split_pattern_name = f"*test_split{fold}.txt"
        split_pattern_path = os.path.join(annotations_dir, split_pattern_name)
        annotation_paths = glob.glob(split_pattern_path)
        selected_files = set()
        for filepath in annotation_paths:
            with open(filepath) as fid:
                lines = fid.readlines()
            for line in lines:
                # Each split-file line is "<video_filename> <tag>".
                video_filename, tag_string = line.split()
                tag = int(tag_string)
                if tag == target_tag:
                    selected_files.add(video_filename)
        indices = []
        for video_index, video_path in enumerate(video_list):
            if os.path.basename(video_path) in selected_files:
                indices.append(video_index)
        return indices
    def __len__(self) -> int:
        return self.video_clips.num_clips()
    def __getitem__(self, idx: int) -> Tuple[Tensor, Tensor, int]:
        # Map the flat clip index back to its source video, then to its label.
        video, audio, _, video_idx = self.video_clips.get_clip(idx)
        sample_index = self.indices[video_idx]
        _, class_index = self.samples[sample_index]
        if self.transform is not None:
            video = self.transform(video)
        return video, audio, class_index
| 38.881579 | 109 | 0.636717 | import glob
import os
from typing import Optional, Callable, Tuple, Dict, Any, List
from torch import Tensor
from .folder import find_classes, make_dataset
from .video_utils import VideoClips
from .vision import VisionDataset
class HMDB51(VisionDataset):
data_url = "https://serre-lab.clps.brown.edu/wp-content/uploads/2013/10/hmdb51_org.rar"
splits = {
"url": "https://serre-lab.clps.brown.edu/wp-content/uploads/2013/10/test_train_splits.rar",
"md5": "15e67781e70dcfbdce2d7dbb9b3344b5",
}
TRAIN_TAG = 1
TEST_TAG = 2
def __init__(
self,
root: str,
annotation_path: str,
frames_per_clip: int,
step_between_clips: int = 1,
frame_rate: Optional[int] = None,
fold: int = 1,
train: bool = True,
transform: Optional[Callable] = None,
_precomputed_metadata: Optional[Dict[str, Any]] = None,
num_workers: int = 1,
_video_width: int = 0,
_video_height: int = 0,
_video_min_dimension: int = 0,
_audio_samples: int = 0,
output_format: str = "THWC",
) -> None:
super().__init__(root)
if fold not in (1, 2, 3):
raise ValueError(f"fold should be between 1 and 3, got {fold}")
extensions = ("avi",)
self.classes, class_to_idx = find_classes(self.root)
self.samples = make_dataset(
self.root,
class_to_idx,
extensions,
)
video_paths = [path for (path, _) in self.samples]
video_clips = VideoClips(
video_paths,
frames_per_clip,
step_between_clips,
frame_rate,
_precomputed_metadata,
num_workers=num_workers,
_video_width=_video_width,
_video_height=_video_height,
_video_min_dimension=_video_min_dimension,
_audio_samples=_audio_samples,
output_format=output_format,
)
self.full_video_clips = video_clips
self.fold = fold
self.train = train
self.indices = self._select_fold(video_paths, annotation_path, fold, train)
self.video_clips = video_clips.subset(self.indices)
self.transform = transform
@property
def metadata(self) -> Dict[str, Any]:
return self.full_video_clips.metadata
def _select_fold(self, video_list: List[str], annotations_dir: str, fold: int, train: bool) -> List[int]:
target_tag = self.TRAIN_TAG if train else self.TEST_TAG
split_pattern_name = f"*test_split{fold}.txt"
split_pattern_path = os.path.join(annotations_dir, split_pattern_name)
annotation_paths = glob.glob(split_pattern_path)
selected_files = set()
for filepath in annotation_paths:
with open(filepath) as fid:
lines = fid.readlines()
for line in lines:
video_filename, tag_string = line.split()
tag = int(tag_string)
if tag == target_tag:
selected_files.add(video_filename)
indices = []
for video_index, video_path in enumerate(video_list):
if os.path.basename(video_path) in selected_files:
indices.append(video_index)
return indices
def __len__(self) -> int:
return self.video_clips.num_clips()
def __getitem__(self, idx: int) -> Tuple[Tensor, Tensor, int]:
video, audio, _, video_idx = self.video_clips.get_clip(idx)
sample_index = self.indices[video_idx]
_, class_index = self.samples[sample_index]
if self.transform is not None:
video = self.transform(video)
return video, audio, class_index
| true | true |
f7341f69b66c89836096882d08cd417fb7779aaf | 22 | py | Python | privatebeta/__init__.py | fitoria/django-privatebeta | ef65130d3856d9444f0acc85c5bb590d09908d15 | [
"BSD-3-Clause"
] | 1 | 2015-11-05T13:42:05.000Z | 2015-11-05T13:42:05.000Z | privatebeta/__init__.py | fitoria/django-privatebeta | ef65130d3856d9444f0acc85c5bb590d09908d15 | [
"BSD-3-Clause"
] | null | null | null | privatebeta/__init__.py | fitoria/django-privatebeta | ef65130d3856d9444f0acc85c5bb590d09908d15 | [
"BSD-3-Clause"
] | null | null | null | # Let the wookie pass. | 22 | 22 | 0.727273 | true | true | |
f73420d6dbef227e5c9ccc1aa0c988e245d2f75a | 3,508 | py | Python | tools/metag_tools/blat_wrapper.py | ramezrawas/galaxy-1 | c03748dd49c060a68d07bce56eae33e0ba154414 | [
"CC-BY-3.0"
] | 6 | 2018-11-03T22:43:35.000Z | 2022-02-15T17:51:33.000Z | tools/metag_tools/blat_wrapper.py | ramezrawas/galaxy-1 | c03748dd49c060a68d07bce56eae33e0ba154414 | [
"CC-BY-3.0"
] | 7 | 2016-12-07T22:19:37.000Z | 2019-01-30T15:04:26.000Z | tools/metag_tools/blat_wrapper.py | ramezrawas/galaxy-1 | c03748dd49c060a68d07bce56eae33e0ba154414 | [
"CC-BY-3.0"
] | 10 | 2017-04-10T21:40:22.000Z | 2022-02-21T16:50:10.000Z | #!/usr/bin/env python
import os
import sys
import tempfile
assert sys.version_info[:2] >= (2.4)
def stop_err( msg ):
    """Report the error message on stderr, then terminate the script."""
    sys.stderr.write("{0}\n".format(msg))
    sys.exit()
def check_nib_file( dbkey, GALAXY_DATA_INDEX_DIR ):
    """Return the nib sequence path registered for *dbkey*, or ''.

    Scans "<GALAXY_DATA_INDEX_DIR>/alignseq.loc" for tab-separated records of
    the form "seq<TAB>dbkey<TAB>path" and returns the matching path; blank
    lines and '#' comments are skipped.
    """
    nib_file = "%s/alignseq.loc" % GALAXY_DATA_INDEX_DIR
    nibs = {}
    # Close the loc file explicitly (the original leaked the handle).
    loc = open( nib_file )
    try:
        for line in loc:
            line = line.rstrip( '\r\n' )
            if not line or line.startswith( "#" ):
                continue
            fields = line.split( '\t' )
            # Only "seq" records with at least (type, dbkey, path) count.
            if len( fields ) >= 3 and fields[0] == 'seq':
                nibs[fields[1]] = fields[2]
    finally:
        loc.close()
    return nibs.get( dbkey, '' )
def check_twobit_file( dbkey, GALAXY_DATA_INDEX_DIR ):
    """Return the twoBit sequence path registered for *dbkey*, or ''.

    Scans "<GALAXY_DATA_INDEX_DIR>/twobit.loc" for tab-separated records of
    the form "dbkey<TAB>path"; blank lines and '#' comments are skipped.
    """
    twobit_file = "%s/twobit.loc" % GALAXY_DATA_INDEX_DIR
    twobits = {}
    # Close the loc file explicitly (the original leaked the handle).
    loc = open( twobit_file )
    try:
        for line in loc:
            line = line.rstrip( '\r\n' )
            if not line or line.startswith( "#" ):
                continue
            fields = line.split( '\t' )
            if len( fields ) >= 2:
                twobits[fields[0]] = fields[1]
    finally:
        loc.close()
    return twobits.get( dbkey, '' )
def __main__():
    """Run blat against one or more target sequence files and append the
    pslx output of each run into a single result file.

    Positional argv: source_format, target_file, query_file, output_file,
    min_iden, tile_size, one_off, GALAXY_DATA_INDEX_DIR.
    """
    # I/O
    source_format = sys.argv[1]  # 0: dbkey; 1: upload file
    target_file = sys.argv[2]
    query_file = sys.argv[3]
    output_file = sys.argv[4]
    min_iden = sys.argv[5]
    tile_size = sys.argv[6]
    one_off = sys.argv[7]
    # The bare excepts deliberately catch both the conversion errors and the
    # range assertions below, turning them into a user-facing message.
    try:
        float(min_iden)
    except:
        stop_err('Invalid value for minimal identity.')
    try:
        test = int(tile_size)
        assert test >= 6 and test <= 18
    except:
        stop_err('Invalid value for tile size. DNA word size must be between 6 and 18.')
    try:
        test = int(one_off)
        assert test >= 0 and test <= int(tile_size)
    except:
        stop_err('Invalid value for mismatch numbers in the word')
    GALAXY_DATA_INDEX_DIR = sys.argv[8]
    all_files = []
    if source_format == '0':
        # check target genome: the target is a dbkey; resolve it to nib or
        # twoBit sequence files via the loc files.
        dbkey = target_file
        nib_path = check_nib_file( dbkey, GALAXY_DATA_INDEX_DIR )
        twobit_path = check_twobit_file( dbkey, GALAXY_DATA_INDEX_DIR )
        if not os.path.exists( nib_path ) and not os.path.exists( twobit_path ):
            stop_err("No sequences are available for %s, request them by reporting this error." % dbkey)
        # A nib entry points at a directory of per-chromosome files; a twoBit
        # entry is a single file.
        if nib_path and os.path.isdir( nib_path ):
            compress_files = os.listdir(nib_path)
            target_path = nib_path
        elif twobit_path:
            compress_files = [twobit_path]
            target_path = ""
        else:
            stop_err("Requested genome build has no available sequence.")
        for file in compress_files:
            file = "%s/%s" % ( target_path, file )
            file = os.path.normpath(file)
            all_files.append(file)
    else:
        # The target is an uploaded sequence file; use it directly.
        all_files = [target_file]
    for detail_file_path in all_files:
        output_tempfile = tempfile.NamedTemporaryFile().name
        # NOTE(review): the command line is built by string interpolation and
        # executed through the shell; paths containing shell metacharacters
        # would be unsafe -- confirm inputs are sanitized by the caller.
        command = "blat %s %s %s -oneOff=%s -tileSize=%s -minIdentity=%s -mask=lower -noHead -out=pslx 2>&1" % ( detail_file_path, query_file, output_tempfile, one_off, tile_size, min_iden )
        os.system( command )
        # Append this target's hits to the aggregate output file.
        os.system( 'cat %s >> %s' % ( output_tempfile, output_file ) )
        os.remove( output_tempfile )
# Script entry point: run the wrapper only when invoked directly.
if __name__ == '__main__':
    __main__()
| 31.321429 | 190 | 0.593216 |
import os
import sys
import tempfile
assert sys.version_info[:2] >= (2.4)
def stop_err( msg ):
sys.stderr.write( "%s\n" % msg )
sys.exit()
def check_nib_file( dbkey, GALAXY_DATA_INDEX_DIR ):
nib_file = "%s/alignseq.loc" % GALAXY_DATA_INDEX_DIR
nib_path = ''
nibs = {}
for i, line in enumerate( open( nib_file ) ):
line = line.rstrip( '\r\n' )
if line and not line.startswith( "#" ):
fields = line.split( '\t' )
if len( fields ) < 3:
continue
if fields[0] == 'seq':
nibs[( fields[1] )] = fields[2]
if dbkey in nibs:
nib_path = nibs[( dbkey )]
return nib_path
def check_twobit_file( dbkey, GALAXY_DATA_INDEX_DIR ):
twobit_file = "%s/twobit.loc" % GALAXY_DATA_INDEX_DIR
twobit_path = ''
twobits = {}
for i, line in enumerate( open( twobit_file ) ):
line = line.rstrip( '\r\n' )
if line and not line.startswith( "#" ):
fields = line.split( '\t' )
if len( fields ) < 2:
continue
twobits[( fields[0] )] = fields[1]
if dbkey in twobits:
twobit_path = twobits[( dbkey )]
return twobit_path
def __main__():
source_format = sys.argv[1]
target_file = sys.argv[2]
query_file = sys.argv[3]
output_file = sys.argv[4]
min_iden = sys.argv[5]
tile_size = sys.argv[6]
one_off = sys.argv[7]
try:
float(min_iden)
except:
stop_err('Invalid value for minimal identity.')
try:
test = int(tile_size)
assert test >= 6 and test <= 18
except:
stop_err('Invalid value for tile size. DNA word size must be between 6 and 18.')
try:
test = int(one_off)
assert test >= 0 and test <= int(tile_size)
except:
stop_err('Invalid value for mismatch numbers in the word')
GALAXY_DATA_INDEX_DIR = sys.argv[8]
all_files = []
if source_format == '0':
dbkey = target_file
nib_path = check_nib_file( dbkey, GALAXY_DATA_INDEX_DIR )
twobit_path = check_twobit_file( dbkey, GALAXY_DATA_INDEX_DIR )
if not os.path.exists( nib_path ) and not os.path.exists( twobit_path ):
stop_err("No sequences are available for %s, request them by reporting this error." % dbkey)
if nib_path and os.path.isdir( nib_path ):
compress_files = os.listdir(nib_path)
target_path = nib_path
elif twobit_path:
compress_files = [twobit_path]
target_path = ""
else:
stop_err("Requested genome build has no available sequence.")
for file in compress_files:
file = "%s/%s" % ( target_path, file )
file = os.path.normpath(file)
all_files.append(file)
else:
all_files = [target_file]
for detail_file_path in all_files:
output_tempfile = tempfile.NamedTemporaryFile().name
command = "blat %s %s %s -oneOff=%s -tileSize=%s -minIdentity=%s -mask=lower -noHead -out=pslx 2>&1" % ( detail_file_path, query_file, output_tempfile, one_off, tile_size, min_iden )
os.system( command )
os.system( 'cat %s >> %s' % ( output_tempfile, output_file ) )
os.remove( output_tempfile )
if __name__ == '__main__':
__main__()
| true | true |
f7342117dd64c1f7fdf5e9c46d12b689ca81aa84 | 1,459 | py | Python | Create_Dataset.py | mariammendha/Researcher--Reality | 76adf1da1d872133d913b27a3c45d9f0d29ffd98 | [
"CC-BY-3.0"
] | null | null | null | Create_Dataset.py | mariammendha/Researcher--Reality | 76adf1da1d872133d913b27a3c45d9f0d29ffd98 | [
"CC-BY-3.0"
] | null | null | null | Create_Dataset.py | mariammendha/Researcher--Reality | 76adf1da1d872133d913b27a3c45d9f0d29ffd98 | [
"CC-BY-3.0"
] | null | null | null | '''
Take the existing Kc_house_data.csv from MongoDb Atlas, parsing for latitude and longitude
It then pipes this data through the WalkScore Api, generating 3 accessibility scores
These 3 scores are then inserted into their associated rows within MongoDb
WalkScore Api: https://www.walkscore.com/professional/walk-score-apis.php
'''
# Function returns 3 scores (Walk, Transit, Bike)
# Scored out of 100
def rating(address, latitude, longitude):
    """Look up WalkScore accessibility ratings for one location.

    Returns a (walk_score, transit_score, bike_score) tuple, each score
    out of 100 as reported by the WalkScore API.
    """
    from walkscore import WalkScoreAPI
    client = WalkScoreAPI(api_key='<Api Key>')
    score = client.get_score(latitude, longitude, address)
    return (score.walk_score, score.transit_score, score.bike_score)
import pymongo
# Database connection string (MongoDB Atlas).
db = \
    pymongo.MongoClient('mongodb+srv://<username>:<password>@housing-peu0h.gcp.mongodb.net/test?retryWrites=true&w=majority'
                        )
# Declare collection
collection = db.Parameters
# Declare Document
data = collection.Housing
# Bug fix: `count` was incremented without ever being initialized, which
# raised NameError on the first row; start the processed-row counter at zero.
count = 0
# Loop through each existing row, piping its coordinates through the rating
# api and populating 3 new fields (walk_score, transit_score, bike_score).
for row in data.find():
    walk, transit, bike = rating('', row['lat'], row['long'])
    data.update_many({'id': row['id']},
                     {'$set': {'walk_score': walk,
                               'transit_score': transit,
                               'bike_score': bike}})
    count += 1
| 33.930233 | 125 | 0.69157 |
def rating(address, latitude, longitude):
from walkscore import WalkScoreAPI
api_key = '<Api Key>'
walkscore_api = WalkScoreAPI(api_key=api_key)
result = walkscore_api.get_score(latitude, longitude, address)
return (result.walk_score, result.transit_score, result.bike_score)
import pymongo
db = \
pymongo.MongoClient('mongodb+srv://<username>:<password>@housing-peu0h.gcp.mongodb.net/test?retryWrites=true&w=majority'
)
collection = db.Parameters
data = collection.Housing
for i in data.find():
ratinglist = rating('', i['lat'], i['long'])
(x, y, z) = (ratinglist[0], ratinglist[1], ratinglist[2])
data.update_many({'id': i['id']}, {'$set': {'walk_score': x,
'transit_score': y, 'bike_score': z}})
count += 1
| true | true |
f7342179f51675003c8d0d6b869111edae0eafb7 | 5,501 | py | Python | examples/tutorial_h4l/3b_score.py | aghoshpub/LikelihoodFreeInterference | fd6267104c29e935fa41dc92004dae98ded30626 | [
"MIT"
] | 1 | 2019-09-18T15:24:25.000Z | 2019-09-18T15:24:25.000Z | examples/tutorial_h4l/3b_score.py | aghoshpub/LikelihoodFreeInterference | fd6267104c29e935fa41dc92004dae98ded30626 | [
"MIT"
] | null | null | null | examples/tutorial_h4l/3b_score.py | aghoshpub/LikelihoodFreeInterference | fd6267104c29e935fa41dc92004dae98ded30626 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
# # MadMiner particle physics tutorial
#
# # Part 3b: Training a score estimator
#
# Johann Brehmer, Felix Kling, Irina Espejo, and Kyle Cranmer 2018-2019
# In part 3a of this tutorial we will finally train a neural network to estimate likelihood ratios. We assume that you have run part 1 and 2a of this tutorial. If, instead of 2a, you have run part 2b, you just have to load a different filename later.
# ## Preparations
# Make sure you've run the first tutorial before executing this notebook!
# In[1]:
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import numpy as np
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
# get_ipython().magic(u'matplotlib inline')
from madminer.sampling import SampleAugmenter
from madminer import sampling
from madminer.ml import ScoreEstimator
# In[2]:
# MadMiner output: timestamped INFO-level logging for the madminer modules.
logging.basicConfig(
    format='%(asctime)-5.5s %(name)-20.20s %(levelname)-7.7s %(message)s',
    datefmt='%H:%M',
    level=logging.INFO
)
# Quiet every already-registered non-madminer logger (e.g. matplotlib)
# down to WARNING so madminer's progress output stays readable.
for key in logging.Logger.manager.loggerDict:
    if "madminer" not in key:
        logging.getLogger(key).setLevel(logging.WARNING)
# ## 1. Make (unweighted) training and test samples with augmented data
# At this point, we have all the information we need from the simulations. But the data is not quite ready to be used for machine learning. The `madminer.sampling` class `SampleAugmenter` will take care of the remaining book-keeping steps before we can train our estimators:
#
# First, it unweights the samples, i.e. for a given parameter vector `theta` (or a distribution `p(theta)`) it picks events `x` such that their distribution follows `p(x|theta)`. The selected samples will all come from the event file we have so far, but their frequency is changed -- some events will appear multiple times, some will disappear.
#
# Second, `SampleAugmenter` calculates all the augmented data ("gold") that is the key to our new inference methods. Depending on the specific technique, these are the joint likelihood ratio and / or the joint score. It saves all these pieces of information for the selected events in a set of numpy files that can easily be used in any machine learning framework.
# In[3]:
# Unweight events at the SM benchmark and compute the joint score t(x,z)
# ("gold") needed to train a local score estimator.
sampler = SampleAugmenter('data/delphes_data_shuffled.h5')
# `sample_train_local()` is the relevant SampleAugmenter method for local
# score estimators; `theta` accepts the sampling.* helper functions.
x, theta, t_xz, _ = sampler.sample_train_local(
    theta=sampling.benchmark('sm'),
    n_samples= 2*10**6, # fewer than others
    folder='./data/samples',
    filename='train_score'
)
# We can use the same data as in part 3a, so you only have to execute this if you haven't gone through tutorial 3a:
# In[5]:
# _ = sampler.sample_test(
# theta=sampling.benchmark('sm'),
# n_samples=1*10**6,
# folder='./data/samples',
# filename='test'
# )
# ## 2. Train score estimator
# It's now time to build a neural network. Only this time, instead of the likelihood ratio itself, we will estimate the gradient of the log likelihood with respect to the theory parameters -- the score. To be precise, the output of the neural network is an estimate of the score at some reference parameter point, for instance the Standard Model. A neural network that estimates this "local" score can be used to calculate the Fisher information at that point. The estimated score can also be used as a machine learning version of Optimal Observables, and likelihoods can be estimated based on density estimation in the estimated score space. This method for likelihood ratio estimation is called SALLY, and there is a closely related version called SALLINO. Both are explained in ["Constraining Effective Field Theories With Machine Learning"](https://arxiv.org/abs/1805.00013) and ["A Guide to Constraining Effective Field Theories With Machine Learning"](https://arxiv.org/abs/1805.00020).
#
# The central object for this is the `madminer.ml.ScoreEstimator` class:
# In[6]:
# SALLY score estimator: one hidden layer of 100 units.
estimator = ScoreEstimator(n_hidden=(100,))
# Train on the unweighted SM sample and its joint scores, then save the
# trained model under models/sally.
estimator.train(
    method='sally',
    x='data/samples/x_train_score.npy',
    t_xz='data/samples/t_xz_train_score.npy',
)
estimator.save('models/sally')
# # ## 3. Evaluate score estimator
# # Let's evaluate the SM score on the test data
# # In[ ]:
# estimator = ScoreEstimator(n_hidden=(50,))
# # In[ ]:
# estimator.load('models/sally')
# t_hat = estimator.evaluate_score(
# x='data/samples/x_test.npy'
# )
# # Let's have a look at the estimated score and how it is related to the observables:
# # In[ ]:
# x = np.load('data/samples/x_test.npy')
# fig = plt.figure(figsize=(10,4))
# #for i in range(2):
# for i in range(1):
# ax = plt.subplot(1,2,i+1)
# sc = plt.scatter(x[:,0], x[:,1], c=t_hat[:,i], s=25., cmap='viridis', vmin=-1., vmax=1.)
# cbar = plt.colorbar(sc)
# cbar.set_label(r'$\hat{t}_' + str(i) + r'(x | \theta_{ref})$')
# plt.xlabel(r'$p_{T,j1}$ [GeV]')
# plt.ylabel(r'$\Delta \phi_{jj}$ Sally')
# plt.xlim(10.,300.)
# plt.ylim(-3.15,3.15)
# plt.tight_layout()
# #plt.show()
# In[ ]:
| 33.138554 | 992 | 0.718597 |
als
import logging
import numpy as np
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
# get_ipython().magic(u'matplotlib inline')
from madminer.sampling import SampleAugmenter
from madminer import sampling
from madminer.ml import ScoreEstimator
# In[2]:
# MadMiner output
logging.basicConfig(
format='%(asctime)-5.5s %(name)-20.20s %(levelname)-7.7s %(message)s',
datefmt='%H:%M',
level=logging.INFO
)
# Output of all other modules (e.g. matplotlib)
for key in logging.Logger.manager.loggerDict:
if "madminer" not in key:
logging.getLogger(key).setLevel(logging.WARNING)
# ## 1. Make (unweighted) training and test samples with augmented data
# At this point, we have all the information we need from the simulations. But the data is not quite ready to be used for machine learning. The `madminer.sampling` class `SampleAugmenter` will take care of the remaining book-keeping steps before we can train our estimators:
#
# First, it unweights the samples, i.e. for a given parameter vector `theta` (or a distribution `p(theta)`) it picks events `x` such that their distribution follows `p(x|theta)`. The selected samples will all come from the event file we have so far, but their frequency is changed -- some events will appear multiple times, some will disappear.
#
# Second, `SampleAugmenter` calculates all the augmented data ("gold") that is the key to our new inference methods. Depending on the specific technique, these are the joint likelihood ratio and / or the joint score. It saves all these pieces of information for the selected events in a set of numpy files that can easily be used in any machine learning framework.
# In[3]:
# Build the augmenter on the shuffled Delphes event file produced earlier.
sampler = SampleAugmenter('data/delphes_data_shuffled.h5')
# The relevant `SampleAugmenter` function for local score estimators is `extract_samples_train_local()`. As in part 3a of the tutorial, for the argument `theta` you can use the helper functions `sampling.benchmark()`, `sampling.benchmarks()`, `sampling.morphing_point()`, `sampling.morphing_points()`, and `sampling.random_morphing_points()`.
# In[4]:
# Draw unweighted training samples plus the joint score t(x,z) at the SM
# benchmark; outputs are written as numpy files under ./data/samples.
x, theta, t_xz, _ = sampler.sample_train_local(
    theta=sampling.benchmark('sm'),
    #n_samples=4 * 10**5, #100000,
    n_samples= 2*10**6, # fewer than others
    folder='./data/samples',
    filename='train_score'
)
# We can use the same data as in part 3a, so you only have to execute this if you haven't gone through tutorial 3a:
# We then instantiate the `madminer.ml.ScoreEstimator` class:
# In[6]:
# One hidden layer with 100 units for the score network.
estimator = ScoreEstimator(n_hidden=(100,))
# In[ ]:
# Train with the SALLY method on the samples produced above, then persist
# the trained model to models/sally.
estimator.train(
    method='sally',
    x='data/samples/x_train_score.npy',
    t_xz='data/samples/t_xz_train_score.npy',
)
estimator.save('models/sally')
# # ## 3. Evaluate score estimator
# # Let's evaluate the SM score on the test data
# plt.figure(figsize=(10,4))
# #for i in range(2):
# for i in range(1):
# ax = plt.subplot(1,2,i+1)
# sc = plt.scatter(x[:,0], x[:,1], c=t_hat[:,i], s=25., cmap='viridis', vmin=-1., vmax=1.)
# cbar = plt.colorbar(sc)
# cbar.set_label(r'$\hat{t}_' + str(i) + r'(x | \theta_{ref})$')
# plt.xlabel(r'$p_{T,j1}$ [GeV]')
# plt.ylabel(r'$\Delta \phi_{jj}$ Sally')
# plt.xlim(10.,300.)
# plt.ylim(-3.15,3.15)
# plt.tight_layout()
# #plt.show()
# In[ ]:
| true | true |
f73421f38d8e149fb253d1d1beb05fe95e33d4fd | 1,350 | py | Python | cogs/help.py | Tutuviz/yunna-bot | 2e76fa41a5029bd96b8e1c2f01462d73d7deeacd | [
"MIT"
] | null | null | null | cogs/help.py | Tutuviz/yunna-bot | 2e76fa41a5029bd96b8e1c2f01462d73d7deeacd | [
"MIT"
] | null | null | null | cogs/help.py | Tutuviz/yunna-bot | 2e76fa41a5029bd96b8e1c2f01462d73d7deeacd | [
"MIT"
] | null | null | null | import discord
from discord.ext import commands
class Help(commands.Cog):
    """Cog exposing the bot's custom `$help` command."""

    def __init__(self, bot):
        self.bot = bot

    @commands.command(pass_context=True, aliases=("commands", "cmd"))
    async def help(self, ctx):
        """Send one embed listing every command, grouped by category."""
        icon = "https://user-images.githubusercontent.com/54639269/71168042-aa5bad00-2234-11ea-875f-5745cba18f6a.png"
        embed = discord.Embed(
            title="There are all commands I know",
            description="Yunna is a multi-purpose bot made as a test from an amateur that ended up becoming what we hope that you think as a great bot that now lies in your discord server. Please enjoy, and report to us any mistakes that you may encounter.",
            color=0xff4c5c,
        )
        embed.set_author(name="Hey there", icon_url=icon)
        embed.set_thumbnail(url=icon)
        # Categories are rendered in this order, one embed field each.
        command_groups = (
            ("General", "`say`, `info`, `help`, `avatar`, `creator`, `author`, `commands`"),
            ("Useful", "`bitcoin`"),
            ("Moderator", "`prune`, `clear`, `purge`, `kick`, `ban`, `unban`"),
        )
        for group_name, group_commands in command_groups:
            embed.add_field(name=group_name, value=group_commands, inline=False)
        embed.set_footer(text="use prefix '$' to this commands")
        await ctx.channel.send(content=None, embed=embed)
def setup(bot):
    """Entry point used by discord.py's extension loader to register the cog."""
    cog = Help(bot)
    bot.add_cog(cog)
| 58.695652 | 331 | 0.704444 | import discord
from discord.ext import commands
class Help(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command(pass_context=True, aliases=("commands", "cmd"))
async def help(self, ctx):
embed = discord.Embed(title="There are all commands I know", description="Yunna is a multi-purpose bot made as a test from an amateur that ended up becoming what we hope that you think as a great bot that now lies in your discord server. Please enjoy, and report to us any mistakes that you may encounter.", color=0xff4c5c)
embed.set_author(name="Hey there", icon_url="https://user-images.githubusercontent.com/54639269/71168042-aa5bad00-2234-11ea-875f-5745cba18f6a.png")
embed.set_thumbnail(url="https://user-images.githubusercontent.com/54639269/71168042-aa5bad00-2234-11ea-875f-5745cba18f6a.png")
embed.add_field(name="General", value="`say`, `info`, `help`, `avatar`, `creator`, `author`, `commands`", inline=False)
embed.add_field(name="Useful", value="`bitcoin`", inline=False)
embed.add_field(name="Moderator", value="`prune`, `clear`, `purge`, `kick`, `ban`, `unban`", inline=False)
embed.set_footer(text="use prefix '$' to this commands")
await ctx.channel.send(content=None, embed=embed)
def setup(bot):
bot.add_cog(Help(bot))
| true | true |
f734223bdceb57e4160f18ca5c8236187d3e0af2 | 1,421 | py | Python | gtfs_converter/scheduler.py | etalab/gtfs_converter | c98866a6fcc5454f8bcb4c329b2f710d28e70356 | [
"MIT"
] | 3 | 2020-07-28T14:20:58.000Z | 2021-01-08T20:55:43.000Z | gtfs_converter/scheduler.py | etalab/gtfs_converter | c98866a6fcc5454f8bcb4c329b2f710d28e70356 | [
"MIT"
] | 4 | 2020-05-04T11:01:08.000Z | 2021-08-06T16:50:16.000Z | gtfs_converter/scheduler.py | etalab/gtfs_converter | c98866a6fcc5454f8bcb4c329b2f710d28e70356 | [
"MIT"
] | 1 | 2020-04-29T16:46:46.000Z | 2020-04-29T16:46:46.000Z | from rq_scheduler.scheduler import Scheduler
import rq
from redis import Redis
import os
import sys
import init_log
import logging
def _run_scheduler():
    """Register the recurring jobs on the RQ queue and block on the scheduler loop."""
    redis_url = os.environ.get("REDIS_URL") or "redis://"
    with rq.Connection(Redis.from_url(redis_url)):
        queue = rq.Queue()
        scheduler = Scheduler(queue=queue)
        # Weekly geojson merge: every Tuesday at 07:00.
        scheduler.cron(
            cron_string="0 7 * * 2",
            func="merge_all_geojson.merge_geojson",
            timeout="20m",
        )
        # Daily cleanup of stale resources at midnight.
        scheduler.cron(
            cron_string="0 0 * * *",
            func="cleanup.cleanup_old_resources",
        )
        scheduler.run()
def _run_task(task):
    """Debug helper: enqueue *task* (a dotted function path) to run once, in 1 second."""
    from datetime import timedelta

    logging.info(f"scheduling task {task} in 1s", extra={"task_id": "scheduler"})
    redis_url = os.environ.get("REDIS_URL") or "redis://"
    with rq.Connection(Redis.from_url(redis_url)):
        scheduler = Scheduler(queue=rq.Queue())
        scheduler.enqueue_in(timedelta(seconds=1), func=task, timeout="20m")
if __name__ == "__main__":
init_log.config_worker_log()
if len(sys.argv) > 1:
# run custom task for debug, like:
# `python scheduler.py merge_all_geojson.merge_geojson`
# or
# `python scheduler.py cleanup.cleanup_old_resources`
_run_task(sys.argv[1])
else:
_run_scheduler()
| 25.375 | 82 | 0.613652 | from rq_scheduler.scheduler import Scheduler
import rq
from redis import Redis
import os
import sys
import init_log
import logging
def _run_scheduler():
    """Register the recurring jobs on the RQ queue and block on the scheduler loop."""
    with rq.Connection(Redis.from_url(os.environ.get("REDIS_URL") or "redis://")):
        q = rq.Queue()
        scheduler = Scheduler(queue=q)
        # Weekly geojson merge: every Tuesday at 07:00.
        scheduler.cron(
            cron_string="0 7 * * 2",
            func="merge_all_geojson.merge_geojson",
            timeout="20m",
        )
        # Daily cleanup of stale resources at midnight.
        scheduler.cron(
            cron_string="0 0 * * *",
            func="cleanup.cleanup_old_resources",
        )
        scheduler.run()
def _run_task(task):
    """Debug helper: enqueue *task* (a dotted function path) to run once, in 1 second."""
    from datetime import timedelta
    logging.info(f"scheduling task {task} in 1s", extra={"task_id": "scheduler"})
    with rq.Connection(Redis.from_url(os.environ.get("REDIS_URL") or "redis://")):
        q = rq.Queue()
        scheduler = Scheduler(queue=q)
        scheduler.enqueue_in(
            timedelta(seconds=1), func=task, timeout="20m",
        )
if __name__ == "__main__":
init_log.config_worker_log()
if len(sys.argv) > 1:
_run_task(sys.argv[1])
else:
_run_scheduler()
| true | true |
f734224e5ccc1a5cc4968643172e56daa96e2640 | 16,537 | py | Python | Graphcore/benchmarks/bert/implementations/popart/pack_pretraining_data.py | jqueguiner/training_results_v1.0 | 8200377f425ae24b6ed6c2816b9273aab0996d43 | [
"Apache-2.0"
] | 27 | 2021-07-01T00:34:52.000Z | 2022-03-29T08:49:53.000Z | Graphcore/benchmarks/bert/implementations/popart/pack_pretraining_data.py | jqueguiner/training_results_v1.0 | 8200377f425ae24b6ed6c2816b9273aab0996d43 | [
"Apache-2.0"
] | 21 | 2021-08-31T08:34:50.000Z | 2022-03-17T11:42:10.000Z | Graphcore/benchmarks/bert/implementations/popart/pack_pretraining_data.py | jqueguiner/training_results_v1.0 | 8200377f425ae24b6ed6c2816b9273aab0996d43 | [
"Apache-2.0"
] | 39 | 2021-07-02T00:46:14.000Z | 2022-03-13T16:59:55.000Z | # Copyright (c) 2020 Graphcore Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
import glob
import struct
import random
import argparse
import numpy as np
import pandas as pd
from scipy import optimize
from itertools import repeat, chain
from functools import lru_cache, reduce
from collections import defaultdict
from matplotlib import pyplot as plt
from concurrent.futures import ProcessPoolExecutor
from bert_data.pretraining_dataset import CachedDataLoader, data_file_format
@lru_cache(maxsize=None)
def packing_strategies(start, previous, target, depth):
    """Enumerate non-decreasing length combinations that complete a pack.

    Given a partial sum `start`, enumerate every sorted list of at most
    `depth` lengths (each >= `previous`) whose total closes the gap to
    `target`. Results are memoized; callers must not mutate them.
    """
    remaining = target - start
    # With one slot left, the remainder itself must be an admissible length.
    if depth == 1:
        return [[remaining]] if remaining >= previous else []
    found = []
    for candidate in range(previous, remaining + 1):
        if candidate == remaining:
            # Candidate alone closes the gap exactly.
            found.append([candidate])
        else:
            # Recurse on the rest, keeping the lengths non-decreasing.
            for tail in packing_strategies(start + candidate, candidate, target, depth - 1):
                if tail:
                    found.append([candidate] + tail)
    return found
def get_packing_recipe(sequence_lengths, max_sequence_length, max_sequences_per_pack=3):
    """Solve for a packing recipe over the whole dataset.

    Finds a non-negative integer "mixture" of packing strategies (length
    combinations summing to ``max_sequence_length``) covering the histogram of
    input lengths, adding padding sequences where the solve leaves a deficit.

    Args:
        sequence_lengths: 1-D numpy array with the length of each un-packed example.
        max_sequence_length: the length every packed example must sum to.
        max_sequences_per_pack: at most this many sequences are combined per pack.

    Returns:
        Tuple ``(strategy_set, mixture, padding, slicing)``:
        ``strategy_set`` -- list of strategies (each a sorted list of lengths);
        ``mixture`` -- int64 array, how many times each strategy is used;
        ``padding`` -- per-length count of empty padding sequences to add;
        ``slicing`` -- per-strategy start offsets into the per-length example bins.
    """
    # Histogram of sequence lengths
    histogram, bins = np.histogram(sequence_lengths, bins=np.arange(1, max_sequence_length + 2))
    print("Begin packing pass".center(80, "_"))
    print(f"Unpacked mean sequence length: {sequence_lengths.mean():3.2f}")
    # Make sure all strategies are recipes to pack to the correct sequence length
    strategy_set = packing_strategies(0, 1, max_sequence_length, max_sequences_per_pack)
    for strategy in strategy_set:
        assert(sum(strategy) == max_sequence_length)
    num_strategies = len(strategy_set)
    print(f"Found {num_strategies} unique packing strategies.")
    # Solve the packing equation A@mixture = histogram
    A = np.zeros((max_sequence_length, num_strategies), dtype=np.int32)
    for i in range(num_strategies):
        strategy = strategy_set[i]
        for seq_len in strategy:
            A[seq_len - 1, i] += 1
    # short sequences are inexpensive to add, so should have low residual weights
    # to exactly minimize padding use w0 = np.arange(1, max_sequence_length + 1)
    # in practice the difference is negligible, but this converges faster
    padding_cutoff = 8
    w0 = np.ones([max_sequence_length])
    w0[:padding_cutoff] = padding_cutoff / (2 * max_sequence_length)
    w0 = np.sqrt(w0)
    # Starting values for the padding and the mixture
    padding = np.zeros([max_sequence_length], dtype=np.int32)
    mixture = np.zeros([num_strategies], dtype=np.int32)
    b = histogram + padding
    # Pack sequences as best as possible, then increase padding accordingly and repeat
    for i in range(0, 20):
        print(f"\nIteration: {i}: sequences still to pack: ", b.sum())
        start = time.time()
        partial_mixture, rnorm = optimize.nnls(np.expand_dims(w0, -1) * A, w0 * b)
        print(f"Solving nnls took {time.time() - start:3.2f} seconds.")
        print(f"Residual norm: {rnorm:3.5e}")
        # Update mixture (round the floating point solution to integers)
        partial_mixture = np.where(partial_mixture < 2, np.rint(partial_mixture), np.floor(partial_mixture))
        # If partial mixture is empty (due to rounding) we follow the gradient
        # this usually happens when the number of examples is small i.e. ~100
        if partial_mixture.max() == 0:
            grad = A.T @ (b * np.arange(1, max_sequence_length + 1))
            k = int(b.sum() // 2) + 1
            topk = np.argsort(-grad)[:k]
            partial_mixture[topk] += 1
        # Update mixture
        mixture = mixture + partial_mixture
        # Compute the residuals
        residual = b - A @ partial_mixture
        print(f"Max residual: {abs(residual).max()}")
        print(f"Residual on first 8 categories: {np.around(residual[:8], 4)}")
        print(f"Residual on last 8 categories: {np.around(residual[-8:], 4)}")
        # Add padding based on deficit (negative residual)
        partial_padding = np.where(residual < 0, -residual, 0)
        print(f"Added {(partial_padding*np.arange(1,max_sequence_length+1)).sum():3.2e} tokens of padding.")
        padding = padding + partial_padding
        # Update the rhs vector (remaining surplus sequences)
        b = histogram + padding - A @ mixture
        assert np.all(b >= 0), b
        # Done iterating
        if b.sum() < 100:
            break
    # Make sure there is no remainder.
    # FIX: use the `max_sequence_length` parameter here (this previously read
    # the module-level `args`, which breaks when the function is imported and
    # silently diverges if the global differs from the argument).
    unpacked_seqlen = np.arange(1, max_sequence_length + 1)[b > 0]
    # Update the mixture to also cover the unpacked sequences
    for l in unpacked_seqlen:
        # Get the depth 1 strategy (the length plus its complement)
        strategy = sorted([l, max_sequence_length - l])
        strategy_index = strategy_set.index(strategy)
        mixture[strategy_index] += b[l-1]
    b = histogram - A @ mixture
    padding = np.where(b < 0, -b, 0)
    b = histogram + padding - A @ mixture
    assert b.sum() == 0
    # Analyze result
    print("Done solving for packing order".center(80, "_"))
    num_padding_tokens = (np.arange(1, max_sequence_length + 1) * padding).sum()
    num_padding_tokens_original = (max_sequence_length - sequence_lengths).sum()
    print(f"Number of sequences dropped: {b.sum()}")
    print(f"Number of strategies utilized: {np.count_nonzero(mixture)}")
    new_number_of_samples = int(mixture.sum())
    compression = 1 - new_number_of_samples / len(sequence_lengths)
    print(f"New number of samples: {new_number_of_samples:3.2f}, original {len(sequence_lengths)}. A compression ratio of {compression:3.3f}")
    print(f"The expected speed-up from packing: {1/(1-compression):3.3f}")
    upper_bound = 1.0 / (1 - ((1 - sequence_lengths / max_sequence_length).mean()))
    print(f"Theoretical upper bound on speed-up: {upper_bound:3.3f}")
    avg_sequences_per_sample = ((A.sum(0) * mixture).sum() - padding.sum()) / new_number_of_samples
    print(f"Average sequences/sample {avg_sequences_per_sample:3.5f}")
    print(f"Added {num_padding_tokens:3.2e} padding tokens. Original dataset used {num_padding_tokens_original:3.2e} padding tokens")
    efficiency = (new_number_of_samples*max_sequence_length - num_padding_tokens)/(new_number_of_samples*max_sequence_length)
    print(f"Packing efficiency (fraction of real tokens): {efficiency:3.4f}")
    print(f"Top 8 strategies")
    topK = np.argsort(-mixture)[:8]
    for i in topK:
        print(f"Strategy {strategy_set[i]} which is used {int(mixture[i])} times")
    print("".center(80, "_"))
    # Figure out the slicing that each strategy should use
    slicing = np.zeros_like(A)
    slicing[:, 1:] = np.cumsum(A * mixture, axis=1)[:, :-1]
    slicing = slicing.T
    mixture = mixture.astype(np.int64)
    return strategy_set, mixture, padding, slicing
def slice_examples(examples_by_length, slicing, strategy_set, repeat_counts):
    """Split the packing work into per-strategy chunks of at most 50k packs.

    For every strategy that is used at least once, carve the corresponding
    examples (one slice per sequence length in the strategy) out of the
    per-length bins, advancing the strategy's offsets in ``slicing`` as it goes.

    Returns (slices, strategies, part_idx), one entry per emitted chunk.
    """
    chunk = 50000
    slices = []
    strategies = []
    part_idx = []
    for strategy, offsets, count in zip(strategy_set, slicing, repeat_counts):
        if count == 0:
            continue
        # Chunk sizes: full 50k parts followed by the remainder, if any.
        full_parts, remainder = divmod(count, chunk)
        sizes = [chunk] * int(full_parts)
        if remainder:
            sizes.append(remainder)
        for part_id, part_count in enumerate(sizes):
            chunk_examples = []
            for seq_len in strategy:
                begin = int(offsets[seq_len - 1])
                end = begin + int(part_count)
                offsets[seq_len - 1] = end
                chunk_examples.append(examples_by_length[seq_len][begin:end])
            slices.append(chunk_examples)
            strategies.append(strategy)
            part_idx.append(part_id)
    return slices, strategies, part_idx
def parallel_pack_according_to_strategy(args, part_idx, strategy, examples):
    """Pack one chunk of sequences per *strategy* and write the binary records.

    Output file name encodes the strategy and the chunk index, e.g.
    ``<output_dir>/strategy_128_384_part_0``.
    """
    strategy_tag = "_".join(str(length) for length in strategy)
    filename = os.path.join(args.output_dir, f"strategy_{strategy_tag}_part_{part_idx}")
    # zip(*examples) yields one tuple of sequences (one per strategy slot) per pack.
    lines = [
        create_multi_sequence_example(multi_sequence, args.max_predictions_per_sequence,
                                      args.max_sequence_length, args.max_sequences_per_pack)
        for multi_sequence in zip(*examples)
    ]
    with open(filename, "wb") as f:
        f.writelines(lines)
def create_multi_sequence_example(multi_sequence, max_predictions_per_sequence, max_sequence_length, max_sequences_per_pack):
    """Concatenate the sequences of one pack into a single binary record.

    Each element of *multi_sequence* is either None (padding slot) or a tuple
    (input_ids, input_mask, segment_ids, masked_lm_positions, masked_lm_ids,
    masked_lm_weights, next_sentence_labels). Returns the packed example
    serialized as little-endian uint32 bytes, field by field.
    """
    # SEQ: token-level arrays, all padded out to max_sequence_length.
    packed_input_ids = np.zeros(max_sequence_length, dtype=np.int32)
    packed_input_mask = np.zeros(max_sequence_length, dtype=np.int32)
    packed_segment_ids = np.zeros(max_sequence_length, dtype=np.int32)
    packed_positions = np.zeros(max_sequence_length, dtype=np.int32)
    # MLM
    # we are packing up to max_sequences_per_pack, each with a certain percentage of masked tokens
    # in case that percentage is rounded up for all sequences in the pack, need to add an extra token for
    # each sequence in the pack
    packed_masked_lm_positions = np.zeros(max_predictions_per_sequence + max_sequences_per_pack, dtype=np.int32)
    packed_masked_lm_ids = np.zeros(max_predictions_per_sequence + max_sequences_per_pack, dtype=np.int32)
    packed_masked_lm_weights = np.zeros(max_predictions_per_sequence + max_sequences_per_pack, dtype=np.int32)
    # NSP: one slot per packed sequence.
    packed_next_sentence_positions = np.zeros(max_sequences_per_pack, dtype=np.int32)
    packed_next_sentence_labels = np.zeros(max_sequences_per_pack, dtype=np.int32)
    packed_next_sentence_weights = np.zeros(max_sequences_per_pack, dtype=np.int32)
    offset = 0
    mlm_offset = 0
    sequence_index = 1 # used in the input mask
    for sequence in multi_sequence:
        # Padding sequences are denoted with None
        if sequence is not None:
            input_ids, input_mask, segment_ids, masked_lm_positions, masked_lm_ids, masked_lm_weights, next_sentence_labels = sequence
            seq_len = input_mask.sum()
            # SEQ: copy tokens after the previous sequence; the mask stores the
            # 1-based sequence index so sequences stay distinguishable.
            packed_input_ids[offset:offset + seq_len] = input_ids[:seq_len]
            packed_input_mask[offset:offset + seq_len] = sequence_index
            packed_segment_ids[offset:offset + seq_len] = segment_ids[:seq_len]
            packed_positions[offset:offset + seq_len] = np.arange(0, seq_len)
            # MLM: shift prediction positions by this sequence's start offset.
            mlm_len = int(masked_lm_weights.sum())
            assert mlm_offset + mlm_len < max_predictions_per_sequence + max_sequences_per_pack, "Too many LM predictions per sequences"
            max_mlm = mlm_offset + mlm_len
            packed_masked_lm_positions[mlm_offset:max_mlm] = offset + masked_lm_positions[:mlm_len]
            packed_masked_lm_ids[mlm_offset:max_mlm] = masked_lm_ids[:mlm_len]
            packed_masked_lm_weights[mlm_offset:max_mlm] = sequence_index
            # NSP
            packed_next_sentence_positions[sequence_index - 1] = offset
            packed_next_sentence_labels[sequence_index - 1] = next_sentence_labels
            packed_next_sentence_weights[sequence_index - 1] = 1
            # Update offsets
            sequence_index += 1
            offset += seq_len
            mlm_offset = max_mlm
    # Pack into binary format and write it.
    # NOTE(review): the reduce over per-int struct.pack is O(n^2) in record
    # size; a single struct.pack with a repeated format would be linear.
    line = reduce(lambda accl, i: accl + struct.pack('<I', i),
                  chain(packed_input_ids,
                        packed_input_mask,
                        packed_segment_ids,
                        packed_positions,
                        packed_masked_lm_positions,
                        packed_masked_lm_ids,
                        packed_masked_lm_weights,
                        packed_next_sentence_positions,
                        packed_next_sentence_labels,
                        packed_next_sentence_weights), b'')
    return line
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--input-glob", help="A glob expression for the input files to read in and pack", required=True, type=str)
parser.add_argument("--output-dir", help="The destination folder for the output files", required=True)
parser.add_argument("--random-seed", help="For shuffling the data", default=12345)
parser.add_argument("--max-files", help="At most how many files to process (limited by RAM)", default=100)
parser.add_argument("--duplication-factor", help="Same as the one passed to create input data", default=1, type=int)
parser.add_argument("--max-sequence-length", help="The maximum number of tokens in an example", default=512, type=int)
parser.add_argument("--max-predictions-per-sequence", help="The maximum number of masked tokens in an un-packed example", default=76, type=int)
parser.add_argument("--max-sequences-per-pack", help="The maximum number of sequences per packed example.", choices=[2, 3], default=3, type=int)
args = parser.parse_args()
random.seed(args.random_seed)
# Input files
input_files = glob.glob(args.input_glob)
if len(input_files) > args.max_files:
input_files = np.random.choice(input_files, size=args.max_files, replace=False)
assert len(input_files) > 0
# Load un-packed dataset
sample_sizes = data_file_format(args.max_sequence_length, args.max_predictions_per_sequence)
load_size = 1 if len(input_files) == 1 else 1024
dataset = CachedDataLoader(input_files, sample_sizes, duplication_factor=args.duplication_factor, batch_size=load_size)
# Put examples into bins depending on their sequence lengths and extract the sequence length
# as an array
sequence_lengths = []
examples_by_length = defaultdict(list)
print("Looping through dataset to collect sequence length information...")
for data in dataset:
input_mask = data[1]
batch_of_lengths = input_mask.sum(1).tolist()
for i, length in enumerate(batch_of_lengths):
examples_by_length[length].append([data[k][i] for k in range(len(data))])
sequence_lengths.extend(batch_of_lengths)
sequence_lengths = np.array(sequence_lengths)
# Pass the array of sequence lengths to the packing algorithm
strategy_set, mixture, padding, slicing = get_packing_recipe(sequence_lengths, args.max_sequence_length, args.max_sequences_per_pack)
# Add the calculated padding
for i in range(1, args.max_sequence_length + 1):
examples_by_length[i].extend([None] * int(padding[i - 1]))
# Shuffle the data
for key in examples_by_length:
random.shuffle(examples_by_length[key])
# Pack and store the data
print(f"\nPacking and writing packed dataset to {args.output_dir}.")
# Slice the data into chunks of max 50k packed examples
example_slices, strategies, part_idx = slice_examples(examples_by_length, slicing, strategy_set, mixture)
print(f"Splitting work into {len(part_idx)} parts.")
start = time.time()
with ProcessPoolExecutor(16) as executor:
work = repeat(args), part_idx, strategies, example_slices
for partial_result in executor.map(parallel_pack_according_to_strategy, *work):
pass
print(f"\nDone. Took: {time.time() - start:3.2f} seconds to pack and write dataset.")
| 46.847025 | 148 | 0.687549 |
import os
import time
import glob
import struct
import random
import argparse
import numpy as np
import pandas as pd
from scipy import optimize
from itertools import repeat, chain
from functools import lru_cache, reduce
from collections import defaultdict
from matplotlib import pyplot as plt
from concurrent.futures import ProcessPoolExecutor
from bert_data.pretraining_dataset import CachedDataLoader, data_file_format
@lru_cache(maxsize=None)
def packing_strategies(start, previous, target, depth):
    """Enumerate all non-decreasing lists of at most `depth` lengths, each
    >= `previous`, whose sum closes the gap from `start` to `target`.

    Results are memoized; callers must not mutate the returned lists.
    """
    gap = target - start
    strategies = []
    # With one slot left, the remainder itself must be an admissible length.
    if depth == 1:
        if gap >= previous:
            strategies.append([gap])
    else:
        # Try each admissible next length and recurse on the remainder.
        for new in range(previous, gap + 1):
            new_gap = target - start - new
            if new_gap == 0:
                strategies.append([new])
            else:
                options = packing_strategies(start + new, new, target, depth - 1)
                for option in options:
                    if len(option) > 0:
                        strategies.append([new] + option)
    return strategies
def get_packing_recipe(sequence_lengths, max_sequence_length, max_sequences_per_pack=3):
    """Solve for a packing recipe over the whole dataset.

    Finds a non-negative integer "mixture" of packing strategies (length
    combinations summing to ``max_sequence_length``) covering the histogram of
    input lengths, adding padding sequences where the solve leaves a deficit.

    Args:
        sequence_lengths: 1-D numpy array with the length of each un-packed example.
        max_sequence_length: the length every packed example must sum to.
        max_sequences_per_pack: at most this many sequences are combined per pack.

    Returns:
        Tuple ``(strategy_set, mixture, padding, slicing)``:
        ``strategy_set`` -- list of strategies (each a sorted list of lengths);
        ``mixture`` -- int64 array, how many times each strategy is used;
        ``padding`` -- per-length count of empty padding sequences to add;
        ``slicing`` -- per-strategy start offsets into the per-length example bins.
    """
    # Histogram of sequence lengths
    histogram, bins = np.histogram(sequence_lengths, bins=np.arange(1, max_sequence_length + 2))
    print("Begin packing pass".center(80, "_"))
    print(f"Unpacked mean sequence length: {sequence_lengths.mean():3.2f}")
    # Make sure all strategies are recipes to pack to the correct sequence length
    strategy_set = packing_strategies(0, 1, max_sequence_length, max_sequences_per_pack)
    for strategy in strategy_set:
        assert(sum(strategy) == max_sequence_length)
    num_strategies = len(strategy_set)
    print(f"Found {num_strategies} unique packing strategies.")
    # Solve the packing equation A@mixture = histogram
    A = np.zeros((max_sequence_length, num_strategies), dtype=np.int32)
    for i in range(num_strategies):
        strategy = strategy_set[i]
        for seq_len in strategy:
            A[seq_len - 1, i] += 1
    # short sequences are inexpensive to add, so should have low residual weights
    padding_cutoff = 8
    w0 = np.ones([max_sequence_length])
    # FIX: this statement was truncated in this copy of the source; restored
    # to down-weight the first `padding_cutoff` length categories.
    w0[:padding_cutoff] = padding_cutoff / (2 * max_sequence_length)
    w0 = np.sqrt(w0)
    # Starting values for the padding and the mixture
    padding = np.zeros([max_sequence_length], dtype=np.int32)
    mixture = np.zeros([num_strategies], dtype=np.int32)
    b = histogram + padding
    # Pack sequences as best as possible, then increase padding accordingly and repeat
    for i in range(0, 20):
        print(f"\nIteration: {i}: sequences still to pack: ", b.sum())
        start = time.time()
        partial_mixture, rnorm = optimize.nnls(np.expand_dims(w0, -1) * A, w0 * b)
        print(f"Solving nnls took {time.time() - start:3.2f} seconds.")
        print(f"Residual norm: {rnorm:3.5e}")
        # Round the floating point solution to integers
        partial_mixture = np.where(partial_mixture < 2, np.rint(partial_mixture), np.floor(partial_mixture))
        # If partial mixture is empty (due to rounding) we follow the gradient
        if partial_mixture.max() == 0:
            grad = A.T @ (b * np.arange(1, max_sequence_length + 1))
            k = int(b.sum() // 2) + 1
            topk = np.argsort(-grad)[:k]
            partial_mixture[topk] += 1
        mixture = mixture + partial_mixture
        # Compute the residuals
        residual = b - A @ partial_mixture
        print(f"Max residual: {abs(residual).max()}")
        print(f"Residual on first 8 categories: {np.around(residual[:8], 4)}")
        print(f"Residual on last 8 categories: {np.around(residual[-8:], 4)}")
        # Add padding based on deficit (negative residual)
        partial_padding = np.where(residual < 0, -residual, 0)
        print(f"Added {(partial_padding*np.arange(1,max_sequence_length+1)).sum():3.2e} tokens of padding.")
        padding = padding + partial_padding
        # Update the rhs vector (remaining surplus sequences)
        b = histogram + padding - A @ mixture
        assert np.all(b >= 0), b
        if b.sum() < 100:
            break
    # Make sure there is no remainder.
    # FIX: use the `max_sequence_length` parameter here (this previously read
    # the module-level `args`, which breaks when the function is imported and
    # silently diverges if the global differs from the argument).
    unpacked_seqlen = np.arange(1, max_sequence_length + 1)[b > 0]
    # Update the mixture to also cover the unpacked sequences
    for l in unpacked_seqlen:
        # Fall back to the depth-1 strategy (the length plus its complement)
        strategy = sorted([l, max_sequence_length - l])
        strategy_index = strategy_set.index(strategy)
        mixture[strategy_index] += b[l-1]
    b = histogram - A @ mixture
    padding = np.where(b < 0, -b, 0)
    b = histogram + padding - A @ mixture
    assert b.sum() == 0
    # Analyze result
    print("Done solving for packing order".center(80, "_"))
    num_padding_tokens = (np.arange(1, max_sequence_length + 1) * padding).sum()
    num_padding_tokens_original = (max_sequence_length - sequence_lengths).sum()
    print(f"Number of sequences dropped: {b.sum()}")
    print(f"Number of strategies utilized: {np.count_nonzero(mixture)}")
    new_number_of_samples = int(mixture.sum())
    compression = 1 - new_number_of_samples / len(sequence_lengths)
    print(f"New number of samples: {new_number_of_samples:3.2f}, original {len(sequence_lengths)}. A compression ratio of {compression:3.3f}")
    print(f"The expected speed-up from packing: {1/(1-compression):3.3f}")
    upper_bound = 1.0 / (1 - ((1 - sequence_lengths / max_sequence_length).mean()))
    print(f"Theoretical upper bound on speed-up: {upper_bound:3.3f}")
    avg_sequences_per_sample = ((A.sum(0) * mixture).sum() - padding.sum()) / new_number_of_samples
    print(f"Average sequences/sample {avg_sequences_per_sample:3.5f}")
    print(f"Added {num_padding_tokens:3.2e} padding tokens. Original dataset used {num_padding_tokens_original:3.2e} padding tokens")
    efficiency = (new_number_of_samples*max_sequence_length - num_padding_tokens)/(new_number_of_samples*max_sequence_length)
    print(f"Packing efficiency (fraction of real tokens): {efficiency:3.4f}")
    print(f"Top 8 strategies")
    topK = np.argsort(-mixture)[:8]
    for i in topK:
        print(f"Strategy {strategy_set[i]} which is used {int(mixture[i])} times")
    print("".center(80, "_"))
    # Figure out the slicing that each strategy should use
    slicing = np.zeros_like(A)
    slicing[:, 1:] = np.cumsum(A * mixture, axis=1)[:, :-1]
    slicing = slicing.T
    mixture = mixture.astype(np.int64)
    return strategy_set, mixture, padding, slicing
def slice_examples(examples_by_length, slicing, strategy_set, repeat_counts):
    """Split the packing work into per-strategy chunks of at most 50k packs.

    Advances the per-strategy offsets in `slicing` in place while carving the
    per-length example bins. Returns (slices, strategies, part_idx), one entry
    per emitted chunk.
    """
    slices = []
    strategies = []
    part_idx = []
    for strategy, slice_offsets, repeat_count in zip(strategy_set, slicing, repeat_counts):
        if repeat_count == 0:
            continue
        # Ceil-divide the repeat count into parts of at most 50000 packs.
        num_parts = repeat_count // 50000
        num_parts = num_parts + int(repeat_count != num_parts * 50000)
        subcounts = (min(50000, repeat_count - 50000 * (i - 1)) for i in range(1, num_parts + 1))
        for part_id, part_count in enumerate(subcounts):
            examples = []
            for k, seq_len in enumerate(strategy):
                slice_start = int(slice_offsets[seq_len - 1])
                slice_end = slice_start + int(part_count)
                slice_offsets[seq_len - 1] = slice_end
                examples.append(examples_by_length[seq_len][slice_start:slice_end])
            slices.append(examples)
            strategies.append(strategy)
            part_idx.append(part_id)
    return slices, strategies, part_idx
def parallel_pack_according_to_strategy(args, part_idx, strategy, examples):
    """Pack one chunk of sequences per *strategy* and write the binary records.

    The output file name encodes the strategy and the chunk index,
    e.g. ``<output_dir>/strategy_128_384_part_0``.
    """
    base_filename = os.path.join(args.output_dir, "strategy_" + "_".join(map(str, strategy)))
    filename = base_filename + f"_part_{part_idx}"
    lines = []
    # zip(*examples) yields one tuple of sequences (one per strategy slot) per pack.
    for i, multi_sequence in enumerate(zip(*examples)):
        lines.append(create_multi_sequence_example(multi_sequence, args.max_predictions_per_sequence,
                                                   args.max_sequence_length, args.max_sequences_per_pack))
    with open(filename, "wb") as f:
        f.writelines(lines)
def create_multi_sequence_example(multi_sequence, max_predictions_per_sequence, max_sequences_per_pack_unused=None, *_ignored, **__ignored):  # noqa: placeholder, see below
    """(signature documented below)"""
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--input-glob", help="A glob expression for the input files to read in and pack", required=True, type=str)
parser.add_argument("--output-dir", help="The destination folder for the output files", required=True)
parser.add_argument("--random-seed", help="For shuffling the data", default=12345)
parser.add_argument("--max-files", help="At most how many files to process (limited by RAM)", default=100)
parser.add_argument("--duplication-factor", help="Same as the one passed to create input data", default=1, type=int)
parser.add_argument("--max-sequence-length", help="The maximum number of tokens in an example", default=512, type=int)
parser.add_argument("--max-predictions-per-sequence", help="The maximum number of masked tokens in an un-packed example", default=76, type=int)
parser.add_argument("--max-sequences-per-pack", help="The maximum number of sequences per packed example.", choices=[2, 3], default=3, type=int)
args = parser.parse_args()
random.seed(args.random_seed)
input_files = glob.glob(args.input_glob)
if len(input_files) > args.max_files:
input_files = np.random.choice(input_files, size=args.max_files, replace=False)
assert len(input_files) > 0
sample_sizes = data_file_format(args.max_sequence_length, args.max_predictions_per_sequence)
load_size = 1 if len(input_files) == 1 else 1024
dataset = CachedDataLoader(input_files, sample_sizes, duplication_factor=args.duplication_factor, batch_size=load_size)
sequence_lengths = []
examples_by_length = defaultdict(list)
print("Looping through dataset to collect sequence length information...")
for data in dataset:
input_mask = data[1]
batch_of_lengths = input_mask.sum(1).tolist()
for i, length in enumerate(batch_of_lengths):
examples_by_length[length].append([data[k][i] for k in range(len(data))])
sequence_lengths.extend(batch_of_lengths)
sequence_lengths = np.array(sequence_lengths)
strategy_set, mixture, padding, slicing = get_packing_recipe(sequence_lengths, args.max_sequence_length, args.max_sequences_per_pack)
for i in range(1, args.max_sequence_length + 1):
examples_by_length[i].extend([None] * int(padding[i - 1]))
for key in examples_by_length:
random.shuffle(examples_by_length[key])
print(f"\nPacking and writing packed dataset to {args.output_dir}.")
example_slices, strategies, part_idx = slice_examples(examples_by_length, slicing, strategy_set, mixture)
print(f"Splitting work into {len(part_idx)} parts.")
start = time.time()
with ProcessPoolExecutor(16) as executor:
work = repeat(args), part_idx, strategies, example_slices
for partial_result in executor.map(parallel_pack_according_to_strategy, *work):
pass
print(f"\nDone. Took: {time.time() - start:3.2f} seconds to pack and write dataset.")
| true | true |
f7342279882a08dd430286cdc06a85f6471afcd1 | 1,098 | py | Python | muller_method/urls.py | mhwahdan/muller-method-Django | ad0f578c0d49add64191b7e4bca74e0bbecfb2b2 | [
"MIT"
] | null | null | null | muller_method/urls.py | mhwahdan/muller-method-Django | ad0f578c0d49add64191b7e4bca74e0bbecfb2b2 | [
"MIT"
] | null | null | null | muller_method/urls.py | mhwahdan/muller-method-Django | ad0f578c0d49add64191b7e4bca74e0bbecfb2b2 | [
"MIT"
] | null | null | null | """muller_method URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import path, include
from API import urls as api_urls
from calculator import urls as calculator_urls
urlpatterns = [
path('admin/', admin.site.urls),
path('', include(calculator_urls)),
path('api/', include(api_urls))
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| 34.3125 | 80 | 0.73224 | from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import path, include
from API import urls as api_urls
from calculator import urls as calculator_urls
urlpatterns = [
path('admin/', admin.site.urls),
path('', include(calculator_urls)),
path('api/', include(api_urls))
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| true | true |
f73424a731778f16d203aec5298efed11c687051 | 10,935 | py | Python | LL_AV/GenerateAVFileReputationPenalties/GenerateAVFileReputationPenalties.py | nkrios/LOCKLEVEL | 0ece6c7ea8b84eced38ef3a89ce2e96a7737234f | [
"CC0-1.0"
] | 46 | 2018-07-12T04:38:43.000Z | 2022-03-20T19:25:11.000Z | LL_AV/GenerateAVFileReputationPenalties/GenerateAVFileReputationPenalties.py | nkrios/LOCKLEVEL | 0ece6c7ea8b84eced38ef3a89ce2e96a7737234f | [
"CC0-1.0"
] | null | null | null | LL_AV/GenerateAVFileReputationPenalties/GenerateAVFileReputationPenalties.py | nkrios/LOCKLEVEL | 0ece6c7ea8b84eced38ef3a89ce2e96a7737234f | [
"CC0-1.0"
] | 17 | 2018-07-04T05:07:08.000Z | 2021-09-15T21:42:18.000Z | """
Generate the penalty XML file used by the antivirus file reputation analyzer.
"""
import argparse
import xml.etree.ElementTree as ET
import xml.dom.minidom
import os
import sys
PENALTIES_ELEMENT = 'penalties'
PENALTY_ELEMENT = 'penalty'
REASON_ELEMENT = 'reason'
REMEDIATION_ELEMENT = 'remediation'
ID_ATTRIBUTE = 'id'
NAME_ATTRIBUTE = 'name'
VALUE_ATTRIBUTE = 'value'
class RemediationDefinition(object):
    """
    Represents a definition for a remediation.

    Serialized by write_penalty_xml as a <remediation> element whose 'id'
    attribute is the identifier and whose text is the description.
    """
    def __init__(self, identifier, description):
        # Unique remediation identifier, e.g. 'UPDATE_DAT'.
        self.identifier = identifier
        # Human-readable instruction shown to the operator.
        self.description = description
class PenaltyDefinition(object):
    """
    Represents a definition for a penalty.

    Serialized by write_penalty_xml as a <penalty> element with nested
    <reason> and <remediation> children.
    """
    def __init__(self, identifier, name, value, reason, remediation):
        # Unique penalty identifier, e.g. 'DAT_OUTDATED'.
        self.identifier = identifier
        # Short human-readable penalty name.
        self.name = name
        # Numeric penalty score; coerced so callers may pass a string.
        self.value = int(value)
        # Reason template containing positional placeholders ({0}, {1}, ...)
        # that the analyzer fills in at report time.
        self.reason = reason
        # List of RemediationDefinition objects for this penalty.
        self.remediation = remediation
def fixed_writexml(self, writer, indent='', addindent='', newl=''):
    """
    Write this XML element to *writer*, keeping text-only elements inline.

    Replacement for xml.dom.minidom.Element.writexml (monkey-patched onto
    the class in main()).  The stock implementation surrounds an element's
    text content with indentation/newline text nodes when pretty-printing;
    this version renders <tag>text</tag> on a single line and skips pure
    text nodes (pretty-printing whitespace) between child elements.

    Arguments mirror minidom's writexml: *indent* is the current prefix,
    *addindent* the per-level increment, *newl* the line separator.
    """
    writer.write(indent + "<" + self.tagName)
    attrs = self._get_attributes()
    # sorted() instead of list.sort(): under Python 3, keys() returns a
    # dict view with no .sort() method, so the original raised
    # AttributeError there.
    for a_name in sorted(attrs.keys()):
        writer.write(" %s=\"" % a_name)
        xml.dom.minidom._write_data(writer, attrs[a_name].value)
        writer.write("\"")
    if self.childNodes:
        if len(self.childNodes) == 1 and self.childNodes[0].nodeType == xml.dom.minidom.Node.TEXT_NODE:
            # Single text child: emit <tag>text</tag> with no added
            # whitespace, then stop.
            writer.write(">")
            self.childNodes[0].writexml(writer, "", "", "")
            writer.write("</%s>%s" % (self.tagName, newl))
            return
        writer.write(">%s" % (newl))
        for node in self.childNodes:
            # Ignore extra XML text elements when writing XML elements.
            if not node.nodeType == xml.dom.minidom.Node.TEXT_NODE:
                node.writexml(writer, indent + addindent, addindent, newl)
        writer.write("%s</%s>%s" % (indent, self.tagName, newl))
    else:
        writer.write("/>%s" % (newl))
def write_penalty_xml(path, penalties):
    """
    Write the penalties XML to the passed in path.

    Builds a <penalties> document where each PenaltyDefinition becomes a
    <penalty> element carrying its id/name/value attributes, a <reason>
    child, and one <remediation> child per remediation.

    :param path: destination file path for the XML document.
    :param penalties: iterable of PenaltyDefinition objects.
    """
    penalties_element = ET.Element(PENALTIES_ELEMENT)
    for penalty in penalties:
        penalty_element = ET.Element(PENALTY_ELEMENT)
        penalty_element.set(ID_ATTRIBUTE, penalty.identifier)
        penalty_element.set(NAME_ATTRIBUTE, penalty.name)
        penalty_element.set(VALUE_ATTRIBUTE, str(penalty.value))
        reason_element = ET.Element(REASON_ELEMENT)
        reason_element.text = penalty.reason
        penalty_element.append(reason_element)
        for remediation in penalty.remediation:
            remediation_element = ET.Element(REMEDIATION_ELEMENT)
            remediation_element.set(ID_ATTRIBUTE, remediation.identifier)
            remediation_element.text = remediation.description
            penalty_element.append(remediation_element)
        penalties_element.append(penalty_element)
    # Round-trip through minidom purely for indentation/pretty-printing.
    xml_string = ET.tostring(penalties_element, 'utf-8')
    reparsed = xml.dom.minidom.parseString(xml_string)
    pretty_xml = reparsed.toprettyxml(indent='\t', newl='\r\n', encoding='UTF-8')
    # toprettyxml(encoding=...) returns encoded bytes, so the file must be
    # opened in binary mode -- text-mode "w" raises TypeError on Python 3.
    # The context manager also guarantees the handle is closed on error
    # (the original leaked it if write() raised).
    with open(path, "wb") as xml_file:
        xml_file.write(pretty_xml)
def get_penalties():
    """
    Return the list of penalty definitions used by the AV analyzer.

    Each penalty carries exactly one remediation; the reason strings are
    templates with positional placeholders filled in by the analyzer at
    report time.
    """
    # (penalty id, display name, score, reason template,
    #  remediation id, remediation description)
    definitions = [
        ('DAT_OUTDATED', 'DAT outdated', 50,
         'DAT file version {0} published on {1} is {2} days old',
         'UPDATE_DAT', 'Update the DAT file'),
        ('DAT_VERY_OUTDATED', 'DAT very outdated', 100,
         'DAT file version {0} published on {1} is {2} days old which is beyond the recommended value of {3} days',
         'UPDATE_DAT', 'Update the DAT file'),
        ('ARTEMIS_SERVER_UNREACHABLE', 'artemis server unreachable', 100,
         'Unable to resolve GTI server DNS address of {0}',
         'CHECK_ARTEMIS_CONNECTIVITY', 'Investigate GTI connectivity issues'),
        # NOTE(review): 'CHECK_ARTMIS_CONFIGURATION' is misspelled in the
        # original and is kept verbatim in case downstream consumers match
        # on the id string.
        ('ARTEMIS_SERVER_UNEXPECTED', 'artemis server unexpected', 50,
         'The GTI server DNS address of {0} resolved to {1} which is not the expected value of {2}',
         'CHECK_ARTMIS_CONFIGURATION', 'Confirm GTI network configuration is correct'),
        ('ARTEMIS_DISABLED', 'artemis disabled', 100,
         'GTI is Disabled for the {0} component',
         'ENABLE_ARTEMIS', 'Enable GTI'),
        ('ARTEMIS_SENSITIVITY_LOW', 'artemis sensitivity low', 50,
         'GTI Sensitivity Level is set to {0} which is below the recommended level of Medium for the {1} component',
         'SET_ARTEMIS_SENSITIVITY_MEDIUM', 'Set the GTI Sensitivity Level to Medium or higher'),
        ('ARTEMIS_SENSITIVITY_VERY_LOW', 'artemis sensitivity very low', 100,
         'GTI Sensitivity Level is set to {0} which is far below the recommended level of Medium for the {1} component',
         'SET_ARTEMIS_SENSITIVITY_MEDIUM', 'Set the GTI Sensitivity Level to Medium or higher'),
        ('AV_ENGINE_OUTDATED', 'AV engine is outdated', 50,
         'The AV engine version is {0} which is older than the latest supported engine version of {1}. Version 5600 and earlier are end of life.',
         'UPDATE_AV_ENGINE', 'Update the AV engine to the latest supported version. See http://www.mcafee.com/us/support/support-eol-scan-engine.aspx for more information.'),
        ('AV_ENGINE_VERY_OUTDATED', 'AV engine is very outdated', 100,
         'The AV engine version is {0} which is older than the minimum recommended engine version of {1}. Version 5600 and earlier are end of life.',
         'UPDATE_AV_ENGINE', 'Update the AV engine to the latest supported version. See http://www.mcafee.com/us/support/support-eol-scan-engine.aspx for more information.'),
        ('VSE_OUTDATED', 'VSE is outdated', 50,
         'The VirusScan Enterprise version is {0} which is older than latest recommended version of {1}',
         'UPDATE_OLD_VSE', 'Update VirusScan Enterprise to the latest recommended version. See https://kc.mcafee.com/corporate/index?page=content&id=kb51111 for more information.'),
        ('VSE_VERY_OUTDATED', 'VSE is very outdated', 100,
         'The VirusScan Enterprise version is {0} which is older than minimum recommended version of {1}',
         'UPDATE_VERY_OLD_VSE', 'Update VirusScan Enterprise to the minimum recommended version. See https://kc.mcafee.com/corporate/index?page=content&id=kb51111 for more information.'),
        ('VSE_SERVICE_NOT_AUTOMATIC', 'VSE service is not automatically starting', 100,
         "The {0} service start mode is set to '{1}' rather than '{2}' so the system is not protected at the next boot",
         'SET_VSE_SERVICE_AUTO', 'Change the VirusScan Enterprise service Startup Type to Automatic'),
        ('VSE_SERVICE_NOT_RUNNING', 'VSE service is not running', 100,
         "The {0} service state is '{1}' rather than '{2}' so the system is not protected",
         'START_VSE_SERVICE', 'Start the VirusScan Enterprise service'),
        ('VSE_STARTUP_DISABLED', 'VSE startup is disabled', 100,
         'VSE on access scanning at system startup is disabled',
         'ENABLE_VSE_STARTUP', 'Enable on access scanning at system startup'),
        ('VSE_NOT_INSTALLED', 'VSE is not installed', 100,
         'VirusScan Enterprise is not installed',
         'INSTALL_VSE', 'Install VirusScan Enterprise'),
    ]
    return [
        PenaltyDefinition(penalty_id, name, value, reason,
                          [RemediationDefinition(remedy_id, remedy_text)])
        for penalty_id, name, value, reason, remedy_id, remedy_text in definitions
    ]
def generate_penalty_xml(path):
    """
    Generate the penalty XML file at the given path.

    Convenience wrapper: builds the full penalty list and serializes it.
    """
    write_penalty_xml(path, get_penalties())
def sanitize_arguments(args):
    """
    Resolve the output path for the penalties XML file.

    Falls back to 'penalties.xml' in the current working directory when no
    path was supplied or the supplied path is not an existing regular file.

    NOTE(review): requiring the target file to already exist looks odd for
    a file this script is about to write -- confirm that is intentional.

    :param args: parsed argparse namespace with a 'penalty_xml' attribute.
    :return: absolute path to use for the penalties XML file.
    """
    supplied = args.penalty_xml
    # os.path.isfile() already implies existence, so a single check covers
    # both conditions of the original (exists AND isfile).
    if supplied is not None and os.path.isfile(supplied):
        chosen = supplied
    else:
        chosen = os.path.join(os.getcwd(), 'penalties.xml')
    return os.path.abspath(chosen)
def main():
    """
    Main function: parse CLI arguments and write the penalties XML file.
    """
    # Monkey-patch minidom so pretty-printing keeps text-only elements
    # inline (see fixed_writexml).
    xml.dom.minidom.Element.writexml = fixed_writexml
    # Parse program arguments
    parser = argparse.ArgumentParser(
        description='Generate the penalty XML file used by the antivirus file reputation analyzer.')
    # Fixed typo in the user-facing help text: "specifiedn" -> "specified".
    parser.add_argument(
        '-p', '--p', dest='penalty_xml', required=False,
        help='The path to create the penalties XML file. Optional. If not '
             'specified then a penalties.xml file is created in the same '
             'location that the script is executing from.')
    args = parser.parse_args()
    penalty_xml_file_path = sanitize_arguments(args)
    generate_penalty_xml(penalty_xml_file_path)
if __name__ == "__main__":
main()
| 45 | 245 | 0.705167 |
import argparse
import xml.etree.ElementTree as ET
import xml.dom.minidom
import os
import sys
PENALTIES_ELEMENT = 'penalties'
PENALTY_ELEMENT = 'penalty'
REASON_ELEMENT = 'reason'
REMEDIATION_ELEMENT = 'remediation'
ID_ATTRIBUTE = 'id'
NAME_ATTRIBUTE = 'name'
VALUE_ATTRIBUTE = 'value'
class RemediationDefinition(object):
def __init__(self, identifier, description):
self.identifier = identifier
self.description = description
class PenaltyDefinition(object):
def __init__(self, identifier, name, value, reason, remediation):
self.identifier = identifier
self.name = name
self.value = int(value)
self.reason = reason
self.remediation = remediation
def fixed_writexml(self, writer, indent='', addindent='', newl=''):
writer.write(indent+"<" + self.tagName)
attrs = self._get_attributes()
a_names = attrs.keys()
a_names.sort()
for a_name in a_names:
writer.write(" %s=\"" % a_name)
xml.dom.minidom._write_data(writer, attrs[a_name].value)
writer.write("\"")
if self.childNodes:
if len(self.childNodes) == 1 and self.childNodes[0].nodeType == xml.dom.minidom.Node.TEXT_NODE:
writer.write(">")
self.childNodes[0].writexml(writer, "", "", "")
writer.write("</%s>%s" % (self.tagName, newl))
return
writer.write(">%s" % (newl))
for node in self.childNodes:
if not node.nodeType == xml.dom.minidom.Node.TEXT_NODE:
node.writexml(writer, indent+addindent, addindent, newl)
writer.write("%s</%s>%s" % (indent, self.tagName, newl))
else:
writer.write("/>%s" % (newl))
def write_penalty_xml(path, penalties):
penalties_element = ET.Element(PENALTIES_ELEMENT)
for penalty in penalties:
penalty_element = ET.Element(PENALTY_ELEMENT)
penalty_element.set(ID_ATTRIBUTE, penalty.identifier)
penalty_element.set(NAME_ATTRIBUTE, penalty.name)
penalty_element.set(VALUE_ATTRIBUTE, str(penalty.value))
reason_element = ET.Element(REASON_ELEMENT)
reason_element.text = penalty.reason
penalty_element.append(reason_element)
for remediation in penalty.remediation:
remediation_element = ET.Element(REMEDIATION_ELEMENT)
remediation_element.set(ID_ATTRIBUTE, remediation.identifier)
remediation_element.text = remediation.description
penalty_element.append(remediation_element)
penalties_element.append(penalty_element)
xml_string = ET.tostring(penalties_element, 'utf-8')
reparsed = xml.dom.minidom.parseString(xml_string)
pretty_xml = reparsed.toprettyxml(indent='\t', newl='\r\n', encoding='UTF-8')
xml_file = open(path, "w")
xml_file.write(pretty_xml)
xml_file.close()
def get_penalties():
penalties = []
penalty_id = 'DAT_OUTDATED'
remediation = RemediationDefinition('UPDATE_DAT', 'Update the DAT file')
penalty = PenaltyDefinition(penalty_id, 'DAT outdated', 50, 'DAT file version {0} published on {1} is {2} days old', [remediation])
penalties.append(penalty)
penalty_id = 'DAT_VERY_OUTDATED'
remediation = RemediationDefinition('UPDATE_DAT', 'Update the DAT file')
penalty = PenaltyDefinition(penalty_id, 'DAT very outdated', 100, 'DAT file version {0} published on {1} is {2} days old which is beyond the recommended value of {3} days', [remediation])
penalties.append(penalty)
penalty_id = 'ARTEMIS_SERVER_UNREACHABLE'
remediation = RemediationDefinition('CHECK_ARTEMIS_CONNECTIVITY', 'Investigate GTI connectivity issues')
penalty = PenaltyDefinition(penalty_id, 'artemis server unreachable', 100, 'Unable to resolve GTI server DNS address of {0}', [remediation])
penalties.append(penalty)
penalty_id = 'ARTEMIS_SERVER_UNEXPECTED'
remediation = RemediationDefinition('CHECK_ARTMIS_CONFIGURATION', 'Confirm GTI network configuration is correct')
penalty = PenaltyDefinition(penalty_id, 'artemis server unexpected', 50, 'The GTI server DNS address of {0} resolved to {1} which is not the expected value of {2}', [remediation])
penalties.append(penalty)
penalty_id = 'ARTEMIS_DISABLED'
remediation = RemediationDefinition('ENABLE_ARTEMIS', 'Enable GTI')
penalty = PenaltyDefinition(penalty_id, 'artemis disabled', 100, 'GTI is Disabled for the {0} component', [remediation])
penalties.append(penalty)
penalty_id = 'ARTEMIS_SENSITIVITY_LOW'
remediation = RemediationDefinition('SET_ARTEMIS_SENSITIVITY_MEDIUM', 'Set the GTI Sensitivity Level to Medium or higher')
penalty = PenaltyDefinition(penalty_id, 'artemis sensitivity low', 50, 'GTI Sensitivity Level is set to {0} which is below the recommended level of Medium for the {1} component', [remediation])
penalties.append(penalty)
penalty_id = 'ARTEMIS_SENSITIVITY_VERY_LOW'
remediation = RemediationDefinition('SET_ARTEMIS_SENSITIVITY_MEDIUM', 'Set the GTI Sensitivity Level to Medium or higher')
penalty = PenaltyDefinition(penalty_id, 'artemis sensitivity very low', 100, 'GTI Sensitivity Level is set to {0} which is far below the recommended level of Medium for the {1} component', [remediation])
penalties.append(penalty)
penalty_id = 'AV_ENGINE_OUTDATED'
remediation = RemediationDefinition('UPDATE_AV_ENGINE', 'Update the AV engine to the latest supported version. See http://www.mcafee.com/us/support/support-eol-scan-engine.aspx for more information.')
penalty = PenaltyDefinition(penalty_id, 'AV engine is outdated', 50, 'The AV engine version is {0} which is older than the latest supported engine version of {1}. Version 5600 and earlier are end of life.', [remediation])
penalties.append(penalty)
penalty_id = 'AV_ENGINE_VERY_OUTDATED'
remediation = RemediationDefinition('UPDATE_AV_ENGINE', 'Update the AV engine to the latest supported version. See http://www.mcafee.com/us/support/support-eol-scan-engine.aspx for more information.')
penalty = PenaltyDefinition(penalty_id, 'AV engine is very outdated', 100, 'The AV engine version is {0} which is older than the minimum recommended engine version of {1}. Version 5600 and earlier are end of life.', [remediation])
penalties.append(penalty)
penalty_id = 'VSE_OUTDATED'
remediation = RemediationDefinition('UPDATE_OLD_VSE', 'Update VirusScan Enterprise to the latest recommended version. See https://kc.mcafee.com/corporate/index?page=content&id=kb51111 for more information.')
penalty = PenaltyDefinition(penalty_id, 'VSE is outdated', 50, 'The VirusScan Enterprise version is {0} which is older than latest recommended version of {1}', [remediation])
penalties.append(penalty)
penalty_id = 'VSE_VERY_OUTDATED'
remediation = RemediationDefinition('UPDATE_VERY_OLD_VSE', 'Update VirusScan Enterprise to the minimum recommended version. See https://kc.mcafee.com/corporate/index?page=content&id=kb51111 for more information.')
penalty = PenaltyDefinition(penalty_id, 'VSE is very outdated', 100, 'The VirusScan Enterprise version is {0} which is older than minimum recommended version of {1}', [remediation])
penalties.append(penalty)
penalty_id = 'VSE_SERVICE_NOT_AUTOMATIC'
remediation = RemediationDefinition('SET_VSE_SERVICE_AUTO', 'Change the VirusScan Enterprise service Startup Type to Automatic')
penalty = PenaltyDefinition(penalty_id, 'VSE service is not automatically starting', 100, "The {0} service start mode is set to '{1}' rather than '{2}' so the system is not protected at the next boot", [remediation])
penalties.append(penalty)
penalty_id = 'VSE_SERVICE_NOT_RUNNING'
remediation = RemediationDefinition('START_VSE_SERVICE', 'Start the VirusScan Enterprise service')
penalty = PenaltyDefinition(penalty_id, 'VSE service is not running', 100, "The {0} service state is '{1}' rather than '{2}' so the system is not protected", [remediation])
penalties.append(penalty)
penalty_id = 'VSE_STARTUP_DISABLED'
remediation = RemediationDefinition('ENABLE_VSE_STARTUP', 'Enable on access scanning at system startup')
penalty = PenaltyDefinition(penalty_id, 'VSE startup is disabled', 100, 'VSE on access scanning at system startup is disabled', [remediation])
penalties.append(penalty)
penalty_id = 'VSE_NOT_INSTALLED'
remediation = RemediationDefinition('INSTALL_VSE', 'Install VirusScan Enterprise')
penalty = PenaltyDefinition(penalty_id, 'VSE is not installed', 100, "VirusScan Enterprise is not installed", [remediation])
penalties.append(penalty)
return penalties
def generate_penalty_xml(path):
penalties = get_penalties()
write_penalty_xml(path, penalties)
def sanitize_arguments(args):
if args.penalty_xml is None:
penalty_xml_file_path = os.path.join(os.getcwd(), 'penalties.xml')
else:
if os.path.exists(args.penalty_xml) and os.path.isfile(args.penalty_xml):
penalty_xml_file_path = args.penalty_xml
else:
penalty_xml_file_path = os.path.join(os.getcwd(), 'penalties.xml')
return os.path.abspath(penalty_xml_file_path)
def main():
xml.dom.minidom.Element.writexml = fixed_writexml
parser = argparse.ArgumentParser(description='Generate the penalty XML file used by the antivirus file reputation analyzer.')
parser.add_argument('-p', '--p', dest='penalty_xml', required=False, help='The path to create the penalties XML file. Optional. If not specifiedn then a penalties.xml file is created in the same location that the script is executing from.')
args = parser.parse_args()
penalty_xml_file_path = sanitize_arguments(args)
generate_penalty_xml(penalty_xml_file_path)
if __name__ == "__main__":
main()
| true | true |
f73424c6a01e6b3045d0f8b6a981195fd7ef3b08 | 562 | py | Python | accounts/migrations/0017_auto_20150815_1613.py | bharathramh92/easy-ecom | 164f1b6e11559386efef474c5f50d33b4ddea792 | [
"Apache-2.0"
] | null | null | null | accounts/migrations/0017_auto_20150815_1613.py | bharathramh92/easy-ecom | 164f1b6e11559386efef474c5f50d33b4ddea792 | [
"Apache-2.0"
] | null | null | null | accounts/migrations/0017_auto_20150815_1613.py | bharathramh92/easy-ecom | 164f1b6e11559386efef474c5f50d33b4ddea792 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    dependencies = [
        ('accounts', '0016_auto_20150814_1727'),
    ]
    # Swap the stored profile picture for an externally hosted URL:
    # drop UserExtended.profile_picture and add profile_picture_url
    # (optional, up to 200 characters).
    operations = [
        migrations.RemoveField(
            model_name='userextended',
            name='profile_picture',
        ),
        migrations.AddField(
            model_name='userextended',
            name='profile_picture_url',
            field=models.CharField(null=True, max_length=200, blank=True),
        ),
    ]
| 23.416667 | 74 | 0.606762 |
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('accounts', '0016_auto_20150814_1727'),
]
operations = [
migrations.RemoveField(
model_name='userextended',
name='profile_picture',
),
migrations.AddField(
model_name='userextended',
name='profile_picture_url',
field=models.CharField(null=True, max_length=200, blank=True),
),
]
| true | true |
f734260e0d34c5734639347af6aabfa594fe3c70 | 4,860 | py | Python | docs/conf.py | harvard-nrg/mano | cbb54337f6fcae8905f1a4ab91f7beeac9ce041c | [
"BSD-3-Clause"
] | 1 | 2019-03-22T18:29:12.000Z | 2019-03-22T18:29:12.000Z | docs/conf.py | harvard-nrg/mano | cbb54337f6fcae8905f1a4ab91f7beeac9ce041c | [
"BSD-3-Clause"
] | 5 | 2019-04-08T20:32:29.000Z | 2022-02-21T21:46:11.000Z | docs/conf.py | harvard-nrg/mano | cbb54337f6fcae8905f1a4ab91f7beeac9ce041c | [
"BSD-3-Clause"
] | 1 | 2018-07-24T06:51:09.000Z | 2018-07-24T06:51:09.000Z | from recommonmark.parser import CommonMarkParser
source_parsers = {
'.md': CommonMarkParser,
}
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = u'mano'
copyright = u'2018, Neuroinformatics Research Group'
author = u'Neuroinformatics Research Group'
# The short X.Y version
version = u''
# The full version, including alpha/beta/rc tags
release = u''
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
source_suffix = ['.rst', '.md']
#source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = [u'_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'manodoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'mano.tex', u'mano Documentation',
u'Neuroinformatics Research Group', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'mano', u'mano Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'mano', u'mano Documentation',
author, 'mano', 'One line description of project.',
'Miscellaneous'),
]
| 30 | 79 | 0.657819 | from recommonmark.parser import CommonMarkParser
source_parsers = {
'.md': CommonMarkParser,
}
project = u'mano'
copyright = u'2018, Neuroinformatics Research Group'
author = u'Neuroinformatics Research Group'
version = u''
release = u''
extensions = [
]
templates_path = ['_templates']
source_suffix = ['.rst', '.md']
master_doc = 'index'
language = None
exclude_patterns = [u'_build', 'Thumbs.db', '.DS_Store']
pygments_style = 'sphinx'
html_theme = 'alabaster'
html_static_path = ['_static']
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'manodoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'mano.tex', u'mano Documentation',
u'Neuroinformatics Research Group', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'mano', u'mano Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'mano', u'mano Documentation',
author, 'mano', 'One line description of project.',
'Miscellaneous'),
]
| true | true |
f73426a6d09771817c4ae4bd334dd2b536e8b40a | 1,680 | py | Python | tapioca_nessus/tapioca_nessus.py | siteblindado/tapioca-nessus | b1e7896c5c2ee6d4bce5d3caf97004f2be445c7e | [
"MIT"
] | null | null | null | tapioca_nessus/tapioca_nessus.py | siteblindado/tapioca-nessus | b1e7896c5c2ee6d4bce5d3caf97004f2be445c7e | [
"MIT"
] | null | null | null | tapioca_nessus/tapioca_nessus.py | siteblindado/tapioca-nessus | b1e7896c5c2ee6d4bce5d3caf97004f2be445c7e | [
"MIT"
] | null | null | null | # coding: utf-8
import json
from requests.auth import HTTPBasicAuth
from tapioca import (
TapiocaAdapter, generate_wrapper_from_adapter, JSONAdapterMixin)
from .resource_mapping import RESOURCE_MAPPING
class NessusClientAdapter(JSONAdapterMixin, TapiocaAdapter):
    """Tapioca adapter for the Tenable Nessus REST API."""

    # Resolved per-client in get_api_root from the api_params.
    api_root = None
    resource_mapping = RESOURCE_MAPPING

    def get_api_root(self, api_params):
        """Return the Nessus server base URL (defaults to a local install)."""
        return api_params.get('api_root', 'https://localhost:8834')

    def get_request_kwargs(self, api_params, *args, **kwargs):
        """Attach authentication to every outgoing request.

        Prefers API-key authentication (X-Apikeys header) when both
        'accessKey' and 'secretKey' are supplied; otherwise falls back to
        HTTP basic auth built from 'user'/'password'.
        """
        params = super(NessusClientAdapter, self).get_request_kwargs(
            api_params, *args, **kwargs)
        access_key = api_params.get('accessKey')
        secret_key = api_params.get('secretKey')
        if access_key and secret_key:
            params['headers'].update(
                {'X-Apikeys': f'accessKey={access_key}; secretKey={secret_key}'})
        else:
            params['auth'] = HTTPBasicAuth(
                api_params.get('user'), api_params.get('password'))
        return params

    def get_iterator_list(self, response_data):
        """The API returns plain lists, so iterate the response data as-is."""
        return response_data

    def get_iterator_next_request_kwargs(self, iterator_request_kwargs,
                                         response_data, response):
        """No pagination: returning None stops iteration after one page."""
        pass

    def response_to_native(self, response):
        """Convert a response to native data.

        Octet-stream bodies (e.g. exported scan reports) are returned as
        the raw response object; non-empty bodies are decoded as JSON;
        empty bodies yield None.
        """
        # Use the public case-insensitive headers mapping instead of
        # reaching into the private CaseInsensitiveDict._store attribute,
        # which is an implementation detail of requests.
        if response.headers.get('Content-Type') == 'application/octet-stream':
            return response
        elif response.content.strip():
            return response.json()

    def format_data_to_request(self, data):
        """Serialize the request payload as JSON.

        Payloads json.dumps cannot handle (e.g. pre-encoded bodies) are
        passed through unchanged.
        """
        if data:
            try:
                return json.dumps(data)
            # Narrowed from a bare except: json.dumps raises TypeError for
            # unserializable objects and ValueError for circular refs; a
            # bare except also swallowed KeyboardInterrupt/SystemExit.
            except (TypeError, ValueError):
                return data
| 31.111111 | 102 | 0.663095 |
import json
from requests.auth import HTTPBasicAuth
from tapioca import (
TapiocaAdapter, generate_wrapper_from_adapter, JSONAdapterMixin)
from .resource_mapping import RESOURCE_MAPPING
class NessusClientAdapter(JSONAdapterMixin, TapiocaAdapter):
api_root = None
resource_mapping = RESOURCE_MAPPING
def get_api_root(self, api_params):
return api_params.get('api_root', 'https://localhost:8834')
def get_request_kwargs(self, api_params, *args, **kwargs):
params = super(NessusClientAdapter, self).get_request_kwargs(
api_params, *args, **kwargs)
access_key = api_params.get('accessKey')
secret_key = api_params.get('secretKey')
if access_key and secret_key:
params['headers'].update({'X-Apikeys': f'accessKey={access_key}; secretKey={secret_key}'})
else:
params['auth'] = HTTPBasicAuth(api_params.get('user'), api_params.get('password'))
return params
def get_iterator_list(self, response_data):
return response_data
def get_iterator_next_request_kwargs(self, iterator_request_kwargs,
response_data, response):
pass
def response_to_native(self, response):
if response.headers._store['content-type'][1] == 'application/octet-stream':
return response
elif response.content.strip():
return response.json()
def format_data_to_request(self, data):
if data:
try:
return json.dumps(data)
except:
return data
Nessus = generate_wrapper_from_adapter(NessusClientAdapter)
| true | true |
f734280e790810cccc43a5498c22c93115eb39ae | 1,436 | py | Python | tests/xl_methods.py | leobelen/pydatajson | bcc6bbbb9d6554c82a658542522330d8e6337731 | [
"MIT"
] | null | null | null | tests/xl_methods.py | leobelen/pydatajson | bcc6bbbb9d6554c82a658542522330d8e6337731 | [
"MIT"
] | null | null | null | tests/xl_methods.py | leobelen/pydatajson | bcc6bbbb9d6554c82a658542522330d8e6337731 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
xl_methods
Métodos ligeramente modificados a partir de abenassi/xlseries para manipular
archivos en formato XLSX (https://github.com/abenassi/xlseries).
"""
from six import string_types, text_type
def compare_cells(wb1, wb2):
    """Compare two excel workbooks cell by cell, worksheet by worksheet."""
    # Walk the worksheets of both workbooks in lockstep; any mismatch is
    # raised as an AssertionError by the per-sheet comparison.
    sheet_pairs = zip(wb1.worksheets, wb2.worksheets)
    for sheet_a, sheet_b in sheet_pairs:
        compare_cells_ws(sheet_a, sheet_b)
    return True
def compare_cells_ws(ws1, ws2):
    """Compare two worksheets based on row iteration.

    Raises AssertionError on the first pair of cells whose normalized
    values differ; the message includes both values and the offending
    cell's row/column. Returns True when every compared cell matches.
    Extra rows/cells in the longer sheet are ignored (zip truncates).
    """
    # compare each cell of each worksheet
    for row1, row2 in zip(ws1.rows, ws2.rows):
        for cell1, cell2 in zip(row1, row2):
            # build the failure message up front so it can name the cell
            msg = "".join([
                _safe_str(cell1.value), " != ",
                _safe_str(cell2.value), "\nrow: ",
                _safe_str(cell1.row), " column: ",
                _safe_str(cell1.column)
            ])
            # compare normalized values (strings stripped, None -> "")
            value1 = normalize_value(cell1.value)
            value2 = normalize_value(cell2.value)
            assert value1 == value2, msg
    return True
def normalize_value(value):
    """Normalize a cell value for comparison.

    Strings lose their surrounding whitespace, None becomes the empty
    string, and every other value passes through unchanged.
    """
    if value is None:
        return ""
    if isinstance(value, string_types):
        return value.strip()
    return value
def _safe_str(value):
    # Coerce any value (including None) to text for message building;
    # text_type is six's unicode-on-py2 / str-on-py3.
    return text_type(value)
| 25.192982 | 77 | 0.623955 |
from six import string_types, text_type
def compare_cells(wb1, wb2):
for ws1, ws2 in zip(wb1.worksheets, wb2.worksheets):
compare_cells_ws(ws1, ws2)
return True
def compare_cells_ws(ws1, ws2):
for row1, row2 in zip(ws1.rows, ws2.rows):
for cell1, cell2 in zip(row1, row2):
msg = "".join([
_safe_str(cell1.value), " != ",
_safe_str(cell2.value), "\nrow: ",
_safe_str(cell1.row), " column: ",
_safe_str(cell1.column)
])
value1 = normalize_value(cell1.value)
value2 = normalize_value(cell2.value)
assert value1 == value2, msg
return True
def normalize_value(value):
if isinstance(value, string_types):
return value.strip()
elif value is None:
return ""
else:
return value
def _safe_str(value):
return text_type(value)
| true | true |
f73429aa6bd682cc0f815f80c943f752943a6a16 | 1,904 | py | Python | CNNModel.py | night2wolf/CS490-ML-Lab2 | c25e6a58829939eccc84b54a50f14c7d3b0c8c2f | [
"MIT"
] | null | null | null | CNNModel.py | night2wolf/CS490-ML-Lab2 | c25e6a58829939eccc84b54a50f14c7d3b0c8c2f | [
"MIT"
] | null | null | null | CNNModel.py | night2wolf/CS490-ML-Lab2 | c25e6a58829939eccc84b54a50f14c7d3b0c8c2f | [
"MIT"
] | null | null | null | from keras import models, layers
class CNNModel:
@staticmethod
def generateModel(input):
model = models.Sequential()
model.add(layers.Conv2D(filters=32, kernel_size=(5,5), activation='relu', input_shape=input.shape[1:]))
model.add(layers.MaxPool2D(pool_size=(2, 2)))
model.add(layers.Conv2D(filters=64, kernel_size=(3, 3), activation='relu'))
model.add(layers.MaxPool2D(pool_size=(2, 2)))
model.add(layers.Dropout(rate=0.2))
model.add(layers.Flatten())
model.add(layers.Dense(256, activation='relu'))
model.add(layers.Dropout(rate=0.5))
model.add(layers.Dense(8, activation='softmax'))
model.compile(loss='categorical_crossentropy',optimizer='adam',metrics=['accuracy'])
return model
@staticmethod
def createsentimentLTSMmodel(max_features):
model = models.Sequential()
model.add(layers.Embedding(max_features, 128,input_length = 45))
model.add(layers.LSTM(128, dropout=0.2, recurrent_dropout=0.2))
model.add(layers.Dense(5,activation='softmax'))
model.compile(loss = 'categorical_crossentropy', optimizer='adam',metrics = ['accuracy'])
return model
@staticmethod
def createsentimentCNNmodel(max_features):
model = models.Sequential()
model.add(layers.Embedding(max_features, 128,input_length = 45))
model.add(layers.Flatten())
model.add(layers.Dense(32,activation='relu'))
model.add(layers.Dropout(rate=0.2))
model.add(layers.Dense(64,activation='relu'))
model.add(layers.Dropout(rate=0.33))
model.add(layers.Dense(256, activation='relu'))
model.add(layers.Dropout(rate=0.5))
model.add(layers.Dense(5,activation='softmax'))
model.compile(loss = 'categorical_crossentropy', optimizer='adam',metrics = ['accuracy'])
return model | 48.820513 | 111 | 0.661765 | from keras import models, layers
class CNNModel:
@staticmethod
def generateModel(input):
model = models.Sequential()
model.add(layers.Conv2D(filters=32, kernel_size=(5,5), activation='relu', input_shape=input.shape[1:]))
model.add(layers.MaxPool2D(pool_size=(2, 2)))
model.add(layers.Conv2D(filters=64, kernel_size=(3, 3), activation='relu'))
model.add(layers.MaxPool2D(pool_size=(2, 2)))
model.add(layers.Dropout(rate=0.2))
model.add(layers.Flatten())
model.add(layers.Dense(256, activation='relu'))
model.add(layers.Dropout(rate=0.5))
model.add(layers.Dense(8, activation='softmax'))
model.compile(loss='categorical_crossentropy',optimizer='adam',metrics=['accuracy'])
return model
@staticmethod
def createsentimentLTSMmodel(max_features):
model = models.Sequential()
model.add(layers.Embedding(max_features, 128,input_length = 45))
model.add(layers.LSTM(128, dropout=0.2, recurrent_dropout=0.2))
model.add(layers.Dense(5,activation='softmax'))
model.compile(loss = 'categorical_crossentropy', optimizer='adam',metrics = ['accuracy'])
return model
@staticmethod
def createsentimentCNNmodel(max_features):
model = models.Sequential()
model.add(layers.Embedding(max_features, 128,input_length = 45))
model.add(layers.Flatten())
model.add(layers.Dense(32,activation='relu'))
model.add(layers.Dropout(rate=0.2))
model.add(layers.Dense(64,activation='relu'))
model.add(layers.Dropout(rate=0.33))
model.add(layers.Dense(256, activation='relu'))
model.add(layers.Dropout(rate=0.5))
model.add(layers.Dense(5,activation='softmax'))
model.compile(loss = 'categorical_crossentropy', optimizer='adam',metrics = ['accuracy'])
return model | true | true |
f73429bdedfeb716fa240ffa82ccccfd74287e74 | 375 | py | Python | python/numpy/weldnumpy/__init__.py | hvanhovell/weld | 0d9e55c71f2a4e0040cb0687d3eb53f54ec4ba94 | [
"BSD-3-Clause"
] | null | null | null | python/numpy/weldnumpy/__init__.py | hvanhovell/weld | 0d9e55c71f2a4e0040cb0687d3eb53f54ec4ba94 | [
"BSD-3-Clause"
] | null | null | null | python/numpy/weldnumpy/__init__.py | hvanhovell/weld | 0d9e55c71f2a4e0040cb0687d3eb53f54ec4ba94 | [
"BSD-3-Clause"
] | null | null | null | from weldarray import *
from weldnumpy import *
# FIXME: Should this be in weldnump? pytest gives errors when trying to import
# weldarray in weldnumpy...so keeping it here for now.
def array(arr, *args, **kwargs):
    '''
    Wrapper around weldarray - first create np.array and then convert to
    weldarray.

    Extra positional/keyword arguments are forwarded to ``np.array``
    unchanged. NOTE(review): the name deliberately mirrors ``numpy.array``
    (and shadows it for star-importers), presumably so weldnumpy can act
    as a drop-in replacement -- confirm before renaming.
    '''
    return weldarray(np.array(arr, *args, **kwargs))
| 26.785714 | 78 | 0.698667 | from weldarray import *
from weldnumpy import *
def array(arr, *args, **kwargs):
return weldarray(np.array(arr, *args, **kwargs))
| true | true |
f73429e8b78185619483984f8453f70ac968279b | 752 | py | Python | galaxy/main/migrations/0015_auto_20150917_1504.py | maxamillion/galaxy | 0460baf9d2c8da0a0e88c7975eca2e3abcc82f23 | [
"Apache-2.0"
] | 1 | 2017-06-18T21:46:01.000Z | 2017-06-18T21:46:01.000Z | galaxy/main/migrations/0015_auto_20150917_1504.py | maxamillion/galaxy | 0460baf9d2c8da0a0e88c7975eca2e3abcc82f23 | [
"Apache-2.0"
] | 1 | 2021-06-10T23:59:59.000Z | 2021-06-10T23:59:59.000Z | galaxy/main/migrations/0015_auto_20150917_1504.py | connectthefuture/galaxy | 841821957680643e07c1a94fb609f8e4117c19d1 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Schema migration (likely auto-generated by `makemigrations`): redefines
    # the 'categories' and 'tags' many-to-many fields on Role with
    # editable=False and blank=True. Do not hand-edit the operations.
    dependencies = [
        ('main', '0014_auto_20150917_1211'),
    ]
    operations = [
        migrations.AlterField(
            model_name='role',
            name='categories',
            field=models.ManyToManyField(related_name='categories', editable=False, to='main.Category', blank=True, help_text=b'', verbose_name=b'Categories'),
        ),
        migrations.AlterField(
            model_name='role',
            name='tags',
            field=models.ManyToManyField(related_name='roles', editable=False, to='main.Tag', blank=True, help_text=b'', verbose_name=b'Tags'),
        ),
    ]
| 30.08 | 159 | 0.625 |
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('main', '0014_auto_20150917_1211'),
]
operations = [
migrations.AlterField(
model_name='role',
name='categories',
field=models.ManyToManyField(related_name='categories', editable=False, to='main.Category', blank=True, help_text=b'', verbose_name=b'Categories'),
),
migrations.AlterField(
model_name='role',
name='tags',
field=models.ManyToManyField(related_name='roles', editable=False, to='main.Tag', blank=True, help_text=b'', verbose_name=b'Tags'),
),
]
| true | true |
f7342a7e37fa9c6d1e46a7d7b16847841d76e379 | 1,002 | py | Python | setup.py | virink/flask_waf | 9a0cfafb988c2bf043570e0877b1c3e43881affb | [
"BSD-3-Clause"
] | 10 | 2018-09-29T01:42:01.000Z | 2020-09-06T17:39:39.000Z | setup.py | virink/flask_waf | 9a0cfafb988c2bf043570e0877b1c3e43881affb | [
"BSD-3-Clause"
] | 1 | 2018-09-29T02:41:01.000Z | 2018-09-29T14:35:31.000Z | setup.py | virink/flask_waf | 9a0cfafb988c2bf043570e0877b1c3e43881affb | [
"BSD-3-Clause"
] | 1 | 2019-08-27T17:04:39.000Z | 2019-08-27T17:04:39.000Z | # -*- coding: utf-8 -*-
"""
Flask-Waf
---------
Adds server Waf support to your application.
:copyright: (c) 2018 by Virink
:license: BSD, see LICENSE for more details.
"""
from setuptools import setup
# Packaging metadata for the Flask-Waf extension: ships the single
# 'flask_waf' package, with Flask >= 0.8 as the only runtime dependency.
setup(
    name='Flask-Waf',
    version='1.0',
    url='https://github.com/virink/flask_waf',
    license='BSD',
    author='Virink',
    author_email='virink@outlook.com',
    description='Just a webwaf for log',
    long_description=__doc__,  # reuse the module docstring above
    packages=['flask_waf'],
    zip_safe=False,
    include_package_data=True,
    platforms='any',
    install_requires=[
        'Flask>=0.8'
    ],
    classifiers=[
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
        'Topic :: Software Development :: Libraries :: Python Modules'
    ]
)
| 25.05 | 70 | 0.605788 |
from setuptools import setup
setup(
name='Flask-Waf',
version='1.0',
url='https://github.com/virink/flask_waf',
license='BSD',
author='Virink',
author_email='virink@outlook.com',
description='Just a webwaf for log',
long_description=__doc__,
packages=['flask_waf'],
zip_safe=False,
include_package_data=True,
platforms='any',
install_requires=[
'Flask>=0.8'
],
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules'
]
)
| true | true |
f7342b52669ad7a60e120c7f62bc0adfaa142f7f | 360 | py | Python | configs/pspnet/pspnet_r50-d8_769x769_40k_cityscapes.py | vietawake/mmSegmentation | 1f643d6d81708ebf5726c48f66d02c70fe99fe00 | [
"Apache-2.0"
] | null | null | null | configs/pspnet/pspnet_r50-d8_769x769_40k_cityscapes.py | vietawake/mmSegmentation | 1f643d6d81708ebf5726c48f66d02c70fe99fe00 | [
"Apache-2.0"
] | null | null | null | configs/pspnet/pspnet_r50-d8_769x769_40k_cityscapes.py | vietawake/mmSegmentation | 1f643d6d81708ebf5726c48f66d02c70fe99fe00 | [
"Apache-2.0"
] | null | null | null | _base_ = [
'../_base_/models/pspnet_r50-d8.py',
'../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py',
'../_base_/schedules/schedule_40k.py'
]
# Model overrides on top of the inherited _base_ config: both segmentation
# heads resize with align_corners=True, and testing runs sliding-window
# inference over 769x769 crops with a 513-pixel stride.
model = dict(
    decode_head=dict(align_corners=True),
    auxiliary_head=dict(align_corners=True),
    test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513)))
| 36 | 80 | 0.672222 | _base_ = [
'../_base_/models/pspnet_r50-d8.py',
'../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py',
'../_base_/schedules/schedule_40k.py'
]
model = dict(
decode_head=dict(align_corners=True),
auxiliary_head=dict(align_corners=True),
test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513)))
| true | true |
f7342bb4b595c8de89773e3d4fa123515307cda0 | 3,384 | py | Python | examples/interface/LPAffineDense.py | pjt1988/Elemental | 71d3e2b98829594e9f52980a8b1ef7c1e99c724b | [
"Apache-2.0"
] | 473 | 2015-01-11T03:22:11.000Z | 2022-03-31T05:28:39.000Z | examples/interface/LPAffineDense.py | pjt1988/Elemental | 71d3e2b98829594e9f52980a8b1ef7c1e99c724b | [
"Apache-2.0"
] | 205 | 2015-01-10T20:33:45.000Z | 2021-07-25T14:53:25.000Z | examples/interface/LPAffineDense.py | pjt1988/Elemental | 71d3e2b98829594e9f52980a8b1ef7c1e99c724b | [
"Apache-2.0"
] | 109 | 2015-02-16T14:06:42.000Z | 2022-03-23T21:34:26.000Z | #
# Copyright (c) 2009-2016, Jack Poulson
# All rights reserved.
#
# This file is part of Elemental and is under the BSD 2-Clause License,
# which can be found in the LICENSE file in the root directory, or at
# http://opensource.org/licenses/BSD-2-Clause
#
import El
m = 500
n = 1000
k = 750
testMehrotra = True
testIPF = False
manualInit = False
display = False
progress = True
worldRank = El.mpi.WorldRank()
worldSize = El.mpi.WorldSize()
# Make a dense matrix
def RectangDense(height,width):
    # Build a height x width Elemental distributed matrix filled with
    # Gaussian-distributed entries.
    A = El.DistMatrix()
    El.Gaussian( A, height, width )
    return A
A = RectangDense(m,n)
G = RectangDense(k,n)
# Generate a (b,h) which implies a primal feasible (x,s)
# ======================================================
xGen = El.DistMatrix()
# b := A xGen
# -----------
El.Gaussian(xGen,n,1)
b = El.DistMatrix()
El.Zeros( b, m, 1 )
El.Gemv( El.NORMAL, 1., A, xGen, 0., b )
# h := G xGen + sGen
# ------------------
sGen = El.DistMatrix()
El.Uniform(sGen,k,1,0.5,0.5)
h = El.DistMatrix()
El.Copy( sGen, h )
El.Gemv( El.NORMAL, 1., G, xGen, 1., h )
# Generate a c which implies a dual feasible (y,z)
# ================================================
yGen = El.DistMatrix()
El.Gaussian(yGen,m,1)
zGen = El.DistMatrix()
El.Uniform(zGen,k,1,0.5,0.5)
c = El.DistMatrix()
El.Zeros( c, n, 1 )
El.Gemv( El.TRANSPOSE, -1., A, yGen, 1., c )
El.Gemv( El.TRANSPOSE, -1., G, zGen, 1., c )
if display:
El.Display( A, "A" )
El.Display( G, "G" )
El.Display( b, "b" )
El.Display( c, "c" )
El.Display( h, "h" )
# Set up the control structure (and possibly initial guesses)
# ===========================================================
ctrl = El.LPAffineCtrl_d()
xOrig = El.DistMatrix()
yOrig = El.DistMatrix()
zOrig = El.DistMatrix()
sOrig = El.DistMatrix()
if manualInit:
El.Uniform(xOrig,n,1,0.5,0.4999)
El.Uniform(yOrig,m,1,0.5,0.4999)
El.Uniform(zOrig,k,1,0.5,0.4999)
El.Uniform(sOrig,k,1,0.5,0.4999)
x = El.DistMatrix()
y = El.DistMatrix()
z = El.DistMatrix()
s = El.DistMatrix()
if testMehrotra:
ctrl.approach = El.LP_MEHROTRA
ctrl.mehrotraCtrl.primalInit = manualInit
ctrl.mehrotraCtrl.dualInit = manualInit
ctrl.mehrotraCtrl.progress = progress
El.Copy( xOrig, x )
El.Copy( yOrig, y )
El.Copy( zOrig, z )
El.Copy( sOrig, s )
startMehrotra = El.mpi.Time()
El.LPAffine(A,G,b,c,h,x,y,z,s,ctrl)
endMehrotra = El.mpi.Time()
if worldRank == 0:
print('Mehrotra time: {} seconds'.format(endMehrotra-startMehrotra))
if display:
El.Display( x, "x Mehrotra" )
El.Display( y, "y Mehrotra" )
El.Display( z, "z Mehrotra" )
El.Display( s, "s Mehrotra" )
obj = El.Dot(c,x)
if worldRank == 0:
print('Mehrotra c^T x = {}'.format(obj))
if testIPF:
ctrl.approach = El.LP_IPF
ctrl.ipfCtrl.primalInit = manualInit
ctrl.ipfCtrl.dualInit = manualInit
ctrl.ipfCtrl.progress = progress
ctrl.ipfCtrl.lineSearchCtrl.progress = progress
El.Copy( xOrig, x )
El.Copy( yOrig, y )
El.Copy( zOrig, z )
El.Copy( sOrig, s )
startIPF = El.mpi.Time()
El.LPAffine(A,G,b,c,h,x,y,z,s,ctrl)
endIPF = El.mpi.Time()
if worldRank == 0:
print('IPF time: {} seconds'.format(endIPF-startIPF))
if display:
El.Display( x, "x IPF" )
El.Display( y, "y IPF" )
El.Display( z, "z IPF" )
El.Display( s, "s IPF" )
obj = El.Dot(c,x)
if worldRank == 0:
print('IPF c^T x = {}'.format(obj))
El.Finalize()
| 25.066667 | 73 | 0.610225 |
import El
m = 500
n = 1000
k = 750
testMehrotra = True
testIPF = False
manualInit = False
display = False
progress = True
worldRank = El.mpi.WorldRank()
worldSize = El.mpi.WorldSize()
def RectangDense(height,width):
A = El.DistMatrix()
El.Gaussian( A, height, width )
return A
A = RectangDense(m,n)
G = RectangDense(k,n)
xGen = El.DistMatrix()
El.Gaussian(xGen,n,1)
b = El.DistMatrix()
El.Zeros( b, m, 1 )
El.Gemv( El.NORMAL, 1., A, xGen, 0., b )
sGen = El.DistMatrix()
El.Uniform(sGen,k,1,0.5,0.5)
h = El.DistMatrix()
El.Copy( sGen, h )
El.Gemv( El.NORMAL, 1., G, xGen, 1., h )
yGen = El.DistMatrix()
El.Gaussian(yGen,m,1)
zGen = El.DistMatrix()
El.Uniform(zGen,k,1,0.5,0.5)
c = El.DistMatrix()
El.Zeros( c, n, 1 )
El.Gemv( El.TRANSPOSE, -1., A, yGen, 1., c )
El.Gemv( El.TRANSPOSE, -1., G, zGen, 1., c )
if display:
El.Display( A, "A" )
El.Display( G, "G" )
El.Display( b, "b" )
El.Display( c, "c" )
El.Display( h, "h" )
ctrl = El.LPAffineCtrl_d()
xOrig = El.DistMatrix()
yOrig = El.DistMatrix()
zOrig = El.DistMatrix()
sOrig = El.DistMatrix()
if manualInit:
El.Uniform(xOrig,n,1,0.5,0.4999)
El.Uniform(yOrig,m,1,0.5,0.4999)
El.Uniform(zOrig,k,1,0.5,0.4999)
El.Uniform(sOrig,k,1,0.5,0.4999)
x = El.DistMatrix()
y = El.DistMatrix()
z = El.DistMatrix()
s = El.DistMatrix()
if testMehrotra:
ctrl.approach = El.LP_MEHROTRA
ctrl.mehrotraCtrl.primalInit = manualInit
ctrl.mehrotraCtrl.dualInit = manualInit
ctrl.mehrotraCtrl.progress = progress
El.Copy( xOrig, x )
El.Copy( yOrig, y )
El.Copy( zOrig, z )
El.Copy( sOrig, s )
startMehrotra = El.mpi.Time()
El.LPAffine(A,G,b,c,h,x,y,z,s,ctrl)
endMehrotra = El.mpi.Time()
if worldRank == 0:
print('Mehrotra time: {} seconds'.format(endMehrotra-startMehrotra))
if display:
El.Display( x, "x Mehrotra" )
El.Display( y, "y Mehrotra" )
El.Display( z, "z Mehrotra" )
El.Display( s, "s Mehrotra" )
obj = El.Dot(c,x)
if worldRank == 0:
print('Mehrotra c^T x = {}'.format(obj))
if testIPF:
ctrl.approach = El.LP_IPF
ctrl.ipfCtrl.primalInit = manualInit
ctrl.ipfCtrl.dualInit = manualInit
ctrl.ipfCtrl.progress = progress
ctrl.ipfCtrl.lineSearchCtrl.progress = progress
El.Copy( xOrig, x )
El.Copy( yOrig, y )
El.Copy( zOrig, z )
El.Copy( sOrig, s )
startIPF = El.mpi.Time()
El.LPAffine(A,G,b,c,h,x,y,z,s,ctrl)
endIPF = El.mpi.Time()
if worldRank == 0:
print('IPF time: {} seconds'.format(endIPF-startIPF))
if display:
El.Display( x, "x IPF" )
El.Display( y, "y IPF" )
El.Display( z, "z IPF" )
El.Display( s, "s IPF" )
obj = El.Dot(c,x)
if worldRank == 0:
print('IPF c^T x = {}'.format(obj))
El.Finalize()
| true | true |
f7342c6447aca358d841aafe7988c715fb43c139 | 291,590 | py | Python | SprityBird/spritybird/python3.5/lib/python3.5/site-packages/plotly/tools.py | MobileAnalytics/iPython-Framework | da0e598308c067cd5c5290a6364b3ffaf2d2418f | [
"MIT"
] | 4 | 2018-07-04T17:20:12.000Z | 2019-07-14T18:07:25.000Z | SprityBird/spritybird/python3.5/lib/python3.5/site-packages/plotly/tools.py | MobileAnalytics/iPython-Framework | da0e598308c067cd5c5290a6364b3ffaf2d2418f | [
"MIT"
] | null | null | null | SprityBird/spritybird/python3.5/lib/python3.5/site-packages/plotly/tools.py | MobileAnalytics/iPython-Framework | da0e598308c067cd5c5290a6364b3ffaf2d2418f | [
"MIT"
] | 1 | 2018-09-03T03:02:06.000Z | 2018-09-03T03:02:06.000Z | # -*- coding: utf-8 -*-
"""
tools
=====
Functions that USERS will possibly want access to.
"""
from __future__ import absolute_import
from collections import OrderedDict
import warnings
import six
import math
import decimal
from plotly import utils
from plotly import exceptions
from plotly import graph_reference
from plotly import session
from plotly.files import (CONFIG_FILE, CREDENTIALS_FILE, FILE_CONTENT,
GRAPH_REFERENCE_FILE, check_file_permissions)
DEFAULT_PLOTLY_COLORS = ['rgb(31, 119, 180)', 'rgb(255, 127, 14)',
'rgb(44, 160, 44)', 'rgb(214, 39, 40)',
'rgb(148, 103, 189)', 'rgb(140, 86, 75)',
'rgb(227, 119, 194)', 'rgb(127, 127, 127)',
'rgb(188, 189, 34)', 'rgb(23, 190, 207)']
REQUIRED_GANTT_KEYS = ['Task', 'Start', 'Finish']
PLOTLY_SCALES = {'Greys': ['rgb(0,0,0)', 'rgb(255,255,255)'],
'YlGnBu': ['rgb(8,29,88)', 'rgb(255,255,217)'],
'Greens': ['rgb(0,68,27)', 'rgb(247,252,245)'],
'YlOrRd': ['rgb(128,0,38)', 'rgb(255,255,204)'],
'Bluered': ['rgb(0,0,255)', 'rgb(255,0,0)'],
'RdBu': ['rgb(5,10,172)', 'rgb(178,10,28)'],
'Reds': ['rgb(220,220,220)', 'rgb(178,10,28)'],
'Blues': ['rgb(5,10,172)', 'rgb(220,220,220)'],
'Picnic': ['rgb(0,0,255)', 'rgb(255,0,0)'],
'Rainbow': ['rgb(150,0,90)', 'rgb(255,0,0)'],
'Portland': ['rgb(12,51,131)', 'rgb(217,30,30)'],
'Jet': ['rgb(0,0,131)', 'rgb(128,0,0)'],
'Hot': ['rgb(0,0,0)', 'rgb(255,255,255)'],
'Blackbody': ['rgb(0,0,0)', 'rgb(160,200,255)'],
'Earth': ['rgb(0,0,130)', 'rgb(255,255,255)'],
'Electric': ['rgb(0,0,0)', 'rgb(255,250,220)'],
'Viridis': ['rgb(68,1,84)', 'rgb(253,231,37)']}
# color constants for violin plot
DEFAULT_FILLCOLOR = '#1f77b4'
DEFAULT_HISTNORM = 'probability density'
ALTERNATIVE_HISTNORM = 'probability'
# Warning format
def warning_on_one_line(message, category, filename, lineno,
                        file=None, line=None):
    """Render a warning as one compact 'file:line: Category:' header plus
    the message text, padded with blank lines. Installed below as
    warnings.formatwarning; the unused file/line params match that API."""
    header = '{}:{}: {}:'.format(filename, lineno, category.__name__)
    return '{}\n\n{}\n\n'.format(header, message)
warnings.formatwarning = warning_on_one_line
try:
from . import matplotlylib
_matplotlylib_imported = True
except ImportError:
_matplotlylib_imported = False
try:
import IPython
import IPython.core.display
_ipython_imported = True
except ImportError:
_ipython_imported = False
try:
import numpy as np
_numpy_imported = True
except ImportError:
_numpy_imported = False
try:
import pandas as pd
_pandas_imported = True
except ImportError:
_pandas_imported = False
try:
import scipy as scp
_scipy_imported = True
except ImportError:
_scipy_imported = False
try:
import scipy.spatial as scs
_scipy__spatial_imported = True
except ImportError:
_scipy__spatial_imported = False
try:
import scipy.cluster.hierarchy as sch
_scipy__cluster__hierarchy_imported = True
except ImportError:
_scipy__cluster__hierarchy_imported = False
try:
import scipy
import scipy.stats
_scipy_imported = True
except ImportError:
_scipy_imported = False
def get_config_defaults():
    """
    Convenience function to check current settings against defaults.

    Example:

        if plotly_domain != get_config_defaults()['plotly_domain']:
            # do something

    """
    # Hand back a shallow copy so callers can't mutate the master defaults.
    defaults = FILE_CONTENT[CONFIG_FILE]
    return dict(defaults)
def ensure_local_plotly_files():
    """Ensure that filesystem is setup/filled out in a valid way.

    If the config or credential files aren't filled out, then write them
    to the disk: missing keys get their default values and keys not in
    the expected schema are dropped, then the graph-reference file is
    (re)written. On a read-only filesystem this only emits a warning.
    """
    if check_file_permissions():
        for fn in [CREDENTIALS_FILE, CONFIG_FILE]:
            utils.ensure_file_exists(fn)
            contents = utils.load_json_dict(fn)
            # backfill any missing keys with their defaults
            for key, val in list(FILE_CONTENT[fn].items()):
                # TODO: removed type checking below, may want to revisit
                if key not in contents:
                    contents[key] = val
            # drop keys that are no longer part of the expected schema
            contents_keys = list(contents.keys())
            for key in contents_keys:
                if key not in FILE_CONTENT[fn]:
                    del contents[key]
            utils.save_json_dict(fn, contents)
        # make a request to get graph reference if DNE.
        utils.ensure_file_exists(GRAPH_REFERENCE_FILE)
        utils.save_json_dict(GRAPH_REFERENCE_FILE,
                             graph_reference.GRAPH_REFERENCE)
    else:
        # read-only filesystem: nothing can be persisted, so tell the user
        # how to authenticate without local config files
        warnings.warn("Looks like you don't have 'read-write' permission to "
                      "your 'home' ('~') directory or to our '~/.plotly' "
                      "directory. That means plotly's python api can't setup "
                      "local configuration files. No problem though! You'll "
                      "just have to sign-in using 'plotly.plotly.sign_in()'. "
                      "For help with that: 'help(plotly.plotly.sign_in)'."
                      "\nQuestions? support@plot.ly")
### credentials tools ###
def set_credentials_file(username=None,
                         api_key=None,
                         stream_ids=None,
                         proxy_username=None,
                         proxy_password=None):
    """Set the keyword-value pairs in `~/.plotly_credentials`.

    Arguments left as None are skipped, so existing values in the file
    are preserved unless explicitly overwritten.

    :param (str) username: The username you'd use to sign in to Plotly
    :param (str) api_key: The api key associated with above username
    :param (list) stream_ids: Stream tokens for above credentials
    :param (str) proxy_username: The username associated with your Proxy
    :param (str) proxy_password: The password associated with your Proxy

    :raises: (PlotlyError) If the filesystem isn't read-write accessible.

    """
    if not check_file_permissions():
        raise exceptions.PlotlyError("You don't have proper file permissions "
                                     "to run this function.")
    ensure_local_plotly_files()  # make sure what's there is OK
    credentials = get_credentials_file()
    # only values of the expected type are written; anything else (notably
    # the default None) leaves the currently stored value untouched
    if isinstance(username, six.string_types):
        credentials['username'] = username
    if isinstance(api_key, six.string_types):
        credentials['api_key'] = api_key
    if isinstance(proxy_username, six.string_types):
        credentials['proxy_username'] = proxy_username
    if isinstance(proxy_password, six.string_types):
        credentials['proxy_password'] = proxy_password
    if isinstance(stream_ids, (list, tuple)):
        credentials['stream_ids'] = stream_ids
    utils.save_json_dict(CREDENTIALS_FILE, credentials)
    ensure_local_plotly_files()  # make sure what we just put there is OK
def get_credentials_file(*args):
    """Return the requested keys from `~/.plotly_credentials` as a dict.

    With no arguments, the full credentials dict is returned.

    Example:

        get_credentials_file('username')

    """
    if not check_file_permissions():
        # read-only filesystem: serve the in-memory defaults instead
        return FILE_CONTENT[CREDENTIALS_FILE]
    ensure_local_plotly_files()  # repair/normalize the file before reading
    return utils.load_json_dict(CREDENTIALS_FILE, *args)
def reset_credentials_file():
    """Wipe `~/.plotly_credentials`, then restore its default (empty) keys."""
    ensure_local_plotly_files()  # make sure what's there is OK
    utils.save_json_dict(CREDENTIALS_FILE, {})
    ensure_local_plotly_files()  # put the defaults back
### config tools ###
def set_config_file(plotly_domain=None,
                    plotly_streaming_domain=None,
                    plotly_api_domain=None,
                    plotly_ssl_verification=None,
                    plotly_proxy_authorization=None,
                    world_readable=None,
                    sharing=None,
                    auto_open=None):
    """Set the keyword-value pairs in `~/.plotly/.config`.

    Arguments left as None are skipped, so existing settings are preserved
    unless explicitly changed.

    :param (str) plotly_domain: ex - https://plot.ly
    :param (str) plotly_streaming_domain: ex - stream.plot.ly
    :param (str) plotly_api_domain: ex - https://api.plot.ly
    :param (bool) plotly_ssl_verification: True = verify, False = don't verify
    :param (bool) plotly_proxy_authorization: True = use plotly proxy auth creds
    :param (bool) world_readable: True = public, False = private
    :param (str) sharing: sharing mode, validated together with world_readable
    :param (bool) auto_open: whether plots open automatically after plotting

    :raises: (PlotlyError) If the filesystem isn't read-write accessible.
    :raises: (TypeError) If any provided value has the wrong type.

    """
    if not check_file_permissions():
        raise exceptions.PlotlyError("You don't have proper file permissions "
                                     "to run this function.")
    ensure_local_plotly_files()  # make sure what's there is OK
    # world_readable and sharing overlap; reject inconsistent combinations
    utils.validate_world_readable_and_sharing_settings({
        'sharing': sharing, 'world_readable': world_readable})
    settings = get_config_file()
    if isinstance(plotly_domain, six.string_types):
        settings['plotly_domain'] = plotly_domain
    elif plotly_domain is not None:
        raise TypeError('plotly_domain should be a string')
    if isinstance(plotly_streaming_domain, six.string_types):
        settings['plotly_streaming_domain'] = plotly_streaming_domain
    elif plotly_streaming_domain is not None:
        raise TypeError('plotly_streaming_domain should be a string')
    if isinstance(plotly_api_domain, six.string_types):
        settings['plotly_api_domain'] = plotly_api_domain
    elif plotly_api_domain is not None:
        raise TypeError('plotly_api_domain should be a string')
    if isinstance(plotly_ssl_verification, (six.string_types, bool)):
        settings['plotly_ssl_verification'] = plotly_ssl_verification
    elif plotly_ssl_verification is not None:
        raise TypeError('plotly_ssl_verification should be a boolean')
    if isinstance(plotly_proxy_authorization, (six.string_types, bool)):
        settings['plotly_proxy_authorization'] = plotly_proxy_authorization
    elif plotly_proxy_authorization is not None:
        raise TypeError('plotly_proxy_authorization should be a boolean')
    if isinstance(auto_open, bool):
        settings['auto_open'] = auto_open
    elif auto_open is not None:
        raise TypeError('auto_open should be a boolean')
    if isinstance(world_readable, bool):
        settings['world_readable'] = world_readable
        # drop any stale 'sharing' value so it can be re-derived below
        # (presumably by set_sharing_and_world_readable -- TODO confirm)
        settings.pop('sharing')
    elif world_readable is not None:
        raise TypeError('Input should be a boolean')
    if isinstance(sharing, six.string_types):
        settings['sharing'] = sharing
    elif sharing is not None:
        raise TypeError('sharing should be a string')
    utils.set_sharing_and_world_readable(settings)
    utils.save_json_dict(CONFIG_FILE, settings)
    ensure_local_plotly_files()  # make sure what we just put there is OK
def get_config_file(*args):
    """Return the requested keys from `~/.plotly/.config` as a dict.

    With no arguments, every config setting is returned.

    Example:

        get_config_file('plotly_domain')

    """
    if not check_file_permissions():
        # read-only filesystem: serve the in-memory defaults instead
        return FILE_CONTENT[CONFIG_FILE]
    ensure_local_plotly_files()  # repair/normalize the file before reading
    return utils.load_json_dict(CONFIG_FILE, *args)
def reset_config_file():
    """Truncate `~/.plotly/.config`, then restore the default settings."""
    ensure_local_plotly_files()  # make sure what's there is OK
    # FIX: truncate via a context manager so the handle is always closed
    # (previously the file was opened and closed manually).
    with open(CONFIG_FILE, 'w'):
        pass
    ensure_local_plotly_files()  # put the defaults back
### embed tools ###
def get_embed(file_owner_or_url, file_id=None, width="100%", height=525):
    """Returns HTML code to embed figure on a webpage as an <iframe>

    Plotly uniquely identifies figures with a 'file_owner'/'file_id' pair.
    Since each file is given a corresponding unique url, you may also simply
    pass a valid plotly url as the first argument.

    Note, if you're using a file_owner string as the first argument, you MUST
    specify a `file_id` keyword argument. Else, if you're using a url string
    as the first argument, you MUST NOT specify a `file_id` keyword argument,
    or file_id must be set to Python's None value.

    Positional arguments:
    file_owner_or_url (string) -- a valid plotly username OR a valid plotly url

    Keyword arguments:
    file_id (default=None) -- an int or string that can be converted to int
                              if you're using a url, don't fill this in!
    width (default="100%") -- an int or string corresp. to width of the figure
    height (default="525") -- same as width but corresp. to the height of the
                              figure

    """
    plotly_rest_url = (session.get_session_config().get('plotly_domain') or
                       get_config_file()['plotly_domain'])
    if file_id is None:  # assume we're using a url
        url = file_owner_or_url
        if url[:len(plotly_rest_url)] != plotly_rest_url:
            raise exceptions.PlotlyError(
                "Because you didn't supply a 'file_id' in the call, "
                "we're assuming you're trying to snag a figure from a url. "
                "You supplied the url, '{0}', we expected it to start with "
                "'{1}'."
                "\nRun help on this function for more information."
                "".format(url, plotly_rest_url))
        urlsplit = six.moves.urllib.parse.urlparse(url)
        # urls look like '<domain>/~<owner>/<id>[?share_key=...]'
        file_owner = urlsplit.path.split('/')[1].split('~')[1]
        file_id = urlsplit.path.split('/')[2]
        # a 'share_key' query parameter marks a secret-link (private) plot;
        # FIX: use .get so a query string *without* 'share_key' no longer
        # raises KeyError
        query_dict = six.moves.urllib.parse.parse_qs(urlsplit.query)
        share_key = query_dict.get('share_key', [''])[-1]
    else:
        file_owner = file_owner_or_url
        share_key = ''
    try:
        # validate early that file_id is integer-like (FIX: the result was
        # previously bound to an unused variable)
        int(file_id)
    except ValueError:
        raise exceptions.PlotlyError(
            "The 'file_id' argument was not able to be converted into an "
            "integer number. Make sure that the positional 'file_id' argument "
            "is a number that can be converted into an integer or a string "
            "that can be converted into an integer."
        )
    if int(file_id) < 0:
        raise exceptions.PlotlyError(
            "The 'file_id' argument must be a non-negative number."
        )
    # build the iframe 'src', appending the share key only when one exists
    # (FIX: replaces the 'share_key is \'\'' identity comparison and the
    # duplicated iframe template)
    src = "{plotly_rest_url}/~{file_owner}/{file_id}.embed".format(
        plotly_rest_url=plotly_rest_url, file_owner=file_owner,
        file_id=file_id)
    if share_key:
        src = "{src}?share_key={share_key}".format(src=src,
                                                   share_key=share_key)
    return ("<iframe id=\"igraph\" scrolling=\"no\" style=\"border:none;\" "
            "seamless=\"seamless\" "
            "src=\"{src}\" "
            "height=\"{iframe_height}\" width=\"{iframe_width}\">"
            "</iframe>").format(src=src, iframe_height=height,
                                iframe_width=width)
def embed(file_owner_or_url, file_id=None, width="100%", height=525):
    """Embeds existing Plotly figure in IPython Notebook
    Plotly uniquely identifies figures with a 'file_owner'/'file_id' pair.
    Since each file is given a corresponding unique url, you may also simply
    pass a valid plotly url as the first argument.
    Note, if you're using a file_owner string as the first argument, you MUST
    specify a `file_id` keyword argument. Else, if you're using a url string
    as the first argument, you MUST NOT specify a `file_id` keyword argument,
    or file_id must be set to Python's None value.
    Positional arguments:
    file_owner_or_url (string) -- a valid plotly username OR a valid plotly url
    Keyword arguments:
    file_id (default=None) -- an int or string that can be converted to int
        if you're using a url, don't fill this in!
    width (default="100%") -- an int or string corresp. to width of the figure
    height (default="525") -- same as width but corresp. to the height of the
        figure
    """
    try:
        s = get_embed(file_owner_or_url, file_id=file_id, width=width,
                      height=height)
        # see if we are in the SageMath Cloud
        from sage_salvus import html
        return html(s, hide=False)
    except:
        # NOTE(review): this bare except intentionally swallows BOTH a failed
        # `sage_salvus` import (i.e. not running in the SageMath Cloud) and
        # any error raised by get_embed above, falling through to the
        # IPython / plain-Python handling below.
        pass
    if _ipython_imported:
        # Inside IPython: hand back a rich display object wrapping the plot.
        if file_id:
            # Prefer the in-memory session domain; fall back to the on-disk
            # config file's 'plotly_domain'.
            plotly_domain = (
                session.get_session_config().get('plotly_domain') or
                get_config_file()['plotly_domain']
            )
            url = "{plotly_domain}/~{un}/{fid}".format(
                plotly_domain=plotly_domain,
                un=file_owner_or_url,
                fid=file_id)
        else:
            # No file_id supplied -> first argument is already a full url.
            url = file_owner_or_url
        return PlotlyDisplay(url, width, height)
    else:
        # Not in IPython/Sage: nothing to display, so just point the user at
        # get_embed() and pick a contact address based on the configured domain.
        if (get_config_defaults()['plotly_domain']
                != session.get_session_config()['plotly_domain']):
            feedback_email = 'feedback@plot.ly'
        else:
            # Session domain equals the default here. NOTE(review): the
            # original comment said "different domain likely means
            # enterprise", which describes the branch ABOVE, not this one —
            # confirm the two addresses aren't swapped.
            feedback_email = 'support@plot.ly'
        warnings.warn(
            "Looks like you're not using IPython or Sage to embed this "
            "plot. If you just want the *embed code*,\ntry using "
            "`get_embed()` instead."
            '\nQuestions? {}'.format(feedback_email))
### mpl-related tools ###
@utils.template_doc(**get_config_file())
def mpl_to_plotly(fig, resize=False, strip_style=False, verbose=False):
    """Convert a matplotlib figure to plotly dictionary and send.

    All available information about a matplotlib visualization lives in its
    matplotlib.figure.Figure object. Pass that object here: mplexporter
    crawls it and streams what it finds into a PlotlyRenderer, which builds
    the JSON-like structure that plotly consumes. The resulting plotly
    figure is returned.

    If you need to troubleshoot, you can perform this step manually by
    skipping this function and running:

        from mplexporter import Exporter
        from mplexporter.renderers import PlotlyRenderer
        # create an mpl figure and store it under a variable 'fig'
        renderer = PlotlyRenderer()
        exporter = Exporter(renderer)
        exporter.run(fig)

    You can then inspect the JSON structures via:
        renderer.layout -- a plotly layout dictionary
        renderer.data -- a list of plotly data dictionaries

    Positional arguments:
    fig -- a matplotlib figure object

    Keyword arguments:
    resize (default=False) -- if True, run the renderer's resize step
    strip_style (default=False) -- if True, run the renderer's strip_style
        step
    verbose (default=False) -- if True, print the renderer's message log

    ** Don't have a username/api_key? Try looking here:
    {plotly_domain}/plot

    ** Forgot your api_key? Try signing in and looking here:
    {plotly_domain}/python/getting-started
    """
    # Guard clause: without matplotlib/matplotlylib there is nothing to do.
    if not _matplotlylib_imported:
        warnings.warn(
            "To use Plotly's matplotlylib functionality, you'll need to have "
            "matplotlib successfully installed with all of its dependencies. "
            "You're getting this error because matplotlib or one of its "
            "dependencies doesn't seem to be installed correctly.")
        return

    plotly_renderer = matplotlylib.PlotlyRenderer()
    matplotlylib.Exporter(plotly_renderer).run(fig)
    if resize:
        plotly_renderer.resize()
    if strip_style:
        plotly_renderer.strip_style()
    if verbose:
        print(plotly_renderer.msg)
    return plotly_renderer.plotly_fig
### graph_objs related tools ###
def get_subplots(rows=1, columns=1, print_grid=False, **kwargs):
    """Return a dictionary instance with the subplots set in 'layout'.

    DEPRECATED: use tools.make_subplots instead.

    Example 1:
    # stack two subplots vertically
    fig = tools.get_subplots(rows=2)
    fig['data'] += [Scatter(x=[1,2,3], y=[2,1,2], xaxis='x1', yaxis='y1')]
    fig['data'] += [Scatter(x=[1,2,3], y=[2,1,2], xaxis='x2', yaxis='y2')]
    Example 2:
    # print out string showing the subplot grid you've put in the layout
    fig = tools.get_subplots(rows=3, columns=2, print_grid=True)
    Keyword arguments with constant defaults:
    rows (kwarg, int greater than 0, default=1):
        Number of rows, evenly spaced vertically on the figure.
    columns (kwarg, int greater than 0, default=1):
        Number of columns, evenly spaced horizontally on the figure.
    print_grid (kwarg, True | False, default=False):
        If True, prints a tab-delimited string representation
        of your plot grid.
    Keyword arguments with variable defaults:
    horizontal_spacing (kwarg, float in [0,1], default=0.2 / columns):
        Space between subplot columns.
    vertical_spacing (kwarg, float in [0,1], default=0.3 / rows):
        Space between subplot rows.
    """
    # TODO: protected until #282
    from plotly.graph_objs import graph_objs
    # (sic: "depreciated" is a typo, but it is a runtime string and is kept
    # byte-for-byte to preserve behavior.)
    warnings.warn(
        "tools.get_subplots is depreciated. "
        "Please use tools.make_subplots instead."
    )
    # Throw exception for non-integer rows and columns
    if not isinstance(rows, int) or rows <= 0:
        raise Exception("Keyword argument 'rows' "
                        "must be an int greater than 0")
    if not isinstance(columns, int) or columns <= 0:
        raise Exception("Keyword argument 'columns' "
                        "must be an int greater than 0")
    # Throw exception if non-valid kwarg is sent
    VALID_KWARGS = ['horizontal_spacing', 'vertical_spacing']
    for key in kwargs.keys():
        if key not in VALID_KWARGS:
            raise Exception("Invalid keyword argument: '{0}'".format(key))
    # Set 'horizontal_spacing' / 'vertical_spacing' w.r.t. rows / columns
    try:
        horizontal_spacing = float(kwargs['horizontal_spacing'])
    except KeyError:
        horizontal_spacing = 0.2 / columns
    try:
        vertical_spacing = float(kwargs['vertical_spacing'])
    except KeyError:
        vertical_spacing = 0.3 / rows
    fig = dict(layout=graph_objs.Layout())  # will return this at the end
    # Cell sizes: total span [0, 1] minus the gaps, split evenly.
    plot_width = (1 - horizontal_spacing * (columns - 1)) / columns
    plot_height = (1 - vertical_spacing * (rows - 1)) / rows
    plot_num = 0
    # Walk the grid row-major; axis pairs are numbered 1..rows*columns.
    for rrr in range(rows):
        for ccc in range(columns):
            xaxis_name = 'xaxis{0}'.format(plot_num + 1)
            x_anchor = 'y{0}'.format(plot_num + 1)
            x_start = (plot_width + horizontal_spacing) * ccc
            x_end = x_start + plot_width
            yaxis_name = 'yaxis{0}'.format(plot_num + 1)
            y_anchor = 'x{0}'.format(plot_num + 1)
            y_start = (plot_height + vertical_spacing) * rrr
            y_end = y_start + plot_height
            xaxis = graph_objs.XAxis(domain=[x_start, x_end], anchor=x_anchor)
            fig['layout'][xaxis_name] = xaxis
            yaxis = graph_objs.YAxis(domain=[y_start, y_end], anchor=y_anchor)
            fig['layout'][yaxis_name] = yaxis
            plot_num += 1
    if print_grid:
        print("This is the format of your plot grid!")
        grid_string = ""
        plot = 1
        # Prepend each new row so row 1 ends up at the bottom of the printout.
        for rrr in range(rows):
            grid_line = ""
            for ccc in range(columns):
                grid_line += "[{0}]\t".format(plot)
                plot += 1
            grid_string = grid_line + '\n' + grid_string
        print(grid_string)
    return graph_objs.Figure(fig)  # forces us to validate what we just did...
def make_subplots(rows=1, cols=1,
                  shared_xaxes=False, shared_yaxes=False,
                  start_cell='top-left', print_grid=True,
                  **kwargs):
    """Return an instance of plotly.graph_objs.Figure
    with the subplots domain set in 'layout'.
    Example 1:
    # stack two subplots vertically
    fig = tools.make_subplots(rows=2)
    This is the format of your plot grid:
    [ (1,1) x1,y1 ]
    [ (2,1) x2,y2 ]
    fig['data'] += [Scatter(x=[1,2,3], y=[2,1,2])]
    fig['data'] += [Scatter(x=[1,2,3], y=[2,1,2], xaxis='x2', yaxis='y2')]
    # or see Figure.append_trace
    Example 2:
    # subplots with shared x axes
    fig = tools.make_subplots(rows=2, shared_xaxes=True)
    This is the format of your plot grid:
    [ (1,1) x1,y1 ]
    [ (2,1) x1,y2 ]
    fig['data'] += [Scatter(x=[1,2,3], y=[2,1,2])]
    fig['data'] += [Scatter(x=[1,2,3], y=[2,1,2], yaxis='y2')]
    Example 3:
    # irregular subplot layout (more examples below under 'specs')
    fig = tools.make_subplots(rows=2, cols=2,
                              specs=[[{}, {}],
                                     [{'colspan': 2}, None]])
    This is the format of your plot grid!
    [ (1,1) x1,y1 ]  [ (1,2) x2,y2 ]
    [ (2,1) x3,y3       -          ]
    fig['data'] += [Scatter(x=[1,2,3], y=[2,1,2])]
    fig['data'] += [Scatter(x=[1,2,3], y=[2,1,2], xaxis='x2', yaxis='y2')]
    fig['data'] += [Scatter(x=[1,2,3], y=[2,1,2], xaxis='x3', yaxis='y3')]
    Example 4:
    # insets
    fig = tools.make_subplots(insets=[{'cell': (1,1), 'l': 0.7, 'b': 0.3}])
    This is the format of your plot grid!
    [ (1,1) x1,y1 ]
    With insets:
    [ x2,y2 ] over [ (1,1) x1,y1 ]
    fig['data'] += [Scatter(x=[1,2,3], y=[2,1,2])]
    fig['data'] += [Scatter(x=[1,2,3], y=[2,1,2], xaxis='x2', yaxis='y2')]
    Example 5:
    # include subplot titles
    fig = tools.make_subplots(rows=2, subplot_titles=('Plot 1','Plot 2'))
    This is the format of your plot grid:
    [ (1,1) x1,y1 ]
    [ (2,1) x2,y2 ]
    fig['data'] += [Scatter(x=[1,2,3], y=[2,1,2])]
    fig['data'] += [Scatter(x=[1,2,3], y=[2,1,2], xaxis='x2', yaxis='y2')]
    Example 6:
    # Include subplot title on one plot (but not all)
    fig = tools.make_subplots(insets=[{'cell': (1,1), 'l': 0.7, 'b': 0.3}],
                              subplot_titles=('','Inset'))
    This is the format of your plot grid!
    [ (1,1) x1,y1 ]
    With insets:
    [ x2,y2 ] over [ (1,1) x1,y1 ]
    fig['data'] += [Scatter(x=[1,2,3], y=[2,1,2])]
    fig['data'] += [Scatter(x=[1,2,3], y=[2,1,2], xaxis='x2', yaxis='y2')]
    Keyword arguments with constant defaults:
    rows (kwarg, int greater than 0, default=1):
        Number of rows in the subplot grid.
    cols (kwarg, int greater than 0, default=1):
        Number of columns in the subplot grid.
    shared_xaxes (kwarg, boolean or list, default=False)
        Assign shared x axes.
        If True, subplots in the same grid column have one common
        shared x-axis at the bottom of the grid.
        To assign shared x axes per subplot grid cell (see 'specs'),
        send list (or list of lists, one list per shared x axis)
        of cell index tuples.
    shared_yaxes (kwarg, boolean or list, default=False)
        Assign shared y axes.
        If True, subplots in the same grid row have one common
        shared y-axis on the left-hand side of the grid.
        To assign shared y axes per subplot grid cell (see 'specs'),
        send list (or list of lists, one list per shared y axis)
        of cell index tuples.
    start_cell (kwarg, 'bottom-left' or 'top-left', default='top-left')
        Choose the starting cell in the subplot grid used to set the
        domains of the subplots.
    print_grid (kwarg, boolean, default=True):
        If True, prints a tab-delimited string representation of
        your plot grid.
    Keyword arguments with variable defaults:
    horizontal_spacing (kwarg, float in [0,1], default=0.2 / cols):
        Space between subplot columns.
        Applies to all columns (use 'specs' for subplot-dependent spacing)
    vertical_spacing (kwarg, float in [0,1], default=0.3 / rows):
        Space between subplot rows.
        Applies to all rows (use 'specs' for subplot-dependent spacing)
    subplot_titles (kwarg, list of strings, default=empty list):
        Title of each subplot.
        "" can be included in the list if no subplot title is desired in
        that space so that the titles are properly indexed.
    specs (kwarg, list of lists of dictionaries):
        Subplot specifications.
        ex1: specs=[[{}, {}], [{'colspan': 2}, None]]
        ex2: specs=[[{'rowspan': 2}, {}], [None, {}]]
        - Indices of the outer list correspond to subplot grid rows
          starting from the bottom. The number of rows in 'specs'
          must be equal to 'rows'.
        - Indices of the inner lists correspond to subplot grid columns
          starting from the left. The number of columns in 'specs'
          must be equal to 'cols'.
        - Each item in the 'specs' list corresponds to one subplot
          in a subplot grid. (N.B. The subplot grid has exactly 'rows'
          times 'cols' cells.)
        - Use None to leave a subplot cell blank (or to move past a
          col/row span).
        - Note that specs[0][0] has the specs of the 'start_cell' subplot.
        - Each item in 'specs' is a dictionary.
            The available keys are:
            * is_3d (boolean, default=False): flag for 3d scenes
            * colspan (int, default=1): number of subplot columns
                for this subplot to span.
            * rowspan (int, default=1): number of subplot rows
                for this subplot to span.
            * l (float, default=0.0): padding left of cell
            * r (float, default=0.0): padding right of cell
            * t (float, default=0.0): padding top of cell
            * b (float, default=0.0): padding bottom of cell
        - Use 'horizontal_spacing' and 'vertical_spacing' to adjust
          the spacing in between the subplots.
    insets (kwarg, list of dictionaries):
        Inset specifications.
        - Each item in 'insets' is a dictionary.
            The available keys are:
            * cell (tuple, default=(1,1)): (row, col) index of the
                subplot cell to overlay inset axes onto.
            * is_3d (boolean, default=False): flag for 3d scenes
            * l (float, default=0.0): padding left of inset
                  in fraction of cell width
            * w (float or 'to_end', default='to_end') inset width
                  in fraction of cell width ('to_end': to cell right edge)
            * b (float, default=0.0): padding bottom of inset
                  in fraction of cell height
            * h (float or 'to_end', default='to_end') inset height
                  in fraction of cell height ('to_end': to cell top edge)
    """
    # TODO: protected until #282
    from plotly.graph_objs import graph_objs
    # Throw exception for non-integer rows and cols
    if not isinstance(rows, int) or rows <= 0:
        raise Exception("Keyword argument 'rows' "
                        "must be an int greater than 0")
    if not isinstance(cols, int) or cols <= 0:
        raise Exception("Keyword argument 'cols' "
                        "must be an int greater than 0")
    # Map of 'start_cell' values to the row/col traversal directions used
    # when laying out the grid below.
    START_CELL_all = {
        'bottom-left': {
            # 'natural' setup where x & y domains increase monotonically
            'col_dir': 1,
            'row_dir': 1
        },
        'top-left': {
            # 'default' setup visually matching the 'specs' list of lists
            'col_dir': 1,
            'row_dir': -1
        }
        # TODO maybe add 'bottom-right' and 'top-right'
    }
    # Throw exception for invalid 'start_cell' values
    try:
        START_CELL = START_CELL_all[start_cell]
    except KeyError:
        raise Exception("Invalid 'start_cell' value")
    # Throw exception if non-valid kwarg is sent
    VALID_KWARGS = ['horizontal_spacing', 'vertical_spacing',
                    'specs', 'insets', 'subplot_titles']
    for key in kwargs.keys():
        if key not in VALID_KWARGS:
            raise Exception("Invalid keyword argument: '{0}'".format(key))
    # Set 'subplot_titles' (default: one empty title slot per grid cell)
    subplot_titles = kwargs.get('subplot_titles', [""] * rows * cols)
    # Set 'horizontal_spacing' / 'vertical_spacing' w.r.t. rows / cols
    try:
        horizontal_spacing = float(kwargs['horizontal_spacing'])
    except KeyError:
        horizontal_spacing = 0.2 / cols
    try:
        vertical_spacing = float(kwargs['vertical_spacing'])
    except KeyError:
        # Leave extra room between rows when subplot titles were requested.
        if 'subplot_titles' in kwargs:
            vertical_spacing = 0.5 / rows
        else:
            vertical_spacing = 0.3 / rows
    # Sanitize 'specs' (must be a list of lists)
    exception_msg = "Keyword argument 'specs' must be a list of lists"
    try:
        specs = kwargs['specs']
        if not isinstance(specs, list):
            raise Exception(exception_msg)
        else:
            for spec_row in specs:
                if not isinstance(spec_row, list):
                    raise Exception(exception_msg)
    except KeyError:
        specs = [[{}
                 for c in range(cols)]
                 for r in range(rows)]  # default 'specs'
    # Throw exception if specs is over or under specified
    if len(specs) != rows:
        raise Exception("The number of rows in 'specs' "
                        "must be equal to 'rows'")
    for r, spec_row in enumerate(specs):
        if len(spec_row) != cols:
            raise Exception("The number of columns in 'specs' "
                            "must be equal to 'cols'")
    # Sanitize 'insets'
    try:
        insets = kwargs['insets']
        if not isinstance(insets, list):
            raise Exception("Keyword argument 'insets' must be a list")
    except KeyError:
        insets = False
    # Throw exception if non-valid key / fill in defaults
    def _check_keys_and_fill(name, arg, defaults):
        def _checks(item, defaults):
            if item is None:
                return
            if not isinstance(item, dict):
                raise Exception("Items in keyword argument '{name}' must be "
                                "dictionaries or None".format(name=name))
            for k in item.keys():
                if k not in defaults.keys():
                    raise Exception("Invalid key '{k}' in keyword "
                                    "argument '{name}'".format(k=k, name=name))
            # Fill in defaults for keys the user left out (mutates item).
            for k in defaults.keys():
                if k not in item.keys():
                    item[k] = defaults[k]
        for arg_i in arg:
            if isinstance(arg_i, list):
                for arg_ii in arg_i:
                    _checks(arg_ii, defaults)
            elif isinstance(arg_i, dict):
                _checks(arg_i, defaults)
    # Default spec key-values
    SPEC_defaults = dict(
        is_3d=False,
        colspan=1,
        rowspan=1,
        l=0.0,
        r=0.0,
        b=0.0,
        t=0.0
        # TODO add support for 'w' and 'h'
    )
    _check_keys_and_fill('specs', specs, SPEC_defaults)
    # Default inset key-values
    if insets:
        INSET_defaults = dict(
            cell=(1, 1),
            is_3d=False,
            l=0.0,
            w='to_end',
            b=0.0,
            h='to_end'
        )
        _check_keys_and_fill('insets', insets, INSET_defaults)
    # Set width & height of each subplot cell (excluding padding)
    width = (1. - horizontal_spacing * (cols - 1)) / cols
    height = (1. - vertical_spacing * (rows - 1)) / rows
    # Build row/col sequence using 'row_dir' and 'col_dir'
    COL_DIR = START_CELL['col_dir']
    ROW_DIR = START_CELL['row_dir']
    col_seq = range(cols)[::COL_DIR]
    row_seq = range(rows)[::ROW_DIR]
    # [grid] Build subplot grid (coord tuple of cell's bottom-left corner)
    grid = [[((width + horizontal_spacing) * c,
              (height + vertical_spacing) * r)
            for c in col_seq]
            for r in row_seq]
    # [grid_ref] Initialize the grid and insets' axis-reference lists
    grid_ref = [[None for c in range(cols)] for r in range(rows)]
    insets_ref = [None for inset in range(len(insets))] if insets else None
    layout = graph_objs.Layout()  # init layout object
    # Function handling logic around 2d axis labels
    # Returns 'x{}' | 'y{}'
    def _get_label(x_or_y, r, c, cnt, shared_axes):
        # Default label (given strictly by cnt)
        label = "{x_or_y}{cnt}".format(x_or_y=x_or_y, cnt=cnt)
        if isinstance(shared_axes, bool):
            if shared_axes:
                # Shared axes reuse the label of the column (x) / row (y).
                if x_or_y == 'x':
                    label = "{x_or_y}{c}".format(x_or_y=x_or_y, c=c + 1)
                if x_or_y == 'y':
                    label = "{x_or_y}{r}".format(x_or_y=x_or_y, r=r + 1)
        if isinstance(shared_axes, list):
            if isinstance(shared_axes[0], tuple):
                shared_axes = [shared_axes]  # TODO put this elsewhere
            for shared_axis in shared_axes:
                # Cells in a shared group all use the first cell's label.
                if (r + 1, c + 1) in shared_axis:
                    label = {
                        'x': "x{0}".format(shared_axis[0][1]),
                        'y': "y{0}".format(shared_axis[0][0])
                    }[x_or_y]
        return label
    # Row in grid of anchor row if shared_xaxes=True
    ANCHOR_ROW = 0 if ROW_DIR > 0 else rows - 1
    # Function handling logic around 2d axis anchors
    # Return 'x{}' | 'y{}' | 'free' | False
    def _get_anchors(r, c, x_cnt, y_cnt, shared_xaxes, shared_yaxes):
        # Default anchors (given strictly by cnt)
        x_anchor = "y{y_cnt}".format(y_cnt=y_cnt)
        y_anchor = "x{x_cnt}".format(x_cnt=x_cnt)
        if isinstance(shared_xaxes, bool):
            if shared_xaxes:
                if r != ANCHOR_ROW:
                    x_anchor = False
                    y_anchor = 'free'
                    if shared_yaxes and c != 0:  # TODO covers all cases?
                        y_anchor = False
                return x_anchor, y_anchor
        elif isinstance(shared_xaxes, list):
            if isinstance(shared_xaxes[0], tuple):
                shared_xaxes = [shared_xaxes]  # TODO put this elsewhere
            for shared_xaxis in shared_xaxes:
                if (r + 1, c + 1) in shared_xaxis[1:]:
                    x_anchor = False
                    y_anchor = 'free'  # TODO covers all cases?
        if isinstance(shared_yaxes, bool):
            if shared_yaxes:
                if c != 0:
                    y_anchor = False
                    x_anchor = 'free'
                    if shared_xaxes and r != ANCHOR_ROW:  # TODO all cases?
                        x_anchor = False
                return x_anchor, y_anchor
        elif isinstance(shared_yaxes, list):
            if isinstance(shared_yaxes[0], tuple):
                shared_yaxes = [shared_yaxes]  # TODO put this elsewhere
            for shared_yaxis in shared_yaxes:
                if (r + 1, c + 1) in shared_yaxis[1:]:
                    y_anchor = False
                    x_anchor = 'free'  # TODO covers all cases?
        return x_anchor, y_anchor
    list_of_domains = []  # added for subplot titles
    # Function pasting x/y domains in layout object (2d case)
    def _add_domain(layout, x_or_y, label, domain, anchor, position):
        name = label[0] + 'axis' + label[1:]
        graph_obj = '{X_or_Y}Axis'.format(X_or_Y=x_or_y.upper())
        axis = getattr(graph_objs, graph_obj)(domain=domain)
        if anchor:
            axis['anchor'] = anchor
        if isinstance(position, float):
            axis['position'] = position
        layout[name] = axis
        list_of_domains.append(domain)  # added for subplot titles
    # Function pasting x/y domains in layout object (3d case)
    def _add_domain_is_3d(layout, s_label, x_domain, y_domain):
        scene = graph_objs.Scene(domain={'x': x_domain, 'y': y_domain})
        layout[s_label] = scene
    x_cnt = y_cnt = s_cnt = 1  # subplot axis/scene counters
    # Loop through specs -- (r, c) <-> (row, col)
    for r, spec_row in enumerate(specs):
        for c, spec in enumerate(spec_row):
            if spec is None:  # skip over None cells
                continue
            c_spanned = c + spec['colspan'] - 1  # get spanned c
            r_spanned = r + spec['rowspan'] - 1  # get spanned r
            # Throw exception if 'colspan' | 'rowspan' is too large for grid
            if c_spanned >= cols:
                raise Exception("Some 'colspan' value is too large for "
                                "this subplot grid.")
            if r_spanned >= rows:
                raise Exception("Some 'rowspan' value is too large for "
                                "this subplot grid.")
            # Get x domain using grid and colspan
            x_s = grid[r][c][0] + spec['l']
            x_e = grid[r][c_spanned][0] + width - spec['r']
            x_domain = [x_s, x_e]
            # Get y domain (dep. on row_dir) using grid & r_spanned
            if ROW_DIR > 0:
                y_s = grid[r][c][1] + spec['b']
                y_e = grid[r_spanned][c][1] + height - spec['t']
            else:
                y_s = grid[r_spanned][c][1] + spec['b']
                y_e = grid[r][c][1] + height - spec['t']
            y_domain = [y_s, y_e]
            if spec['is_3d']:
                # Add scene to layout
                s_label = 'scene{0}'.format(s_cnt)
                _add_domain_is_3d(layout, s_label, x_domain, y_domain)
                grid_ref[r][c] = (s_label, )
                s_cnt += 1
            else:
                # Get axis label and anchor
                x_label = _get_label('x', r, c, x_cnt, shared_xaxes)
                y_label = _get_label('y', r, c, y_cnt, shared_yaxes)
                x_anchor, y_anchor = _get_anchors(r, c,
                                                  x_cnt, y_cnt,
                                                  shared_xaxes,
                                                  shared_yaxes)
                # Add a xaxis to layout (N.B anchor == False -> no axis)
                if x_anchor:
                    if x_anchor == 'free':
                        x_position = y_domain[0]
                    else:
                        x_position = False
                    _add_domain(layout, 'x', x_label, x_domain,
                                x_anchor, x_position)
                    x_cnt += 1
                # Add a yaxis to layout (N.B anchor == False -> no axis)
                if y_anchor:
                    if y_anchor == 'free':
                        y_position = x_domain[0]
                    else:
                        y_position = False
                    _add_domain(layout, 'y', y_label, y_domain,
                                y_anchor, y_position)
                    y_cnt += 1
                grid_ref[r][c] = (x_label, y_label)  # fill in ref
    # Loop through insets
    if insets:
        for i_inset, inset in enumerate(insets):
            r = inset['cell'][0] - 1
            c = inset['cell'][1] - 1
            # Throw exception if r | c is out of range
            if not (0 <= r < rows):
                raise Exception("Some 'cell' row value is out of range. "
                                "Note: the starting cell is (1, 1)")
            if not (0 <= c < cols):
                raise Exception("Some 'cell' col value is out of range. "
                                "Note: the starting cell is (1, 1)")
            # Get inset x domain using grid
            x_s = grid[r][c][0] + inset['l'] * width
            if inset['w'] == 'to_end':
                x_e = grid[r][c][0] + width
            else:
                x_e = x_s + inset['w'] * width
            x_domain = [x_s, x_e]
            # Get inset y domain using grid
            y_s = grid[r][c][1] + inset['b'] * height
            if inset['h'] == 'to_end':
                y_e = grid[r][c][1] + height
            else:
                y_e = y_s + inset['h'] * height
            y_domain = [y_s, y_e]
            if inset['is_3d']:
                # Add scene to layout
                s_label = 'scene{0}'.format(s_cnt)
                _add_domain_is_3d(layout, s_label, x_domain, y_domain)
                insets_ref[i_inset] = (s_label, )
                s_cnt += 1
            else:
                # Get axis label and anchor (insets never share axes here)
                x_label = _get_label('x', False, False, x_cnt, False)
                y_label = _get_label('y', False, False, y_cnt, False)
                x_anchor, y_anchor = _get_anchors(r, c,
                                                  x_cnt, y_cnt,
                                                  False, False)
                # Add a xaxis to layout (N.B insets always have anchors)
                _add_domain(layout, 'x', x_label, x_domain, x_anchor, False)
                x_cnt += 1
                # Add a yaxis to layout (N.B insets always have anchors)
                _add_domain(layout, 'y', y_label, y_domain, y_anchor, False)
                y_cnt += 1
                insets_ref[i_inset] = (x_label, y_label)  # fill in ref
    # [grid_str] Set the grid's string representation
    sp = "  "  # space between cell
    s_str = "[ "  # cell start string
    e_str = " ]"  # cell end string
    colspan_str = ' -'  # colspan string
    rowspan_str = ' |'  # rowspan string
    empty_str = ' (empty) '  # empty cell string
    # Init grid_str with intro message
    grid_str = "This is the format of your plot grid:\n"
    # Init tmp list of lists of strings (sorta like 'grid_ref' but w/ strings)
    _tmp = [['' for c in range(cols)] for r in range(rows)]
    # Define cell string as function of (r, c) and grid_ref
    def _get_cell_str(r, c, ref):
        return '({r},{c}) {ref}'.format(r=r + 1, c=c + 1, ref=','.join(ref))
    # Find max len of _cell_str, and define a padding function
    cell_len = max([len(_get_cell_str(r, c, ref))
                    for r, row_ref in enumerate(grid_ref)
                    for c, ref in enumerate(row_ref)
                    if ref]) + len(s_str) + len(e_str)
    def _pad(s, cell_len=cell_len):
        return ' ' * (cell_len - len(s))
    # Loop through specs, fill in _tmp
    for r, spec_row in enumerate(specs):
        for c, spec in enumerate(spec_row):
            ref = grid_ref[r][c]
            if ref is None:
                if _tmp[r][c] == '':
                    _tmp[r][c] = empty_str + _pad(empty_str)
                continue
            cell_str = s_str + _get_cell_str(r, c, ref)
            if spec['colspan'] > 1:
                for cc in range(1, spec['colspan'] - 1):
                    _tmp[r][c + cc] = colspan_str + _pad(colspan_str)
                _tmp[r][c + spec['colspan'] - 1] = (
                    colspan_str + _pad(colspan_str + e_str)) + e_str
            else:
                cell_str += e_str
            if spec['rowspan'] > 1:
                for rr in range(1, spec['rowspan'] - 1):
                    _tmp[r + rr][c] = rowspan_str + _pad(rowspan_str)
                for cc in range(spec['colspan']):
                    _tmp[r + spec['rowspan'] - 1][c + cc] = (
                        rowspan_str + _pad(rowspan_str))
            _tmp[r][c] = cell_str + _pad(cell_str)
    # Append grid_str using data from _tmp in the correct order
    for r in row_seq[::-1]:
        grid_str += sp.join(_tmp[r]) + '\n'
    # Append grid_str to include insets info
    if insets:
        grid_str += "\nWith insets:\n"
        for i_inset, inset in enumerate(insets):
            r = inset['cell'][0] - 1
            c = inset['cell'][1] - 1
            ref = grid_ref[r][c]
            grid_str += (
                s_str + ','.join(insets_ref[i_inset]) + e_str +
                ' over ' +
                s_str + _get_cell_str(r, c, ref) + e_str + '\n'
            )
    # Add subplot titles
    # If shared_axes is False (default) use list_of_domains
    # This is used for insets and irregular layouts
    if not shared_xaxes and not shared_yaxes:
        # list_of_domains alternates x-domain, y-domain per added axis.
        x_dom = list_of_domains[::2]
        y_dom = list_of_domains[1::2]
        subtitle_pos_x = []
        subtitle_pos_y = []
        for x_domains in x_dom:
            subtitle_pos_x.append(sum(x_domains) / 2)
        for y_domains in y_dom:
            subtitle_pos_y.append(y_domains[1])
    # If shared_axes is True the domain of each subplot is not returned so the
    # title position must be calculated for each subplot
    else:
        # NOTE(review): x_e/x_s/y_e here hold whatever values the layout
        # loops above left behind — confirm this is intended.
        subtitle_pos_x = [None] * cols
        subtitle_pos_y = [None] * rows
        delt_x = (x_e - x_s)
        for index in range(cols):
            subtitle_pos_x[index] = ((delt_x / 2) +
                                     ((delt_x + horizontal_spacing) * index))
        subtitle_pos_x *= rows
        for index in range(rows):
            subtitle_pos_y[index] = (1 - ((y_e + vertical_spacing) * index))
        subtitle_pos_y *= cols
        subtitle_pos_y = sorted(subtitle_pos_y, reverse=True)
    plot_titles = []
    for index in range(len(subplot_titles)):
        if not subplot_titles[index]:
            pass  # empty-string titles are skipped but keep the indexing
        else:
            plot_titles.append({'y': subtitle_pos_y[index],
                                'xref': 'paper',
                                'x': subtitle_pos_x[index],
                                'yref': 'paper',
                                'text': subplot_titles[index],
                                'showarrow': False,
                                'font': graph_objs.Font(size=16),
                                'xanchor': 'center',
                                'yanchor': 'bottom'
                                })
    layout['annotations'] = plot_titles
    if print_grid:
        print(grid_str)
    fig = graph_objs.Figure(layout=layout)
    # Stash grid bookkeeping on the figure object — presumably consumed by
    # Figure.append_trace (see the docstring example); confirm.
    fig.__dict__['_grid_ref'] = grid_ref
    fig.__dict__['_grid_str'] = grid_str
    return fig
def get_valid_graph_obj(obj, obj_type=None):
    """Return a new 'obj_type' graph object built from `obj` that won't raise.

    CAREFUL: this will *silently* strip out invalid pieces of the object.
    """
    # TODO: Deprecate or move. #283
    from plotly.graph_objs import graph_objs
    # Sentinel lookup instead of try/except: a missing class name on the
    # graph_objs module means the requested type is unknown.
    _absent = object()
    graph_obj_class = getattr(graph_objs, obj_type, _absent)
    if graph_obj_class is _absent:
        raise exceptions.PlotlyError(
            "'{}' is not a recognized graph_obj.".format(obj_type)
        )
    return graph_obj_class(obj, _raise=False)
def validate(obj, obj_type):
    """Validate a dictionary, list, or graph object as 'obj_type'.

    The `obj` referenced in the call signature is never altered; an error
    is raised if `obj` cannot be instantiated as a valid 'obj_type' graph
    object.
    """
    # TODO: Deprecate or move. #283
    from plotly.graph_objs import graph_objs
    # Normalize user-friendly names to graph-object class names.
    if obj_type not in graph_reference.CLASSES:
        obj_type = graph_reference.string_to_class_name(obj_type)
    _absent = object()  # sentinel marking "no such class"
    graph_obj_class = getattr(graph_objs, obj_type, _absent)
    if graph_obj_class is _absent:
        raise exceptions.PlotlyError(
            "'{0}' is not a recognizable graph_obj.".
            format(obj_type))
    graph_obj_class(obj)  # instantiation raises on invalid keys/items
def _replace_newline(obj):
    """Return a copy of *obj* where every string has '\n' turned into '<br>'.

    Recurses through dicts and lists; non-collection, non-string values are
    returned by reference, unmodified. A warning is emitted the first time a
    newline is actually converted.
    """
    if isinstance(obj, dict):
        # Fresh dict: recurse into each value, keys untouched.
        return {key: _replace_newline(val) for key, val in obj.items()}
    if isinstance(obj, list):
        # Fresh list: recurse element-wise.
        return [_replace_newline(entry) for entry in obj]
    if isinstance(obj, six.string_types):
        converted = obj.replace('\n', '<br>')
        if converted != obj:
            warnings.warn("Looks like you used a newline character: '\\n'.\n\n"
                          "Plotly uses a subset of HTML escape characters\n"
                          "to do things like newline (<br>), bold (<b></b>),\n"
                          "italics (<i></i>), etc. Your newline characters \n"
                          "have been converted to '<br>' so they will show \n"
                          "up right on your Plotly figure!")
        return converted
    return obj  # we return the actual reference... but DON'T mutate.
if _ipython_imported:
    class PlotlyDisplay(IPython.core.display.HTML):
        """An IPython display object for use with plotly urls
        PlotlyDisplay objects should be instantiated with a url for a plot.
        IPython will *choose* the proper display representation from any
        Python object, and using provided methods if they exist. By defining
        the following, if an HTML display is unusable, the PlotlyDisplay
        object can provide alternate representations.
        """
        def __init__(self, url, width, height):
            # Keep the raw plot url around for inspection/debugging.
            self.resource = url
            # Render the iframe markup once and reuse it for every display.
            self.embed_code = get_embed(url, width=width, height=height)
            super(PlotlyDisplay, self).__init__(data=self.embed_code)
        def _repr_html_(self):
            # IPython rich-display hook: serve the pre-rendered iframe HTML.
            return self.embed_code
def return_figure_from_figure_or_data(figure_or_data, validate_figure):
    """Coerce `figure_or_data` into a figure dict, optionally validating it.

    Positional arguments:
    figure_or_data -- a dict-like figure, or a list of trace dicts (a list
                      is wrapped as the figure's 'data' entry)
    validate_figure -- if truthy, instantiate graph_objs.Figure(figure) so
                       malformed figures raise a descriptive error

    Raises:
    exceptions.PlotlyError -- input is neither dict- nor list-like, or
                              validation failed
    exceptions.PlotlyEmptyDataError -- the figure has no 'data' entry, or
                                       an empty one

    Returns the figure dict (the same object that was passed in, when a
    dict was given).
    """
    if isinstance(figure_or_data, dict):
        figure = figure_or_data
    elif isinstance(figure_or_data, list):
        figure = {'data': figure_or_data}
    else:
        raise exceptions.PlotlyError("The `figure_or_data` positional "
                                     "argument must be either "
                                     "`dict`-like or `list`-like.")
    if validate_figure:
        # Imported lazily, and only when validating, to avoid a circular
        # import and keep the non-validating path dependency-free.
        from plotly.graph_objs import graph_objs
        try:
            graph_objs.Figure(figure)
        except exceptions.PlotlyError as err:
            raise exceptions.PlotlyError("Invalid 'figure_or_data' argument. "
                                         "Plotly will not be able to properly "
                                         "parse the resulting JSON. If you "
                                         "want to send this 'figure_or_data' "
                                         "to Plotly anyway (not recommended), "
                                         "you can set 'validate=False' as a "
                                         "plot option.\nHere's why you're "
                                         "seeing this error:\n\n{0}"
                                         "".format(err))
    # Use .get so a dict lacking a 'data' key raises the informative
    # PlotlyEmptyDataError below instead of a bare KeyError.
    if not figure.get('data'):
        raise exceptions.PlotlyEmptyDataError(
            "Empty data list found. Make sure that you populated the "
            "list of data objects you're sending and try again.\n"
            "Questions? support@plot.ly"
        )
    return figure
# Default colours for finance charts
_DEFAULT_INCREASING_COLOR = '#3D9970'  # green, from http://clrs.cc
_DEFAULT_DECREASING_COLOR = '#FF4136'  # red, from http://clrs.cc
# Accepted values for a 'diag' option — presumably the diagonal-plot kinds of
# a scatterplot-matrix factory; the consumer lives outside this chunk (verify).
DIAG_CHOICES = ['scatter', 'histogram', 'box']
# Recognized colormap type tags — presumably 'cat'=categorical,
# 'seq'=sequential; the consumer lives outside this chunk (verify).
VALID_COLORMAP_TYPES = ['cat', 'seq']
class FigureFactory(object):
"""
BETA functions to create specific chart types.
This is beta as in: subject to change in a backwards incompatible way
without notice.
Supported chart types include candlestick, open high low close, quiver,
streamline, distplot, dendrogram, annotated heatmap, and tables. See
FigureFactory.create_candlestick, FigureFactory.create_ohlc,
FigureFactory.create_quiver, FigureFactory.create_streamline,
FigureFactory.create_distplot, FigureFactory.create_dendrogram,
FigureFactory.create_annotated_heatmap, or FigureFactory.create_table for
more information and examples of a specific chart type.
"""
@staticmethod
def _make_colorscale(colors, scale=None):
"""
Makes a colorscale from a list of colors and scale
Takes a list of colors and scales and constructs a colorscale based
on the colors in sequential order. If 'scale' is left empty, a linear-
interpolated colorscale will be generated. If 'scale' is a specificed
list, it must be the same legnth as colors and must contain all floats
For documentation regarding to the form of the output, see
https://plot.ly/python/reference/#mesh3d-colorscale
"""
colorscale = []
if not scale:
for j, color in enumerate(colors):
colorscale.append([j * 1./(len(colors) - 1), color])
return colorscale
else:
colorscale = [list(tup) for tup in zip(scale, colors)]
return colorscale
@staticmethod
def _convert_colorscale_to_rgb(colorscale):
"""
Converts the colors in a colorscale to rgb colors
A colorscale is an array of arrays, each with a numeric value as the
first item and a color as the second. This function specifically is
converting a colorscale with tuple colors (each coordinate between 0
and 1) into a colorscale with the colors transformed into rgb colors
"""
for color in colorscale:
color[1] = FigureFactory._convert_to_RGB_255(
color[1]
)
for color in colorscale:
color[1] = FigureFactory._label_rgb(
color[1]
)
return colorscale
@staticmethod
def _make_linear_colorscale(colors):
"""
Makes a list of colors into a colorscale-acceptable form
For documentation regarding to the form of the output, see
https://plot.ly/python/reference/#mesh3d-colorscale
"""
scale = 1./(len(colors) - 1)
return[[i * scale, color] for i, color in enumerate(colors)]
    @staticmethod
    def create_2D_density(x, y, colorscale='Earth', ncontours=20,
                          hist_color=(0, 0, 0.5), point_color=(0, 0, 0.5),
                          point_size=2, title='2D Density Plot',
                          height=600, width=600):
        """
        Returns figure for a 2D density plot

        :param (list|array) x: x-axis data for plot generation
        :param (list|array) y: y-axis data for plot generation
        :param (str|tuple|list) colorscale: either a plotly scale name, an rgb
            or hex color, a color tuple or a list or tuple of colors. An rgb
            color is of the form 'rgb(x, y, z)' where x, y, z belong to the
            interval [0, 255] and a color tuple is a tuple of the form
            (a, b, c) where a, b and c belong to [0, 1]. If colormap is a
            list, it must contain the valid color types aforementioned as its
            members.
        :param (int) ncontours: the number of 2D contours to draw on the plot
        :param (str|tuple) hist_color: the color of the plotted histograms
        :param (str|tuple) point_color: the color of the scatter points
        :param (float) point_size: the size of the scatter points
        :param (str) title: set the title for the plot
        :param (float) height: the height of the chart
        :param (float) width: the width of the chart
        :raises: (PlotlyError) if x or y contain non-numbers or differ in
            length.

        Example 1: Simple 2D Density Plot
        ```
        import plotly.plotly as py
        from plotly.tools import FigureFactory as FF
        import numpy as np
        # Make data points
        t = np.linspace(-1,1.2,2000)
        x = (t**3)+(0.3*np.random.randn(2000))
        y = (t**6)+(0.3*np.random.randn(2000))
        # Create a figure
        fig = FF.create_2D_density(x, y)
        # Plot the data
        py.iplot(fig, filename='simple-2d-density')
        ```

        Example 2: Using Parameters
        ```
        import plotly.plotly as py
        from plotly.tools import FigureFactory as FF
        import numpy as np
        # Make data points
        t = np.linspace(-1,1.2,2000)
        x = (t**3)+(0.3*np.random.randn(2000))
        y = (t**6)+(0.3*np.random.randn(2000))
        # Create custom colorscale
        colorscale = ['#7A4579', '#D56073', 'rgb(236,158,105)',
                      (1, 1, 0.2), (0.98,0.98,0.98)]
        # Create a figure
        fig = FF.create_2D_density(
            x, y, colorscale=colorscale,
            hist_color='rgb(255, 237, 222)', point_size=3)
        # Plot the data
        py.iplot(fig, filename='use-parameters')
        ```
        """
        from plotly.graph_objs import graph_objs
        from numbers import Number
        # validate x and y are filled with numbers only
        for array in [x, y]:
            if not all(isinstance(element, Number) for element in array):
                raise exceptions.PlotlyError(
                    "All elements of your 'x' and 'y' lists must be numbers."
                )
        # validate x and y are the same length
        if len(x) != len(y):
            raise exceptions.PlotlyError(
                "Both lists 'x' and 'y' must be the same length."
            )
        # normalize colors to 'rgb(...)' strings, then to a colorscale
        colorscale = FigureFactory._validate_colors(colorscale, 'rgb')
        colorscale = FigureFactory._make_linear_colorscale(colorscale)
        # validate hist_color and point_color
        hist_color = FigureFactory._validate_colors(hist_color, 'rgb')
        point_color = FigureFactory._validate_colors(point_color, 'rgb')
        # scatter of the raw sample points
        trace1 = graph_objs.Scatter(
            x=x, y=y, mode='markers', name='points',
            marker=dict(
                color=point_color[0],
                size=point_size,
                opacity=0.4
            )
        )
        # 2D histogram contour showing the joint density
        trace2 = graph_objs.Histogram2dcontour(
            x=x, y=y, name='density', ncontours=ncontours,
            colorscale=colorscale, reversescale=True, showscale=False
        )
        # marginal histograms on the secondary axes
        trace3 = graph_objs.Histogram(
            x=x, name='x density',
            marker=dict(color=hist_color[0]), yaxis='y2'
        )
        trace4 = graph_objs.Histogram(
            y=y, name='y density',
            marker=dict(color=hist_color[0]), xaxis='x2'
        )
        data = [trace1, trace2, trace3, trace4]
        # main axes take 85% of the figure; the marginals take the rest
        layout = graph_objs.Layout(
            showlegend=False,
            autosize=False,
            title=title,
            height=height,
            width=width,
            xaxis=dict(
                domain=[0, 0.85],
                showgrid=False,
                zeroline=False
            ),
            yaxis=dict(
                domain=[0, 0.85],
                showgrid=False,
                zeroline=False
            ),
            margin=dict(
                t=50
            ),
            hovermode='closest',
            bargap=0,
            xaxis2=dict(
                domain=[0.85, 1],
                showgrid=False,
                zeroline=False
            ),
            yaxis2=dict(
                domain=[0.85, 1],
                showgrid=False,
                zeroline=False
            )
        )
        fig = graph_objs.Figure(data=data, layout=layout)
        return fig
@staticmethod
def _validate_gantt(df):
"""
Validates the inputted dataframe or list
"""
if _pandas_imported and isinstance(df, pd.core.frame.DataFrame):
# validate that df has all the required keys
for key in REQUIRED_GANTT_KEYS:
if key not in df:
raise exceptions.PlotlyError(
"The columns in your dataframe must include the "
"keys".format(REQUIRED_GANTT_KEYS)
)
num_of_rows = len(df.index)
chart = []
for index in range(num_of_rows):
task_dict = {}
for key in df:
task_dict[key] = df.ix[index][key]
chart.append(task_dict)
return chart
# validate if df is a list
if not isinstance(df, list):
raise exceptions.PlotlyError("You must input either a dataframe "
"or a list of dictionaries.")
# validate if df is empty
if len(df) <= 0:
raise exceptions.PlotlyError("Your list is empty. It must contain "
"at least one dictionary.")
if not isinstance(df[0], dict):
raise exceptions.PlotlyError("Your list must only "
"include dictionaries.")
return df
@staticmethod
def _gantt(chart, colors, title, bar_width, showgrid_x, showgrid_y,
height, width, tasks=None, task_names=None, data=None):
"""
Refer to FigureFactory.create_gantt() for docstring
"""
if tasks is None:
tasks = []
if task_names is None:
task_names = []
if data is None:
data = []
for index in range(len(chart)):
task = dict(x0=chart[index]['Start'],
x1=chart[index]['Finish'],
name=chart[index]['Task'])
tasks.append(task)
shape_template = {
'type': 'rect',
'xref': 'x',
'yref': 'y',
'opacity': 1,
'line': {
'width': 0,
},
'yref': 'y',
}
color_index = 0
for index in range(len(tasks)):
tn = tasks[index]['name']
task_names.append(tn)
del tasks[index]['name']
tasks[index].update(shape_template)
tasks[index]['y0'] = index - bar_width
tasks[index]['y1'] = index + bar_width
# check if colors need to be looped
if color_index >= len(colors):
color_index = 0
tasks[index]['fillcolor'] = colors[color_index]
# Add a line for hover text and autorange
data.append(
dict(
x=[tasks[index]['x0'], tasks[index]['x1']],
y=[index, index],
name='',
marker={'color': 'white'}
)
)
color_index += 1
layout = dict(
title=title,
showlegend=False,
height=height,
width=width,
shapes=[],
hovermode='closest',
yaxis=dict(
showgrid=showgrid_y,
ticktext=task_names,
tickvals=list(range(len(tasks))),
range=[-1, len(tasks) + 1],
autorange=False,
zeroline=False,
),
xaxis=dict(
showgrid=showgrid_x,
zeroline=False,
rangeselector=dict(
buttons=list([
dict(count=7,
label='1w',
step='day',
stepmode='backward'),
dict(count=1,
label='1m',
step='month',
stepmode='backward'),
dict(count=6,
label='6m',
step='month',
stepmode='backward'),
dict(count=1,
label='YTD',
step='year',
stepmode='todate'),
dict(count=1,
label='1y',
step='year',
stepmode='backward'),
dict(step='all')
])
),
type='date'
)
)
layout['shapes'] = tasks
fig = dict(data=data, layout=layout)
return fig
    @staticmethod
    def _gantt_colorscale(chart, colors, title, index_col, show_colorbar,
                          bar_width, showgrid_x, showgrid_y, height,
                          width, tasks=None, task_names=None, data=None):
        """
        Refer to FigureFactory.create_gantt() for docstring

        Handles the case where an indexing column is used with a list of
        colors: numeric index values are mapped onto a two-color
        colorscale, string index values are assigned colors by cycling
        through ``colors``.
        """
        from numbers import Number
        if tasks is None:
            tasks = []
        if task_names is None:
            task_names = []
        if data is None:
            data = []
        showlegend = False
        for index in range(len(chart)):
            task = dict(x0=chart[index]['Start'],
                        x1=chart[index]['Finish'],
                        name=chart[index]['Task'])
            tasks.append(task)
        # NOTE(review): 'yref' appears twice in this literal; the second,
        # identical entry is dead code
        shape_template = {
            'type': 'rect',
            'xref': 'x',
            'yref': 'y',
            'opacity': 1,
            'line': {
                'width': 0,
            },
            'yref': 'y',
        }
        # compute the color for task based on indexing column
        if isinstance(chart[0][index_col], Number):
            # check that colors has at least 2 colors
            if len(colors) < 2:
                raise exceptions.PlotlyError(
                    "You must use at least 2 colors in 'colors' if you "
                    "are using a colorscale. However only the first two "
                    "colors given will be used for the lower and upper "
                    "bounds on the colormap."
                )
            for index in range(len(tasks)):
                tn = tasks[index]['name']
                task_names.append(tn)
                del tasks[index]['name']
                tasks[index].update(shape_template)
                tasks[index]['y0'] = index - bar_width
                tasks[index]['y1'] = index + bar_width
                # unlabel color
                # NOTE(review): colors are unlabelled and relabelled on
                # every iteration; this could be hoisted out of the loop
                colors = FigureFactory._color_parser(
                    colors, FigureFactory._unlabel_rgb
                )
                lowcolor = colors[0]
                highcolor = colors[1]
                # index values are interpreted as percentages (0-100)
                intermed = (chart[index][index_col])/100.0
                intermed_color = FigureFactory._find_intermediate_color(
                    lowcolor, highcolor, intermed
                )
                intermed_color = FigureFactory._color_parser(
                    intermed_color, FigureFactory._label_rgb
                )
                tasks[index]['fillcolor'] = intermed_color
                # relabel colors with 'rgb'
                colors = FigureFactory._color_parser(
                    colors, FigureFactory._label_rgb
                )
                # add a line for hover text and autorange
                data.append(
                    dict(
                        x=[tasks[index]['x0'], tasks[index]['x1']],
                        y=[index, index],
                        name='',
                        marker={'color': 'white'}
                    )
                )
            if show_colorbar is True:
                # generate dummy data for colorscale visibility
                data.append(
                    dict(
                        x=[tasks[index]['x0'], tasks[index]['x0']],
                        y=[index, index],
                        name='',
                        marker={'color': 'white',
                                'colorscale': [[0, colors[0]], [1, colors[1]]],
                                'showscale': True,
                                'cmax': 100,
                                'cmin': 0}
                    )
                )
        if isinstance(chart[0][index_col], str):
            # collect the unique index values, in sorted order
            index_vals = []
            for row in range(len(tasks)):
                if chart[row][index_col] not in index_vals:
                    index_vals.append(chart[row][index_col])
            index_vals.sort()
            if len(colors) < len(index_vals):
                raise exceptions.PlotlyError(
                    "Error. The number of colors in 'colors' must be no less "
                    "than the number of unique index values in your group "
                    "column."
                )
            # make a dictionary assignment to each index value
            index_vals_dict = {}
            # define color index
            c_index = 0
            for key in index_vals:
                if c_index > len(colors) - 1:
                    c_index = 0
                index_vals_dict[key] = colors[c_index]
                c_index += 1
            for index in range(len(tasks)):
                tn = tasks[index]['name']
                task_names.append(tn)
                del tasks[index]['name']
                tasks[index].update(shape_template)
                tasks[index]['y0'] = index - bar_width
                tasks[index]['y1'] = index + bar_width
                tasks[index]['fillcolor'] = index_vals_dict[
                    chart[index][index_col]
                ]
                # add a line for hover text and autorange
                data.append(
                    dict(
                        x=[tasks[index]['x0'], tasks[index]['x1']],
                        y=[index, index],
                        name='',
                        marker={'color': 'white'}
                    )
                )
            if show_colorbar is True:
                # generate dummy data to generate legend
                showlegend = True
                for k, index_value in enumerate(index_vals):
                    data.append(
                        dict(
                            x=[tasks[index]['x0'], tasks[index]['x0']],
                            y=[k, k],
                            showlegend=True,
                            name=str(index_value),
                            hoverinfo='none',
                            marker=dict(
                                color=colors[k],
                                size=1
                            )
                        )
                    )
        layout = dict(
            title=title,
            showlegend=showlegend,
            height=height,
            width=width,
            shapes=[],
            hovermode='closest',
            yaxis=dict(
                showgrid=showgrid_y,
                ticktext=task_names,
                tickvals=list(range(len(tasks))),
                range=[-1, len(tasks) + 1],
                autorange=False,
                zeroline=False,
            ),
            xaxis=dict(
                showgrid=showgrid_x,
                zeroline=False,
                # quick-zoom buttons for common date ranges
                rangeselector=dict(
                    buttons=list([
                        dict(count=7,
                             label='1w',
                             step='day',
                             stepmode='backward'),
                        dict(count=1,
                             label='1m',
                             step='month',
                             stepmode='backward'),
                        dict(count=6,
                             label='6m',
                             step='month',
                             stepmode='backward'),
                        dict(count=1,
                             label='YTD',
                             step='year',
                             stepmode='todate'),
                        dict(count=1,
                             label='1y',
                             step='year',
                             stepmode='backward'),
                        dict(step='all')
                    ])
                ),
                type='date'
            )
        )
        layout['shapes'] = tasks
        fig = dict(data=data, layout=layout)
        return fig
@staticmethod
def _gantt_dict(chart, colors, title, index_col, show_colorbar, bar_width,
showgrid_x, showgrid_y, height, width, tasks=None,
task_names=None, data=None):
"""
Refer to FigureFactory.create_gantt() for docstring
"""
if tasks is None:
tasks = []
if task_names is None:
task_names = []
if data is None:
data = []
showlegend = False
for index in range(len(chart)):
task = dict(x0=chart[index]['Start'],
x1=chart[index]['Finish'],
name=chart[index]['Task'])
tasks.append(task)
shape_template = {
'type': 'rect',
'xref': 'x',
'yref': 'y',
'opacity': 1,
'line': {
'width': 0,
},
'yref': 'y',
}
index_vals = []
for row in range(len(tasks)):
if chart[row][index_col] not in index_vals:
index_vals.append(chart[row][index_col])
index_vals.sort()
# verify each value in index column appears in colors dictionary
for key in index_vals:
if key not in colors:
raise exceptions.PlotlyError(
"If you are using colors as a dictionary, all of its "
"keys must be all the values in the index column."
)
for index in range(len(tasks)):
tn = tasks[index]['name']
task_names.append(tn)
del tasks[index]['name']
tasks[index].update(shape_template)
tasks[index]['y0'] = index - bar_width
tasks[index]['y1'] = index + bar_width
tasks[index]['fillcolor'] = colors[chart[index][index_col]]
# add a line for hover text and autorange
data.append(
dict(
x=[tasks[index]['x0'], tasks[index]['x1']],
y=[index, index],
name='',
marker={'color': 'white'}
)
)
if show_colorbar is True:
# generate dummy data to generate legend
showlegend = True
for k, index_value in enumerate(index_vals):
data.append(
dict(
x=[tasks[index]['x0'], tasks[index]['x0']],
y=[k, k],
showlegend=True,
hoverinfo='none',
name=str(index_value),
marker=dict(
color=colors[index_value],
size=1
)
)
)
layout = dict(
title=title,
showlegend=showlegend,
height=height,
width=width,
shapes=[],
hovermode='closest',
yaxis=dict(
showgrid=showgrid_y,
ticktext=task_names,
tickvals=list(range(len(tasks))),
range=[-1, len(tasks) + 1],
autorange=False,
zeroline=False,
),
xaxis=dict(
showgrid=showgrid_x,
zeroline=False,
rangeselector=dict(
buttons=list([
dict(count=7,
label='1w',
step='day',
stepmode='backward'),
dict(count=1,
label='1m',
step='month',
stepmode='backward'),
dict(count=6,
label='6m',
step='month',
stepmode='backward'),
dict(count=1,
label='YTD',
step='year',
stepmode='todate'),
dict(count=1,
label='1y',
step='year',
stepmode='backward'),
dict(step='all')
])
),
type='date'
)
)
layout['shapes'] = tasks
fig = dict(data=data, layout=layout)
return fig
    @staticmethod
    def create_gantt(df, colors=None, index_col=None, show_colorbar=False,
                     reverse_colors=False, title='Gantt Chart',
                     bar_width=0.2, showgrid_x=False, showgrid_y=False,
                     height=600, width=900, tasks=None,
                     task_names=None, data=None):
        """
        Returns figure for a gantt chart

        :param (array|list) df: input data for gantt chart. Must be either a
            a dataframe or a list. If dataframe, the columns must include
            'Task', 'Start' and 'Finish'. Other columns can be included and
            used for indexing. If a list, its elements must be dictionaries
            with the same required column headers: 'Task', 'Start' and
            'Finish'.
        :param (str|list|dict|tuple) colors: either a plotly scale name, an
            rgb or hex color, a color tuple or a list of colors. An rgb color
            is of the form 'rgb(x, y, z)' where x, y, z belong to the interval
            [0, 255] and a color tuple is a tuple of the form (a, b, c) where
            a, b and c belong to [0, 1]. If colors is a list, it must
            contain the valid color types aforementioned as its members.
            If a dictionary, all values of the indexing column must be keys in
            colors.
        :param (str|float) index_col: the column header (if df is a data
            frame) that will function as the indexing column. If df is a list,
            index_col must be one of the keys in all the items of df.
        :param (bool) show_colorbar: determines if colorbar will be visible.
            Only applies if values in the index column are numeric.
        :param (bool) reverse_colors: reverses the order of selected colors
        :param (str) title: the title of the chart
        :param (float) bar_width: the width of the horizontal bars in the plot
        :param (bool) showgrid_x: show/hide the x-axis grid
        :param (bool) showgrid_y: show/hide the y-axis grid
        :param (float) height: the height of the chart
        :param (float) width: the width of the chart

        Example 1: Simple Gantt Chart
        ```
        import plotly.plotly as py
        from plotly.tools import FigureFactory as FF
        # Make data for chart
        df = [dict(Task="Job A", Start='2009-01-01', Finish='2009-02-30'),
              dict(Task="Job B", Start='2009-03-05', Finish='2009-04-15'),
              dict(Task="Job C", Start='2009-02-20', Finish='2009-05-30')]
        # Create a figure
        fig = FF.create_gantt(df)
        # Plot the data
        py.iplot(fig, filename='Simple Gantt Chart', world_readable=True)
        ```

        Example 2: Index by Column with Numerical Entries
        ```
        import plotly.plotly as py
        from plotly.tools import FigureFactory as FF
        # Make data for chart
        df = [dict(Task="Job A", Start='2009-01-01',
                   Finish='2009-02-30', Complete=10),
              dict(Task="Job B", Start='2009-03-05',
                   Finish='2009-04-15', Complete=60),
              dict(Task="Job C", Start='2009-02-20',
                   Finish='2009-05-30', Complete=95)]
        # Create a figure with Plotly colorscale
        fig = FF.create_gantt(df, colors='Blues', index_col='Complete',
                              show_colorbar=True, bar_width=0.5,
                              showgrid_x=True, showgrid_y=True)
        # Plot the data
        py.iplot(fig, filename='Numerical Entries', world_readable=True)
        ```

        Example 3: Index by Column with String Entries
        ```
        import plotly.plotly as py
        from plotly.tools import FigureFactory as FF
        # Make data for chart
        df = [dict(Task="Job A", Start='2009-01-01',
                   Finish='2009-02-30', Resource='Apple'),
              dict(Task="Job B", Start='2009-03-05',
                   Finish='2009-04-15', Resource='Grape'),
              dict(Task="Job C", Start='2009-02-20',
                   Finish='2009-05-30', Resource='Banana')]
        # Create a figure with Plotly colorscale
        fig = FF.create_gantt(df, colors=['rgb(200, 50, 25)',
                                          (1, 0, 1),
                                          '#6c4774'],
                              index_col='Resource',
                              reverse_colors=True,
                              show_colorbar=True)
        # Plot the data
        py.iplot(fig, filename='String Entries', world_readable=True)
        ```

        Example 4: Use a dictionary for colors
        ```
        import plotly.plotly as py
        from plotly.tools import FigureFactory as FF
        # Make data for chart
        df = [dict(Task="Job A", Start='2009-01-01',
                   Finish='2009-02-30', Resource='Apple'),
              dict(Task="Job B", Start='2009-03-05',
                   Finish='2009-04-15', Resource='Grape'),
              dict(Task="Job C", Start='2009-02-20',
                   Finish='2009-05-30', Resource='Banana')]
        # Make a dictionary of colors
        colors = {'Apple': 'rgb(255, 0, 0)',
                  'Grape': 'rgb(170, 14, 200)',
                  'Banana': (1, 1, 0.2)}
        # Create a figure with Plotly colorscale
        fig = FF.create_gantt(df, colors=colors,
                              index_col='Resource',
                              show_colorbar=True)
        # Plot the data
        py.iplot(fig, filename='dictionary colors', world_readable=True)
        ```

        Example 5: Use a pandas dataframe
        ```
        import plotly.plotly as py
        from plotly.tools import FigureFactory as FF
        import pandas as pd
        # Make data as a dataframe
        df = pd.DataFrame([['Run', '2010-01-01', '2011-02-02', 10],
                           ['Fast', '2011-01-01', '2012-06-05', 55],
                           ['Eat', '2012-01-05', '2013-07-05', 94]],
                          columns=['Task', 'Start', 'Finish', 'Complete'])
        # Create a figure with Plotly colorscale
        fig = FF.create_gantt(df, colors='Blues', index_col='Complete',
                              show_colorbar=True, bar_width=0.5,
                              showgrid_x=True, showgrid_y=True)
        # Plot the data
        py.iplot(fig, filename='data with dataframe', world_readable=True)
        ```
        """
        # validate gantt input data
        chart = FigureFactory._validate_gantt(df)
        if index_col:
            if index_col not in chart[0]:
                raise exceptions.PlotlyError(
                    "In order to use an indexing column and assign colors to "
                    "the values of the index, you must choose an actual "
                    "column name in the dataframe or key if a list of "
                    "dictionaries is being used.")
            # validate gantt index column
            index_list = []
            for dictionary in chart:
                index_list.append(dictionary[index_col])
            FigureFactory._validate_index(index_list)
        # Validate colors
        if isinstance(colors, dict):
            colors = FigureFactory._validate_colors_dict(colors, 'rgb')
        else:
            colors = FigureFactory._validate_colors(colors, 'rgb')
        if reverse_colors is True:
            colors.reverse()
        # dispatch to the helper matching the colors/index combination
        if not index_col:
            if isinstance(colors, dict):
                raise exceptions.PlotlyError(
                    "Error. You have set colors to a dictionary but have not "
                    "picked an index. An index is required if you are "
                    "assigning colors to particular values in a dictioanry."
                )
            fig = FigureFactory._gantt(
                chart, colors, title, bar_width, showgrid_x, showgrid_y,
                height, width, tasks=None, task_names=None, data=None
            )
            return fig
        else:
            if not isinstance(colors, dict):
                fig = FigureFactory._gantt_colorscale(
                    chart, colors, title, index_col, show_colorbar, bar_width,
                    showgrid_x, showgrid_y, height, width,
                    tasks=None, task_names=None, data=None
                )
                return fig
            else:
                fig = FigureFactory._gantt_dict(
                    chart, colors, title, index_col, show_colorbar, bar_width,
                    showgrid_x, showgrid_y, height, width,
                    tasks=None, task_names=None, data=None
                )
                return fig
    @staticmethod
    def _validate_colors(colors, colortype='tuple'):
        """
        Validates color(s) and returns a list of color(s) of a specified type

        Accepts a Plotly scale name, an 'rgb(...)' or '#hex' string, a
        single (a, b, c) tuple, or a list/tuple of any of those.  All
        entries are normalized to tuples with coordinates in [0, 1], then
        optionally relabelled as 'rgb(r, g, b)' strings when
        ``colortype='rgb'``.

        :raises: (PlotlyError) for unknown color strings, or for rgb/tuple
            components out of range.
        """
        from numbers import Number
        if colors is None:
            colors = DEFAULT_PLOTLY_COLORS
        if isinstance(colors, str):
            if colors in PLOTLY_SCALES:
                colors = PLOTLY_SCALES[colors]
            elif 'rgb' in colors or '#' in colors:
                colors = [colors]
            else:
                raise exceptions.PlotlyError(
                    "If your colors variable is a string, it must be a "
                    "Plotly scale, an rgb color or a hex color.")
        elif isinstance(colors, tuple):
            # a single (a, b, c) color tuple vs a tuple of colors
            if isinstance(colors[0], Number):
                colors = [colors]
            else:
                colors = list(colors)
        # convert color elements in list to tuple color
        for j, each_color in enumerate(colors):
            if 'rgb' in each_color:
                each_color = FigureFactory._color_parser(
                    each_color, FigureFactory._unlabel_rgb
                )
                for value in each_color:
                    if value > 255.0:
                        raise exceptions.PlotlyError(
                            "Whoops! The elements in your rgb colors "
                            "tuples cannot exceed 255.0."
                        )
                # scale 0-255 components down to [0, 1]
                each_color = FigureFactory._color_parser(
                    each_color, FigureFactory._unconvert_from_RGB_255
                )
                colors[j] = each_color
            if '#' in each_color:
                each_color = FigureFactory._color_parser(
                    each_color, FigureFactory._hex_to_rgb
                )
                each_color = FigureFactory._color_parser(
                    each_color, FigureFactory._unconvert_from_RGB_255
                )
                colors[j] = each_color
            if isinstance(each_color, tuple):
                for value in each_color:
                    if value > 1.0:
                        raise exceptions.PlotlyError(
                            "Whoops! The elements in your colors tuples "
                            "cannot exceed 1.0."
                        )
                colors[j] = each_color
        if colortype == 'rgb':
            # relabel every tuple as an 'rgb(r, g, b)' string
            for j, each_color in enumerate(colors):
                rgb_color = FigureFactory._color_parser(
                    each_color, FigureFactory._convert_to_RGB_255
                )
                colors[j] = FigureFactory._color_parser(
                    rgb_color, FigureFactory._label_rgb
                )
        return colors
    @staticmethod
    def _validate_colors_dict(colors, colortype='tuple'):
        """
        Validates dictionary of color(s)

        Like _validate_colors, but operates on the values of a
        {index value: color} dictionary in place.

        :raises: (PlotlyError) for rgb/tuple components out of range.
        """
        # validate each color element in the dictionary
        for key in colors:
            if 'rgb' in colors[key]:
                colors[key] = FigureFactory._color_parser(
                    colors[key], FigureFactory._unlabel_rgb
                )
                for value in colors[key]:
                    if value > 255.0:
                        raise exceptions.PlotlyError(
                            "Whoops! The elements in your rgb colors "
                            "tuples cannot exceed 255.0."
                        )
                # scale 0-255 components down to [0, 1]
                colors[key] = FigureFactory._color_parser(
                    colors[key], FigureFactory._unconvert_from_RGB_255
                )
            if '#' in colors[key]:
                colors[key] = FigureFactory._color_parser(
                    colors[key], FigureFactory._hex_to_rgb
                )
                colors[key] = FigureFactory._color_parser(
                    colors[key], FigureFactory._unconvert_from_RGB_255
                )
            if isinstance(colors[key], tuple):
                for value in colors[key]:
                    if value > 1.0:
                        raise exceptions.PlotlyError(
                            "Whoops! The elements in your colors tuples "
                            "cannot exceed 1.0."
                        )
        if colortype == 'rgb':
            # relabel every tuple as an 'rgb(r, g, b)' string
            for key in colors:
                colors[key] = FigureFactory._color_parser(
                    colors[key], FigureFactory._convert_to_RGB_255
                )
                colors[key] = FigureFactory._color_parser(
                    colors[key], FigureFactory._label_rgb
                )
        return colors
@staticmethod
def _calc_stats(data):
"""
Calculate statistics for use in violin plot.
"""
import numpy as np
x = np.asarray(data, np.float)
vals_min = np.min(x)
vals_max = np.max(x)
q2 = np.percentile(x, 50, interpolation='linear')
q1 = np.percentile(x, 25, interpolation='lower')
q3 = np.percentile(x, 75, interpolation='higher')
iqr = q3 - q1
whisker_dist = 1.5 * iqr
# in order to prevent drawing whiskers outside the interval
# of data one defines the whisker positions as:
d1 = np.min(x[x >= (q1 - whisker_dist)])
d2 = np.max(x[x <= (q3 + whisker_dist)])
return {
'min': vals_min,
'max': vals_max,
'q1': q1,
'q2': q2,
'q3': q3,
'd1': d1,
'd2': d2
}
@staticmethod
def _make_half_violin(x, y, fillcolor='#1f77b4',
linecolor='rgb(0, 0, 0)'):
"""
Produces a sideways probability distribution fig violin plot.
"""
from plotly.graph_objs import graph_objs
text = ['(pdf(y), y)=(' + '{:0.2f}'.format(x[i]) +
', ' + '{:0.2f}'.format(y[i]) + ')'
for i in range(len(x))]
return graph_objs.Scatter(
x=x,
y=y,
mode='lines',
name='',
text=text,
fill='tonextx',
fillcolor=fillcolor,
line=graph_objs.Line(width=0.5, color=linecolor, shape='spline'),
hoverinfo='text',
opacity=0.5
)
@staticmethod
def _make_violin_rugplot(vals, pdf_max, distance,
color='#1f77b4'):
"""
Returns a rugplot fig for a violin plot.
"""
from plotly.graph_objs import graph_objs
return graph_objs.Scatter(
y=vals,
x=[-pdf_max-distance]*len(vals),
marker=graph_objs.Marker(
color=color,
symbol='line-ew-open'
),
mode='markers',
name='',
showlegend=False,
hoverinfo='y'
)
@staticmethod
def _make_quartiles(q1, q3):
"""
Makes the upper and lower quartiles for a violin plot.
"""
from plotly.graph_objs import graph_objs
return graph_objs.Scatter(
x=[0, 0],
y=[q1, q3],
text=['lower-quartile: ' + '{:0.2f}'.format(q1),
'upper-quartile: ' + '{:0.2f}'.format(q3)],
mode='lines',
line=graph_objs.Line(
width=4,
color='rgb(0,0,0)'
),
hoverinfo='text'
)
@staticmethod
def _make_median(q2):
"""
Formats the 'median' hovertext for a violin plot.
"""
from plotly.graph_objs import graph_objs
return graph_objs.Scatter(
x=[0],
y=[q2],
text=['median: ' + '{:0.2f}'.format(q2)],
mode='markers',
marker=dict(symbol='square',
color='rgb(255,255,255)'),
hoverinfo='text'
)
@staticmethod
def _make_non_outlier_interval(d1, d2):
"""
Returns the scatterplot fig of most of a violin plot.
"""
from plotly.graph_objs import graph_objs
return graph_objs.Scatter(
x=[0, 0],
y=[d1, d2],
name='',
mode='lines',
line=graph_objs.Line(width=1.5,
color='rgb(0,0,0)')
)
@staticmethod
def _make_XAxis(xaxis_title, xaxis_range):
"""
Makes the x-axis for a violin plot.
"""
from plotly.graph_objs import graph_objs
xaxis = graph_objs.XAxis(title=xaxis_title,
range=xaxis_range,
showgrid=False,
zeroline=False,
showline=False,
mirror=False,
ticks='',
showticklabels=False,
)
return xaxis
@staticmethod
def _make_YAxis(yaxis_title):
"""
Makes the y-axis for a violin plot.
"""
from plotly.graph_objs import graph_objs
yaxis = graph_objs.YAxis(title=yaxis_title,
showticklabels=True,
autorange=True,
ticklen=4,
showline=True,
zeroline=False,
showgrid=False,
mirror=False)
return yaxis
@staticmethod
def _violinplot(vals, fillcolor='#1f77b4', rugplot=True):
"""
Refer to FigureFactory.create_violin() for docstring.
"""
import numpy as np
from scipy import stats
vals = np.asarray(vals, np.float)
# summary statistics
vals_min = FigureFactory._calc_stats(vals)['min']
vals_max = FigureFactory._calc_stats(vals)['max']
q1 = FigureFactory._calc_stats(vals)['q1']
q2 = FigureFactory._calc_stats(vals)['q2']
q3 = FigureFactory._calc_stats(vals)['q3']
d1 = FigureFactory._calc_stats(vals)['d1']
d2 = FigureFactory._calc_stats(vals)['d2']
# kernel density estimation of pdf
pdf = stats.gaussian_kde(vals)
# grid over the data interval
xx = np.linspace(vals_min, vals_max, 100)
# evaluate the pdf at the grid xx
yy = pdf(xx)
max_pdf = np.max(yy)
# distance from the violin plot to rugplot
distance = (2.0 * max_pdf)/10 if rugplot else 0
# range for x values in the plot
plot_xrange = [-max_pdf - distance - 0.1, max_pdf + 0.1]
plot_data = [FigureFactory._make_half_violin(
-yy, xx, fillcolor=fillcolor),
FigureFactory._make_half_violin(
yy, xx, fillcolor=fillcolor),
FigureFactory._make_non_outlier_interval(d1, d2),
FigureFactory._make_quartiles(q1, q3),
FigureFactory._make_median(q2)]
if rugplot:
plot_data.append(FigureFactory._make_violin_rugplot(
vals,
max_pdf,
distance=distance,
color=fillcolor)
)
return plot_data, plot_xrange
@staticmethod
def _violin_no_colorscale(data, data_header, group_header, colors,
use_colorscale, group_stats,
height, width, title):
"""
Refer to FigureFactory.create_violin() for docstring.
Returns fig for violin plot without colorscale.
"""
from plotly.graph_objs import graph_objs
import numpy as np
# collect all group names
group_name = []
for name in data[group_header]:
if name not in group_name:
group_name.append(name)
group_name.sort()
gb = data.groupby([group_header])
L = len(group_name)
fig = make_subplots(rows=1, cols=L,
shared_yaxes=True,
horizontal_spacing=0.025,
print_grid=True)
color_index = 0
for k, gr in enumerate(group_name):
vals = np.asarray(gb.get_group(gr)[data_header], np.float)
if color_index >= len(colors):
color_index = 0
plot_data, plot_xrange = FigureFactory._violinplot(
vals,
fillcolor=colors[color_index]
)
layout = graph_objs.Layout()
for item in plot_data:
fig.append_trace(item, 1, k + 1)
color_index += 1
# add violin plot labels
fig['layout'].update({'xaxis{}'.format(k + 1):
FigureFactory._make_XAxis(group_name[k],
plot_xrange)})
# set the sharey axis style
fig['layout'].update(
{'yaxis{}'.format(1): FigureFactory._make_YAxis('')}
)
fig['layout'].update(
title=title,
showlegend=False,
hovermode='closest',
autosize=False,
height=height,
width=width
)
return fig
@staticmethod
def _violin_colorscale(data, data_header, group_header, colors,
use_colorscale, group_stats, height, width, title):
"""
Refer to FigureFactory.create_violin() for docstring.
Returns fig for violin plot with colorscale.
"""
from plotly.graph_objs import graph_objs
import numpy as np
# collect all group names
group_name = []
for name in data[group_header]:
if name not in group_name:
group_name.append(name)
group_name.sort()
# make sure all group names are keys in group_stats
for group in group_name:
if group not in group_stats:
raise exceptions.PlotlyError("All values/groups in the index "
"column must be represented "
"as a key in group_stats.")
gb = data.groupby([group_header])
L = len(group_name)
fig = make_subplots(rows=1, cols=L,
shared_yaxes=True,
horizontal_spacing=0.025,
print_grid=True)
# prepare low and high color for colorscale
lowcolor = FigureFactory._color_parser(
colors[0], FigureFactory._unlabel_rgb
)
highcolor = FigureFactory._color_parser(
colors[1], FigureFactory._unlabel_rgb
)
# find min and max values in group_stats
group_stats_values = []
for key in group_stats:
group_stats_values.append(group_stats[key])
max_value = max(group_stats_values)
min_value = min(group_stats_values)
for k, gr in enumerate(group_name):
vals = np.asarray(gb.get_group(gr)[data_header], np.float)
# find intermediate color from colorscale
intermed = (group_stats[gr] - min_value) / (max_value - min_value)
intermed_color = FigureFactory._find_intermediate_color(
lowcolor, highcolor, intermed
)
plot_data, plot_xrange = FigureFactory._violinplot(
vals,
fillcolor='rgb{}'.format(intermed_color)
)
layout = graph_objs.Layout()
for item in plot_data:
fig.append_trace(item, 1, k + 1)
fig['layout'].update({'xaxis{}'.format(k + 1):
FigureFactory._make_XAxis(group_name[k],
plot_xrange)})
# add colorbar to plot
trace_dummy = graph_objs.Scatter(
x=[0],
y=[0],
mode='markers',
marker=dict(
size=2,
cmin=min_value,
cmax=max_value,
colorscale=[[0, colors[0]],
[1, colors[1]]],
showscale=True),
showlegend=False,
)
fig.append_trace(trace_dummy, 1, L)
# set the sharey axis style
fig['layout'].update(
{'yaxis{}'.format(1): FigureFactory._make_YAxis('')}
)
fig['layout'].update(
title=title,
showlegend=False,
hovermode='closest',
autosize=False,
height=height,
width=width
)
return fig
@staticmethod
def _violin_dict(data, data_header, group_header, colors, use_colorscale,
                 group_stats, height, width, title):
    """
    Refer to FigureFactory.create_violin() for docstring.

    Returns fig for violin plot without colorscale: one violin subplot
    per unique group, each filled with the color mapped to that group
    in the ``colors`` dictionary.

    :param (pd.DataFrame) data: the data to plot, one violin per group
    :param (str) data_header: column holding the numeric values
    :param (str) group_header: column holding the group labels
    :param (dict) colors: maps every group name to a fill color
    :param (bool) use_colorscale: unused here; kept so all _violin_*
        helpers share the same signature
    :param (dict) group_stats: unused here; kept for the same reason
    :param (float) height: figure height in pixels
    :param (float) width: figure width in pixels
    :param (str) title: figure title
    :raises: (PlotlyError) if any group name is missing from ``colors``
    """
    from plotly.graph_objs import graph_objs
    import numpy as np

    # collect all group names (order-preserving dedupe, then sort)
    group_name = []
    for name in data[group_header]:
        if name not in group_name:
            group_name.append(name)
    group_name.sort()

    # make sure every group name has an assigned color
    for group in group_name:
        if group not in colors:
            raise exceptions.PlotlyError("If colors is a dictionary, all "
                                         "the group names must appear as "
                                         "keys in colors.")

    gb = data.groupby([group_header])
    L = len(group_name)

    fig = make_subplots(rows=1, cols=L,
                        shared_yaxes=True,
                        horizontal_spacing=0.025,
                        print_grid=True)

    for k, gr in enumerate(group_name):
        # builtin float here: np.float was only an alias for it and is
        # removed in numpy >= 1.24
        vals = np.asarray(gb.get_group(gr)[data_header], float)
        plot_data, plot_xrange = FigureFactory._violinplot(
            vals,
            fillcolor=colors[gr]
        )
        # (removed a dead `layout = graph_objs.Layout()` that was
        # created on every iteration and never used)
        for item in plot_data:
            fig.append_trace(item, 1, k + 1)
        # add violin plot labels
        fig['layout'].update({'xaxis{}'.format(k + 1):
                              FigureFactory._make_XAxis(group_name[k],
                                                        plot_xrange)})

    # set the sharey axis style
    fig['layout'].update(
        {'yaxis{}'.format(1): FigureFactory._make_YAxis('')}
    )
    fig['layout'].update(
        title=title,
        showlegend=False,
        hovermode='closest',
        autosize=False,
        height=height,
        width=width
    )
    return fig
@staticmethod
def create_violin(data, data_header=None, group_header=None,
                  colors=None, use_colorscale=False, group_stats=None,
                  height=450, width=600, title='Violin and Rug Plot'):
    """
    Returns figure for a violin plot

    :param (list|array) data: accepts either a list of numerical values,
        a list of dictionaries all with identical keys and at least one
        column of numeric values, or a pandas dataframe with at least one
        column of numbers
    :param (str) data_header: the header of the data column to be used
        from an inputted pandas dataframe. Not applicable if 'data' is
        a list of numeric values
    :param (str) group_header: applicable if grouping data by a variable.
        'group_header' must be set to the name of the grouping variable.
    :param (str|tuple|list|dict) colors: either a plotly scale name,
        an rgb or hex color, a color tuple, a list of colors or a
        dictionary. An rgb color is of the form 'rgb(x, y, z)' where
        x, y and z belong to the interval [0, 255] and a color tuple is a
        tuple of the form (a, b, c) where a, b and c belong to [0, 1].
        If colors is a list, it must contain valid color types as its
        members.
    :param (bool) use_colorscale: Only applicable if grouping by another
        variable. Will implement a colorscale based on the first 2 colors
        of param colors. This means colors must be a list with at least 2
        colors in it (Plotly colorscales are accepted since they map to a
        list of two rgb colors)
    :param (dict) group_stats: a dictionary where each key is a unique
        value from the group_header column in data. Each value must be a
        number and will be used to color the violin plots if a colorscale
        is being used
    :param (float) height: the height of the violin plot
    :param (float) width: the width of the violin plot
    :param (str) title: the title of the violin plot

    Example 1: Single Violin Plot
    ```
    import plotly.plotly as py
    from plotly.tools import FigureFactory as FF
    from plotly.graph_objs import graph_objs

    import numpy as np
    from scipy import stats

    # create list of random values
    data_list = np.random.randn(100)
    data_list.tolist()

    # create violin fig
    fig = FF.create_violin(data_list, colors='#604d9e')

    # plot
    py.iplot(fig, filename='Violin Plot')
    ```

    Example 2: Multiple Violin Plots with Qualitative Coloring
    ```
    import plotly.plotly as py
    from plotly.tools import FigureFactory as FF
    from plotly.graph_objs import graph_objs

    import numpy as np
    import pandas as pd
    from scipy import stats

    # create dataframe
    np.random.seed(619517)
    Nr=250
    y = np.random.randn(Nr)
    gr = np.random.choice(list("ABCDE"), Nr)
    norm_params=[(0, 1.2), (0.7, 1), (-0.5, 1.4), (0.3, 1), (0.8, 0.9)]

    for i, letter in enumerate("ABCDE"):
        y[gr == letter] *=norm_params[i][1]+ norm_params[i][0]
    df = pd.DataFrame(dict(Score=y, Group=gr))

    # create violin fig
    fig = FF.create_violin(df, data_header='Score', group_header='Group',
                           height=600, width=1000)

    # plot
    py.iplot(fig, filename='Violin Plot with Coloring')
    ```

    Example 3: Violin Plots with Colorscale
    ```
    import plotly.plotly as py
    from plotly.tools import FigureFactory as FF
    from plotly.graph_objs import graph_objs

    import numpy as np
    import pandas as pd
    from scipy import stats

    # create dataframe
    np.random.seed(619517)
    Nr=250
    y = np.random.randn(Nr)
    gr = np.random.choice(list("ABCDE"), Nr)
    norm_params=[(0, 1.2), (0.7, 1), (-0.5, 1.4), (0.3, 1), (0.8, 0.9)]

    for i, letter in enumerate("ABCDE"):
        y[gr == letter] *=norm_params[i][1]+ norm_params[i][0]
    df = pd.DataFrame(dict(Score=y, Group=gr))

    # define header params
    data_header = 'Score'
    group_header = 'Group'

    # make groupby object with pandas
    group_stats = {}
    groupby_data = df.groupby([group_header])

    for group in "ABCDE":
        data_from_group = groupby_data.get_group(group)[data_header]
        # take a stat of the grouped data
        stat = np.median(data_from_group)
        # add to dictionary
        group_stats[group] = stat

    # create violin fig
    fig = FF.create_violin(df, data_header='Score', group_header='Group',
                           height=600, width=1000, use_colorscale=True,
                           group_stats=group_stats)

    # plot
    py.iplot(fig, filename='Violin Plot with Colorscale')
    ```
    """
    from plotly.graph_objs import graph_objs
    from numbers import Number

    # Validate colors
    if isinstance(colors, dict):
        valid_colors = FigureFactory._validate_colors_dict(colors, 'rgb')
    else:
        valid_colors = FigureFactory._validate_colors(colors, 'rgb')

    # validate data and choose plot type
    if group_header is None:
        if isinstance(data, list):
            if len(data) <= 0:
                raise exceptions.PlotlyError("If data is a list, it must be "
                                             "nonempty and contain either "
                                             "numbers or dictionaries.")

            if not all(isinstance(element, Number) for element in data):
                raise exceptions.PlotlyError("If data is a list, it must "
                                             "contain only numbers.")

        if _pandas_imported and isinstance(data, pd.core.frame.DataFrame):
            if data_header is None:
                raise exceptions.PlotlyError("data_header must be the "
                                             "column name with the "
                                             "desired numeric data for "
                                             "the violin plot.")

            data = data[data_header].values.tolist()

        # call the plotting functions
        plot_data, plot_xrange = FigureFactory._violinplot(
            data, fillcolor=valid_colors[0]
        )

        layout = graph_objs.Layout(
            title=title,
            autosize=False,
            font=graph_objs.Font(size=11),
            height=height,
            showlegend=False,
            width=width,
            xaxis=FigureFactory._make_XAxis('', plot_xrange),
            yaxis=FigureFactory._make_YAxis(''),
            hovermode='closest'
        )
        layout['yaxis'].update(dict(showline=False,
                                    showticklabels=False,
                                    ticks=''))

        fig = graph_objs.Figure(data=graph_objs.Data(plot_data),
                                layout=layout)

        return fig

    else:
        if not _pandas_imported:
            # bug fix: this branch previously dereferenced `pd` without
            # checking `_pandas_imported` (the ungrouped branch above
            # does check it), so a missing pandas surfaced as a bare
            # NameError instead of a helpful message.
            raise ImportError("FigureFactory.create_violin() requires "
                              "pandas imported when group_header is "
                              "used.")

        if not isinstance(data, pd.core.frame.DataFrame):
            raise exceptions.PlotlyError("Error. You must use a pandas "
                                         "DataFrame if you are using a "
                                         "group header.")

        if data_header is None:
            raise exceptions.PlotlyError("data_header must be the column "
                                         "name with the desired numeric "
                                         "data for the violin plot.")

        if use_colorscale is False:
            if isinstance(valid_colors, dict):
                # validate colors dict choice below
                fig = FigureFactory._violin_dict(
                    data, data_header, group_header, valid_colors,
                    use_colorscale, group_stats, height, width, title
                )
                return fig
            else:
                fig = FigureFactory._violin_no_colorscale(
                    data, data_header, group_header, valid_colors,
                    use_colorscale, group_stats, height, width, title
                )
                return fig
        else:
            if isinstance(valid_colors, dict):
                raise exceptions.PlotlyError("The colors param cannot be "
                                             "a dictionary if you are "
                                             "using a colorscale.")

            if len(valid_colors) < 2:
                raise exceptions.PlotlyError("colors must be a list with "
                                             "at least 2 colors. A "
                                             "Plotly scale is allowed.")

            if not isinstance(group_stats, dict):
                raise exceptions.PlotlyError("Your group_stats param "
                                             "must be a dictionary.")

            fig = FigureFactory._violin_colorscale(
                data, data_header, group_header, valid_colors,
                use_colorscale, group_stats, height, width, title
            )
            return fig
@staticmethod
def _find_intermediate_color(lowcolor, highcolor, intermed):
"""
Returns the color at a given distance between two colors
This function takes two color tuples, where each element is between 0
and 1, along with a value 0 < intermed < 1 and returns a color that is
intermed-percent from lowcolor to highcolor
"""
diff_0 = float(highcolor[0] - lowcolor[0])
diff_1 = float(highcolor[1] - lowcolor[1])
diff_2 = float(highcolor[2] - lowcolor[2])
return (lowcolor[0] + intermed * diff_0,
lowcolor[1] + intermed * diff_1,
lowcolor[2] + intermed * diff_2)
@staticmethod
def _color_parser(colors, function):
"""
Takes color(s) and a function and applies the function on the color(s)
In particular, this function identifies whether the given color object
is an iterable or not and applies the given color-parsing function to
the color or iterable of colors. If given an iterable, it will only be
able to work with it if all items in the iterable are of the same type
- rgb string, hex string or tuple
"""
from numbers import Number
if isinstance(colors, str):
return function(colors)
if isinstance(colors, tuple) and isinstance(colors[0], Number):
return function(colors)
if hasattr(colors, '__iter__'):
if isinstance(colors, tuple):
new_color_tuple = tuple(function(item) for item in colors)
return new_color_tuple
else:
new_color_list = [function(item) for item in colors]
return new_color_list
@staticmethod
def _unconvert_from_RGB_255(colors):
"""
Return a tuple where each element gets divided by 255
Takes a (list of) color tuple(s) where each element is between 0 and
255. Returns the same tuples where each tuple element is normalized to
a value between 0 and 1
"""
return (colors[0]/(255.0),
colors[1]/(255.0),
colors[2]/(255.0))
@staticmethod
def _map_face2color(face, colormap, vmin, vmax):
    """
    Normalize facecolor values by vmin/vmax and return rgb-color strings

    Given a scalar ``face`` value and the range [vmin, vmax], picks (or
    interpolates) the matching color from ``colormap`` and returns it as
    an 'rgb(r, g, b)' string.
    """
    if vmin >= vmax:
        raise exceptions.PlotlyError("Incorrect relation between vmin "
                                     "and vmax. The vmin value cannot be "
                                     "bigger than or equal to the value "
                                     "of vmax.")

    if len(colormap) == 1:
        # a single-color colormap paints every face the same
        chosen = colormap[0]
    elif face == vmax:
        # the top of the range maps to the last colormap entry
        chosen = colormap[-1]
    else:
        # normalized position of `face` inside [vmin, vmax)
        frac = (face - vmin) / float((vmax - vmin))
        n_segments = len(colormap) - 1
        # index of the colormap segment the face falls into, then
        # interpolate within that segment
        low_index = int(frac / (1. / n_segments))
        chosen = FigureFactory._find_intermediate_color(
            colormap[low_index],
            colormap[low_index + 1],
            frac * n_segments - low_index
        )

    rgb_255 = FigureFactory._convert_to_RGB_255(chosen)
    return FigureFactory._label_rgb(rgb_255)
@staticmethod
def _trisurf(x, y, z, simplices, show_colorbar, edges_color,
             colormap=None, color_func=None, plot_edges=False,
             x_edge=None, y_edge=None, z_edge=None, facecolor=None):
    """
    Refer to FigureFactory.create_trisurf() for docstring

    Builds the Data for a trisurf figure: a Mesh3d trace colored per
    triangle face, plus (optionally) a dummy marker trace that carries
    the colorbar and a Scatter3d trace drawing the triangle edges.
    """
    # numpy import check
    if _numpy_imported is False:
        raise ImportError("FigureFactory._trisurf() requires "
                          "numpy imported.")
    import numpy as np
    from plotly.graph_objs import graph_objs
    points3D = np.vstack((x, y, z)).T
    simplices = np.atleast_2d(simplices)

    # vertices of the surface triangles: shape (ntri, 3, 3) — one
    # (x, y, z) row per vertex of each simplex
    tri_vertices = points3D[simplices]

    # Define colors for the triangle faces
    if color_func is None:
        # mean values of z-coordinates of triangle vertices
        mean_dists = tri_vertices[:, :, 2].mean(-1)
    elif isinstance(color_func, (list, np.ndarray)):
        # Pre-computed list / array of values (or colors) to map onto color
        if len(color_func) != len(simplices):
            raise ValueError("If color_func is a list/array, it must "
                             "be the same length as simplices.")

        # convert all colors in color_func to rgb strings
        # NOTE: this mutates the caller's color_func list in place
        for index in range(len(color_func)):
            if isinstance(color_func[index], str):
                if '#' in color_func[index]:
                    foo = FigureFactory._hex_to_rgb(color_func[index])
                    color_func[index] = FigureFactory._label_rgb(foo)

            if isinstance(color_func[index], tuple):
                foo = FigureFactory._convert_to_RGB_255(color_func[index])
                color_func[index] = FigureFactory._label_rgb(foo)

        mean_dists = np.asarray(color_func)
    else:
        # apply user inputted function to calculate
        # custom coloring for triangle vertices
        mean_dists = []
        for triangle in tri_vertices:
            dists = []
            for vertex in triangle:
                dist = color_func(vertex[0], vertex[1], vertex[2])
                dists.append(dist)
            mean_dists.append(np.mean(dists))
        mean_dists = np.asarray(mean_dists)

    # Check if facecolors are already strings and can be skipped
    if isinstance(mean_dists[0], str):
        facecolor = mean_dists
    else:
        min_mean_dists = np.min(mean_dists)
        max_mean_dists = np.max(mean_dists)

        if facecolor is None:
            facecolor = []
        # map each scalar onto an rgb string via the colormap
        for index in range(len(mean_dists)):
            color = FigureFactory._map_face2color(mean_dists[index],
                                                  colormap,
                                                  min_mean_dists,
                                                  max_mean_dists)
            facecolor.append(color)

    # Make sure facecolor is a list so output is consistent across Pythons
    facecolor = list(facecolor)
    ii, jj, kk = simplices.T
    triangles = graph_objs.Mesh3d(x=x, y=y, z=z, facecolor=facecolor,
                                  i=ii, j=jj, k=kk, name='')

    mean_dists_are_numbers = not isinstance(mean_dists[0], str)

    if mean_dists_are_numbers and show_colorbar is True:
        # make a colorscale from the colors
        colorscale = FigureFactory._make_colorscale(colormap)
        colorscale = FigureFactory._convert_colorscale_to_rgb(colorscale)

        # invisible 1-point marker trace whose only job is to display
        # the colorbar for the face colors
        colorbar = graph_objs.Scatter3d(
            x=x[0],
            y=y[0],
            z=z[0],
            mode='markers',
            marker=dict(
                size=0.1,
                color=[min_mean_dists, max_mean_dists],
                colorscale=colorscale,
                showscale=True),
            hoverinfo='None',
            showlegend=False
        )

    # the triangle sides are not plotted
    if plot_edges is False:
        if mean_dists_are_numbers and show_colorbar is True:
            return graph_objs.Data([triangles, colorbar])
        else:
            return graph_objs.Data([triangles])

    # define the lists x_edge, y_edge and z_edge, of x, y, resp z
    # coordinates of edge end points for each triangle
    # None separates data corresponding to two consecutive triangles
    is_none = [ii is None for ii in [x_edge, y_edge, z_edge]]
    if any(is_none):
        if not all(is_none):
            raise ValueError("If any (x_edge, y_edge, z_edge) is None, "
                             "all must be None")
        else:
            x_edge = []
            y_edge = []
            z_edge = []

    # Pull indices we care about, then add a None column to separate tris
    # (vertex order 0-1-2-0 closes each triangle's outline)
    ixs_triangles = [0, 1, 2, 0]
    pull_edges = tri_vertices[:, ixs_triangles, :]
    x_edge_pull = np.hstack([pull_edges[:, :, 0],
                             np.tile(None, [pull_edges.shape[0], 1])])
    y_edge_pull = np.hstack([pull_edges[:, :, 1],
                             np.tile(None, [pull_edges.shape[0], 1])])
    z_edge_pull = np.hstack([pull_edges[:, :, 2],
                             np.tile(None, [pull_edges.shape[0], 1])])

    # Now unravel the edges into a 1-d vector for plotting
    x_edge = np.hstack([x_edge, x_edge_pull.reshape([1, -1])[0]])
    y_edge = np.hstack([y_edge, y_edge_pull.reshape([1, -1])[0]])
    z_edge = np.hstack([z_edge, z_edge_pull.reshape([1, -1])[0]])

    if not (len(x_edge) == len(y_edge) == len(z_edge)):
        raise exceptions.PlotlyError("The lengths of x_edge, y_edge and "
                                     "z_edge are not the same.")

    # define the lines for plotting
    lines = graph_objs.Scatter3d(
        x=x_edge, y=y_edge, z=z_edge, mode='lines',
        line=graph_objs.Line(
            color=edges_color,
            width=1.5
        ),
        showlegend=False
    )

    if mean_dists_are_numbers and show_colorbar is True:
        return graph_objs.Data([triangles, lines, colorbar])
    else:
        return graph_objs.Data([triangles, lines])
@staticmethod
def create_trisurf(x, y, z, simplices, colormap=None, show_colorbar=True,
                   color_func=None, title='Trisurf Plot', plot_edges=True,
                   showbackground=True,
                   backgroundcolor='rgb(230, 230, 230)',
                   gridcolor='rgb(255, 255, 255)',
                   zerolinecolor='rgb(255, 255, 255)',
                   edges_color='rgb(50, 50, 50)',
                   height=800, width=800,
                   aspectratio=dict(x=1, y=1, z=1)):
    """
    Returns figure for a triangulated surface plot

    :param (array) x: data values of x in a 1D array
    :param (array) y: data values of y in a 1D array
    :param (array) z: data values of z in a 1D array
    :param (array) simplices: an array of shape (ntri, 3) where ntri is
        the number of triangles in the triangularization. Each row of the
        array contains the indices of the vertices of each triangle
    :param (str|tuple|list) colormap: either a plotly scale name, an rgb
        or hex color, a color tuple or a list of colors. An rgb color is
        of the form 'rgb(x, y, z)' where x, y, z belong to the interval
        [0, 255] and a color tuple is a tuple of the form (a, b, c) where
        a, b and c belong to [0, 1]. If colormap is a list, it must
        contain the valid color types aforementioned as its members
    :param (bool) show_colorbar: determines if colorbar is visible
    :param (function|list) color_func: The parameter that determines the
        coloring of the surface. Takes either a function with 3 arguments
        x, y, z or a list/array of color values the same length as
        simplices. If None, coloring will only depend on the z axis
    :param (str) title: title of the plot
    :param (bool) plot_edges: determines if the triangles on the trisurf
        are visible
    :param (bool) showbackground: makes background in plot visible
    :param (str) backgroundcolor: color of background. Takes a string of
        the form 'rgb(x,y,z)' x,y,z are between 0 and 255 inclusive
    :param (str) gridcolor: color of the gridlines besides the axes. Takes
        a string of the form 'rgb(x,y,z)' x,y,z are between 0 and 255
        inclusive
    :param (str) zerolinecolor: color of the axes. Takes a string of the
        form 'rgb(x,y,z)' x,y,z are between 0 and 255 inclusive
    :param (str) edges_color: color of the edges, if plot_edges is True
    :param (int|float) height: the height of the plot (in pixels)
    :param (int|float) width: the width of the plot (in pixels)
    :param (dict) aspectratio: a dictionary of the aspect ratio values for
        the x, y and z axes. 'x', 'y' and 'z' take (int|float) values

    Example 1: Sphere
    ```
    # Necessary Imports for Trisurf
    import numpy as np
    from scipy.spatial import Delaunay

    import plotly.plotly as py
    from plotly.tools import FigureFactory as FF
    from plotly.graph_objs import graph_objs

    # Make data for plot
    u = np.linspace(0, 2*np.pi, 20)
    v = np.linspace(0, np.pi, 20)
    u,v = np.meshgrid(u,v)
    u = u.flatten()
    v = v.flatten()

    x = np.sin(v)*np.cos(u)
    y = np.sin(v)*np.sin(u)
    z = np.cos(v)

    points2D = np.vstack([u,v]).T
    tri = Delaunay(points2D)
    simplices = tri.simplices

    # Create a figure
    fig1 = FF.create_trisurf(x=x, y=y, z=z,
                             colormap="Blues",
                             simplices=simplices)
    # Plot the data
    py.iplot(fig1, filename='trisurf-plot-sphere')
    ```

    Example 2: Torus
    ```
    # Necessary Imports for Trisurf
    import numpy as np
    from scipy.spatial import Delaunay

    import plotly.plotly as py
    from plotly.tools import FigureFactory as FF
    from plotly.graph_objs import graph_objs

    # Make data for plot
    u = np.linspace(0, 2*np.pi, 20)
    v = np.linspace(0, 2*np.pi, 20)
    u,v = np.meshgrid(u,v)
    u = u.flatten()
    v = v.flatten()

    x = (3 + (np.cos(v)))*np.cos(u)
    y = (3 + (np.cos(v)))*np.sin(u)
    z = np.sin(v)

    points2D = np.vstack([u,v]).T
    tri = Delaunay(points2D)
    simplices = tri.simplices

    # Create a figure
    fig1 = FF.create_trisurf(x=x, y=y, z=z,
                             colormap="Greys",
                             simplices=simplices)
    # Plot the data
    py.iplot(fig1, filename='trisurf-plot-torus')
    ```

    Example 3: Mobius Band
    ```
    # Necessary Imports for Trisurf
    import numpy as np
    from scipy.spatial import Delaunay

    import plotly.plotly as py
    from plotly.tools import FigureFactory as FF
    from plotly.graph_objs import graph_objs

    # Make data for plot
    u = np.linspace(0, 2*np.pi, 24)
    v = np.linspace(-1, 1, 8)
    u,v = np.meshgrid(u,v)
    u = u.flatten()
    v = v.flatten()

    tp = 1 + 0.5*v*np.cos(u/2.)
    x = tp*np.cos(u)
    y = tp*np.sin(u)
    z = 0.5*v*np.sin(u/2.)

    points2D = np.vstack([u,v]).T
    tri = Delaunay(points2D)
    simplices = tri.simplices

    # Create a figure
    fig1 = FF.create_trisurf(x=x, y=y, z=z,
                             colormap=[(0.2, 0.4, 0.6), (1, 1, 1)],
                             simplices=simplices)
    # Plot the data
    py.iplot(fig1, filename='trisurf-plot-mobius-band')
    ```

    Example 4: Using a Custom Colormap Function with Light Cone
    ```
    # Necessary Imports for Trisurf
    import numpy as np
    from scipy.spatial import Delaunay

    import plotly.plotly as py
    from plotly.tools import FigureFactory as FF
    from plotly.graph_objs import graph_objs

    # Make data for plot
    u=np.linspace(-np.pi, np.pi, 30)
    v=np.linspace(-np.pi, np.pi, 30)
    u,v=np.meshgrid(u,v)
    u=u.flatten()
    v=v.flatten()

    x = u
    y = u*np.cos(v)
    z = u*np.sin(v)

    points2D = np.vstack([u,v]).T
    tri = Delaunay(points2D)
    simplices = tri.simplices

    # Define distance function
    def dist_origin(x, y, z):
        return np.sqrt((1.0 * x)**2 + (1.0 * y)**2 + (1.0 * z)**2)

    # Create a figure
    fig1 = FF.create_trisurf(x=x, y=y, z=z,
                             colormap=['#604d9e',
                                       'rgb(50, 150, 255)',
                                       (0.2, 0.2, 0.8)],
                             simplices=simplices,
                             color_func=dist_origin)
    # Plot the data
    py.iplot(fig1, filename='trisurf-plot-custom-coloring')
    ```

    Example 5: Enter color_func as a list of colors
    ```
    # Necessary Imports for Trisurf
    import numpy as np
    from scipy.spatial import Delaunay
    import random

    import plotly.plotly as py
    from plotly.tools import FigureFactory as FF
    from plotly.graph_objs import graph_objs

    # Make data for plot
    u=np.linspace(-np.pi, np.pi, 30)
    v=np.linspace(-np.pi, np.pi, 30)
    u,v=np.meshgrid(u,v)
    u=u.flatten()
    v=v.flatten()

    x = u
    y = u*np.cos(v)
    z = u*np.sin(v)

    points2D = np.vstack([u,v]).T
    tri = Delaunay(points2D)
    simplices = tri.simplices

    colors = []
    color_choices = ['rgb(0, 0, 0)', '#6c4774', '#d6c7dd']

    for index in range(len(simplices)):
        colors.append(random.choice(color_choices))

    fig = FF.create_trisurf(
        x, y, z, simplices,
        color_func=colors,
        show_colorbar=True,
        edges_color='rgb(2, 85, 180)',
        title=' Modern Art'
    )

    py.iplot(fig, filename="trisurf-plot-modern-art")
    ```
    """
    from plotly.graph_objs import graph_objs

    # Validate colormap
    colormap = FigureFactory._validate_colors(colormap, 'tuple')

    # build the Mesh3d (+ optional colorbar / edge) traces
    data1 = FigureFactory._trisurf(x, y, z, simplices,
                                   show_colorbar=show_colorbar,
                                   color_func=color_func,
                                   colormap=colormap,
                                   edges_color=edges_color,
                                   plot_edges=plot_edges)

    # same axis styling is reused for all three scene axes
    axis = dict(
        showbackground=showbackground,
        backgroundcolor=backgroundcolor,
        gridcolor=gridcolor,
        zerolinecolor=zerolinecolor,
    )
    layout = graph_objs.Layout(
        title=title,
        width=width,
        height=height,
        scene=graph_objs.Scene(
            xaxis=graph_objs.XAxis(axis),
            yaxis=graph_objs.YAxis(axis),
            zaxis=graph_objs.ZAxis(axis),
            aspectratio=dict(
                x=aspectratio['x'],
                y=aspectratio['y'],
                z=aspectratio['z']),
        )
    )

    return graph_objs.Figure(data=data1, layout=layout)
@staticmethod
def _scatterplot(dataframe, headers, diag, size,
                 height, width, title, **kwargs):
    """
    Refer to FigureFactory.create_scatterplotmatrix() for docstring

    Returns fig for scatterplotmatrix without index
    """
    from plotly.graph_objs import graph_objs
    dim = len(dataframe)
    fig = make_subplots(rows=dim, cols=dim)
    trace_list = []
    # Insert traces into trace_list
    for listy in dataframe:
        for listx in dataframe:
            if (listx == listy) and (diag == 'histogram'):
                trace = graph_objs.Histogram(
                    x=listx,
                    showlegend=False
                )
            elif (listx == listy) and (diag == 'box'):
                trace = graph_objs.Box(
                    y=listx,
                    name=None,
                    showlegend=False
                )
            else:
                if 'marker' in kwargs:
                    # caller supplied a marker dict: only force the size
                    kwargs['marker']['size'] = size
                    trace = graph_objs.Scatter(
                        x=listx,
                        y=listy,
                        mode='markers',
                        showlegend=False,
                        **kwargs
                    )
                    trace_list.append(trace)
                else:
                    trace = graph_objs.Scatter(
                        x=listx,
                        y=listy,
                        mode='markers',
                        marker=dict(
                            size=size),
                        showlegend=False,
                        **kwargs
                    )
                    trace_list.append(trace)
            # NOTE(review): as reconstructed, only the two scatter
            # branches append to trace_list; with diag == 'histogram'
            # or 'box' the diagonal traces would be missing and the
            # trace_list[trace_index] loop below would over-run.
            # Confirm the branch indentation against upstream plotly.

    trace_index = 0
    indices = range(1, dim + 1)
    # place traces row-major into the dim x dim subplot grid
    for y_index in indices:
        for x_index in indices:
            fig.append_trace(trace_list[trace_index],
                             y_index,
                             x_index)
            trace_index += 1

    # Insert headers into the figure: x-axis titles on the bottom row,
    # y-axis titles on the first column
    for j in range(dim):
        xaxis_key = 'xaxis{}'.format((dim * dim) - dim + 1 + j)
        fig['layout'][xaxis_key].update(title=headers[j])
    for j in range(dim):
        yaxis_key = 'yaxis{}'.format(1 + (dim * j))
        fig['layout'][yaxis_key].update(title=headers[j])

    fig['layout'].update(
        height=height, width=width,
        title=title,
        showlegend=True
    )
    return fig
@staticmethod
def _scatterplot_dict(dataframe, headers, diag, size,
                      height, width, title, index, index_vals,
                      endpts, colormap, colormap_type, **kwargs):
    """
    Refer to FigureFactory.create_scatterplotmatrix() for docstring

    Returns fig for scatterplotmatrix with both index and colormap picked.
    Used if colormap is a dictionary with index values as keys pointing to
    colors. Forces colormap_type to behave categorically because it would
    not make sense colors are assigned to each index value and thus
    implies that a categorical approach should be taken
    """
    from plotly.graph_objs import graph_objs

    theme = colormap
    dim = len(dataframe)
    fig = make_subplots(rows=dim, cols=dim)
    trace_list = []
    legend_param = 0
    # Work over all permutations of list pairs
    for listy in dataframe:
        for listx in dataframe:
            # create a dictionary for index_vals
            unique_index_vals = {}
            for name in index_vals:
                if name not in unique_index_vals:
                    unique_index_vals[name] = []

            # Fill all the rest of the names into the dictionary
            # (one trace per unique index value, split from the full
            # columns by matching positions in index_vals)
            for name in sorted(unique_index_vals.keys()):
                new_listx = []
                new_listy = []
                for j in range(len(index_vals)):
                    if index_vals[j] == name:
                        new_listx.append(listx[j])
                        new_listy.append(listy[j])
                # Generate trace with VISIBLE icon
                # (only the subplot cell processed when legend_param == 1
                # contributes visible legend entries, so each group
                # appears exactly once in the legend)
                if legend_param == 1:
                    if (listx == listy) and (diag == 'histogram'):
                        trace = graph_objs.Histogram(
                            x=new_listx,
                            marker=dict(
                                color=theme[name]),
                            showlegend=True
                        )
                    elif (listx == listy) and (diag == 'box'):
                        trace = graph_objs.Box(
                            y=new_listx,
                            name=None,
                            marker=dict(
                                color=theme[name]),
                            showlegend=True
                        )
                    else:
                        if 'marker' in kwargs:
                            # force size/color onto the caller's marker
                            kwargs['marker']['size'] = size
                            kwargs['marker']['color'] = theme[name]
                            trace = graph_objs.Scatter(
                                x=new_listx,
                                y=new_listy,
                                mode='markers',
                                name=name,
                                showlegend=True,
                                **kwargs
                            )
                        else:
                            trace = graph_objs.Scatter(
                                x=new_listx,
                                y=new_listy,
                                mode='markers',
                                name=name,
                                marker=dict(
                                    size=size,
                                    color=theme[name]),
                                showlegend=True,
                                **kwargs
                            )
                # Generate trace with INVISIBLE icon
                else:
                    if (listx == listy) and (diag == 'histogram'):
                        trace = graph_objs.Histogram(
                            x=new_listx,
                            marker=dict(
                                color=theme[name]),
                            showlegend=False
                        )
                    elif (listx == listy) and (diag == 'box'):
                        trace = graph_objs.Box(
                            y=new_listx,
                            name=None,
                            marker=dict(
                                color=theme[name]),
                            showlegend=False
                        )
                    else:
                        if 'marker' in kwargs:
                            kwargs['marker']['size'] = size
                            kwargs['marker']['color'] = theme[name]
                            trace = graph_objs.Scatter(
                                x=new_listx,
                                y=new_listy,
                                mode='markers',
                                name=name,
                                showlegend=False,
                                **kwargs
                            )
                        else:
                            trace = graph_objs.Scatter(
                                x=new_listx,
                                y=new_listy,
                                mode='markers',
                                name=name,
                                marker=dict(
                                    size=size,
                                    color=theme[name]),
                                showlegend=False,
                                **kwargs
                            )
                # Push the trace into dictionary
                unique_index_vals[name] = trace
            trace_list.append(unique_index_vals)
            legend_param += 1

    trace_index = 0
    indices = range(1, dim + 1)
    # place each cell's per-group traces into the subplot grid
    for y_index in indices:
        for x_index in indices:
            for name in sorted(trace_list[trace_index].keys()):
                fig.append_trace(
                    trace_list[trace_index][name],
                    y_index,
                    x_index)
            trace_index += 1

    # Insert headers into the figure: x-axis titles on the bottom row,
    # y-axis titles on the first column
    for j in range(dim):
        xaxis_key = 'xaxis{}'.format((dim * dim) - dim + 1 + j)
        fig['layout'][xaxis_key].update(title=headers[j])

    for j in range(dim):
        yaxis_key = 'yaxis{}'.format(1 + (dim * j))
        fig['layout'][yaxis_key].update(title=headers[j])

    if diag == 'histogram':
        # stacked bars so per-group histograms overlay correctly
        fig['layout'].update(
            height=height, width=width,
            title=title,
            showlegend=True,
            barmode='stack')
        return fig

    elif diag == 'box':
        fig['layout'].update(
            height=height, width=width,
            title=title,
            showlegend=True)
        return fig

    else:
        fig['layout'].update(
            height=height, width=width,
            title=title,
            showlegend=True)
        return fig
@staticmethod
def _scatterplot_theme(dataframe, headers, diag, size, height,
width, title, index, index_vals, endpts,
colormap, colormap_type, **kwargs):
"""
Refer to FigureFactory.create_scatterplotmatrix() for docstring
Returns fig for scatterplotmatrix with both index and colormap picked
"""
from plotly.graph_objs import graph_objs
# Check if index is made of string values
if isinstance(index_vals[0], str):
unique_index_vals = []
for name in index_vals:
if name not in unique_index_vals:
unique_index_vals.append(name)
n_colors_len = len(unique_index_vals)
# Convert colormap to list of n RGB tuples
if colormap_type == 'seq':
foo = FigureFactory._color_parser(
colormap, FigureFactory._unlabel_rgb
)
foo = FigureFactory._n_colors(foo[0],
foo[1],
n_colors_len)
theme = FigureFactory._color_parser(
foo, FigureFactory._label_rgb
)
if colormap_type == 'cat':
# leave list of colors the same way
theme = colormap
dim = len(dataframe)
fig = make_subplots(rows=dim, cols=dim)
trace_list = []
legend_param = 0
# Work over all permutations of list pairs
for listy in dataframe:
for listx in dataframe:
# create a dictionary for index_vals
unique_index_vals = {}
for name in index_vals:
if name not in unique_index_vals:
unique_index_vals[name] = []
c_indx = 0 # color index
# Fill all the rest of the names into the dictionary
for name in sorted(unique_index_vals.keys()):
new_listx = []
new_listy = []
for j in range(len(index_vals)):
if index_vals[j] == name:
new_listx.append(listx[j])
new_listy.append(listy[j])
# Generate trace with VISIBLE icon
if legend_param == 1:
if (listx == listy) and (diag == 'histogram'):
trace = graph_objs.Histogram(
x=new_listx,
marker=dict(
color=theme[c_indx]),
showlegend=True
)
elif (listx == listy) and (diag == 'box'):
trace = graph_objs.Box(
y=new_listx,
name=None,
marker=dict(
color=theme[c_indx]),
showlegend=True
)
else:
if 'marker' in kwargs:
kwargs['marker']['size'] = size
kwargs['marker']['color'] = theme[c_indx]
trace = graph_objs.Scatter(
x=new_listx,
y=new_listy,
mode='markers',
name=name,
showlegend=True,
**kwargs
)
else:
trace = graph_objs.Scatter(
x=new_listx,
y=new_listy,
mode='markers',
name=name,
marker=dict(
size=size,
color=theme[c_indx]),
showlegend=True,
**kwargs
)
# Generate trace with INVISIBLE icon
else:
if (listx == listy) and (diag == 'histogram'):
trace = graph_objs.Histogram(
x=new_listx,
marker=dict(
color=theme[c_indx]),
showlegend=False
)
elif (listx == listy) and (diag == 'box'):
trace = graph_objs.Box(
y=new_listx,
name=None,
marker=dict(
color=theme[c_indx]),
showlegend=False
)
else:
if 'marker' in kwargs:
kwargs['marker']['size'] = size
kwargs['marker']['color'] = theme[c_indx]
trace = graph_objs.Scatter(
x=new_listx,
y=new_listy,
mode='markers',
name=name,
showlegend=False,
**kwargs
)
else:
trace = graph_objs.Scatter(
x=new_listx,
y=new_listy,
mode='markers',
name=name,
marker=dict(
size=size,
color=theme[c_indx]),
showlegend=False,
**kwargs
)
# Push the trace into dictionary
unique_index_vals[name] = trace
if c_indx >= (len(theme) - 1):
c_indx = -1
c_indx += 1
trace_list.append(unique_index_vals)
legend_param += 1
trace_index = 0
indices = range(1, dim + 1)
for y_index in indices:
for x_index in indices:
for name in sorted(trace_list[trace_index].keys()):
fig.append_trace(
trace_list[trace_index][name],
y_index,
x_index)
trace_index += 1
# Insert headers into the figure
for j in range(dim):
xaxis_key = 'xaxis{}'.format((dim * dim) - dim + 1 + j)
fig['layout'][xaxis_key].update(title=headers[j])
for j in range(dim):
yaxis_key = 'yaxis{}'.format(1 + (dim * j))
fig['layout'][yaxis_key].update(title=headers[j])
if diag == 'histogram':
fig['layout'].update(
height=height, width=width,
title=title,
showlegend=True,
barmode='stack')
return fig
elif diag == 'box':
fig['layout'].update(
height=height, width=width,
title=title,
showlegend=True)
return fig
else:
fig['layout'].update(
height=height, width=width,
title=title,
showlegend=True)
return fig
else:
if endpts:
intervals = FigureFactory._endpts_to_intervals(endpts)
# Convert colormap to list of n RGB tuples
if colormap_type == 'seq':
foo = FigureFactory._color_parser(
colormap, FigureFactory._unlabel_rgb
)
foo = FigureFactory._n_colors(foo[0],
foo[1],
len(intervals))
theme = FigureFactory._color_parser(
foo, FigureFactory._label_rgb
)
if colormap_type == 'cat':
# leave list of colors the same way
theme = colormap
dim = len(dataframe)
fig = make_subplots(rows=dim, cols=dim)
trace_list = []
legend_param = 0
# Work over all permutations of list pairs
for listy in dataframe:
for listx in dataframe:
interval_labels = {}
for interval in intervals:
interval_labels[str(interval)] = []
c_indx = 0 # color index
# Fill all the rest of the names into the dictionary
for interval in intervals:
new_listx = []
new_listy = []
for j in range(len(index_vals)):
if interval[0] < index_vals[j] <= interval[1]:
new_listx.append(listx[j])
new_listy.append(listy[j])
# Generate trace with VISIBLE icon
if legend_param == 1:
if (listx == listy) and (diag == 'histogram'):
trace = graph_objs.Histogram(
x=new_listx,
marker=dict(
color=theme[c_indx]),
showlegend=True
)
elif (listx == listy) and (diag == 'box'):
trace = graph_objs.Box(
y=new_listx,
name=None,
marker=dict(
color=theme[c_indx]),
showlegend=True
)
else:
if 'marker' in kwargs:
kwargs['marker']['size'] = size
(kwargs['marker']
['color']) = theme[c_indx]
trace = graph_objs.Scatter(
x=new_listx,
y=new_listy,
mode='markers',
name=str(interval),
showlegend=True,
**kwargs
)
else:
trace = graph_objs.Scatter(
x=new_listx,
y=new_listy,
mode='markers',
name=str(interval),
marker=dict(
size=size,
color=theme[c_indx]),
showlegend=True,
**kwargs
)
# Generate trace with INVISIBLE icon
else:
if (listx == listy) and (diag == 'histogram'):
trace = graph_objs.Histogram(
x=new_listx,
marker=dict(
color=theme[c_indx]),
showlegend=False
)
elif (listx == listy) and (diag == 'box'):
trace = graph_objs.Box(
y=new_listx,
name=None,
marker=dict(
color=theme[c_indx]),
showlegend=False
)
else:
if 'marker' in kwargs:
kwargs['marker']['size'] = size
(kwargs['marker']
['color']) = theme[c_indx]
trace = graph_objs.Scatter(
x=new_listx,
y=new_listy,
mode='markers',
name=str(interval),
showlegend=False,
**kwargs
)
else:
trace = graph_objs.Scatter(
x=new_listx,
y=new_listy,
mode='markers',
name=str(interval),
marker=dict(
size=size,
color=theme[c_indx]),
showlegend=False,
**kwargs
)
# Push the trace into dictionary
interval_labels[str(interval)] = trace
if c_indx >= (len(theme) - 1):
c_indx = -1
c_indx += 1
trace_list.append(interval_labels)
legend_param += 1
trace_index = 0
indices = range(1, dim + 1)
for y_index in indices:
for x_index in indices:
for interval in intervals:
fig.append_trace(
trace_list[trace_index][str(interval)],
y_index,
x_index)
trace_index += 1
# Insert headers into the figure
for j in range(dim):
xaxis_key = 'xaxis{}'.format((dim * dim) - dim + 1 + j)
fig['layout'][xaxis_key].update(title=headers[j])
for j in range(dim):
yaxis_key = 'yaxis{}'.format(1 + (dim * j))
fig['layout'][yaxis_key].update(title=headers[j])
if diag == 'histogram':
fig['layout'].update(
height=height, width=width,
title=title,
showlegend=True,
barmode='stack')
return fig
elif diag == 'box':
fig['layout'].update(
height=height, width=width,
title=title,
showlegend=True)
return fig
else:
fig['layout'].update(
height=height, width=width,
title=title,
showlegend=True)
return fig
else:
theme = colormap
# add a copy of rgb color to theme if it contains one color
if len(theme) <= 1:
theme.append(theme[0])
color = []
for incr in range(len(theme)):
color.append([1./(len(theme)-1)*incr, theme[incr]])
dim = len(dataframe)
fig = make_subplots(rows=dim, cols=dim)
trace_list = []
legend_param = 0
# Run through all permutations of list pairs
for listy in dataframe:
for listx in dataframe:
# Generate trace with VISIBLE icon
if legend_param == 1:
if (listx == listy) and (diag == 'histogram'):
trace = graph_objs.Histogram(
x=listx,
marker=dict(
color=theme[0]),
showlegend=False
)
elif (listx == listy) and (diag == 'box'):
trace = graph_objs.Box(
y=listx,
marker=dict(
color=theme[0]),
showlegend=False
)
else:
if 'marker' in kwargs:
kwargs['marker']['size'] = size
kwargs['marker']['color'] = index_vals
kwargs['marker']['colorscale'] = color
kwargs['marker']['showscale'] = True
trace = graph_objs.Scatter(
x=listx,
y=listy,
mode='markers',
showlegend=False,
**kwargs
)
else:
trace = graph_objs.Scatter(
x=listx,
y=listy,
mode='markers',
marker=dict(
size=size,
color=index_vals,
colorscale=color,
showscale=True),
showlegend=False,
**kwargs
)
# Generate trace with INVISIBLE icon
else:
if (listx == listy) and (diag == 'histogram'):
trace = graph_objs.Histogram(
x=listx,
marker=dict(
color=theme[0]),
showlegend=False
)
elif (listx == listy) and (diag == 'box'):
trace = graph_objs.Box(
y=listx,
marker=dict(
color=theme[0]),
showlegend=False
)
else:
if 'marker' in kwargs:
kwargs['marker']['size'] = size
kwargs['marker']['color'] = index_vals
kwargs['marker']['colorscale'] = color
kwargs['marker']['showscale'] = False
trace = graph_objs.Scatter(
x=listx,
y=listy,
mode='markers',
showlegend=False,
**kwargs
)
else:
trace = graph_objs.Scatter(
x=listx,
y=listy,
mode='markers',
marker=dict(
size=size,
color=index_vals,
colorscale=color,
showscale=False),
showlegend=False,
**kwargs
)
# Push the trace into list
trace_list.append(trace)
legend_param += 1
trace_index = 0
indices = range(1, dim + 1)
for y_index in indices:
for x_index in indices:
fig.append_trace(trace_list[trace_index],
y_index,
x_index)
trace_index += 1
# Insert headers into the figure
for j in range(dim):
xaxis_key = 'xaxis{}'.format((dim * dim) - dim + 1 + j)
fig['layout'][xaxis_key].update(title=headers[j])
for j in range(dim):
yaxis_key = 'yaxis{}'.format(1 + (dim * j))
fig['layout'][yaxis_key].update(title=headers[j])
if diag == 'histogram':
fig['layout'].update(
height=height, width=width,
title=title,
showlegend=True,
barmode='stack')
return fig
elif diag == 'box':
fig['layout'].update(
height=height, width=width,
title=title,
showlegend=True)
return fig
else:
fig['layout'].update(
height=height, width=width,
title=title,
showlegend=True)
return fig
@staticmethod
def _validate_index(index_vals):
"""
Validates if a list contains all numbers or all strings
:raises: (PlotlyError) If there are any two items in the list whose
types differ
"""
from numbers import Number
if isinstance(index_vals[0], Number):
if not all(isinstance(item, Number) for item in index_vals):
raise exceptions.PlotlyError("Error in indexing column. "
"Make sure all entries of each "
"column are all numbers or "
"all strings.")
elif isinstance(index_vals[0], str):
if not all(isinstance(item, str) for item in index_vals):
raise exceptions.PlotlyError("Error in indexing column. "
"Make sure all entries of each "
"column are all numbers or "
"all strings.")
@staticmethod
def _validate_dataframe(array):
"""
Validates all strings or numbers in each dataframe column
:raises: (PlotlyError) If there are any two items in any list whose
types differ
"""
from numbers import Number
for vector in array:
if isinstance(vector[0], Number):
if not all(isinstance(item, Number) for item in vector):
raise exceptions.PlotlyError("Error in dataframe. "
"Make sure all entries of "
"each column are either "
"numbers or strings.")
elif isinstance(vector[0], str):
if not all(isinstance(item, str) for item in vector):
raise exceptions.PlotlyError("Error in dataframe. "
"Make sure all entries of "
"each column are either "
"numbers or strings.")
@staticmethod
def _validate_scatterplotmatrix(df, index, diag, colormap_type, **kwargs):
"""
Validates basic inputs for FigureFactory.create_scatterplotmatrix()
:raises: (PlotlyError) If pandas is not imported
:raises: (PlotlyError) If pandas dataframe is not inputted
:raises: (PlotlyError) If pandas dataframe has <= 1 columns
:raises: (PlotlyError) If diagonal plot choice (diag) is not one of
the viable options
:raises: (PlotlyError) If colormap_type is not a valid choice
:raises: (PlotlyError) If kwargs contains 'size', 'color' or
'colorscale'
"""
if _pandas_imported is False:
raise ImportError("FigureFactory.scatterplotmatrix requires "
"a pandas DataFrame.")
# Check if pandas dataframe
if not isinstance(df, pd.core.frame.DataFrame):
raise exceptions.PlotlyError("Dataframe not inputed. Please "
"use a pandas dataframe to pro"
"duce a scatterplot matrix.")
# Check if dataframe is 1 column or less
if len(df.columns) <= 1:
raise exceptions.PlotlyError("Dataframe has only one column. To "
"use the scatterplot matrix, use at "
"least 2 columns.")
# Check that diag parameter is a valid selection
if diag not in DIAG_CHOICES:
raise exceptions.PlotlyError("Make sure diag is set to "
"one of {}".format(DIAG_CHOICES))
# Check that colormap_types is a valid selection
if colormap_type not in VALID_COLORMAP_TYPES:
raise exceptions.PlotlyError("Must choose a valid colormap type. "
"Either 'cat' or 'seq' for a cate"
"gorical and sequential colormap "
"respectively.")
# Check for not 'size' or 'color' in 'marker' of **kwargs
if 'marker' in kwargs:
FORBIDDEN_PARAMS = ['size', 'color', 'colorscale']
if any(param in kwargs['marker'] for param in FORBIDDEN_PARAMS):
raise exceptions.PlotlyError("Your kwargs dictionary cannot "
"include the 'size', 'color' or "
"'colorscale' key words inside "
"the marker dict since 'size' is "
"already an argument of the "
"scatterplot matrix function and "
"both 'color' and 'colorscale "
"are set internally.")
@staticmethod
def _endpts_to_intervals(endpts):
"""
Returns a list of intervals for categorical colormaps
Accepts a list or tuple of sequentially increasing numbers and returns
a list representation of the mathematical intervals with these numbers
as endpoints. For example, [1, 6] returns [[-inf, 1], [1, 6], [6, inf]]
:raises: (PlotlyError) If input is not a list or tuple
:raises: (PlotlyError) If the input contains a string
:raises: (PlotlyError) If any number does not increase after the
previous one in the sequence
"""
length = len(endpts)
# Check if endpts is a list or tuple
if not (isinstance(endpts, (tuple)) or isinstance(endpts, (list))):
raise exceptions.PlotlyError("The intervals_endpts argument must "
"be a list or tuple of a sequence "
"of increasing numbers.")
# Check if endpts contains only numbers
for item in endpts:
if isinstance(item, str):
raise exceptions.PlotlyError("The intervals_endpts argument "
"must be a list or tuple of a "
"sequence of increasing "
"numbers.")
# Check if numbers in endpts are increasing
for k in range(length-1):
if endpts[k] >= endpts[k+1]:
raise exceptions.PlotlyError("The intervals_endpts argument "
"must be a list or tuple of a "
"sequence of increasing "
"numbers.")
else:
intervals = []
# add -inf to intervals
intervals.append([float('-inf'), endpts[0]])
for k in range(length - 1):
interval = []
interval.append(endpts[k])
interval.append(endpts[k + 1])
intervals.append(interval)
# add +inf to intervals
intervals.append([endpts[length - 1], float('inf')])
return intervals
@staticmethod
def _convert_to_RGB_255(colors):
"""
Multiplies each element of a triplet by 255
Each coordinate of the color tuple is rounded to the nearest float and
then is turned into an integer. If a number is of the form x.5, then
if x is odd, the number rounds up to (x+1). Otherwise, it rounds down
to just x. This is the way rounding works in Python 3 and in current
statistical analysis to avoid rounding bias
"""
rgb_components = []
for component in colors:
rounded_num = decimal.Decimal(str(component*255.0)).quantize(
decimal.Decimal('1'), rounding=decimal.ROUND_HALF_EVEN
)
# convert rounded number to an integer from 'Decimal' form
rounded_num = int(rounded_num)
rgb_components.append(rounded_num)
return (rgb_components[0], rgb_components[1], rgb_components[2])
@staticmethod
def _n_colors(lowcolor, highcolor, n_colors):
"""
Splits a low and high color into a list of n_colors colors in it
Accepts two color tuples and returns a list of n_colors colors
which form the intermediate colors between lowcolor and highcolor
from linearly interpolating through RGB space
"""
diff_0 = float(highcolor[0] - lowcolor[0])
incr_0 = diff_0/(n_colors - 1)
diff_1 = float(highcolor[1] - lowcolor[1])
incr_1 = diff_1/(n_colors - 1)
diff_2 = float(highcolor[2] - lowcolor[2])
incr_2 = diff_2/(n_colors - 1)
color_tuples = []
for index in range(n_colors):
new_tuple = (lowcolor[0] + (index * incr_0),
lowcolor[1] + (index * incr_1),
lowcolor[2] + (index * incr_2))
color_tuples.append(new_tuple)
return color_tuples
@staticmethod
def _label_rgb(colors):
"""
Takes tuple (a, b, c) and returns an rgb color 'rgb(a, b, c)'
"""
return ('rgb(%s, %s, %s)' % (colors[0], colors[1], colors[2]))
    @staticmethod
    def _unlabel_rgb(colors):
        """
        Takes rgb color(s) 'rgb(a, b, c)' and returns tuple(s) (a, b, c)
        This function takes either an 'rgb(a, b, c)' color or a list of
        such colors and returns the color tuples in tuple(s) (a, b, c)
        """
        # Pass 1: strip the string down to its numeric payload.  Digits
        # (any char float() accepts) are kept; ',' and '.' are kept; all
        # other characters ('r', 'g', 'b', '(', ')', spaces) are dropped.
        str_vals = ''
        for index in range(len(colors)):
            try:
                float(colors[index])
                str_vals = str_vals + colors[index]
            except ValueError:
                if colors[index] == ',' or colors[index] == '.':
                    str_vals = str_vals + colors[index]
                    # NOTE(review): this extra ',' append sits inside the
                    # separator branch, so every ',' or '.' in the input
                    # emits two characters.  Upstream versions of this
                    # helper place this statement AFTER the for loop (to
                    # terminate the final number) — verify the intended
                    # indentation before relying on this path.
                    str_vals = str_vals + ','
        # Pass 2: split the comma-delimited digit string back into floats.
        numbers = []
        str_num = ''
        for char in str_vals:
            if char != ',':
                str_num = str_num + char
            else:
                numbers.append(float(str_num))
                str_num = ''
        # Despite the docstring mentioning lists of colors, only a single
        # (a, b, c) triplet is ever returned here.
        return (numbers[0], numbers[1], numbers[2])
@staticmethod
def create_scatterplotmatrix(df, index=None, endpts=None, diag='scatter',
height=500, width=500, size=6,
title='Scatterplot Matrix', colormap=None,
colormap_type='cat', dataframe=None,
headers=None, index_vals=None, **kwargs):
"""
Returns data for a scatterplot matrix.
:param (array) df: array of the data with column headers
:param (str) index: name of the index column in data array
:param (list|tuple) endpts: takes an increasing sequece of numbers
that defines intervals on the real line. They are used to group
the entries in an index of numbers into their corresponding
interval and therefore can be treated as categorical data
:param (str) diag: sets the chart type for the main diagonal plots
:param (int|float) height: sets the height of the chart
:param (int|float) width: sets the width of the chart
:param (float) size: sets the marker size (in px)
:param (str) title: the title label of the scatterplot matrix
:param (str|tuple|list|dict) colormap: either a plotly scale name,
an rgb or hex color, a color tuple, a list of colors or a
dictionary. An rgb color is of the form 'rgb(x, y, z)' where
x, y and z belong to the interval [0, 255] and a color tuple is a
tuple of the form (a, b, c) where a, b and c belong to [0, 1].
If colormap is a list, it must contain valid color types as its
members.
If colormap is a dictionary, all the string entries in
the index column must be a key in colormap. In this case, the
colormap_type is forced to 'cat' or categorical
:param (str) colormap_type: determines how colormap is interpreted.
Valid choices are 'seq' (sequential) and 'cat' (categorical). If
'seq' is selected, only the first two colors in colormap will be
considered (when colormap is a list) and the index values will be
linearly interpolated between those two colors. This option is
forced if all index values are numeric.
If 'cat' is selected, a color from colormap will be assigned to
each category from index, including the intervals if endpts is
being used
:param (dict) **kwargs: a dictionary of scatterplot arguments
The only forbidden parameters are 'size', 'color' and
'colorscale' in 'marker'
Example 1: Vanilla Scatterplot Matrix
```
import plotly.plotly as py
from plotly.graph_objs import graph_objs
from plotly.tools import FigureFactory as FF
import numpy as np
import pandas as pd
# Create dataframe
df = pd.DataFrame(np.random.randn(10, 2),
columns=['Column 1', 'Column 2'])
# Create scatterplot matrix
fig = FF.create_scatterplotmatrix(df)
# Plot
py.iplot(fig, filename='Vanilla Scatterplot Matrix')
```
Example 2: Indexing a Column
```
import plotly.plotly as py
from plotly.graph_objs import graph_objs
from plotly.tools import FigureFactory as FF
import numpy as np
import pandas as pd
# Create dataframe with index
df = pd.DataFrame(np.random.randn(10, 2),
columns=['A', 'B'])
# Add another column of strings to the dataframe
df['Fruit'] = pd.Series(['apple', 'apple', 'grape', 'apple', 'apple',
'grape', 'pear', 'pear', 'apple', 'pear'])
# Create scatterplot matrix
fig = FF.create_scatterplotmatrix(df, index='Fruit', size=10)
# Plot
py.iplot(fig, filename = 'Scatterplot Matrix with Index')
```
Example 3: Styling the Diagonal Subplots
```
import plotly.plotly as py
from plotly.graph_objs import graph_objs
from plotly.tools import FigureFactory as FF
import numpy as np
import pandas as pd
# Create dataframe with index
df = pd.DataFrame(np.random.randn(10, 4),
columns=['A', 'B', 'C', 'D'])
# Add another column of strings to the dataframe
df['Fruit'] = pd.Series(['apple', 'apple', 'grape', 'apple', 'apple',
'grape', 'pear', 'pear', 'apple', 'pear'])
# Create scatterplot matrix
fig = FF.create_scatterplotmatrix(df, diag='box', index='Fruit',
height=1000, width=1000)
# Plot
py.iplot(fig, filename = 'Scatterplot Matrix - Diagonal Styling')
```
Example 4: Use a Theme to Style the Subplots
```
import plotly.plotly as py
from plotly.graph_objs import graph_objs
from plotly.tools import FigureFactory as FF
import numpy as np
import pandas as pd
# Create dataframe with random data
df = pd.DataFrame(np.random.randn(100, 3),
columns=['A', 'B', 'C'])
# Create scatterplot matrix using a built-in
# Plotly palette scale and indexing column 'A'
fig = FF.create_scatterplotmatrix(df, diag='histogram',
index='A', colormap='Blues',
height=800, width=800)
# Plot
py.iplot(fig, filename = 'Scatterplot Matrix - Colormap Theme')
```
Example 5: Example 4 with Interval Factoring
```
import plotly.plotly as py
from plotly.graph_objs import graph_objs
from plotly.tools import FigureFactory as FF
import numpy as np
import pandas as pd
# Create dataframe with random data
df = pd.DataFrame(np.random.randn(100, 3),
columns=['A', 'B', 'C'])
# Create scatterplot matrix using a list of 2 rgb tuples
# and endpoints at -1, 0 and 1
fig = FF.create_scatterplotmatrix(df, diag='histogram', index='A',
colormap=['rgb(140, 255, 50)',
'rgb(170, 60, 115)',
'#6c4774',
(0.5, 0.1, 0.8)],
endpts=[-1, 0, 1],
height=800, width=800)
# Plot
py.iplot(fig, filename = 'Scatterplot Matrix - Intervals')
```
Example 6: Using the colormap as a Dictionary
```
import plotly.plotly as py
from plotly.graph_objs import graph_objs
from plotly.tools import FigureFactory as FF
import numpy as np
import pandas as pd
import random
# Create dataframe with random data
df = pd.DataFrame(np.random.randn(100, 3),
columns=['Column A',
'Column B',
'Column C'])
# Add new color column to dataframe
new_column = []
strange_colors = ['turquoise', 'limegreen', 'goldenrod']
for j in range(100):
new_column.append(random.choice(strange_colors))
df['Colors'] = pd.Series(new_column, index=df.index)
# Create scatterplot matrix using a dictionary of hex color values
# which correspond to actual color names in 'Colors' column
fig = FF.create_scatterplotmatrix(
df, diag='box', index='Colors',
colormap= dict(
turquoise = '#00F5FF',
limegreen = '#32CD32',
goldenrod = '#DAA520'
),
colormap_type='cat',
height=800, width=800
)
# Plot
py.iplot(fig, filename = 'Scatterplot Matrix - colormap dictionary ')
```
"""
# TODO: protected until #282
if dataframe is None:
dataframe = []
if headers is None:
headers = []
if index_vals is None:
index_vals = []
FigureFactory._validate_scatterplotmatrix(df, index, diag,
colormap_type, **kwargs)
# Validate colormap
if isinstance(colormap, dict):
colormap = FigureFactory._validate_colors_dict(colormap, 'rgb')
else:
colormap = FigureFactory._validate_colors(colormap, 'rgb')
if not index:
for name in df:
headers.append(name)
for name in headers:
dataframe.append(df[name].values.tolist())
# Check for same data-type in df columns
FigureFactory._validate_dataframe(dataframe)
figure = FigureFactory._scatterplot(dataframe, headers, diag,
size, height, width, title,
**kwargs)
return figure
else:
# Validate index selection
if index not in df:
raise exceptions.PlotlyError("Make sure you set the index "
"input variable to one of the "
"column names of your "
"dataframe.")
index_vals = df[index].values.tolist()
for name in df:
if name != index:
headers.append(name)
for name in headers:
dataframe.append(df[name].values.tolist())
# check for same data-type in each df column
FigureFactory._validate_dataframe(dataframe)
FigureFactory._validate_index(index_vals)
# check if all colormap keys are in the index
# if colormap is a dictionary
if isinstance(colormap, dict):
for key in colormap:
if not all(index in colormap for index in index_vals):
raise exceptions.PlotlyError("If colormap is a "
"dictionary, all the "
"names in the index "
"must be keys.")
figure = FigureFactory._scatterplot_dict(
dataframe, headers, diag, size, height, width, title,
index, index_vals, endpts, colormap, colormap_type,
**kwargs
)
return figure
else:
figure = FigureFactory._scatterplot_theme(
dataframe, headers, diag, size, height, width, title,
index, index_vals, endpts, colormap, colormap_type,
**kwargs
)
return figure
@staticmethod
def _validate_equal_length(*args):
"""
Validates that data lists or ndarrays are the same length.
:raises: (PlotlyError) If any data lists are not the same length.
"""
length = len(args[0])
if any(len(lst) != length for lst in args):
raise exceptions.PlotlyError("Oops! Your data lists or ndarrays "
"should be the same length.")
@staticmethod
def _validate_ohlc(open, high, low, close, direction, **kwargs):
"""
ohlc and candlestick specific validations
Specifically, this checks that the high value is the greatest value and
the low value is the lowest value in each unit.
See FigureFactory.create_ohlc() or FigureFactory.create_candlestick()
for params
:raises: (PlotlyError) If the high value is not the greatest value in
each unit.
:raises: (PlotlyError) If the low value is not the lowest value in each
unit.
:raises: (PlotlyError) If direction is not 'increasing' or 'decreasing'
"""
for lst in [open, low, close]:
for index in range(len(high)):
if high[index] < lst[index]:
raise exceptions.PlotlyError("Oops! Looks like some of "
"your high values are less "
"the corresponding open, "
"low, or close values. "
"Double check that your data "
"is entered in O-H-L-C order")
for lst in [open, high, close]:
for index in range(len(low)):
if low[index] > lst[index]:
raise exceptions.PlotlyError("Oops! Looks like some of "
"your low values are greater "
"than the corresponding high"
", open, or close values. "
"Double check that your data "
"is entered in O-H-L-C order")
direction_opts = ('increasing', 'decreasing', 'both')
if direction not in direction_opts:
raise exceptions.PlotlyError("direction must be defined as "
"'increasing', 'decreasing', or "
"'both'")
@staticmethod
def _validate_distplot(hist_data, curve_type):
"""
Distplot-specific validations
:raises: (PlotlyError) If hist_data is not a list of lists
:raises: (PlotlyError) If curve_type is not valid (i.e. not 'kde' or
'normal').
"""
try:
import pandas as pd
_pandas_imported = True
except ImportError:
_pandas_imported = False
hist_data_types = (list,)
if _numpy_imported:
hist_data_types += (np.ndarray,)
if _pandas_imported:
hist_data_types += (pd.core.series.Series,)
if not isinstance(hist_data[0], hist_data_types):
raise exceptions.PlotlyError("Oops, this function was written "
"to handle multiple datasets, if "
"you want to plot just one, make "
"sure your hist_data variable is "
"still a list of lists, i.e. x = "
"[1, 2, 3] -> x = [[1, 2, 3]]")
curve_opts = ('kde', 'normal')
if curve_type not in curve_opts:
raise exceptions.PlotlyError("curve_type must be defined as "
"'kde' or 'normal'")
if _scipy_imported is False:
raise ImportError("FigureFactory.create_distplot requires scipy")
@staticmethod
def _validate_positive_scalars(**kwargs):
"""
Validates that all values given in key/val pairs are positive.
Accepts kwargs to improve Exception messages.
:raises: (PlotlyError) If any value is < 0 or raises.
"""
for key, val in kwargs.items():
try:
if val <= 0:
raise ValueError('{} must be > 0, got {}'.format(key, val))
except TypeError:
raise exceptions.PlotlyError('{} must be a number, got {}'
.format(key, val))
@staticmethod
def _validate_streamline(x, y):
"""
Streamline-specific validations
Specifically, this checks that x and y are both evenly spaced,
and that the package numpy is available.
See FigureFactory.create_streamline() for params
:raises: (ImportError) If numpy is not available.
:raises: (PlotlyError) If x is not evenly spaced.
:raises: (PlotlyError) If y is not evenly spaced.
"""
if _numpy_imported is False:
raise ImportError("FigureFactory.create_streamline requires numpy")
for index in range(len(x) - 1):
if ((x[index + 1] - x[index]) - (x[1] - x[0])) > .0001:
raise exceptions.PlotlyError("x must be a 1 dimensional, "
"evenly spaced array")
for index in range(len(y) - 1):
if ((y[index + 1] - y[index]) -
(y[1] - y[0])) > .0001:
raise exceptions.PlotlyError("y must be a 1 dimensional, "
"evenly spaced array")
@staticmethod
def _validate_annotated_heatmap(z, x, y, annotation_text):
"""
Annotated-heatmap-specific validations
Check that if a text matrix is supplied, it has the same
dimensions as the z matrix.
See FigureFactory.create_annotated_heatmap() for params
:raises: (PlotlyError) If z and text matrices do not have the same
dimensions.
"""
if annotation_text is not None and isinstance(annotation_text, list):
FigureFactory._validate_equal_length(z, annotation_text)
for lst in range(len(z)):
if len(z[lst]) != len(annotation_text[lst]):
raise exceptions.PlotlyError("z and text should have the "
"same dimensions")
if x:
if len(x) != len(z[0]):
raise exceptions.PlotlyError("oops, the x list that you "
"provided does not match the "
"width of your z matrix ")
if y:
if len(y) != len(z):
raise exceptions.PlotlyError("oops, the y list that you "
"provided does not match the "
"length of your z matrix ")
@staticmethod
def _validate_table(table_text, font_colors):
"""
Table-specific validations
Check that font_colors is supplied correctly (1, 3, or len(text)
colors).
:raises: (PlotlyError) If font_colors is supplied incorretly.
See FigureFactory.create_table() for params
"""
font_colors_len_options = [1, 3, len(table_text)]
if len(font_colors) not in font_colors_len_options:
raise exceptions.PlotlyError("Oops, font_colors should be a list "
"of length 1, 3 or len(text)")
@staticmethod
def _flatten(array):
"""
Uses list comprehension to flatten array
:param (array): An iterable to flatten
:raises (PlotlyError): If iterable is not nested.
:rtype (list): The flattened list.
"""
try:
return [item for sublist in array for item in sublist]
except TypeError:
raise exceptions.PlotlyError("Your data array could not be "
"flattened! Make sure your data is "
"entered as lists or ndarrays!")
@staticmethod
def _hex_to_rgb(value):
"""
Calculates rgb values from a hex color code.
:param (string) value: Hex color string
:rtype (tuple) (r_value, g_value, b_value): tuple of rgb values
"""
value = value.lstrip('#')
hex_total_length = len(value)
rgb_section_length = hex_total_length // 3
return tuple(int(value[i:i + rgb_section_length], 16)
for i in range(0, hex_total_length, rgb_section_length))
@staticmethod
def create_quiver(x, y, u, v, scale=.1, arrow_scale=.3,
angle=math.pi / 9, **kwargs):
"""
Returns data for a quiver plot.
:param (list|ndarray) x: x coordinates of the arrow locations
:param (list|ndarray) y: y coordinates of the arrow locations
:param (list|ndarray) u: x components of the arrow vectors
:param (list|ndarray) v: y components of the arrow vectors
:param (float in [0,1]) scale: scales size of the arrows(ideally to
avoid overlap). Default = .1
:param (float in [0,1]) arrow_scale: value multiplied to length of barb
to get length of arrowhead. Default = .3
:param (angle in radians) angle: angle of arrowhead. Default = pi/9
:param kwargs: kwargs passed through plotly.graph_objs.Scatter
for more information on valid kwargs call
help(plotly.graph_objs.Scatter)
:rtype (dict): returns a representation of quiver figure.
Example 1: Trivial Quiver
```
import plotly.plotly as py
from plotly.tools import FigureFactory as FF
import math
# 1 Arrow from (0,0) to (1,1)
fig = FF.create_quiver(x=[0], y=[0],
u=[1], v=[1],
scale=1)
py.plot(fig, filename='quiver')
```
Example 2: Quiver plot using meshgrid
```
import plotly.plotly as py
from plotly.tools import FigureFactory as FF
import numpy as np
import math
# Add data
x,y = np.meshgrid(np.arange(0, 2, .2), np.arange(0, 2, .2))
u = np.cos(x)*y
v = np.sin(x)*y
#Create quiver
fig = FF.create_quiver(x, y, u, v)
# Plot
py.plot(fig, filename='quiver')
```
Example 3: Styling the quiver plot
```
import plotly.plotly as py
from plotly.tools import FigureFactory as FF
import numpy as np
import math
# Add data
x, y = np.meshgrid(np.arange(-np.pi, math.pi, .5),
np.arange(-math.pi, math.pi, .5))
u = np.cos(x)*y
v = np.sin(x)*y
# Create quiver
fig = FF.create_quiver(x, y, u, v, scale=.2,
arrow_scale=.3,
angle=math.pi/6,
name='Wind Velocity',
line=Line(width=1))
# Add title to layout
fig['layout'].update(title='Quiver Plot')
# Plot
py.plot(fig, filename='quiver')
```
"""
# TODO: protected until #282
from plotly.graph_objs import graph_objs
FigureFactory._validate_equal_length(x, y, u, v)
FigureFactory._validate_positive_scalars(arrow_scale=arrow_scale,
scale=scale)
barb_x, barb_y = _Quiver(x, y, u, v, scale,
arrow_scale, angle).get_barbs()
arrow_x, arrow_y = _Quiver(x, y, u, v, scale,
arrow_scale, angle).get_quiver_arrows()
quiver = graph_objs.Scatter(x=barb_x + arrow_x,
y=barb_y + arrow_y,
mode='lines', **kwargs)
data = [quiver]
layout = graph_objs.Layout(hovermode='closest')
return graph_objs.Figure(data=data, layout=layout)
@staticmethod
def create_streamline(x, y, u, v,
density=1, angle=math.pi / 9,
arrow_scale=.09, **kwargs):
"""
Returns data for a streamline plot.
:param (list|ndarray) x: 1 dimensional, evenly spaced list or array
:param (list|ndarray) y: 1 dimensional, evenly spaced list or array
:param (ndarray) u: 2 dimensional array
:param (ndarray) v: 2 dimensional array
:param (float|int) density: controls the density of streamlines in
plot. This is multiplied by 30 to scale similiarly to other
available streamline functions such as matplotlib.
Default = 1
:param (angle in radians) angle: angle of arrowhead. Default = pi/9
:param (float in [0,1]) arrow_scale: value to scale length of arrowhead
Default = .09
:param kwargs: kwargs passed through plotly.graph_objs.Scatter
for more information on valid kwargs call
help(plotly.graph_objs.Scatter)
:rtype (dict): returns a representation of streamline figure.
Example 1: Plot simple streamline and increase arrow size
```
import plotly.plotly as py
from plotly.tools import FigureFactory as FF
import numpy as np
import math
# Add data
x = np.linspace(-3, 3, 100)
y = np.linspace(-3, 3, 100)
Y, X = np.meshgrid(x, y)
u = -1 - X**2 + Y
v = 1 + X - Y**2
u = u.T # Transpose
v = v.T # Transpose
# Create streamline
fig = FF.create_streamline(x, y, u, v,
arrow_scale=.1)
# Plot
py.plot(fig, filename='streamline')
```
Example 2: from nbviewer.ipython.org/github/barbagroup/AeroPython
```
import plotly.plotly as py
from plotly.tools import FigureFactory as FF
import numpy as np
import math
# Add data
N = 50
x_start, x_end = -2.0, 2.0
y_start, y_end = -1.0, 1.0
x = np.linspace(x_start, x_end, N)
y = np.linspace(y_start, y_end, N)
X, Y = np.meshgrid(x, y)
ss = 5.0
x_s, y_s = -1.0, 0.0
# Compute the velocity field on the mesh grid
u_s = ss/(2*np.pi) * (X-x_s)/((X-x_s)**2 + (Y-y_s)**2)
v_s = ss/(2*np.pi) * (Y-y_s)/((X-x_s)**2 + (Y-y_s)**2)
# Create streamline
fig = FF.create_streamline(x, y, u_s, v_s,
density=2, name='streamline')
# Add source point
point = Scatter(x=[x_s], y=[y_s], mode='markers',
marker=Marker(size=14), name='source point')
# Plot
fig['data'].append(point)
py.plot(fig, filename='streamline')
```
"""
# TODO: protected until #282
from plotly.graph_objs import graph_objs
FigureFactory._validate_equal_length(x, y)
FigureFactory._validate_equal_length(u, v)
FigureFactory._validate_streamline(x, y)
FigureFactory._validate_positive_scalars(density=density,
arrow_scale=arrow_scale)
streamline_x, streamline_y = _Streamline(x, y, u, v,
density, angle,
arrow_scale).sum_streamlines()
arrow_x, arrow_y = _Streamline(x, y, u, v,
density, angle,
arrow_scale).get_streamline_arrows()
streamline = graph_objs.Scatter(x=streamline_x + arrow_x,
y=streamline_y + arrow_y,
mode='lines', **kwargs)
data = [streamline]
layout = graph_objs.Layout(hovermode='closest')
return graph_objs.Figure(data=data, layout=layout)
@staticmethod
def _make_increasing_ohlc(open, high, low, close, dates, **kwargs):
"""
Makes increasing ohlc sticks
_make_increasing_ohlc() and _make_decreasing_ohlc separate the
increasing trace from the decreasing trace so kwargs (such as
color) can be passed separately to increasing or decreasing traces
when direction is set to 'increasing' or 'decreasing' in
FigureFactory.create_candlestick()
:param (list) open: opening values
:param (list) high: high values
:param (list) low: low values
:param (list) close: closing values
:param (list) dates: list of datetime objects. Default: None
:param kwargs: kwargs to be passed to increasing trace via
plotly.graph_objs.Scatter.
:rtype (trace) ohlc_incr_data: Scatter trace of all increasing ohlc
sticks.
"""
(flat_increase_x,
flat_increase_y,
text_increase) = _OHLC(open, high, low, close, dates).get_increase()
if 'name' in kwargs:
showlegend = True
else:
kwargs.setdefault('name', 'Increasing')
showlegend = False
kwargs.setdefault('line', dict(color=_DEFAULT_INCREASING_COLOR,
width=1))
kwargs.setdefault('text', text_increase)
ohlc_incr = dict(type='scatter',
x=flat_increase_x,
y=flat_increase_y,
mode='lines',
showlegend=showlegend,
**kwargs)
return ohlc_incr
@staticmethod
def _make_decreasing_ohlc(open, high, low, close, dates, **kwargs):
"""
Makes decreasing ohlc sticks
:param (list) open: opening values
:param (list) high: high values
:param (list) low: low values
:param (list) close: closing values
:param (list) dates: list of datetime objects. Default: None
:param kwargs: kwargs to be passed to increasing trace via
plotly.graph_objs.Scatter.
:rtype (trace) ohlc_decr_data: Scatter trace of all decreasing ohlc
sticks.
"""
(flat_decrease_x,
flat_decrease_y,
text_decrease) = _OHLC(open, high, low, close, dates).get_decrease()
kwargs.setdefault('line', dict(color=_DEFAULT_DECREASING_COLOR,
width=1))
kwargs.setdefault('text', text_decrease)
kwargs.setdefault('showlegend', False)
kwargs.setdefault('name', 'Decreasing')
ohlc_decr = dict(type='scatter',
x=flat_decrease_x,
y=flat_decrease_y,
mode='lines',
**kwargs)
return ohlc_decr
@staticmethod
def create_ohlc(open, high, low, close,
dates=None, direction='both',
**kwargs):
"""
BETA function that creates an ohlc chart
:param (list) open: opening values
:param (list) high: high values
:param (list) low: low values
:param (list) close: closing
:param (list) dates: list of datetime objects. Default: None
:param (string) direction: direction can be 'increasing', 'decreasing',
or 'both'. When the direction is 'increasing', the returned figure
consists of all units where the close value is greater than the
corresponding open value, and when the direction is 'decreasing',
the returned figure consists of all units where the close value is
less than or equal to the corresponding open value. When the
direction is 'both', both increasing and decreasing units are
returned. Default: 'both'
:param kwargs: kwargs passed through plotly.graph_objs.Scatter.
These kwargs describe other attributes about the ohlc Scatter trace
such as the color or the legend name. For more information on valid
kwargs call help(plotly.graph_objs.Scatter)
:rtype (dict): returns a representation of an ohlc chart figure.
Example 1: Simple OHLC chart from a Pandas DataFrame
```
import plotly.plotly as py
from plotly.tools import FigureFactory as FF
from datetime import datetime
import pandas.io.data as web
df = web.DataReader("aapl", 'yahoo', datetime(2008, 8, 15), datetime(2008, 10, 15))
fig = FF.create_ohlc(df.Open, df.High, df.Low, df.Close, dates=df.index)
py.plot(fig, filename='finance/aapl-ohlc')
```
Example 2: Add text and annotations to the OHLC chart
```
import plotly.plotly as py
from plotly.tools import FigureFactory as FF
from datetime import datetime
import pandas.io.data as web
df = web.DataReader("aapl", 'yahoo', datetime(2008, 8, 15), datetime(2008, 10, 15))
fig = FF.create_ohlc(df.Open, df.High, df.Low, df.Close, dates=df.index)
# Update the fig - all options here: https://plot.ly/python/reference/#Layout
fig['layout'].update({
'title': 'The Great Recession',
'yaxis': {'title': 'AAPL Stock'},
'shapes': [{
'x0': '2008-09-15', 'x1': '2008-09-15', 'type': 'line',
'y0': 0, 'y1': 1, 'xref': 'x', 'yref': 'paper',
'line': {'color': 'rgb(40,40,40)', 'width': 0.5}
}],
'annotations': [{
'text': "the fall of Lehman Brothers",
'x': '2008-09-15', 'y': 1.02,
'xref': 'x', 'yref': 'paper',
'showarrow': False, 'xanchor': 'left'
}]
})
py.plot(fig, filename='finance/aapl-recession-ohlc', validate=False)
```
Example 3: Customize the OHLC colors
```
import plotly.plotly as py
from plotly.tools import FigureFactory as FF
from plotly.graph_objs import Line, Marker
from datetime import datetime
import pandas.io.data as web
df = web.DataReader("aapl", 'yahoo', datetime(2008, 1, 1), datetime(2009, 4, 1))
# Make increasing ohlc sticks and customize their color and name
fig_increasing = FF.create_ohlc(df.Open, df.High, df.Low, df.Close, dates=df.index,
direction='increasing', name='AAPL',
line=Line(color='rgb(150, 200, 250)'))
# Make decreasing ohlc sticks and customize their color and name
fig_decreasing = FF.create_ohlc(df.Open, df.High, df.Low, df.Close, dates=df.index,
direction='decreasing',
line=Line(color='rgb(128, 128, 128)'))
# Initialize the figure
fig = fig_increasing
# Add decreasing data with .extend()
fig['data'].extend(fig_decreasing['data'])
py.iplot(fig, filename='finance/aapl-ohlc-colors', validate=False)
```
Example 4: OHLC chart with datetime objects
```
import plotly.plotly as py
from plotly.tools import FigureFactory as FF
from datetime import datetime
# Add data
open_data = [33.0, 33.3, 33.5, 33.0, 34.1]
high_data = [33.1, 33.3, 33.6, 33.2, 34.8]
low_data = [32.7, 32.7, 32.8, 32.6, 32.8]
close_data = [33.0, 32.9, 33.3, 33.1, 33.1]
dates = [datetime(year=2013, month=10, day=10),
datetime(year=2013, month=11, day=10),
datetime(year=2013, month=12, day=10),
datetime(year=2014, month=1, day=10),
datetime(year=2014, month=2, day=10)]
# Create ohlc
fig = FF.create_ohlc(open_data, high_data,
low_data, close_data, dates=dates)
py.iplot(fig, filename='finance/simple-ohlc', validate=False)
```
"""
# TODO: protected until #282
from plotly.graph_objs import graph_objs
if dates is not None:
FigureFactory._validate_equal_length(open, high, low, close, dates)
else:
FigureFactory._validate_equal_length(open, high, low, close)
FigureFactory._validate_ohlc(open, high, low, close, direction,
**kwargs)
if direction is 'increasing':
ohlc_incr = FigureFactory._make_increasing_ohlc(open, high,
low, close,
dates, **kwargs)
data = [ohlc_incr]
elif direction is 'decreasing':
ohlc_decr = FigureFactory._make_decreasing_ohlc(open, high,
low, close,
dates, **kwargs)
data = [ohlc_decr]
else:
ohlc_incr = FigureFactory._make_increasing_ohlc(open, high,
low, close,
dates, **kwargs)
ohlc_decr = FigureFactory._make_decreasing_ohlc(open, high,
low, close,
dates, **kwargs)
data = [ohlc_incr, ohlc_decr]
layout = graph_objs.Layout(xaxis=dict(zeroline=False),
hovermode='closest')
return graph_objs.Figure(data=data, layout=layout)
@staticmethod
def _make_increasing_candle(open, high, low, close, dates, **kwargs):
"""
Makes boxplot trace for increasing candlesticks
_make_increasing_candle() and _make_decreasing_candle separate the
increasing traces from the decreasing traces so kwargs (such as
color) can be passed separately to increasing or decreasing traces
when direction is set to 'increasing' or 'decreasing' in
FigureFactory.create_candlestick()
:param (list) open: opening values
:param (list) high: high values
:param (list) low: low values
:param (list) close: closing values
:param (list) dates: list of datetime objects. Default: None
:param kwargs: kwargs to be passed to increasing trace via
plotly.graph_objs.Scatter.
:rtype (list) candle_incr_data: list of the box trace for
increasing candlesticks.
"""
increase_x, increase_y = _Candlestick(
open, high, low, close, dates, **kwargs).get_candle_increase()
if 'line' in kwargs:
kwargs.setdefault('fillcolor', kwargs['line']['color'])
else:
kwargs.setdefault('fillcolor', _DEFAULT_INCREASING_COLOR)
if 'name' in kwargs:
kwargs.setdefault('showlegend', True)
else:
kwargs.setdefault('showlegend', False)
kwargs.setdefault('name', 'Increasing')
kwargs.setdefault('line', dict(color=_DEFAULT_INCREASING_COLOR))
candle_incr_data = dict(type='box',
x=increase_x,
y=increase_y,
whiskerwidth=0,
boxpoints=False,
**kwargs)
return [candle_incr_data]
@staticmethod
def _make_decreasing_candle(open, high, low, close, dates, **kwargs):
"""
Makes boxplot trace for decreasing candlesticks
:param (list) open: opening values
:param (list) high: high values
:param (list) low: low values
:param (list) close: closing values
:param (list) dates: list of datetime objects. Default: None
:param kwargs: kwargs to be passed to decreasing trace via
plotly.graph_objs.Scatter.
:rtype (list) candle_decr_data: list of the box trace for
decreasing candlesticks.
"""
decrease_x, decrease_y = _Candlestick(
open, high, low, close, dates, **kwargs).get_candle_decrease()
if 'line' in kwargs:
kwargs.setdefault('fillcolor', kwargs['line']['color'])
else:
kwargs.setdefault('fillcolor', _DEFAULT_DECREASING_COLOR)
kwargs.setdefault('showlegend', False)
kwargs.setdefault('line', dict(color=_DEFAULT_DECREASING_COLOR))
kwargs.setdefault('name', 'Decreasing')
candle_decr_data = dict(type='box',
x=decrease_x,
y=decrease_y,
whiskerwidth=0,
boxpoints=False,
**kwargs)
return [candle_decr_data]
@staticmethod
def create_candlestick(open, high, low, close,
dates=None, direction='both', **kwargs):
"""
BETA function that creates a candlestick chart
:param (list) open: opening values
:param (list) high: high values
:param (list) low: low values
:param (list) close: closing values
:param (list) dates: list of datetime objects. Default: None
:param (string) direction: direction can be 'increasing', 'decreasing',
or 'both'. When the direction is 'increasing', the returned figure
consists of all candlesticks where the close value is greater than
the corresponding open value, and when the direction is
'decreasing', the returned figure consists of all candlesticks
where the close value is less than or equal to the corresponding
open value. When the direction is 'both', both increasing and
decreasing candlesticks are returned. Default: 'both'
:param kwargs: kwargs passed through plotly.graph_objs.Scatter.
These kwargs describe other attributes about the ohlc Scatter trace
such as the color or the legend name. For more information on valid
kwargs call help(plotly.graph_objs.Scatter)
:rtype (dict): returns a representation of candlestick chart figure.
Example 1: Simple candlestick chart from a Pandas DataFrame
```
import plotly.plotly as py
from plotly.tools import FigureFactory as FF
from datetime import datetime
import pandas.io.data as web
df = web.DataReader("aapl", 'yahoo', datetime(2007, 10, 1), datetime(2009, 4, 1))
fig = FF.create_candlestick(df.Open, df.High, df.Low, df.Close, dates=df.index)
py.plot(fig, filename='finance/aapl-candlestick', validate=False)
```
Example 2: Add text and annotations to the candlestick chart
```
fig = FF.create_candlestick(df.Open, df.High, df.Low, df.Close, dates=df.index)
# Update the fig - all options here: https://plot.ly/python/reference/#Layout
fig['layout'].update({
'title': 'The Great Recession',
'yaxis': {'title': 'AAPL Stock'},
'shapes': [{
'x0': '2007-12-01', 'x1': '2007-12-01',
'y0': 0, 'y1': 1, 'xref': 'x', 'yref': 'paper',
'line': {'color': 'rgb(30,30,30)', 'width': 1}
}],
'annotations': [{
'x': '2007-12-01', 'y': 0.05, 'xref': 'x', 'yref': 'paper',
'showarrow': False, 'xanchor': 'left',
'text': 'Official start of the recession'
}]
})
py.plot(fig, filename='finance/aapl-recession-candlestick', validate=False)
```
Example 3: Customize the candlestick colors
```
import plotly.plotly as py
from plotly.tools import FigureFactory as FF
from plotly.graph_objs import Line, Marker
from datetime import datetime
import pandas.io.data as web
df = web.DataReader("aapl", 'yahoo', datetime(2008, 1, 1), datetime(2009, 4, 1))
# Make increasing candlesticks and customize their color and name
fig_increasing = FF.create_candlestick(df.Open, df.High, df.Low, df.Close, dates=df.index,
direction='increasing', name='AAPL',
marker=Marker(color='rgb(150, 200, 250)'),
line=Line(color='rgb(150, 200, 250)'))
# Make decreasing candlesticks and customize their color and name
fig_decreasing = FF.create_candlestick(df.Open, df.High, df.Low, df.Close, dates=df.index,
direction='decreasing',
marker=Marker(color='rgb(128, 128, 128)'),
line=Line(color='rgb(128, 128, 128)'))
# Initialize the figure
fig = fig_increasing
# Add decreasing data with .extend()
fig['data'].extend(fig_decreasing['data'])
py.iplot(fig, filename='finance/aapl-candlestick-custom', validate=False)
```
Example 4: Candlestick chart with datetime objects
```
import plotly.plotly as py
from plotly.tools import FigureFactory as FF
from datetime import datetime
# Add data
open_data = [33.0, 33.3, 33.5, 33.0, 34.1]
high_data = [33.1, 33.3, 33.6, 33.2, 34.8]
low_data = [32.7, 32.7, 32.8, 32.6, 32.8]
close_data = [33.0, 32.9, 33.3, 33.1, 33.1]
dates = [datetime(year=2013, month=10, day=10),
datetime(year=2013, month=11, day=10),
datetime(year=2013, month=12, day=10),
datetime(year=2014, month=1, day=10),
datetime(year=2014, month=2, day=10)]
# Create ohlc
fig = FF.create_candlestick(open_data, high_data,
low_data, close_data, dates=dates)
py.iplot(fig, filename='finance/simple-candlestick', validate=False)
```
"""
# TODO: protected until #282
from plotly.graph_objs import graph_objs
if dates is not None:
FigureFactory._validate_equal_length(open, high, low, close, dates)
else:
FigureFactory._validate_equal_length(open, high, low, close)
FigureFactory._validate_ohlc(open, high, low, close, direction,
**kwargs)
if direction is 'increasing':
candle_incr_data = FigureFactory._make_increasing_candle(
open, high, low, close, dates, **kwargs)
data = candle_incr_data
elif direction is 'decreasing':
candle_decr_data = FigureFactory._make_decreasing_candle(
open, high, low, close, dates, **kwargs)
data = candle_decr_data
else:
candle_incr_data = FigureFactory._make_increasing_candle(
open, high, low, close, dates, **kwargs)
candle_decr_data = FigureFactory._make_decreasing_candle(
open, high, low, close, dates, **kwargs)
data = candle_incr_data + candle_decr_data
layout = graph_objs.Layout()
return graph_objs.Figure(data=data, layout=layout)
@staticmethod
def create_distplot(hist_data, group_labels,
bin_size=1., curve_type='kde',
colors=[], rug_text=[], histnorm=DEFAULT_HISTNORM,
show_hist=True, show_curve=True,
show_rug=True):
"""
BETA function that creates a distplot similar to seaborn.distplot
The distplot can be composed of all or any combination of the following
3 components: (1) histogram, (2) curve: (a) kernel density estimation
or (b) normal curve, and (3) rug plot. Additionally, multiple distplots
(from multiple datasets) can be created in the same plot.
:param (list[list]) hist_data: Use list of lists to plot multiple data
sets on the same plot.
:param (list[str]) group_labels: Names for each data set.
:param (list[float]|float) bin_size: Size of histogram bins.
Default = 1.
:param (str) curve_type: 'kde' or 'normal'. Default = 'kde'
:param (str) histnorm: 'probability density' or 'probability'
Default = 'probability density'
:param (bool) show_hist: Add histogram to distplot? Default = True
:param (bool) show_curve: Add curve to distplot? Default = True
:param (bool) show_rug: Add rug to distplot? Default = True
:param (list[str]) colors: Colors for traces.
:param (list[list]) rug_text: Hovertext values for rug_plot,
:return (dict): Representation of a distplot figure.
Example 1: Simple distplot of 1 data set
```
import plotly.plotly as py
from plotly.tools import FigureFactory as FF
hist_data = [[1.1, 1.1, 2.5, 3.0, 3.5,
3.5, 4.1, 4.4, 4.5, 4.5,
5.0, 5.0, 5.2, 5.5, 5.5,
5.5, 5.5, 5.5, 6.1, 7.0]]
group_labels = ['distplot example']
fig = FF.create_distplot(hist_data, group_labels)
url = py.plot(fig, filename='Simple distplot', validate=False)
```
Example 2: Two data sets and added rug text
```
import plotly.plotly as py
from plotly.tools import FigureFactory as FF
# Add histogram data
hist1_x = [0.8, 1.2, 0.2, 0.6, 1.6,
-0.9, -0.07, 1.95, 0.9, -0.2,
-0.5, 0.3, 0.4, -0.37, 0.6]
hist2_x = [0.8, 1.5, 1.5, 0.6, 0.59,
1.0, 0.8, 1.7, 0.5, 0.8,
-0.3, 1.2, 0.56, 0.3, 2.2]
# Group data together
hist_data = [hist1_x, hist2_x]
group_labels = ['2012', '2013']
# Add text
rug_text_1 = ['a1', 'b1', 'c1', 'd1', 'e1',
'f1', 'g1', 'h1', 'i1', 'j1',
'k1', 'l1', 'm1', 'n1', 'o1']
rug_text_2 = ['a2', 'b2', 'c2', 'd2', 'e2',
'f2', 'g2', 'h2', 'i2', 'j2',
'k2', 'l2', 'm2', 'n2', 'o2']
# Group text together
rug_text_all = [rug_text_1, rug_text_2]
# Create distplot
fig = FF.create_distplot(
hist_data, group_labels, rug_text=rug_text_all, bin_size=.2)
# Add title
fig['layout'].update(title='Dist Plot')
# Plot!
url = py.plot(fig, filename='Distplot with rug text', validate=False)
```
Example 3: Plot with normal curve and hide rug plot
```
import plotly.plotly as py
from plotly.tools import FigureFactory as FF
import numpy as np
x1 = np.random.randn(190)
x2 = np.random.randn(200)+1
x3 = np.random.randn(200)-1
x4 = np.random.randn(210)+2
hist_data = [x1, x2, x3, x4]
group_labels = ['2012', '2013', '2014', '2015']
fig = FF.create_distplot(
hist_data, group_labels, curve_type='normal',
show_rug=False, bin_size=.4)
url = py.plot(fig, filename='hist and normal curve', validate=False)
Example 4: Distplot with Pandas
```
import plotly.plotly as py
from plotly.tools import FigureFactory as FF
import numpy as np
import pandas as pd
df = pd.DataFrame({'2012': np.random.randn(200),
'2013': np.random.randn(200)+1})
py.iplot(FF.create_distplot([df[c] for c in df.columns], df.columns),
filename='examples/distplot with pandas',
validate=False)
```
"""
# TODO: protected until #282
from plotly.graph_objs import graph_objs
FigureFactory._validate_distplot(hist_data, curve_type)
FigureFactory._validate_equal_length(hist_data, group_labels)
if isinstance(bin_size, (float, int)):
bin_size = [bin_size]*len(hist_data)
hist = _Distplot(
hist_data, histnorm, group_labels, bin_size,
curve_type, colors, rug_text,
show_hist, show_curve).make_hist()
if curve_type == 'normal':
curve = _Distplot(
hist_data, histnorm, group_labels, bin_size,
curve_type, colors, rug_text,
show_hist, show_curve).make_normal()
else:
curve = _Distplot(
hist_data, histnorm, group_labels, bin_size,
curve_type, colors, rug_text,
show_hist, show_curve).make_kde()
rug = _Distplot(
hist_data, histnorm, group_labels, bin_size,
curve_type, colors, rug_text,
show_hist, show_curve).make_rug()
data = []
if show_hist:
data.append(hist)
if show_curve:
data.append(curve)
if show_rug:
data.append(rug)
layout = graph_objs.Layout(
barmode='overlay',
hovermode='closest',
legend=dict(traceorder='reversed'),
xaxis1=dict(domain=[0.0, 1.0],
anchor='y2',
zeroline=False),
yaxis1=dict(domain=[0.35, 1],
anchor='free',
position=0.0),
yaxis2=dict(domain=[0, 0.25],
anchor='x1',
dtick=1,
showticklabels=False))
else:
layout = graph_objs.Layout(
barmode='overlay',
hovermode='closest',
legend=dict(traceorder='reversed'),
xaxis1=dict(domain=[0.0, 1.0],
anchor='y2',
zeroline=False),
yaxis1=dict(domain=[0., 1],
anchor='free',
position=0.0))
data = sum(data, [])
return graph_objs.Figure(data=data, layout=layout)
@staticmethod
def create_dendrogram(X, orientation="bottom", labels=None,
colorscale=None):
"""
BETA function that returns a dendrogram Plotly figure object.
:param (ndarray) X: Matrix of observations as array of arrays
:param (str) orientation: 'top', 'right', 'bottom', or 'left'
:param (list) labels: List of axis category labels(observation labels)
:param (list) colorscale: Optional colorscale for dendrogram tree
clusters
Example 1: Simple bottom oriented dendrogram
```
import plotly.plotly as py
from plotly.tools import FigureFactory as FF
import numpy as np
X = np.random.rand(10,10)
dendro = FF.create_dendrogram(X)
plot_url = py.plot(dendro, filename='simple-dendrogram')
```
Example 2: Dendrogram to put on the left of the heatmap
```
import plotly.plotly as py
from plotly.tools import FigureFactory as FF
import numpy as np
X = np.random.rand(5,5)
names = ['Jack', 'Oxana', 'John', 'Chelsea', 'Mark']
dendro = FF.create_dendrogram(X, orientation='right', labels=names)
dendro['layout'].update({'width':700, 'height':500})
py.iplot(dendro, filename='vertical-dendrogram')
```
Example 3: Dendrogram with Pandas
```
import plotly.plotly as py
from plotly.tools import FigureFactory as FF
import numpy as np
import pandas as pd
Index= ['A','B','C','D','E','F','G','H','I','J']
df = pd.DataFrame(abs(np.random.randn(10, 10)), index=Index)
fig = FF.create_dendrogram(df, labels=Index)
url = py.plot(fig, filename='pandas-dendrogram')
```
"""
dependencies = (_scipy_imported and _scipy__spatial_imported and
_scipy__cluster__hierarchy_imported)
if dependencies is False:
raise ImportError("FigureFactory.create_dendrogram requires scipy, \
scipy.spatial and scipy.hierarchy")
s = X.shape
if len(s) != 2:
exceptions.PlotlyError("X should be 2-dimensional array.")
dendrogram = _Dendrogram(X, orientation, labels, colorscale)
return {'layout': dendrogram.layout,
'data': dendrogram.data}
@staticmethod
def create_annotated_heatmap(z, x=None, y=None, annotation_text=None,
colorscale='RdBu', font_colors=None,
showscale=False, reversescale=False,
**kwargs):
"""
BETA function that creates annotated heatmaps
This function adds annotations to each cell of the heatmap.
:param (list[list]|ndarray) z: z matrix to create heatmap.
:param (list) x: x axis labels.
:param (list) y: y axis labels.
:param (list[list]|ndarray) annotation_text: Text strings for
annotations. Should have the same dimensions as the z matrix. If no
text is added, the values of the z matrix are annotated. Default =
z matrix values.
:param (list|str) colorscale: heatmap colorscale.
:param (list) font_colors: List of two color strings: [min_text_color,
max_text_color] where min_text_color is applied to annotations for
heatmap values < (max_value - min_value)/2. If font_colors is not
defined, the colors are defined logically as black or white
depending on the heatmap's colorscale.
:param (bool) showscale: Display colorscale. Default = False
:param kwargs: kwargs passed through plotly.graph_objs.Heatmap.
These kwargs describe other attributes about the annotated Heatmap
trace such as the colorscale. For more information on valid kwargs
call help(plotly.graph_objs.Heatmap)
Example 1: Simple annotated heatmap with default configuration
```
import plotly.plotly as py
from plotly.tools import FigureFactory as FF
z = [[0.300000, 0.00000, 0.65, 0.300000],
[1, 0.100005, 0.45, 0.4300],
[0.300000, 0.00000, 0.65, 0.300000],
[1, 0.100005, 0.45, 0.00000]]
figure = FF.create_annotated_heatmap(z)
py.iplot(figure)
```
"""
# TODO: protected until #282
from plotly.graph_objs import graph_objs
# Avoiding mutables in the call signature
font_colors = font_colors if font_colors is not None else []
FigureFactory._validate_annotated_heatmap(z, x, y, annotation_text)
annotations = _AnnotatedHeatmap(z, x, y, annotation_text,
colorscale, font_colors, reversescale,
**kwargs).make_annotations()
if x or y:
trace = dict(type='heatmap', z=z, x=x, y=y, colorscale=colorscale,
showscale=showscale, **kwargs)
layout = dict(annotations=annotations,
xaxis=dict(ticks='', dtick=1, side='top',
gridcolor='rgb(0, 0, 0)'),
yaxis=dict(ticks='', dtick=1, ticksuffix=' '))
else:
trace = dict(type='heatmap', z=z, colorscale=colorscale,
showscale=showscale, **kwargs)
layout = dict(annotations=annotations,
xaxis=dict(ticks='', side='top',
gridcolor='rgb(0, 0, 0)',
showticklabels=False),
yaxis=dict(ticks='', ticksuffix=' ',
showticklabels=False))
data = [trace]
return graph_objs.Figure(data=data, layout=layout)
@staticmethod
def create_table(table_text, colorscale=None, font_colors=None,
index=False, index_title='', annotation_offset=.45,
height_constant=30, hoverinfo='none', **kwargs):
"""
BETA function that creates data tables
:param (pandas.Dataframe | list[list]) text: data for table.
:param (str|list[list]) colorscale: Colorscale for table where the
color at value 0 is the header color, .5 is the first table color
and 1 is the second table color. (Set .5 and 1 to avoid the striped
table effect). Default=[[0, '#66b2ff'], [.5, '#d9d9d9'],
[1, '#ffffff']]
:param (list) font_colors: Color for fonts in table. Can be a single
color, three colors, or a color for each row in the table.
Default=['#000000'] (black text for the entire table)
:param (int) height_constant: Constant multiplied by # of rows to
create table height. Default=30.
:param (bool) index: Create (header-colored) index column index from
Pandas dataframe or list[0] for each list in text. Default=False.
:param (string) index_title: Title for index column. Default=''.
:param kwargs: kwargs passed through plotly.graph_objs.Heatmap.
These kwargs describe other attributes about the annotated Heatmap
trace such as the colorscale. For more information on valid kwargs
call help(plotly.graph_objs.Heatmap)
Example 1: Simple Plotly Table
```
import plotly.plotly as py
from plotly.tools import FigureFactory as FF
text = [['Country', 'Year', 'Population'],
['US', 2000, 282200000],
['Canada', 2000, 27790000],
['US', 2010, 309000000],
['Canada', 2010, 34000000]]
table = FF.create_table(text)
py.iplot(table)
```
Example 2: Table with Custom Coloring
```
import plotly.plotly as py
from plotly.tools import FigureFactory as FF
text = [['Country', 'Year', 'Population'],
['US', 2000, 282200000],
['Canada', 2000, 27790000],
['US', 2010, 309000000],
['Canada', 2010, 34000000]]
table = FF.create_table(text,
colorscale=[[0, '#000000'],
[.5, '#80beff'],
[1, '#cce5ff']],
font_colors=['#ffffff', '#000000',
'#000000'])
py.iplot(table)
```
Example 3: Simple Plotly Table with Pandas
```
import plotly.plotly as py
from plotly.tools import FigureFactory as FF
import pandas as pd
df = pd.read_csv('http://www.stat.ubc.ca/~jenny/notOcto/STAT545A/examples/gapminder/data/gapminderDataFiveYear.txt', sep='\t')
df_p = df[0:25]
table_simple = FF.create_table(df_p)
py.iplot(table_simple)
```
"""
# TODO: protected until #282
from plotly.graph_objs import graph_objs
# Avoiding mutables in the call signature
colorscale = \
colorscale if colorscale is not None else [[0, '#00083e'],
[.5, '#ededee'],
[1, '#ffffff']]
font_colors = font_colors if font_colors is not None else ['#ffffff',
'#000000',
'#000000']
FigureFactory._validate_table(table_text, font_colors)
table_matrix = _Table(table_text, colorscale, font_colors, index,
index_title, annotation_offset,
**kwargs).get_table_matrix()
annotations = _Table(table_text, colorscale, font_colors, index,
index_title, annotation_offset,
**kwargs).make_table_annotations()
trace = dict(type='heatmap', z=table_matrix, opacity=.75,
colorscale=colorscale, showscale=False,
hoverinfo=hoverinfo, **kwargs)
data = [trace]
layout = dict(annotations=annotations,
height=len(table_matrix)*height_constant + 50,
margin=dict(t=0, b=0, r=0, l=0),
yaxis=dict(autorange='reversed', zeroline=False,
gridwidth=2, ticks='', dtick=1, tick0=.5,
showticklabels=False),
xaxis=dict(zeroline=False, gridwidth=2, ticks='',
dtick=1, tick0=-0.5, showticklabels=False))
return graph_objs.Figure(data=data, layout=layout)
class _Quiver(FigureFactory):
    """
    Refer to FigureFactory.create_quiver() for docstring
    """
    def __init__(self, x, y, u, v,
                 scale, arrow_scale, angle, **kwargs):

        def _flatten_or_keep(values):
            # _flatten raises PlotlyError for non-iterable input; keep the
            # original value in that case (matches previous behavior).
            try:
                return FigureFactory._flatten(values)
            except exceptions.PlotlyError:
                return values

        self.x = _flatten_or_keep(x)
        self.y = _flatten_or_keep(y)
        self.u = _flatten_or_keep(u)
        self.v = _flatten_or_keep(v)
        self.scale = scale
        self.arrow_scale = arrow_scale
        self.angle = angle
        self.end_x = []
        self.end_y = []
        self.scale_uv()
        # get_barbs() populates self.end_x / self.end_y as a side effect,
        # which get_quiver_arrows() relies on. The returned coordinate
        # lists are unused here.
        barb_x, barb_y = self.get_barbs()
        arrow_x, arrow_y = self.get_quiver_arrows()

    def scale_uv(self):
        """
        Scales u and v to avoid overlap of the arrows.

        u and v are added to x and y to get the
        endpoints of the arrows so a smaller scale value will
        result in less overlap of arrows.
        """
        self.u = [i * self.scale for i in self.u]
        self.v = [i * self.scale for i in self.v]

    def get_barbs(self):
        """
        Creates x and y startpoint and endpoint pairs

        After finding the endpoint of each barb this zips startpoint and
        endpoint pairs to create 2 lists: x_values for barbs and y values
        for barbs

        :rtype: (list, list) barb_x, barb_y: list of startpoint and endpoint
            x_value pairs separated by a None to create the barb of the arrow,
            and list of startpoint and endpoint y_value pairs separated by a
            None to create the barb of the arrow.
        """
        self.end_x = [i + j for i, j in zip(self.x, self.u)]
        self.end_y = [i + j for i, j in zip(self.y, self.v)]
        # None separators break the line between consecutive barbs.
        empty = [None] * len(self.x)
        barb_x = FigureFactory._flatten(zip(self.x, self.end_x, empty))
        barb_y = FigureFactory._flatten(zip(self.y, self.end_y, empty))
        return barb_x, barb_y

    def get_quiver_arrows(self):
        """
        Creates lists of x and y values to plot the arrows

        Gets length of each barb then calculates the length of each side of
        the arrow. Gets angle of barb and applies angle to each side of the
        arrowhead. Next uses arrow_scale to scale the length of arrowhead and
        creates x and y values for arrowhead point1 and point2. Finally x and y
        values for point1, endpoint and point2s for each arrowhead are
        separated by a None and zipped to create lists of x and y values for
        the arrows.

        :rtype: (list, list) arrow_x, arrow_y: list of point1, endpoint, point2
            x_values separated by a None to create the arrowhead and list of
            point1, endpoint, point2 y_values separated by a None to create
            the barb of the arrow.
        """
        dif_x = [i - j for i, j in zip(self.end_x, self.x)]
        dif_y = [i - j for i, j in zip(self.end_y, self.y)]

        # Barb lengths (default arrowhead length = 30% of barb length when
        # arrow_scale is left at its default).
        barb_len = [math.hypot(dx, dy) for dx, dy in zip(dif_x, dif_y)]
        arrow_len = [length * self.arrow_scale for length in barb_len]

        # Barb angles, then the two arrowhead edge angles.
        barb_ang = [math.atan2(dy, dx) for dx, dy in zip(dif_x, dif_y)]
        ang1 = [ang + self.angle for ang in barb_ang]
        ang2 = [ang - self.angle for ang in barb_ang]

        seg1_x = [length * math.cos(ang) for length, ang in zip(arrow_len, ang1)]
        seg1_y = [length * math.sin(ang) for length, ang in zip(arrow_len, ang1)]
        seg2_x = [length * math.cos(ang) for length, ang in zip(arrow_len, ang2)]
        seg2_y = [length * math.sin(ang) for length, ang in zip(arrow_len, ang2)]

        # Arrowhead corner points. (The previous version recomputed these
        # four identical lists once per barb inside a loop -- accidental
        # O(n^2) with an unchanged result; computing them once is
        # equivalent.)
        point1_x = [i - j for i, j in zip(self.end_x, seg1_x)]
        point1_y = [i - j for i, j in zip(self.end_y, seg1_y)]
        point2_x = [i - j for i, j in zip(self.end_x, seg2_x)]
        point2_y = [i - j for i, j in zip(self.end_y, seg2_y)]

        # point1 -> endpoint -> point2, with None separators between arrows.
        empty = [None] * len(self.end_x)
        arrow_x = FigureFactory._flatten(zip(point1_x, self.end_x,
                                             point2_x, empty))
        arrow_y = FigureFactory._flatten(zip(point1_y, self.end_y,
                                             point2_y, empty))
        return arrow_x, arrow_y
class _Streamline(FigureFactory):
    """
    Refer to FigureFactory.create_streamline() for docstring.

    Computes evenly-spaced streamlines for the vector field (u, v) sampled
    on the grid (x, y), then builds arrowheads at the 1/3 point of each
    streamline.  Integration is adapted from Bokeh's streamline code.
    """
    def __init__(self, x, y, u, v,
                 density, angle,
                 arrow_scale, **kwargs):
        self.x = np.array(x)
        self.y = np.array(y)
        self.u = np.array(u)
        self.v = np.array(v)
        self.angle = angle
        self.arrow_scale = arrow_scale
        self.density = int(30 * density)  # Scale similarly to other functions
        self.delta_x = self.x[1] - self.x[0]
        self.delta_y = self.y[1] - self.y[0]
        self.val_x = self.x
        self.val_y = self.y

        # Set up spacing: self.blank is a density x density occupancy grid
        # marking cells that already contain a trajectory, which keeps the
        # streamlines evenly spaced.
        self.blank = np.zeros((self.density, self.density))
        self.spacing_x = len(self.x) / float(self.density - 1)
        self.spacing_y = len(self.y) / float(self.density - 1)
        self.trajectories = []

        # Rescale speed onto axes-coordinates
        self.u = self.u / (self.x[-1] - self.x[0])
        self.v = self.v / (self.y[-1] - self.y[0])
        self.speed = np.sqrt(self.u ** 2 + self.v ** 2)

        # Rescale u and v for integrations.
        self.u *= len(self.x)
        self.v *= len(self.y)
        self.st_x = []
        self.st_y = []
        self.get_streamlines()
        # NOTE: sum_streamlines() and get_streamline_arrows() are pure
        # (read-only); callers invoke them directly, so the previous
        # compute-and-discard calls here were dropped as wasted work.

    def blank_pos(self, xi, yi):
        """
        Return the (col, row) occupancy-grid cell containing point (xi, yi).

        Set up positions for trajectories to be used with rk4 function.
        """
        return (int((xi / self.spacing_x) + 0.5),
                int((yi / self.spacing_y) + 0.5))

    def value_at(self, a, xi, yi):
        """
        Bilinearly interpolate array ``a`` at grid position (xi, yi).

        Set up for RK4 function, based on Bokeh's streamline code.
        """
        if isinstance(xi, np.ndarray):
            # Bugfix: this branch previously overwrote self.x/self.y (the
            # axis arrays) instead of the interpolation indices, corrupting
            # instance state; ``np.int`` was also removed in NumPy 1.24.
            self.val_x = xi.astype(int)
            self.val_y = yi.astype(int)
        else:
            self.val_x = int(xi)
            self.val_y = int(yi)
        a00 = a[self.val_y, self.val_x]
        a01 = a[self.val_y, self.val_x + 1]
        a10 = a[self.val_y + 1, self.val_x]
        a11 = a[self.val_y + 1, self.val_x + 1]
        xt = xi - self.val_x
        yt = yi - self.val_y
        a0 = a00 * (1 - xt) + a01 * xt
        a1 = a10 * (1 - xt) + a11 * xt
        return a0 * (1 - yt) + a1 * yt

    def rk4_integrate(self, x0, y0):
        """
        RK4 forward and back trajectories from the initial conditions.

        Adapted from Bokeh's streamline -uses Runge-Kutta method to fill
        x and y trajectories then checks length of traj (s in units of axes)
        """
        def f(xi, yi):
            dt_ds = 1. / self.value_at(self.speed, xi, yi)
            ui = self.value_at(self.u, xi, yi)
            vi = self.value_at(self.v, xi, yi)
            return ui * dt_ds, vi * dt_ds

        def g(xi, yi):
            # Same as f but integrating backwards along the field.
            dt_ds = 1. / self.value_at(self.speed, xi, yi)
            ui = self.value_at(self.u, xi, yi)
            vi = self.value_at(self.v, xi, yi)
            return -ui * dt_ds, -vi * dt_ds

        def check(xi, yi):
            # True while (xi, yi) stays strictly inside the grid
            # (plain def instead of a lambda assignment, per PEP 8).
            return (0 <= xi < len(self.x) - 1 and
                    0 <= yi < len(self.y) - 1)

        xb_changes = []
        yb_changes = []

        def rk4(x0, y0, f):
            ds = 0.01
            stotal = 0
            xi = x0
            yi = y0
            xb, yb = self.blank_pos(xi, yi)
            xf_traj = []
            yf_traj = []
            while check(xi, yi):
                xf_traj.append(xi)
                yf_traj.append(yi)
                try:
                    k1x, k1y = f(xi, yi)
                    k2x, k2y = f(xi + .5 * ds * k1x, yi + .5 * ds * k1y)
                    k3x, k3y = f(xi + .5 * ds * k2x, yi + .5 * ds * k2y)
                    k4x, k4y = f(xi + ds * k3x, yi + ds * k3y)
                except IndexError:
                    break
                xi += ds * (k1x + 2 * k2x + 2 * k3x + k4x) / 6.
                yi += ds * (k1y + 2 * k2y + 2 * k3y + k4y) / 6.
                if not check(xi, yi):
                    break
                stotal += ds
                new_xb, new_yb = self.blank_pos(xi, yi)
                if new_xb != xb or new_yb != yb:
                    # Stop if we enter an occupied cell; otherwise claim it.
                    if self.blank[new_yb, new_xb] == 0:
                        self.blank[new_yb, new_xb] = 1
                        xb_changes.append(new_xb)
                        yb_changes.append(new_yb)
                        xb = new_xb
                        yb = new_yb
                    else:
                        break
                if stotal > 2:
                    break
            return stotal, xf_traj, yf_traj

        sf, xf_traj, yf_traj = rk4(x0, y0, f)
        sb, xb_traj, yb_traj = rk4(x0, y0, g)
        stotal = sf + sb
        x_traj = xb_traj[::-1] + xf_traj[1:]
        y_traj = yb_traj[::-1] + yf_traj[1:]

        if len(x_traj) < 1:
            return None

        if stotal > .2:
            # Keep trajectories longer than 0.2 axes units; mark the seed.
            initxb, inityb = self.blank_pos(x0, y0)
            self.blank[inityb, initxb] = 1
            return x_traj, y_traj
        else:
            # Too short: release every cell this attempt claimed.
            for xb, yb in zip(xb_changes, yb_changes):
                self.blank[yb, xb] = 0
            return None

    def traj(self, xb, yb):
        """
        Integrate trajectories

        :param (int) xb: results of passing xi through self.blank_pos
        :param (int) yb: results of passing yi through self.blank_pos

        Calculate each trajectory based on rk4 integrate method.
        """
        if xb < 0 or xb >= self.density or yb < 0 or yb >= self.density:
            return
        if self.blank[yb, xb] == 0:
            t = self.rk4_integrate(xb * self.spacing_x, yb * self.spacing_y)
            if t is not None:
                self.trajectories.append(t)

    def get_streamlines(self):
        """
        Get streamlines by building trajectory set, seeding from the
        outermost ring of the grid inward.  Each streamline list ends with
        a NaN separator so traces can be concatenated.
        """
        for indent in range(self.density // 2):
            for xi in range(self.density - 2 * indent):
                self.traj(xi + indent, indent)
                self.traj(xi + indent, self.density - 1 - indent)
                self.traj(indent, xi + indent)
                self.traj(self.density - 1 - indent, xi + indent)

        # Map grid coordinates back into data coordinates.
        self.st_x = [np.array(t[0]) * self.delta_x + self.x[0] for t in
                     self.trajectories]
        self.st_y = [np.array(t[1]) * self.delta_y + self.y[0] for t in
                     self.trajectories]

        for index in range(len(self.st_x)):
            self.st_x[index] = self.st_x[index].tolist()
            self.st_x[index].append(np.nan)

        for index in range(len(self.st_y)):
            self.st_y[index] = self.st_y[index].tolist()
            self.st_y[index].append(np.nan)

    def get_streamline_arrows(self):
        """
        Makes an arrow for each streamline.

        Gets angle of streamline at 1/3 mark and creates arrow coordinates
        based off of user defined angle and arrow_scale.

        :rtype (list, list) arrows_x: x-values to create arrowhead and
            arrows_y: y-values to create arrowhead
        """
        arrow_end_x = np.empty((len(self.st_x)))
        arrow_end_y = np.empty((len(self.st_y)))
        arrow_start_x = np.empty((len(self.st_x)))
        arrow_start_y = np.empty((len(self.st_y)))
        for index in range(len(self.st_x)):
            arrow_end_x[index] = (self.st_x[index]
                                  [int(len(self.st_x[index]) / 3)])
            arrow_start_x[index] = (self.st_x[index]
                                    [(int(len(self.st_x[index]) / 3)) - 1])
            arrow_end_y[index] = (self.st_y[index]
                                  [int(len(self.st_y[index]) / 3)])
            arrow_start_y[index] = (self.st_y[index]
                                    [(int(len(self.st_y[index]) / 3)) - 1])

        dif_x = arrow_end_x - arrow_start_x
        dif_y = arrow_end_y - arrow_start_y

        # NOTE(review): a zero dif_x triggers a divide warning here;
        # behavior intentionally unchanged.
        streamline_ang = np.arctan(dif_y / dif_x)

        ang1 = streamline_ang + (self.angle)
        ang2 = streamline_ang - (self.angle)

        seg1_x = np.cos(ang1) * self.arrow_scale
        seg1_y = np.sin(ang1) * self.arrow_scale
        seg2_x = np.cos(ang2) * self.arrow_scale
        seg2_y = np.sin(ang2) * self.arrow_scale

        point1_x = np.empty((len(dif_x)))
        point1_y = np.empty((len(dif_y)))
        point2_x = np.empty((len(dif_x)))
        point2_y = np.empty((len(dif_y)))

        for index in range(len(dif_x)):
            if dif_x[index] >= 0:
                point1_x[index] = arrow_end_x[index] - seg1_x[index]
                point1_y[index] = arrow_end_y[index] - seg1_y[index]
                point2_x[index] = arrow_end_x[index] - seg2_x[index]
                point2_y[index] = arrow_end_y[index] - seg2_y[index]
            else:
                point1_x[index] = arrow_end_x[index] + seg1_x[index]
                point1_y[index] = arrow_end_y[index] + seg1_y[index]
                point2_x[index] = arrow_end_x[index] + seg2_x[index]
                point2_y[index] = arrow_end_y[index] + seg2_y[index]

        space = np.empty((len(point1_x)))
        space[:] = np.nan

        # Interleave [point1, end, point2, NaN] per arrow.  ``np.matrix``
        # is deprecated; a plain 2-D array flattened in column-major ('F')
        # order yields the identical interleaving.
        arrows_x = np.array([point1_x, arrow_end_x, point2_x, space])
        arrows_x = arrows_x.flatten('F')
        arrows_x = arrows_x.tolist()

        arrows_y = np.array([point1_y, arrow_end_y, point2_y, space])
        arrows_y = arrows_y.flatten('F')
        arrows_y = arrows_y.tolist()

        return arrows_x, arrows_y

    def sum_streamlines(self):
        """
        Makes all streamlines readable as a single trace.

        :rtype (list, list): streamline_x: all x values for each streamline
            combined into single list and streamline_y: all y values for each
            streamline combined into single list
        """
        streamline_x = sum(self.st_x, [])
        streamline_y = sum(self.st_y, [])
        return streamline_x, streamline_y
class _OHLC(FigureFactory):
    """
    Refer to FigureFactory.create_ohlc_increase() for docstring.
    """
    def __init__(self, open, high, low, close, dates, **kwargs):
        self.open = open
        self.high = high
        self.low = low
        self.close = close
        self.empty = [None] * len(open)
        self.dates = dates

        self.all_x = []
        self.all_y = []
        self.increase_x = []
        self.increase_y = []
        self.decrease_x = []
        self.decrease_y = []
        self.get_all_xy()
        self.separate_increase_decrease()

    def get_all_xy(self):
        """
        Zip the inputs into the OHLC shape: a low-to-high vertical bar with
        horizontal branches for the open and close values.

        With dates, the branch length is one fifth of the smallest gap
        between consecutive dates; without dates the x-axis is a range of
        integers and the branch length is a fixed .2.
        """
        self.all_y = list(zip(self.open, self.open, self.high,
                              self.low, self.close, self.close, self.empty))
        if self.dates is not None:
            gaps = [later - earlier
                    for earlier, later in zip(self.dates, self.dates[1:])]
            branch = min(gaps) / 5
            self.all_x = [[d - branch, d, d, d, d, d + branch, None]
                          for d in self.dates]
        else:
            self.all_x = [[i - .2, i, i, i, i, i + .2, None]
                          for i in range(len(self.open))]

    def separate_increase_decrease(self):
        """
        Split the OHLC shapes into two groups:
        (1) increase, where close > open, and
        (2) decrease, where close <= open.
        Entries with a missing close are skipped entirely.
        """
        for idx, open_val in enumerate(self.open):
            close_val = self.close[idx]
            if close_val is None:
                continue
            if close_val > open_val:
                target_x, target_y = self.increase_x, self.increase_y
            else:
                target_x, target_y = self.decrease_x, self.decrease_y
            target_x.append(self.all_x[idx])
            target_y.append(self.all_y[idx])

    def get_increase(self):
        """
        Flatten increase data and build its hovertext.

        :rtype (list, list, list): flat_increase_x: x-values for the
            increasing trace, flat_increase_y: y-values for the increasing
            trace and text_increase: hovertext for the increasing trace
        """
        flat_increase_x = FigureFactory._flatten(self.increase_x)
        flat_increase_y = FigureFactory._flatten(self.increase_y)
        text_increase = ("Open", "Open", "High",
                         "Low", "Close", "Close", '') * len(self.increase_x)
        return flat_increase_x, flat_increase_y, text_increase

    def get_decrease(self):
        """
        Flatten decrease data and build its hovertext.

        :rtype (list, list, list): flat_decrease_x: x-values for the
            decreasing trace, flat_decrease_y: y-values for the decreasing
            trace and text_decrease: hovertext for the decreasing trace
        """
        flat_decrease_x = FigureFactory._flatten(self.decrease_x)
        flat_decrease_y = FigureFactory._flatten(self.decrease_y)
        text_decrease = ("Open", "Open", "High",
                         "Low", "Close", "Close", '') * len(self.decrease_x)
        return flat_decrease_x, flat_decrease_y, text_decrease
class _Candlestick(FigureFactory):
    """
    Refer to FigureFactory.create_candlestick() for docstring.
    """
    def __init__(self, open, high, low, close, dates, **kwargs):
        self.open = open
        self.high = high
        self.low = low
        self.close = close
        self.x = dates if dates is not None else list(range(len(self.open)))
        self.get_candle_increase()

    def get_candle_increase(self):
        """
        Collect the candles whose close value exceeds their open value.

        Each matching candle contributes the six y-values
        (low, open, close, close, close, high) and its x position
        repeated six times.
        """
        increase_y = []
        increase_x = []
        for idx, open_val in enumerate(self.open):
            if self.close[idx] > open_val:
                increase_y.extend((self.low[idx], open_val,
                                   self.close[idx], self.close[idx],
                                   self.close[idx], self.high[idx]))
                increase_x.extend([self.x[idx]] * 6)
        return increase_x, increase_y

    def get_candle_decrease(self):
        """
        Collect the candles whose close value is at most their open value.

        Each matching candle contributes the six y-values
        (low, open, close, close, close, high) and its x position
        repeated six times.
        """
        decrease_y = []
        decrease_x = []
        for idx, open_val in enumerate(self.open):
            if self.close[idx] <= open_val:
                decrease_y.extend((self.low[idx], open_val,
                                   self.close[idx], self.close[idx],
                                   self.close[idx], self.high[idx]))
                decrease_x.extend([self.x[idx]] * 6)
        return decrease_x, decrease_y
class _Distplot(FigureFactory):
    """
    Refer to TraceFactory.create_distplot() for docstring
    """
    def __init__(self, hist_data, histnorm, group_labels,
                 bin_size, curve_type, colors,
                 rug_text, show_hist, show_curve):
        self.hist_data = hist_data
        self.histnorm = histnorm
        self.group_labels = group_labels
        self.bin_size = bin_size
        self.show_hist = show_hist
        self.show_curve = show_curve
        self.trace_number = len(hist_data)
        # Fall back to no hovertext when the caller gave none.
        self.rug_text = rug_text if rug_text else [None] * self.trace_number

        if colors:
            self.colors = colors
        else:
            # Default qualitative palette (10 colors).
            self.colors = [
                "rgb(31, 119, 180)", "rgb(255, 127, 14)",
                "rgb(44, 160, 44)", "rgb(214, 39, 40)",
                "rgb(148, 103, 189)", "rgb(140, 86, 75)",
                "rgb(227, 119, 194)", "rgb(127, 127, 127)",
                "rgb(188, 189, 34)", "rgb(23, 190, 207)"]
        self.curve_x = [None] * self.trace_number
        self.curve_y = [None] * self.trace_number

        # Per-trace data range, coerced to float.
        self.start = [min(trace) * 1. for trace in self.hist_data]
        self.end = [max(trace) * 1. for trace in self.hist_data]

    def make_hist(self):
        """
        Makes the histogram(s) for FigureFactory.create_distplot().

        :rtype (list) hist: list of histogram representations
        """
        hist = []
        for idx in range(self.trace_number):
            label = self.group_labels[idx]
            hist.append(dict(type='histogram',
                             x=self.hist_data[idx],
                             xaxis='x1',
                             yaxis='y1',
                             histnorm=self.histnorm,
                             name=label,
                             legendgroup=label,
                             marker=dict(color=self.colors[idx]),
                             autobinx=False,
                             xbins=dict(start=self.start[idx],
                                        end=self.end[idx],
                                        size=self.bin_size[idx]),
                             opacity=.7))
        return hist

    def make_kde(self):
        """
        Makes the kernel density estimation(s) for create_distplot().

        This is called when curve_type = 'kde' in create_distplot().

        :rtype (list) curve: list of kde representations
        """
        curve = []
        for idx in range(self.trace_number):
            span = self.end[idx] - self.start[idx]
            # 500-point evaluation grid across the data range.
            self.curve_x[idx] = [self.start[idx] + x * span / 500
                                 for x in range(500)]
            self.curve_y[idx] = (scipy.stats.gaussian_kde
                                 (self.hist_data[idx])
                                 (self.curve_x[idx]))
            if self.histnorm == ALTERNATIVE_HISTNORM:
                # 'probability' histnorm scales the density by the bin size.
                self.curve_y[idx] *= self.bin_size[idx]
            label = self.group_labels[idx]
            curve.append(dict(type='scatter',
                              x=self.curve_x[idx],
                              y=self.curve_y[idx],
                              xaxis='x1',
                              yaxis='y1',
                              mode='lines',
                              name=label,
                              legendgroup=label,
                              showlegend=not self.show_hist,
                              marker=dict(color=self.colors[idx])))
        return curve

    def make_normal(self):
        """
        Makes the normal curve(s) for create_distplot().

        This is called when curve_type = 'normal' in create_distplot().

        :rtype (list) curve: list of normal curve representations
        """
        curve = []
        for idx in range(self.trace_number):
            mu, sigma = scipy.stats.norm.fit(self.hist_data[idx])
            span = self.end[idx] - self.start[idx]
            self.curve_x[idx] = [self.start[idx] + x * span / 500
                                 for x in range(500)]
            self.curve_y[idx] = scipy.stats.norm.pdf(
                self.curve_x[idx], loc=mu, scale=sigma)
            if self.histnorm == ALTERNATIVE_HISTNORM:
                # 'probability' histnorm scales the density by the bin size.
                self.curve_y[idx] *= self.bin_size[idx]
            label = self.group_labels[idx]
            curve.append(dict(type='scatter',
                              x=self.curve_x[idx],
                              y=self.curve_y[idx],
                              xaxis='x1',
                              yaxis='y1',
                              mode='lines',
                              name=label,
                              legendgroup=label,
                              showlegend=not self.show_hist,
                              marker=dict(color=self.colors[idx])))
        return curve

    def make_rug(self):
        """
        Makes the rug plot(s) for create_distplot().

        :rtype (list) rug: list of rug plot representations
        """
        rug = []
        for idx in range(self.trace_number):
            label = self.group_labels[idx]
            rug.append(dict(type='scatter',
                            x=self.hist_data[idx],
                            y=[label] * len(self.hist_data[idx]),
                            xaxis='x1',
                            yaxis='y2',
                            mode='markers',
                            name=label,
                            legendgroup=label,
                            showlegend=not (self.show_hist or
                                            self.show_curve),
                            text=self.rug_text[idx],
                            marker=dict(color=self.colors[idx],
                                        symbol='line-ns-open')))
        return rug
class _Dendrogram(FigureFactory):
    """Refer to FigureFactory.create_dendrogram() for docstring."""

    def __init__(self, X, orientation='bottom', labels=None, colorscale=None,
                 width="100%", height="100%", xaxis='xaxis', yaxis='yaxis'):
        # TODO: protected until #282
        from plotly.graph_objs import graph_objs
        self.orientation = orientation
        self.labels = labels
        self.xaxis = xaxis
        self.yaxis = yaxis
        self.data = []
        self.leaves = []
        self.sign = {self.xaxis: 1, self.yaxis: 1}
        self.layout = {self.xaxis: {}, self.yaxis: {}}

        # Flip coordinate signs so the tree grows away from the leaf edge.
        if self.orientation in ['left', 'bottom']:
            self.sign[self.xaxis] = 1
        else:
            self.sign[self.xaxis] = -1

        if self.orientation in ['right', 'bottom']:
            self.sign[self.yaxis] = 1
        else:
            self.sign[self.yaxis] = -1

        (dd_traces, xvals, yvals,
            ordered_labels, leaves) = self.get_dendrogram_traces(X, colorscale)

        self.labels = ordered_labels
        self.leaves = leaves
        yvals_flat = yvals.flatten()
        xvals_flat = xvals.flatten()

        # Leaf tick positions: the x-coordinates whose height (y) is zero.
        self.zero_vals = []
        for i in range(len(yvals_flat)):
            if yvals_flat[i] == 0.0 and xvals_flat[i] not in self.zero_vals:
                self.zero_vals.append(xvals_flat[i])

        self.zero_vals.sort()

        self.layout = self.set_figure_layout(width, height)
        self.data = graph_objs.Data(dd_traces)

    def get_color_dict(self, colorscale):
        """
        Returns colorscale used for dendrogram tree clusters.

        :param (list) colorscale: Colors to use for the plot in rgb format.
        :rtype (dict): A dict of default colors mapped to the user colorscale.
        """
        # These are the color codes returned for dendrograms
        # We're replacing them with nicer colors
        d = {'r': 'red',
             'g': 'green',
             'b': 'blue',
             'c': 'cyan',
             'm': 'magenta',
             'y': 'yellow',
             'k': 'black',
             'w': 'white'}
        default_colors = OrderedDict(sorted(d.items(), key=lambda t: t[0]))

        if colorscale is None:
            colorscale = [
                'rgb(0,116,217)',  # blue
                'rgb(35,205,205)',  # cyan
                'rgb(61,153,112)',  # green
                'rgb(40,35,35)',  # black
                'rgb(133,20,75)',  # magenta
                'rgb(255,65,54)',  # red
                'rgb(255,255,255)',  # white
                'rgb(255,220,0)']  # yellow

        # Overwrite the defaults, in sorted-key order, with the colorscale
        # (mutating values while iterating keys is safe: no keys change).
        for i, k in enumerate(default_colors):
            if i < len(colorscale):
                default_colors[k] = colorscale[i]

        return default_colors

    def set_axis_layout(self, axis_key):
        """
        Sets and returns default axis object for dendrogram figure.

        :param (str) axis_key: E.g., 'xaxis', 'xaxis1', 'yaxis', yaxis1', etc.
        :rtype (dict): An axis_key dictionary with set parameters.
        """
        axis_defaults = {
            'type': 'linear',
            'ticks': 'outside',
            'mirror': 'allticks',
            'rangemode': 'tozero',
            'showticklabels': True,
            'zeroline': False,
            'showgrid': False,
            'showline': True,
        }

        if len(self.labels) != 0:
            # Leaf labels sit on the x axis for vertical trees and on the
            # y axis for horizontal ones.
            axis_key_labels = self.xaxis
            if self.orientation in ['left', 'right']:
                axis_key_labels = self.yaxis
            if axis_key_labels not in self.layout:
                self.layout[axis_key_labels] = {}
            self.layout[axis_key_labels]['tickvals'] = \
                [zv*self.sign[axis_key] for zv in self.zero_vals]
            self.layout[axis_key_labels]['ticktext'] = self.labels
            self.layout[axis_key_labels]['tickmode'] = 'array'

        self.layout[axis_key].update(axis_defaults)

        return self.layout[axis_key]

    def set_figure_layout(self, width, height):
        """
        Sets and returns default layout object for dendrogram figure.
        """
        self.layout.update({
            'showlegend': False,
            'autosize': False,
            'hovermode': 'closest',
            'width': width,
            'height': height
        })

        self.set_axis_layout(self.xaxis)
        self.set_axis_layout(self.yaxis)

        return self.layout

    def get_dendrogram_traces(self, X, colorscale):
        """
        Calculates all the elements needed for plotting a dendrogram.

        :param (ndarray) X: Matrix of observations as array of arrays
        :param (list) colorscale: Color scale for dendrogram tree clusters
        :rtype (tuple): Contains all the traces in the following order:
            (a) trace_list: List of Plotly trace objects for dendrogram tree
            (b) icoord: All X points of the dendrogram tree as array of arrays
                with length 4
            (c) dcoord: All Y points of the dendrogram tree as array of arrays
                with length 4
            (d) ordered_labels: leaf labels in the order they are going to
                appear on the plot
            (e) P['leaves']: left-to-right traversal of the leaves
        """
        # TODO: protected until #282
        from plotly.graph_objs import graph_objs
        d = scs.distance.pdist(X)
        Z = sch.linkage(d, method='complete')
        P = sch.dendrogram(Z, orientation=self.orientation,
                           labels=self.labels, no_plot=True)

        icoord = scp.array(P['icoord'])
        dcoord = scp.array(P['dcoord'])
        ordered_labels = scp.array(P['ivl'])
        color_list = scp.array(P['color_list'])
        colors = self.get_color_dict(colorscale)

        trace_list = []

        for i in range(len(icoord)):
            # xs and ys are arrays of 4 points that make up the '∩' shapes
            # of the dendrogram tree
            if self.orientation in ['top', 'bottom']:
                xs, ys = icoord[i], dcoord[i]
            else:
                xs, ys = dcoord[i], icoord[i]
            color_key = color_list[i]
            trace = graph_objs.Scatter(
                x=np.multiply(self.sign[self.xaxis], xs),
                y=np.multiply(self.sign[self.yaxis], ys),
                mode='lines',
                marker=graph_objs.Marker(color=colors[color_key])
            )

            try:
                x_index = int(self.xaxis[-1])
            except ValueError:
                x_index = ''

            try:
                y_index = int(self.yaxis[-1])
            except ValueError:
                y_index = ''

            # Bugfix: for numbered axes (e.g. 'xaxis2') x_index/y_index are
            # ints, and 'x' + 2 raised TypeError; coerce to str.
            trace['xaxis'] = 'x' + str(x_index)
            trace['yaxis'] = 'y' + str(y_index)

            trace_list.append(trace)

        return trace_list, icoord, dcoord, ordered_labels, P['leaves']
class _AnnotatedHeatmap(FigureFactory):
    """
    Refer to TraceFactory.create_annotated_heatmap() for docstring
    """
    def __init__(self, z, x, y, annotation_text, colorscale,
                 font_colors, reversescale, **kwargs):
        from plotly.graph_objs import graph_objs
        self.z = z
        # Default axes to simple index positions when not supplied.
        if x:
            self.x = x
        else:
            self.x = range(len(z[0]))
        if y:
            self.y = y
        else:
            self.y = range(len(z))
        # With no explicit text, annotate each cell with its z value.
        if annotation_text is not None:
            self.annotation_text = annotation_text
        else:
            self.annotation_text = self.z
        self.colorscale = colorscale
        self.reversescale = reversescale
        self.font_colors = font_colors

    def get_text_color(self):
        """
        Get font color for annotations.

        The annotated heatmap can feature two text colors: min_text_color and
        max_text_color. The min_text_color is applied to annotations for
        heatmap values < (max_value - min_value)/2. The user can define these
        two colors. Otherwise the colors are defined logically as black or
        white depending on the heatmap's colorscale.

        :rtype (string, string) min_text_color, max_text_color: text
            color for annotations for heatmap values <
            (max_value - min_value)/2 and text color for annotations for
            heatmap values >= (max_value - min_value)/2
        """
        # Plotly colorscales ranging from a lighter shade to a darker shade
        # Bugfix: 'YlGnBu'/'YlOrRd' were misspelled with a capital I
        # ('YIGnBu'/'YIOrRd') and never matched the real scale names used
        # elsewhere in this module (see PLOTLY_SCALES).
        colorscales = ['Greys', 'Greens', 'Blues',
                       'YlGnBu', 'YlOrRd', 'RdBu',
                       'Picnic', 'Jet', 'Hot', 'Blackbody',
                       'Earth', 'Electric', 'Viridis']
        # Plotly colorscales ranging from a darker shade to a lighter shade
        colorscales_reverse = ['Reds']
        if self.font_colors:
            min_text_color = self.font_colors[0]
            max_text_color = self.font_colors[-1]
        elif self.colorscale in colorscales and self.reversescale:
            min_text_color = '#000000'
            max_text_color = '#FFFFFF'
        elif self.colorscale in colorscales:
            min_text_color = '#FFFFFF'
            max_text_color = '#000000'
        elif self.colorscale in colorscales_reverse and self.reversescale:
            min_text_color = '#FFFFFF'
            max_text_color = '#000000'
        elif self.colorscale in colorscales_reverse:
            min_text_color = '#000000'
            max_text_color = '#FFFFFF'
        elif isinstance(self.colorscale, list):
            if 'rgb' in self.colorscale[0][1]:
                # Bugfix: materialize to lists -- on Python 3 ``map``
                # returns a lazy iterator that is not subscriptable, so the
                # brightness checks below raised TypeError.
                min_col = list(map(
                    int, self.colorscale[0][1].strip('rgb()').split(',')))
                max_col = list(map(
                    int, self.colorscale[-1][1].strip('rgb()').split(',')))
            elif '#' in self.colorscale[0][1]:
                min_col = FigureFactory._hex_to_rgb(self.colorscale[0][1])
                max_col = FigureFactory._hex_to_rgb(self.colorscale[-1][1])
            else:
                min_col = [255, 255, 255]
                max_col = [255, 255, 255]

            # Perceived brightness (0.299/0.587/0.114 channel weights);
            # light endpoints get black text, dark endpoints get white.
            if (min_col[0]*0.299 + min_col[1]*0.587 + min_col[2]*0.114) > 186:
                min_text_color = '#000000'
            else:
                min_text_color = '#FFFFFF'
            if (max_col[0]*0.299 + max_col[1]*0.587 + max_col[2]*0.114) > 186:
                max_text_color = '#000000'
            else:
                max_text_color = '#FFFFFF'
        else:
            min_text_color = '#000000'
            max_text_color = '#000000'
        return min_text_color, max_text_color

    def get_z_mid(self):
        """
        Get the mid value of z matrix

        :rtype (float) z_avg: average val from z matrix
        """
        if _numpy_imported and isinstance(self.z, np.ndarray):
            z_min = np.amin(self.z)
            z_max = np.amax(self.z)
        else:
            z_min = min(min(self.z))
            z_max = max(max(self.z))
        z_mid = (z_max+z_min) / 2
        return z_mid

    def make_annotations(self):
        """
        Get annotations for each cell of the heatmap with graph_objs.Annotation

        :rtype (list[dict]) annotations: list of annotations for each cell of
            the heatmap
        """
        from plotly.graph_objs import graph_objs
        # Idiom: call through the instance instead of the explicit
        # unbound-method form.
        min_text_color, max_text_color = self.get_text_color()
        z_mid = self.get_z_mid()
        annotations = []
        for n, row in enumerate(self.z):
            for m, val in enumerate(row):
                # Below-midpoint cells use the "min" color, the rest "max".
                font_color = min_text_color if val < z_mid else max_text_color
                annotations.append(
                    graph_objs.Annotation(
                        text=str(self.annotation_text[n][m]),
                        x=self.x[m],
                        y=self.y[n],
                        xref='x1',
                        yref='y1',
                        font=dict(color=font_color),
                        showarrow=False))
        return annotations
class _Table(FigureFactory):
    """
    Refer to TraceFactory.create_table() for docstring
    """
    def __init__(self, table_text, colorscale, font_colors, index,
                 index_title, annotation_offset, **kwargs):
        from plotly.graph_objs import graph_objs
        if _pandas_imported and isinstance(table_text, pd.DataFrame):
            # Flatten a DataFrame into a list-of-rows with the column
            # headers prepended; optionally prepend the index as a column.
            headers = table_text.columns.tolist()
            table_text_index = table_text.index.tolist()
            table_text = table_text.values.tolist()
            table_text.insert(0, headers)
            if index:
                table_text_index.insert(0, index_title)
                for row, row_label in zip(table_text, table_text_index):
                    row.insert(0, row_label)
        self.table_text = table_text
        self.colorscale = colorscale
        self.font_colors = font_colors
        self.index = index
        self.annotation_offset = annotation_offset
        self.x = range(len(table_text[0]))
        self.y = range(len(table_text))

    def get_table_matrix(self):
        """
        Create the z matrix that gives the heatmap its striped table look.

        :rtype (list[list]) table_matrix: z matrix to make heatmap with
            striped table coloring.
        """
        width = len(self.table_text[0])
        table_matrix = []
        for row_number in range(len(self.table_text)):
            if row_number == 0:
                shade = 0       # header row
            elif row_number % 2:
                shade = .5      # odd rows
            else:
                shade = 1       # even rows
            table_matrix.append([shade] * width)
        if self.index:
            # The index column is shaded like the header.
            for row in table_matrix:
                row[0] = 0
        return table_matrix

    def get_table_font_color(self):
        """
        Fill font-color array.

        Table text color can vary by row so this extends a single color or
        creates an array to set a header color and two alternating colors to
        create the striped table pattern.

        :rtype (list[list]) all_font_colors: list of font colors for each row
            in table.
        """
        n_rows = len(self.table_text)
        if len(self.font_colors) == 1:
            return self.font_colors * n_rows
        if len(self.font_colors) == 3:
            header_color, odd_color, even_color = self.font_colors
            return [header_color if i == 0 else
                    odd_color if i % 2 else even_color
                    for i in range(n_rows)]
        if len(self.font_colors) == n_rows:
            return self.font_colors
        return ['#000000'] * n_rows

    def make_table_annotations(self):
        """
        Generate annotations to fill in table text

        :rtype (list) annotations: list of annotations for each cell of the
            table.
        """
        from plotly.graph_objs import graph_objs
        all_font_colors = self.get_table_font_color()
        annotations = []
        for row_idx, row in enumerate(self.table_text):
            for col_idx, cell in enumerate(row):
                # Bold text in the header row and in the index column.
                cell_text = str(cell)
                if row_idx == 0 or (self.index and col_idx < 1):
                    cell_text = '<b>' + cell_text + '</b>'
                # Match font color of index to font color of header.
                if self.index and col_idx == 0:
                    cell_color = self.font_colors[0]
                else:
                    cell_color = all_font_colors[row_idx]
                annotations.append(
                    graph_objs.Annotation(
                        text=cell_text,
                        x=self.x[col_idx] - self.annotation_offset,
                        y=self.y[row_idx],
                        xref='x1',
                        yref='y1',
                        align="left",
                        xanchor="left",
                        font=dict(color=cell_color),
                        showarrow=False))
        return annotations
| 39.181672 | 134 | 0.510714 |
from __future__ import absolute_import
from collections import OrderedDict
import warnings
import six
import math
import decimal
from plotly import utils
from plotly import exceptions
from plotly import graph_reference
from plotly import session
from plotly.files import (CONFIG_FILE, CREDENTIALS_FILE, FILE_CONTENT,
GRAPH_REFERENCE_FILE, check_file_permissions)
DEFAULT_PLOTLY_COLORS = ['rgb(31, 119, 180)', 'rgb(255, 127, 14)',
'rgb(44, 160, 44)', 'rgb(214, 39, 40)',
'rgb(148, 103, 189)', 'rgb(140, 86, 75)',
'rgb(227, 119, 194)', 'rgb(127, 127, 127)',
'rgb(188, 189, 34)', 'rgb(23, 190, 207)']
REQUIRED_GANTT_KEYS = ['Task', 'Start', 'Finish']
PLOTLY_SCALES = {'Greys': ['rgb(0,0,0)', 'rgb(255,255,255)'],
'YlGnBu': ['rgb(8,29,88)', 'rgb(255,255,217)'],
'Greens': ['rgb(0,68,27)', 'rgb(247,252,245)'],
'YlOrRd': ['rgb(128,0,38)', 'rgb(255,255,204)'],
'Bluered': ['rgb(0,0,255)', 'rgb(255,0,0)'],
'RdBu': ['rgb(5,10,172)', 'rgb(178,10,28)'],
'Reds': ['rgb(220,220,220)', 'rgb(178,10,28)'],
'Blues': ['rgb(5,10,172)', 'rgb(220,220,220)'],
'Picnic': ['rgb(0,0,255)', 'rgb(255,0,0)'],
'Rainbow': ['rgb(150,0,90)', 'rgb(255,0,0)'],
'Portland': ['rgb(12,51,131)', 'rgb(217,30,30)'],
'Jet': ['rgb(0,0,131)', 'rgb(128,0,0)'],
'Hot': ['rgb(0,0,0)', 'rgb(255,255,255)'],
'Blackbody': ['rgb(0,0,0)', 'rgb(160,200,255)'],
'Earth': ['rgb(0,0,130)', 'rgb(255,255,255)'],
'Electric': ['rgb(0,0,0)', 'rgb(255,250,220)'],
'Viridis': ['rgb(68,1,84)', 'rgb(253,231,37)']}
DEFAULT_FILLCOLOR = '#1f77b4'
DEFAULT_HISTNORM = 'probability density'
ALTERNATIVE_HISTNORM = 'probability'
def warning_on_one_line(message, category, filename, lineno,
                        file=None, line=None):
    """Render a warning as ``filename:lineno: Category:`` plus the message.

    Installed below as ``warnings.formatwarning`` so plotly warnings print
    as a compact block instead of the default format.
    """
    return '{0}:{1}: {2}:\n\n{3}\n\n'.format(
        filename, lineno, category.__name__, message)
warnings.formatwarning = warning_on_one_line
try:
from . import matplotlylib
_matplotlylib_imported = True
except ImportError:
_matplotlylib_imported = False
try:
import IPython
import IPython.core.display
_ipython_imported = True
except ImportError:
_ipython_imported = False
try:
import numpy as np
_numpy_imported = True
except ImportError:
_numpy_imported = False
try:
import pandas as pd
_pandas_imported = True
except ImportError:
_pandas_imported = False
try:
import scipy as scp
_scipy_imported = True
except ImportError:
_scipy_imported = False
try:
import scipy.spatial as scs
_scipy__spatial_imported = True
except ImportError:
_scipy__spatial_imported = False
try:
import scipy.cluster.hierarchy as sch
_scipy__cluster__hierarchy_imported = True
except ImportError:
_scipy__cluster__hierarchy_imported = False
try:
import scipy
import scipy.stats
_scipy_imported = True
except ImportError:
_scipy_imported = False
def get_config_defaults():
    """Return a mutable (shallow) copy of the default plotly config."""
    defaults = FILE_CONTENT[CONFIG_FILE]
    return dict(defaults)
def ensure_local_plotly_files():
    """Ensure the on-disk credentials/config/graph-reference files exist
    and contain exactly the expected keys, or warn if that is impossible.

    For each of the credentials and config files: create it if missing,
    add any expected keys that are absent (with default values), and drop
    any keys that are no longer expected. The graph reference file is
    simply (re)written from the bundled GRAPH_REFERENCE data.
    """
    if check_file_permissions():
        for fn in [CREDENTIALS_FILE, CONFIG_FILE]:
            utils.ensure_file_exists(fn)
            contents = utils.load_json_dict(fn)
            # Backfill any expected keys the file is missing.
            for key, val in list(FILE_CONTENT[fn].items()):
                if key not in contents:
                    contents[key] = val
            # Snapshot the keys first: we delete from `contents` below.
            contents_keys = list(contents.keys())
            for key in contents_keys:
                if key not in FILE_CONTENT[fn]:
                    del contents[key]
            utils.save_json_dict(fn, contents)
        # The graph reference is always overwritten with the bundled copy.
        utils.ensure_file_exists(GRAPH_REFERENCE_FILE)
        utils.save_json_dict(GRAPH_REFERENCE_FILE,
                             graph_reference.GRAPH_REFERENCE)
    else:
        # No writable home directory: tell the user to sign in explicitly.
        warnings.warn("Looks like you don't have 'read-write' permission to "
                      "your 'home' ('~') directory or to our '~/.plotly' "
                      "directory. That means plotly's python api can't setup "
                      "local configuration files. No problem though! You'll "
                      "just have to sign-in using 'plotly.plotly.sign_in()'. "
                      "For help with that: 'help(plotly.plotly.sign_in)'."
                      "\nQuestions? support@plot.ly")
api_key=None,
stream_ids=None,
proxy_username=None,
proxy_password=None):
if not check_file_permissions():
raise exceptions.PlotlyError("You don't have proper file permissions "
"to run this function.")
ensure_local_plotly_files() # make sure what's there is OK
credentials = get_credentials_file()
if isinstance(username, six.string_types):
credentials['username'] = username
if isinstance(api_key, six.string_types):
credentials['api_key'] = api_key
if isinstance(proxy_username, six.string_types):
credentials['proxy_username'] = proxy_username
if isinstance(proxy_password, six.string_types):
credentials['proxy_password'] = proxy_password
if isinstance(stream_ids, (list, tuple)):
credentials['stream_ids'] = stream_ids
utils.save_json_dict(CREDENTIALS_FILE, credentials)
ensure_local_plotly_files()
def get_credentials_file(*args):
    """Return the credentials stored on disk, falling back to the
    in-memory defaults when local files are not accessible."""
    if not check_file_permissions():
        # No read-write access: serve the packaged defaults instead.
        return FILE_CONTENT[CREDENTIALS_FILE]
    ensure_local_plotly_files()
    return utils.load_json_dict(CREDENTIALS_FILE, *args)
def reset_credentials_file():
    """Clear the credentials file, then restore its required keys."""
    ensure_local_plotly_files()  # make sure what's there is OK
    # Wipe all stored credentials...
    utils.save_json_dict(CREDENTIALS_FILE, {})
    # ...then re-run the check so the default keys are written back.
    ensure_local_plotly_files()
                    plotly_streaming_domain=None,
                    plotly_api_domain=None,
                    plotly_ssl_verification=None,
                    plotly_proxy_authorization=None,
                    world_readable=None,
                    sharing=None,
                    auto_open=None):
    # NOTE(review): the opening `def set_config_file(plotly_domain=None,`
    # line appears to be missing from this copy of the file; the lines
    # above are the remainder of that signature. Confirm against upstream.
    if not check_file_permissions():
        raise exceptions.PlotlyError("You don't have proper file permissions "
                                     "to run this function.")
    ensure_local_plotly_files()  # make sure what's there is OK
    # 'world_readable' and 'sharing' must agree with each other.
    utils.validate_world_readable_and_sharing_settings({
        'sharing': sharing, 'world_readable': world_readable})
    settings = get_config_file()
    # For each option: an accepted type replaces the stored value, None
    # leaves it untouched, anything else raises TypeError.
    if isinstance(plotly_domain, six.string_types):
        settings['plotly_domain'] = plotly_domain
    elif plotly_domain is not None:
        raise TypeError('plotly_domain should be a string')
    if isinstance(plotly_streaming_domain, six.string_types):
        settings['plotly_streaming_domain'] = plotly_streaming_domain
    elif plotly_streaming_domain is not None:
        raise TypeError('plotly_streaming_domain should be a string')
    if isinstance(plotly_api_domain, six.string_types):
        settings['plotly_api_domain'] = plotly_api_domain
    elif plotly_api_domain is not None:
        raise TypeError('plotly_api_domain should be a string')
    # NOTE(review): the next two checks accept str OR bool while the error
    # messages say 'boolean' -- check and message disagree; confirm intent.
    if isinstance(plotly_ssl_verification, (six.string_types, bool)):
        settings['plotly_ssl_verification'] = plotly_ssl_verification
    elif plotly_ssl_verification is not None:
        raise TypeError('plotly_ssl_verification should be a boolean')
    if isinstance(plotly_proxy_authorization, (six.string_types, bool)):
        settings['plotly_proxy_authorization'] = plotly_proxy_authorization
    elif plotly_proxy_authorization is not None:
        raise TypeError('plotly_proxy_authorization should be a boolean')
    if isinstance(auto_open, bool):
        settings['auto_open'] = auto_open
    elif auto_open is not None:
        raise TypeError('auto_open should be a boolean')
    if isinstance(world_readable, bool):
        settings['world_readable'] = world_readable
        # An explicit world_readable wins: drop 'sharing' so it can be
        # re-derived by set_sharing_and_world_readable below.
        settings.pop('sharing')
    elif world_readable is not None:
        raise TypeError('Input should be a boolean')
    if isinstance(sharing, six.string_types):
        settings['sharing'] = sharing
    elif sharing is not None:
        raise TypeError('sharing should be a string')
    utils.set_sharing_and_world_readable(settings)
    utils.save_json_dict(CONFIG_FILE, settings)
    ensure_local_plotly_files()
def get_config_file(*args):
    """Return the config settings stored on disk, falling back to the
    in-memory defaults when local files are not accessible."""
    if not check_file_permissions():
        # No read-write access: serve the packaged defaults instead.
        return FILE_CONTENT[CONFIG_FILE]
    ensure_local_plotly_files()
    return utils.load_json_dict(CONFIG_FILE, *args)
def reset_config_file():
    """Truncate the config file, then restore its required default keys."""
    ensure_local_plotly_files()  # make sure what's there is OK
    # Truncate via `with` so the handle is closed even if opening raises
    # (the original relied on a bare open()/close() pair).
    with open(CONFIG_FILE, 'w'):
        pass
    # Re-run the check so the now-empty file is repopulated with defaults.
    ensure_local_plotly_files()
d=None, width="100%", height=525):
plotly_rest_url = (session.get_session_config().get('plotly_domain') or
get_config_file()['plotly_domain'])
if file_id is None:
url = file_owner_or_url
if url[:len(plotly_rest_url)] != plotly_rest_url:
raise exceptions.PlotlyError(
"Because you didn't supply a 'file_id' in the call, "
"we're assuming you're trying to snag a figure from a url. "
"You supplied the url, '{0}', we expected it to start with "
"'{1}'."
"\nRun help on this function for more information."
"".format(url, plotly_rest_url))
urlsplit = six.moves.urllib.parse.urlparse(url)
file_owner = urlsplit.path.split('/')[1].split('~')[1]
file_id = urlsplit.path.split('/')[2]
query_dict = six.moves.urllib.parse.parse_qs(urlsplit.query)
if query_dict:
share_key = query_dict['share_key'][-1]
else:
share_key = ''
else:
file_owner = file_owner_or_url
share_key = ''
try:
test_if_int = int(file_id)
except ValueError:
raise exceptions.PlotlyError(
"The 'file_id' argument was not able to be converted into an "
"integer number. Make sure that the positional 'file_id' argument "
"is a number that can be converted into an integer or a string "
"that can be converted into an integer."
)
if int(file_id) < 0:
raise exceptions.PlotlyError(
"The 'file_id' argument must be a non-negative number."
)
if share_key is '':
s = ("<iframe id=\"igraph\" scrolling=\"no\" style=\"border:none;\" "
"seamless=\"seamless\" "
"src=\"{plotly_rest_url}/"
"~{file_owner}/{file_id}.embed\" "
"height=\"{iframe_height}\" width=\"{iframe_width}\">"
"</iframe>").format(
plotly_rest_url=plotly_rest_url,
file_owner=file_owner, file_id=file_id,
iframe_height=height, iframe_width=width)
else:
s = ("<iframe id=\"igraph\" scrolling=\"no\" style=\"border:none;\" "
"seamless=\"seamless\" "
"src=\"{plotly_rest_url}/"
"~{file_owner}/{file_id}.embed?share_key={share_key}\" "
"height=\"{iframe_height}\" width=\"{iframe_width}\">"
"</iframe>").format(
plotly_rest_url=plotly_rest_url,
file_owner=file_owner, file_id=file_id, share_key=share_key,
iframe_height=height, iframe_width=width)
return s
def embed(file_owner_or_url, file_id=None, width="100%", height=525):
    """Display an existing plot.ly figure in the current environment.

    Works inside SageMathCloud (via its `html` helper) or IPython (via a
    PlotlyDisplay object); in any other environment it only warns and
    returns None.
    """
    try:
        s = get_embed(file_owner_or_url, file_id=file_id, width=width,
                      height=height)
        # If this import succeeds we are running inside SageMathCloud.
        from sage_salvus import html
        return html(s, hide=False)
    except:
        # NOTE(review): the bare except is used to detect "not in Sage"
        # (the import above fails), but it also silently swallows errors
        # raised by get_embed() -- confirm before narrowing to ImportError.
        pass
    if _ipython_imported:
        if file_id:
            # Build the plot url from the configured domain + owner + id.
            plotly_domain = (
                session.get_session_config().get('plotly_domain') or
                get_config_file()['plotly_domain']
            )
            url = "{plotly_domain}/~{un}/{fid}".format(
                plotly_domain=plotly_domain,
                un=file_owner_or_url,
                fid=file_id)
        else:
            url = file_owner_or_url
        return PlotlyDisplay(url, width, height)
    else:
        # Pick the feedback address depending on whether a custom
        # (on-premise) plotly domain is configured for this session.
        if (get_config_defaults()['plotly_domain']
                != session.get_session_config()['plotly_domain']):
            feedback_email = 'feedback@plot.ly'
        else:
            feedback_email = 'support@plot.ly'
        warnings.warn(
            "Looks like you're not using IPython or Sage to embed this "
            "plot. If you just want the *embed code*,\ntry using "
            "`get_embed()` instead."
            '\nQuestions? {}'.format(feedback_email))
### mpl-related tools ###
@utils.template_doc(**get_config_file())
def mpl_to_plotly(fig, resize=False, strip_style=False, verbose=False):
    """Convert a matplotlib figure to a plotly figure.

    Returns the converted figure, or None (with a warning) when the
    matplotlylib bridge is unavailable.
    """
    if not _matplotlylib_imported:
        # matplotlib (or one of its dependencies) failed to import at
        # module load time, so conversion is impossible.
        warnings.warn(
            "To use Plotly's matplotlylib functionality, you'll need to have "
            "matplotlib successfully installed with all of its dependencies. "
            "You're getting this error because matplotlib or one of its "
            "dependencies doesn't seem to be installed correctly.")
        return
    renderer = matplotlylib.PlotlyRenderer()
    matplotlylib.Exporter(renderer).run(fig)
    if resize:
        renderer.resize()
    if strip_style:
        renderer.strip_style()
    if verbose:
        print(renderer.msg)
    return renderer.plotly_fig
### graph_objs related tools ###
def get_subplots(rows=1, columns=1, print_grid=False, **kwargs):
    """Return a Figure whose layout tiles `rows` x `columns` axis pairs.

    Deprecated: use tools.make_subplots instead.

    Keyword arguments:
    horizontal_spacing (float, default 0.2 / columns) -- gap between
        columns as a fraction of the plot width.
    vertical_spacing (float, default 0.3 / rows) -- gap between rows as
        a fraction of the plot height.

    Raises Exception for non-positive int rows/columns or unknown kwargs.
    """
    # TODO: protected until #282
    from plotly.graph_objs import graph_objs
    # Fixed: 'deprecated' was misspelled ('depreciated') in the warning.
    warnings.warn(
        "tools.get_subplots is deprecated. "
        "Please use tools.make_subplots instead."
    )
    # Throw exception for non-integer rows and columns
    if not isinstance(rows, int) or rows <= 0:
        raise Exception("Keyword argument 'rows' "
                        "must be an int greater than 0")
    if not isinstance(columns, int) or columns <= 0:
        raise Exception("Keyword argument 'columns' "
                        "must be an int greater than 0")
    # Throw exception if non-valid kwarg is sent
    VALID_KWARGS = ['horizontal_spacing', 'vertical_spacing']
    for key in kwargs.keys():
        if key not in VALID_KWARGS:
            raise Exception("Invalid keyword argument: '{0}'".format(key))
    # Spacing defaults scale with the grid size (kwargs.get replaces the
    # original try/except-KeyError pattern; behavior is identical).
    horizontal_spacing = float(kwargs.get('horizontal_spacing',
                                          0.2 / columns))
    vertical_spacing = float(kwargs.get('vertical_spacing', 0.3 / rows))
    fig = dict(layout=graph_objs.Layout())  # will return this at the end
    plot_width = (1 - horizontal_spacing * (columns - 1)) / columns
    plot_height = (1 - vertical_spacing * (rows - 1)) / rows
    plot_num = 0
    # Create one (xaxisN, yaxisN) pair per cell, anchored to each other.
    for rrr in range(rows):
        for ccc in range(columns):
            xaxis_name = 'xaxis{0}'.format(plot_num + 1)
            x_anchor = 'y{0}'.format(plot_num + 1)
            x_start = (plot_width + horizontal_spacing) * ccc
            x_end = x_start + plot_width
            yaxis_name = 'yaxis{0}'.format(plot_num + 1)
            y_anchor = 'x{0}'.format(plot_num + 1)
            y_start = (plot_height + vertical_spacing) * rrr
            y_end = y_start + plot_height
            xaxis = graph_objs.XAxis(domain=[x_start, x_end], anchor=x_anchor)
            fig['layout'][xaxis_name] = xaxis
            yaxis = graph_objs.YAxis(domain=[y_start, y_end], anchor=y_anchor)
            fig['layout'][yaxis_name] = yaxis
            plot_num += 1
    if print_grid:
        print("This is the format of your plot grid!")
        grid_string = ""
        plot = 1
        # Build the rows bottom-up so row 1 prints at the bottom.
        for rrr in range(rows):
            grid_line = ""
            for ccc in range(columns):
                grid_line += "[{0}]\t".format(plot)
                plot += 1
            grid_string = grid_line + '\n' + grid_string
        print(grid_string)
    return graph_objs.Figure(fig)  # forces us to validate what we just did...
def make_subplots(rows=1, cols=1,
                  shared_xaxes=False, shared_yaxes=False,
                  start_cell='top-left', print_grid=True,
                  **kwargs):
    """Return a Figure with a pre-built grid of subplot axes.

    Arguments:
    rows / cols (int > 0) -- dimensions of the subplot grid.
    shared_xaxes / shared_yaxes (bool or list of (row, col) tuples) --
        link axes across the whole grid (bool) or between listed cells.
    start_cell ('top-left' or 'bottom-left') -- where cell (1, 1) sits.
    print_grid (bool) -- print an ASCII picture of the resulting grid.

    Supported kwargs: 'horizontal_spacing', 'vertical_spacing', 'specs'
    (list-of-lists of per-cell option dicts or None), 'insets' (list of
    inset-axis dicts) and 'subplot_titles' (list of str).

    Raises Exception on invalid arguments. Returns a graph_objs.Figure
    whose layout holds the axes, plus '_grid_ref' / '_grid_str' helpers
    stashed on the figure's __dict__.
    """
    # TODO: protected until #282
    from plotly.graph_objs import graph_objs
    # Throw exception for non-integer rows and cols
    if not isinstance(rows, int) or rows <= 0:
        raise Exception("Keyword argument 'rows' "
                        "must be an int greater than 0")
    if not isinstance(cols, int) or cols <= 0:
        raise Exception("Keyword argument 'cols' "
                        "must be an int greater than 0")
    # Dictionary of things start_cell
    START_CELL_all = {
        'bottom-left': {
            # 'natural' setup where x & y domains increase monotonically
            'col_dir': 1,
            'row_dir': 1
        },
        'top-left': {
            # 'default' setup visually matching the 'specs' list of lists
            'col_dir': 1,
            'row_dir': -1
        }
        # TODO maybe add 'bottom-right' and 'top-right'
    }
    # Throw exception for invalid 'start_cell' values
    try:
        START_CELL = START_CELL_all[start_cell]
    except KeyError:
        raise Exception("Invalid 'start_cell' value")
    # Throw exception if non-valid kwarg is sent
    VALID_KWARGS = ['horizontal_spacing', 'vertical_spacing',
                    'specs', 'insets', 'subplot_titles']
    for key in kwargs.keys():
        if key not in VALID_KWARGS:
            raise Exception("Invalid keyword argument: '{0}'".format(key))
    # Set 'subplot_titles'
    subplot_titles = kwargs.get('subplot_titles', [""] * rows * cols)
    # Set 'horizontal_spacing' / 'vertical_spacing' w.r.t. rows / cols
    try:
        horizontal_spacing = float(kwargs['horizontal_spacing'])
    except KeyError:
        horizontal_spacing = 0.2 / cols
    try:
        vertical_spacing = float(kwargs['vertical_spacing'])
    except KeyError:
        # Titles need extra headroom between rows.
        if 'subplot_titles' in kwargs:
            vertical_spacing = 0.5 / rows
        else:
            vertical_spacing = 0.3 / rows
    # Sanitize 'specs' (must be a list of lists)
    exception_msg = "Keyword argument 'specs' must be a list of lists"
    try:
        specs = kwargs['specs']
        if not isinstance(specs, list):
            raise Exception(exception_msg)
        else:
            for spec_row in specs:
                if not isinstance(spec_row, list):
                    raise Exception(exception_msg)
    except KeyError:
        specs = [[{}
                  for c in range(cols)]
                 for r in range(rows)]  # default 'specs'
    # Throw exception if specs is over or under specified
    if len(specs) != rows:
        raise Exception("The number of rows in 'specs' "
                        "must be equal to 'rows'")
    for r, spec_row in enumerate(specs):
        if len(spec_row) != cols:
            raise Exception("The number of columns in 'specs' "
                            "must be equal to 'cols'")
    # Sanitize 'insets'
    try:
        insets = kwargs['insets']
        if not isinstance(insets, list):
            raise Exception("Keyword argument 'insets' must be a list")
    except KeyError:
        insets = False
    # Throw exception if non-valid key / fill in defaults
    def _check_keys_and_fill(name, arg, defaults):
        # Validate every option dict in `arg` against `defaults`, filling
        # in missing keys in place.
        def _checks(item, defaults):
            if item is None:
                return
            if not isinstance(item, dict):
                raise Exception("Items in keyword argument '{name}' must be "
                                "dictionaries or None".format(name=name))
            for k in item.keys():
                if k not in defaults.keys():
                    raise Exception("Invalid key '{k}' in keyword "
                                    "argument '{name}'".format(k=k, name=name))
            for k in defaults.keys():
                if k not in item.keys():
                    item[k] = defaults[k]
        for arg_i in arg:
            if isinstance(arg_i, list):
                for arg_ii in arg_i:
                    _checks(arg_ii, defaults)
            elif isinstance(arg_i, dict):
                _checks(arg_i, defaults)
    # Default spec key-values
    SPEC_defaults = dict(
        is_3d=False,
        colspan=1,
        rowspan=1,
        l=0.0,
        r=0.0,
        b=0.0,
        t=0.0
        # TODO add support for 'w' and 'h'
    )
    _check_keys_and_fill('specs', specs, SPEC_defaults)
    # Default inset key-values
    if insets:
        INSET_defaults = dict(
            cell=(1, 1),
            is_3d=False,
            l=0.0,
            w='to_end',
            b=0.0,
            h='to_end'
        )
        _check_keys_and_fill('insets', insets, INSET_defaults)
    # Set width & height of each subplot cell (excluding padding)
    width = (1. - horizontal_spacing * (cols - 1)) / cols
    height = (1. - vertical_spacing * (rows - 1)) / rows
    # Built row/col sequence using 'row_dir' and 'col_dir'
    COL_DIR = START_CELL['col_dir']
    ROW_DIR = START_CELL['row_dir']
    col_seq = range(cols)[::COL_DIR]
    row_seq = range(rows)[::ROW_DIR]
    # [grid] Build subplot grid (coord tuple of cell)
    grid = [[((width + horizontal_spacing) * c,
              (height + vertical_spacing) * r)
             for c in col_seq]
            for r in row_seq]
    # [grid_ref] Initialize the grid and insets' axis-reference lists
    grid_ref = [[None for c in range(cols)] for r in range(rows)]
    insets_ref = [None for inset in range(len(insets))] if insets else None
    layout = graph_objs.Layout()
    def _get_label(x_or_y, r, c, cnt, shared_axes):
        # Return the axis label ('x3', 'y2', ...) for cell (r, c); shared
        # axes reuse the label of the first cell in their group.
        label = "{x_or_y}{cnt}".format(x_or_y=x_or_y, cnt=cnt)
        if isinstance(shared_axes, bool):
            if shared_axes:
                if x_or_y == 'x':
                    label = "{x_or_y}{c}".format(x_or_y=x_or_y, c=c + 1)
                if x_or_y == 'y':
                    label = "{x_or_y}{r}".format(x_or_y=x_or_y, r=r + 1)
        if isinstance(shared_axes, list):
            if isinstance(shared_axes[0], tuple):
                shared_axes = [shared_axes]
            for shared_axis in shared_axes:
                if (r + 1, c + 1) in shared_axis:
                    label = {
                        'x': "x{0}".format(shared_axis[0][1]),
                        'y': "y{0}".format(shared_axis[0][0])
                    }[x_or_y]
        return label
    ANCHOR_ROW = 0 if ROW_DIR > 0 else rows - 1
    def _get_anchors(r, c, x_cnt, y_cnt, shared_xaxes, shared_yaxes):
        # Return the ('x_anchor', 'y_anchor') pair for cell (r, c);
        # False means "do not add this axis", 'free' detaches it.
        x_anchor = "y{y_cnt}".format(y_cnt=y_cnt)
        y_anchor = "x{x_cnt}".format(x_cnt=x_cnt)
        if isinstance(shared_xaxes, bool):
            if shared_xaxes:
                if r != ANCHOR_ROW:
                    x_anchor = False
                    y_anchor = 'free'
                    if shared_yaxes and c != 0:
                        y_anchor = False
                    return x_anchor, y_anchor
        elif isinstance(shared_xaxes, list):
            if isinstance(shared_xaxes[0], tuple):
                shared_xaxes = [shared_xaxes]
            for shared_xaxis in shared_xaxes:
                if (r + 1, c + 1) in shared_xaxis[1:]:
                    x_anchor = False
                    y_anchor = 'free'
        if isinstance(shared_yaxes, bool):
            if shared_yaxes:
                if c != 0:
                    y_anchor = False
                    x_anchor = 'free'
                    if shared_xaxes and r != ANCHOR_ROW:
                        x_anchor = False
                    return x_anchor, y_anchor
        elif isinstance(shared_yaxes, list):
            if isinstance(shared_yaxes[0], tuple):
                shared_yaxes = [shared_yaxes]
            for shared_yaxis in shared_yaxes:
                if (r + 1, c + 1) in shared_yaxis[1:]:
                    y_anchor = False
                    x_anchor = 'free'
        return x_anchor, y_anchor
    list_of_domains = []
    def _add_domain(layout, x_or_y, label, domain, anchor, position):
        # Create an XAxis/YAxis with the given domain, register it on
        # `layout` under e.g. 'xaxis3', and remember the domain (used for
        # subplot-title placement later).
        name = label[0] + 'axis' + label[1:]
        graph_obj = '{X_or_Y}Axis'.format(X_or_Y=x_or_y.upper())
        axis = getattr(graph_objs, graph_obj)(domain=domain)
        if anchor:
            axis['anchor'] = anchor
        if isinstance(position, float):
            axis['position'] = position
        layout[name] = axis
        list_of_domains.append(domain)
    def _add_domain_is_3d(layout, s_label, x_domain, y_domain):
        # 3d cells get a Scene instead of a cartesian axis pair.
        scene = graph_objs.Scene(domain={'x': x_domain, 'y': y_domain})
        layout[s_label] = scene
    x_cnt = y_cnt = s_cnt = 1
    # Main placement loop: compute each (possibly spanned) cell's domain
    # and attach the corresponding axes / scene to the layout.
    for r, spec_row in enumerate(specs):
        for c, spec in enumerate(spec_row):
            if spec is None:
                continue
            c_spanned = c + spec['colspan'] - 1
            r_spanned = r + spec['rowspan'] - 1
            if c_spanned >= cols:
                raise Exception("Some 'colspan' value is too large for "
                                "this subplot grid.")
            if r_spanned >= rows:
                raise Exception("Some 'rowspan' value is too large for "
                                "this subplot grid.")
            x_s = grid[r][c][0] + spec['l']
            x_e = grid[r][c_spanned][0] + width - spec['r']
            x_domain = [x_s, x_e]
            if ROW_DIR > 0:
                y_s = grid[r][c][1] + spec['b']
                y_e = grid[r_spanned][c][1] + height - spec['t']
            else:
                y_s = grid[r_spanned][c][1] + spec['b']
                y_e = grid[r][c][1] + height - spec['t']
            y_domain = [y_s, y_e]
            if spec['is_3d']:
                s_label = 'scene{0}'.format(s_cnt)
                _add_domain_is_3d(layout, s_label, x_domain, y_domain)
                grid_ref[r][c] = (s_label, )
                s_cnt += 1
            else:
                x_label = _get_label('x', r, c, x_cnt, shared_xaxes)
                y_label = _get_label('y', r, c, y_cnt, shared_yaxes)
                x_anchor, y_anchor = _get_anchors(r, c,
                                                  x_cnt, y_cnt,
                                                  shared_xaxes,
                                                  shared_yaxes)
                if x_anchor:
                    if x_anchor == 'free':
                        x_position = y_domain[0]
                    else:
                        x_position = False
                    _add_domain(layout, 'x', x_label, x_domain,
                                x_anchor, x_position)
                    x_cnt += 1
                if y_anchor:
                    if y_anchor == 'free':
                        y_position = x_domain[0]
                    else:
                        y_position = False
                    _add_domain(layout, 'y', y_label, y_domain,
                                y_anchor, y_position)
                    y_cnt += 1
                grid_ref[r][c] = (x_label, y_label)
    # Inset axes are placed relative to their host cell's domain.
    if insets:
        for i_inset, inset in enumerate(insets):
            r = inset['cell'][0] - 1
            c = inset['cell'][1] - 1
            if not (0 <= r < rows):
                raise Exception("Some 'cell' row value is out of range. "
                                "Note: the starting cell is (1, 1)")
            if not (0 <= c < cols):
                raise Exception("Some 'cell' col value is out of range. "
                                "Note: the starting cell is (1, 1)")
            x_s = grid[r][c][0] + inset['l'] * width
            if inset['w'] == 'to_end':
                x_e = grid[r][c][0] + width
            else:
                x_e = x_s + inset['w'] * width
            x_domain = [x_s, x_e]
            y_s = grid[r][c][1] + inset['b'] * height
            if inset['h'] == 'to_end':
                y_e = grid[r][c][1] + height
            else:
                y_e = y_s + inset['h'] * height
            y_domain = [y_s, y_e]
            if inset['is_3d']:
                s_label = 'scene{0}'.format(s_cnt)
                _add_domain_is_3d(layout, s_label, x_domain, y_domain)
                insets_ref[i_inset] = (s_label, )
                s_cnt += 1
            else:
                x_label = _get_label('x', False, False, x_cnt, False)
                y_label = _get_label('y', False, False, y_cnt, False)
                x_anchor, y_anchor = _get_anchors(r, c,
                                                  x_cnt, y_cnt,
                                                  False, False)
                _add_domain(layout, 'x', x_label, x_domain, x_anchor, False)
                x_cnt += 1
                _add_domain(layout, 'y', y_label, y_domain, y_anchor, False)
                y_cnt += 1
                insets_ref[i_inset] = (x_label, y_label)
    # Pieces used to render the ASCII picture of the grid.
    sp = " "  # space between cell
    s_str = "[ "  # cell start string
    e_str = " ]"  # cell end string
    colspan_str = ' -'  # colspan string
    rowspan_str = ' |'  # rowspan string
    empty_str = ' (empty) '  # empty cell string
    # Init grid_str with intro message
    grid_str = "This is the format of your plot grid:\n"
    # Init tmp list of lists of strings (sorta like 'grid_ref' but w/ strings)
    _tmp = [['' for c in range(cols)] for r in range(rows)]
    # Define cell string as function of (r, c) and grid_ref
    def _get_cell_str(r, c, ref):
        return '({r},{c}) {ref}'.format(r=r + 1, c=c + 1, ref=','.join(ref))
    # Find max len of _cell_str, add define a padding function
    cell_len = max([len(_get_cell_str(r, c, ref))
                    for r, row_ref in enumerate(grid_ref)
                    for c, ref in enumerate(row_ref)
                    if ref]) + len(s_str) + len(e_str)
    def _pad(s, cell_len=cell_len):
        # Right-pad `s` with spaces so all cells line up in the picture.
        return ' ' * (cell_len - len(s))
    # Loop through specs, fill in _tmp
    for r, spec_row in enumerate(specs):
        for c, spec in enumerate(spec_row):
            ref = grid_ref[r][c]
            if ref is None:
                if _tmp[r][c] == '':
                    _tmp[r][c] = empty_str + _pad(empty_str)
                continue
            cell_str = s_str + _get_cell_str(r, c, ref)
            if spec['colspan'] > 1:
                for cc in range(1, spec['colspan'] - 1):
                    _tmp[r][c + cc] = colspan_str + _pad(colspan_str)
                _tmp[r][c + spec['colspan'] - 1] = (
                    colspan_str + _pad(colspan_str + e_str)) + e_str
            else:
                cell_str += e_str
            if spec['rowspan'] > 1:
                for rr in range(1, spec['rowspan'] - 1):
                    _tmp[r + rr][c] = rowspan_str + _pad(rowspan_str)
                for cc in range(spec['colspan']):
                    _tmp[r + spec['rowspan'] - 1][c + cc] = (
                        rowspan_str + _pad(rowspan_str))
            _tmp[r][c] = cell_str + _pad(cell_str)
    # Append grid_str using data from _tmp in the correct order
    for r in row_seq[::-1]:
        grid_str += sp.join(_tmp[r]) + '\n'
    # Append grid_str to include insets info
    if insets:
        grid_str += "\nWith insets:\n"
        for i_inset, inset in enumerate(insets):
            r = inset['cell'][0] - 1
            c = inset['cell'][1] - 1
            ref = grid_ref[r][c]
            grid_str += (
                s_str + ','.join(insets_ref[i_inset]) + e_str +
                ' over ' +
                s_str + _get_cell_str(r, c, ref) + e_str + '\n'
            )
    # Add subplot titles
    # If shared_axes is False (default) use list_of_domains
    # This is used for insets and irregular layouts
    if not shared_xaxes and not shared_yaxes:
        x_dom = list_of_domains[::2]
        y_dom = list_of_domains[1::2]
        subtitle_pos_x = []
        subtitle_pos_y = []
        for x_domains in x_dom:
            subtitle_pos_x.append(sum(x_domains) / 2)
        for y_domains in y_dom:
            subtitle_pos_y.append(y_domains[1])
    # If shared_axes is True the domain of each subplot is not returned so the
    # title position must be calculated for each subplot
    else:
        # NOTE(review): x_e / y_e / x_s here are leftovers from the
        # placement loops above (i.e. the extents of the LAST cell
        # processed). Shared-axes title positioning implicitly assumes a
        # uniform grid -- confirm against upstream before changing.
        subtitle_pos_x = [None] * cols
        subtitle_pos_y = [None] * rows
        delt_x = (x_e - x_s)
        for index in range(cols):
            subtitle_pos_x[index] = ((delt_x / 2) +
                                     ((delt_x + horizontal_spacing) * index))
        subtitle_pos_x *= rows
        for index in range(rows):
            subtitle_pos_y[index] = (1 - ((y_e + vertical_spacing) * index))
        subtitle_pos_y *= cols
        subtitle_pos_y = sorted(subtitle_pos_y, reverse=True)
    # One paper-referenced annotation per non-empty subplot title.
    plot_titles = []
    for index in range(len(subplot_titles)):
        if not subplot_titles[index]:
            pass
        else:
            plot_titles.append({'y': subtitle_pos_y[index],
                                'xref': 'paper',
                                'x': subtitle_pos_x[index],
                                'yref': 'paper',
                                'text': subplot_titles[index],
                                'showarrow': False,
                                'font': graph_objs.Font(size=16),
                                'xanchor': 'center',
                                'yanchor': 'bottom'
                                })
    layout['annotations'] = plot_titles
    if print_grid:
        print(grid_str)
    fig = graph_objs.Figure(layout=layout)
    fig.__dict__['_grid_ref'] = grid_ref
    fig.__dict__['_grid_str'] = grid_str
    return fig
def get_valid_graph_obj(obj, obj_type=None):
    """Construct a graph object of class `obj_type` from `obj` without
    raising on invalid entries (validation errors are suppressed)."""
    # TODO: Deprecate or move. #283
    from plotly.graph_objs import graph_objs
    try:
        graph_obj_class = getattr(graph_objs, obj_type)
    except (AttributeError, KeyError):
        raise exceptions.PlotlyError(
            "'{}' is not a recognized graph_obj.".format(obj_type)
        )
    return graph_obj_class(obj, _raise=False)
def validate(obj, obj_type):
    """Raise PlotlyError unless `obj` can be built as a valid graph
    object of type `obj_type`."""
    # TODO: Deprecate or move. #283
    from plotly.graph_objs import graph_objs
    # Accept human-readable type strings by mapping them to class names.
    if obj_type not in graph_reference.CLASSES:
        obj_type = graph_reference.string_to_class_name(obj_type)
    try:
        graph_obj_class = getattr(graph_objs, obj_type)
    except AttributeError:
        raise exceptions.PlotlyError(
            "'{0}' is not a recognizable graph_obj.".
            format(obj_type))
    graph_obj_class(obj)  # this will raise on invalid keys/items
def _replace_newline(obj):
    """Recursively convert '\\n' to '<br>' in the strings of a plot object.

    Dicts and lists are rebuilt (via comprehensions) with converted
    values, so callers' containers are never mutated; strings get their
    newlines replaced (with a one-time warning); anything else is
    returned untouched by reference.
    """
    if isinstance(obj, dict):
        # Rebuild rather than mutate -- same contract as the original,
        # expressed as a comprehension instead of a manual loop.
        return {key: _replace_newline(val) for key, val in obj.items()}
    elif isinstance(obj, list):
        return [_replace_newline(entry) for entry in obj]
    elif isinstance(obj, six.string_types):
        s = obj.replace('\n', '<br>')
        if s != obj:
            warnings.warn("Looks like you used a newline character: '\\n'.\n\n"
                          "Plotly uses a subset of HTML escape characters\n"
                          "to do things like newline (<br>), bold (<b></b>),\n"
                          "italics (<i></i>), etc. Your newline characters \n"
                          "have been converted to '<br>' so they will show \n"
                          "up right on your Plotly figure!")
        return s
    else:
        return obj  # we return the actual reference... but DON'T mutate.
if _ipython_imported:
    class PlotlyDisplay(IPython.core.display.HTML):
        """An IPython rich-display object that renders a plot.ly figure
        via its iframe embed code."""
        def __init__(self, url, width, height):
            # Keep the raw url and pre-render the embed HTML exactly once.
            self.resource = url
            self.embed_code = get_embed(url, width=width, height=height)
            super(PlotlyDisplay, self).__init__(data=self.embed_code)
        def _repr_html_(self):
            # IPython display hook: return the cached iframe HTML.
            return self.embed_code
def return_figure_from_figure_or_data(figure_or_data, validate_figure):
    """Coerce a dict- or list-valued plot argument into a figure dict.

    A dict is taken as a full figure; a list is wrapped as {'data': ...}.
    When `validate_figure` is truthy the result is checked by building a
    graph_objs.Figure from it.

    Raises PlotlyError for other input types or failed validation, and
    PlotlyEmptyDataError when the figure carries no data traces.
    """
    from plotly.graph_objs import graph_objs
    if isinstance(figure_or_data, dict):
        figure = figure_or_data
    elif isinstance(figure_or_data, list):
        figure = {'data': figure_or_data}
    else:
        raise exceptions.PlotlyError("The `figure_or_data` positional "
                                     "argument must be either "
                                     "`dict`-like or `list`-like.")
    if validate_figure:
        try:
            graph_objs.Figure(figure)
        except exceptions.PlotlyError as err:
            raise exceptions.PlotlyError("Invalid 'figure_or_data' argument. "
                                         "Plotly will not be able to properly "
                                         "parse the resulting JSON. If you "
                                         "want to send this 'figure_or_data' "
                                         "to Plotly anyway (not recommended), "
                                         "you can set 'validate=False' as a "
                                         "plot option.\nHere's why you're "
                                         "seeing this error:\n\n{0}"
                                         "".format(err))
    # Use .get(): a dict input may lack a 'data' key entirely; the
    # original `figure['data']` raised a bare KeyError instead of the
    # informative PlotlyEmptyDataError below.
    if not figure.get('data'):
        raise exceptions.PlotlyEmptyDataError(
            "Empty data list found. Make sure that you populated the "
            "list of data objects you're sending and try again.\n"
            "Questions? support@plot.ly"
        )
    return figure
# Default colours for finance charts
_DEFAULT_INCREASING_COLOR = '
_DEFAULT_DECREASING_COLOR = '
DIAG_CHOICES = ['scatter', 'histogram', 'box']
VALID_COLORMAP_TYPES = ['cat', 'seq']
class FigureFactory(object):
@staticmethod
def _make_colorscale(colors, scale=None):
colorscale = []
if not scale:
for j, color in enumerate(colors):
colorscale.append([j * 1./(len(colors) - 1), color])
return colorscale
else:
colorscale = [list(tup) for tup in zip(scale, colors)]
return colorscale
@staticmethod
def _convert_colorscale_to_rgb(colorscale):
for color in colorscale:
color[1] = FigureFactory._convert_to_RGB_255(
color[1]
)
for color in colorscale:
color[1] = FigureFactory._label_rgb(
color[1]
)
return colorscale
@staticmethod
def _make_linear_colorscale(colors):
scale = 1./(len(colors) - 1)
return[[i * scale, color] for i, color in enumerate(colors)]
@staticmethod
def create_2D_density(x, y, colorscale='Earth', ncontours=20,
hist_color=(0, 0, 0.5), point_color=(0, 0, 0.5),
point_size=2, title='2D Density Plot',
height=600, width=600):
from plotly.graph_objs import graph_objs
from numbers import Number
# validate x and y are filled with numbers only
for array in [x, y]:
if not all(isinstance(element, Number) for element in array):
raise exceptions.PlotlyError(
"All elements of your 'x' and 'y' lists must be numbers."
)
# validate x and y are the same length
if len(x) != len(y):
raise exceptions.PlotlyError(
"Both lists 'x' and 'y' must be the same length."
)
colorscale = FigureFactory._validate_colors(colorscale, 'rgb')
colorscale = FigureFactory._make_linear_colorscale(colorscale)
# validate hist_color and point_color
hist_color = FigureFactory._validate_colors(hist_color, 'rgb')
point_color = FigureFactory._validate_colors(point_color, 'rgb')
trace1 = graph_objs.Scatter(
x=x, y=y, mode='markers', name='points',
marker=dict(
color=point_color[0],
size=point_size,
opacity=0.4
)
)
trace2 = graph_objs.Histogram2dcontour(
x=x, y=y, name='density', ncontours=ncontours,
colorscale=colorscale, reversescale=True, showscale=False
)
trace3 = graph_objs.Histogram(
x=x, name='x density',
marker=dict(color=hist_color[0]), yaxis='y2'
)
trace4 = graph_objs.Histogram(
y=y, name='y density',
marker=dict(color=hist_color[0]), xaxis='x2'
)
data = [trace1, trace2, trace3, trace4]
layout = graph_objs.Layout(
showlegend=False,
autosize=False,
title=title,
height=height,
width=width,
xaxis=dict(
domain=[0, 0.85],
showgrid=False,
zeroline=False
),
yaxis=dict(
domain=[0, 0.85],
showgrid=False,
zeroline=False
),
margin=dict(
t=50
),
hovermode='closest',
bargap=0,
xaxis2=dict(
domain=[0.85, 1],
showgrid=False,
zeroline=False
),
yaxis2=dict(
domain=[0.85, 1],
showgrid=False,
zeroline=False
)
)
fig = graph_objs.Figure(data=data, layout=layout)
return fig
@staticmethod
def _validate_gantt(df):
if _pandas_imported and isinstance(df, pd.core.frame.DataFrame):
# validate that df has all the required keys
for key in REQUIRED_GANTT_KEYS:
if key not in df:
raise exceptions.PlotlyError(
"The columns in your dataframe must include the "
"keys".format(REQUIRED_GANTT_KEYS)
)
num_of_rows = len(df.index)
chart = []
for index in range(num_of_rows):
task_dict = {}
for key in df:
task_dict[key] = df.ix[index][key]
chart.append(task_dict)
return chart
# validate if df is a list
if not isinstance(df, list):
raise exceptions.PlotlyError("You must input either a dataframe "
"or a list of dictionaries.")
# validate if df is empty
if len(df) <= 0:
raise exceptions.PlotlyError("Your list is empty. It must contain "
"at least one dictionary.")
if not isinstance(df[0], dict):
raise exceptions.PlotlyError("Your list must only "
"include dictionaries.")
return df
@staticmethod
def _gantt(chart, colors, title, bar_width, showgrid_x, showgrid_y,
height, width, tasks=None, task_names=None, data=None):
if tasks is None:
tasks = []
if task_names is None:
task_names = []
if data is None:
data = []
for index in range(len(chart)):
task = dict(x0=chart[index]['Start'],
x1=chart[index]['Finish'],
name=chart[index]['Task'])
tasks.append(task)
shape_template = {
'type': 'rect',
'xref': 'x',
'yref': 'y',
'opacity': 1,
'line': {
'width': 0,
},
'yref': 'y',
}
color_index = 0
for index in range(len(tasks)):
tn = tasks[index]['name']
task_names.append(tn)
del tasks[index]['name']
tasks[index].update(shape_template)
tasks[index]['y0'] = index - bar_width
tasks[index]['y1'] = index + bar_width
# check if colors need to be looped
if color_index >= len(colors):
color_index = 0
tasks[index]['fillcolor'] = colors[color_index]
# Add a line for hover text and autorange
data.append(
dict(
x=[tasks[index]['x0'], tasks[index]['x1']],
y=[index, index],
name='',
marker={'color': 'white'}
)
)
color_index += 1
layout = dict(
title=title,
showlegend=False,
height=height,
width=width,
shapes=[],
hovermode='closest',
yaxis=dict(
showgrid=showgrid_y,
ticktext=task_names,
tickvals=list(range(len(tasks))),
range=[-1, len(tasks) + 1],
autorange=False,
zeroline=False,
),
xaxis=dict(
showgrid=showgrid_x,
zeroline=False,
rangeselector=dict(
buttons=list([
dict(count=7,
label='1w',
step='day',
stepmode='backward'),
dict(count=1,
label='1m',
step='month',
stepmode='backward'),
dict(count=6,
label='6m',
step='month',
stepmode='backward'),
dict(count=1,
label='YTD',
step='year',
stepmode='todate'),
dict(count=1,
label='1y',
step='year',
stepmode='backward'),
dict(step='all')
])
),
type='date'
)
)
layout['shapes'] = tasks
fig = dict(data=data, layout=layout)
return fig
    @staticmethod
    def _gantt_colorscale(chart, colors, title, index_col, show_colorbar,
                          bar_width, showgrid_x, showgrid_y, height,
                          width, tasks=None, task_names=None, data=None):
        """Build the figure dict for a Gantt chart colored by `index_col`.

        Numeric index values are interpolated between colors[0] and
        colors[1] (values appear to be percentages -- they are divided by
        100 before interpolation); string values get one color per unique
        value, cycling through `colors`. Returns a plain figure dict.
        """
        from numbers import Number
        if tasks is None:
            tasks = []
        if task_names is None:
            task_names = []
        if data is None:
            data = []
        showlegend = False
        for index in range(len(chart)):
            task = dict(x0=chart[index]['Start'],
                        x1=chart[index]['Finish'],
                        name=chart[index]['Task'])
            tasks.append(task)
        # Rectangle template shared by every task bar.
        # NOTE(review): 'yref' appears twice in this literal; the second
        # entry silently wins (both are 'y', so no behavioral change).
        shape_template = {
            'type': 'rect',
            'xref': 'x',
            'yref': 'y',
            'opacity': 1,
            'line': {
                'width': 0,
            },
            'yref': 'y',
        }
        # compute the color for task based on indexing column
        if isinstance(chart[0][index_col], Number):
            # check that colors has at least 2 colors
            if len(colors) < 2:
                raise exceptions.PlotlyError(
                    "You must use at least 2 colors in 'colors' if you "
                    "are using a colorscale. However only the first two "
                    "colors given will be used for the lower and upper "
                    "bounds on the colormap."
                )
            for index in range(len(tasks)):
                tn = tasks[index]['name']
                task_names.append(tn)
                del tasks[index]['name']
                tasks[index].update(shape_template)
                tasks[index]['y0'] = index - bar_width
                tasks[index]['y1'] = index + bar_width
                # unlabel color
                colors = FigureFactory._color_parser(
                    colors, FigureFactory._unlabel_rgb
                )
                lowcolor = colors[0]
                highcolor = colors[1]
                intermed = (chart[index][index_col])/100.0
                intermed_color = FigureFactory._find_intermediate_color(
                    lowcolor, highcolor, intermed
                )
                intermed_color = FigureFactory._color_parser(
                    intermed_color, FigureFactory._label_rgb
                )
                tasks[index]['fillcolor'] = intermed_color
                # relabel colors with 'rgb'
                colors = FigureFactory._color_parser(
                    colors, FigureFactory._label_rgb
                )
                # add a line for hover text and autorange
                data.append(
                    dict(
                        x=[tasks[index]['x0'], tasks[index]['x1']],
                        y=[index, index],
                        name='',
                        marker={'color': 'white'}
                    )
                )
            if show_colorbar is True:
                # generate dummy data for colorscale visibility
                # NOTE(review): `index` here is the leftover value from
                # the loop above (the last task) -- confirm intended.
                data.append(
                    dict(
                        x=[tasks[index]['x0'], tasks[index]['x0']],
                        y=[index, index],
                        name='',
                        marker={'color': 'white',
                                'colorscale': [[0, colors[0]], [1, colors[1]]],
                                'showscale': True,
                                'cmax': 100,
                                'cmin': 0}
                    )
                )
        if isinstance(chart[0][index_col], str):
            # Collect the unique (sorted) categorical index values.
            index_vals = []
            for row in range(len(tasks)):
                if chart[row][index_col] not in index_vals:
                    index_vals.append(chart[row][index_col])
            index_vals.sort()
            if len(colors) < len(index_vals):
                raise exceptions.PlotlyError(
                    "Error. The number of colors in 'colors' must be no less "
                    "than the number of unique index values in your group "
                    "column."
                )
            # make a dictionary assignment to each index value
            index_vals_dict = {}
            # define color index
            c_index = 0
            for key in index_vals:
                if c_index > len(colors) - 1:
                    c_index = 0
                index_vals_dict[key] = colors[c_index]
                c_index += 1
            for index in range(len(tasks)):
                tn = tasks[index]['name']
                task_names.append(tn)
                del tasks[index]['name']
                tasks[index].update(shape_template)
                tasks[index]['y0'] = index - bar_width
                tasks[index]['y1'] = index + bar_width
                tasks[index]['fillcolor'] = index_vals_dict[
                    chart[index][index_col]
                ]
                # add a line for hover text and autorange
                data.append(
                    dict(
                        x=[tasks[index]['x0'], tasks[index]['x1']],
                        y=[index, index],
                        name='',
                        marker={'color': 'white'}
                    )
                )
            if show_colorbar is True:
                # generate dummy data to generate legend
                showlegend = True
                for k, index_value in enumerate(index_vals):
                    # NOTE(review): `index` is again the leftover loop
                    # value; only the x-coords of the dummy points use it.
                    data.append(
                        dict(
                            x=[tasks[index]['x0'], tasks[index]['x0']],
                            y=[k, k],
                            showlegend=True,
                            name=str(index_value),
                            hoverinfo='none',
                            marker=dict(
                                color=colors[k],
                                size=1
                            )
                        )
                    )
        layout = dict(
            title=title,
            showlegend=showlegend,
            height=height,
            width=width,
            shapes=[],
            hovermode='closest',
            yaxis=dict(
                showgrid=showgrid_y,
                ticktext=task_names,
                tickvals=list(range(len(tasks))),
                range=[-1, len(tasks) + 1],
                autorange=False,
                zeroline=False,
            ),
            xaxis=dict(
                showgrid=showgrid_x,
                zeroline=False,
                rangeselector=dict(
                    buttons=list([
                        dict(count=7,
                             label='1w',
                             step='day',
                             stepmode='backward'),
                        dict(count=1,
                             label='1m',
                             step='month',
                             stepmode='backward'),
                        dict(count=6,
                             label='6m',
                             step='month',
                             stepmode='backward'),
                        dict(count=1,
                             label='YTD',
                             step='year',
                             stepmode='todate'),
                        dict(count=1,
                             label='1y',
                             step='year',
                             stepmode='backward'),
                        dict(step='all')
                    ])
                ),
                type='date'
            )
        )
        layout['shapes'] = tasks
        fig = dict(data=data, layout=layout)
        return fig
@staticmethod
def _gantt_dict(chart, colors, title, index_col, show_colorbar, bar_width,
showgrid_x, showgrid_y, height, width, tasks=None,
task_names=None, data=None):
if tasks is None:
tasks = []
if task_names is None:
task_names = []
if data is None:
data = []
showlegend = False
for index in range(len(chart)):
task = dict(x0=chart[index]['Start'],
x1=chart[index]['Finish'],
name=chart[index]['Task'])
tasks.append(task)
shape_template = {
'type': 'rect',
'xref': 'x',
'yref': 'y',
'opacity': 1,
'line': {
'width': 0,
},
'yref': 'y',
}
index_vals = []
for row in range(len(tasks)):
if chart[row][index_col] not in index_vals:
index_vals.append(chart[row][index_col])
index_vals.sort()
# verify each value in index column appears in colors dictionary
for key in index_vals:
if key not in colors:
raise exceptions.PlotlyError(
"If you are using colors as a dictionary, all of its "
"keys must be all the values in the index column."
)
for index in range(len(tasks)):
tn = tasks[index]['name']
task_names.append(tn)
del tasks[index]['name']
tasks[index].update(shape_template)
tasks[index]['y0'] = index - bar_width
tasks[index]['y1'] = index + bar_width
tasks[index]['fillcolor'] = colors[chart[index][index_col]]
# add a line for hover text and autorange
data.append(
dict(
x=[tasks[index]['x0'], tasks[index]['x1']],
y=[index, index],
name='',
marker={'color': 'white'}
)
)
if show_colorbar is True:
# generate dummy data to generate legend
showlegend = True
for k, index_value in enumerate(index_vals):
data.append(
dict(
x=[tasks[index]['x0'], tasks[index]['x0']],
y=[k, k],
showlegend=True,
hoverinfo='none',
name=str(index_value),
marker=dict(
color=colors[index_value],
size=1
)
)
)
layout = dict(
title=title,
showlegend=showlegend,
height=height,
width=width,
shapes=[],
hovermode='closest',
yaxis=dict(
showgrid=showgrid_y,
ticktext=task_names,
tickvals=list(range(len(tasks))),
range=[-1, len(tasks) + 1],
autorange=False,
zeroline=False,
),
xaxis=dict(
showgrid=showgrid_x,
zeroline=False,
rangeselector=dict(
buttons=list([
dict(count=7,
label='1w',
step='day',
stepmode='backward'),
dict(count=1,
label='1m',
step='month',
stepmode='backward'),
dict(count=6,
label='6m',
step='month',
stepmode='backward'),
dict(count=1,
label='YTD',
step='year',
stepmode='todate'),
dict(count=1,
label='1y',
step='year',
stepmode='backward'),
dict(step='all')
])
),
type='date'
)
)
layout['shapes'] = tasks
fig = dict(data=data, layout=layout)
return fig
@staticmethod
def create_gantt(df, colors=None, index_col=None, show_colorbar=False,
reverse_colors=False, title='Gantt Chart',
bar_width=0.2, showgrid_x=False, showgrid_y=False,
height=600, width=900, tasks=None,
task_names=None, data=None):
# validate gantt input data
chart = FigureFactory._validate_gantt(df)
if index_col:
if index_col not in chart[0]:
raise exceptions.PlotlyError(
"In order to use an indexing column and assign colors to "
"the values of the index, you must choose an actual "
"column name in the dataframe or key if a list of "
"dictionaries is being used.")
# validate gantt index column
index_list = []
for dictionary in chart:
index_list.append(dictionary[index_col])
FigureFactory._validate_index(index_list)
# Validate colors
if isinstance(colors, dict):
colors = FigureFactory._validate_colors_dict(colors, 'rgb')
else:
colors = FigureFactory._validate_colors(colors, 'rgb')
if reverse_colors is True:
colors.reverse()
if not index_col:
if isinstance(colors, dict):
raise exceptions.PlotlyError(
"Error. You have set colors to a dictionary but have not "
"picked an index. An index is required if you are "
"assigning colors to particular values in a dictioanry."
)
fig = FigureFactory._gantt(
chart, colors, title, bar_width, showgrid_x, showgrid_y,
height, width, tasks=None, task_names=None, data=None
)
return fig
else:
if not isinstance(colors, dict):
fig = FigureFactory._gantt_colorscale(
chart, colors, title, index_col, show_colorbar, bar_width,
showgrid_x, showgrid_y, height, width,
tasks=None, task_names=None, data=None
)
return fig
else:
fig = FigureFactory._gantt_dict(
chart, colors, title, index_col, show_colorbar, bar_width,
showgrid_x, showgrid_y, height, width,
tasks=None, task_names=None, data=None
)
return fig
@staticmethod
def _validate_colors(colors, colortype='tuple'):
from numbers import Number
if colors is None:
colors = DEFAULT_PLOTLY_COLORS
if isinstance(colors, str):
if colors in PLOTLY_SCALES:
colors = PLOTLY_SCALES[colors]
elif 'rgb' in colors or '
colors = [colors]
else:
raise exceptions.PlotlyError(
"If your colors variable is a string, it must be a "
"Plotly scale, an rgb color or a hex color.")
elif isinstance(colors, tuple):
if isinstance(colors[0], Number):
colors = [colors]
else:
colors = list(colors)
# convert color elements in list to tuple color
for j, each_color in enumerate(colors):
if 'rgb' in each_color:
each_color = FigureFactory._color_parser(
each_color, FigureFactory._unlabel_rgb
)
for value in each_color:
if value > 255.0:
raise exceptions.PlotlyError(
"Whoops! The elements in your rgb colors "
"tuples cannot exceed 255.0."
)
each_color = FigureFactory._color_parser(
each_color, FigureFactory._unconvert_from_RGB_255
)
colors[j] = each_color
if '
each_color = FigureFactory._color_parser(
each_color, FigureFactory._hex_to_rgb
)
each_color = FigureFactory._color_parser(
each_color, FigureFactory._unconvert_from_RGB_255
)
colors[j] = each_color
if isinstance(each_color, tuple):
for value in each_color:
if value > 1.0:
raise exceptions.PlotlyError(
"Whoops! The elements in your colors tuples "
"cannot exceed 1.0."
)
colors[j] = each_color
if colortype == 'rgb':
for j, each_color in enumerate(colors):
rgb_color = FigureFactory._color_parser(
each_color, FigureFactory._convert_to_RGB_255
)
colors[j] = FigureFactory._color_parser(
rgb_color, FigureFactory._label_rgb
)
return colors
@staticmethod
def _validate_colors_dict(colors, colortype='tuple'):
# validate each color element in the dictionary
for key in colors:
if 'rgb' in colors[key]:
colors[key] = FigureFactory._color_parser(
colors[key], FigureFactory._unlabel_rgb
)
for value in colors[key]:
if value > 255.0:
raise exceptions.PlotlyError(
"Whoops! The elements in your rgb colors "
"tuples cannot exceed 255.0."
)
colors[key] = FigureFactory._color_parser(
colors[key], FigureFactory._unconvert_from_RGB_255
)
if '
colors[key] = FigureFactory._color_parser(
colors[key], FigureFactory._hex_to_rgb
)
colors[key] = FigureFactory._color_parser(
colors[key], FigureFactory._unconvert_from_RGB_255
)
if isinstance(colors[key], tuple):
for value in colors[key]:
if value > 1.0:
raise exceptions.PlotlyError(
"Whoops! The elements in your colors tuples "
"cannot exceed 1.0."
)
if colortype == 'rgb':
for key in colors:
colors[key] = FigureFactory._color_parser(
colors[key], FigureFactory._convert_to_RGB_255
)
colors[key] = FigureFactory._color_parser(
colors[key], FigureFactory._label_rgb
)
return colors
@staticmethod
def _calc_stats(data):
import numpy as np
x = np.asarray(data, np.float)
vals_min = np.min(x)
vals_max = np.max(x)
q2 = np.percentile(x, 50, interpolation='linear')
q1 = np.percentile(x, 25, interpolation='lower')
q3 = np.percentile(x, 75, interpolation='higher')
iqr = q3 - q1
whisker_dist = 1.5 * iqr
# in order to prevent drawing whiskers outside the interval
# of data one defines the whisker positions as:
d1 = np.min(x[x >= (q1 - whisker_dist)])
d2 = np.max(x[x <= (q3 + whisker_dist)])
return {
'min': vals_min,
'max': vals_max,
'q1': q1,
'q2': q2,
'q3': q3,
'd1': d1,
'd2': d2
}
@staticmethod
def _make_half_violin(x, y, fillcolor='
linecolor='rgb(0, 0, 0)'):
from plotly.graph_objs import graph_objs
text = ['(pdf(y), y)=(' + '{:0.2f}'.format(x[i]) +
', ' + '{:0.2f}'.format(y[i]) + ')'
for i in range(len(x))]
return graph_objs.Scatter(
x=x,
y=y,
mode='lines',
name='',
text=text,
fill='tonextx',
fillcolor=fillcolor,
line=graph_objs.Line(width=0.5, color=linecolor, shape='spline'),
hoverinfo='text',
opacity=0.5
)
@staticmethod
def _make_violin_rugplot(vals, pdf_max, distance,
color='
from plotly.graph_objs import graph_objs
return graph_objs.Scatter(
y=vals,
x=[-pdf_max-distance]*len(vals),
marker=graph_objs.Marker(
color=color,
symbol='line-ew-open'
),
mode='markers',
name='',
showlegend=False,
hoverinfo='y'
)
@staticmethod
def _make_quartiles(q1, q3):
from plotly.graph_objs import graph_objs
return graph_objs.Scatter(
x=[0, 0],
y=[q1, q3],
text=['lower-quartile: ' + '{:0.2f}'.format(q1),
'upper-quartile: ' + '{:0.2f}'.format(q3)],
mode='lines',
line=graph_objs.Line(
width=4,
color='rgb(0,0,0)'
),
hoverinfo='text'
)
@staticmethod
def _make_median(q2):
from plotly.graph_objs import graph_objs
return graph_objs.Scatter(
x=[0],
y=[q2],
text=['median: ' + '{:0.2f}'.format(q2)],
mode='markers',
marker=dict(symbol='square',
color='rgb(255,255,255)'),
hoverinfo='text'
)
@staticmethod
def _make_non_outlier_interval(d1, d2):
from plotly.graph_objs import graph_objs
return graph_objs.Scatter(
x=[0, 0],
y=[d1, d2],
name='',
mode='lines',
line=graph_objs.Line(width=1.5,
color='rgb(0,0,0)')
)
@staticmethod
def _make_XAxis(xaxis_title, xaxis_range):
from plotly.graph_objs import graph_objs
xaxis = graph_objs.XAxis(title=xaxis_title,
range=xaxis_range,
showgrid=False,
zeroline=False,
showline=False,
mirror=False,
ticks='',
showticklabels=False,
)
return xaxis
@staticmethod
def _make_YAxis(yaxis_title):
from plotly.graph_objs import graph_objs
yaxis = graph_objs.YAxis(title=yaxis_title,
showticklabels=True,
autorange=True,
ticklen=4,
showline=True,
zeroline=False,
showgrid=False,
mirror=False)
return yaxis
@staticmethod
def _violinplot(vals, fillcolor='
import numpy as np
from scipy import stats
vals = np.asarray(vals, np.float)
# summary statistics
vals_min = FigureFactory._calc_stats(vals)['min']
vals_max = FigureFactory._calc_stats(vals)['max']
q1 = FigureFactory._calc_stats(vals)['q1']
q2 = FigureFactory._calc_stats(vals)['q2']
q3 = FigureFactory._calc_stats(vals)['q3']
d1 = FigureFactory._calc_stats(vals)['d1']
d2 = FigureFactory._calc_stats(vals)['d2']
# kernel density estimation of pdf
pdf = stats.gaussian_kde(vals)
# grid over the data interval
xx = np.linspace(vals_min, vals_max, 100)
# evaluate the pdf at the grid xx
yy = pdf(xx)
max_pdf = np.max(yy)
# distance from the violin plot to rugplot
distance = (2.0 * max_pdf)/10 if rugplot else 0
# range for x values in the plot
plot_xrange = [-max_pdf - distance - 0.1, max_pdf + 0.1]
plot_data = [FigureFactory._make_half_violin(
-yy, xx, fillcolor=fillcolor),
FigureFactory._make_half_violin(
yy, xx, fillcolor=fillcolor),
FigureFactory._make_non_outlier_interval(d1, d2),
FigureFactory._make_quartiles(q1, q3),
FigureFactory._make_median(q2)]
if rugplot:
plot_data.append(FigureFactory._make_violin_rugplot(
vals,
max_pdf,
distance=distance,
color=fillcolor)
)
return plot_data, plot_xrange
@staticmethod
def _violin_no_colorscale(data, data_header, group_header, colors,
use_colorscale, group_stats,
height, width, title):
from plotly.graph_objs import graph_objs
import numpy as np
# collect all group names
group_name = []
for name in data[group_header]:
if name not in group_name:
group_name.append(name)
group_name.sort()
gb = data.groupby([group_header])
L = len(group_name)
fig = make_subplots(rows=1, cols=L,
shared_yaxes=True,
horizontal_spacing=0.025,
print_grid=True)
color_index = 0
for k, gr in enumerate(group_name):
vals = np.asarray(gb.get_group(gr)[data_header], np.float)
if color_index >= len(colors):
color_index = 0
plot_data, plot_xrange = FigureFactory._violinplot(
vals,
fillcolor=colors[color_index]
)
layout = graph_objs.Layout()
for item in plot_data:
fig.append_trace(item, 1, k + 1)
color_index += 1
# add violin plot labels
fig['layout'].update({'xaxis{}'.format(k + 1):
FigureFactory._make_XAxis(group_name[k],
plot_xrange)})
# set the sharey axis style
fig['layout'].update(
{'yaxis{}'.format(1): FigureFactory._make_YAxis('')}
)
fig['layout'].update(
title=title,
showlegend=False,
hovermode='closest',
autosize=False,
height=height,
width=width
)
return fig
@staticmethod
def _violin_colorscale(data, data_header, group_header, colors,
use_colorscale, group_stats, height, width, title):
from plotly.graph_objs import graph_objs
import numpy as np
# collect all group names
group_name = []
for name in data[group_header]:
if name not in group_name:
group_name.append(name)
group_name.sort()
# make sure all group names are keys in group_stats
for group in group_name:
if group not in group_stats:
raise exceptions.PlotlyError("All values/groups in the index "
"column must be represented "
"as a key in group_stats.")
gb = data.groupby([group_header])
L = len(group_name)
fig = make_subplots(rows=1, cols=L,
shared_yaxes=True,
horizontal_spacing=0.025,
print_grid=True)
# prepare low and high color for colorscale
lowcolor = FigureFactory._color_parser(
colors[0], FigureFactory._unlabel_rgb
)
highcolor = FigureFactory._color_parser(
colors[1], FigureFactory._unlabel_rgb
)
# find min and max values in group_stats
group_stats_values = []
for key in group_stats:
group_stats_values.append(group_stats[key])
max_value = max(group_stats_values)
min_value = min(group_stats_values)
for k, gr in enumerate(group_name):
vals = np.asarray(gb.get_group(gr)[data_header], np.float)
# find intermediate color from colorscale
intermed = (group_stats[gr] - min_value) / (max_value - min_value)
intermed_color = FigureFactory._find_intermediate_color(
lowcolor, highcolor, intermed
)
plot_data, plot_xrange = FigureFactory._violinplot(
vals,
fillcolor='rgb{}'.format(intermed_color)
)
layout = graph_objs.Layout()
for item in plot_data:
fig.append_trace(item, 1, k + 1)
fig['layout'].update({'xaxis{}'.format(k + 1):
FigureFactory._make_XAxis(group_name[k],
plot_xrange)})
# add colorbar to plot
trace_dummy = graph_objs.Scatter(
x=[0],
y=[0],
mode='markers',
marker=dict(
size=2,
cmin=min_value,
cmax=max_value,
colorscale=[[0, colors[0]],
[1, colors[1]]],
showscale=True),
showlegend=False,
)
fig.append_trace(trace_dummy, 1, L)
# set the sharey axis style
fig['layout'].update(
{'yaxis{}'.format(1): FigureFactory._make_YAxis('')}
)
fig['layout'].update(
title=title,
showlegend=False,
hovermode='closest',
autosize=False,
height=height,
width=width
)
return fig
@staticmethod
def _violin_dict(data, data_header, group_header, colors, use_colorscale,
group_stats, height, width, title):
from plotly.graph_objs import graph_objs
import numpy as np
# collect all group names
group_name = []
for name in data[group_header]:
if name not in group_name:
group_name.append(name)
group_name.sort()
# check if all group names appear in colors dict
for group in group_name:
if group not in colors:
raise exceptions.PlotlyError("If colors is a dictionary, all "
"the group names must appear as "
"keys in colors.")
gb = data.groupby([group_header])
L = len(group_name)
fig = make_subplots(rows=1, cols=L,
shared_yaxes=True,
horizontal_spacing=0.025,
print_grid=True)
for k, gr in enumerate(group_name):
vals = np.asarray(gb.get_group(gr)[data_header], np.float)
plot_data, plot_xrange = FigureFactory._violinplot(
vals,
fillcolor=colors[gr]
)
layout = graph_objs.Layout()
for item in plot_data:
fig.append_trace(item, 1, k + 1)
# add violin plot labels
fig['layout'].update({'xaxis{}'.format(k + 1):
FigureFactory._make_XAxis(group_name[k],
plot_xrange)})
# set the sharey axis style
fig['layout'].update(
{'yaxis{}'.format(1): FigureFactory._make_YAxis('')}
)
fig['layout'].update(
title=title,
showlegend=False,
hovermode='closest',
autosize=False,
height=height,
width=width
)
return fig
    @staticmethod
    def create_violin(data, data_header=None, group_header=None,
                      colors=None, use_colorscale=False, group_stats=None,
                      height=450, width=600, title='Violin and Rug Plot'):
        """
        Return a violin-and-rug-plot figure.

        data: a list of numbers, or a pandas DataFrame.
        data_header: DataFrame column holding the numeric sample
            (required whenever data is a DataFrame).
        group_header: column used to split the sample into one violin per
            group; None draws a single violin.
        colors: a color string, a list of colors, or a dict mapping group
            names to colors (validated to 'rgb(...)' strings below).
        use_colorscale: if True (grouped data, non-dict colors), color the
            violins along a two-color scale positioned by group_stats.
        group_stats: dict mapping each group name to the number that
            places it on the colorscale.
        height, width: figure size in pixels.
        title: figure title.

        Raises exceptions.PlotlyError for inconsistent argument
        combinations (empty or non-numeric list data, missing data_header,
        colors/use_colorscale mismatches).
        """
        from plotly.graph_objs import graph_objs
        from numbers import Number
        # Validate colors
        if isinstance(colors, dict):
            valid_colors = FigureFactory._validate_colors_dict(colors, 'rgb')
        else:
            valid_colors = FigureFactory._validate_colors(colors, 'rgb')
        # validate data and choose plot type
        if group_header is None:
            # single-violin case: data is a list of numbers or a DataFrame
            # column selected via data_header
            if isinstance(data, list):
                if len(data) <= 0:
                    raise exceptions.PlotlyError("If data is a list, it must be "
                                                 "nonempty and contain either "
                                                 "numbers or dictionaries.")
                if not all(isinstance(element, Number) for element in data):
                    raise exceptions.PlotlyError("If data is a list, it must "
                                                 "contain only numbers.")
            if _pandas_imported and isinstance(data, pd.core.frame.DataFrame):
                if data_header is None:
                    raise exceptions.PlotlyError("data_header must be the "
                                                 "column name with the "
                                                 "desired numeric data for "
                                                 "the violin plot.")
                # reduce the DataFrame to a plain list of values
                data = data[data_header].values.tolist()
            # call the plotting functions
            plot_data, plot_xrange = FigureFactory._violinplot(
                data, fillcolor=valid_colors[0]
            )
            layout = graph_objs.Layout(
                title=title,
                autosize=False,
                font=graph_objs.Font(size=11),
                height=height,
                showlegend=False,
                width=width,
                xaxis=FigureFactory._make_XAxis('', plot_xrange),
                yaxis=FigureFactory._make_YAxis(''),
                hovermode='closest'
            )
            # hide the y-axis line/ticks/labels for the single violin
            layout['yaxis'].update(dict(showline=False,
                                        showticklabels=False,
                                        ticks=''))
            fig = graph_objs.Figure(data=graph_objs.Data(plot_data),
                                    layout=layout)
            return fig
        else:
            # grouped case: requires a DataFrame and a numeric column
            if not isinstance(data, pd.core.frame.DataFrame):
                raise exceptions.PlotlyError("Error. You must use a pandas "
                                             "DataFrame if you are using a "
                                             "group header.")
            if data_header is None:
                raise exceptions.PlotlyError("data_header must be the column "
                                             "name with the desired numeric "
                                             "data for the violin plot.")
            if use_colorscale is False:
                if isinstance(valid_colors, dict):
                    # validate colors dict choice below
                    fig = FigureFactory._violin_dict(
                        data, data_header, group_header, valid_colors,
                        use_colorscale, group_stats, height, width, title
                    )
                    return fig
                else:
                    fig = FigureFactory._violin_no_colorscale(
                        data, data_header, group_header, valid_colors,
                        use_colorscale, group_stats, height, width, title
                    )
                    return fig
            else:
                # colorscale mode needs a 2+ color list and group_stats
                if isinstance(valid_colors, dict):
                    raise exceptions.PlotlyError("The colors param cannot be "
                                                 "a dictionary if you are "
                                                 "using a colorscale.")
                if len(valid_colors) < 2:
                    raise exceptions.PlotlyError("colors must be a list with "
                                                 "at least 2 colors. A "
                                                 "Plotly scale is allowed.")
                if not isinstance(group_stats, dict):
                    raise exceptions.PlotlyError("Your group_stats param "
                                                 "must be a dictionary.")
                fig = FigureFactory._violin_colorscale(
                    data, data_header, group_header, valid_colors,
                    use_colorscale, group_stats, height, width, title
                )
                return fig
@staticmethod
def _find_intermediate_color(lowcolor, highcolor, intermed):
diff_0 = float(highcolor[0] - lowcolor[0])
diff_1 = float(highcolor[1] - lowcolor[1])
diff_2 = float(highcolor[2] - lowcolor[2])
return (lowcolor[0] + intermed * diff_0,
lowcolor[1] + intermed * diff_1,
lowcolor[2] + intermed * diff_2)
@staticmethod
def _color_parser(colors, function):
from numbers import Number
if isinstance(colors, str):
return function(colors)
if isinstance(colors, tuple) and isinstance(colors[0], Number):
return function(colors)
if hasattr(colors, '__iter__'):
if isinstance(colors, tuple):
new_color_tuple = tuple(function(item) for item in colors)
return new_color_tuple
else:
new_color_list = [function(item) for item in colors]
return new_color_list
@staticmethod
def _unconvert_from_RGB_255(colors):
return (colors[0]/(255.0),
colors[1]/(255.0),
colors[2]/(255.0))
@staticmethod
def _map_face2color(face, colormap, vmin, vmax):
if vmin >= vmax:
raise exceptions.PlotlyError("Incorrect relation between vmin "
"and vmax. The vmin value cannot be "
"bigger than or equal to the value "
"of vmax.")
if len(colormap) == 1:
# color each triangle face with the same color in colormap
face_color = colormap[0]
face_color = FigureFactory._convert_to_RGB_255(face_color)
face_color = FigureFactory._label_rgb(face_color)
else:
if face == vmax:
# pick last color in colormap
face_color = colormap[-1]
face_color = FigureFactory._convert_to_RGB_255(face_color)
face_color = FigureFactory._label_rgb(face_color)
else:
# find the normalized distance t of a triangle face between
# vmin and vmax where the distance is between 0 and 1
t = (face - vmin) / float((vmax - vmin))
low_color_index = int(t / (1./(len(colormap) - 1)))
face_color = FigureFactory._find_intermediate_color(
colormap[low_color_index],
colormap[low_color_index + 1],
t * (len(colormap) - 1) - low_color_index
)
face_color = FigureFactory._convert_to_RGB_255(face_color)
face_color = FigureFactory._label_rgb(face_color)
return face_color
@staticmethod
def _trisurf(x, y, z, simplices, show_colorbar, edges_color,
colormap=None, color_func=None, plot_edges=False,
x_edge=None, y_edge=None, z_edge=None, facecolor=None):
# numpy import check
if _numpy_imported is False:
raise ImportError("FigureFactory._trisurf() requires "
"numpy imported.")
import numpy as np
from plotly.graph_objs import graph_objs
points3D = np.vstack((x, y, z)).T
simplices = np.atleast_2d(simplices)
# vertices of the surface triangles
tri_vertices = points3D[simplices]
# Define colors for the triangle faces
if color_func is None:
# mean values of z-coordinates of triangle vertices
mean_dists = tri_vertices[:, :, 2].mean(-1)
elif isinstance(color_func, (list, np.ndarray)):
# Pre-computed list / array of values to map onto color
if len(color_func) != len(simplices):
raise ValueError("If color_func is a list/array, it must "
"be the same length as simplices.")
# convert all colors in color_func to rgb
for index in range(len(color_func)):
if isinstance(color_func[index], str):
if '
foo = FigureFactory._hex_to_rgb(color_func[index])
color_func[index] = FigureFactory._label_rgb(foo)
if isinstance(color_func[index], tuple):
foo = FigureFactory._convert_to_RGB_255(color_func[index])
color_func[index] = FigureFactory._label_rgb(foo)
mean_dists = np.asarray(color_func)
else:
# apply user inputted function to calculate
# custom coloring for triangle vertices
mean_dists = []
for triangle in tri_vertices:
dists = []
for vertex in triangle:
dist = color_func(vertex[0], vertex[1], vertex[2])
dists.append(dist)
mean_dists.append(np.mean(dists))
mean_dists = np.asarray(mean_dists)
# Check if facecolors are already strings and can be skipped
if isinstance(mean_dists[0], str):
facecolor = mean_dists
else:
min_mean_dists = np.min(mean_dists)
max_mean_dists = np.max(mean_dists)
if facecolor is None:
facecolor = []
for index in range(len(mean_dists)):
color = FigureFactory._map_face2color(mean_dists[index],
colormap,
min_mean_dists,
max_mean_dists)
facecolor.append(color)
# Make sure facecolor is a list so output is consistent across Pythons
facecolor = list(facecolor)
ii, jj, kk = simplices.T
triangles = graph_objs.Mesh3d(x=x, y=y, z=z, facecolor=facecolor,
i=ii, j=jj, k=kk, name='')
mean_dists_are_numbers = not isinstance(mean_dists[0], str)
if mean_dists_are_numbers and show_colorbar is True:
# make a colorscale from the colors
colorscale = FigureFactory._make_colorscale(colormap)
colorscale = FigureFactory._convert_colorscale_to_rgb(colorscale)
colorbar = graph_objs.Scatter3d(
x=x[0],
y=y[0],
z=z[0],
mode='markers',
marker=dict(
size=0.1,
color=[min_mean_dists, max_mean_dists],
colorscale=colorscale,
showscale=True),
hoverinfo='None',
showlegend=False
)
# the triangle sides are not plotted
if plot_edges is False:
if mean_dists_are_numbers and show_colorbar is True:
return graph_objs.Data([triangles, colorbar])
else:
return graph_objs.Data([triangles])
# define the lists x_edge, y_edge and z_edge, of x, y, resp z
# coordinates of edge end points for each triangle
# None separates data corresponding to two consecutive triangles
is_none = [ii is None for ii in [x_edge, y_edge, z_edge]]
if any(is_none):
if not all(is_none):
raise ValueError("If any (x_edge, y_edge, z_edge) is None, "
"all must be None")
else:
x_edge = []
y_edge = []
z_edge = []
# Pull indices we care about, then add a None column to separate tris
ixs_triangles = [0, 1, 2, 0]
pull_edges = tri_vertices[:, ixs_triangles, :]
x_edge_pull = np.hstack([pull_edges[:, :, 0],
np.tile(None, [pull_edges.shape[0], 1])])
y_edge_pull = np.hstack([pull_edges[:, :, 1],
np.tile(None, [pull_edges.shape[0], 1])])
z_edge_pull = np.hstack([pull_edges[:, :, 2],
np.tile(None, [pull_edges.shape[0], 1])])
# Now unravel the edges into a 1-d vector for plotting
x_edge = np.hstack([x_edge, x_edge_pull.reshape([1, -1])[0]])
y_edge = np.hstack([y_edge, y_edge_pull.reshape([1, -1])[0]])
z_edge = np.hstack([z_edge, z_edge_pull.reshape([1, -1])[0]])
if not (len(x_edge) == len(y_edge) == len(z_edge)):
raise exceptions.PlotlyError("The lengths of x_edge, y_edge and "
"z_edge are not the same.")
# define the lines for plotting
lines = graph_objs.Scatter3d(
x=x_edge, y=y_edge, z=z_edge, mode='lines',
line=graph_objs.Line(
color=edges_color,
width=1.5
),
showlegend=False
)
if mean_dists_are_numbers and show_colorbar is True:
return graph_objs.Data([triangles, lines, colorbar])
else:
return graph_objs.Data([triangles, lines])
@staticmethod
def create_trisurf(x, y, z, simplices, colormap=None, show_colorbar=True,
color_func=None, title='Trisurf Plot', plot_edges=True,
showbackground=True,
backgroundcolor='rgb(230, 230, 230)',
gridcolor='rgb(255, 255, 255)',
zerolinecolor='rgb(255, 255, 255)',
edges_color='rgb(50, 50, 50)',
height=800, width=800,
aspectratio=dict(x=1, y=1, z=1)):
from plotly.graph_objs import graph_objs
# Validate colormap
colormap = FigureFactory._validate_colors(colormap, 'tuple')
data1 = FigureFactory._trisurf(x, y, z, simplices,
show_colorbar=show_colorbar,
color_func=color_func,
colormap=colormap,
edges_color=edges_color,
plot_edges=plot_edges)
axis = dict(
showbackground=showbackground,
backgroundcolor=backgroundcolor,
gridcolor=gridcolor,
zerolinecolor=zerolinecolor,
)
layout = graph_objs.Layout(
title=title,
width=width,
height=height,
scene=graph_objs.Scene(
xaxis=graph_objs.XAxis(axis),
yaxis=graph_objs.YAxis(axis),
zaxis=graph_objs.ZAxis(axis),
aspectratio=dict(
x=aspectratio['x'],
y=aspectratio['y'],
z=aspectratio['z']),
)
)
return graph_objs.Figure(data=data1, layout=layout)
@staticmethod
def _scatterplot(dataframe, headers, diag, size,
height, width, title, **kwargs):
from plotly.graph_objs import graph_objs
dim = len(dataframe)
fig = make_subplots(rows=dim, cols=dim)
trace_list = []
# Insert traces into trace_list
for listy in dataframe:
for listx in dataframe:
if (listx == listy) and (diag == 'histogram'):
trace = graph_objs.Histogram(
x=listx,
showlegend=False
)
elif (listx == listy) and (diag == 'box'):
trace = graph_objs.Box(
y=listx,
name=None,
showlegend=False
)
else:
if 'marker' in kwargs:
kwargs['marker']['size'] = size
trace = graph_objs.Scatter(
x=listx,
y=listy,
mode='markers',
showlegend=False,
**kwargs
)
trace_list.append(trace)
else:
trace = graph_objs.Scatter(
x=listx,
y=listy,
mode='markers',
marker=dict(
size=size),
showlegend=False,
**kwargs
)
trace_list.append(trace)
trace_index = 0
indices = range(1, dim + 1)
for y_index in indices:
for x_index in indices:
fig.append_trace(trace_list[trace_index],
y_index,
x_index)
trace_index += 1
# Insert headers into the figure
for j in range(dim):
xaxis_key = 'xaxis{}'.format((dim * dim) - dim + 1 + j)
fig['layout'][xaxis_key].update(title=headers[j])
for j in range(dim):
yaxis_key = 'yaxis{}'.format(1 + (dim * j))
fig['layout'][yaxis_key].update(title=headers[j])
fig['layout'].update(
height=height, width=width,
title=title,
showlegend=True
)
return fig
@staticmethod
def _scatterplot_dict(dataframe, headers, diag, size,
height, width, title, index, index_vals,
endpts, colormap, colormap_type, **kwargs):
from plotly.graph_objs import graph_objs
theme = colormap
dim = len(dataframe)
fig = make_subplots(rows=dim, cols=dim)
trace_list = []
legend_param = 0
# Work over all permutations of list pairs
for listy in dataframe:
for listx in dataframe:
# create a dictionary for index_vals
unique_index_vals = {}
for name in index_vals:
if name not in unique_index_vals:
unique_index_vals[name] = []
# Fill all the rest of the names into the dictionary
for name in sorted(unique_index_vals.keys()):
new_listx = []
new_listy = []
for j in range(len(index_vals)):
if index_vals[j] == name:
new_listx.append(listx[j])
new_listy.append(listy[j])
# Generate trace with VISIBLE icon
if legend_param == 1:
if (listx == listy) and (diag == 'histogram'):
trace = graph_objs.Histogram(
x=new_listx,
marker=dict(
color=theme[name]),
showlegend=True
)
elif (listx == listy) and (diag == 'box'):
trace = graph_objs.Box(
y=new_listx,
name=None,
marker=dict(
color=theme[name]),
showlegend=True
)
else:
if 'marker' in kwargs:
kwargs['marker']['size'] = size
kwargs['marker']['color'] = theme[name]
trace = graph_objs.Scatter(
x=new_listx,
y=new_listy,
mode='markers',
name=name,
showlegend=True,
**kwargs
)
else:
trace = graph_objs.Scatter(
x=new_listx,
y=new_listy,
mode='markers',
name=name,
marker=dict(
size=size,
color=theme[name]),
showlegend=True,
**kwargs
)
# Generate trace with INVISIBLE icon
else:
if (listx == listy) and (diag == 'histogram'):
trace = graph_objs.Histogram(
x=new_listx,
marker=dict(
color=theme[name]),
showlegend=False
)
elif (listx == listy) and (diag == 'box'):
trace = graph_objs.Box(
y=new_listx,
name=None,
marker=dict(
color=theme[name]),
showlegend=False
)
else:
if 'marker' in kwargs:
kwargs['marker']['size'] = size
kwargs['marker']['color'] = theme[name]
trace = graph_objs.Scatter(
x=new_listx,
y=new_listy,
mode='markers',
name=name,
showlegend=False,
**kwargs
)
else:
trace = graph_objs.Scatter(
x=new_listx,
y=new_listy,
mode='markers',
name=name,
marker=dict(
size=size,
color=theme[name]),
showlegend=False,
**kwargs
)
# Push the trace into dictionary
unique_index_vals[name] = trace
trace_list.append(unique_index_vals)
legend_param += 1
trace_index = 0
indices = range(1, dim + 1)
for y_index in indices:
for x_index in indices:
for name in sorted(trace_list[trace_index].keys()):
fig.append_trace(
trace_list[trace_index][name],
y_index,
x_index)
trace_index += 1
# Insert headers into the figure
for j in range(dim):
xaxis_key = 'xaxis{}'.format((dim * dim) - dim + 1 + j)
fig['layout'][xaxis_key].update(title=headers[j])
for j in range(dim):
yaxis_key = 'yaxis{}'.format(1 + (dim * j))
fig['layout'][yaxis_key].update(title=headers[j])
if diag == 'histogram':
fig['layout'].update(
height=height, width=width,
title=title,
showlegend=True,
barmode='stack')
return fig
elif diag == 'box':
fig['layout'].update(
height=height, width=width,
title=title,
showlegend=True)
return fig
else:
fig['layout'].update(
height=height, width=width,
title=title,
showlegend=True)
return fig
    @staticmethod
    def _scatterplot_theme(dataframe, headers, diag, size, height,
                           width, title, index, index_vals, endpts,
                           colormap, colormap_type, **kwargs):
        """Build the scatterplot-matrix figure when a colormap theme is used.

        Dispatches on the type of the index column:
        - string index: one trace (and one theme color) per unique name,
          cycling through the theme when there are more names than colors
        - numeric index with endpts: values are bucketed into the intervals
          from _endpts_to_intervals(), one trace/color per interval
        - numeric index without endpts: a single trace per subplot, colored
          continuously through the colormap via a marker colorscale

        legend_param makes only the second subplot's traces carry legend
        entries so each group appears exactly once in the legend.
        NOTE(review): kwargs['marker'] is mutated in place when supplied —
        presumably intentional, but verify callers do not reuse the dict.
        """
        from plotly.graph_objs import graph_objs
        # Check if index is made of string values
        if isinstance(index_vals[0], str):
            unique_index_vals = []
            for name in index_vals:
                if name not in unique_index_vals:
                    unique_index_vals.append(name)
            n_colors_len = len(unique_index_vals)
            # Convert colormap to list of n RGB tuples
            if colormap_type == 'seq':
                foo = FigureFactory._color_parser(
                    colormap, FigureFactory._unlabel_rgb
                )
                foo = FigureFactory._n_colors(foo[0],
                                              foo[1],
                                              n_colors_len)
                theme = FigureFactory._color_parser(
                    foo, FigureFactory._label_rgb
                )
            if colormap_type == 'cat':
                # leave list of colors the same way
                theme = colormap
            dim = len(dataframe)
            fig = make_subplots(rows=dim, cols=dim)
            trace_list = []
            legend_param = 0
            # Work over all permutations of list pairs
            for listy in dataframe:
                for listx in dataframe:
                    # create a dictionary for index_vals
                    unique_index_vals = {}
                    for name in index_vals:
                        if name not in unique_index_vals:
                            unique_index_vals[name] = []
                    c_indx = 0  # color index
                    # Fill all the rest of the names into the dictionary
                    for name in sorted(unique_index_vals.keys()):
                        new_listx = []
                        new_listy = []
                        for j in range(len(index_vals)):
                            if index_vals[j] == name:
                                new_listx.append(listx[j])
                                new_listy.append(listy[j])
                        # Generate trace with VISIBLE icon
                        if legend_param == 1:
                            if (listx == listy) and (diag == 'histogram'):
                                trace = graph_objs.Histogram(
                                    x=new_listx,
                                    marker=dict(
                                        color=theme[c_indx]),
                                    showlegend=True
                                )
                            elif (listx == listy) and (diag == 'box'):
                                trace = graph_objs.Box(
                                    y=new_listx,
                                    name=None,
                                    marker=dict(
                                        color=theme[c_indx]),
                                    showlegend=True
                                )
                            else:
                                if 'marker' in kwargs:
                                    kwargs['marker']['size'] = size
                                    kwargs['marker']['color'] = theme[c_indx]
                                    trace = graph_objs.Scatter(
                                        x=new_listx,
                                        y=new_listy,
                                        mode='markers',
                                        name=name,
                                        showlegend=True,
                                        **kwargs
                                    )
                                else:
                                    trace = graph_objs.Scatter(
                                        x=new_listx,
                                        y=new_listy,
                                        mode='markers',
                                        name=name,
                                        marker=dict(
                                            size=size,
                                            color=theme[c_indx]),
                                        showlegend=True,
                                        **kwargs
                                    )
                        # Generate trace with INVISIBLE icon
                        else:
                            if (listx == listy) and (diag == 'histogram'):
                                trace = graph_objs.Histogram(
                                    x=new_listx,
                                    marker=dict(
                                        color=theme[c_indx]),
                                    showlegend=False
                                )
                            elif (listx == listy) and (diag == 'box'):
                                trace = graph_objs.Box(
                                    y=new_listx,
                                    name=None,
                                    marker=dict(
                                        color=theme[c_indx]),
                                    showlegend=False
                                )
                            else:
                                if 'marker' in kwargs:
                                    kwargs['marker']['size'] = size
                                    kwargs['marker']['color'] = theme[c_indx]
                                    trace = graph_objs.Scatter(
                                        x=new_listx,
                                        y=new_listy,
                                        mode='markers',
                                        name=name,
                                        showlegend=False,
                                        **kwargs
                                    )
                                else:
                                    trace = graph_objs.Scatter(
                                        x=new_listx,
                                        y=new_listy,
                                        mode='markers',
                                        name=name,
                                        marker=dict(
                                            size=size,
                                            color=theme[c_indx]),
                                        showlegend=False,
                                        **kwargs
                                    )
                        # Push the trace into dictionary
                        unique_index_vals[name] = trace
                        # wrap the color index so themes shorter than the
                        # number of groups cycle instead of overflowing
                        if c_indx >= (len(theme) - 1):
                            c_indx = -1
                        c_indx += 1
                    trace_list.append(unique_index_vals)
                    legend_param += 1
            trace_index = 0
            indices = range(1, dim + 1)
            for y_index in indices:
                for x_index in indices:
                    for name in sorted(trace_list[trace_index].keys()):
                        fig.append_trace(
                            trace_list[trace_index][name],
                            y_index,
                            x_index)
                    trace_index += 1
            # Insert headers into the figure
            for j in range(dim):
                xaxis_key = 'xaxis{}'.format((dim * dim) - dim + 1 + j)
                fig['layout'][xaxis_key].update(title=headers[j])
            for j in range(dim):
                yaxis_key = 'yaxis{}'.format(1 + (dim * j))
                fig['layout'][yaxis_key].update(title=headers[j])
            if diag == 'histogram':
                fig['layout'].update(
                    height=height, width=width,
                    title=title,
                    showlegend=True,
                    barmode='stack')
                return fig
            elif diag == 'box':
                fig['layout'].update(
                    height=height, width=width,
                    title=title,
                    showlegend=True)
                return fig
            else:
                fig['layout'].update(
                    height=height, width=width,
                    title=title,
                    showlegend=True)
                return fig
        else:
            if endpts:
                intervals = FigureFactory._endpts_to_intervals(endpts)
                # Convert colormap to list of n RGB tuples
                if colormap_type == 'seq':
                    foo = FigureFactory._color_parser(
                        colormap, FigureFactory._unlabel_rgb
                    )
                    foo = FigureFactory._n_colors(foo[0],
                                                  foo[1],
                                                  len(intervals))
                    theme = FigureFactory._color_parser(
                        foo, FigureFactory._label_rgb
                    )
                if colormap_type == 'cat':
                    # leave list of colors the same way
                    theme = colormap
                dim = len(dataframe)
                fig = make_subplots(rows=dim, cols=dim)
                trace_list = []
                legend_param = 0
                # Work over all permutations of list pairs
                for listy in dataframe:
                    for listx in dataframe:
                        interval_labels = {}
                        for interval in intervals:
                            interval_labels[str(interval)] = []
                        c_indx = 0  # color index
                        # Fill all the rest of the names into the dictionary
                        for interval in intervals:
                            new_listx = []
                            new_listy = []
                            for j in range(len(index_vals)):
                                if interval[0] < index_vals[j] <= interval[1]:
                                    new_listx.append(listx[j])
                                    new_listy.append(listy[j])
                            # Generate trace with VISIBLE icon
                            if legend_param == 1:
                                if (listx == listy) and (diag == 'histogram'):
                                    trace = graph_objs.Histogram(
                                        x=new_listx,
                                        marker=dict(
                                            color=theme[c_indx]),
                                        showlegend=True
                                    )
                                elif (listx == listy) and (diag == 'box'):
                                    trace = graph_objs.Box(
                                        y=new_listx,
                                        name=None,
                                        marker=dict(
                                            color=theme[c_indx]),
                                        showlegend=True
                                    )
                                else:
                                    if 'marker' in kwargs:
                                        kwargs['marker']['size'] = size
                                        (kwargs['marker']
                                            ['color']) = theme[c_indx]
                                        trace = graph_objs.Scatter(
                                            x=new_listx,
                                            y=new_listy,
                                            mode='markers',
                                            name=str(interval),
                                            showlegend=True,
                                            **kwargs
                                        )
                                    else:
                                        trace = graph_objs.Scatter(
                                            x=new_listx,
                                            y=new_listy,
                                            mode='markers',
                                            name=str(interval),
                                            marker=dict(
                                                size=size,
                                                color=theme[c_indx]),
                                            showlegend=True,
                                            **kwargs
                                        )
                            # Generate trace with INVISIBLE icon
                            else:
                                if (listx == listy) and (diag == 'histogram'):
                                    trace = graph_objs.Histogram(
                                        x=new_listx,
                                        marker=dict(
                                            color=theme[c_indx]),
                                        showlegend=False
                                    )
                                elif (listx == listy) and (diag == 'box'):
                                    trace = graph_objs.Box(
                                        y=new_listx,
                                        name=None,
                                        marker=dict(
                                            color=theme[c_indx]),
                                        showlegend=False
                                    )
                                else:
                                    if 'marker' in kwargs:
                                        kwargs['marker']['size'] = size
                                        (kwargs['marker']
                                            ['color']) = theme[c_indx]
                                        trace = graph_objs.Scatter(
                                            x=new_listx,
                                            y=new_listy,
                                            mode='markers',
                                            name=str(interval),
                                            showlegend=False,
                                            **kwargs
                                        )
                                    else:
                                        trace = graph_objs.Scatter(
                                            x=new_listx,
                                            y=new_listy,
                                            mode='markers',
                                            name=str(interval),
                                            marker=dict(
                                                size=size,
                                                color=theme[c_indx]),
                                            showlegend=False,
                                            **kwargs
                                        )
                            # Push the trace into dictionary
                            interval_labels[str(interval)] = trace
                            # wrap the color index so short themes cycle
                            if c_indx >= (len(theme) - 1):
                                c_indx = -1
                            c_indx += 1
                        trace_list.append(interval_labels)
                        legend_param += 1
                trace_index = 0
                indices = range(1, dim + 1)
                for y_index in indices:
                    for x_index in indices:
                        for interval in intervals:
                            fig.append_trace(
                                trace_list[trace_index][str(interval)],
                                y_index,
                                x_index)
                        trace_index += 1
                # Insert headers into the figure
                for j in range(dim):
                    xaxis_key = 'xaxis{}'.format((dim * dim) - dim + 1 + j)
                    fig['layout'][xaxis_key].update(title=headers[j])
                for j in range(dim):
                    yaxis_key = 'yaxis{}'.format(1 + (dim * j))
                    fig['layout'][yaxis_key].update(title=headers[j])
                if diag == 'histogram':
                    fig['layout'].update(
                        height=height, width=width,
                        title=title,
                        showlegend=True,
                        barmode='stack')
                    return fig
                elif diag == 'box':
                    fig['layout'].update(
                        height=height, width=width,
                        title=title,
                        showlegend=True)
                    return fig
                else:
                    fig['layout'].update(
                        height=height, width=width,
                        title=title,
                        showlegend=True)
                    return fig
            else:
                theme = colormap
                # add a copy of rgb color to theme if it contains one color
                if len(theme) <= 1:
                    theme.append(theme[0])
                color = []
                for incr in range(len(theme)):
                    color.append([1./(len(theme)-1)*incr, theme[incr]])
                dim = len(dataframe)
                fig = make_subplots(rows=dim, cols=dim)
                trace_list = []
                legend_param = 0
                # Run through all permutations of list pairs
                for listy in dataframe:
                    for listx in dataframe:
                        # Generate trace with VISIBLE icon
                        if legend_param == 1:
                            if (listx == listy) and (diag == 'histogram'):
                                trace = graph_objs.Histogram(
                                    x=listx,
                                    marker=dict(
                                        color=theme[0]),
                                    showlegend=False
                                )
                            elif (listx == listy) and (diag == 'box'):
                                trace = graph_objs.Box(
                                    y=listx,
                                    marker=dict(
                                        color=theme[0]),
                                    showlegend=False
                                )
                            else:
                                if 'marker' in kwargs:
                                    kwargs['marker']['size'] = size
                                    kwargs['marker']['color'] = index_vals
                                    kwargs['marker']['colorscale'] = color
                                    kwargs['marker']['showscale'] = True
                                    trace = graph_objs.Scatter(
                                        x=listx,
                                        y=listy,
                                        mode='markers',
                                        showlegend=False,
                                        **kwargs
                                    )
                                else:
                                    trace = graph_objs.Scatter(
                                        x=listx,
                                        y=listy,
                                        mode='markers',
                                        marker=dict(
                                            size=size,
                                            color=index_vals,
                                            colorscale=color,
                                            showscale=True),
                                        showlegend=False,
                                        **kwargs
                                    )
                        # Generate trace with INVISIBLE icon
                        else:
                            if (listx == listy) and (diag == 'histogram'):
                                trace = graph_objs.Histogram(
                                    x=listx,
                                    marker=dict(
                                        color=theme[0]),
                                    showlegend=False
                                )
                            elif (listx == listy) and (diag == 'box'):
                                trace = graph_objs.Box(
                                    y=listx,
                                    marker=dict(
                                        color=theme[0]),
                                    showlegend=False
                                )
                            else:
                                if 'marker' in kwargs:
                                    kwargs['marker']['size'] = size
                                    kwargs['marker']['color'] = index_vals
                                    kwargs['marker']['colorscale'] = color
                                    kwargs['marker']['showscale'] = False
                                    trace = graph_objs.Scatter(
                                        x=listx,
                                        y=listy,
                                        mode='markers',
                                        showlegend=False,
                                        **kwargs
                                    )
                                else:
                                    trace = graph_objs.Scatter(
                                        x=listx,
                                        y=listy,
                                        mode='markers',
                                        marker=dict(
                                            size=size,
                                            color=index_vals,
                                            colorscale=color,
                                            showscale=False),
                                        showlegend=False,
                                        **kwargs
                                    )
                        # Push the trace into list
                        trace_list.append(trace)
                        legend_param += 1
                trace_index = 0
                indices = range(1, dim + 1)
                for y_index in indices:
                    for x_index in indices:
                        fig.append_trace(trace_list[trace_index],
                                         y_index,
                                         x_index)
                        trace_index += 1
                # Insert headers into the figure
                for j in range(dim):
                    xaxis_key = 'xaxis{}'.format((dim * dim) - dim + 1 + j)
                    fig['layout'][xaxis_key].update(title=headers[j])
                for j in range(dim):
                    yaxis_key = 'yaxis{}'.format(1 + (dim * j))
                    fig['layout'][yaxis_key].update(title=headers[j])
                if diag == 'histogram':
                    fig['layout'].update(
                        height=height, width=width,
                        title=title,
                        showlegend=True,
                        barmode='stack')
                    return fig
                elif diag == 'box':
                    fig['layout'].update(
                        height=height, width=width,
                        title=title,
                        showlegend=True)
                    return fig
                else:
                    fig['layout'].update(
                        height=height, width=width,
                        title=title,
                        showlegend=True)
                    return fig
@staticmethod
def _validate_index(index_vals):
from numbers import Number
if isinstance(index_vals[0], Number):
if not all(isinstance(item, Number) for item in index_vals):
raise exceptions.PlotlyError("Error in indexing column. "
"Make sure all entries of each "
"column are all numbers or "
"all strings.")
elif isinstance(index_vals[0], str):
if not all(isinstance(item, str) for item in index_vals):
raise exceptions.PlotlyError("Error in indexing column. "
"Make sure all entries of each "
"column are all numbers or "
"all strings.")
@staticmethod
def _validate_dataframe(array):
from numbers import Number
for vector in array:
if isinstance(vector[0], Number):
if not all(isinstance(item, Number) for item in vector):
raise exceptions.PlotlyError("Error in dataframe. "
"Make sure all entries of "
"each column are either "
"numbers or strings.")
elif isinstance(vector[0], str):
if not all(isinstance(item, str) for item in vector):
raise exceptions.PlotlyError("Error in dataframe. "
"Make sure all entries of "
"each column are either "
"numbers or strings.")
@staticmethod
def _validate_scatterplotmatrix(df, index, diag, colormap_type, **kwargs):
if _pandas_imported is False:
raise ImportError("FigureFactory.scatterplotmatrix requires "
"a pandas DataFrame.")
# Check if pandas dataframe
if not isinstance(df, pd.core.frame.DataFrame):
raise exceptions.PlotlyError("Dataframe not inputed. Please "
"use a pandas dataframe to pro"
"duce a scatterplot matrix.")
# Check if dataframe is 1 column or less
if len(df.columns) <= 1:
raise exceptions.PlotlyError("Dataframe has only one column. To "
"use the scatterplot matrix, use at "
"least 2 columns.")
# Check that diag parameter is a valid selection
if diag not in DIAG_CHOICES:
raise exceptions.PlotlyError("Make sure diag is set to "
"one of {}".format(DIAG_CHOICES))
# Check that colormap_types is a valid selection
if colormap_type not in VALID_COLORMAP_TYPES:
raise exceptions.PlotlyError("Must choose a valid colormap type. "
"Either 'cat' or 'seq' for a cate"
"gorical and sequential colormap "
"respectively.")
# Check for not 'size' or 'color' in 'marker' of **kwargs
if 'marker' in kwargs:
FORBIDDEN_PARAMS = ['size', 'color', 'colorscale']
if any(param in kwargs['marker'] for param in FORBIDDEN_PARAMS):
raise exceptions.PlotlyError("Your kwargs dictionary cannot "
"include the 'size', 'color' or "
"'colorscale' key words inside "
"the marker dict since 'size' is "
"already an argument of the "
"scatterplot matrix function and "
"both 'color' and 'colorscale "
"are set internally.")
@staticmethod
def _endpts_to_intervals(endpts):
length = len(endpts)
if not (isinstance(endpts, (tuple)) or isinstance(endpts, (list))):
raise exceptions.PlotlyError("The intervals_endpts argument must "
"be a list or tuple of a sequence "
"of increasing numbers.")
for item in endpts:
if isinstance(item, str):
raise exceptions.PlotlyError("The intervals_endpts argument "
"must be a list or tuple of a "
"sequence of increasing "
"numbers.")
for k in range(length-1):
if endpts[k] >= endpts[k+1]:
raise exceptions.PlotlyError("The intervals_endpts argument "
"must be a list or tuple of a "
"sequence of increasing "
"numbers.")
else:
intervals = []
intervals.append([float('-inf'), endpts[0]])
for k in range(length - 1):
interval = []
interval.append(endpts[k])
interval.append(endpts[k + 1])
intervals.append(interval)
intervals.append([endpts[length - 1], float('inf')])
return intervals
@staticmethod
def _convert_to_RGB_255(colors):
rgb_components = []
for component in colors:
rounded_num = decimal.Decimal(str(component*255.0)).quantize(
decimal.Decimal('1'), rounding=decimal.ROUND_HALF_EVEN
)
rounded_num = int(rounded_num)
rgb_components.append(rounded_num)
return (rgb_components[0], rgb_components[1], rgb_components[2])
@staticmethod
def _n_colors(lowcolor, highcolor, n_colors):
diff_0 = float(highcolor[0] - lowcolor[0])
incr_0 = diff_0/(n_colors - 1)
diff_1 = float(highcolor[1] - lowcolor[1])
incr_1 = diff_1/(n_colors - 1)
diff_2 = float(highcolor[2] - lowcolor[2])
incr_2 = diff_2/(n_colors - 1)
color_tuples = []
for index in range(n_colors):
new_tuple = (lowcolor[0] + (index * incr_0),
lowcolor[1] + (index * incr_1),
lowcolor[2] + (index * incr_2))
color_tuples.append(new_tuple)
return color_tuples
@staticmethod
def _label_rgb(colors):
return ('rgb(%s, %s, %s)' % (colors[0], colors[1], colors[2]))
@staticmethod
def _unlabel_rgb(colors):
str_vals = ''
for index in range(len(colors)):
try:
float(colors[index])
str_vals = str_vals + colors[index]
except ValueError:
if colors[index] == ',' or colors[index] == '.':
str_vals = str_vals + colors[index]
str_vals = str_vals + ','
numbers = []
str_num = ''
for char in str_vals:
if char != ',':
str_num = str_num + char
else:
numbers.append(float(str_num))
str_num = ''
return (numbers[0], numbers[1], numbers[2])
    @staticmethod
    def create_scatterplotmatrix(df, index=None, endpts=None, diag='scatter',
                                 height=500, width=500, size=6,
                                 title='Scatterplot Matrix', colormap=None,
                                 colormap_type='cat', dataframe=None,
                                 headers=None, index_vals=None, **kwargs):
        """Return a scatterplot-matrix figure built from a pandas DataFrame.

        :param (DataFrame) df: source data; one subplot per column pair
        :param (str|None) index: column used to group/color points
        :param endpts: interval endpoints for bucketing a numeric index
        :param (str) diag: plot type on the diagonal (see DIAG_CHOICES)
        :param (int) height: figure height in px
        :param (int) width: figure width in px
        :param (int) size: marker size
        :param (str) title: figure title
        :param colormap: dict, list of colors, or None
        :param (str) colormap_type: 'cat' (categorical) or 'seq' (sequential)
        :param dataframe: internal accumulator; leave as None
        :param headers: internal accumulator; leave as None
        :param index_vals: internal accumulator; leave as None
        :param kwargs: passed through to the underlying traces

        Dispatches to _scatterplot (no index), _scatterplot_dict (dict
        colormap) or _scatterplot_theme (list colormap).
        """
        # dataframe/headers/index_vals default to None rather than mutable
        # [] literals; fresh lists are created per call here.
        if dataframe is None:
            dataframe = []
        if headers is None:
            headers = []
        if index_vals is None:
            index_vals = []
        FigureFactory._validate_scatterplotmatrix(df, index, diag,
                                                  colormap_type, **kwargs)
        if isinstance(colormap, dict):
            colormap = FigureFactory._validate_colors_dict(colormap, 'rgb')
        else:
            colormap = FigureFactory._validate_colors(colormap, 'rgb')
        if not index:
            # No grouping column: plot every column against every column.
            for name in df:
                headers.append(name)
            for name in headers:
                dataframe.append(df[name].values.tolist())
            FigureFactory._validate_dataframe(dataframe)
            figure = FigureFactory._scatterplot(dataframe, headers, diag,
                                                size, height, width, title,
                                                **kwargs)
            return figure
        else:
            if index not in df:
                raise exceptions.PlotlyError("Make sure you set the index "
                                             "input variable to one of the "
                                             "column names of your "
                                             "dataframe.")
            index_vals = df[index].values.tolist()
            # All columns except the index column become matrix axes.
            for name in df:
                if name != index:
                    headers.append(name)
            for name in headers:
                dataframe.append(df[name].values.tolist())
            FigureFactory._validate_dataframe(dataframe)
            FigureFactory._validate_index(index_vals)
            if isinstance(colormap, dict):
                # Every index value must have an explicit color entry.
                for key in colormap:
                    if not all(index in colormap for index in index_vals):
                        raise exceptions.PlotlyError("If colormap is a "
                                                     "dictionary, all the "
                                                     "names in the index "
                                                     "must be keys.")
                figure = FigureFactory._scatterplot_dict(
                    dataframe, headers, diag, size, height, width, title,
                    index, index_vals, endpts, colormap, colormap_type,
                    **kwargs
                )
                return figure
            else:
                figure = FigureFactory._scatterplot_theme(
                    dataframe, headers, diag, size, height, width, title,
                    index, index_vals, endpts, colormap, colormap_type,
                    **kwargs
                )
                return figure
@staticmethod
def _validate_equal_length(*args):
length = len(args[0])
if any(len(lst) != length for lst in args):
raise exceptions.PlotlyError("Oops! Your data lists or ndarrays "
"should be the same length.")
@staticmethod
def _validate_ohlc(open, high, low, close, direction, **kwargs):
for lst in [open, low, close]:
for index in range(len(high)):
if high[index] < lst[index]:
raise exceptions.PlotlyError("Oops! Looks like some of "
"your high values are less "
"the corresponding open, "
"low, or close values. "
"Double check that your data "
"is entered in O-H-L-C order")
for lst in [open, high, close]:
for index in range(len(low)):
if low[index] > lst[index]:
raise exceptions.PlotlyError("Oops! Looks like some of "
"your low values are greater "
"than the corresponding high"
", open, or close values. "
"Double check that your data "
"is entered in O-H-L-C order")
direction_opts = ('increasing', 'decreasing', 'both')
if direction not in direction_opts:
raise exceptions.PlotlyError("direction must be defined as "
"'increasing', 'decreasing', or "
"'both'")
@staticmethod
def _validate_distplot(hist_data, curve_type):
try:
import pandas as pd
_pandas_imported = True
except ImportError:
_pandas_imported = False
hist_data_types = (list,)
if _numpy_imported:
hist_data_types += (np.ndarray,)
if _pandas_imported:
hist_data_types += (pd.core.series.Series,)
if not isinstance(hist_data[0], hist_data_types):
raise exceptions.PlotlyError("Oops, this function was written "
"to handle multiple datasets, if "
"you want to plot just one, make "
"sure your hist_data variable is "
"still a list of lists, i.e. x = "
"[1, 2, 3] -> x = [[1, 2, 3]]")
curve_opts = ('kde', 'normal')
if curve_type not in curve_opts:
raise exceptions.PlotlyError("curve_type must be defined as "
"'kde' or 'normal'")
if _scipy_imported is False:
raise ImportError("FigureFactory.create_distplot requires scipy")
@staticmethod
def _validate_positive_scalars(**kwargs):
for key, val in kwargs.items():
try:
if val <= 0:
raise ValueError('{} must be > 0, got {}'.format(key, val))
except TypeError:
raise exceptions.PlotlyError('{} must be a number, got {}'
.format(key, val))
@staticmethod
def _validate_streamline(x, y):
if _numpy_imported is False:
raise ImportError("FigureFactory.create_streamline requires numpy")
for index in range(len(x) - 1):
if ((x[index + 1] - x[index]) - (x[1] - x[0])) > .0001:
raise exceptions.PlotlyError("x must be a 1 dimensional, "
"evenly spaced array")
for index in range(len(y) - 1):
if ((y[index + 1] - y[index]) -
(y[1] - y[0])) > .0001:
raise exceptions.PlotlyError("y must be a 1 dimensional, "
"evenly spaced array")
@staticmethod
def _validate_annotated_heatmap(z, x, y, annotation_text):
if annotation_text is not None and isinstance(annotation_text, list):
FigureFactory._validate_equal_length(z, annotation_text)
for lst in range(len(z)):
if len(z[lst]) != len(annotation_text[lst]):
raise exceptions.PlotlyError("z and text should have the "
"same dimensions")
if x:
if len(x) != len(z[0]):
raise exceptions.PlotlyError("oops, the x list that you "
"provided does not match the "
"width of your z matrix ")
if y:
if len(y) != len(z):
raise exceptions.PlotlyError("oops, the y list that you "
"provided does not match the "
"length of your z matrix ")
@staticmethod
def _validate_table(table_text, font_colors):
font_colors_len_options = [1, 3, len(table_text)]
if len(font_colors) not in font_colors_len_options:
raise exceptions.PlotlyError("Oops, font_colors should be a list "
"of length 1, 3 or len(text)")
@staticmethod
def _flatten(array):
try:
return [item for sublist in array for item in sublist]
except TypeError:
raise exceptions.PlotlyError("Your data array could not be "
"flattened! Make sure your data is "
"entered as lists or ndarrays!")
@staticmethod
def _hex_to_rgb(value):
value = value.lstrip('#')
hex_total_length = len(value)
rgb_section_length = hex_total_length // 3
return tuple(int(value[i:i + rgb_section_length], 16)
for i in range(0, hex_total_length, rgb_section_length))
@staticmethod
def create_quiver(x, y, u, v, scale=.1, arrow_scale=.3,
angle=math.pi / 9, **kwargs):
from plotly.graph_objs import graph_objs
FigureFactory._validate_equal_length(x, y, u, v)
FigureFactory._validate_positive_scalars(arrow_scale=arrow_scale,
scale=scale)
barb_x, barb_y = _Quiver(x, y, u, v, scale,
arrow_scale, angle).get_barbs()
arrow_x, arrow_y = _Quiver(x, y, u, v, scale,
arrow_scale, angle).get_quiver_arrows()
quiver = graph_objs.Scatter(x=barb_x + arrow_x,
y=barb_y + arrow_y,
mode='lines', **kwargs)
data = [quiver]
layout = graph_objs.Layout(hovermode='closest')
return graph_objs.Figure(data=data, layout=layout)
@staticmethod
def create_streamline(x, y, u, v,
density=1, angle=math.pi / 9,
arrow_scale=.09, **kwargs):
from plotly.graph_objs import graph_objs
FigureFactory._validate_equal_length(x, y)
FigureFactory._validate_equal_length(u, v)
FigureFactory._validate_streamline(x, y)
FigureFactory._validate_positive_scalars(density=density,
arrow_scale=arrow_scale)
streamline_x, streamline_y = _Streamline(x, y, u, v,
density, angle,
arrow_scale).sum_streamlines()
arrow_x, arrow_y = _Streamline(x, y, u, v,
density, angle,
arrow_scale).get_streamline_arrows()
streamline = graph_objs.Scatter(x=streamline_x + arrow_x,
y=streamline_y + arrow_y,
mode='lines', **kwargs)
data = [streamline]
layout = graph_objs.Layout(hovermode='closest')
return graph_objs.Figure(data=data, layout=layout)
@staticmethod
def _make_increasing_ohlc(open, high, low, close, dates, **kwargs):
(flat_increase_x,
flat_increase_y,
text_increase) = _OHLC(open, high, low, close, dates).get_increase()
if 'name' in kwargs:
showlegend = True
else:
kwargs.setdefault('name', 'Increasing')
showlegend = False
kwargs.setdefault('line', dict(color=_DEFAULT_INCREASING_COLOR,
width=1))
kwargs.setdefault('text', text_increase)
ohlc_incr = dict(type='scatter',
x=flat_increase_x,
y=flat_increase_y,
mode='lines',
showlegend=showlegend,
**kwargs)
return ohlc_incr
@staticmethod
def _make_decreasing_ohlc(open, high, low, close, dates, **kwargs):
(flat_decrease_x,
flat_decrease_y,
text_decrease) = _OHLC(open, high, low, close, dates).get_decrease()
kwargs.setdefault('line', dict(color=_DEFAULT_DECREASING_COLOR,
width=1))
kwargs.setdefault('text', text_decrease)
kwargs.setdefault('showlegend', False)
kwargs.setdefault('name', 'Decreasing')
ohlc_decr = dict(type='scatter',
x=flat_decrease_x,
y=flat_decrease_y,
mode='lines',
**kwargs)
return ohlc_decr
@staticmethod
def create_ohlc(open, high, low, close,
dates=None, direction='both',
**kwargs):
from plotly.graph_objs import graph_objs
if dates is not None:
FigureFactory._validate_equal_length(open, high, low, close, dates)
else:
FigureFactory._validate_equal_length(open, high, low, close)
FigureFactory._validate_ohlc(open, high, low, close, direction,
**kwargs)
if direction is 'increasing':
ohlc_incr = FigureFactory._make_increasing_ohlc(open, high,
low, close,
dates, **kwargs)
data = [ohlc_incr]
elif direction is 'decreasing':
ohlc_decr = FigureFactory._make_decreasing_ohlc(open, high,
low, close,
dates, **kwargs)
data = [ohlc_decr]
else:
ohlc_incr = FigureFactory._make_increasing_ohlc(open, high,
low, close,
dates, **kwargs)
ohlc_decr = FigureFactory._make_decreasing_ohlc(open, high,
low, close,
dates, **kwargs)
data = [ohlc_incr, ohlc_decr]
layout = graph_objs.Layout(xaxis=dict(zeroline=False),
hovermode='closest')
return graph_objs.Figure(data=data, layout=layout)
@staticmethod
def _make_increasing_candle(open, high, low, close, dates, **kwargs):
increase_x, increase_y = _Candlestick(
open, high, low, close, dates, **kwargs).get_candle_increase()
if 'line' in kwargs:
kwargs.setdefault('fillcolor', kwargs['line']['color'])
else:
kwargs.setdefault('fillcolor', _DEFAULT_INCREASING_COLOR)
if 'name' in kwargs:
kwargs.setdefault('showlegend', True)
else:
kwargs.setdefault('showlegend', False)
kwargs.setdefault('name', 'Increasing')
kwargs.setdefault('line', dict(color=_DEFAULT_INCREASING_COLOR))
candle_incr_data = dict(type='box',
x=increase_x,
y=increase_y,
whiskerwidth=0,
boxpoints=False,
**kwargs)
return [candle_incr_data]
@staticmethod
def _make_decreasing_candle(open, high, low, close, dates, **kwargs):
decrease_x, decrease_y = _Candlestick(
open, high, low, close, dates, **kwargs).get_candle_decrease()
if 'line' in kwargs:
kwargs.setdefault('fillcolor', kwargs['line']['color'])
else:
kwargs.setdefault('fillcolor', _DEFAULT_DECREASING_COLOR)
kwargs.setdefault('showlegend', False)
kwargs.setdefault('line', dict(color=_DEFAULT_DECREASING_COLOR))
kwargs.setdefault('name', 'Decreasing')
candle_decr_data = dict(type='box',
x=decrease_x,
y=decrease_y,
whiskerwidth=0,
boxpoints=False,
**kwargs)
return [candle_decr_data]
@staticmethod
def create_candlestick(open, high, low, close,
dates=None, direction='both', **kwargs):
from plotly.graph_objs import graph_objs
if dates is not None:
FigureFactory._validate_equal_length(open, high, low, close, dates)
else:
FigureFactory._validate_equal_length(open, high, low, close)
FigureFactory._validate_ohlc(open, high, low, close, direction,
**kwargs)
if direction is 'increasing':
candle_incr_data = FigureFactory._make_increasing_candle(
open, high, low, close, dates, **kwargs)
data = candle_incr_data
elif direction is 'decreasing':
candle_decr_data = FigureFactory._make_decreasing_candle(
open, high, low, close, dates, **kwargs)
data = candle_decr_data
else:
candle_incr_data = FigureFactory._make_increasing_candle(
open, high, low, close, dates, **kwargs)
candle_decr_data = FigureFactory._make_decreasing_candle(
open, high, low, close, dates, **kwargs)
data = candle_incr_data + candle_decr_data
layout = graph_objs.Layout()
return graph_objs.Figure(data=data, layout=layout)
@staticmethod
def create_distplot(hist_data, group_labels,
bin_size=1., curve_type='kde',
colors=[], rug_text=[], histnorm=DEFAULT_HISTNORM,
show_hist=True, show_curve=True,
show_rug=True):
from plotly.graph_objs import graph_objs
FigureFactory._validate_distplot(hist_data, curve_type)
FigureFactory._validate_equal_length(hist_data, group_labels)
if isinstance(bin_size, (float, int)):
bin_size = [bin_size]*len(hist_data)
hist = _Distplot(
hist_data, histnorm, group_labels, bin_size,
curve_type, colors, rug_text,
show_hist, show_curve).make_hist()
if curve_type == 'normal':
curve = _Distplot(
hist_data, histnorm, group_labels, bin_size,
curve_type, colors, rug_text,
show_hist, show_curve).make_normal()
else:
curve = _Distplot(
hist_data, histnorm, group_labels, bin_size,
curve_type, colors, rug_text,
show_hist, show_curve).make_kde()
rug = _Distplot(
hist_data, histnorm, group_labels, bin_size,
curve_type, colors, rug_text,
show_hist, show_curve).make_rug()
data = []
if show_hist:
data.append(hist)
if show_curve:
data.append(curve)
if show_rug:
data.append(rug)
layout = graph_objs.Layout(
barmode='overlay',
hovermode='closest',
legend=dict(traceorder='reversed'),
xaxis1=dict(domain=[0.0, 1.0],
anchor='y2',
zeroline=False),
yaxis1=dict(domain=[0.35, 1],
anchor='free',
position=0.0),
yaxis2=dict(domain=[0, 0.25],
anchor='x1',
dtick=1,
showticklabels=False))
else:
layout = graph_objs.Layout(
barmode='overlay',
hovermode='closest',
legend=dict(traceorder='reversed'),
xaxis1=dict(domain=[0.0, 1.0],
anchor='y2',
zeroline=False),
yaxis1=dict(domain=[0., 1],
anchor='free',
position=0.0))
data = sum(data, [])
return graph_objs.Figure(data=data, layout=layout)
@staticmethod
def create_dendrogram(X, orientation="bottom", labels=None,
colorscale=None):
dependencies = (_scipy_imported and _scipy__spatial_imported and
_scipy__cluster__hierarchy_imported)
if dependencies is False:
raise ImportError("FigureFactory.create_dendrogram requires scipy, \
scipy.spatial and scipy.hierarchy")
s = X.shape
if len(s) != 2:
exceptions.PlotlyError("X should be 2-dimensional array.")
dendrogram = _Dendrogram(X, orientation, labels, colorscale)
return {'layout': dendrogram.layout,
'data': dendrogram.data}
@staticmethod
def create_annotated_heatmap(z, x=None, y=None, annotation_text=None,
colorscale='RdBu', font_colors=None,
showscale=False, reversescale=False,
**kwargs):
from plotly.graph_objs import graph_objs
font_colors = font_colors if font_colors is not None else []
FigureFactory._validate_annotated_heatmap(z, x, y, annotation_text)
annotations = _AnnotatedHeatmap(z, x, y, annotation_text,
colorscale, font_colors, reversescale,
**kwargs).make_annotations()
if x or y:
trace = dict(type='heatmap', z=z, x=x, y=y, colorscale=colorscale,
showscale=showscale, **kwargs)
layout = dict(annotations=annotations,
xaxis=dict(ticks='', dtick=1, side='top',
gridcolor='rgb(0, 0, 0)'),
yaxis=dict(ticks='', dtick=1, ticksuffix=' '))
else:
trace = dict(type='heatmap', z=z, colorscale=colorscale,
showscale=showscale, **kwargs)
layout = dict(annotations=annotations,
xaxis=dict(ticks='', side='top',
gridcolor='rgb(0, 0, 0)',
showticklabels=False),
yaxis=dict(ticks='', ticksuffix=' ',
showticklabels=False))
data = [trace]
return graph_objs.Figure(data=data, layout=layout)
@staticmethod
def create_table(table_text, colorscale=None, font_colors=None,
index=False, index_title='', annotation_offset=.45,
height_constant=30, hoverinfo='none', **kwargs):
from plotly.graph_objs import graph_objs
colorscale = \
colorscale if colorscale is not None else [[0, '#00083e'],
[.5, '#ededee'],
[1, '#ffffff']]
font_colors = font_colors if font_colors is not None else ['#ffffff',
'#000000',
'#000000']
FigureFactory._validate_table(table_text, font_colors)
table_matrix = _Table(table_text, colorscale, font_colors, index,
index_title, annotation_offset,
**kwargs).get_table_matrix()
annotations = _Table(table_text, colorscale, font_colors, index,
index_title, annotation_offset,
**kwargs).make_table_annotations()
trace = dict(type='heatmap', z=table_matrix, opacity=.75,
colorscale=colorscale, showscale=False,
hoverinfo=hoverinfo, **kwargs)
data = [trace]
layout = dict(annotations=annotations,
height=len(table_matrix)*height_constant + 50,
margin=dict(t=0, b=0, r=0, l=0),
yaxis=dict(autorange='reversed', zeroline=False,
gridwidth=2, ticks='', dtick=1, tick0=.5,
showticklabels=False),
xaxis=dict(zeroline=False, gridwidth=2, ticks='',
dtick=1, tick0=-0.5, showticklabels=False))
return graph_objs.Figure(data=data, layout=layout)
class _Quiver(FigureFactory):
    """Geometry helper for FigureFactory.create_quiver().

    Computes the barb line segments and arrowhead segments for a quiver
    plot; __init__ pre-computes everything so the getters are cheap.
    """
    def __init__(self, x, y, u, v,
                 scale, arrow_scale, angle, **kwargs):
        # Flatten 2-D (meshgrid-style) inputs; non-nested inputs raise
        # PlotlyError inside _flatten and are kept as-is.
        try:
            x = FigureFactory._flatten(x)
        except exceptions.PlotlyError:
            pass
        try:
            y = FigureFactory._flatten(y)
        except exceptions.PlotlyError:
            pass
        try:
            u = FigureFactory._flatten(u)
        except exceptions.PlotlyError:
            pass
        try:
            v = FigureFactory._flatten(v)
        except exceptions.PlotlyError:
            pass
        self.x = x
        self.y = y
        self.u = u
        self.v = v
        self.scale = scale
        self.arrow_scale = arrow_scale
        self.angle = angle
        self.end_x = []
        self.end_y = []
        self.scale_uv()
        barb_x, barb_y = self.get_barbs()
        arrow_x, arrow_y = self.get_quiver_arrows()
    def scale_uv(self):
        """Scale the u and v components in place by self.scale."""
        self.u = [i * self.scale for i in self.u]
        self.v = [i * self.scale for i in self.v]
    def get_barbs(self):
        """Return (barb_x, barb_y): segments from each (x, y) to
        (x + u, y + v), separated by None entries so plotly draws them as
        disconnected lines. Also caches the end points on self.end_x/end_y.
        """
        self.end_x = [i + j for i, j in zip(self.x, self.u)]
        self.end_y = [i + j for i, j in zip(self.y, self.v)]
        empty = [None] * len(self.x)
        barb_x = FigureFactory._flatten(zip(self.x, self.end_x, empty))
        barb_y = FigureFactory._flatten(zip(self.y, self.end_y, empty))
        return barb_x, barb_y
    def get_quiver_arrows(self):
        """Return (arrow_x, arrow_y): the two arrowhead segments per barb.

        Requires self.end_x/self.end_y to be populated (done by get_barbs
        in __init__).
        """
        dif_x = [i - j for i, j in zip(self.end_x, self.x)]
        dif_y = [i - j for i, j in zip(self.end_y, self.y)]
        # barb length, arrowhead length and barb orientation per arrow
        barb_len = [math.hypot(dx, dy) for dx, dy in zip(dif_x, dif_y)]
        arrow_len = [i * self.arrow_scale for i in barb_len]
        barb_ang = [math.atan2(dy, dx) for dx, dy in zip(dif_x, dif_y)]
        ang1 = [i + self.angle for i in barb_ang]
        ang2 = [i - self.angle for i in barb_ang]
        seg1_x = [length * math.cos(a) for length, a in zip(arrow_len, ang1)]
        seg1_y = [length * math.sin(a) for length, a in zip(arrow_len, ang1)]
        seg2_x = [length * math.cos(a) for length, a in zip(arrow_len, ang2)]
        seg2_y = [length * math.sin(a) for length, a in zip(arrow_len, ang2)]
        # Bug fix: the original recomputed these four lists inside a loop
        # over range(len(self.end_x)) with identical results every
        # iteration -- O(n^2) wasted work. Compute them once.
        point1_x = [i - j for i, j in zip(self.end_x, seg1_x)]
        point1_y = [i - j for i, j in zip(self.end_y, seg1_y)]
        point2_x = [i - j for i, j in zip(self.end_x, seg2_x)]
        point2_y = [i - j for i, j in zip(self.end_y, seg2_y)]
        empty = [None] * len(self.end_x)
        arrow_x = FigureFactory._flatten(zip(point1_x, self.end_x,
                                             point2_x, empty))
        arrow_y = FigureFactory._flatten(zip(point1_y, self.end_y,
                                             point2_y, empty))
        return arrow_x, arrow_y
class _Streamline(FigureFactory):
    """
    Refer to FigureFactory.create_streamline() for docstring.

    Traces streamlines of a 2-D vector field defined on a uniform grid,
    plus the arrow heads that decorate them.
    """
    def __init__(self, x, y, u, v,
                 density, angle,
                 arrow_scale, **kwargs):
        self.x = np.array(x)
        self.y = np.array(y)
        self.u = np.array(u)
        self.v = np.array(v)
        self.angle = angle
        self.arrow_scale = arrow_scale
        self.density = int(30 * density)
        self.delta_x = self.x[1] - self.x[0]
        self.delta_y = self.y[1] - self.y[0]
        self.val_x = self.x
        self.val_y = self.y

        # "blank" is the occupancy grid that keeps streamlines spaced out.
        self.blank = np.zeros((self.density, self.density))
        self.spacing_x = len(self.x) / float(self.density - 1)
        self.spacing_y = len(self.y) / float(self.density - 1)
        self.trajectories = []

        # Rescale the field into grid-index coordinates for integration.
        self.u = self.u / (self.x[-1] - self.x[0])
        self.v = self.v / (self.y[-1] - self.y[0])
        self.speed = np.sqrt(self.u ** 2 + self.v ** 2)

        self.u *= len(self.x)
        self.v *= len(self.y)
        self.st_x = []
        self.st_y = []
        self.get_streamlines()
        streamline_x, streamline_y = self.sum_streamlines()
        arrows_x, arrows_y = self.get_streamline_arrows()

    def blank_pos(self, xi, yi):
        """Return the occupancy-grid cell containing point (xi, yi)."""
        return (int((xi / self.spacing_x) + 0.5),
                int((yi / self.spacing_y) + 0.5))

    def value_at(self, a, xi, yi):
        """Bilinearly interpolate array *a* at fractional index (xi, yi)."""
        if isinstance(xi, np.ndarray):
            # BUG FIX: the original assigned to self.x / self.y here
            # (clobbering the grid) and used np.int, which was removed in
            # NumPy 1.24; set the interpolation anchors instead.
            self.val_x = xi.astype(int)
            self.val_y = yi.astype(int)
        else:
            # np.int was a deprecated alias of the builtin int (removed in
            # NumPy 1.24); the builtin is equivalent here.
            self.val_x = int(xi)
            self.val_y = int(yi)
        a00 = a[self.val_y, self.val_x]
        a01 = a[self.val_y, self.val_x + 1]
        a10 = a[self.val_y + 1, self.val_x]
        a11 = a[self.val_y + 1, self.val_x + 1]
        xt = xi - self.val_x
        yt = yi - self.val_y
        a0 = a00 * (1 - xt) + a01 * xt
        a1 = a10 * (1 - xt) + a11 * xt
        return a0 * (1 - yt) + a1 * yt

    def rk4_integrate(self, x0, y0):
        """
        Integrate a streamline through (x0, y0) with 4th-order Runge-Kutta,
        in both the forward and backward field directions, marking visited
        occupancy cells so streamlines do not crowd each other.

        Returns (x_traj, y_traj), or None when the trajectory is empty or
        too short (in which case its cell marks are rolled back).
        """
        def f(xi, yi):
            dt_ds = 1. / self.value_at(self.speed, xi, yi)
            ui = self.value_at(self.u, xi, yi)
            vi = self.value_at(self.v, xi, yi)
            return ui * dt_ds, vi * dt_ds

        def g(xi, yi):
            dt_ds = 1. / self.value_at(self.speed, xi, yi)
            ui = self.value_at(self.u, xi, yi)
            vi = self.value_at(self.v, xi, yi)
            return -ui * dt_ds, -vi * dt_ds

        check = lambda xi, yi: (0 <= xi < len(self.x) - 1 and
                                0 <= yi < len(self.y) - 1)
        xb_changes = []
        yb_changes = []

        def rk4(x0, y0, f):
            # Fixed-step RK4 march; stops at the domain edge, at an
            # already-occupied cell, or after total arc length 2.
            ds = 0.01
            stotal = 0
            xi = x0
            yi = y0
            xb, yb = self.blank_pos(xi, yi)
            xf_traj = []
            yf_traj = []
            while check(xi, yi):
                xf_traj.append(xi)
                yf_traj.append(yi)
                try:
                    k1x, k1y = f(xi, yi)
                    k2x, k2y = f(xi + .5 * ds * k1x, yi + .5 * ds * k1y)
                    k3x, k3y = f(xi + .5 * ds * k2x, yi + .5 * ds * k2y)
                    k4x, k4y = f(xi + ds * k3x, yi + ds * k3y)
                except IndexError:
                    break
                xi += ds * (k1x + 2 * k2x + 2 * k3x + k4x) / 6.
                yi += ds * (k1y + 2 * k2y + 2 * k3y + k4y) / 6.
                if not check(xi, yi):
                    break
                stotal += ds
                new_xb, new_yb = self.blank_pos(xi, yi)
                if new_xb != xb or new_yb != yb:
                    if self.blank[new_yb, new_xb] == 0:
                        self.blank[new_yb, new_xb] = 1
                        xb_changes.append(new_xb)
                        yb_changes.append(new_yb)
                        xb = new_xb
                        yb = new_yb
                    else:
                        break
                if stotal > 2:
                    break
            return stotal, xf_traj, yf_traj

        sf, xf_traj, yf_traj = rk4(x0, y0, f)
        sb, xb_traj, yb_traj = rk4(x0, y0, g)
        stotal = sf + sb
        # Stitch backward (reversed) and forward halves, dropping the
        # duplicated seed point.
        x_traj = xb_traj[::-1] + xf_traj[1:]
        y_traj = yb_traj[::-1] + yf_traj[1:]

        if len(x_traj) < 1:
            return None

        if stotal > .2:
            initxb, inityb = self.blank_pos(x0, y0)
            self.blank[inityb, initxb] = 1
            return x_traj, y_traj
        else:
            # Too short: undo the occupancy marks this attempt made.
            for xb, yb in zip(xb_changes, yb_changes):
                self.blank[yb, xb] = 0
            return None

    def traj(self, xb, yb):
        """Seed a trajectory at occupancy cell (xb, yb) if it is free."""
        if xb < 0 or xb >= self.density or yb < 0 or yb >= self.density:
            return
        if self.blank[yb, xb] == 0:
            t = self.rk4_integrate(xb * self.spacing_x, yb * self.spacing_y)
            if t is not None:
                self.trajectories.append(t)

    def get_streamlines(self):
        """
        Seed trajectories spiralling inward from the domain boundary, then
        convert them to data coordinates with NaN separators per line.
        """
        for indent in range(self.density // 2):
            for xi in range(self.density - 2 * indent):
                self.traj(xi + indent, indent)
                self.traj(xi + indent, self.density - 1 - indent)
                self.traj(indent, xi + indent)
                self.traj(self.density - 1 - indent, xi + indent)

        self.st_x = [np.array(t[0]) * self.delta_x + self.x[0] for t in
                     self.trajectories]
        self.st_y = [np.array(t[1]) * self.delta_y + self.y[0] for t in
                     self.trajectories]

        for index in range(len(self.st_x)):
            self.st_x[index] = self.st_x[index].tolist()
            self.st_x[index].append(np.nan)

        for index in range(len(self.st_y)):
            self.st_y[index] = self.st_y[index].tolist()
            self.st_y[index].append(np.nan)

    def get_streamline_arrows(self):
        """
        Build one arrow head per streamline, anchored a third of the way
        along it, oriented with the local direction of travel.

        Returns (arrows_x, arrows_y): flat lists of
        [edge1, tip, edge2, nan] per arrow.
        """
        arrow_end_x = np.empty((len(self.st_x)))
        arrow_end_y = np.empty((len(self.st_y)))
        arrow_start_x = np.empty((len(self.st_x)))
        arrow_start_y = np.empty((len(self.st_y)))
        for index in range(len(self.st_x)):
            arrow_end_x[index] = (self.st_x[index]
                                  [int(len(self.st_x[index]) / 3)])
            arrow_start_x[index] = (self.st_x[index]
                                    [(int(len(self.st_x[index]) / 3)) - 1])
            arrow_end_y[index] = (self.st_y[index]
                                  [int(len(self.st_y[index]) / 3)])
            arrow_start_y[index] = (self.st_y[index]
                                    [(int(len(self.st_y[index]) / 3)) - 1])

        dif_x = arrow_end_x - arrow_start_x
        dif_y = arrow_end_y - arrow_start_y

        streamline_ang = np.arctan(dif_y / dif_x)

        ang1 = streamline_ang + (self.angle)
        ang2 = streamline_ang - (self.angle)

        seg1_x = np.cos(ang1) * self.arrow_scale
        seg1_y = np.sin(ang1) * self.arrow_scale
        seg2_x = np.cos(ang2) * self.arrow_scale
        seg2_y = np.sin(ang2) * self.arrow_scale

        point1_x = np.empty((len(dif_x)))
        point1_y = np.empty((len(dif_y)))
        point2_x = np.empty((len(dif_x)))
        point2_y = np.empty((len(dif_y)))

        # arctan loses the quadrant; flip the offsets for leftward flow.
        for index in range(len(dif_x)):
            if dif_x[index] >= 0:
                point1_x[index] = arrow_end_x[index] - seg1_x[index]
                point1_y[index] = arrow_end_y[index] - seg1_y[index]
                point2_x[index] = arrow_end_x[index] - seg2_x[index]
                point2_y[index] = arrow_end_y[index] - seg2_y[index]
            else:
                point1_x[index] = arrow_end_x[index] + seg1_x[index]
                point1_y[index] = arrow_end_y[index] + seg1_y[index]
                point2_x[index] = arrow_end_x[index] + seg2_x[index]
                point2_y[index] = arrow_end_y[index] + seg2_y[index]

        space = np.empty((len(point1_x)))
        space[:] = np.nan

        # np.matrix is deprecated; a plain 2-D ndarray flattened
        # column-major ('F') interleaves [edge1, tip, edge2, nan] per arrow
        # exactly as before (the original converted the matrix to an array
        # anyway).
        arrows_x = np.array([point1_x, arrow_end_x, point2_x, space])
        arrows_x = arrows_x.flatten('F')
        arrows_x = arrows_x.tolist()

        arrows_y = np.array([point1_y, arrow_end_y, point2_y, space])
        arrows_y = arrows_y.flatten('F')
        arrows_y = arrows_y.tolist()

        return arrows_x, arrows_y

    def sum_streamlines(self):
        """Concatenate all per-streamline coordinate lists into two flat lists."""
        streamline_x = sum(self.st_x, [])
        streamline_y = sum(self.st_y, [])
        return streamline_x, streamline_y
class _OHLC(FigureFactory):
    """
    Refer to FigureFactory.create_ohlc() for docstring.

    Builds the vertex lists for OHLC bars and splits them into
    increasing / decreasing groups.
    """
    def __init__(self, open, high, low, close, dates, **kwargs):
        self.open = open
        self.high = high
        self.low = low
        self.close = close
        self.empty = [None] * len(open)
        self.dates = dates

        self.all_x = []
        self.all_y = []
        self.increase_x = []
        self.increase_y = []
        self.decrease_x = []
        self.decrease_y = []
        self.get_all_xy()
        self.separate_increase_decrease()

    def get_all_xy(self):
        """Build the seven (x, y) vertices per bar, None-terminated."""
        self.all_y = list(zip(self.open, self.open, self.high,
                              self.low, self.close, self.close, self.empty))
        if self.dates is not None:
            # Tick width is one fifth of the smallest gap between dates.
            gaps = [b - a for a, b in zip(self.dates[:-1], self.dates[1:])]
            tick = min(gaps) / 5
            self.all_x = [[d - tick, d, d, d, d, d + tick, None]
                          for d in self.dates]
        else:
            self.all_x = [[i - .2, i, i, i, i, i + .2, None]
                          for i in range(len(self.open))]

    def separate_increase_decrease(self):
        """Split bars by close vs open; bars with a None close are dropped."""
        for xs, ys, op, cl in zip(self.all_x, self.all_y,
                                  self.open, self.close):
            if cl is None:
                continue
            if cl > op:
                self.increase_x.append(xs)
                self.increase_y.append(ys)
            else:
                self.decrease_x.append(xs)
                self.decrease_y.append(ys)

    def get_increase(self):
        """Flattened coordinates and hover labels for increasing bars."""
        labels = ("Open", "Open", "High", "Low", "Close", "Close", '')
        return (FigureFactory._flatten(self.increase_x),
                FigureFactory._flatten(self.increase_y),
                labels * len(self.increase_x))

    def get_decrease(self):
        """Flattened coordinates and hover labels for decreasing bars."""
        labels = ("Open", "Open", "High", "Low", "Close", "Close", '')
        return (FigureFactory._flatten(self.decrease_x),
                FigureFactory._flatten(self.decrease_y),
                labels * len(self.decrease_x))
class _Candlestick(FigureFactory):
    """
    Refer to FigureFactory.create_candlestick() for docstring.

    Builds the box/whisker point lists for increasing and decreasing
    candles.
    """
    def __init__(self, open, high, low, close, dates, **kwargs):
        self.open = open
        self.high = high
        self.low = low
        self.close = close
        if dates is not None:
            self.x = dates
        else:
            self.x = list(range(len(self.open)))
        # NOTE: the original also called self.get_candle_increase() here and
        # discarded the result; the method has no side effects, so that dead
        # call was removed.

    def _candle_points(self, keep):
        """
        Collect x/y box-plot points for every candle whose (open, close)
        pair satisfies the predicate *keep*.

        Each kept candle contributes six y-values
        (low, open, close, close, close, high) and six copies of its x.
        """
        candle_y = []
        kept_x = []
        for index in range(len(self.open)):
            if keep(self.open[index], self.close[index]):
                candle_y.extend([self.low[index], self.open[index],
                                 self.close[index], self.close[index],
                                 self.close[index], self.high[index]])
                kept_x.append(self.x[index])
        candle_x = FigureFactory._flatten([[x] * 6 for x in kept_x])
        return candle_x, candle_y

    def get_candle_increase(self):
        """
        Separate increasing (close > open) candles.

        :rtype (tuple): (increase_x, increase_y) coordinate lists.
        """
        return self._candle_points(lambda op, cl: cl > op)

    def get_candle_decrease(self):
        """
        Separate decreasing (close <= open) candles.

        :rtype (tuple): (decrease_x, decrease_y) coordinate lists.
        """
        return self._candle_points(lambda op, cl: cl <= op)
class _Distplot(FigureFactory):
    """
    Builds the histogram, fitted-curve (KDE or normal) and rug traces for a
    distribution plot from one or more data sets.
    """
    def __init__(self, hist_data, histnorm, group_labels,
                 bin_size, curve_type, colors,
                 rug_text, show_hist, show_curve):
        # NOTE(review): curve_type is accepted but never stored — presumably
        # the caller dispatches to make_kde()/make_normal() itself; confirm.
        self.hist_data = hist_data
        self.histnorm = histnorm
        self.group_labels = group_labels
        self.bin_size = bin_size
        self.show_hist = show_hist
        self.show_curve = show_curve
        self.trace_number = len(hist_data)
        if rug_text:
            self.rug_text = rug_text
        else:
            self.rug_text = [None] * self.trace_number
        self.start = []
        self.end = []
        if colors:
            self.colors = colors
        else:
            # Default plotly qualitative palette (10 colors, cycled by index).
            self.colors = [
                "rgb(31, 119, 180)", "rgb(255, 127, 14)",
                "rgb(44, 160, 44)", "rgb(214, 39, 40)",
                "rgb(148, 103, 189)", "rgb(140, 86, 75)",
                "rgb(227, 119, 194)", "rgb(127, 127, 127)",
                "rgb(188, 189, 34)", "rgb(23, 190, 207)"]
        self.curve_x = [None] * self.trace_number
        self.curve_y = [None] * self.trace_number
        # Per-trace data range as floats; used for bins and curve sampling.
        for trace in self.hist_data:
            self.start.append(min(trace) * 1.)
            self.end.append(max(trace) * 1.)
    def make_hist(self):
        """Return one histogram trace dict per data set (fixed bins)."""
        hist = [None] * self.trace_number
        for index in range(self.trace_number):
            hist[index] = dict(type='histogram',
                               x=self.hist_data[index],
                               xaxis='x1',
                               yaxis='y1',
                               histnorm=self.histnorm,
                               name=self.group_labels[index],
                               legendgroup=self.group_labels[index],
                               marker=dict(color=self.colors[index]),
                               autobinx=False,
                               xbins=dict(start=self.start[index],
                                          end=self.end[index],
                                          size=self.bin_size[index]),
                               opacity=.7)
        return hist
    def make_kde(self):
        """
        Return scatter traces of a Gaussian KDE sampled at 500 evenly spaced
        points over each data set's range.  Also fills self.curve_x/curve_y.
        """
        curve = [None] * self.trace_number
        for index in range(self.trace_number):
            self.curve_x[index] = [self.start[index] +
                                   x * (self.end[index] - self.start[index])
                                   / 500 for x in range(500)]
            self.curve_y[index] = (scipy.stats.gaussian_kde
                                   (self.hist_data[index])
                                   (self.curve_x[index]))
            # Under the alternative histnorm the density is rescaled by the
            # bin size so it overlays the histogram counts.
            if self.histnorm == ALTERNATIVE_HISTNORM:
                self.curve_y[index] *= self.bin_size[index]
        for index in range(self.trace_number):
            curve[index] = dict(type='scatter',
                                x=self.curve_x[index],
                                y=self.curve_y[index],
                                xaxis='x1',
                                yaxis='y1',
                                mode='lines',
                                name=self.group_labels[index],
                                legendgroup=self.group_labels[index],
                                showlegend=False if self.show_hist else True,
                                marker=dict(color=self.colors[index]))
        return curve
    def make_normal(self):
        """
        Return scatter traces of a fitted normal pdf sampled at 500 evenly
        spaced points per data set.  Also fills self.curve_x/curve_y.
        """
        curve = [None] * self.trace_number
        mean = [None] * self.trace_number
        sd = [None] * self.trace_number
        for index in range(self.trace_number):
            mean[index], sd[index] = (scipy.stats.norm.fit
                                      (self.hist_data[index]))
            self.curve_x[index] = [self.start[index] +
                                   x * (self.end[index] - self.start[index])
                                   / 500 for x in range(500)]
            self.curve_y[index] = scipy.stats.norm.pdf(
                self.curve_x[index], loc=mean[index], scale=sd[index])
            # Same bin-size rescaling as make_kde() for the alt histnorm.
            if self.histnorm == ALTERNATIVE_HISTNORM:
                self.curve_y[index] *= self.bin_size[index]
        for index in range(self.trace_number):
            curve[index] = dict(type='scatter',
                                x=self.curve_x[index],
                                y=self.curve_y[index],
                                xaxis='x1',
                                yaxis='y1',
                                mode='lines',
                                name=self.group_labels[index],
                                legendgroup=self.group_labels[index],
                                showlegend=False if self.show_hist else True,
                                marker=dict(color=self.colors[index]))
        return curve
    def make_rug(self):
        """Return rug (tick-marker) traces on the secondary y-axis."""
        rug = [None] * self.trace_number
        for index in range(self.trace_number):
            rug[index] = dict(type='scatter',
                              x=self.hist_data[index],
                              y=([self.group_labels[index]] *
                                 len(self.hist_data[index])),
                              xaxis='x1',
                              yaxis='y2',
                              mode='markers',
                              name=self.group_labels[index],
                              legendgroup=self.group_labels[index],
                              showlegend=(False if self.show_hist or
                                          self.show_curve else True),
                              text=self.rug_text[index],
                              marker=dict(color=self.colors[index],
                                          symbol='line-ns-open'))
        return rug
class _Dendrogram(FigureFactory):
    """
    Refer to FigureFactory.create_dendrogram() for docstring.

    Runs SciPy hierarchical clustering on the observations and converts the
    resulting dendrogram into plotly traces plus a matching layout.
    """
    def __init__(self, X, orientation='bottom', labels=None, colorscale=None,
                 width="100%", height="100%", xaxis='xaxis', yaxis='yaxis'):
        # Deferred import: graph_objs is only needed when traces are built.
        from plotly.graph_objs import graph_objs
        self.orientation = orientation
        self.labels = labels
        self.xaxis = xaxis
        self.yaxis = yaxis
        self.data = []
        self.leaves = []
        self.sign = {self.xaxis: 1, self.yaxis: 1}
        self.layout = {self.xaxis: {}, self.yaxis: {}}

        # Flip axis signs so the tree grows away from the labelled edge.
        if self.orientation in ['left', 'bottom']:
            self.sign[self.xaxis] = 1
        else:
            self.sign[self.xaxis] = -1

        if self.orientation in ['right', 'bottom']:
            self.sign[self.yaxis] = 1
        else:
            self.sign[self.yaxis] = -1

        (dd_traces, xvals, yvals,
            ordered_labels, leaves) = self.get_dendrogram_traces(X, colorscale)

        self.labels = ordered_labels
        self.leaves = leaves
        yvals_flat = yvals.flatten()
        xvals_flat = xvals.flatten()

        # Leaf tick positions are where the dendrogram height is zero.
        self.zero_vals = []
        for i in range(len(yvals_flat)):
            if yvals_flat[i] == 0.0 and xvals_flat[i] not in self.zero_vals:
                self.zero_vals.append(xvals_flat[i])
        self.zero_vals.sort()

        self.layout = self.set_figure_layout(width, height)
        self.data = graph_objs.Data(dd_traces)

    def get_color_dict(self, colorscale):
        """
        Return an ordered mapping from SciPy's single-letter link colors
        ('r', 'g', ...) to plotly color strings, optionally overridden in
        alphabetical-key order by *colorscale*.
        """
        d = {'r': 'red',
             'g': 'green',
             'b': 'blue',
             'c': 'cyan',
             'm': 'magenta',
             'y': 'yellow',
             'k': 'black',
             'w': 'white'}
        default_colors = OrderedDict(sorted(d.items(), key=lambda t: t[0]))
        if colorscale is None:
            colorscale = [
                'rgb(0,116,217)',  # blue
                'rgb(35,205,205)',  # cyan
                'rgb(61,153,112)',  # green
                'rgb(40,35,35)',  # black
                'rgb(133,20,75)',  # magenta
                'rgb(255,65,54)',  # red
                'rgb(255,255,255)',  # white
                'rgb(255,220,0)']  # yellow
        for i in range(len(default_colors.keys())):
            k = list(default_colors.keys())[i]  # PY3 won't index keys
            if i < len(colorscale):
                default_colors[k] = colorscale[i]
        return default_colors

    def set_axis_layout(self, axis_key):
        """
        Apply default options to the axis named *axis_key* and, when labels
        exist, attach leaf tick labels to the labelled axis.  Returns the
        updated axis layout dict.
        """
        axis_defaults = {
                'type': 'linear',
                'ticks': 'outside',
                'mirror': 'allticks',
                'rangemode': 'tozero',
                'showticklabels': True,
                'zeroline': False,
                'showgrid': False,
                'showline': True,
            }
        if len(self.labels) != 0:
            # Labels sit on the x-axis for top/bottom trees, y-axis otherwise.
            axis_key_labels = self.xaxis
            if self.orientation in ['left', 'right']:
                axis_key_labels = self.yaxis
            if axis_key_labels not in self.layout:
                self.layout[axis_key_labels] = {}
            self.layout[axis_key_labels]['tickvals'] = \
                [zv*self.sign[axis_key] for zv in self.zero_vals]
            self.layout[axis_key_labels]['ticktext'] = self.labels
            self.layout[axis_key_labels]['tickmode'] = 'array'
        self.layout[axis_key].update(axis_defaults)
        return self.layout[axis_key]

    def set_figure_layout(self, width, height):
        """Set figure-level layout (size, hover, both axes) and return it."""
        self.layout.update({
            'showlegend': False,
            'autosize': False,
            'hovermode': 'closest',
            'width': width,
            'height': height
        })
        self.set_axis_layout(self.xaxis)
        self.set_axis_layout(self.yaxis)
        return self.layout

    def get_dendrogram_traces(self, X, colorscale):
        """
        Cluster observation matrix *X* (complete linkage on pairwise
        distances) and convert the dendrogram into scatter traces.

        Returns (trace_list, icoord, dcoord, ordered_labels, leaves).
        """
        from plotly.graph_objs import graph_objs
        d = scs.distance.pdist(X)
        Z = sch.linkage(d, method='complete')
        P = sch.dendrogram(Z, orientation=self.orientation,
                           labels=self.labels, no_plot=True)

        icoord = scp.array(P['icoord'])
        dcoord = scp.array(P['dcoord'])
        ordered_labels = scp.array(P['ivl'])
        color_list = scp.array(P['color_list'])
        colors = self.get_color_dict(colorscale)

        trace_list = []

        for i in range(len(icoord)):
            # icoord holds leaf positions, dcoord merge heights; which one
            # is x depends on the tree orientation.
            if self.orientation in ['top', 'bottom']:
                xs = icoord[i]
            else:
                xs = dcoord[i]

            if self.orientation in ['top', 'bottom']:
                ys = dcoord[i]
            else:
                ys = icoord[i]
            color_key = color_list[i]
            trace = graph_objs.Scatter(
                x=np.multiply(self.sign[self.xaxis], xs),
                y=np.multiply(self.sign[self.yaxis], ys),
                mode='lines',
                marker=graph_objs.Marker(color=colors[color_key])
            )

            try:
                x_index = int(self.xaxis[-1])
            except ValueError:
                x_index = ''

            try:
                y_index = int(self.yaxis[-1])
            except ValueError:
                y_index = ''

            # BUG FIX: for numbered axes (e.g. xaxis='xaxis2') the index is
            # an int and 'x' + 2 raised TypeError; str() also passes the ''
            # fallback through unchanged.
            trace['xaxis'] = 'x' + str(x_index)
            trace['yaxis'] = 'y' + str(y_index)

            trace_list.append(trace)

        return trace_list, icoord, dcoord, ordered_labels, P['leaves']
class _AnnotatedHeatmap(FigureFactory):
    """
    Refer to FigureFactory.create_annotated_heatmap() for docstring.

    Builds per-cell text annotations whose font color contrasts with the
    heatmap colorscale.
    """
    def __init__(self, z, x, y, annotation_text, colorscale,
                 font_colors, reversescale, **kwargs):
        from plotly.graph_objs import graph_objs
        self.z = z
        if x:
            self.x = x
        else:
            self.x = range(len(z[0]))
        if y:
            self.y = y
        else:
            self.y = range(len(z))
        if annotation_text is not None:
            self.annotation_text = annotation_text
        else:
            # Fall back to annotating each cell with its own z value.
            self.annotation_text = self.z
        self.colorscale = colorscale
        self.reversescale = reversescale
        self.font_colors = font_colors

    def get_text_color(self):
        """
        Pick two font colors: one for cells below the colorscale midpoint
        and one for cells above it.

        :rtype (string, string): (min_text_color, max_text_color)
        """
        # NOTE(review): 'YIGnBu' / 'YIOrRd' look like typos of plotly's
        # 'YlGnBu' / 'YlOrRd' colorscale names — confirm before changing,
        # since "fixing" them would change which scales match here.
        colorscales = ['Greys', 'Greens', 'Blues',
                       'YIGnBu', 'YIOrRd', 'RdBu',
                       'Picnic', 'Jet', 'Hot', 'Blackbody',
                       'Earth', 'Electric', 'Viridis']
        colorscales_reverse = ['Reds']
        if self.font_colors:
            min_text_color = self.font_colors[0]
            max_text_color = self.font_colors[-1]
        elif self.colorscale in colorscales and self.reversescale:
            min_text_color = '#000000'
            max_text_color = '#FFFFFF'
        elif self.colorscale in colorscales:
            min_text_color = '#FFFFFF'
            max_text_color = '#000000'
        elif self.colorscale in colorscales_reverse and self.reversescale:
            min_text_color = '#FFFFFF'
            max_text_color = '#000000'
        elif self.colorscale in colorscales_reverse:
            min_text_color = '#000000'
            max_text_color = '#FFFFFF'
        elif isinstance(self.colorscale, list):
            if 'rgb' in self.colorscale[0][1]:
                # BUG FIX: map() is a lazy iterator on Python 3 and cannot
                # be indexed; materialize it before min_col[0] is read below.
                min_col = list(map(
                    int, self.colorscale[0][1].strip('rgb()').split(',')))
                max_col = list(map(
                    int, self.colorscale[-1][1].strip('rgb()').split(',')))
            elif '#' in self.colorscale[0][1]:
                min_col = FigureFactory._hex_to_rgb(self.colorscale[0][1])
                max_col = FigureFactory._hex_to_rgb(self.colorscale[-1][1])
            else:
                min_col = [255, 255, 255]
                max_col = [255, 255, 255]

            # Perceived-luminance threshold (ITU-R BT.601 weights).
            if (min_col[0]*0.299 + min_col[1]*0.587 + min_col[2]*0.114) > 186:
                min_text_color = '#000000'
            else:
                min_text_color = '#FFFFFF'
            if (max_col[0]*0.299 + max_col[1]*0.587 + max_col[2]*0.114) > 186:
                max_text_color = '#000000'
            else:
                max_text_color = '#FFFFFF'
        else:
            min_text_color = '#000000'
            max_text_color = '#000000'
        return min_text_color, max_text_color

    def get_z_mid(self):
        """
        Return the midpoint of the z matrix: (global min + global max) / 2.
        """
        if _numpy_imported and isinstance(self.z, np.ndarray):
            z_min = np.amin(self.z)
            z_max = np.amax(self.z)
        else:
            # BUG FIX: min(min(self.z)) took the minimum of the
            # lexicographically smallest ROW, not the global minimum
            # (e.g. [[5, 0], [1, 9]] gave 1 instead of 0); scan every row.
            z_min = min(min(row) for row in self.z)
            z_max = max(max(row) for row in self.z)
        z_mid = (z_max+z_min) / 2
        return z_mid

    def make_annotations(self):
        """
        Build one text annotation per cell, using the low-side font color
        below the z midpoint and the high-side color above it.
        """
        from plotly.graph_objs import graph_objs
        min_text_color, max_text_color = _AnnotatedHeatmap.get_text_color(self)
        z_mid = _AnnotatedHeatmap.get_z_mid(self)
        annotations = []
        for n, row in enumerate(self.z):
            for m, val in enumerate(row):
                font_color = min_text_color if val < z_mid else max_text_color
                annotations.append(
                    graph_objs.Annotation(
                        text=str(self.annotation_text[n][m]),
                        x=self.x[m],
                        y=self.y[n],
                        xref='x1',
                        yref='y1',
                        font=dict(color=font_color),
                        showarrow=False))
        return annotations
class _Table(FigureFactory):
    """
    Refer to FigureFactory.create_table() for docstring.

    Converts tabular text (list of rows or a DataFrame) into a striped
    heatmap z matrix plus per-cell text annotations.
    """
    def __init__(self, table_text, colorscale, font_colors, index,
                 index_title, annotation_offset, **kwargs):
        from plotly.graph_objs import graph_objs
        # A DataFrame is converted to a list of rows with the column headers
        # as row 0 and, when index is set, the frame index as column 0.
        if _pandas_imported and isinstance(table_text, pd.DataFrame):
            headers = table_text.columns.tolist()
            table_text_index = table_text.index.tolist()
            table_text = table_text.values.tolist()
            table_text.insert(0, headers)
            if index:
                table_text_index.insert(0, index_title)
                for i in range(len(table_text)):
                    table_text[i].insert(0, table_text_index[i])
        self.table_text = table_text
        self.colorscale = colorscale
        self.font_colors = font_colors
        self.index = index
        self.annotation_offset = annotation_offset
        self.x = range(len(table_text[0]))
        self.y = range(len(table_text))

    def get_table_matrix(self):
        """
        Create the z matrix for the table heatmap: 0 for the header row,
        alternating .5 / 1 stripes below, and 0 down the index column.
        """
        width = len(self.table_text[0])
        table_matrix = [None] * len(self.table_text)
        table_matrix[0] = [0] * width
        for i in range(1, len(self.table_text)):
            # BUG FIX: build a fresh list per row.  The original reused a
            # single shared odd_row/even_row list object for every stripe,
            # so the index-column write below mutated all aliased rows at
            # once.
            stripe = .5 if i % 2 == 1 else 1
            table_matrix[i] = [stripe] * width
        if self.index:
            # Color the index column like the header.
            for array in table_matrix:
                array[0] = 0
        return table_matrix

    def get_table_font_color(self):
        """
        Resolve one font color per row: a single color repeated, a
        (header, odd, even) triple, one color per row, or black fallback.
        """
        if len(self.font_colors) == 1:
            all_font_colors = self.font_colors*len(self.table_text)
        elif len(self.font_colors) == 3:
            all_font_colors = list(range(len(self.table_text)))
            all_font_colors[0] = self.font_colors[0]
            for i in range(1, len(self.table_text), 2):
                all_font_colors[i] = self.font_colors[1]
            for i in range(2, len(self.table_text), 2):
                all_font_colors[i] = self.font_colors[2]
        elif len(self.font_colors) == len(self.table_text):
            all_font_colors = self.font_colors
        else:
            all_font_colors = ['#000000']*len(self.table_text)
        return all_font_colors

    def make_table_annotations(self):
        """
        Build one left-aligned text annotation per cell; header row and
        index column are bolded, and the index column always uses the first
        font color.
        """
        from plotly.graph_objs import graph_objs
        table_matrix = _Table.get_table_matrix(self)
        all_font_colors = _Table.get_table_font_color(self)
        annotations = []
        for n, row in enumerate(self.table_text):
            for m, val in enumerate(row):
                format_text = ('<b>' + str(val) + '</b>' if n == 0 or
                               self.index and m < 1 else str(val))
                font_color = (self.font_colors[0] if self.index and m == 0
                              else all_font_colors[n])
                annotations.append(
                    graph_objs.Annotation(
                        text=format_text,
                        x=self.x[m] - self.annotation_offset,
                        y=self.y[n],
                        xref='x1',
                        yref='y1',
                        align="left",
                        xanchor="left",
                        font=dict(color=font_color),
                        showarrow=False))
        return annotations
| true | true |
f7342d5a9f5798569cd6e83dadbc84ca8f4c2ab8 | 520 | py | Python | pkgs/sdk-pkg/src/genie/libs/sdk/apis/iosxr/lldp/configure.py | jbronikowski/genielibs | 200a34e5fe4838a27b5a80d5973651b2e34ccafb | [
"Apache-2.0"
] | 94 | 2018-04-30T20:29:15.000Z | 2022-03-29T13:40:31.000Z | pkgs/sdk-pkg/src/genie/libs/sdk/apis/iosxr/lldp/configure.py | jbronikowski/genielibs | 200a34e5fe4838a27b5a80d5973651b2e34ccafb | [
"Apache-2.0"
] | 67 | 2018-12-06T21:08:09.000Z | 2022-03-29T18:00:46.000Z | pkgs/sdk-pkg/src/genie/libs/sdk/apis/iosxr/lldp/configure.py | jbronikowski/genielibs | 200a34e5fe4838a27b5a80d5973651b2e34ccafb | [
"Apache-2.0"
] | 49 | 2018-06-29T18:59:03.000Z | 2022-03-10T02:07:59.000Z | """Common configure functions for lldp"""
# Python
import logging
log = logging.getLogger(__name__)
def configure_lldp(device):
    """ Enable LLDP on the target device

        Args:
            device ('obj'): Device object to configure

        Returns:
            None
    """
    device.configure(['lldp'])
def unconfigure_lldp(device):
    """ Disable LLDP on the target device

        Args:
            device ('obj'): Device object to configure

        Returns:
            None
    """
    device.configure('no lldp')
import logging
log = logging.getLogger(__name__)
def configure_lldp(device):
device.configure(['lldp'])
def unconfigure_lldp(device):
device.configure('no lldp') | true | true |
f7342dc10025238831db10191fe54509c36df5ef | 1,757 | py | Python | github_poster/loader/duolingo_loader.py | phh95/GitHubPoster | 6bb0eb4cce8f60b9b25a0ae813601dcc13b32f66 | [
"MIT"
] | null | null | null | github_poster/loader/duolingo_loader.py | phh95/GitHubPoster | 6bb0eb4cce8f60b9b25a0ae813601dcc13b32f66 | [
"MIT"
] | null | null | null | github_poster/loader/duolingo_loader.py | phh95/GitHubPoster | 6bb0eb4cce8f60b9b25a0ae813601dcc13b32f66 | [
"MIT"
] | null | null | null | import pendulum
import requests
from github_poster.loader.base_loader import BaseLoader
from github_poster.loader.config import DUOLINGO_CALENDAR_API
class DuolingoLoader(BaseLoader):
    """Load daily Duolingo XP gains via the public calendar API."""

    unit = "XP"

    def __init__(self, from_year, to_year, **kwargs):
        super().__init__(from_year, to_year)
        self.user_name = kwargs.get("user_name", "")

    @classmethod
    def add_loader_arguments(cls, parser):
        """Register the loader-specific CLI arguments on *parser*."""
        parser.add_argument(
            "--user_name",
            dest="user_name",
            type=str,
            help="",
            required=True,
        )

    def get_api_data(self):
        """Fetch per-day XP summaries, one API request per month."""
        month_list = self.make_month_list()
        data_list = []
        for m in month_list:
            r = requests.get(
                DUOLINGO_CALENDAR_API.format(
                    user_id=self.user_name,
                    start_date=m.to_date_string(),
                    end_date=m.end_of("month").to_date_string(),
                )
            )
            if not r.ok:
                print(f"get duolingo calendar api failed {str(r.text)}")
                # BUG FIX: a failed response has no usable JSON payload, so
                # skip this month instead of attempting (and silently
                # failing) to parse it below.
                continue
            try:
                data_list.extend(r.json()["summaries"])
            except Exception:
                # best-effort: ignore a malformed month payload rather than
                # abort the whole run
                pass
        return data_list

    def make_track_dict(self):
        """Index positive XP gains by ISO date and collect the raw values."""
        data_list = self.get_api_data()
        for d in data_list:
            date_str = pendulum.from_timestamp(d["date"]).to_date_string()
            number = d["gainedXp"]
            if number:
                self.number_by_date_dict[date_str] = number
                self.number_list.append(number)

    def get_all_track_data(self):
        """Return ({date: XP}, [years]) for poster rendering."""
        self.make_track_dict()
        self.make_special_number()
        return self.number_by_date_dict, self.year_list
return self.number_by_date_dict, self.year_list
| 30.293103 | 74 | 0.569152 | import pendulum
import requests
from github_poster.loader.base_loader import BaseLoader
from github_poster.loader.config import DUOLINGO_CALENDAR_API
class DuolingoLoader(BaseLoader):
unit = "XP"
def __init__(self, from_year, to_year, **kwargs):
super().__init__(from_year, to_year)
self.user_name = kwargs.get("user_name", "")
@classmethod
def add_loader_arguments(cls, parser):
parser.add_argument(
"--user_name",
dest="user_name",
type=str,
help="",
required=True,
)
def get_api_data(self):
month_list = self.make_month_list()
data_list = []
for m in month_list:
r = requests.get(
DUOLINGO_CALENDAR_API.format(
user_id=self.user_name,
start_date=m.to_date_string(),
end_date=m.end_of("month").to_date_string(),
)
)
if not r.ok:
print(f"get duolingo calendar api failed {str(r.text)}")
try:
data_list.extend(r.json()["summaries"])
except Exception:
pass
return data_list
def make_track_dict(self):
data_list = self.get_api_data()
for d in data_list:
date_str = pendulum.from_timestamp(d["date"]).to_date_string()
number = d["gainedXp"]
if number:
self.number_by_date_dict[date_str] = number
self.number_list.append(number)
def get_all_track_data(self):
self.make_track_dict()
self.make_special_number()
return self.number_by_date_dict, self.year_list
| true | true |
f7342f4e225e7f068f707b4f2ec284cf601056e1 | 26,885 | py | Python | lib/services/vserver/ncloud_vserver/model/server_instance.py | NaverCloudPlatform/ncloud-sdk-python | 5976dfabd205c615fcf57ac2f0ab67313ee6953c | [
"MIT"
] | 12 | 2018-11-20T04:30:49.000Z | 2021-11-09T12:34:26.000Z | lib/services/vserver/ncloud_vserver/model/server_instance.py | NaverCloudPlatform/ncloud-sdk-python | 5976dfabd205c615fcf57ac2f0ab67313ee6953c | [
"MIT"
] | 1 | 2019-01-24T15:56:15.000Z | 2019-05-31T07:56:55.000Z | lib/services/vserver/ncloud_vserver/model/server_instance.py | NaverCloudPlatform/ncloud-sdk-python | 5976dfabd205c615fcf57ac2f0ab67313ee6953c | [
"MIT"
] | 6 | 2018-06-29T03:45:50.000Z | 2022-03-18T01:51:45.000Z | # coding: utf-8
"""
vserver
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from ncloud_vserver.model.common_code import CommonCode # noqa: F401,E501
class ServerInstance(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'server_instance_no': 'str',
'server_name': 'str',
'server_description': 'str',
'cpu_count': 'int',
'memory_size': 'int',
'platform_type': 'CommonCode',
'login_key_name': 'str',
'public_ip_instance_no': 'str',
'public_ip': 'str',
'server_instance_status': 'CommonCode',
'server_instance_operation': 'CommonCode',
'server_instance_status_name': 'str',
'create_date': 'str',
'uptime': 'str',
'server_image_product_code': 'str',
'server_product_code': 'str',
'is_protect_server_termination': 'bool',
'zone_code': 'str',
'region_code': 'str',
'vpc_no': 'str',
'subnet_no': 'str',
'network_interface_no_list': 'list[str]',
'init_script_no': 'str',
'server_instance_type': 'CommonCode',
'base_block_storage_disk_type': 'CommonCode',
'base_block_storage_disk_detail_type': 'CommonCode',
'placement_group_no': 'str'
}
attribute_map = {
'server_instance_no': 'serverInstanceNo',
'server_name': 'serverName',
'server_description': 'serverDescription',
'cpu_count': 'cpuCount',
'memory_size': 'memorySize',
'platform_type': 'platformType',
'login_key_name': 'loginKeyName',
'public_ip_instance_no': 'publicIpInstanceNo',
'public_ip': 'publicIp',
'server_instance_status': 'serverInstanceStatus',
'server_instance_operation': 'serverInstanceOperation',
'server_instance_status_name': 'serverInstanceStatusName',
'create_date': 'createDate',
'uptime': 'uptime',
'server_image_product_code': 'serverImageProductCode',
'server_product_code': 'serverProductCode',
'is_protect_server_termination': 'isProtectServerTermination',
'zone_code': 'zoneCode',
'region_code': 'regionCode',
'vpc_no': 'vpcNo',
'subnet_no': 'subnetNo',
'network_interface_no_list': 'networkInterfaceNoList',
'init_script_no': 'initScriptNo',
'server_instance_type': 'serverInstanceType',
'base_block_storage_disk_type': 'baseBlockStorageDiskType',
'base_block_storage_disk_detail_type': 'baseBlockStorageDiskDetailType',
'placement_group_no': 'placementGroupNo'
}
def __init__(self, server_instance_no=None, server_name=None, server_description=None, cpu_count=None, memory_size=None, platform_type=None, login_key_name=None, public_ip_instance_no=None, public_ip=None, server_instance_status=None, server_instance_operation=None, server_instance_status_name=None, create_date=None, uptime=None, server_image_product_code=None, server_product_code=None, is_protect_server_termination=None, zone_code=None, region_code=None, vpc_no=None, subnet_no=None, network_interface_no_list=None, init_script_no=None, server_instance_type=None, base_block_storage_disk_type=None, base_block_storage_disk_detail_type=None, placement_group_no=None): # noqa: E501
"""ServerInstance - a model defined in Swagger""" # noqa: E501
self._server_instance_no = None
self._server_name = None
self._server_description = None
self._cpu_count = None
self._memory_size = None
self._platform_type = None
self._login_key_name = None
self._public_ip_instance_no = None
self._public_ip = None
self._server_instance_status = None
self._server_instance_operation = None
self._server_instance_status_name = None
self._create_date = None
self._uptime = None
self._server_image_product_code = None
self._server_product_code = None
self._is_protect_server_termination = None
self._zone_code = None
self._region_code = None
self._vpc_no = None
self._subnet_no = None
self._network_interface_no_list = None
self._init_script_no = None
self._server_instance_type = None
self._base_block_storage_disk_type = None
self._base_block_storage_disk_detail_type = None
self._placement_group_no = None
self.discriminator = None
if server_instance_no is not None:
self.server_instance_no = server_instance_no
if server_name is not None:
self.server_name = server_name
if server_description is not None:
self.server_description = server_description
if cpu_count is not None:
self.cpu_count = cpu_count
if memory_size is not None:
self.memory_size = memory_size
if platform_type is not None:
self.platform_type = platform_type
if login_key_name is not None:
self.login_key_name = login_key_name
if public_ip_instance_no is not None:
self.public_ip_instance_no = public_ip_instance_no
if public_ip is not None:
self.public_ip = public_ip
if server_instance_status is not None:
self.server_instance_status = server_instance_status
if server_instance_operation is not None:
self.server_instance_operation = server_instance_operation
if server_instance_status_name is not None:
self.server_instance_status_name = server_instance_status_name
if create_date is not None:
self.create_date = create_date
if uptime is not None:
self.uptime = uptime
if server_image_product_code is not None:
self.server_image_product_code = server_image_product_code
if server_product_code is not None:
self.server_product_code = server_product_code
if is_protect_server_termination is not None:
self.is_protect_server_termination = is_protect_server_termination
if zone_code is not None:
self.zone_code = zone_code
if region_code is not None:
self.region_code = region_code
if vpc_no is not None:
self.vpc_no = vpc_no
if subnet_no is not None:
self.subnet_no = subnet_no
if network_interface_no_list is not None:
self.network_interface_no_list = network_interface_no_list
if init_script_no is not None:
self.init_script_no = init_script_no
if server_instance_type is not None:
self.server_instance_type = server_instance_type
if base_block_storage_disk_type is not None:
self.base_block_storage_disk_type = base_block_storage_disk_type
if base_block_storage_disk_detail_type is not None:
self.base_block_storage_disk_detail_type = base_block_storage_disk_detail_type
if placement_group_no is not None:
self.placement_group_no = placement_group_no
@property
def server_instance_no(self):
"""Gets the server_instance_no of this ServerInstance. # noqa: E501
서버인스턴스번호 # noqa: E501
:return: The server_instance_no of this ServerInstance. # noqa: E501
:rtype: str
"""
return self._server_instance_no
@server_instance_no.setter
def server_instance_no(self, server_instance_no):
"""Sets the server_instance_no of this ServerInstance.
서버인스턴스번호 # noqa: E501
:param server_instance_no: The server_instance_no of this ServerInstance. # noqa: E501
:type: str
"""
self._server_instance_no = server_instance_no
@property
def server_name(self):
"""Gets the server_name of this ServerInstance. # noqa: E501
서버이름 # noqa: E501
:return: The server_name of this ServerInstance. # noqa: E501
:rtype: str
"""
return self._server_name
@server_name.setter
def server_name(self, server_name):
"""Sets the server_name of this ServerInstance.
서버이름 # noqa: E501
:param server_name: The server_name of this ServerInstance. # noqa: E501
:type: str
"""
self._server_name = server_name
@property
def server_description(self):
"""Gets the server_description of this ServerInstance. # noqa: E501
서버설명 # noqa: E501
:return: The server_description of this ServerInstance. # noqa: E501
:rtype: str
"""
return self._server_description
@server_description.setter
def server_description(self, server_description):
"""Sets the server_description of this ServerInstance.
서버설명 # noqa: E501
:param server_description: The server_description of this ServerInstance. # noqa: E501
:type: str
"""
self._server_description = server_description
@property
def cpu_count(self):
"""Gets the cpu_count of this ServerInstance. # noqa: E501
CPU개수 # noqa: E501
:return: The cpu_count of this ServerInstance. # noqa: E501
:rtype: int
"""
return self._cpu_count
@cpu_count.setter
def cpu_count(self, cpu_count):
"""Sets the cpu_count of this ServerInstance.
CPU개수 # noqa: E501
:param cpu_count: The cpu_count of this ServerInstance. # noqa: E501
:type: int
"""
self._cpu_count = cpu_count
@property
def memory_size(self):
"""Gets the memory_size of this ServerInstance. # noqa: E501
메모리사이즈 # noqa: E501
:return: The memory_size of this ServerInstance. # noqa: E501
:rtype: int
"""
return self._memory_size
@memory_size.setter
def memory_size(self, memory_size):
"""Sets the memory_size of this ServerInstance.
메모리사이즈 # noqa: E501
:param memory_size: The memory_size of this ServerInstance. # noqa: E501
:type: int
"""
self._memory_size = memory_size
@property
def platform_type(self):
"""Gets the platform_type of this ServerInstance. # noqa: E501
플랫폼유형 # noqa: E501
:return: The platform_type of this ServerInstance. # noqa: E501
:rtype: CommonCode
"""
return self._platform_type
@platform_type.setter
def platform_type(self, platform_type):
"""Sets the platform_type of this ServerInstance.
플랫폼유형 # noqa: E501
:param platform_type: The platform_type of this ServerInstance. # noqa: E501
:type: CommonCode
"""
self._platform_type = platform_type
@property
def login_key_name(self):
"""Gets the login_key_name of this ServerInstance. # noqa: E501
로그인키이름 # noqa: E501
:return: The login_key_name of this ServerInstance. # noqa: E501
:rtype: str
"""
return self._login_key_name
@login_key_name.setter
def login_key_name(self, login_key_name):
"""Sets the login_key_name of this ServerInstance.
로그인키이름 # noqa: E501
:param login_key_name: The login_key_name of this ServerInstance. # noqa: E501
:type: str
"""
self._login_key_name = login_key_name
@property
def public_ip_instance_no(self):
"""Gets the public_ip_instance_no of this ServerInstance. # noqa: E501
공인IP인스턴스번호 # noqa: E501
:return: The public_ip_instance_no of this ServerInstance. # noqa: E501
:rtype: str
"""
return self._public_ip_instance_no
@public_ip_instance_no.setter
def public_ip_instance_no(self, public_ip_instance_no):
"""Sets the public_ip_instance_no of this ServerInstance.
공인IP인스턴스번호 # noqa: E501
:param public_ip_instance_no: The public_ip_instance_no of this ServerInstance. # noqa: E501
:type: str
"""
self._public_ip_instance_no = public_ip_instance_no
@property
def public_ip(self):
"""Gets the public_ip of this ServerInstance. # noqa: E501
공인IP주소 # noqa: E501
:return: The public_ip of this ServerInstance. # noqa: E501
:rtype: str
"""
return self._public_ip
@public_ip.setter
def public_ip(self, public_ip):
"""Sets the public_ip of this ServerInstance.
공인IP주소 # noqa: E501
:param public_ip: The public_ip of this ServerInstance. # noqa: E501
:type: str
"""
self._public_ip = public_ip
@property
def server_instance_status(self):
"""Gets the server_instance_status of this ServerInstance. # noqa: E501
서버인스턴스상태 # noqa: E501
:return: The server_instance_status of this ServerInstance. # noqa: E501
:rtype: CommonCode
"""
return self._server_instance_status
@server_instance_status.setter
def server_instance_status(self, server_instance_status):
"""Sets the server_instance_status of this ServerInstance.
서버인스턴스상태 # noqa: E501
:param server_instance_status: The server_instance_status of this ServerInstance. # noqa: E501
:type: CommonCode
"""
self._server_instance_status = server_instance_status
@property
def server_instance_operation(self):
"""Gets the server_instance_operation of this ServerInstance. # noqa: E501
서버인스턴스OP # noqa: E501
:return: The server_instance_operation of this ServerInstance. # noqa: E501
:rtype: CommonCode
"""
return self._server_instance_operation
@server_instance_operation.setter
def server_instance_operation(self, server_instance_operation):
"""Sets the server_instance_operation of this ServerInstance.
서버인스턴스OP # noqa: E501
:param server_instance_operation: The server_instance_operation of this ServerInstance. # noqa: E501
:type: CommonCode
"""
self._server_instance_operation = server_instance_operation
@property
def server_instance_status_name(self):
"""Gets the server_instance_status_name of this ServerInstance. # noqa: E501
서버인스턴스상태이름 # noqa: E501
:return: The server_instance_status_name of this ServerInstance. # noqa: E501
:rtype: str
"""
return self._server_instance_status_name
@server_instance_status_name.setter
def server_instance_status_name(self, server_instance_status_name):
"""Sets the server_instance_status_name of this ServerInstance.
서버인스턴스상태이름 # noqa: E501
:param server_instance_status_name: The server_instance_status_name of this ServerInstance. # noqa: E501
:type: str
"""
self._server_instance_status_name = server_instance_status_name
@property
def create_date(self):
"""Gets the create_date of this ServerInstance. # noqa: E501
생성일시 # noqa: E501
:return: The create_date of this ServerInstance. # noqa: E501
:rtype: str
"""
return self._create_date
@create_date.setter
def create_date(self, create_date):
"""Sets the create_date of this ServerInstance.
생성일시 # noqa: E501
:param create_date: The create_date of this ServerInstance. # noqa: E501
:type: str
"""
self._create_date = create_date
@property
def uptime(self):
"""Gets the uptime of this ServerInstance. # noqa: E501
업시간 # noqa: E501
:return: The uptime of this ServerInstance. # noqa: E501
:rtype: str
"""
return self._uptime
@uptime.setter
def uptime(self, uptime):
"""Sets the uptime of this ServerInstance.
업시간 # noqa: E501
:param uptime: The uptime of this ServerInstance. # noqa: E501
:type: str
"""
self._uptime = uptime
@property
def server_image_product_code(self):
"""Gets the server_image_product_code of this ServerInstance. # noqa: E501
서버이미지상품코드 # noqa: E501
:return: The server_image_product_code of this ServerInstance. # noqa: E501
:rtype: str
"""
return self._server_image_product_code
@server_image_product_code.setter
def server_image_product_code(self, server_image_product_code):
"""Sets the server_image_product_code of this ServerInstance.
서버이미지상품코드 # noqa: E501
:param server_image_product_code: The server_image_product_code of this ServerInstance. # noqa: E501
:type: str
"""
self._server_image_product_code = server_image_product_code
@property
def server_product_code(self):
"""Gets the server_product_code of this ServerInstance. # noqa: E501
서버상품코드 # noqa: E501
:return: The server_product_code of this ServerInstance. # noqa: E501
:rtype: str
"""
return self._server_product_code
@server_product_code.setter
def server_product_code(self, server_product_code):
"""Sets the server_product_code of this ServerInstance.
서버상품코드 # noqa: E501
:param server_product_code: The server_product_code of this ServerInstance. # noqa: E501
:type: str
"""
self._server_product_code = server_product_code
@property
def is_protect_server_termination(self):
"""Gets the is_protect_server_termination of this ServerInstance. # noqa: E501
서버반납보호설정여부 # noqa: E501
:return: The is_protect_server_termination of this ServerInstance. # noqa: E501
:rtype: bool
"""
return self._is_protect_server_termination
@is_protect_server_termination.setter
def is_protect_server_termination(self, is_protect_server_termination):
"""Sets the is_protect_server_termination of this ServerInstance.
서버반납보호설정여부 # noqa: E501
:param is_protect_server_termination: The is_protect_server_termination of this ServerInstance. # noqa: E501
:type: bool
"""
self._is_protect_server_termination = is_protect_server_termination
@property
def zone_code(self):
"""Gets the zone_code of this ServerInstance. # noqa: E501
ZONE코드 # noqa: E501
:return: The zone_code of this ServerInstance. # noqa: E501
:rtype: str
"""
return self._zone_code
@zone_code.setter
def zone_code(self, zone_code):
"""Sets the zone_code of this ServerInstance.
ZONE코드 # noqa: E501
:param zone_code: The zone_code of this ServerInstance. # noqa: E501
:type: str
"""
self._zone_code = zone_code
@property
def region_code(self):
"""Gets the region_code of this ServerInstance. # noqa: E501
REGION코드 # noqa: E501
:return: The region_code of this ServerInstance. # noqa: E501
:rtype: str
"""
return self._region_code
@region_code.setter
def region_code(self, region_code):
"""Sets the region_code of this ServerInstance.
REGION코드 # noqa: E501
:param region_code: The region_code of this ServerInstance. # noqa: E501
:type: str
"""
self._region_code = region_code
@property
def vpc_no(self):
"""Gets the vpc_no of this ServerInstance. # noqa: E501
VPC번호 # noqa: E501
:return: The vpc_no of this ServerInstance. # noqa: E501
:rtype: str
"""
return self._vpc_no
@vpc_no.setter
def vpc_no(self, vpc_no):
"""Sets the vpc_no of this ServerInstance.
VPC번호 # noqa: E501
:param vpc_no: The vpc_no of this ServerInstance. # noqa: E501
:type: str
"""
self._vpc_no = vpc_no
@property
def subnet_no(self):
"""Gets the subnet_no of this ServerInstance. # noqa: E501
서브넷번호 # noqa: E501
:return: The subnet_no of this ServerInstance. # noqa: E501
:rtype: str
"""
return self._subnet_no
@subnet_no.setter
def subnet_no(self, subnet_no):
"""Sets the subnet_no of this ServerInstance.
서브넷번호 # noqa: E501
:param subnet_no: The subnet_no of this ServerInstance. # noqa: E501
:type: str
"""
self._subnet_no = subnet_no
@property
def network_interface_no_list(self):
"""Gets the network_interface_no_list of this ServerInstance. # noqa: E501
네트워크인터페이스번호리스트 # noqa: E501
:return: The network_interface_no_list of this ServerInstance. # noqa: E501
:rtype: list[str]
"""
return self._network_interface_no_list
@network_interface_no_list.setter
def network_interface_no_list(self, network_interface_no_list):
"""Sets the network_interface_no_list of this ServerInstance.
네트워크인터페이스번호리스트 # noqa: E501
:param network_interface_no_list: The network_interface_no_list of this ServerInstance. # noqa: E501
:type: list[str]
"""
self._network_interface_no_list = network_interface_no_list
@property
def init_script_no(self):
"""Gets the init_script_no of this ServerInstance. # noqa: E501
초기화스크립트번호 # noqa: E501
:return: The init_script_no of this ServerInstance. # noqa: E501
:rtype: str
"""
return self._init_script_no
@init_script_no.setter
def init_script_no(self, init_script_no):
"""Sets the init_script_no of this ServerInstance.
초기화스크립트번호 # noqa: E501
:param init_script_no: The init_script_no of this ServerInstance. # noqa: E501
:type: str
"""
self._init_script_no = init_script_no
@property
def server_instance_type(self):
"""Gets the server_instance_type of this ServerInstance. # noqa: E501
서버인스턴스유형 # noqa: E501
:return: The server_instance_type of this ServerInstance. # noqa: E501
:rtype: CommonCode
"""
return self._server_instance_type
@server_instance_type.setter
def server_instance_type(self, server_instance_type):
"""Sets the server_instance_type of this ServerInstance.
서버인스턴스유형 # noqa: E501
:param server_instance_type: The server_instance_type of this ServerInstance. # noqa: E501
:type: CommonCode
"""
self._server_instance_type = server_instance_type
@property
def base_block_storage_disk_type(self):
"""Gets the base_block_storage_disk_type of this ServerInstance. # noqa: E501
기본블록스토리지디스크유형 # noqa: E501
:return: The base_block_storage_disk_type of this ServerInstance. # noqa: E501
:rtype: CommonCode
"""
return self._base_block_storage_disk_type
@base_block_storage_disk_type.setter
def base_block_storage_disk_type(self, base_block_storage_disk_type):
"""Sets the base_block_storage_disk_type of this ServerInstance.
기본블록스토리지디스크유형 # noqa: E501
:param base_block_storage_disk_type: The base_block_storage_disk_type of this ServerInstance. # noqa: E501
:type: CommonCode
"""
self._base_block_storage_disk_type = base_block_storage_disk_type
@property
def base_block_storage_disk_detail_type(self):
"""Gets the base_block_storage_disk_detail_type of this ServerInstance. # noqa: E501
기본블록스토리지디스크상세유형 # noqa: E501
:return: The base_block_storage_disk_detail_type of this ServerInstance. # noqa: E501
:rtype: CommonCode
"""
return self._base_block_storage_disk_detail_type
@base_block_storage_disk_detail_type.setter
def base_block_storage_disk_detail_type(self, base_block_storage_disk_detail_type):
"""Sets the base_block_storage_disk_detail_type of this ServerInstance.
기본블록스토리지디스크상세유형 # noqa: E501
:param base_block_storage_disk_detail_type: The base_block_storage_disk_detail_type of this ServerInstance. # noqa: E501
:type: CommonCode
"""
self._base_block_storage_disk_detail_type = base_block_storage_disk_detail_type
@property
def placement_group_no(self):
"""Gets the placement_group_no of this ServerInstance. # noqa: E501
물리배치그룹번호 # noqa: E501
:return: The placement_group_no of this ServerInstance. # noqa: E501
:rtype: str
"""
return self._placement_group_no
@placement_group_no.setter
def placement_group_no(self, placement_group_no):
"""Sets the placement_group_no of this ServerInstance.
물리배치그룹번호 # noqa: E501
:param placement_group_no: The placement_group_no of this ServerInstance. # noqa: E501
:type: str
"""
self._placement_group_no = placement_group_no
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ServerInstance):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 31.929929 | 689 | 0.650921 |
import pprint
import re
import six
from ncloud_vserver.model.common_code import CommonCode
class ServerInstance(object):
swagger_types = {
'server_instance_no': 'str',
'server_name': 'str',
'server_description': 'str',
'cpu_count': 'int',
'memory_size': 'int',
'platform_type': 'CommonCode',
'login_key_name': 'str',
'public_ip_instance_no': 'str',
'public_ip': 'str',
'server_instance_status': 'CommonCode',
'server_instance_operation': 'CommonCode',
'server_instance_status_name': 'str',
'create_date': 'str',
'uptime': 'str',
'server_image_product_code': 'str',
'server_product_code': 'str',
'is_protect_server_termination': 'bool',
'zone_code': 'str',
'region_code': 'str',
'vpc_no': 'str',
'subnet_no': 'str',
'network_interface_no_list': 'list[str]',
'init_script_no': 'str',
'server_instance_type': 'CommonCode',
'base_block_storage_disk_type': 'CommonCode',
'base_block_storage_disk_detail_type': 'CommonCode',
'placement_group_no': 'str'
}
attribute_map = {
'server_instance_no': 'serverInstanceNo',
'server_name': 'serverName',
'server_description': 'serverDescription',
'cpu_count': 'cpuCount',
'memory_size': 'memorySize',
'platform_type': 'platformType',
'login_key_name': 'loginKeyName',
'public_ip_instance_no': 'publicIpInstanceNo',
'public_ip': 'publicIp',
'server_instance_status': 'serverInstanceStatus',
'server_instance_operation': 'serverInstanceOperation',
'server_instance_status_name': 'serverInstanceStatusName',
'create_date': 'createDate',
'uptime': 'uptime',
'server_image_product_code': 'serverImageProductCode',
'server_product_code': 'serverProductCode',
'is_protect_server_termination': 'isProtectServerTermination',
'zone_code': 'zoneCode',
'region_code': 'regionCode',
'vpc_no': 'vpcNo',
'subnet_no': 'subnetNo',
'network_interface_no_list': 'networkInterfaceNoList',
'init_script_no': 'initScriptNo',
'server_instance_type': 'serverInstanceType',
'base_block_storage_disk_type': 'baseBlockStorageDiskType',
'base_block_storage_disk_detail_type': 'baseBlockStorageDiskDetailType',
'placement_group_no': 'placementGroupNo'
}
def __init__(self, server_instance_no=None, server_name=None, server_description=None, cpu_count=None, memory_size=None, platform_type=None, login_key_name=None, public_ip_instance_no=None, public_ip=None, server_instance_status=None, server_instance_operation=None, server_instance_status_name=None, create_date=None, uptime=None, server_image_product_code=None, server_product_code=None, is_protect_server_termination=None, zone_code=None, region_code=None, vpc_no=None, subnet_no=None, network_interface_no_list=None, init_script_no=None, server_instance_type=None, base_block_storage_disk_type=None, base_block_storage_disk_detail_type=None, placement_group_no=None):
self._server_instance_no = None
self._server_name = None
self._server_description = None
self._cpu_count = None
self._memory_size = None
self._platform_type = None
self._login_key_name = None
self._public_ip_instance_no = None
self._public_ip = None
self._server_instance_status = None
self._server_instance_operation = None
self._server_instance_status_name = None
self._create_date = None
self._uptime = None
self._server_image_product_code = None
self._server_product_code = None
self._is_protect_server_termination = None
self._zone_code = None
self._region_code = None
self._vpc_no = None
self._subnet_no = None
self._network_interface_no_list = None
self._init_script_no = None
self._server_instance_type = None
self._base_block_storage_disk_type = None
self._base_block_storage_disk_detail_type = None
self._placement_group_no = None
self.discriminator = None
if server_instance_no is not None:
self.server_instance_no = server_instance_no
if server_name is not None:
self.server_name = server_name
if server_description is not None:
self.server_description = server_description
if cpu_count is not None:
self.cpu_count = cpu_count
if memory_size is not None:
self.memory_size = memory_size
if platform_type is not None:
self.platform_type = platform_type
if login_key_name is not None:
self.login_key_name = login_key_name
if public_ip_instance_no is not None:
self.public_ip_instance_no = public_ip_instance_no
if public_ip is not None:
self.public_ip = public_ip
if server_instance_status is not None:
self.server_instance_status = server_instance_status
if server_instance_operation is not None:
self.server_instance_operation = server_instance_operation
if server_instance_status_name is not None:
self.server_instance_status_name = server_instance_status_name
if create_date is not None:
self.create_date = create_date
if uptime is not None:
self.uptime = uptime
if server_image_product_code is not None:
self.server_image_product_code = server_image_product_code
if server_product_code is not None:
self.server_product_code = server_product_code
if is_protect_server_termination is not None:
self.is_protect_server_termination = is_protect_server_termination
if zone_code is not None:
self.zone_code = zone_code
if region_code is not None:
self.region_code = region_code
if vpc_no is not None:
self.vpc_no = vpc_no
if subnet_no is not None:
self.subnet_no = subnet_no
if network_interface_no_list is not None:
self.network_interface_no_list = network_interface_no_list
if init_script_no is not None:
self.init_script_no = init_script_no
if server_instance_type is not None:
self.server_instance_type = server_instance_type
if base_block_storage_disk_type is not None:
self.base_block_storage_disk_type = base_block_storage_disk_type
if base_block_storage_disk_detail_type is not None:
self.base_block_storage_disk_detail_type = base_block_storage_disk_detail_type
if placement_group_no is not None:
self.placement_group_no = placement_group_no
@property
def server_instance_no(self):
return self._server_instance_no
@server_instance_no.setter
def server_instance_no(self, server_instance_no):
self._server_instance_no = server_instance_no
@property
def server_name(self):
return self._server_name
@server_name.setter
def server_name(self, server_name):
self._server_name = server_name
@property
def server_description(self):
return self._server_description
@server_description.setter
def server_description(self, server_description):
self._server_description = server_description
@property
def cpu_count(self):
return self._cpu_count
@cpu_count.setter
def cpu_count(self, cpu_count):
self._cpu_count = cpu_count
@property
def memory_size(self):
return self._memory_size
@memory_size.setter
def memory_size(self, memory_size):
self._memory_size = memory_size
@property
def platform_type(self):
return self._platform_type
@platform_type.setter
def platform_type(self, platform_type):
self._platform_type = platform_type
@property
def login_key_name(self):
return self._login_key_name
@login_key_name.setter
def login_key_name(self, login_key_name):
self._login_key_name = login_key_name
@property
def public_ip_instance_no(self):
return self._public_ip_instance_no
@public_ip_instance_no.setter
def public_ip_instance_no(self, public_ip_instance_no):
self._public_ip_instance_no = public_ip_instance_no
@property
def public_ip(self):
return self._public_ip
@public_ip.setter
def public_ip(self, public_ip):
self._public_ip = public_ip
@property
def server_instance_status(self):
return self._server_instance_status
@server_instance_status.setter
def server_instance_status(self, server_instance_status):
self._server_instance_status = server_instance_status
@property
def server_instance_operation(self):
return self._server_instance_operation
@server_instance_operation.setter
def server_instance_operation(self, server_instance_operation):
self._server_instance_operation = server_instance_operation
@property
def server_instance_status_name(self):
return self._server_instance_status_name
@server_instance_status_name.setter
def server_instance_status_name(self, server_instance_status_name):
self._server_instance_status_name = server_instance_status_name
@property
def create_date(self):
return self._create_date
@create_date.setter
def create_date(self, create_date):
self._create_date = create_date
@property
def uptime(self):
return self._uptime
@uptime.setter
def uptime(self, uptime):
self._uptime = uptime
@property
def server_image_product_code(self):
return self._server_image_product_code
@server_image_product_code.setter
def server_image_product_code(self, server_image_product_code):
self._server_image_product_code = server_image_product_code
@property
def server_product_code(self):
return self._server_product_code
@server_product_code.setter
def server_product_code(self, server_product_code):
self._server_product_code = server_product_code
@property
def is_protect_server_termination(self):
return self._is_protect_server_termination
@is_protect_server_termination.setter
def is_protect_server_termination(self, is_protect_server_termination):
self._is_protect_server_termination = is_protect_server_termination
@property
def zone_code(self):
return self._zone_code
@zone_code.setter
def zone_code(self, zone_code):
self._zone_code = zone_code
@property
def region_code(self):
return self._region_code
@region_code.setter
def region_code(self, region_code):
self._region_code = region_code
@property
def vpc_no(self):
return self._vpc_no
@vpc_no.setter
def vpc_no(self, vpc_no):
self._vpc_no = vpc_no
@property
def subnet_no(self):
return self._subnet_no
@subnet_no.setter
def subnet_no(self, subnet_no):
self._subnet_no = subnet_no
@property
def network_interface_no_list(self):
return self._network_interface_no_list
@network_interface_no_list.setter
def network_interface_no_list(self, network_interface_no_list):
self._network_interface_no_list = network_interface_no_list
@property
def init_script_no(self):
return self._init_script_no
@init_script_no.setter
def init_script_no(self, init_script_no):
self._init_script_no = init_script_no
@property
def server_instance_type(self):
return self._server_instance_type
@server_instance_type.setter
def server_instance_type(self, server_instance_type):
self._server_instance_type = server_instance_type
@property
def base_block_storage_disk_type(self):
return self._base_block_storage_disk_type
@base_block_storage_disk_type.setter
def base_block_storage_disk_type(self, base_block_storage_disk_type):
self._base_block_storage_disk_type = base_block_storage_disk_type
@property
def base_block_storage_disk_detail_type(self):
return self._base_block_storage_disk_detail_type
@base_block_storage_disk_detail_type.setter
def base_block_storage_disk_detail_type(self, base_block_storage_disk_detail_type):
self._base_block_storage_disk_detail_type = base_block_storage_disk_detail_type
@property
def placement_group_no(self):
return self._placement_group_no
@placement_group_no.setter
def placement_group_no(self, placement_group_no):
self._placement_group_no = placement_group_no
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, ServerInstance):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true | true |
f73430eb46e3d4244ec8ada9522ec81546a1cbb7 | 18,521 | py | Python | tensorflow_text/python/ops/sentencepiece_tokenizer_test.py | vbod/text | 07c044b8b851ace1e9a033c9597cdb1bee2d69e0 | [
"Apache-2.0"
] | null | null | null | tensorflow_text/python/ops/sentencepiece_tokenizer_test.py | vbod/text | 07c044b8b851ace1e9a033c9597cdb1bee2d69e0 | [
"Apache-2.0"
] | null | null | null | tensorflow_text/python/ops/sentencepiece_tokenizer_test.py | vbod/text | 07c044b8b851ace1e9a033c9597cdb1bee2d69e0 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2020 TF.Text Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for SentencePieceProcessor Tensorflow op."""
import sys
import tempfile
from absl.testing import parameterized
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_util
from tensorflow.python.lib.io import file_io
from tensorflow.python.module import module
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.saved_model import load
from tensorflow.python.saved_model import save
from tensorflow_text.python.ops.sentencepiece_tokenizer import SentencepieceTokenizer
def _utf8(tokens):
if sys.version_info[0] == 2:
return tokens
if isinstance(tokens, list):
return [_utf8(t) for t in tokens]
else:
return tokens.encode('utf-8')
class TestSavedModelModule(module.Module):
def __init__(self, tokenizer):
self.tokenizer = tokenizer
@def_function.function(input_signature=[
tensor_spec.TensorSpec(shape=[None], dtype=dtypes.string)
])
def tokenize(self, inputs):
return self.tokenizer.tokenize(inputs)
@test_util.run_all_in_graph_and_eager_modes
class SentencepieceTokenizerOpTest(test_util.TensorFlowTestCase,
parameterized.TestCase):
def getTokenizerAndSetOptions(self, reverse, add_bos, add_eos, out_type):
self.reverse = reverse
self.add_bos = add_bos
self.add_eos = add_eos
self.out_type = out_type
return SentencepieceTokenizer(
self.model,
reverse=reverse,
add_bos=add_bos,
add_eos=add_eos,
out_type=out_type)
def transformExpected(self, expected, is_offsets=False):
bos = _utf8('<s>')
eos = _utf8('</s>')
if is_offsets:
bos = 0
eos = 0
elif self.out_type == dtypes.int32:
bos = 1
eos = 2
if not isinstance(expected[0], list):
if self.add_bos:
expected = [bos] + expected
if self.add_eos:
expected = expected + [eos]
if self.reverse:
expected = [x for x in reversed(expected)]
else:
return [self.transformExpected(x) for x in expected]
return expected
def setUp(self):
super(SentencepieceTokenizerOpTest, self).setUp()
sentencepiece_model_file = (
'tensorflow_text/python/ops/test_data/'
'test_oss_model.model')
self.model = gfile.GFile(sentencepiece_model_file, 'rb').read()
def testGetVocabSize(self):
sp = SentencepieceTokenizer(self.model)
self.assertAllEqual(1000, sp.vocab_size())
def testIdToStringScalar(self):
sp = SentencepieceTokenizer(self.model)
result = sp.id_to_string(125)
self.assertAllEqual('ve', result)
def testIdToStringVector(self):
sp = SentencepieceTokenizer(self.model)
pieces = _utf8([['▁I', '▁l', 'o', 've', '▁c', 'ar', 'pe', 't'],
['▁I', '▁l', 'o', 've', '▁desk', '.'],
['▁I', '▁l', 'o', 've', '▁l', 'amp', '.']])
ids = [[9, 169, 21, 125, 78, 48, 132, 15], [9, 169, 21, 125, 727, 6],
[9, 169, 21, 125, 169, 579, 6]]
result = sp.id_to_string(ragged_factory_ops.constant(ids))
self.assertAllEqual(pieces, result)
def testIdToStringRagged(self):
sp = SentencepieceTokenizer(self.model)
pieces = _utf8(
[[['▁I', '▁l', 'o', 've', '▁c', 'ar', 'pe', 't'],
['▁I', '▁l', 'o', 've', '▁desk', '.'],
['▁I', '▁l', 'o', 've', '▁l', 'amp', '.']],
[['▁', 'N', 'ever', '▁tell', '▁me', '▁the', '▁', 'o', 'd', 'd', 's']]])
ids = [[[9, 169, 21, 125, 78, 48, 132, 15], [9, 169, 21, 125, 727, 6],
[9, 169, 21, 125, 169, 579, 6]],
[[4, 199, 363, 310, 33, 7, 4, 21, 17, 17, 8]]]
result = sp.id_to_string(ragged_factory_ops.constant(ids, dtypes.int32))
self.assertAllEqual(pieces, result)
@parameterized.parameters([
(False, False, False, dtypes.int32),
(False, False, True, dtypes.int32),
(False, True, False, dtypes.int32),
(False, True, True, dtypes.int32),
(True, False, False, dtypes.int32),
(True, False, True, dtypes.int32),
(True, True, False, dtypes.int32),
(True, True, True, dtypes.int32),
(False, False, False, dtypes.string),
(False, False, True, dtypes.string),
(False, True, False, dtypes.string),
(False, True, True, dtypes.string),
(True, False, False, dtypes.string),
(True, False, True, dtypes.string),
(True, True, False, dtypes.string),
(True, True, True, dtypes.string),
])
def testTokenizeAndDetokenizeScalar(self, reverse, add_bos, add_eos,
out_type):
sp = self.getTokenizerAndSetOptions(reverse, add_bos, add_eos, out_type)
sentence = 'I love lamp.'
expected = []
if out_type == dtypes.int32:
expected = [9, 169, 21, 125, 169, 579, 6]
else:
expected = _utf8(['▁I', '▁l', 'o', 've', '▁l', 'amp', '.'])
expected = self.transformExpected(expected)
result = sp.tokenize(sentence)
self.assertAllEqual(expected, result)
detokenized = sp.detokenize(result)
self.assertAllEqual(_utf8(sentence), detokenized)
@parameterized.parameters([
(False, False, False, dtypes.int32),
(False, False, True, dtypes.int32),
(False, True, False, dtypes.int32),
(False, True, True, dtypes.int32),
(True, False, False, dtypes.int32),
(True, False, True, dtypes.int32),
(True, True, False, dtypes.int32),
(True, True, True, dtypes.int32),
(False, False, False, dtypes.string),
(False, False, True, dtypes.string),
(False, True, False, dtypes.string),
(False, True, True, dtypes.string),
(True, False, False, dtypes.string),
(True, False, True, dtypes.string),
(True, True, False, dtypes.string),
(True, True, True, dtypes.string),
])
def testTokenizeAndDetokenizeVec(self, reverse, add_bos, add_eos, out_type):
sp = self.getTokenizerAndSetOptions(reverse, add_bos, add_eos, out_type)
sentences = ['I love carpet', 'I love desk.', 'I love lamp.']
expected = []
if out_type == dtypes.int32:
expected = [[9, 169, 21, 125, 78, 48, 132, 15], [9, 169, 21, 125, 727, 6],
[9, 169, 21, 125, 169, 579, 6]]
else:
expected = _utf8([['▁I', '▁l', 'o', 've', '▁c', 'ar', 'pe', 't'],
['▁I', '▁l', 'o', 've', '▁desk', '.'],
['▁I', '▁l', 'o', 've', '▁l', 'amp', '.']])
expected = self.transformExpected(expected)
result = sp.tokenize(sentences)
self.assertAllEqual(expected, result)
detokenized = sp.detokenize(result)
self.assertAllEqual(_utf8(sentences), detokenized)
@parameterized.parameters([
(False, False, False, dtypes.int32),
(False, False, True, dtypes.int32),
(False, True, False, dtypes.int32),
(False, True, True, dtypes.int32),
(True, False, False, dtypes.int32),
(True, False, True, dtypes.int32),
(True, True, False, dtypes.int32),
(True, True, True, dtypes.int32),
(False, False, False, dtypes.string),
(False, False, True, dtypes.string),
(False, True, False, dtypes.string),
(False, True, True, dtypes.string),
(True, False, False, dtypes.string),
(True, False, True, dtypes.string),
(True, True, False, dtypes.string),
(True, True, True, dtypes.string),
])
def testTokenizeAndDetokenizeUniformTensorMatrix(self, reverse, add_bos,
add_eos, out_type):
sp = self.getTokenizerAndSetOptions(reverse, add_bos, add_eos, out_type)
sentences = [['I love carpet', 'I love desk.'],
['I love lamp.', 'Never tell me the odds']]
expected = []
if out_type == dtypes.int32:
expected = [[[9, 169, 21, 125, 78, 48, 132, 15],
[9, 169, 21, 125, 727, 6]],
[[9, 169, 21, 125, 169, 579, 6],
[4, 199, 363, 310, 33, 7, 4, 21, 17, 17, 8]]]
else:
expected = _utf8(
[[['▁I', '▁l', 'o', 've', '▁c', 'ar', 'pe', 't'],
['▁I', '▁l', 'o', 've', '▁desk', '.']],
[['▁I', '▁l', 'o', 've', '▁l', 'amp', '.'],
['▁', 'N', 'ever', '▁tell', '▁me', '▁the', '▁', 'o', 'd', 'd',
's']]])
expected = self.transformExpected(expected)
result = sp.tokenize(constant_op.constant(sentences))
self.assertAllEqual(expected, result)
detokenized = sp.detokenize(result)
self.assertAllEqual(_utf8(sentences), detokenized)
@parameterized.parameters([
(False, False, False, dtypes.int32),
(False, False, True, dtypes.int32),
(False, True, False, dtypes.int32),
(False, True, True, dtypes.int32),
(True, False, False, dtypes.int32),
(True, False, True, dtypes.int32),
(True, True, False, dtypes.int32),
(True, True, True, dtypes.int32),
(False, False, False, dtypes.string),
(False, False, True, dtypes.string),
(False, True, False, dtypes.string),
(False, True, True, dtypes.string),
(True, False, False, dtypes.string),
(True, False, True, dtypes.string),
(True, True, False, dtypes.string),
(True, True, True, dtypes.string),
])
def testTokenizeAndDetokenizeRaggedMatrix(self, reverse, add_bos, add_eos,
out_type):
sp = self.getTokenizerAndSetOptions(reverse, add_bos, add_eos, out_type)
sentences = [['I love carpet', 'I love desk.', 'I love lamp.'],
['Never tell me the odds']]
expected = []
if out_type == dtypes.int32:
expected = [[[9, 169, 21, 125, 78, 48, 132, 15],
[9, 169, 21, 125, 727, 6], [9, 169, 21, 125, 169, 579, 6]],
[[4, 199, 363, 310, 33, 7, 4, 21, 17, 17, 8]]]
else:
expected = _utf8(
[[['▁I', '▁l', 'o', 've', '▁c', 'ar', 'pe', 't'],
['▁I', '▁l', 'o', 've', '▁desk', '.'],
['▁I', '▁l', 'o', 've', '▁l', 'amp', '.']],
[['▁', 'N', 'ever', '▁tell', '▁me', '▁the', '▁', 'o', 'd', 'd',
's']]])
expected = self.transformExpected(expected)
result = sp.tokenize(ragged_factory_ops.constant(sentences))
self.assertAllEqual(expected, result)
detokenized = sp.detokenize(result)
self.assertAllEqual(_utf8(sentences), detokenized)
@parameterized.parameters([
(False, False, False, dtypes.int32),
(False, False, True, dtypes.int32),
(False, True, False, dtypes.int32),
(False, True, True, dtypes.int32),
(True, False, False, dtypes.int32),
(True, False, True, dtypes.int32),
(True, True, False, dtypes.int32),
(True, True, True, dtypes.int32),
(False, False, False, dtypes.string),
(False, False, True, dtypes.string),
(False, True, False, dtypes.string),
(False, True, True, dtypes.string),
(True, False, False, dtypes.string),
(True, False, True, dtypes.string),
(True, True, False, dtypes.string),
(True, True, True, dtypes.string),
])
def testTokenizeAndDetokenizeWithOffsetsScalar(self, reverse, add_bos,
add_eos, out_type):
sp = self.getTokenizerAndSetOptions(reverse, add_bos, add_eos, out_type)
sentence = 'I love lamp.'
expected_tok = []
expected_starts = [0, 1, 3, 4, 6, 8, 11]
expected_limits = [1, 3, 4, 6, 8, 11, 12]
if out_type == dtypes.int32:
expected_tok = [9, 169, 21, 125, 169, 579, 6]
else:
expected_tok = _utf8(['▁I', '▁l', 'o', 've', '▁l', 'amp', '.'])
expected_tok = self.transformExpected(expected_tok)
expected_starts = self.transformExpected(expected_starts, True)
expected_limits = self.transformExpected(expected_limits, True)
(tokens, starts,
limits) = sp.tokenize_with_offsets(ragged_factory_ops.constant(sentence))
self.assertAllEqual(expected_tok, tokens)
self.assertAllEqual(expected_starts, starts)
self.assertAllEqual(expected_limits, limits)
detokenized = sp.detokenize(tokens)
self.assertAllEqual(_utf8(sentence), detokenized)
def testTokenizeAndDetokenizeWithOffsetsSingleElementVector(self):
sp = SentencepieceTokenizer(self.model, out_type=dtypes.string)
sentences = ['I love lamp.']
expected_tokens = [['▁I', '▁l', 'o', 've', '▁l', 'amp', '.']]
expected_tokens = _utf8(expected_tokens)
expected_starts = [[0, 1, 3, 4, 6, 8, 11]]
expected_limits = [[1, 3, 4, 6, 8, 11, 12]]
(tokens, starts,
limits) = sp.tokenize_with_offsets(ragged_factory_ops.constant(sentences))
self.assertAllEqual(expected_tokens, tokens)
self.assertAllEqual(expected_starts, starts)
self.assertAllEqual(expected_limits, limits)
detokenized = sp.detokenize(tokens)
self.assertAllEqual(_utf8(sentences), detokenized)
def testTokenizeAndDetokenizeWithOffsetsVector(self):
sp = SentencepieceTokenizer(self.model, out_type=dtypes.string)
sentences = ['I love carpet.', 'I love desk.', 'I love lamp.']
expected_tokens = [['▁I', '▁l', 'o', 've', '▁c', 'ar', 'pe', 't', '.'],
['▁I', '▁l', 'o', 've', '▁desk', '.'],
['▁I', '▁l', 'o', 've', '▁l', 'amp', '.']]
expected_tokens = _utf8(expected_tokens)
expected_starts = [[0, 1, 3, 4, 6, 8, 10, 12, 13], [0, 1, 3, 4, 6, 11],
[0, 1, 3, 4, 6, 8, 11]]
expected_limits = [[1, 3, 4, 6, 8, 10, 12, 13, 14], [1, 3, 4, 6, 11, 12],
[1, 3, 4, 6, 8, 11, 12]]
(tokens, starts,
limits) = sp.tokenize_with_offsets(ragged_factory_ops.constant(sentences))
self.assertAllEqual(expected_tokens, tokens)
self.assertAllEqual(expected_starts, starts)
self.assertAllEqual(expected_limits, limits)
detokenized = sp.detokenize(tokens)
self.assertAllEqual(_utf8(sentences), detokenized)
def testTokenizeAndDetokenizeWithOffsetsMatrix(self):
sp = SentencepieceTokenizer(self.model, out_type=dtypes.string)
sentences = [['I love carpet.', 'I love desk.', 'I love lamp.'],
['Never tell me the odds']]
expected_tokens = [[['▁I', '▁l', 'o', 've', '▁c', 'ar', 'pe', 't', '.'],
['▁I', '▁l', 'o', 've', '▁desk', '.'],
['▁I', '▁l', 'o', 've', '▁l', 'amp', '.']],
[[
'▁', 'N', 'ever', '▁tell', '▁me', '▁the', '▁', 'o',
'd', 'd', 's'
]]]
expected_tokens = _utf8(expected_tokens)
expected_starts = [[[0, 1, 3, 4, 6, 8, 10, 12, 13], [0, 1, 3, 4, 6, 11],
[0, 1, 3, 4, 6, 8, 11]],
[[0, 0, 1, 5, 10, 13, 17, 18, 19, 20, 21]]]
expected_limits = [[[1, 3, 4, 6, 8, 10, 12, 13, 14], [1, 3, 4, 6, 11, 12],
[1, 3, 4, 6, 8, 11, 12]],
[[0, 1, 5, 10, 13, 17, 18, 19, 20, 21, 22]]]
(tokens, starts,
limits) = sp.tokenize_with_offsets(ragged_factory_ops.constant(sentences))
self.assertAllEqual(expected_tokens, tokens)
self.assertAllEqual(expected_starts, starts)
self.assertAllEqual(expected_limits, limits)
detokenized = sp.detokenize(tokens)
self.assertAllEqual(_utf8(sentences), detokenized)
@parameterized.parameters([
(-1, 0.1, dtypes.int32),
(64, 0.1, dtypes.int32),
(0, 0.0, dtypes.int32),
(-1, 0.1, dtypes.string),
(64, 0.1, dtypes.string),
(0, 0.0, dtypes.string),
])
def testSampleTokenizeAndDetokenize(self, nbest_size, alpha, out_type):
sp = SentencepieceTokenizer(
self.model, nbest_size=nbest_size, alpha=alpha, out_type=out_type)
sentences = [['I love carpet', 'I love desk.', 'I love lamp.'],
['Never tell me the odds']]
result = sp.tokenize(ragged_factory_ops.constant(sentences))
detokenized = sp.detokenize(result)
self.assertAllEqual(_utf8(sentences), detokenized)
def testSavedModel(self):
sp = SentencepieceTokenizer(self.model)
test_module = TestSavedModelModule(sp)
inputs = constant_op.constant(['hello world'])
expected_result = test_module.tokenize(inputs)
temp_dir = tempfile.mkdtemp(dir=test.get_temp_dir())
save.save(test_module, temp_dir)
restored_model = load.load(temp_dir)
self.assertAllEqual(restored_model.tokenize(inputs), expected_result)
file_io.delete_recursively(temp_dir)
def testBasicPipeline(self):
if not context.executing_eagerly():
self.skipTest('testBasicPipeline only supported in eager mode.')
sp = SentencepieceTokenizer(self.model)
strings = ['hello', 'world']
dataset = dataset_ops.Dataset.from_tensor_slices(strings)
# Ensure we can map the tokenizer across the dataset.
dataset1 = dataset.map(sp.tokenize)
# Ensure there's no error with a second map call.
dataset2 = dataset.map(sp.tokenize)
expected = sp.tokenize(strings)
for i, result in enumerate(dataset1):
self.assertAllEqual(result, expected[i])
for i, result in enumerate(dataset2):
self.assertAllEqual(result, expected[i])
def testEmptyModel(self):
with self.cached_session():
with self.assertRaises(errors.InvalidArgumentError):
sp = SentencepieceTokenizer()
result = sp.tokenize('whatever')
result.eval()
def testInvalidModel(self):
with self.cached_session():
with self.assertRaises(errors.InternalError):
sp = SentencepieceTokenizer('invalid model')
result = sp.tokenize('whatever')
result.eval()
if __name__ == '__main__':
test.main()
| 41.157778 | 85 | 0.606933 |
import sys
import tempfile
from absl.testing import parameterized
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_util
from tensorflow.python.lib.io import file_io
from tensorflow.python.module import module
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.saved_model import load
from tensorflow.python.saved_model import save
from tensorflow_text.python.ops.sentencepiece_tokenizer import SentencepieceTokenizer
def _utf8(tokens):
if sys.version_info[0] == 2:
return tokens
if isinstance(tokens, list):
return [_utf8(t) for t in tokens]
else:
return tokens.encode('utf-8')
class TestSavedModelModule(module.Module):
def __init__(self, tokenizer):
self.tokenizer = tokenizer
@def_function.function(input_signature=[
tensor_spec.TensorSpec(shape=[None], dtype=dtypes.string)
])
def tokenize(self, inputs):
return self.tokenizer.tokenize(inputs)
@test_util.run_all_in_graph_and_eager_modes
class SentencepieceTokenizerOpTest(test_util.TensorFlowTestCase,
parameterized.TestCase):
def getTokenizerAndSetOptions(self, reverse, add_bos, add_eos, out_type):
self.reverse = reverse
self.add_bos = add_bos
self.add_eos = add_eos
self.out_type = out_type
return SentencepieceTokenizer(
self.model,
reverse=reverse,
add_bos=add_bos,
add_eos=add_eos,
out_type=out_type)
def transformExpected(self, expected, is_offsets=False):
bos = _utf8('<s>')
eos = _utf8('</s>')
if is_offsets:
bos = 0
eos = 0
elif self.out_type == dtypes.int32:
bos = 1
eos = 2
if not isinstance(expected[0], list):
if self.add_bos:
expected = [bos] + expected
if self.add_eos:
expected = expected + [eos]
if self.reverse:
expected = [x for x in reversed(expected)]
else:
return [self.transformExpected(x) for x in expected]
return expected
def setUp(self):
super(SentencepieceTokenizerOpTest, self).setUp()
sentencepiece_model_file = (
'tensorflow_text/python/ops/test_data/'
'test_oss_model.model')
self.model = gfile.GFile(sentencepiece_model_file, 'rb').read()
def testGetVocabSize(self):
sp = SentencepieceTokenizer(self.model)
self.assertAllEqual(1000, sp.vocab_size())
def testIdToStringScalar(self):
sp = SentencepieceTokenizer(self.model)
result = sp.id_to_string(125)
self.assertAllEqual('ve', result)
def testIdToStringVector(self):
sp = SentencepieceTokenizer(self.model)
pieces = _utf8([['▁I', '▁l', 'o', 've', '▁c', 'ar', 'pe', 't'],
['▁I', '▁l', 'o', 've', '▁desk', '.'],
['▁I', '▁l', 'o', 've', '▁l', 'amp', '.']])
ids = [[9, 169, 21, 125, 78, 48, 132, 15], [9, 169, 21, 125, 727, 6],
[9, 169, 21, 125, 169, 579, 6]]
result = sp.id_to_string(ragged_factory_ops.constant(ids))
self.assertAllEqual(pieces, result)
def testIdToStringRagged(self):
sp = SentencepieceTokenizer(self.model)
pieces = _utf8(
[[['▁I', '▁l', 'o', 've', '▁c', 'ar', 'pe', 't'],
['▁I', '▁l', 'o', 've', '▁desk', '.'],
['▁I', '▁l', 'o', 've', '▁l', 'amp', '.']],
[['▁', 'N', 'ever', '▁tell', '▁me', '▁the', '▁', 'o', 'd', 'd', 's']]])
ids = [[[9, 169, 21, 125, 78, 48, 132, 15], [9, 169, 21, 125, 727, 6],
[9, 169, 21, 125, 169, 579, 6]],
[[4, 199, 363, 310, 33, 7, 4, 21, 17, 17, 8]]]
result = sp.id_to_string(ragged_factory_ops.constant(ids, dtypes.int32))
self.assertAllEqual(pieces, result)
@parameterized.parameters([
(False, False, False, dtypes.int32),
(False, False, True, dtypes.int32),
(False, True, False, dtypes.int32),
(False, True, True, dtypes.int32),
(True, False, False, dtypes.int32),
(True, False, True, dtypes.int32),
(True, True, False, dtypes.int32),
(True, True, True, dtypes.int32),
(False, False, False, dtypes.string),
(False, False, True, dtypes.string),
(False, True, False, dtypes.string),
(False, True, True, dtypes.string),
(True, False, False, dtypes.string),
(True, False, True, dtypes.string),
(True, True, False, dtypes.string),
(True, True, True, dtypes.string),
])
def testTokenizeAndDetokenizeScalar(self, reverse, add_bos, add_eos,
out_type):
sp = self.getTokenizerAndSetOptions(reverse, add_bos, add_eos, out_type)
sentence = 'I love lamp.'
expected = []
if out_type == dtypes.int32:
expected = [9, 169, 21, 125, 169, 579, 6]
else:
expected = _utf8(['▁I', '▁l', 'o', 've', '▁l', 'amp', '.'])
expected = self.transformExpected(expected)
result = sp.tokenize(sentence)
self.assertAllEqual(expected, result)
detokenized = sp.detokenize(result)
self.assertAllEqual(_utf8(sentence), detokenized)
@parameterized.parameters([
(False, False, False, dtypes.int32),
(False, False, True, dtypes.int32),
(False, True, False, dtypes.int32),
(False, True, True, dtypes.int32),
(True, False, False, dtypes.int32),
(True, False, True, dtypes.int32),
(True, True, False, dtypes.int32),
(True, True, True, dtypes.int32),
(False, False, False, dtypes.string),
(False, False, True, dtypes.string),
(False, True, False, dtypes.string),
(False, True, True, dtypes.string),
(True, False, False, dtypes.string),
(True, False, True, dtypes.string),
(True, True, False, dtypes.string),
(True, True, True, dtypes.string),
])
def testTokenizeAndDetokenizeVec(self, reverse, add_bos, add_eos, out_type):
sp = self.getTokenizerAndSetOptions(reverse, add_bos, add_eos, out_type)
sentences = ['I love carpet', 'I love desk.', 'I love lamp.']
expected = []
if out_type == dtypes.int32:
expected = [[9, 169, 21, 125, 78, 48, 132, 15], [9, 169, 21, 125, 727, 6],
[9, 169, 21, 125, 169, 579, 6]]
else:
expected = _utf8([['▁I', '▁l', 'o', 've', '▁c', 'ar', 'pe', 't'],
['▁I', '▁l', 'o', 've', '▁desk', '.'],
['▁I', '▁l', 'o', 've', '▁l', 'amp', '.']])
expected = self.transformExpected(expected)
result = sp.tokenize(sentences)
self.assertAllEqual(expected, result)
detokenized = sp.detokenize(result)
self.assertAllEqual(_utf8(sentences), detokenized)
@parameterized.parameters([
(False, False, False, dtypes.int32),
(False, False, True, dtypes.int32),
(False, True, False, dtypes.int32),
(False, True, True, dtypes.int32),
(True, False, False, dtypes.int32),
(True, False, True, dtypes.int32),
(True, True, False, dtypes.int32),
(True, True, True, dtypes.int32),
(False, False, False, dtypes.string),
(False, False, True, dtypes.string),
(False, True, False, dtypes.string),
(False, True, True, dtypes.string),
(True, False, False, dtypes.string),
(True, False, True, dtypes.string),
(True, True, False, dtypes.string),
(True, True, True, dtypes.string),
])
def testTokenizeAndDetokenizeUniformTensorMatrix(self, reverse, add_bos,
add_eos, out_type):
sp = self.getTokenizerAndSetOptions(reverse, add_bos, add_eos, out_type)
sentences = [['I love carpet', 'I love desk.'],
['I love lamp.', 'Never tell me the odds']]
expected = []
if out_type == dtypes.int32:
expected = [[[9, 169, 21, 125, 78, 48, 132, 15],
[9, 169, 21, 125, 727, 6]],
[[9, 169, 21, 125, 169, 579, 6],
[4, 199, 363, 310, 33, 7, 4, 21, 17, 17, 8]]]
else:
expected = _utf8(
[[['▁I', '▁l', 'o', 've', '▁c', 'ar', 'pe', 't'],
['▁I', '▁l', 'o', 've', '▁desk', '.']],
[['▁I', '▁l', 'o', 've', '▁l', 'amp', '.'],
['▁', 'N', 'ever', '▁tell', '▁me', '▁the', '▁', 'o', 'd', 'd',
's']]])
expected = self.transformExpected(expected)
result = sp.tokenize(constant_op.constant(sentences))
self.assertAllEqual(expected, result)
detokenized = sp.detokenize(result)
self.assertAllEqual(_utf8(sentences), detokenized)
@parameterized.parameters([
(False, False, False, dtypes.int32),
(False, False, True, dtypes.int32),
(False, True, False, dtypes.int32),
(False, True, True, dtypes.int32),
(True, False, False, dtypes.int32),
(True, False, True, dtypes.int32),
(True, True, False, dtypes.int32),
(True, True, True, dtypes.int32),
(False, False, False, dtypes.string),
(False, False, True, dtypes.string),
(False, True, False, dtypes.string),
(False, True, True, dtypes.string),
(True, False, False, dtypes.string),
(True, False, True, dtypes.string),
(True, True, False, dtypes.string),
(True, True, True, dtypes.string),
])
def testTokenizeAndDetokenizeRaggedMatrix(self, reverse, add_bos, add_eos,
out_type):
sp = self.getTokenizerAndSetOptions(reverse, add_bos, add_eos, out_type)
sentences = [['I love carpet', 'I love desk.', 'I love lamp.'],
['Never tell me the odds']]
expected = []
if out_type == dtypes.int32:
expected = [[[9, 169, 21, 125, 78, 48, 132, 15],
[9, 169, 21, 125, 727, 6], [9, 169, 21, 125, 169, 579, 6]],
[[4, 199, 363, 310, 33, 7, 4, 21, 17, 17, 8]]]
else:
expected = _utf8(
[[['▁I', '▁l', 'o', 've', '▁c', 'ar', 'pe', 't'],
['▁I', '▁l', 'o', 've', '▁desk', '.'],
['▁I', '▁l', 'o', 've', '▁l', 'amp', '.']],
[['▁', 'N', 'ever', '▁tell', '▁me', '▁the', '▁', 'o', 'd', 'd',
's']]])
expected = self.transformExpected(expected)
result = sp.tokenize(ragged_factory_ops.constant(sentences))
self.assertAllEqual(expected, result)
detokenized = sp.detokenize(result)
self.assertAllEqual(_utf8(sentences), detokenized)
@parameterized.parameters([
(False, False, False, dtypes.int32),
(False, False, True, dtypes.int32),
(False, True, False, dtypes.int32),
(False, True, True, dtypes.int32),
(True, False, False, dtypes.int32),
(True, False, True, dtypes.int32),
(True, True, False, dtypes.int32),
(True, True, True, dtypes.int32),
(False, False, False, dtypes.string),
(False, False, True, dtypes.string),
(False, True, False, dtypes.string),
(False, True, True, dtypes.string),
(True, False, False, dtypes.string),
(True, False, True, dtypes.string),
(True, True, False, dtypes.string),
(True, True, True, dtypes.string),
])
def testTokenizeAndDetokenizeWithOffsetsScalar(self, reverse, add_bos,
add_eos, out_type):
sp = self.getTokenizerAndSetOptions(reverse, add_bos, add_eos, out_type)
sentence = 'I love lamp.'
expected_tok = []
expected_starts = [0, 1, 3, 4, 6, 8, 11]
expected_limits = [1, 3, 4, 6, 8, 11, 12]
if out_type == dtypes.int32:
expected_tok = [9, 169, 21, 125, 169, 579, 6]
else:
expected_tok = _utf8(['▁I', '▁l', 'o', 've', '▁l', 'amp', '.'])
expected_tok = self.transformExpected(expected_tok)
expected_starts = self.transformExpected(expected_starts, True)
expected_limits = self.transformExpected(expected_limits, True)
(tokens, starts,
limits) = sp.tokenize_with_offsets(ragged_factory_ops.constant(sentence))
self.assertAllEqual(expected_tok, tokens)
self.assertAllEqual(expected_starts, starts)
self.assertAllEqual(expected_limits, limits)
detokenized = sp.detokenize(tokens)
self.assertAllEqual(_utf8(sentence), detokenized)
def testTokenizeAndDetokenizeWithOffsetsSingleElementVector(self):
sp = SentencepieceTokenizer(self.model, out_type=dtypes.string)
sentences = ['I love lamp.']
expected_tokens = [['▁I', '▁l', 'o', 've', '▁l', 'amp', '.']]
expected_tokens = _utf8(expected_tokens)
expected_starts = [[0, 1, 3, 4, 6, 8, 11]]
expected_limits = [[1, 3, 4, 6, 8, 11, 12]]
(tokens, starts,
limits) = sp.tokenize_with_offsets(ragged_factory_ops.constant(sentences))
self.assertAllEqual(expected_tokens, tokens)
self.assertAllEqual(expected_starts, starts)
self.assertAllEqual(expected_limits, limits)
detokenized = sp.detokenize(tokens)
self.assertAllEqual(_utf8(sentences), detokenized)
def testTokenizeAndDetokenizeWithOffsetsVector(self):
sp = SentencepieceTokenizer(self.model, out_type=dtypes.string)
sentences = ['I love carpet.', 'I love desk.', 'I love lamp.']
expected_tokens = [['▁I', '▁l', 'o', 've', '▁c', 'ar', 'pe', 't', '.'],
['▁I', '▁l', 'o', 've', '▁desk', '.'],
['▁I', '▁l', 'o', 've', '▁l', 'amp', '.']]
expected_tokens = _utf8(expected_tokens)
expected_starts = [[0, 1, 3, 4, 6, 8, 10, 12, 13], [0, 1, 3, 4, 6, 11],
[0, 1, 3, 4, 6, 8, 11]]
expected_limits = [[1, 3, 4, 6, 8, 10, 12, 13, 14], [1, 3, 4, 6, 11, 12],
[1, 3, 4, 6, 8, 11, 12]]
(tokens, starts,
limits) = sp.tokenize_with_offsets(ragged_factory_ops.constant(sentences))
self.assertAllEqual(expected_tokens, tokens)
self.assertAllEqual(expected_starts, starts)
self.assertAllEqual(expected_limits, limits)
detokenized = sp.detokenize(tokens)
self.assertAllEqual(_utf8(sentences), detokenized)
def testTokenizeAndDetokenizeWithOffsetsMatrix(self):
sp = SentencepieceTokenizer(self.model, out_type=dtypes.string)
sentences = [['I love carpet.', 'I love desk.', 'I love lamp.'],
['Never tell me the odds']]
expected_tokens = [[['▁I', '▁l', 'o', 've', '▁c', 'ar', 'pe', 't', '.'],
['▁I', '▁l', 'o', 've', '▁desk', '.'],
['▁I', '▁l', 'o', 've', '▁l', 'amp', '.']],
[[
'▁', 'N', 'ever', '▁tell', '▁me', '▁the', '▁', 'o',
'd', 'd', 's'
]]]
expected_tokens = _utf8(expected_tokens)
expected_starts = [[[0, 1, 3, 4, 6, 8, 10, 12, 13], [0, 1, 3, 4, 6, 11],
[0, 1, 3, 4, 6, 8, 11]],
[[0, 0, 1, 5, 10, 13, 17, 18, 19, 20, 21]]]
expected_limits = [[[1, 3, 4, 6, 8, 10, 12, 13, 14], [1, 3, 4, 6, 11, 12],
[1, 3, 4, 6, 8, 11, 12]],
[[0, 1, 5, 10, 13, 17, 18, 19, 20, 21, 22]]]
(tokens, starts,
limits) = sp.tokenize_with_offsets(ragged_factory_ops.constant(sentences))
self.assertAllEqual(expected_tokens, tokens)
self.assertAllEqual(expected_starts, starts)
self.assertAllEqual(expected_limits, limits)
detokenized = sp.detokenize(tokens)
self.assertAllEqual(_utf8(sentences), detokenized)
@parameterized.parameters([
(-1, 0.1, dtypes.int32),
(64, 0.1, dtypes.int32),
(0, 0.0, dtypes.int32),
(-1, 0.1, dtypes.string),
(64, 0.1, dtypes.string),
(0, 0.0, dtypes.string),
])
def testSampleTokenizeAndDetokenize(self, nbest_size, alpha, out_type):
sp = SentencepieceTokenizer(
self.model, nbest_size=nbest_size, alpha=alpha, out_type=out_type)
sentences = [['I love carpet', 'I love desk.', 'I love lamp.'],
['Never tell me the odds']]
result = sp.tokenize(ragged_factory_ops.constant(sentences))
detokenized = sp.detokenize(result)
self.assertAllEqual(_utf8(sentences), detokenized)
def testSavedModel(self):
sp = SentencepieceTokenizer(self.model)
test_module = TestSavedModelModule(sp)
inputs = constant_op.constant(['hello world'])
expected_result = test_module.tokenize(inputs)
temp_dir = tempfile.mkdtemp(dir=test.get_temp_dir())
save.save(test_module, temp_dir)
restored_model = load.load(temp_dir)
self.assertAllEqual(restored_model.tokenize(inputs), expected_result)
file_io.delete_recursively(temp_dir)
def testBasicPipeline(self):
if not context.executing_eagerly():
self.skipTest('testBasicPipeline only supported in eager mode.')
sp = SentencepieceTokenizer(self.model)
strings = ['hello', 'world']
dataset = dataset_ops.Dataset.from_tensor_slices(strings)
dataset1 = dataset.map(sp.tokenize)
dataset2 = dataset.map(sp.tokenize)
expected = sp.tokenize(strings)
for i, result in enumerate(dataset1):
self.assertAllEqual(result, expected[i])
for i, result in enumerate(dataset2):
self.assertAllEqual(result, expected[i])
def testEmptyModel(self):
with self.cached_session():
with self.assertRaises(errors.InvalidArgumentError):
sp = SentencepieceTokenizer()
result = sp.tokenize('whatever')
result.eval()
def testInvalidModel(self):
with self.cached_session():
with self.assertRaises(errors.InternalError):
sp = SentencepieceTokenizer('invalid model')
result = sp.tokenize('whatever')
result.eval()
if __name__ == '__main__':
test.main()
| true | true |
f7343149bcb9f8ac184634f891b354142315a99b | 1,318 | py | Python | projectile.py | jmkinder1/code-samples | 9c6cd3c6f16579a6c1f5210779b8ec6ad53fbdba | [
"BSD-3-Clause"
] | 5 | 2021-07-17T05:19:00.000Z | 2022-01-05T05:39:50.000Z | projectile.py | jmkinder1/code-samples | 9c6cd3c6f16579a6c1f5210779b8ec6ad53fbdba | [
"BSD-3-Clause"
] | null | null | null | projectile.py | jmkinder1/code-samples | 9c6cd3c6f16579a6c1f5210779b8ec6ad53fbdba | [
"BSD-3-Clause"
] | 8 | 2020-12-26T23:41:27.000Z | 2022-02-24T22:18:41.000Z | # projectile.py
# -----------------------------------------------------------------------------
# Calculate how long an object is in the air when thrown from a specified height
# with a range of initial speeds assuming constant acceleration due to gravity:
# 0.5 * g * t**2 - v0 * t - y0 = 0
# -----------------------------------------------------------------------------
import numpy as np
#%% Initialization of variables.
initial_speed = 0.0 # v0 = initial vertical speed of ball in [m/s]
impact_time = 0.0 # t = time of impact in [s] (computed in loop)
#%% Initialization of parameters.
g = 9.8066 # gravitational acceleration in [m/s^2]
initial_height = 2.0 # y0 = height ball is thrown from in [m]
speed_increment = 5.0 # how much to increase speed in [m/s] for each iteration
cutoff_time = 10.0 # stop computing after impact time exceeds cutoff
#%% Calculate and display impact time. Increment initial speed each step.
# Repeat until impact time exceeds cutoff.
while impact_time < cutoff_time:
# Use quadratic equation to solve kinematic equation for impact time:
impact_time = (np.sqrt(initial_speed**2 + 2 * g * initial_height) + initial_speed) / g
print("speed= {} m/s; time= {:.1f} s".format(initial_speed, impact_time))
initial_speed += speed_increment
print("Calculation complete.")
| 48.814815 | 87 | 0.636571 |
import numpy as np
initial_speed = 0.0
impact_time = 0.0
g = 9.8066
initial_height = 2.0
speed_increment = 5.0
cutoff_time = 10.0
while impact_time < cutoff_time:
impact_time = (np.sqrt(initial_speed**2 + 2 * g * initial_height) + initial_speed) / g
print("speed= {} m/s; time= {:.1f} s".format(initial_speed, impact_time))
initial_speed += speed_increment
print("Calculation complete.")
| true | true |
f73431baeb0c90da847b750fe45e3a856b6a9533 | 3,748 | py | Python | model/functional.py | yil8/GPN | e0ccba70db6f1d3264f8d3dd38fc4c62bcebd7ad | [
"Apache-2.0"
] | 16 | 2019-03-01T17:52:52.000Z | 2021-12-31T09:39:01.000Z | model/functional.py | xiamenwcy/GPN | e0ccba70db6f1d3264f8d3dd38fc4c62bcebd7ad | [
"Apache-2.0"
] | 1 | 2021-05-12T11:24:05.000Z | 2021-05-12T17:44:46.000Z | model/functional.py | xiamenwcy/GPN | e0ccba70db6f1d3264f8d3dd38fc4c62bcebd7ad | [
"Apache-2.0"
] | 5 | 2019-04-02T03:42:02.000Z | 2020-08-01T20:40:15.000Z | import torch
import torch.nn.functional as F
import numpy as np
# Original author: Francisco Massa:
# https://github.com/fmassa/object-detection.torch
# Ported to PyTorch by Max deGroot (02/01/2017)
def nms(boxes, scores, overlap=0.7):
    """Apply non-maximum suppression at test time to avoid detecting too many
    overlapping bounding boxes for a given object.
    Args:
        scores: (N) FloatTensor of per-box confidence scores
        boxes: (N, 4) FloatTensor of boxes in (x1, y1, x2, y2) form
        overlap: (float) The IoU threshold above which a box is suppressed.
    Return:
        The indices of the kept boxes with respect to N, in descending
        score order.
    """
    keep = scores.new(scores.size(0)).zero_().long()
    if boxes.numel() == 0:
        return keep
    x1 = boxes[:, 0]
    y1 = boxes[:, 1]
    x2 = boxes[:, 2]
    y2 = boxes[:, 3]
    area = torch.mul(x2 - x1, y2 - y1)
    v, idx = scores.sort(dim=0, descending=True)  # sort scores in descending order
    # Scratch tensors reused as 'out=' targets by torch.index_select below.
    xx1 = boxes.new()
    yy1 = boxes.new()
    xx2 = boxes.new()
    yy2 = boxes.new()
    w = boxes.new()
    h = boxes.new()
    count = 0
    while idx.numel() > 0:
        i = idx[0]  # index of current largest val
        keep[count] = i
        count += 1
        if idx.size(0) == 1:
            break
        idx = idx[1:]  # remove kept element from view
        # load bboxes of next highest vals
        torch.index_select(x1, 0, idx, out=xx1)
        torch.index_select(y1, 0, idx, out=yy1)
        torch.index_select(x2, 0, idx, out=xx2)
        torch.index_select(y2, 0, idx, out=yy2)
        # Intersection rectangle: clamp remaining boxes against box i
        # (element-wise max of top-left, min of bottom-right corners).
        xx1 = torch.clamp(xx1, min=x1[i])
        yy1 = torch.clamp(yy1, min=y1[i])
        xx2 = torch.clamp(xx2, max=x2[i])
        yy2 = torch.clamp(yy2, max=y2[i])
        w.resize_as_(xx2)
        h.resize_as_(yy2)
        w = xx2 - xx1
        h = yy2 - yy1
        # Clamp negative extents (disjoint boxes) to zero width/height.
        w = torch.clamp(w, min=0.0)
        h = torch.clamp(h, min=0.0)
        inter = w*h
        # IoU = i / (area(a) + area(b) - i)
        rem_areas = torch.index_select(area, 0, idx)  # load remaining areas
        union = (rem_areas - inter) + area[i]
        IoU = inter/union  # store result in iou
        # keep only elements with an IoU <= overlap
        idx = idx[IoU.le(overlap)]
    keep = keep[:count]
    return keep
def n_proposals(out_cls):
    """Return the average number of anchors classified as positive per sample.

    out_cls is reshaped to (-1, 2) two-class logits; an anchor counts as a
    proposal when class 1 has the larger logit.
    """
    _, argmax_cls = out_cls.view(-1, 2).max(1)
    positives = argmax_cls.eq(1).type(torch.cuda.FloatTensor).sum()
    return positives / len(out_cls)
def acc(out_cls, labels):
    """Compute classification accuracy separately for positive and negative
    anchors.

    Returns a (acc_pos, acc_neg) tuple: the fraction of label-1 anchors whose
    softmax probability for class 1 is >= 0.5, and likewise for label-0
    anchors with class 0.
    """
    flat_cls = out_cls.view(-1, 2)
    flat_labels = labels.view(-1)

    pos_sel = flat_labels.eq(1).nonzero().view(-1)
    pos_prob = F.softmax(torch.index_select(flat_cls, 0, pos_sel), dim=1)[:, 1]
    acc_pos = pos_prob.ge(0.5).type(
        torch.cuda.FloatTensor).sum() / len(pos_prob)

    neg_sel = flat_labels.eq(0).nonzero().view(-1)
    neg_prob = F.softmax(torch.index_select(flat_cls, 0, neg_sel), dim=1)[:, 0]
    acc_neg = neg_prob.ge(0.5).type(
        torch.cuda.FloatTensor).sum() / len(neg_prob)

    return (acc_pos, acc_neg)
def angle_err(out_ellipse, labels, ellipse_targets):
    """Mean absolute orientation error (in degrees) over positive anchors.

    Both predictions and targets are (-1, 5) ellipse parameter rows whose
    column 4 holds the tangent of the orientation angle.
    """
    keep = labels.view(-1).eq(1).nonzero().view(-1)
    pred = torch.index_select(out_ellipse.view(-1, 5), 0, keep)
    target = torch.index_select(ellipse_targets.view(-1, 5), 0, keep)
    # Convert the stored tangents to angles in degrees before comparing.
    pred_deg = torch.atan(pred[:, 4]) * 180 / np.pi
    target_deg = torch.atan(target[:, 4]) * 180 / np.pi
    return torch.abs(pred_deg - target_deg).sum() / len(pred_deg)
| 33.168142 | 79 | 0.602721 | import torch
import torch.nn.functional as F
import numpy as np
def nms(boxes, scores, overlap=0.7):
keep = scores.new(scores.size(0)).zero_().long()
if boxes.numel() == 0:
return keep
x1 = boxes[:, 0]
y1 = boxes[:, 1]
x2 = boxes[:, 2]
y2 = boxes[:, 3]
area = torch.mul(x2 - x1, y2 - y1)
v, idx = scores.sort(dim=0, descending=True)
xx1 = boxes.new()
yy1 = boxes.new()
xx2 = boxes.new()
yy2 = boxes.new()
w = boxes.new()
h = boxes.new()
count = 0
while idx.numel() > 0:
i = idx[0]
keep[count] = i
count += 1
if idx.size(0) == 1:
break
idx = idx[1:]
torch.index_select(x1, 0, idx, out=xx1)
torch.index_select(y1, 0, idx, out=yy1)
torch.index_select(x2, 0, idx, out=xx2)
torch.index_select(y2, 0, idx, out=yy2)
xx1 = torch.clamp(xx1, min=x1[i])
yy1 = torch.clamp(yy1, min=y1[i])
xx2 = torch.clamp(xx2, max=x2[i])
yy2 = torch.clamp(yy2, max=y2[i])
w.resize_as_(xx2)
h.resize_as_(yy2)
w = xx2 - xx1
h = yy2 - yy1
w = torch.clamp(w, min=0.0)
h = torch.clamp(h, min=0.0)
inter = w*h
rem_areas = torch.index_select(area, 0, idx)
union = (rem_areas - inter) + area[i]
IoU = inter/union
idx = idx[IoU.le(overlap)]
keep = keep[:count]
return keep
def n_proposals(out_cls):
vals, idcs = out_cls.view(-1, 2).max(1)
n_proposals = idcs.eq(1).type(torch.cuda.FloatTensor).sum() / len(out_cls)
return n_proposals
def acc(out_cls, labels):
pos_idcs = labels.view(-1).eq(1).nonzero().view(-1)
out_cls_pos = torch.index_select(out_cls.view(-1, 2), 0, pos_idcs)
prob_pos = F.softmax(out_cls_pos, dim=1)[:, 1]
acc_pos = prob_pos.ge(0.5).type(
torch.cuda.FloatTensor).sum() / len(prob_pos)
neg_idcs = labels.view(-1).eq(0).nonzero().view(-1)
out_cls_neg = torch.index_select(out_cls.view(-1, 2), 0, neg_idcs)
prob_neg = F.softmax(out_cls_neg, dim=1)[:, 0]
acc_neg = prob_neg.ge(0.5).type(
torch.cuda.FloatTensor).sum() / len(prob_neg)
return (acc_pos, acc_neg)
def angle_err(out_ellipse, labels, ellipse_targets):
pos_idcs = labels.view(-1).eq(1).nonzero().view(-1)
out_ellipse_keep = torch.index_select(out_ellipse.view(-1, 5), 0, pos_idcs)
ellipse_targets_keep = torch.index_select(ellipse_targets.view(-1, 5), 0,
pos_idcs)
out_tan = out_ellipse_keep[:, 4]
out_angle = torch.atan(out_tan) * 180 / np.pi
targets_tan = ellipse_targets_keep[:, 4]
targets_angle = torch.atan(targets_tan) * 180 / np.pi
err = torch.abs(out_angle - targets_angle).sum() / len(out_angle)
return err
| true | true |
f734325c72b04acb108ec2de3b0506cfa207023f | 62,690 | py | Python | tests/nat/nat_helpers.py | stephengao-ragilenetworks/sonic-mgmt | b7ce8f6592f12ee8a7a0daca9a4337d5eaf9313f | [
"Apache-2.0"
] | 1 | 2020-10-15T05:52:17.000Z | 2020-10-15T05:52:17.000Z | tests/nat/nat_helpers.py | stephengao-ragilenetworks/sonic-mgmt | b7ce8f6592f12ee8a7a0daca9a4337d5eaf9313f | [
"Apache-2.0"
] | 4 | 2019-07-26T08:42:01.000Z | 2020-12-16T08:34:52.000Z | tests/nat/nat_helpers.py | stephengao-ragilenetworks/sonic-mgmt | b7ce8f6592f12ee8a7a0daca9a4337d5eaf9313f | [
"Apache-2.0"
] | null | null | null | import re
import os
import time
import logging
import json
from collections import namedtuple
import ptf.mask as mask
import ptf.packet as packet
import ptf.testutils as testutils
from tests.common.errors import RunAnsibleModuleFail
from tests.common.helpers.assertions import pytest_assert
from jinja2 import Environment, FileSystemLoader
from tests.common.config_reload import config_reload
BASE_DIR = os.path.dirname(os.path.realpath(__file__))
DUT_TMP_DIR = os.path.join('tmp', os.path.basename(BASE_DIR))
NAT_CONF_J2_TEMPLATE = "templates/create_nat_binding.j2"
FILES_DIR = os.path.join(BASE_DIR, 'files')
TEMPLATE_DIR = os.path.join(BASE_DIR, 'templates')
NAT_GLOBAL_TEMPLATE = 'global_nat_table_config.j2'
NAT_STATIC_TEMPLATE = 'static_nat_napt_table_config.j2'
ZONES_TEMPLATE = 'nat_zone_table_config.j2'
NAT_ADMIN_MODE = "enabled"
STATIC_NAT_TABLE_NAME = "STATIC_NAT"
STATIC_NAPT_TABLE_NAME = "STATIC_NAPT"
ACL_TEMPLATE = 'create_acl_rule.j2'
GLOBAL_NAT_TIMEOUT = 300
GLOBAL_UDP_NAPT_TIMEOUT = 120
GLOBAL_TCP_NAPT_TIMEOUT = 300
TCP_GLOBAL_PORT = 3700
UDP_GLOBAL_PORT = 3000
TCP_LOCAL_PORT = 80
UDP_LOCAL_PORT = 161
POOL_RANGE_START_PORT = 5000
POOL_RANGE_END_PORT = 6000
logger = logging.getLogger(__name__)
DYNAMIC_POOL_NAME = "test_pool"
ACL_TABLE_GLOBAL_NAME = "test_acl_table"
DYNAMIC_BINDING_NAME = "test_binding"
ACL_SUBNET = "192.168.0.0/24"
BR_MAC = ["22:22:22:22:22:21"]
VRF = {"red": {"ip": "11.1.0.2", "id": "1", "mask": "30", "gw": "11.1.0.1", "dut_iface": "PortChannel0001", "port_id": {"t0": ["28"],
"t0-64": ["0", "1"],
"t0-64-32": ["0", "1"]
}
},
"blue": {"ip": "192.168.0.101", "id": "2", "mask": "24", "gw": "192.168.0.1", "port_id": "6"},
"yellow": {"ip": "192.168.0.201", "id": "3", "mask": "24", "gw": "192.168.0.1", "port_id": "7"}
}
SETUP_CONF = {"loopback": {"vrf": VRF, "acl_subnet": ACL_SUBNET},
"port_in_lag": {"vrf": VRF, "acl_subnet": ACL_SUBNET}
}
DIRECTION_PARAMS = ['host-tor', 'leaf-tor']
FULL_CONE_TEST_IP = "172.20.1.2"
FULL_CONE_TEST_SUBNET = "172.20.1.0/24"
REBOOT_MAP = {'cold': {"timeout": 300}, 'fast': {"timeout": 180}, 'warm': {"timeout": 180}}
PTF_NETWORK_DATA = namedtuple('PTF_NETWORK_DATA', ['outer_ports', 'inner_ports', 'eth_dst', 'eth_src', 'ip_src',
'ip_dst', 'public_ip', 'private_ip', 'exp_src_ip', 'exp_dst_ip'])
L4_PORTS_DATA = namedtuple('L4_PORTS_DATA', ['src_port', 'dst_port', 'exp_src_port', 'exp_dst_port'])
def check_peers_by_ping(duthost):
    """Verify L3 reachability of every configured VRF peer by pinging its IP
    from the DUT (raises on ping failure via the command helper)."""
    for vrf_conf in VRF.values():
        duthost.command("ping {0} -c 5".format(vrf_conf['ip']))
def configure_nat_over_cli(duthost, action, nat_type, global_ip, local_ip, proto=None,
                           global_port=None, local_port=None):
    """
    static NAT/NAPT CLI wrapper
    :param duthost: DUT host object
    :param action: string rule action ('add' or 'remove')
    :param nat_type: string static nat type ('static_nat' or 'static_napt')
    :param global_ip: string global IP address value
    :param local_ip: string local IP address value
    :param proto: string protocol type (required for 'static_napt')
    :param global_port: global l4 port (required for 'static_napt')
    :param local_port: local l4 port (required for 'static_napt')
    :return: dict with the applied rule parameters, or an error string when
             nat_type is not recognized
    """
    # The 'remove' action does not accept the -nat_type option, hence the
    # empty string in the mapping.
    action_type_map = {'add': '-nat_type dnat', 'remove': ''}
    if nat_type == 'static_nat':
        duthost.command("sudo config nat {} static basic {} {} {}".format(action, global_ip, local_ip, action_type_map[action]))
        return {
            global_ip: {'local_ip': local_ip, 'nat_type': 'dnat'}
        }
    elif nat_type == 'static_napt':
        duthost.command("sudo config nat {} static {} {} {} {} {} {}".format(action, proto.lower(),
                                                                             global_ip, global_port,
                                                                             local_ip, local_port,
                                                                             action_type_map[action]))
        return {
            "{}|{}|{}".format(global_ip, proto.upper(), global_port): {'local_ip': local_ip,
                                                                       'local_port': "{}".format(local_port),
                                                                       'nat_type': 'dnat'
                                                                       }
        }
    # Fixed typo in the error string (was "Unkown NAT type").
    return "Unknown NAT type"
def nat_statistics(duthost, show=False, clear=False):
    """
    NAT CLI helper which gets or clears NAT statistics
    :param duthost: DUT host object
    :param show: bool, parse and return "show nat statistics" output
    :param clear: bool, clear the statistics counters instead
    :return: dict of parsed rows keyed by source (or destination) address,
             cleared-output string, or None when neither flag is set
    """
    if show:
        cli_result = exec_command(duthost, ["show nat statistics"])
        if cli_result["rc"]:
            raise Exception('Return code is {} not 0'.format(cli_result["rc"]))
        parsed = {}
        tokens = cli_result["stdout"].split()
        # First 5 tokens are the column headers, the next 5 the separator
        # row; everything after token 10 is data, 5 fields per entry.
        header = tokens[:5]
        rows = tokens[10:]
        if rows:
            row_count = len(rows[::5])
            for n in range(row_count):
                fields = rows[(n * 5):(n * 5) + 5]
                # '---' marks an unset source column; fall back to the
                # destination field as the dictionary key in that case.
                entry_key = fields[1] if fields[1] != "---" else fields[2]
                parsed[entry_key] = {header[i]: fields[i] for i in range(len(header))}
        return parsed
    elif clear:
        cli_result = exec_command(duthost, ["sudo sonic-clear nat statistics"])
        if cli_result["rc"]:
            raise Exception('Return code is {} not 0'.format(cli_result["rc"]))
        return cli_result["stdout"].lstrip()
    return None
def dut_nat_iptables_status(duthost):
    """
    NAT CLI helper gets DUT's iptables entries
    :param duthost: DUT host object
    :return : dict with nat PREROUTING/POSTROUTING iptables entries
              (each entry is a whitespace-normalized rule line)
    """
    nat_table_status = {}
    output_cli = exec_command(duthost, ["sudo iptables -nL -t nat"])
    if output_cli["rc"]:
        raise Exception('Return code is {} not 0'.format(output_cli["rc"]))
    entries = output_cli["stdout"].split("\n")
    # '+ 2' skips the chain banner line and the column-header line that
    # iptables prints before each chain's rules.
    index_prerouting = [i for i in range(0, len(entries)) if "PREROUTING" in entries[i]][0] + 2
    index_input = [i for i in range(0, len(entries)) if "INPUT" in entries[i]][0]
    index_postrouting = [i for i in range(0, len(entries)) if 'POSTROUTING' in entries[i]][0] + 2
    # When docker adds its own chain, POSTROUTING rules end where the
    # DOCKER chain begins; otherwise they run to the end of the output.
    if any(['DOCKER' in entry for entry in entries]):
        index_docker = [i for i in range(0, len(entries)) if 'DOCKER' in entries[i]][0]
        postrouting = [el for el in entries[index_postrouting:index_docker] if len(el) > 1]
    else:
        postrouting = [el for el in entries[index_postrouting:] if len(el) > 1]
    # PREROUTING rules lie between the PREROUTING and INPUT chain headers.
    prerouting = [el for el in entries[index_prerouting:index_input] if len(el) > 0]
    # Collapse runs of whitespace so rules compare reliably.
    nat_table_status["prerouting"] = [" ".join([s.strip() for s in el.split() if len(el) > 0])
                                      for el in prerouting]
    nat_table_status["postrouting"] = [" ".join([s.strip() for s in el.split() if len(el) > 0])
                                      for el in postrouting]
    return nat_table_status
def dut_interface_status(duthost, interface_name):
    """
    Return the operational state ('up'/'down') of a DUT interface.
    :param duthost: DUT host object
    :param interface_name: string interface to query
    :return: string operational state of the interface
    """
    facts = duthost.show_interface(command='status', interfaces=interface_name)['ansible_facts']
    return facts['int_status'][interface_name]['oper_state']
def dut_interface_control(duthost, action, interface_name, ip_addr=""):
    """
    NAT CLI helper enable/disable DUT's interface
    :param duthost: DUT host object
    :param action: one of 'enable', 'disable', 'ip add', 'ip remove'
    :param interface_name: string interface to configure
    :param ip_addr: IP address used by the 'ip add'/'ip remove' actions
    :return: interface operational state after the change
    """
    interface_actions = {"disable": "shutdown {}".format(interface_name),
                         "enable": "startup {}".format(interface_name),
                         "ip remove": "{} {}".format(action, ip_addr),
                         "ip add": "{} {}".format(action, ip_addr)
                         }
    expected_operstatus = {"disable": "down", "enable": "up", "ip remove": "up", "ip add": "up"}
    cli_result = exec_command(duthost, ["sudo config interface {}".format(interface_actions[action])])
    if cli_result["rc"]:
        raise Exception('Return code is {} not 0'.format(cli_result["rc"]))
    current_operstatus = dut_interface_status(duthost, interface_name)
    # Poll up to three times (15 s apart) for the interface to reach the
    # expected state; return whatever state it ends up in.
    for _ in range(3):
        if current_operstatus == expected_operstatus[action]:
            break
        time.sleep(15)
        current_operstatus = dut_interface_status(duthost, interface_name)
    return current_operstatus
def nat_translations(duthost, show=False, clear=False):
    """
    NAT CLI helper which gets or clears NAT translations
    :param duthost: DUT host object
    :param show: bool, parse and return "show nat translations" output
    :param clear: bool, clear the translations table instead
    :return: dict of parsed rows keyed by source (or destination) address,
             cleared-output string, or None when neither flag is set
    """
    if show:
        output_cli = exec_command(duthost, ["show nat translations"])
        if output_cli["rc"]:
            raise Exception('Return code is {} not 0'.format(output_cli["rc"]))
        output = {}
        # Skip the first 15 lines of the CLI output (static entry counters
        # and the table header) — entries start at line 15.
        entries = output_cli["stdout"].split('\n')[15:]
        splited_entries = []
        for el in entries:
            splited_entries.extend(el.split())
        if splited_entries:
            # Each translation row has 5 whitespace-separated fields;
            # line 13 of the raw output carries the column names.
            num_entries = len(splited_entries[::5])
            keys = [el.strip() for el in output_cli["stdout"].split("\n")[13].split("  ") if el]
            for num in range(0, num_entries):
                entry_values = splited_entries[(num * 5):(num * 5) + 5]
                # '---' marks an unset source column; use the destination
                # field as the dictionary key in that case.
                key = entry_values[1] if entry_values[1] != "---" else entry_values[2]
                output[key] = {keys[i]: entry_values[i] for i in range(0, len(keys))}
        return output
    elif clear:
        output_cli = exec_command(duthost, ["sudo sonic-clear nat translations"])
        if output_cli["rc"]:
            raise Exception('Return code is {} not 0'.format(output_cli["rc"]))
        return output_cli["stdout"].lstrip()
    return None
def crud_operations_basic(duthost, crud_operation):
    """
    static NAT CLI helper
    :param duthost: DUT host object
    :param crud_operation: dict of operations, each with action and rule parameters
    :return: dict with rule parameters of the last applied operation
    """
    for op_name in crud_operation:
        params = crud_operation[op_name]
        output = configure_nat_over_cli(duthost, params["action"], "static_nat",
                                        params["global_ip"], params["local_ip"])
    return output
def crud_operations_napt(duthost, crud_operation):
    """
    static NAPT CLI helper
    :param duthost: DUT host object
    :param crud_operation: dict of operations, each with action and rule parameters
    :return: dict with rule parameters of the last applied operation
    """
    for op_name in crud_operation:
        params = crud_operation[op_name]
        output = configure_nat_over_cli(duthost, params["action"], "static_napt",
                                        params["global_ip"], params["local_ip"],
                                        proto=params["proto"],
                                        global_port=params["global_port"],
                                        local_port=params["local_port"])
    return output
def exec_command(host, command_list):
    """
    Executes shell commands on host
    :param host: host object
    :param command_list: list of commands to execute
    :return: response from host for a single command (or the exception text
             if it failed); None when several commands were passed
    """
    if len(command_list) != 1:
        # Run each command independently; individual results are discarded.
        for cmd in command_list:
            exec_command(host, [cmd])
        return None
    try:
        return host.shell(command_list[0])
    except Exception as err:
        # Best-effort: return the error text instead of raising.
        return str(err)
def nat_zones_config(duthost, setup_info, interface_type):
    """
    generate and deploy NAT zones configuration files
    :param duthost: DUT host object
    :param setup_info: dict, setup info fixture
    :param interface_type: interface type
    """
    # Get inner and outer interfaces from setup info
    inner_zone_interfaces = setup_info[interface_type]["inner_zone_interfaces"]
    outer_zone_interfaces = setup_info[interface_type]["outer_zone_interfaces"]
    for rif in setup_info["dut_rifs_in_topo_t0"]:
        if rif in inner_zone_interfaces or rif in outer_zone_interfaces:
            nat_zone_vars = setup_info['interfaces_nat_zone'][rif]
            # Add zone configuration
            duthost.command("sudo config nat add interface {0} -nat_zone {1}".format(rif, nat_zone_vars['zone_id']))
            # Check that zone was applied by parsing the configured zone id
            # from the 'show nat config zones' table (single-digit zones).
            show_zones = duthost.command("show nat config zones")['stdout']
            zone_id = re.search(r"{}\s+(\d)".format(rif), show_zones).group(1)
            pytest_assert(str(nat_zone_vars['zone_id']) == zone_id, "NAT zone was not set to {}".format(zone_id))
def get_cli_show_nat_config_output(duthost, command):
    """
    Return the parsed output of 'show nat config <command>'.
    :param duthost: DUT host object
    :param command: str, nat config sub-command to show
    :return: list of dicts, one per table row
    """
    full_command = "show nat config {}".format(command)
    return duthost.show_and_parse(full_command)
def apply_static_nat_config(duthost, ptfadapter, ptfhost, setup_data,
                            network_data, direction, interface_type, nat_type, public_ip,
                            private_ip, protocol_type=None, nat_entry=None, handshake=False):
    """
    generate and deploy static NAT/NAPT configuration files
    :param duthost: DUT host object
    :param ptfadapter: ptf adapter fixture
    :param ptfhost: PTF host object
    :param setup_data: dict, setup info fixture
    :param network_data: unused — recomputed below (see NOTE)
    :param direction: string, traffic's flow direction
    :param interface_type: interface type
    :param nat_type: string, static NAT type
    :param public_ip: IP Address of Internet IP (host-tor) or IP Address of Public Interface (leaf-tor)
    :param private_ip: IP Address of Local IP (host-tor) or IP Address of Internet IP (leaf-tor)
    :param protocol_type: TCP/UDP
    :param nat_entry: static_nat/static_napt
    :param handshake: bool, perform a TCP/UDP handshake after configuration
    """
    # Define network data and L4 ports
    # NOTE(review): the 'network_data' parameter is immediately overwritten
    # here, so the caller-supplied value is never used — confirm intended.
    network_data = get_network_data(ptfadapter, setup_data, direction, interface_type, nat_type=nat_type)
    src_port, dst_port = get_l4_default_ports(protocol_type)
    global_port = dst_port
    local_port = src_port
    if nat_entry != 'static_napt':
        # Add static basic rule
        duthost.command("sudo config nat add static basic {0} {1} -nat_type=dnat".format(public_ip, private_ip))
    else:
        # Add static napt rule
        duthost.command("sudo config nat add static {0} {1} {2} {3} {4} -nat_type=dnat".
                        format(protocol_type.lower(), public_ip, global_port, private_ip, local_port))
    # Check that rule was applied
    static_nat = get_cli_show_nat_config_output(duthost, "static")
    pytest_assert('dnat' == static_nat[0]['nat type'], "Default NAT type was changed")
    pytest_assert(public_ip == static_nat[0]['global ip'], "Global IP does not match {}".format(public_ip))
    pytest_assert(private_ip == static_nat[0]['local ip'], "Local IP does not match {}".format(private_ip))
    if nat_entry == 'static_napt':
        pytest_assert(protocol_type == static_nat[0]['ip protocol'], "Protocol does not match {}".format(protocol_type))
        pytest_assert(str(global_port) == static_nat[0]['global port'], "Global Port does not match {}".format(global_port))
        pytest_assert(str(local_port) == static_nat[0]['local port'], "Local Port does not match {}".format(local_port))
    else:
        # Basic static NAT applies to all protocols.
        pytest_assert('all' == static_nat[0]['ip protocol'])
    nat_zones_config(duthost, setup_data, interface_type)
    # Perform TCP handshake
    if handshake:
        if direction == 'leaf-tor':
            # set_arp entries
            check_peers_by_ping(duthost)
        perform_handshake(ptfhost, setup_data,
                          protocol_type, direction,
                          network_data.ip_dst, dst_port,
                          network_data.ip_src, src_port,
                          network_data.public_ip)
def get_src_port(setup_info, direction, interface_type, second_port=False):
    """
    return source port ids based on test case direction and interface_type
    :param setup_info: setup info fixture
    :param direction: 'host-tor', 'leaf-tor'
    :param interface_type: type of interface
    :param second_port: boolean if second port id needs to be returned
    :return: source port ids
    """
    if direction != 'host-tor':
        return setup_info[interface_type]['outer_port_id']
    if second_port:
        # The second inner port is adjacent to the first one.
        return [setup_info[interface_type]['inner_port_id'][0] + 1]
    return setup_info[interface_type]['inner_port_id']
def get_dst_port(setup_info, direction, interface_type, second_port=False):
    """
    return destination port ids based on test case direction and interface_type
    :param setup_info: setup info fixture
    :param direction: 'host-tor', 'leaf-tor'
    :param interface_type: type of interface
    :param second_port: boolean if second port id needs to be returned
    :return: destination port ids
    """
    if direction != 'leaf-tor':
        return setup_info[interface_type]['outer_port_id']
    if second_port:
        # The second inner port is adjacent to the first one.
        return [setup_info[interface_type]['inner_port_id'][0] + 1]
    return setup_info[interface_type]['inner_port_id']
def get_src_ip(setup_info, direction, interface_type, nat_type=None, second_port=False):
    """
    return source IP based on test case direction and interface_type
    :param setup_info: setup info fixture
    :param direction: 'host-tor', 'leaf-tor'
    :param interface_type: type of interface
    :param nat_type: string nat type
    :param second_port: boolean if second port's IP settings need to be returned
    :return: source IP
    """
    iface_conf = setup_info[interface_type]
    # Outbound traffic (and static NAPT in either direction) originates
    # from the inner host's IP; otherwise use the outer destination IP.
    if direction != 'host-tor' and nat_type != "static_napt":
        return iface_conf['dst_ip']
    if second_port:
        return iface_conf["second_src_ip"]
    return iface_conf['src_ip']
def get_dst_ip(setup_info, direction, interface_type, nat_type=None):
    """
    return destination IP based on test case direction and interface_type
    :param setup_info: setup info fixture
    :param direction: 'host-tor', 'leaf-tor'
    :param interface_type: type of interface
    :param nat_type: string nat type
    :return: destination IP
    """
    iface_conf = setup_info[interface_type]
    if direction == 'host-tor' or nat_type == "static_napt":
        return iface_conf['dst_ip']
    # Inbound traffic targets the translated (public) address.
    return iface_conf['public_ip']
def get_public_ip(setup_info, interface_type):
    """
    return public IP based on test case interface_type
    :param setup_info: setup info fixture
    :param interface_type: type of interface
    :return: public IP
    """
    iface_conf = setup_info[interface_type]
    return iface_conf['public_ip']
def setup_ptf_interfaces(testbed, ptfhost, duthost, setup_info, interface_type, vrf_id, vrf_name, port_id,
                         ip_address, mask, gw_ip, key):
    """
    setup ptf interfaces for tests
    :param testbed: Testbed object
    :param ptfhost: PTF host object
    :param duthost: DUT host object
    :param setup_info: setup info fixture
    :param interface_type: string interface type
    :param vrf_id: id of vrf
    :param vrf_name: vrf name
    :param port_id: port id of interface (dict keyed by topo name for the
                    'red' LAG VRF, a single id otherwise)
    :param ip_address: ip address of interface
    :param mask: vrf mask
    :param gw_ip: ip address of gateway
    :param key: dictionary key of vrf configuration
    """
    # Register the routing table for the VRF (idempotent: only appended if
    # the '<id> <name>' line is not already in rt_tables).
    ptfhost.shell("grep -Fxq '{} {}' /etc/iproute2/rt_tables "
                  "|| echo '{} {}' >> /etc/iproute2/rt_tables".format(vrf_id, vrf_name, vrf_id, vrf_name))
    ptfhost.shell("ip link add {} type vrf table {}".format(vrf_name, vrf_id))
    ptfhost.shell("ip link set dev {} up".format(vrf_name))
    if vrf_name == "red":
        # The 'red' VRF sits on a LAG (bond1) aggregating the member ports
        # listed per topology in the port_id mapping.
        bond_interface = "bond1"
        ptfhost.shell("ip link add {} type bond".format(bond_interface))
        ptfhost.shell("ip link set {} type bond miimon 100 mode balance-xor".format(bond_interface))
        for iface_id in port_id[testbed['topo']['name']]:
            ptfhost.shell("ip link set eth{} down".format(iface_id))
            ptfhost.shell("ip link set eth{} master {}".format(iface_id, bond_interface))
        ptfhost.shell("ip link set dev {} up".format(bond_interface))
        ptfhost.shell("ifconfig {} hw ether {}".format(bond_interface, BR_MAC[0]))
        ptfhost.shell("ifconfig {} mtu 9216 up".format(bond_interface))
        ptfhost.shell("ip link set {} master {}".format(bond_interface, vrf_name))
        ptfhost.shell("ip addr add {}/{} dev {}".format(ip_address, mask, bond_interface))
    else:
        # Plain VRFs enslave a single PTF port.
        ptfhost.shell("ip link set eth{} master {}".format(port_id, vrf_name))
        ptfhost.shell("ip addr add {}/{} dev eth{}".format(ip_address, mask, port_id))
    # Route traffic entering/leaving the VRF through its own table, with a
    # default route via the gateway.
    ptfhost.shell("ip rule add iif {} table {}".format(vrf_name, vrf_id))
    ptfhost.shell("ip rule add oif {} table {}".format(vrf_name, vrf_id))
    ptfhost.shell("ip route add 0.0.0.0/0 via {} table {}".format(gw_ip, vrf_id))
    # When the VRF maps to a DUT port-channel, replace its /31 address with
    # the VRF gateway address on the DUT side.
    if "dut_iface" in setup_info[interface_type]["vrf_conf"][key].keys():
        dut_iface = setup_info[interface_type]["vrf_conf"][key]["dut_iface"]
        pch_ip = setup_info["pch_ips"][dut_iface]
        duthost.shell("sudo config interface ip remove {} {}/31".format(dut_iface, pch_ip))
        duthost.shell("sudo config interface ip add {} {}/{}".format(dut_iface, gw_ip, mask))
def teardown_ptf_interfaces(testbed, ptfhost, gw_ip, vrf_id, ip_address, mask, port_id, vrf_name):
    """
    teardown ptf interfaces after tests
    :param testbed: Testbed object
    :param ptfhost: PTF host object
    :param gw_ip: ip address of gateway
    :param vrf_id: id of vrf
    :param ip_address: ip address of interface
    :param mask: vrf mask
    :param port_id: port id of interface (dict keyed by topo name for the
                    'red' LAG VRF, a single id otherwise)
    :param vrf_name: vrf name
    """
    # Remove the VRF default route first so nothing references the devices
    # being deleted below.
    ptfhost.shell("ip route del 0.0.0.0/0 via {} table {}".format(gw_ip, vrf_id))
    if vrf_name == "red":
        # 'red' VRF sits on a LAG (bond1); release member ports before
        # deleting the bond and the VRF device.
        bond_interface = "bond1"
        ptfhost.shell("ip addr del {}/{} dev {}".format(ip_address, mask, bond_interface))
        ptfhost.shell("ip rule del iif {} table {}".format(vrf_name, vrf_id))
        ptfhost.shell("ip rule del oif {} table {}".format(vrf_name, vrf_id))
        ptfhost.shell("ip link set {} nomaster".format(bond_interface))
        for iface_id in port_id[testbed['topo']['name']]:
            ptfhost.shell("ip link set eth{} nomaster".format(iface_id))
            ptfhost.shell("ip link set eth{} up".format(iface_id))
        ptfhost.shell("ip link del {}".format(bond_interface))
        ptfhost.shell("ip link del {} type vrf table {}".format(vrf_name, vrf_id))
    else:
        # Plain VRFs: unconfigure and release the single PTF port.
        ptfhost.shell("ip addr del {}/{} dev eth{}".format(ip_address, mask, port_id))
        ptfhost.shell("ip rule del iif {} table {}".format(vrf_name, vrf_id))
        ptfhost.shell("ip rule del oif {} table {}".format(vrf_name, vrf_id))
        ptfhost.shell("ip link set eth{} nomaster".format(port_id))
        ptfhost.shell("ip link del {} type vrf table {}".format(vrf_name, vrf_id))
def conf_ptf_interfaces(testbed, ptfhost, duthost, setup_info, interface_type, teardown=False):
    """
    setup/teardown testbed's environment for CT run
    :param testbed: Testbed object
    :param ptfhost: PTF host object
    :param duthost: DUT host object
    :param setup_info: setup info fixture
    :param interface_type: string interface type
    :param teardown: Boolean parameter to remove or not PTF's interfaces config
    """
    if not teardown:
        # Give PTF ports unique MACs before configuring them.
        ptfhost.script("./scripts/change_mac.sh")
    for vrf_name, vrf_conf in setup_info[interface_type]["vrf_conf"].items():
        vrf_id = vrf_conf["id"]
        ip_address = vrf_conf["ip"]
        gw_ip = vrf_conf["gw"]
        port_id = vrf_conf["port_id"]
        mask = vrf_conf["mask"]
        if teardown:
            teardown_ptf_interfaces(testbed, ptfhost, gw_ip, vrf_id, ip_address, mask, port_id, vrf_name)
        else:
            setup_ptf_interfaces(testbed, ptfhost, duthost, setup_info, interface_type, vrf_id, vrf_name,
                                 port_id, ip_address, mask, gw_ip, vrf_name)
    if not teardown:
        ptfhost.shell('supervisorctl restart ptf_nn_agent')
def expected_mask_nated_packet(pkt, protocol_type, ip_dst, ip_src,
                               src_port=None, dst_port=None, icmp_id=None):
    """
    Build a masked expected packet for NATed traffic.
    :param pkt: packet to be sent
    :param protocol_type: protocol type TCP, UDP or ICMP
    :param ip_src: expected source IP
    :param ip_dst: expected destination IP
    :param src_port: source L4 expected port
    :param dst_port: destination L4 expected port
    :param icmp_id: id for specify ICMP dynamic connection
    :return: masked expected packet
    """
    exp_pkt = pkt.copy()
    # The DUT routes the packet: TTL is decremented and IPs are rewritten.
    exp_pkt['IP'].ttl -= 1
    exp_pkt['IP'].dst = ip_dst
    exp_pkt['IP'].src = ip_src
    if protocol_type in ("TCP", "UDP"):
        exp_pkt[protocol_type].sport = src_port
        exp_pkt[protocol_type].dport = dst_port
    elif protocol_type == "ICMP":
        exp_pkt[protocol_type].id = icmp_id
    masked = mask.Mask(exp_pkt)
    # L2 addresses and mutable IP header fields are irrelevant for the check.
    for eth_field in ('dst', 'src'):
        masked.set_do_not_care_scapy(packet.Ether, eth_field)
    for ip_field in ('chksum', 'id'):
        masked.set_do_not_care_scapy(packet.IP, ip_field)
    return masked
def create_packet(eth_dst, eth_src, ip_dst, ip_src, protocol_type, sport=None, dport=None):
    """
    generate packet to send
    :param eth_dst: destination Ethernet address
    :param eth_src: source Ethernet address
    :param ip_dst: destination IP address
    :param ip_src: source IP address
    :param protocol_type: TCP/UDP/ICMP
    :param sport: source port for UDP/TCP packet
    :param dport: destination port for UDP/TCP traffic
    :return: crafted packet; an ICMP echo request (type 8, code 0) for any
             protocol_type other than TCP/UDP
    """
    if protocol_type == "TCP":
        return testutils.simple_tcp_packet(eth_dst=eth_dst, eth_src=eth_src, ip_dst=ip_dst, ip_src=ip_src,
                                           tcp_sport=sport, tcp_dport=dport, ip_ttl=64)
    if protocol_type == "UDP":
        return testutils.simple_udp_packet(eth_dst=eth_dst, eth_src=eth_src, ip_dst=ip_dst, ip_src=ip_src,
                                           udp_sport=sport, udp_dport=dport, ip_ttl=64)
    return testutils.simple_icmp_packet(eth_dst=eth_dst, eth_src=eth_src, ip_dst=ip_dst, ip_src=ip_src,
                                        icmp_type=8, icmp_code=0, ip_ttl=64)
def teardown_test_env(testbed, duthost, ptfhost, setup_info, interface_type, reboot=False, before_test=False):
    """
    teardown function cleans DUT's config and PTF's interfaces
    :param testbed: Testbed object
    :param duthost: duthost fixture
    :param ptfhost: ptfhost fixture
    :param setup_info: setup_info fixture
    :param interface_type: string interface type
    :param reboot: if True perform DUT reboot instead of a config reload
    :param before_test: when True, skip clearing the PTF interface config
    """
    # Reset the DUT to its initial T0 configuration.
    if reboot:
        duthost.command('reboot')
    else:
        config_reload(duthost)
    # Allow the DUT time to come back and stabilize.
    time.sleep(180)
    # Remove PTF interface configuration unless running before the test.
    if not before_test:
        conf_ptf_interfaces(testbed, ptfhost, duthost, setup_info, interface_type, teardown=True)
def get_network_data(ptfadapter, setup_info, direction, interface_type, nat_type=None, second_port=False):
    """
    Gets network data: MACs, IPs, inner/outer ports ids
    Args:
        ptfadapter: ptf adapter fixture
        setup_info: setup_info fixture
        direction: string with current flow direction ('host-tor'/'leaf-tor')
        interface_type: string interface type
        nat_type: string with static napt/nat/dynamic types
        second_port: boolean if second port id needs to be returned
    Returns:
        PTF_NETWORK_DATA namedtuple with the ports, MACs, the IPs to send
        and the IPs expected after NAT translation
    """
    # Get outer and inner ports
    outer_ports = get_dst_port(setup_info, direction, interface_type,
                               second_port=second_port)
    inner_ports = get_src_port(setup_info, direction, interface_type,
                               second_port=second_port)
    # host-tor traffic is sourced from a PTF dataplane port MAC; leaf-tor
    # traffic is sourced from the LAG (bond) MAC.
    mac_map = {"host-tor": ptfadapter.dataplane.get_mac(0, inner_ports[0]), "leaf-tor": BR_MAC[0]}
    # Get source and destination IPs for packets to send
    ip_src = get_src_ip(setup_info, direction, interface_type,
                        nat_type=nat_type, second_port=second_port)
    ip_dst = get_dst_ip(setup_info, direction, interface_type,
                        nat_type=nat_type)
    # Define expected source and destination IP based on direction
    if nat_type == "static_napt" and direction == "leaf-tor":
        # Inbound static NAPT: the packet targets the public IP and the DUT
        # translates it back to the private host, so swap src/dst here.
        exp_dst_ip = ip_src
        ip_src = ip_dst
        ip_dst = setup_info[interface_type]["public_ip"]
        exp_src_ip = ip_src
    elif direction == 'host-tor':
        exp_dst_ip = setup_info[interface_type]["dst_ip"]
        exp_src_ip = setup_info[interface_type]["public_ip"]
    else:
        exp_dst_ip = setup_info[interface_type]["src_ip"]
        exp_src_ip = setup_info[interface_type]["dst_ip"]
        if second_port:
            exp_dst_ip = setup_info[interface_type]["second_src_ip"]
    # Get MAC addresses for packets to send
    eth_dst = setup_info['router_mac']
    eth_src = mac_map[direction]
    # Get public and private IPs for NAT configuration
    public_ip = get_public_ip(setup_info, interface_type)
    private_ip = get_src_ip(setup_info, direction, interface_type, nat_type, second_port)
    return PTF_NETWORK_DATA(outer_ports, inner_ports, eth_dst, eth_src, ip_src, ip_dst, public_ip, private_ip, exp_src_ip, exp_dst_ip)
def perform_handshake(ptfhost, setup_info, protocol_type, direction,
                      ip_dst, dest_l4_port, ip_src, source_l4_port, public_ip, second_port=False):
    """
    Performs TCP/UDP handshake from the PTF host to initiate NAT translation
    Args:
        ptfhost: ptf host fixture
        setup_info: setup_info fixture
        protocol_type: string with TCP/UDP values
        direction: string with current flow direction
        ip_dst: IP destination
        dest_l4_port: destination L4 port
        ip_src: IP source
        source_l4_port: source L4 port
        public_ip: Public IP
        second_port: boolean if second port id needs to be returned
    """
    # Pick VRFs for both endpoints; the second host lives in the second
    # inner/outer VRF pair
    src_vrf = setup_info["inner_vrf"][0]
    dst_vrf = setup_info["outer_vrf"][0]
    if second_port:
        src_vrf = setup_info["inner_vrf"][1]
        dst_vrf = setup_info["outer_vrf"][1]
    if direction == "host-tor":
        # Outbound flow: the echo script does not need the public IP ("None")
        echo_cmd = "python /tmp/nat_ptf_echo.py {} {} {} {} {} {} {} None &".format(protocol_type.lower(),
                                                                                    ip_dst, dest_l4_port,
                                                                                    ip_src, source_l4_port,
                                                                                    dst_vrf, src_vrf)
    else:
        # Inbound flow: src/dst are swapped and the public IP is passed through
        echo_cmd = "python /tmp/nat_ptf_echo.py {} {} {} {} {} {} {} {} &".format(protocol_type.lower(),
                                                                                  ip_src, source_l4_port,
                                                                                  ip_dst, dest_l4_port,
                                                                                  dst_vrf, src_vrf,
                                                                                  public_ip)
    # Deliver the echo helper to the PTF host and start it in the background
    ptfhost.copy(src="./scripts/nat_ptf_echo.py", dest="/tmp")
    ptfhost.command(echo_cmd)
def generate_and_verify_traffic(duthost, ptfadapter, setup_info, interface_type, direction, protocol_type, nat_type, second_port=False,
                                src_port=None, dst_port=None, exp_src_port=None, exp_dst_port=None):
    """
    Generates TCP/UDP traffic and checks that traffic is translated due to NAT types/rules
    Args:
        duthost: duthost fixture
        ptfadapter: ptf adapter fixture
        setup_info: setup_info fixture
        interface_type: string interface type
        direction: string with current flow direction
        protocol_type: string with TCP/UDP values
        nat_type: string with static napt/nat/dynamic types
        second_port: boolean if second port id needs to be returned
        src_port: L4 source port in packet to send (defaults to the NAT-type lookup)
        dst_port: L4 destination port in packet to send (defaults to the NAT-type lookup)
        exp_src_port: L4 source port in expected packet (defaults to the NAT-type lookup)
        exp_dst_port: L4 destination port in expected packet (defaults to the NAT-type lookup)
    """
    # Define network data and L4 ports
    network_data = get_network_data(ptfadapter, setup_info, direction, interface_type, nat_type=nat_type, second_port=second_port)
    if nat_type != 'dynamic':
        l4_ports = get_static_l4_ports(protocol_type, direction, nat_type)
    else:
        # Dynamic NAT: the translated port is only known from the DUT's
        # 'show nat translation' output
        l4_ports = get_dynamic_l4_ports(duthost, protocol_type, direction, network_data.public_ip)
    # Only fill in ports the caller did not override explicitly
    if src_port is None:
        src_port = l4_ports.src_port
    if dst_port is None:
        dst_port = l4_ports.dst_port
    if exp_src_port is None:
        exp_src_port = l4_ports.exp_src_port
    if exp_dst_port is None:
        exp_dst_port = l4_ports.exp_dst_port
    # Create packet to send
    pkt = create_packet(network_data.eth_dst, network_data.eth_src,
                        network_data.ip_dst, network_data.ip_src,
                        protocol_type, sport=src_port, dport=dst_port)
    # Define expected packet
    exp_pkt = expected_mask_nated_packet(pkt, protocol_type, network_data.exp_dst_ip, network_data.exp_src_ip,
                                         src_port=exp_src_port, dst_port=exp_dst_port)
    # clear buffer
    ptfadapter.dataplane.flush()
    # Send packet
    for port in network_data.inner_ports:
        testutils.send(ptfadapter, port, pkt, count=5)
    # Verify that expected packets arrive on outer ports
    testutils.verify_packet_any_port(ptfadapter, exp_pkt, ports=network_data.outer_ports)
def generate_and_verify_not_translated_traffic(ptfadapter, setup_info, interface_type, direction, protocol_type, nat_type, second_port=False,
                                               ip_src=None, ip_dst=None, exp_ip_src=None, exp_ip_dst=None):
    """
    Generates TCP/UDP traffic and verifies it reaches the outer ports untranslated,
    i.e. NAT types/rules are expected not to apply to this flow
    Args:
        ptfadapter: ptf adapter fixture
        setup_info: setup_info fixture
        interface_type: string interface type
        direction: string with current flow direction
        protocol_type: string with TCP/UDP values
        nat_type: string with static napt/nat/dynamic types
        second_port: boolean if second port id needs to be returned
        ip_src: IP source in packet to send (defaults to the network data source)
        ip_dst: IP destination in packet to send (defaults to the network data destination)
        exp_ip_src: IP source in expected packet (defaults to the untranslated source)
        exp_ip_dst: IP destination in expected packet (defaults to the untranslated destination)
    """
    # Resolve MACs/IPs/port ids for the current flow and the default L4 ports
    network_data = get_network_data(ptfadapter, setup_info, direction, interface_type, nat_type=nat_type, second_port=second_port)
    src_l4_port, dst_l4_port = get_l4_default_ports(protocol_type)
    # Fall back to the untranslated addresses when overrides were not provided
    ip_src = network_data.ip_src if ip_src is None else ip_src
    ip_dst = network_data.ip_dst if ip_dst is None else ip_dst
    exp_ip_src = network_data.ip_src if exp_ip_src is None else exp_ip_src
    exp_ip_dst = network_data.ip_dst if exp_ip_dst is None else exp_ip_dst
    # Build the packet to send and the mask expected on the outer ports
    pkt = create_packet(network_data.eth_dst, network_data.eth_src, ip_dst, ip_src,
                        protocol_type, sport=src_l4_port, dport=dst_l4_port)
    exp_pkt = expected_mask_nated_packet(pkt, protocol_type, exp_ip_dst, exp_ip_src,
                                         src_port=src_l4_port, dst_port=dst_l4_port)
    # Drop anything already captured, then send from every inner port
    ptfadapter.dataplane.flush()
    for inner_port in network_data.inner_ports:
        testutils.send(ptfadapter, inner_port, pkt, count=5)
    # The packet must arrive on at least one outer port without translation
    testutils.verify_packet_any_port(ptfadapter, exp_pkt, ports=network_data.outer_ports)
def generate_and_verify_traffic_dropped(ptfadapter, setup_info, interface_type, direction, protocol_type, nat_type,
                                        src_port, dst_port, exp_src_port, exp_dst_port, second_port=False):
    """
    Generates TCP/UDP traffic and verifies the would-be translated packet does
    NOT appear on any outer port, i.e. the traffic is dropped
    Args:
        ptfadapter: ptf adapter fixture
        setup_info: setup_info fixture
        interface_type: string interface type
        direction: string with current flow direction
        protocol_type: string with TCP/UDP values
        nat_type: string with static napt/nat/dynamic types
        src_port: L4 source port in packet to send
        dst_port: L4 destination port in packet to send
        exp_src_port: L4 source port in expected packet
        exp_dst_port: L4 destination port in expected packet
        second_port: boolean if second port id needs to be returned
    """
    # Resolve MACs/IPs/port ids for the current flow
    network_data = get_network_data(ptfadapter, setup_info, direction, interface_type, nat_type=nat_type, second_port=second_port)
    # Build the packet to send and the translated packet that must NOT arrive
    pkt = create_packet(network_data.eth_dst, network_data.eth_src,
                        network_data.ip_dst, network_data.ip_src,
                        protocol_type, sport=src_port, dport=dst_port)
    exp_pkt = expected_mask_nated_packet(pkt, protocol_type, network_data.exp_dst_ip, network_data.exp_src_ip,
                                         src_port=exp_src_port, dst_port=exp_dst_port)
    # Drop anything already captured, then send from every inner port
    ptfadapter.dataplane.flush()
    for inner_port in network_data.inner_ports:
        testutils.send(ptfadapter, inner_port, pkt, count=5)
    # Confirm the translated packet shows up on no outer port
    testutils.verify_no_packet_any(ptfadapter, exp_pkt, ports=network_data.outer_ports)
def generate_and_verify_icmp_traffic(ptfadapter, setup_info, interface_type, direction, nat_type, second_port=False, icmp_id=None):
    """
    Generates ICMP traffic and checks that traffic is translated due to NAT types/rules.
    Verifies both the translated echo request on the outer ports and the
    translated echo reply back on the inner ports.
    Args:
        ptfadapter: ptf adapter fixture
        setup_info: setup_info fixture
        interface_type: string interface type
        direction: string with current flow direction
        nat_type: string with static napt/nat/dynamic types
        second_port: boolean if second port id needs to be returned
        icmp_id: id for specify ICMP dynamic connection
    """
    protocol_type = 'ICMP'
    # Define network data
    network_data = get_network_data(ptfadapter, setup_info, direction, interface_type, nat_type=nat_type, second_port=second_port)
    # Create packet to send
    pkt = create_packet(network_data.eth_dst, network_data.eth_src, network_data.ip_dst, network_data.ip_src, protocol_type)
    # Define expected packet (ICMP request)
    exp_pkt_request = expected_mask_nated_packet(pkt, protocol_type, network_data.exp_dst_ip, network_data.exp_src_ip, icmp_id=icmp_id)
    # Reverse source and destination IPs for reply
    exp_dst_ip = get_src_ip(setup_info, direction, interface_type,
                            nat_type=nat_type, second_port=second_port)
    exp_src_ip = get_dst_ip(setup_info, direction, interface_type,
                            nat_type=nat_type)
    # Define expected packet (ICMP reply)
    exp_pkt_reply = expected_mask_nated_packet(pkt, protocol_type, exp_dst_ip, exp_src_ip, icmp_id=0)
    # ICMP type 0 == echo-reply
    exp_pkt_reply.exp_pkt[protocol_type].type = 0
    # clear buffer
    ptfadapter.dataplane.flush()
    # Send packet
    for port in network_data.inner_ports:
        testutils.send(ptfadapter, port, pkt, count=5)
    # Verify ICMP request packets arrive on outer ports
    testutils.verify_packet_any_port(ptfadapter, exp_pkt_request, ports=network_data.outer_ports)
    # Verify ICMP reply packets arrive on inner ports
    testutils.verify_packet_any_port(ptfadapter, exp_pkt_reply, ports=network_data.inner_ports)
def generate_and_verify_not_translated_icmp_traffic(ptfadapter, setup_info, interface_type, direction, nat_type, second_port=False,
                                                    ip_src=None, ip_dst=None, check_reply=True):
    """
    Generates ICMP traffic and checks that traffic is not translated due to NAT types/rules.
    Args:
        ptfadapter: ptf adapter fixture
        setup_info: setup_info fixture
        interface_type: string interface type
        direction: string with current flow direction
        nat_type: string with static napt/nat/dynamic types
        second_port: boolean if second port id needs to be returned
        ip_src: IP source in packet to send (defaults to the network data source)
        ip_dst: IP destination in packet to send (defaults to the network data destination)
        check_reply: boolean if requires to verify ICMP reply
    """
    protocol_type = 'ICMP'
    # Define network data
    network_data = get_network_data(ptfadapter, setup_info, direction, interface_type, nat_type=nat_type, second_port=second_port)
    if ip_src is None:
        ip_src = network_data.ip_src
    if ip_dst is None:
        ip_dst = network_data.ip_dst
    # Create packet to send
    pkt = create_packet(network_data.eth_dst, network_data.eth_src, ip_dst, ip_src, protocol_type)
    # Define expected packet (ICMP request) - same IPs, i.e. untranslated
    exp_pkt_request = expected_mask_nated_packet(pkt, protocol_type, ip_dst, ip_src)
    # Define expected packet (ICMP reply) - IPs reversed, still untranslated
    exp_pkt_reply = expected_mask_nated_packet(pkt, protocol_type, ip_src, ip_dst)
    # ICMP type 0 == echo-reply
    exp_pkt_reply.exp_pkt[protocol_type].type = 0
    # clear buffer
    ptfadapter.dataplane.flush()
    # Send packet
    for port in network_data.inner_ports:
        testutils.send(ptfadapter, port, pkt, count=5)
    # Verify ICMP request packets arrive on outer ports
    testutils.verify_packet_any_port(ptfadapter, exp_pkt_request, ports=network_data.outer_ports)
    if check_reply:
        # Verify ICMP reply packets arrive on inner ports
        testutils.verify_packet_any_port(ptfadapter, exp_pkt_reply, ports=network_data.inner_ports)
def get_l4_default_ports(protocol_type):
    """
    Return the default (source, destination) L4 ports for the given protocol.
    :param protocol_type: type of protocol TCP/UDP; any value other than "UDP"
        falls back to the TCP defaults
    :return: tuple (source_l4_port, dest_l4_port)
    """
    if protocol_type == "UDP":
        return UDP_LOCAL_PORT, UDP_GLOBAL_PORT
    return TCP_LOCAL_PORT, TCP_GLOBAL_PORT
def get_dynamic_l4_ports(duthost, proto, direction, public_ip):
    """
    Get L4 ports for dynamic NAT test cases
    :param duthost: DUT host object
    :param proto: string with TCP/UDP values
    :param direction: string with current flow direction
    :param public_ip: public IP the dynamic translation is expected to use
    :return: named tuple with values src_port, dst_port, exp_src_port, exp_dst_port
    :raises Exception: if no dynamic translation entry exists for public_ip
    """
    # Give the DUT some time to install the dynamic translation entry
    time.sleep(5)
    # Find translated L4 port(s) for the public IP in 'show nat translation' output
    output = exec_command(duthost, ["show nat translation"])['stdout']
    pattern = r"{}.+{}:(\d+)".format(proto.lower(), public_ip)
    ports = re.findall(pattern, output)
    if not ports:
        raise Exception("Dynamic NAT translation was not created")
    # Compare numerically: the previous lexicographic sort of digit strings
    # picks the wrong entry when ports have different digit counts
    # (e.g. '999' > '10000' as strings)
    dynamic_global_port = max(int(port) for port in ports)
    src_port, dst_port = get_l4_default_ports(proto)
    if direction == "leaf-tor":
        # Inbound: traffic targets the dynamically assigned global port
        exp_src_port = dynamic_global_port
        exp_dst_port = src_port
        src_port = dynamic_global_port
        dst_port = dynamic_global_port
    else:
        # Outbound: source port is expected to be rewritten to the global port
        exp_src_port = dynamic_global_port
        exp_dst_port = dynamic_global_port
        dst_port = dynamic_global_port
    return L4_PORTS_DATA(src_port, dst_port, exp_src_port, exp_dst_port)
def configure_dynamic_nat_rule(duthost, ptfadapter, ptfhost, setup_info, interface_type, protocol_type, pool_name=DYNAMIC_POOL_NAME,
                               public_ip=None, acl_table=ACL_TABLE_GLOBAL_NAME, ports_assigned=None, acl_rules=None,
                               binding_name=DYNAMIC_BINDING_NAME, port_range=None,
                               default=False, remove_bindings=False, handshake=False):
    """
    method configure Dynamic NAT rules
    :param duthost: duthost fixture
    :param ptfadapter: ptf adapter fixture
    :param ptfhost: ptf host fixture
    :param setup_info: setup_info fixture
    :param interface_type: interface_type Loopback, Portchannel etc
    :param protocol_type: string with TCP/UDP values (used only when handshake=True)
    :param pool_name: name of the pool to apply
    :param public_ip: IP of Public L3 interface
    :param acl_table: acl table name to create
    :param ports_assigned: assigned ports to ACL table
    :param acl_rules: ACL rules to apply
    :param binding_name: NAT binding name
    :param port_range: range of L4 port to apply
    :param default: use default values for public_ip/acl_rules/port_range/ports_assigned
    :param remove_bindings: if True remove applied bindings from NAT rules
    :param handshake: if True perform handshake
    """
    if default:
        # Fill in defaults from setup_info for any value the caller did not supply
        public_ip = get_public_ip(setup_info, interface_type) if not public_ip else public_ip
        acl_subnet = setup_info[interface_type]["acl_subnet"]
        acl_rules = [{"priority": "10", "src_ip": acl_subnet, "action": "forward"}] if not acl_rules else acl_rules
        port_range = "{0}-{1}".format(POOL_RANGE_START_PORT, POOL_RANGE_END_PORT) if not port_range else port_range
        ports_assigned = setup_info['indices_to_ports_config'][setup_info[interface_type]['inner_port_id'][0]] if not \
            ports_assigned else ports_assigned
    # Set NAT configuration for test
    duthost.command("sudo config nat add pool {0} {1} {2}".format(pool_name, public_ip, port_range))
    # Check that pool configuration was applied
    show_nat_pool = get_cli_show_nat_config_output(duthost, "pool")
    pytest_assert(pool_name == show_nat_pool[0]['pool name'], "Pool name was not set to {}".format(pool_name))
    pytest_assert(public_ip == show_nat_pool[0]['global ip range'], "Global IP Range was not set to {}".format(public_ip))
    pytest_assert(port_range == show_nat_pool[0]['global port range'], "Global Port Range was not set to {}".format(port_range))
    # Add bindings
    duthost.command("sudo config nat add binding {0} {1} {2}".format(binding_name, pool_name, acl_table))
    # Check that binding configuration was applied
    show_nat_binding = get_cli_show_nat_config_output(duthost, "bindings")
    pytest_assert(binding_name == show_nat_binding[0]['binding name'], "Binding Name was not set to {}".format(binding_name))
    pytest_assert(pool_name == show_nat_binding[0]['pool name'], "Pool Name was not set to {}".format(pool_name))
    pytest_assert(acl_table == show_nat_binding[0]['access-list'], "Access-List was not set to {}".format(acl_table))
    # Apply acl table and rule
    duthost.command("mkdir -p {}".format(DUT_TMP_DIR))
    # Initialize variables for NAT global table
    acl_rule_vars = {
        'acl_table_name': acl_table,
        'stage': "INGRESS",
        'ports_assigned': ports_assigned,
        'acl_rules': acl_rules
    }
    duthost.host.options['variable_manager'].extra_vars.update(acl_rule_vars)
    acl_config = 'acl_table.json'
    acl_config_path = os.path.join(DUT_TMP_DIR, acl_config)
    duthost.template(src=os.path.join(TEMPLATE_DIR, ACL_TEMPLATE), dest=acl_config_path)
    # Apply config file
    duthost.command('sonic-cfggen -j {} --write-to-db'.format(acl_config_path))
    # Remove temporary folders
    duthost.command('rm -rf {}'.format(DUT_TMP_DIR))
    if remove_bindings:
        duthost.command("config nat remove bindings")
    # Apply NAT zones
    nat_zones_config(duthost, setup_info, interface_type)
    # set_arp entries
    check_peers_by_ping(duthost)
    if handshake:
        # Perform handshake
        direction = 'host-tor'
        # Define network data and L4 ports
        network_data = get_network_data(ptfadapter, setup_info, direction, interface_type, nat_type='dynamic')
        src_port, dst_port = get_l4_default_ports(protocol_type)
        # Perform TCP handshake (host-tor -> leaf-tor)
        perform_handshake(ptfhost, setup_info,
                          protocol_type, direction,
                          network_data.ip_dst, dst_port,
                          network_data.ip_src, src_port,
                          network_data.public_ip)
def wait_timeout(protocol_type, wait_time=None, default=True):
    """
    Sleep until a NAT entry is expected to expire, or for a caller-chosen
    interval to verify entries were NOT yet expired.
    :param protocol_type: type of protocol (TCP/UDP; anything else waits 60s)
    :param wait_time: explicit time to wait when default is False
    :param default: if True wait the default NAT timeout for the protocol
    """
    if not default:
        time.sleep(wait_time)
        return
    # Default waits: protocol timeout plus an 80 second safety margin
    if protocol_type == "UDP":
        time.sleep(GLOBAL_UDP_NAPT_TIMEOUT + 80)
    elif protocol_type == "TCP":
        time.sleep(GLOBAL_TCP_NAPT_TIMEOUT + 80)
    else:
        time.sleep(60)
def get_static_l4_ports(proto, direction, nat_type):
    """
    Get L4 ports for static NAT/NAPT test cases
    :param proto: string with TCP/UDP values
    :param direction: string with current flow direction ('host-tor'/'leaf-tor')
    :param nat_type: string with static napt/nat types
    :return: named tuple with values src_port, dst_port, exp_src_port, exp_dst_port
    """
    local_port, global_port = get_l4_default_ports(proto)
    src_port, dst_port = local_port, global_port
    if direction == "host-tor":
        if nat_type == 'static_napt':
            # Outbound NAPT: source port is rewritten to the global port
            exp_src_port = exp_dst_port = global_port
        else:
            # Plain static NAT keeps L4 ports untouched
            exp_src_port, exp_dst_port = local_port, global_port
    elif direction == "leaf-tor":
        # Inbound: expect the ports mirrored back towards the host
        exp_src_port, exp_dst_port = global_port, local_port
        if nat_type == "static_napt":
            src_port = global_port
        else:
            src_port, dst_port = global_port, local_port
    return L4_PORTS_DATA(src_port, dst_port, exp_src_port, exp_dst_port)
def conf_dut_routes(duthost, setup_info, subnet, interface_type, teardown=False):
    """
    method for add/delete routes on DUT
    :param duthost: DUT host object
    :param setup_info: dict with interfaces parameters to configure
    :param subnet: subnet to configure
    :param interface_type: string interface type
    :param teardown: Boolean parameter to remove or not DUT routes
    """
    # Gateway is the next address after the 'red' VRF IP. The previous
    # string-slicing approach (ip[:-1] + last_char+1) broke for last octets
    # with more than one digit (e.g. "….19" became "….110"); increment the
    # whole last octet instead.
    red_ip = setup_info[interface_type]["vrf_conf"]["red"]["ip"]
    network_part, last_octet = red_ip.rsplit(".", 1)
    gw = "{}.{}".format(network_part, int(last_octet) + 1)
    if teardown:
        try:
            duthost.command("ip route del {} via {}".format(subnet, gw))
        except RunAnsibleModuleFail:
            # Best effort: the route may legitimately be absent already
            logger.debug("Route '%s via %s' was not deleted/existed", subnet, gw)
    else:
        duthost.command("ip route add {} via {}".format(subnet, gw))
def get_redis_val(duthost, db, key):
    """
    Returns dictionary of values for keys matching *key* in the selected redis database.
    On any failure the exception message string is returned instead (best-effort,
    consistent with other helpers in this module).
    :param duthost: DUT host object
    :param db: database to be selected
    :param key: key to be selected
    """
    try:
        output = exec_command(duthost, ["redis-dump -d {} --pretty -k *{}*".format(db, key)])
        if output["rc"]:
            # Bug fix: previously referenced undefined name 'output_cli', so a
            # non-zero rc raised NameError and the intended message was lost
            raise Exception('Return code is {} not 0'.format(output["rc"]))
        redis_dict = json.loads(output['stdout'])
        return redis_dict
    except Exception as e:
        return e.__str__()
def get_db_rules(duthost, ptfadapter, setup_test_env, protocol_type, db_type, private_ip=None, public_ip=None, private_port=None,
                 public_port=None, start_port=POOL_RANGE_START_PORT, end_port=POOL_RANGE_END_PORT, access_list=ACL_TABLE_GLOBAL_NAME, nat_pool=DYNAMIC_POOL_NAME,
                 post_flag=False):
    """
    Returns dictionary of expected database rules for the requested dump type.
    :param duthost: DUT host object
    :param ptfadapter: ptf adapter fixture
    :param setup_test_env: fixture used to gather setup_info fixture and interface_type (Loopback, Portchannel etc)
    :param protocol_type: type of protocol TCP/UDP
    :param db_type: database type used to select which redis dump should be checked
    :param private_ip: IP variable used to confirm proper configuration
    :param public_ip: IP variable used to confirm proper configuration
    :param private_port: port variable used to confirm proper configuration
    :param public_port: port variable used to confirm proper configuration
    :param start_port: port variable used to confirm proper configuration
    :param end_port: port variable used to confirm proper configuration
    :param access_list: ACL variable used to confirm proper configuration
    :param nat_pool: pool variable used to confirm proper configuration
    :param post_flag: boolean flag used to determine which redis dump template should be used (pre or post configuration)
    :raises Exception: if db_type does not match any known dump type
    """
    interface_type, setup_info = setup_test_env
    # Work on a copy so the shared fixture data is not mutated downstream
    setup_data = copy.deepcopy(setup_info)
    nat_type = 'static_napt'
    direction = 'host-tor'
    network_data = get_network_data(ptfadapter, setup_data, direction, interface_type, nat_type=nat_type)
    # The "other" protocol and the default global/local ports for this protocol
    secondary_protocol = {"TCP": "UDP", "UDP": "TCP"}[protocol_type]
    global_port = {"TCP": TCP_GLOBAL_PORT, "UDP": UDP_GLOBAL_PORT}[protocol_type]
    local_port = {"TCP": TCP_LOCAL_PORT, "UDP": UDP_LOCAL_PORT}[protocol_type]
    db_rules = {}
    # APP_DB timeout
    if db_type == 'APP_DB timeout':
        # Post-configuration dumps expect timeouts bumped by +200 (TCP by +5000)
        offset = {True: 200, False: 0}[post_flag]
        db_rules = {"nat_timeout": "{}".format(GLOBAL_NAT_TIMEOUT + offset),
                    "admin_mode": "enabled",
                    "nat_udp_timeout": "{}".format(GLOBAL_UDP_NAPT_TIMEOUT + offset),
                    "nat_tcp_timeout": "{}".format(GLOBAL_TCP_NAPT_TIMEOUT + offset * 25)
                    }
    # Pool CONFIG_DB
    elif db_type == 'Pool CONFIG_DB':
        db_rules = {"nat_ip": "{}".format(public_ip),
                    "nat_port": "{}-{}".format(start_port, end_port)
                    }
    # Pool APP_DB
    elif db_type == 'Pool APP_DB':
        db_rules = {"port_range": "{}-{}".format(start_port, end_port)}
    # Binding CONFIG_DB
    elif db_type == 'Binding CONFIG_DB':
        db_rules = {"access_list": access_list,
                    "nat_pool": nat_pool,
                    "nat_type": "snat",
                    "twice_nat_id": "NULL"
                    }
    # NAPT APP_DB
    elif db_type == 'NAPT APP_DB':
        db_rules = {
            "NAPT_TABLE:{}:{}:{}".format(protocol_type, network_data.public_ip, global_port): {
                "type": "hash",
                "value": {
                    "entry_type": "static",
                    "nat_type": "dnat",
                    "translated_ip": "{}".format(network_data.private_ip),
                    "translated_l4_port": "{}".format(local_port)
                }
            },
            "NAPT_TABLE:{}:{}:{}".format(protocol_type, network_data.private_ip, local_port): {
                "type": "hash",
                "value": {
                    "entry_type": "static",
                    "nat_type": "snat",
                    "translated_ip": "{}".format(network_data.public_ip),
                    "translated_l4_port": "{}".format(global_port)
                }
            }
        }
    # NAPT CONFIG_DB
    elif db_type == 'NAPT CONFIG_DB':
        db_rules = {
            "STATIC_NAPT|{}|{}|{}".format(network_data.public_ip, protocol_type, global_port): {
                "type": "hash",
                "value": {
                    "local_ip": "{}".format(network_data.private_ip),
                    "local_port": "{}".format(local_port),
                    "nat_type": "dnat"
                }
            }
        }
    # NAPT APP_DB POST
    elif db_type == 'NAPT APP_DB POST':
        db_rules = {
            "NAPT_TABLE:{}:{}:{}".format(protocol_type, public_ip, public_port): {
                "type": "hash",
                "value": {
                    "entry_type": "static",
                    "nat_type": "dnat",
                    "translated_ip": "{}".format(private_ip),
                    "translated_l4_port": "{}".format(private_port)
                }
            },
            "NAPT_TABLE:{}:{}:{}".format(protocol_type, private_ip, private_port): {
                "type": "hash",
                "value": {
                    "entry_type": "static",
                    "nat_type": "snat",
                    "translated_ip": "{}".format(public_ip),
                    "translated_l4_port": "{}".format(public_port)
                }
            },
            "NAPT_TABLE:{}:{}:{}".format(protocol_type, network_data.public_ip, global_port): {
                "type": "hash",
                "value": {
                    "entry_type": "static",
                    "nat_type": "dnat",
                    "translated_ip": "{}".format(network_data.private_ip),
                    "translated_l4_port": "{}".format(local_port)
                }
            },
            "NAPT_TABLE:{}:{}:{}".format(protocol_type, network_data.private_ip, local_port): {
                "type": "hash",
                "value": {
                    "entry_type": "static",
                    "nat_type": "snat",
                    "translated_ip": "{}".format(network_data.public_ip),
                    "translated_l4_port": "{}".format(global_port)
                }
            },
            "NAPT_TABLE:{}:{}:{}".format(secondary_protocol, public_ip, public_port): {
                "type": "hash",
                "value": {
                    "entry_type": "static",
                    "nat_type": "dnat",
                    "translated_ip": "{}".format(private_ip),
                    "translated_l4_port": "{}".format(private_port)
                }
            },
            "NAPT_TABLE:{}:{}:{}".format(secondary_protocol, private_ip, private_port): {
                "type": "hash",
                "value": {
                    "entry_type": "static",
                    "nat_type": "snat",
                    "translated_ip": "{}".format(public_ip),
                    "translated_l4_port": "{}".format(public_port)
                }
            }
        }
    # NAPT CONFIG_DB POST
    elif db_type == 'NAPT CONFIG_DB POST':
        db_rules = {
            "STATIC_NAPT|{}|{}|{}".format(public_ip, protocol_type, public_port): {
                "type": "hash",
                "value": {
                    "local_ip": "{}".format(private_ip),
                    "local_port": "{}".format(private_port),
                    "nat_type": "dnat"
                }
            },
            "STATIC_NAPT|{}|{}|{}".format(public_ip, secondary_protocol, public_port): {
                "type": "hash",
                "value": {
                    "local_ip": "{}".format(private_ip),
                    "local_port": "{}".format(private_port),
                    "nat_type": "dnat"
                }
            },
            "STATIC_NAPT|{}|{}|{}".format(network_data.public_ip, protocol_type, global_port): {
                "type": "hash",
                "value": {
                    "local_ip": "{}".format(network_data.private_ip),
                    "local_port": "{}".format(local_port),
                    "nat_type": "dnat"
                }
            }
        }
    # ASIC_DB SRC status
    elif db_type == 'ASIC_DB SRC':
        db_rules = {
            "SAI_NAT_ENTRY_ATTR_SRC_IP": "{}".format(network_data.public_ip),
            "SAI_NAT_ENTRY_ATTR_L4_SRC_PORT": "{}".format(global_port)
        }
    # ASIC_DB DST status
    elif db_type == 'ASIC_DB DST':
        db_rules = {
            "SAI_NAT_ENTRY_ATTR_DST_IP": "{}".format(network_data.private_ip),
            "SAI_NAT_ENTRY_ATTR_L4_DST_PORT": "{}".format(local_port)
        }
    else:
        raise Exception('Improper db_type selected')
    return db_rules
def write_json(duthost, json_dict, feature):
    """
    Render a NAT config template and load it on the DUT via 'config load'.
    :param duthost: DUT host object
    :param json_dict: dictionary with variables consumed by the template
    :param feature: feature name used to select which template is rendered
    :raises AttributeError: when the feature has no associated template
    """
    temp_file = "{}.json".format(feature)
    template_dir = os.path.dirname(os.path.abspath(__file__))
    env = Environment(loader=FileSystemLoader(template_dir), trim_blocks=True)
    # Only the dynamic binding template is supported so far
    if feature != "dynamic_binding":
        raise AttributeError("Unexpected feature {}".format(feature))
    rendered = env.get_template(NAT_CONF_J2_TEMPLATE).render(nat=json_dict)
    # Stage the rendered config on the DUT, load it, then clean up
    exec_command(duthost, ["mkdir -p {}".format(DUT_TMP_DIR)])
    exec_command(duthost, ["echo '{j2_temp}' > {dir}/{file}".
                 format(j2_temp=rendered, dir=DUT_TMP_DIR, file=temp_file)])
    exec_command(duthost, ["sudo config load {} -y".format(DUT_TMP_DIR + "/" + temp_file)])
    exec_command(duthost, ["rm -rf {}".format(DUT_TMP_DIR)])
| 47.348943 | 161 | 0.627628 | import re
import os
import time
import logging
import json
from collections import namedtuple
import ptf.mask as mask
import ptf.packet as packet
import ptf.testutils as testutils
from tests.common.errors import RunAnsibleModuleFail
from tests.common.helpers.assertions import pytest_assert
from jinja2 import Environment, FileSystemLoader
from tests.common.config_reload import config_reload
BASE_DIR = os.path.dirname(os.path.realpath(__file__))
DUT_TMP_DIR = os.path.join('tmp', os.path.basename(BASE_DIR))
NAT_CONF_J2_TEMPLATE = "templates/create_nat_binding.j2"
FILES_DIR = os.path.join(BASE_DIR, 'files')
TEMPLATE_DIR = os.path.join(BASE_DIR, 'templates')
NAT_GLOBAL_TEMPLATE = 'global_nat_table_config.j2'
NAT_STATIC_TEMPLATE = 'static_nat_napt_table_config.j2'
ZONES_TEMPLATE = 'nat_zone_table_config.j2'
NAT_ADMIN_MODE = "enabled"
STATIC_NAT_TABLE_NAME = "STATIC_NAT"
STATIC_NAPT_TABLE_NAME = "STATIC_NAPT"
ACL_TEMPLATE = 'create_acl_rule.j2'
GLOBAL_NAT_TIMEOUT = 300
GLOBAL_UDP_NAPT_TIMEOUT = 120
GLOBAL_TCP_NAPT_TIMEOUT = 300
TCP_GLOBAL_PORT = 3700
UDP_GLOBAL_PORT = 3000
TCP_LOCAL_PORT = 80
UDP_LOCAL_PORT = 161
POOL_RANGE_START_PORT = 5000
POOL_RANGE_END_PORT = 6000
logger = logging.getLogger(__name__)
DYNAMIC_POOL_NAME = "test_pool"
ACL_TABLE_GLOBAL_NAME = "test_acl_table"
DYNAMIC_BINDING_NAME = "test_binding"
ACL_SUBNET = "192.168.0.0/24"
BR_MAC = ["22:22:22:22:22:21"]
VRF = {"red": {"ip": "11.1.0.2", "id": "1", "mask": "30", "gw": "11.1.0.1", "dut_iface": "PortChannel0001", "port_id": {"t0": ["28"],
"t0-64": ["0", "1"],
"t0-64-32": ["0", "1"]
}
},
"blue": {"ip": "192.168.0.101", "id": "2", "mask": "24", "gw": "192.168.0.1", "port_id": "6"},
"yellow": {"ip": "192.168.0.201", "id": "3", "mask": "24", "gw": "192.168.0.1", "port_id": "7"}
}
SETUP_CONF = {"loopback": {"vrf": VRF, "acl_subnet": ACL_SUBNET},
"port_in_lag": {"vrf": VRF, "acl_subnet": ACL_SUBNET}
}
DIRECTION_PARAMS = ['host-tor', 'leaf-tor']
FULL_CONE_TEST_IP = "172.20.1.2"
FULL_CONE_TEST_SUBNET = "172.20.1.0/24"
REBOOT_MAP = {'cold': {"timeout": 300}, 'fast': {"timeout": 180}, 'warm': {"timeout": 180}}
PTF_NETWORK_DATA = namedtuple('PTF_NETWORK_DATA', ['outer_ports', 'inner_ports', 'eth_dst', 'eth_src', 'ip_src',
'ip_dst', 'public_ip', 'private_ip', 'exp_src_ip', 'exp_dst_ip'])
L4_PORTS_DATA = namedtuple('L4_PORTS_DATA', ['src_port', 'dst_port', 'exp_src_port', 'exp_dst_port'])
def check_peers_by_ping(duthost):
for vrf in VRF:
duthost.command("ping {0} -c 5".format(VRF[vrf]['ip']))
def configure_nat_over_cli(duthost, action, nat_type, global_ip, local_ip, proto=None,
global_port=None, local_port=None):
action_type_map = {'add': '-nat_type dnat', 'remove': ''}
if nat_type == 'static_nat':
duthost.command("sudo config nat {} static basic {} {} {}".format(action, global_ip, local_ip, action_type_map[action]))
return {
global_ip: {'local_ip': local_ip, 'nat_type': 'dnat'}
}
elif nat_type == 'static_napt':
duthost.command("sudo config nat {} static {} {} {} {} {} {}".format(action, proto.lower(),
global_ip, global_port,
local_ip, local_port,
action_type_map[action]))
return {
"{}|{}|{}".format(global_ip, proto.upper(), global_port): {'local_ip': local_ip,
'local_port': "{}".format(local_port),
'nat_type': 'dnat'
}
}
return "Unkown NAT type"
def nat_statistics(duthost, show=False, clear=False):
if show:
output_cli = exec_command(duthost, ["show nat statistics"])
if output_cli["rc"]:
raise Exception('Return code is {} not 0'.format(output_cli["rc"]))
output = {}
entries = output_cli["stdout"].split()[10:]
if entries:
num_entries = len(entries[::5])
keys = output_cli["stdout"].split()[:5]
for num in range(0, num_entries):
entry_values = entries[(num * 5):(num * 5) + 5]
key = entry_values[1] if entry_values[1] != "---" else entry_values[2]
output[key] = {keys[i]: entry_values[i] for i in range(0, len(keys))}
return output
elif clear:
output_cli = exec_command(duthost, ["sudo sonic-clear nat statistics"])
if output_cli["rc"]:
raise Exception('Return code is {} not 0'.format(output_cli["rc"]))
return output_cli["stdout"].lstrip()
return None
def dut_nat_iptables_status(duthost):
nat_table_status = {}
output_cli = exec_command(duthost, ["sudo iptables -nL -t nat"])
if output_cli["rc"]:
raise Exception('Return code is {} not 0'.format(output_cli["rc"]))
entries = output_cli["stdout"].split("\n")
index_prerouting = [i for i in range(0, len(entries)) if "PREROUTING" in entries[i]][0] + 2
index_input = [i for i in range(0, len(entries)) if "INPUT" in entries[i]][0]
index_postrouting = [i for i in range(0, len(entries)) if 'POSTROUTING' in entries[i]][0] + 2
if any(['DOCKER' in entry for entry in entries]):
index_docker = [i for i in range(0, len(entries)) if 'DOCKER' in entries[i]][0]
postrouting = [el for el in entries[index_postrouting:index_docker] if len(el) > 1]
else:
postrouting = [el for el in entries[index_postrouting:] if len(el) > 1]
prerouting = [el for el in entries[index_prerouting:index_input] if len(el) > 0]
nat_table_status["prerouting"] = [" ".join([s.strip() for s in el.split() if len(el) > 0])
for el in prerouting]
nat_table_status["postrouting"] = [" ".join([s.strip() for s in el.split() if len(el) > 0])
for el in postrouting]
return nat_table_status
def dut_interface_status(duthost, interface_name):
return duthost.show_interface(command='status', interfaces=interface_name)['ansible_facts']['int_status'][interface_name]['oper_state']
def dut_interface_control(duthost, action, interface_name, ip_addr=""):
interface_actions = {"disable": "shutdown {}".format(interface_name),
"enable": "startup {}".format(interface_name),
"ip remove": "{} {}".format(action, ip_addr),
"ip add": "{} {}".format(action, ip_addr)
}
expected_operstatus = {"disable": "down", "enable": "up", "ip remove": "up", "ip add": "up"}
output_cli = exec_command(duthost, ["sudo config interface {}".format(interface_actions[action])])
if output_cli["rc"]:
raise Exception('Return code is {} not 0'.format(output_cli["rc"]))
attempts = 3
current_operstatus = dut_interface_status(duthost, interface_name)
while current_operstatus != expected_operstatus[action]:
if attempts == 0:
break
time.sleep(15)
current_operstatus = dut_interface_status(duthost, interface_name)
attempts -= 1
return current_operstatus
def nat_translations(duthost, show=False, clear=False):
if show:
output_cli = exec_command(duthost, ["show nat translations"])
if output_cli["rc"]:
raise Exception('Return code is {} not 0'.format(output_cli["rc"]))
output = {}
entries = output_cli["stdout"].split('\n')[15:]
splited_entries = []
for el in entries:
splited_entries.extend(el.split())
if splited_entries:
num_entries = len(splited_entries[::5])
keys = [el.strip() for el in output_cli["stdout"].split("\n")[13].split(" ") if el]
for num in range(0, num_entries):
entry_values = splited_entries[(num * 5):(num * 5) + 5]
key = entry_values[1] if entry_values[1] != "---" else entry_values[2]
output[key] = {keys[i]: entry_values[i] for i in range(0, len(keys))}
return output
elif clear:
output_cli = exec_command(duthost, ["sudo sonic-clear nat translations"])
if output_cli["rc"]:
raise Exception('Return code is {} not 0'.format(output_cli["rc"]))
return output_cli["stdout"].lstrip()
return None
def crud_operations_basic(duthost, crud_operation):
    """Run each basic static-NAT CRUD step in *crud_operation*.

    Every entry describes one CLI action (add/remove) with its global and
    local IPs; the CLI output of the last executed step is returned.
    """
    for params in crud_operation.values():
        output = configure_nat_over_cli(duthost, params["action"], "static_nat",
                                        params["global_ip"], params["local_ip"])
    return output
def crud_operations_napt(duthost, crud_operation):
    """Run each static-NAPT CRUD step in *crud_operation*.

    Like crud_operations_basic but also passes protocol and L4 ports; the
    CLI output of the last executed step is returned.
    """
    for params in crud_operation.values():
        output = configure_nat_over_cli(duthost, params["action"], "static_napt",
                                        params["global_ip"], params["local_ip"],
                                        proto=params["proto"],
                                        global_port=params["global_port"],
                                        local_port=params["local_port"])
    return output
def exec_command(host, command_list):
    """Execute shell command(s) on *host*.

    A single-element list returns the ansible shell result dict, or the
    exception text if the call raised.  A longer list runs each command in
    turn and returns None (individual failures are swallowed).
    """
    if len(command_list) != 1:
        for cmd in command_list:
            exec_command(host, [cmd])
        return None
    try:
        return host.shell(command_list[0])
    except Exception as err:
        return err.__str__()
def nat_zones_config(duthost, setup_info, interface_type):
    """Assign NAT zone ids to the DUT RIFs used by the test and verify them.

    Only RIFs that belong to the inner or outer zone for *interface_type*
    are configured; each zone id is read back via `show nat config zones`
    and asserted against the expected value.
    """
    inner_zone_interfaces = setup_info[interface_type]["inner_zone_interfaces"]
    outer_zone_interfaces = setup_info[interface_type]["outer_zone_interfaces"]
    for rif in setup_info["dut_rifs_in_topo_t0"]:
        if rif in inner_zone_interfaces or rif in outer_zone_interfaces:
            nat_zone_vars = setup_info['interfaces_nat_zone'][rif]
            duthost.command("sudo config nat add interface {0} -nat_zone {1}".format(rif, nat_zone_vars['zone_id']))
            # Read the configured zone id for this RIF back from the CLI table.
            show_zones = duthost.command("show nat config zones")['stdout']
            zone_id = re.search(r"{}\s+(\d)".format(rif), show_zones).group(1)
            pytest_assert(str(nat_zone_vars['zone_id']) == zone_id, "NAT zone was not set to {}".format(zone_id))
def get_cli_show_nat_config_output(duthost, command):
    """Return the parsed table of `show nat config <command>` from the DUT."""
    cli = "show nat config {}".format(command)
    return duthost.show_and_parse(cli)
def apply_static_nat_config(duthost, ptfadapter, ptfhost, setup_data,
                            network_data, direction, interface_type, nat_type, public_ip,
                            private_ip, protocol_type=None, nat_entry=None, handshake=False):
    """Configure a static NAT (basic) or static NAPT entry and verify it via CLI.

    After configuring the entry, the `show nat config static` output is
    asserted field-by-field, zones are applied and, if *handshake* is set,
    the L4 handshake is performed so that NAT state exists for traffic tests.

    NOTE(review): the *network_data* argument is immediately recomputed on the
    first line and therefore ignored; kept for signature compatibility.
    """
    network_data = get_network_data(ptfadapter, setup_data, direction, interface_type, nat_type=nat_type)
    src_port, dst_port = get_l4_default_ports(protocol_type)
    global_port = dst_port
    local_port = src_port
    if nat_entry != 'static_napt':
        duthost.command("sudo config nat add static basic {0} {1} -nat_type=dnat".format(public_ip, private_ip))
    else:
        duthost.command("sudo config nat add static {0} {1} {2} {3} {4} -nat_type=dnat".
                        format(protocol_type.lower(), public_ip, global_port, private_ip, local_port))
    # Verify the entry landed in the config exactly as requested.
    static_nat = get_cli_show_nat_config_output(duthost, "static")
    pytest_assert('dnat' == static_nat[0]['nat type'], "Default NAT type was changed")
    pytest_assert(public_ip == static_nat[0]['global ip'], "Global IP does not match {}".format(public_ip))
    pytest_assert(private_ip == static_nat[0]['local ip'], "Local IP does not match {}".format(private_ip))
    if nat_entry == 'static_napt':
        pytest_assert(protocol_type == static_nat[0]['ip protocol'], "Protocol does not match {}".format(protocol_type))
        pytest_assert(str(global_port) == static_nat[0]['global port'], "Global Port does not match {}".format(global_port))
        pytest_assert(str(local_port) == static_nat[0]['local port'], "Local Port does not match {}".format(local_port))
    else:
        # Basic static NAT applies to all IP protocols.
        pytest_assert('all' == static_nat[0]['ip protocol'])
    nat_zones_config(duthost, setup_data, interface_type)
    if handshake:
        if direction == 'leaf-tor':
            check_peers_by_ping(duthost)
        perform_handshake(ptfhost, setup_data,
                          protocol_type, direction,
                          network_data.ip_dst, dst_port,
                          network_data.ip_src, src_port,
                          network_data.public_ip)
def get_src_port(setup_info, direction, interface_type, second_port=False):
    """Return PTF source port id(s) for traffic in *direction*.

    'host-tor' traffic originates on the inner ports (with *second_port*
    the port next to the first inner one); any other direction originates
    on the outer ports.
    """
    if direction != 'host-tor':
        return setup_info[interface_type]['outer_port_id']
    if second_port:
        return [setup_info[interface_type]['inner_port_id'][0] + 1]
    return setup_info[interface_type]['inner_port_id']
def get_dst_port(setup_info, direction, interface_type, second_port=False):
    """Return PTF destination port id(s) for traffic in *direction*.

    'leaf-tor' traffic terminates on the inner ports (with *second_port*
    the port next to the first inner one); any other direction terminates
    on the outer ports.
    """
    if direction != 'leaf-tor':
        return setup_info[interface_type]['outer_port_id']
    if second_port:
        return [setup_info[interface_type]['inner_port_id'][0] + 1]
    return setup_info[interface_type]['inner_port_id']
def get_src_ip(setup_info, direction, interface_type, nat_type=None, second_port=False):
    """Return the source IP for traffic in *direction*.

    Host-originated traffic (and any static-NAPT scenario) uses the inner
    source IP (or the second one with *second_port*); otherwise the outer
    destination IP acts as the source.
    """
    cfg = setup_info[interface_type]
    if direction != 'host-tor' and nat_type != "static_napt":
        return cfg['dst_ip']
    return cfg["second_src_ip"] if second_port else cfg['src_ip']
def get_dst_ip(setup_info, direction, interface_type, nat_type=None):
    """Return the destination IP for traffic in *direction*.

    Host-originated traffic (and any static-NAPT scenario) targets the real
    outer destination; otherwise traffic targets the public (translated) IP.
    """
    cfg = setup_info[interface_type]
    use_real_dst = direction == 'host-tor' or nat_type == "static_napt"
    return cfg['dst_ip'] if use_real_dst else cfg['public_ip']
def get_public_ip(setup_info, interface_type):
    """Return the translated (public) IP configured for *interface_type*."""
    return setup_info[interface_type]['public_ip']
def setup_ptf_interfaces(testbed, ptfhost, duthost, setup_info, interface_type, vrf_id, vrf_name, port_id,
                         ip_address, mask, gw_ip, key):
    """Create a PTF-side VRF and bind the test interface(s) into it.

    For the 'red' VRF a balance-xor bond over the topology's port ids is
    created (mirroring the DUT-facing LAG); for other VRFs a single ethN
    port is enslaved.  Policy rules and a default route steer the VRF's
    traffic via *gw_ip*.  Command order matters: links must exist and be
    down before enslaving, and addresses are added only after mastering.
    """
    # Register the VRF routing table id once (grep guards duplicates).
    ptfhost.shell("grep -Fxq '{} {}' /etc/iproute2/rt_tables "
                  "|| echo '{} {}' >> /etc/iproute2/rt_tables".format(vrf_id, vrf_name, vrf_id, vrf_name))
    ptfhost.shell("ip link add {} type vrf table {}".format(vrf_name, vrf_id))
    ptfhost.shell("ip link set dev {} up".format(vrf_name))
    if vrf_name == "red":
        bond_interface = "bond1"
        ptfhost.shell("ip link add {} type bond".format(bond_interface))
        ptfhost.shell("ip link set {} type bond miimon 100 mode balance-xor".format(bond_interface))
        for iface_id in port_id[testbed['topo']['name']]:
            # Member links must be down before they can join the bond.
            ptfhost.shell("ip link set eth{} down".format(iface_id))
            ptfhost.shell("ip link set eth{} master {}".format(iface_id, bond_interface))
        ptfhost.shell("ip link set dev {} up".format(bond_interface))
        ptfhost.shell("ifconfig {} hw ether {}".format(bond_interface, BR_MAC[0]))
        ptfhost.shell("ifconfig {} mtu 9216 up".format(bond_interface))
        ptfhost.shell("ip link set {} master {}".format(bond_interface, vrf_name))
        ptfhost.shell("ip addr add {}/{} dev {}".format(ip_address, mask, bond_interface))
    else:
        ptfhost.shell("ip link set eth{} master {}".format(port_id, vrf_name))
        ptfhost.shell("ip addr add {}/{} dev eth{}".format(ip_address, mask, port_id))
    # Steer all of this VRF's traffic through the DUT-facing gateway.
    ptfhost.shell("ip rule add iif {} table {}".format(vrf_name, vrf_id))
    ptfhost.shell("ip rule add oif {} table {}".format(vrf_name, vrf_id))
    ptfhost.shell("ip route add 0.0.0.0/0 via {} table {}".format(gw_ip, vrf_id))
    if "dut_iface" in setup_info[interface_type]["vrf_conf"][key].keys():
        # Re-address the matching DUT interface so it owns the gateway IP.
        dut_iface = setup_info[interface_type]["vrf_conf"][key]["dut_iface"]
        pch_ip = setup_info["pch_ips"][dut_iface]
        duthost.shell("sudo config interface ip remove {} {}/31".format(dut_iface, pch_ip))
        duthost.shell("sudo config interface ip add {} {}/{}".format(dut_iface, gw_ip, mask))
def teardown_ptf_interfaces(testbed, ptfhost, gw_ip, vrf_id, ip_address, mask, port_id, vrf_name):
    """Undo setup_ptf_interfaces: delete the route, rules and address,
    release the enslaved port(s) (and the bond for the 'red' VRF) and
    finally remove the VRF device itself.  Teardown order mirrors setup
    in reverse so each deletion still has its dependencies in place.
    """
    ptfhost.shell("ip route del 0.0.0.0/0 via {} table {}".format(gw_ip, vrf_id))
    if vrf_name == "red":
        bond_interface = "bond1"
        ptfhost.shell("ip addr del {}/{} dev {}".format(ip_address, mask, bond_interface))
        ptfhost.shell("ip rule del iif {} table {}".format(vrf_name, vrf_id))
        ptfhost.shell("ip rule del oif {} table {}".format(vrf_name, vrf_id))
        ptfhost.shell("ip link set {} nomaster".format(bond_interface))
        for iface_id in port_id[testbed['topo']['name']]:
            # Free the member links and bring them back up standalone.
            ptfhost.shell("ip link set eth{} nomaster".format(iface_id))
            ptfhost.shell("ip link set eth{} up".format(iface_id))
        ptfhost.shell("ip link del {}".format(bond_interface))
        ptfhost.shell("ip link del {} type vrf table {}".format(vrf_name, vrf_id))
    else:
        ptfhost.shell("ip addr del {}/{} dev eth{}".format(ip_address, mask, port_id))
        ptfhost.shell("ip rule del iif {} table {}".format(vrf_name, vrf_id))
        ptfhost.shell("ip rule del oif {} table {}".format(vrf_name, vrf_id))
        ptfhost.shell("ip link set eth{} nomaster".format(port_id))
        ptfhost.shell("ip link del {} type vrf table {}".format(vrf_name, vrf_id))
def conf_ptf_interfaces(testbed, ptfhost, duthost, setup_info, interface_type, teardown=False):
    """Apply (or remove, with teardown=True) the PTF VRF configuration for
    every VRF described in setup_info[interface_type]["vrf_conf"]."""
    if not teardown:
        # Randomize PTF port MACs first so stale addresses don't clash.
        ptfhost.script("./scripts/change_mac.sh")
    for key in setup_info[interface_type]["vrf_conf"]:
        vrf_id = setup_info[interface_type]["vrf_conf"][key]["id"]
        vrf_name = key
        ip_address = setup_info[interface_type]["vrf_conf"][key]["ip"]
        gw_ip = setup_info[interface_type]["vrf_conf"][key]["gw"]
        port_id = setup_info[interface_type]["vrf_conf"][key]["port_id"]
        mask = setup_info[interface_type]["vrf_conf"][key]["mask"]
        if teardown:
            teardown_ptf_interfaces(testbed, ptfhost, gw_ip, vrf_id, ip_address, mask, port_id, vrf_name)
        else:
            setup_ptf_interfaces(testbed, ptfhost, duthost, setup_info, interface_type, vrf_id, vrf_name, port_id, ip_address,
                                 mask, gw_ip, key)
    if not teardown:
        # Restart the agent so it picks up the re-parented interfaces.
        ptfhost.shell('supervisorctl restart ptf_nn_agent')
def expected_mask_nated_packet(pkt, protocol_type, ip_dst, ip_src,
                               src_port=None, dst_port=None, icmp_id=None):
    """Build the masked packet expected after NAT translation of *pkt*.

    The copy gets its TTL decremented (one routing hop) and the translated
    addresses/ports applied; L2 addresses and the IP checksum/id fields are
    masked out because the DUT rewrites them.
    """
    translated = pkt.copy()
    translated['IP'].ttl -= 1
    translated['IP'].dst = ip_dst
    translated['IP'].src = ip_src
    if protocol_type in ("TCP", "UDP"):
        translated[protocol_type].sport = src_port
        translated[protocol_type].dport = dst_port
    elif protocol_type == "ICMP":
        translated[protocol_type].id = icmp_id
    masked = mask.Mask(translated)
    for ether_field in ('dst', 'src'):
        masked.set_do_not_care_scapy(packet.Ether, ether_field)
    for ip_field in ('chksum', 'id'):
        masked.set_do_not_care_scapy(packet.IP, ip_field)
    return masked
def create_packet(eth_dst, eth_src, ip_dst, ip_src, protocol_type, sport=None, dport=None):
    """Craft a simple TCP, UDP or ICMP echo-request test packet with TTL 64."""
    common = dict(eth_dst=eth_dst, eth_src=eth_src, ip_dst=ip_dst, ip_src=ip_src, ip_ttl=64)
    if protocol_type == "TCP":
        return testutils.simple_tcp_packet(tcp_sport=sport, tcp_dport=dport, **common)
    if protocol_type == "UDP":
        return testutils.simple_udp_packet(udp_sport=sport, udp_dport=dport, **common)
    # Anything else is treated as ICMP echo request (type 8, code 0).
    return testutils.simple_icmp_packet(icmp_type=8, icmp_code=0, **common)
def teardown_test_env(testbed, duthost, ptfhost, setup_info, interface_type, reboot=False, before_test=False):
    """Restore the DUT (reboot or config reload) and, unless running before
    the test, remove the PTF VRF/interface configuration as well."""
    if reboot:
        duthost.command('reboot')
    else:
        config_reload(duthost)
    # Give the DUT time to come back up and re-apply its configuration.
    time.sleep(180)
    if before_test:
        return
    conf_ptf_interfaces(testbed, ptfhost, duthost, setup_info, interface_type, teardown=True)
def get_network_data(ptfadapter, setup_info, direction, interface_type, nat_type=None, second_port=False):
    """Collect the addresses and ports for one traffic scenario.

    Resolves the PTF send/receive port ids, L2 addresses and the pre- and
    post-translation L3 addresses for the given *direction* / *nat_type*,
    returned as a PTF_NETWORK_DATA tuple.
    """
    outer_ports = get_dst_port(setup_info, direction, interface_type,
                               second_port=second_port)
    inner_ports = get_src_port(setup_info, direction, interface_type,
                               second_port=second_port)
    # Source MAC: the PTF port MAC on the host side, the bridge MAC from leaf side.
    mac_map = {"host-tor": ptfadapter.dataplane.get_mac(0, inner_ports[0]), "leaf-tor": BR_MAC[0]}
    ip_src = get_src_ip(setup_info, direction, interface_type,
                        nat_type=nat_type, second_port=second_port)
    ip_dst = get_dst_ip(setup_info, direction, interface_type,
                        nat_type=nat_type)
    if nat_type == "static_napt" and direction == "leaf-tor":
        # Reverse NAPT flow: swap the roles and target the public (translated) IP.
        exp_dst_ip = ip_src
        ip_src = ip_dst
        ip_dst = setup_info[interface_type]["public_ip"]
        exp_src_ip = ip_src
    elif direction == 'host-tor':
        exp_dst_ip = setup_info[interface_type]["dst_ip"]
        exp_src_ip = setup_info[interface_type]["public_ip"]
    else:
        exp_dst_ip = setup_info[interface_type]["src_ip"]
        exp_src_ip = setup_info[interface_type]["dst_ip"]
        if second_port:
            exp_dst_ip = setup_info[interface_type]["second_src_ip"]
    eth_dst = setup_info['router_mac']
    eth_src = mac_map[direction]
    public_ip = get_public_ip(setup_info, interface_type)
    private_ip = get_src_ip(setup_info, direction, interface_type, nat_type, second_port)
    return PTF_NETWORK_DATA(outer_ports, inner_ports, eth_dst, eth_src, ip_src, ip_dst, public_ip, private_ip, exp_src_ip, exp_dst_ip)
def perform_handshake(ptfhost, setup_info, protocol_type, direction,
                      ip_dst, dest_l4_port, ip_src, source_l4_port, public_ip, second_port=False):
    """Run the L4 handshake helper on the PTF host in the background.

    nat_ptf_echo.py opens a client/server pair inside the appropriate VRFs
    so a NAT/conntrack entry exists before traffic verification.  For the
    reverse direction the public (translated) IP is passed as the last
    argument; the host-tor case passes the literal "None" placeholder.
    """
    src_vrf = setup_info["inner_vrf"][0]
    dst_vrf = setup_info["outer_vrf"][0]
    if second_port:
        # Use the second inner/outer VRF pair for the additional flow.
        src_vrf = setup_info["inner_vrf"][1]
        dst_vrf = setup_info["outer_vrf"][1]
    if direction == "host-tor":
        echo_cmd = "python /tmp/nat_ptf_echo.py {} {} {} {} {} {} {} None &".format(protocol_type.lower(),
                                                                                    ip_dst, dest_l4_port,
                                                                                    ip_src, source_l4_port,
                                                                                    dst_vrf, src_vrf)
    else:
        echo_cmd = "python /tmp/nat_ptf_echo.py {} {} {} {} {} {} {} {} &".format(protocol_type.lower(),
                                                                                  ip_src, source_l4_port,
                                                                                  ip_dst, dest_l4_port,
                                                                                  dst_vrf, src_vrf,
                                                                                  public_ip)
    ptfhost.copy(src="./scripts/nat_ptf_echo.py", dest="/tmp")
    ptfhost.command(echo_cmd)
def generate_and_verify_traffic(duthost, ptfadapter, setup_info, interface_type, direction, protocol_type, nat_type, second_port=False,
                                src_port=None, dst_port=None, exp_src_port=None, exp_dst_port=None):
    """Send traffic in *direction* and verify it arrives NAT-translated.

    L4 ports not given explicitly default to the ones implied by the NAT
    type: the static table mapping, or the dynamically allocated global
    port read back from the DUT.
    """
    network_data = get_network_data(ptfadapter, setup_info, direction, interface_type, nat_type=nat_type, second_port=second_port)
    if nat_type == 'dynamic':
        defaults = get_dynamic_l4_ports(duthost, protocol_type, direction, network_data.public_ip)
    else:
        defaults = get_static_l4_ports(protocol_type, direction, nat_type)
    src_port = defaults.src_port if src_port is None else src_port
    dst_port = defaults.dst_port if dst_port is None else dst_port
    exp_src_port = defaults.exp_src_port if exp_src_port is None else exp_src_port
    exp_dst_port = defaults.exp_dst_port if exp_dst_port is None else exp_dst_port
    probe = create_packet(network_data.eth_dst, network_data.eth_src,
                          network_data.ip_dst, network_data.ip_src,
                          protocol_type, sport=src_port, dport=dst_port)
    expected = expected_mask_nated_packet(probe, protocol_type, network_data.exp_dst_ip, network_data.exp_src_ip,
                                          src_port=exp_src_port, dst_port=exp_dst_port)
    ptfadapter.dataplane.flush()
    for ingress_port in network_data.inner_ports:
        testutils.send(ptfadapter, ingress_port, probe, count=5)
    testutils.verify_packet_any_port(ptfadapter, expected, ports=network_data.outer_ports)
def generate_and_verify_not_translated_traffic(ptfadapter, setup_info, interface_type, direction, protocol_type, nat_type, second_port=False,
                                               ip_src=None, ip_dst=None, exp_ip_src=None, exp_ip_dst=None):
    """Send traffic and verify it is forwarded WITHOUT any NAT translation.

    All addresses default to the untranslated ones from the network data,
    so the expected packet carries the same L3/L4 values as the sent one.
    """
    network_data = get_network_data(ptfadapter, setup_info, direction, interface_type, nat_type=nat_type, second_port=second_port)
    src_port, dst_port = get_l4_default_ports(protocol_type)
    ip_src = network_data.ip_src if ip_src is None else ip_src
    ip_dst = network_data.ip_dst if ip_dst is None else ip_dst
    exp_ip_src = network_data.ip_src if exp_ip_src is None else exp_ip_src
    exp_ip_dst = network_data.ip_dst if exp_ip_dst is None else exp_ip_dst
    probe = create_packet(network_data.eth_dst, network_data.eth_src, ip_dst, ip_src,
                          protocol_type, sport=src_port, dport=dst_port)
    # Same L4 ports on the expected packet: nothing must be rewritten.
    expected = expected_mask_nated_packet(probe, protocol_type, exp_ip_dst, exp_ip_src,
                                          src_port=src_port, dst_port=dst_port)
    ptfadapter.dataplane.flush()
    for ingress_port in network_data.inner_ports:
        testutils.send(ptfadapter, ingress_port, probe, count=5)
    testutils.verify_packet_any_port(ptfadapter, expected, ports=network_data.outer_ports)
def generate_and_verify_traffic_dropped(ptfadapter, setup_info, interface_type, direction, protocol_type, nat_type,
                                        src_port, dst_port, exp_src_port, exp_dst_port, second_port=False):
    """Send traffic and verify the translated packet does NOT egress (dropped)."""
    network_data = get_network_data(ptfadapter, setup_info, direction, interface_type, nat_type=nat_type, second_port=second_port)
    probe = create_packet(network_data.eth_dst, network_data.eth_src,
                          network_data.ip_dst, network_data.ip_src,
                          protocol_type, sport=src_port, dport=dst_port)
    dropped = expected_mask_nated_packet(probe, protocol_type, network_data.exp_dst_ip, network_data.exp_src_ip,
                                         src_port=exp_src_port, dst_port=exp_dst_port)
    ptfadapter.dataplane.flush()
    for ingress_port in network_data.inner_ports:
        testutils.send(ptfadapter, ingress_port, probe, count=5)
    # Absence of the translated packet on any outer port means it was dropped.
    testutils.verify_no_packet_any(ptfadapter, dropped, ports=network_data.outer_ports)
def generate_and_verify_icmp_traffic(ptfadapter, setup_info, interface_type, direction, nat_type, second_port=False, icmp_id=None):
    """Send an ICMP echo request and verify both the translated request on
    the outer ports and the translated echo reply back on the inner ports."""
    protocol_type = 'ICMP'
    network_data = get_network_data(ptfadapter, setup_info, interface_type=interface_type, direction=direction, nat_type=nat_type, second_port=second_port) if False else get_network_data(ptfadapter, setup_info, direction, interface_type, nat_type=nat_type, second_port=second_port)
    pkt = create_packet(network_data.eth_dst, network_data.eth_src, network_data.ip_dst, network_data.ip_src, protocol_type)
    exp_pkt_request = expected_mask_nated_packet(pkt, protocol_type, network_data.exp_dst_ip, network_data.exp_src_ip, icmp_id=icmp_id)
    # The reply travels the opposite way: swap source/destination resolution.
    exp_dst_ip = get_src_ip(setup_info, direction, interface_type,
                            nat_type=nat_type, second_port=second_port)
    exp_src_ip = get_dst_ip(setup_info, direction, interface_type,
                            nat_type=nat_type)
    exp_pkt_reply = expected_mask_nated_packet(pkt, protocol_type, exp_dst_ip, exp_src_ip, icmp_id=0)
    # Turn the masked copy into an echo reply (ICMP type 0).
    exp_pkt_reply.exp_pkt[protocol_type].type = 0
    ptfadapter.dataplane.flush()
    for port in network_data.inner_ports:
        testutils.send(ptfadapter, port, pkt, count=5)
    testutils.verify_packet_any_port(ptfadapter, exp_pkt_request, ports=network_data.outer_ports)
    testutils.verify_packet_any_port(ptfadapter, exp_pkt_reply, ports=network_data.inner_ports)
def generate_and_verify_not_translated_icmp_traffic(ptfadapter, setup_info, interface_type, direction, nat_type, second_port=False,
                                                    ip_src=None, ip_dst=None, check_reply=True):
    """Send an ICMP echo request and verify it is forwarded untranslated.

    The expected request keeps the original src/dst addresses; with
    *check_reply* the mirrored (also untranslated) echo reply is verified
    on the inner ports as well.
    """
    protocol_type = 'ICMP'
    network_data = get_network_data(ptfadapter, setup_info, direction, interface_type, nat_type=nat_type, second_port=second_port)
    if ip_src is None:
        ip_src = network_data.ip_src
    if ip_dst is None:
        ip_dst = network_data.ip_dst
    pkt = create_packet(network_data.eth_dst, network_data.eth_src, ip_dst, ip_src, protocol_type)
    exp_pkt_request = expected_mask_nated_packet(pkt, protocol_type, ip_dst, ip_src)
    # Reply mirrors the request: src/dst simply swapped, no translation.
    exp_pkt_reply = expected_mask_nated_packet(pkt, protocol_type, ip_src, ip_dst)
    # Turn the masked copy into an echo reply (ICMP type 0).
    exp_pkt_reply.exp_pkt[protocol_type].type = 0
    ptfadapter.dataplane.flush()
    for port in network_data.inner_ports:
        testutils.send(ptfadapter, port, pkt, count=5)
    testutils.verify_packet_any_port(ptfadapter, exp_pkt_request, ports=network_data.outer_ports)
    if check_reply:
        testutils.verify_packet_any_port(ptfadapter, exp_pkt_reply, ports=network_data.inner_ports)
def get_l4_default_ports(protocol_type):
    """Return the default (source, destination) L4 port pair.

    UDP gets the UDP pair; any other protocol value falls back to TCP's.
    """
    if protocol_type == "UDP":
        return UDP_LOCAL_PORT, UDP_GLOBAL_PORT
    return TCP_LOCAL_PORT, TCP_GLOBAL_PORT
def get_dynamic_l4_ports(duthost, proto, direction, public_ip):
    """Derive sent/expected L4 ports from the dynamically allocated NAT port.

    Reads `show nat translation` on the DUT, extracts the global L4 port
    allocated for *public_ip*/*proto*, and builds the L4_PORTS_DATA tuple
    used by the traffic helpers.

    Raises:
        Exception: if no dynamic translation exists for the given address.
    """
    # Give orchagent a moment to program the translation before reading it.
    time.sleep(5)
    output = exec_command(duthost, ["show nat translation"])['stdout']
    pattern = r"{}.+{}:(\d+)".format(proto.lower(), public_ip)
    ports = re.findall(pattern, output)
    if not ports:
        raise Exception("Dynamic NAT translation was not created")
    # Bug fix: ports are captured as strings, so plain sorted()[-1] compared
    # them lexicographically (e.g. "999" > "1000").  Compare numerically.
    dynamic_global_port = max(int(port) for port in ports)
    src_port, dst_port = get_l4_default_ports(proto)
    if direction == "leaf-tor":
        # Reverse direction: traffic targets the allocated global port.
        exp_src_port = dynamic_global_port
        exp_dst_port = src_port
        src_port = dynamic_global_port
        dst_port = dynamic_global_port
    else:
        exp_src_port = dynamic_global_port
        exp_dst_port = dynamic_global_port
        dst_port = dynamic_global_port
    return L4_PORTS_DATA(src_port, dst_port, exp_src_port, exp_dst_port)
def configure_dynamic_nat_rule(duthost, ptfadapter, ptfhost, setup_info, interface_type, protocol_type, pool_name=DYNAMIC_POOL_NAME,
                               public_ip=None, acl_table=ACL_TABLE_GLOBAL_NAME, ports_assigned=None, acl_rules=None,
                               binding_name=DYNAMIC_BINDING_NAME, port_range=None,
                               default=False, remove_bindings=False, handshake=False):
    """Configure a dynamic NAT pool, binding and the matching ACL table.

    With *default* set, all unspecified values fall back to the test-wide
    defaults derived from *setup_info*.  Each CLI step is verified against
    `show nat config` output.  Optionally removes bindings afterwards and/or
    performs the L4 handshake so a dynamic translation gets allocated.
    """
    if default:
        # Fill in defaults only where the caller did not override them.
        public_ip = get_public_ip(setup_info, interface_type) if not public_ip else public_ip
        acl_subnet = setup_info[interface_type]["acl_subnet"]
        acl_rules = [{"priority": "10", "src_ip": acl_subnet, "action": "forward"}] if not acl_rules else acl_rules
        port_range = "{0}-{1}".format(POOL_RANGE_START_PORT, POOL_RANGE_END_PORT) if not port_range else port_range
        ports_assigned = setup_info['indices_to_ports_config'][setup_info[interface_type]['inner_port_id'][0]] if not \
            ports_assigned else ports_assigned
    # Create the NAT pool and verify its fields via the CLI.
    duthost.command("sudo config nat add pool {0} {1} {2}".format(pool_name, public_ip, port_range))
    show_nat_pool = get_cli_show_nat_config_output(duthost, "pool")
    pytest_assert(pool_name == show_nat_pool[0]['pool name'], "Pool name was not set to {}".format(pool_name))
    pytest_assert(public_ip == show_nat_pool[0]['global ip range'], "Global IP Range was not set to {}".format(public_ip))
    pytest_assert(port_range == show_nat_pool[0]['global port range'], "Global Port Range was not set to {}".format(port_range))
    # Bind the pool to the ACL table and verify.
    duthost.command("sudo config nat add binding {0} {1} {2}".format(binding_name, pool_name, acl_table))
    show_nat_binding = get_cli_show_nat_config_output(duthost, "bindings")
    pytest_assert(binding_name == show_nat_binding[0]['binding name'], "Binding Name was not set to {}".format(binding_name))
    pytest_assert(pool_name == show_nat_binding[0]['pool name'], "Pool Name was not set to {}".format(pool_name))
    pytest_assert(acl_table == show_nat_binding[0]['access-list'], "Access-List was not set to {}".format(acl_table))
    # Render the ACL table/rules template and load it into CONFIG_DB.
    duthost.command("mkdir -p {}".format(DUT_TMP_DIR))
    acl_rule_vars = {
        'acl_table_name': acl_table,
        'stage': "INGRESS",
        'ports_assigned': ports_assigned,
        'acl_rules': acl_rules
    }
    duthost.host.options['variable_manager'].extra_vars.update(acl_rule_vars)
    acl_config = 'acl_table.json'
    acl_config_path = os.path.join(DUT_TMP_DIR, acl_config)
    duthost.template(src=os.path.join(TEMPLATE_DIR, ACL_TEMPLATE), dest=acl_config_path)
    duthost.command('sonic-cfggen -j {} --write-to-db'.format(acl_config_path))
    duthost.command('rm -rf {}'.format(DUT_TMP_DIR))
    if remove_bindings:
        duthost.command("config nat remove bindings")
    nat_zones_config(duthost, setup_info, interface_type)
    check_peers_by_ping(duthost)
    if handshake:
        # Drive host-originated traffic so a dynamic translation is allocated.
        direction = 'host-tor'
        network_data = get_network_data(ptfadapter, setup_info, direction, interface_type, nat_type='dynamic')
        src_port, dst_port = get_l4_default_ports(protocol_type)
        perform_handshake(ptfhost, setup_info,
                          protocol_type, direction,
                          network_data.ip_dst, dst_port,
                          network_data.ip_src, src_port,
                          network_data.public_ip)
def wait_timeout(protocol_type, wait_time=None, default=True):
    """Sleep long enough for a NAT entry of *protocol_type* to age out.

    With *default* the protocol's global NAPT timeout plus a safety margin
    is used (60 s for non-TCP/UDP); otherwise sleep exactly *wait_time*.
    """
    if not default:
        time.sleep(wait_time)
        return
    if protocol_type == "UDP":
        time.sleep(GLOBAL_UDP_NAPT_TIMEOUT + 80)
    elif protocol_type == "TCP":
        time.sleep(GLOBAL_TCP_NAPT_TIMEOUT + 80)
    else:
        time.sleep(60)
def get_static_l4_ports(proto, direction, nat_type):
    """Compute the sent and expected L4 ports for static NAT/NAPT traffic."""
    src_port, dst_port = get_l4_default_ports(proto)
    if nat_type == 'static_napt' and direction == "host-tor":
        # Forward NAPT: the DUT rewrites the source to the global port.
        exp_src_port = exp_dst_port = dst_port
    elif nat_type == "static_napt" and direction == "leaf-tor":
        # Reverse NAPT: traffic is sent to the global port, translated back.
        exp_src_port, exp_dst_port = dst_port, src_port
        src_port = dst_port
    elif direction == "leaf-tor":
        exp_src_port, exp_dst_port = dst_port, src_port
        src_port, dst_port = dst_port, src_port
    elif direction == "host-tor":
        exp_src_port, exp_dst_port = src_port, dst_port
    return L4_PORTS_DATA(src_port, dst_port, exp_src_port, exp_dst_port)
def conf_dut_routes(duthost, setup_info, subnet, interface_type, teardown=False):
    """Add (or, with teardown=True, delete) a DUT route for *subnet* via the
    peer of the PTF 'red' VRF interface.

    The gateway is the address one above the PTF-side 'red' interface IP
    (the DUT side of the point-to-point link).  Deletion failures are only
    logged, since the route may legitimately not exist.
    """
    ptf_ip = setup_info[interface_type]["vrf_conf"]["red"]["ip"]
    # Bug fix: the old code incremented only the LAST CHARACTER of the IP
    # string, producing e.g. "x.x.x.19" -> "x.x.x.110".  Increment the whole
    # last octet numerically instead (same result for non-carrying octets).
    prefix, last_octet = ptf_ip.rsplit(".", 1)
    gw = "{}.{}".format(prefix, int(last_octet) + 1)
    if teardown:
        try:
            duthost.command("ip route del {} via {}".format(subnet, gw))
        except RunAnsibleModuleFail:
            logger.debug("Route '%s via %s' was not deleted/existed", subnet, gw)
    else:
        duthost.command("ip route add {} via {}".format(subnet, gw))
def get_redis_val(duthost, db, key):
    """Dump redis database *db* entries matching *key* and return them as a dict.

    On any failure the exception text is returned instead of raising,
    matching the best-effort style of exec_command.
    """
    try:
        output = exec_command(duthost, ["redis-dump -d {} --pretty -k *{}*".format(db, key)])
        if output["rc"]:
            # Bug fix: this branch referenced the undefined name 'output_cli',
            # raising NameError instead of reporting the actual return code.
            raise Exception('Return code is {} not 0'.format(output["rc"]))
        redis_dict = json.loads(output['stdout'])
        return redis_dict
    except Exception as e:
        return e.__str__()
def get_db_rules(duthost, ptfadapter, setup_test_env, protocol_type, db_type, private_ip=None, public_ip=None, private_port=None,
                 public_port=None, start_port=POOL_RANGE_START_PORT, end_port=POOL_RANGE_END_PORT, access_list=ACL_TABLE_GLOBAL_NAME, nat_pool=DYNAMIC_POOL_NAME,
                 post_flag=False):
    """Build the expected redis entries for one NAT verification scenario.

    *db_type* selects which table's expected content is returned (APP_DB /
    CONFIG_DB / ASIC_DB variants); the "...POST" variants describe the state
    after an additional entry was added.  *post_flag* shifts the expected
    timeouts for the APP_DB timeout check.

    Raises:
        Exception: for an unknown *db_type*.
    """
    interface_type, setup_info = setup_test_env
    setup_data = copy.deepcopy(setup_info)
    nat_type = 'static_napt'
    direction = 'host-tor'
    network_data = get_network_data(ptfadapter, setup_data, direction, interface_type, nat_type=nat_type)
    # The "other" protocol, used by the POST variants which cover both.
    secondary_protocol = {"TCP": "UDP", "UDP": "TCP"}[protocol_type]
    global_port = {"TCP": TCP_GLOBAL_PORT, "UDP": UDP_GLOBAL_PORT}[protocol_type]
    local_port = {"TCP": TCP_LOCAL_PORT, "UDP": UDP_LOCAL_PORT}[protocol_type]
    db_rules = {}
    if db_type == 'APP_DB timeout':
        # After reconfiguration (post_flag) the timeouts are expected shifted.
        offset = {True: 200, False: 0}[post_flag]
        db_rules = {"nat_timeout" : "{}".format(GLOBAL_NAT_TIMEOUT + offset),
                    "admin_mode" : "enabled",
                    "nat_udp_timeout" : "{}".format(GLOBAL_UDP_NAPT_TIMEOUT + offset),
                    "nat_tcp_timeout" : "{}".format(GLOBAL_TCP_NAPT_TIMEOUT + offset * 25)
                    }
    elif db_type == 'Pool CONFIG_DB':
        db_rules = {"nat_ip": "{}".format(public_ip),
                    "nat_port": "{}-{}".format(start_port, end_port)
                    }
    elif db_type == 'Pool APP_DB':
        db_rules = {"port_range": "{}-{}".format(start_port, end_port)}
    elif db_type == 'Binding CONFIG_DB':
        db_rules = {"access_list": access_list,
                    "nat_pool": nat_pool,
                    "nat_type": "snat",
                    "twice_nat_id": "NULL"
                    }
    elif db_type == 'NAPT APP_DB':
        # APP_DB holds a dnat and a mirrored snat entry per translation.
        db_rules = {
            "NAPT_TABLE:{}:{}:{}".format(protocol_type, network_data.public_ip, global_port): {
                "type": "hash",
                "value": {
                    "entry_type": "static",
                    "nat_type": "dnat",
                    "translated_ip": "{}".format(network_data.private_ip),
                    "translated_l4_port": "{}".format(local_port)
                }
            },
            "NAPT_TABLE:{}:{}:{}".format(protocol_type, network_data.private_ip, local_port): {
                "type": "hash",
                "value": {
                    "entry_type": "static",
                    "nat_type": "snat",
                    "translated_ip": "{}".format(network_data.public_ip),
                    "translated_l4_port": "{}".format(global_port)
                }
            }
        }
    elif db_type == 'NAPT CONFIG_DB':
        db_rules = {
            "STATIC_NAPT|{}|{}|{}".format(network_data.public_ip, protocol_type, global_port): {
                "type": "hash",
                "value": {
                    "local_ip": "{}".format(network_data.private_ip),
                    "local_port": "{}".format(local_port),
                    "nat_type": "dnat"
                }
            }
        }
    elif db_type == 'NAPT APP_DB POST':
        # Expected APP_DB state after adding the extra entry: both the new
        # (private_ip/public_ip args) and the original translations, for both
        # protocols of the new entry.
        db_rules = {
            "NAPT_TABLE:{}:{}:{}".format(protocol_type, public_ip, public_port): {
                "type": "hash",
                "value": {
                    "entry_type": "static",
                    "nat_type": "dnat",
                    "translated_ip": "{}".format(private_ip),
                    "translated_l4_port": "{}".format(private_port)
                }
            },
            "NAPT_TABLE:{}:{}:{}".format(protocol_type, private_ip, private_port): {
                "type": "hash",
                "value": {
                    "entry_type": "static",
                    "nat_type": "snat",
                    "translated_ip": "{}".format(public_ip),
                    "translated_l4_port": "{}".format(public_port)
                }
            },
            "NAPT_TABLE:{}:{}:{}".format(protocol_type, network_data.public_ip, global_port): {
                "type": "hash",
                "value": {
                    "entry_type": "static",
                    "nat_type": "dnat",
                    "translated_ip": "{}".format(network_data.private_ip),
                    "translated_l4_port": "{}".format(local_port)
                }
            },
            "NAPT_TABLE:{}:{}:{}".format(protocol_type, network_data.private_ip, local_port): {
                "type": "hash",
                "value": {
                    "entry_type": "static",
                    "nat_type": "snat",
                    "translated_ip": "{}".format(network_data.public_ip),
                    "translated_l4_port": "{}".format(global_port)
                }
            },
            "NAPT_TABLE:{}:{}:{}".format(secondary_protocol, public_ip, public_port): {
                "type": "hash",
                "value": {
                    "entry_type": "static",
                    "nat_type": "dnat",
                    "translated_ip": "{}".format(private_ip),
                    "translated_l4_port": "{}".format(private_port)
                }
            },
            "NAPT_TABLE:{}:{}:{}".format(secondary_protocol, private_ip, private_port): {
                "type": "hash",
                "value": {
                    "entry_type": "static",
                    "nat_type": "snat",
                    "translated_ip": "{}".format(public_ip),
                    "translated_l4_port": "{}".format(public_port)
                }
            }
        }
    elif db_type == 'NAPT CONFIG_DB POST':
        db_rules = {
            "STATIC_NAPT|{}|{}|{}".format(public_ip, protocol_type, public_port): {
                "type": "hash",
                "value": {
                    "local_ip": "{}".format(private_ip),
                    "local_port": "{}".format(private_port),
                    "nat_type": "dnat"
                }
            },
            "STATIC_NAPT|{}|{}|{}".format(public_ip, secondary_protocol, public_port): {
                "type": "hash",
                "value": {
                    "local_ip": "{}".format(private_ip),
                    "local_port": "{}".format(private_port),
                    "nat_type": "dnat"
                }
            },
            "STATIC_NAPT|{}|{}|{}".format(network_data.public_ip, protocol_type, global_port): {
                "type": "hash",
                "value": {
                    "local_ip": "{}".format(network_data.private_ip),
                    "local_port": "{}".format(local_port),
                    "nat_type": "dnat"
                }
            }
        }
    elif db_type == 'ASIC_DB SRC':
        db_rules = {
            "SAI_NAT_ENTRY_ATTR_SRC_IP": "{}".format(network_data.public_ip),
            "SAI_NAT_ENTRY_ATTR_L4_SRC_PORT": "{}".format(global_port)
        }
    elif db_type == 'ASIC_DB DST':
        db_rules = {
            "SAI_NAT_ENTRY_ATTR_DST_IP": "{}".format(network_data.private_ip),
            "SAI_NAT_ENTRY_ATTR_L4_DST_PORT": "{}".format(local_port)
        }
    else:
        raise Exception('Improper db_type selected')
    return db_rules
def write_json(duthost, json_dict, feature):
    """Render the NAT binding Jinja2 template with *json_dict* and load the
    resulting JSON on the DUT via `config load`.

    Raises:
        AttributeError: for an unsupported *feature* name.
    """
    TEMP_FILE = "{}.json".format(feature)
    curr_dir = os.path.dirname(os.path.abspath(__file__))
    j2_template = Environment(loader=FileSystemLoader(curr_dir), trim_blocks=True)
    if feature == "dynamic_binding":
        j2_temp = j2_template.get_template(NAT_CONF_J2_TEMPLATE).render(nat=json_dict)
    else:
        raise AttributeError("Unexpected feature {}".format(feature))
    exec_command(duthost, ["mkdir -p {}".format(DUT_TMP_DIR)])
    # NOTE(review): echo with single quotes would break if the rendered
    # template ever contained a single quote - confirm the template is safe.
    exec_command(duthost, ["echo '{j2_temp}' > {dir}/{file}".
                 format(j2_temp=j2_temp, dir=DUT_TMP_DIR, file=TEMP_FILE)])
    exec_command(duthost, ["sudo config load {} -y".format(DUT_TMP_DIR+"/"+TEMP_FILE)])
    exec_command(duthost, ["rm -rf {}".format(DUT_TMP_DIR)])
| true | true |
f73432ee58b79c9c931bae7337e94822d4ebdfa3 | 876 | py | Python | company/migrations/0024_auto_20161220_1751.py | uktrade/directory-api | 45a9024a7ecc2842895201cbb51420ba9e57a168 | [
"MIT"
] | 2 | 2017-06-02T09:09:08.000Z | 2021-01-18T10:26:53.000Z | company/migrations/0024_auto_20161220_1751.py | konradko/directory-api | e9cd05b1deaf575e94352c46ddbd1857d8119fda | [
"MIT"
] | 629 | 2016-10-10T09:35:52.000Z | 2022-03-25T15:04:04.000Z | company/migrations/0024_auto_20161220_1751.py | konradko/directory-api | e9cd05b1deaf575e94352c46ddbd1857d8119fda | [
"MIT"
] | 5 | 2017-06-22T10:02:22.000Z | 2022-03-14T17:55:21.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.9.10 on 2016-12-20 17:51
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('company', '0023_auto_20161219_1730'),
]
operations = [
migrations.AddField(
model_name='company',
name='facebook_url',
field=models.URLField(blank=True, default='', max_length=255, null=True),
),
migrations.AddField(
model_name='company',
name='linkedin_url',
field=models.URLField(blank=True, default='', max_length=255, null=True),
),
migrations.AddField(
model_name='company',
name='twitter_url',
field=models.URLField(blank=True, default='', max_length=255, null=True),
),
]
| 28.258065 | 85 | 0.593607 |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('company', '0023_auto_20161219_1730'),
]
operations = [
migrations.AddField(
model_name='company',
name='facebook_url',
field=models.URLField(blank=True, default='', max_length=255, null=True),
),
migrations.AddField(
model_name='company',
name='linkedin_url',
field=models.URLField(blank=True, default='', max_length=255, null=True),
),
migrations.AddField(
model_name='company',
name='twitter_url',
field=models.URLField(blank=True, default='', max_length=255, null=True),
),
]
| true | true |
f734331cbfdb47aab2dbbef75b0eeb01f8ce15b8 | 413 | py | Python | seedstars_contacts/wsgi.py | ruidacosta/seedstars-contacts | 6b39cf0c688adc3fbc5d032ee30e3271588c6ec3 | [
"MIT"
] | null | null | null | seedstars_contacts/wsgi.py | ruidacosta/seedstars-contacts | 6b39cf0c688adc3fbc5d032ee30e3271588c6ec3 | [
"MIT"
] | null | null | null | seedstars_contacts/wsgi.py | ruidacosta/seedstars-contacts | 6b39cf0c688adc3fbc5d032ee30e3271588c6ec3 | [
"MIT"
] | null | null | null | """
WSGI config for seedstars_contacts project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "seedstars_contacts.settings")
application = get_wsgi_application()
| 24.294118 | 78 | 0.79661 |
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "seedstars_contacts.settings")
application = get_wsgi_application()
| true | true |
f73435251244e0ae957d0fc4c9b83da5333bdf6a | 16,707 | py | Python | hphp/hack/test/hh_codesynthesis/codesynthesis_test.py | jjergus/hhvm | 7f94d3da259c9443792327e0a61af1a6778d53b9 | [
"PHP-3.01",
"Zend-2.0"
] | null | null | null | hphp/hack/test/hh_codesynthesis/codesynthesis_test.py | jjergus/hhvm | 7f94d3da259c9443792327e0a61af1a6778d53b9 | [
"PHP-3.01",
"Zend-2.0"
] | null | null | null | hphp/hack/test/hh_codesynthesis/codesynthesis_test.py | jjergus/hhvm | 7f94d3da259c9443792327e0a61af1a6778d53b9 | [
"PHP-3.01",
"Zend-2.0"
] | null | null | null | #!/usr/bin/env python3
# pyre-strict
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the "hack" directory of this source tree.
import tempfile
import unittest
from hphp.hack.src.hh_codesynthesis import hh_codesynthesis, hackGenerator
from hphp.hack.src.hh_codesynthesis.hh_codesynthesis import ClingoContext
class GenerateLogicRulesTest(unittest.TestCase):
def test_depth_less_than_nodes(self) -> None:
ClingoContext.number_of_nodes = 12
ClingoContext.min_depth = 3
exp = [
'internal_symbols("S0", 0;"S1", 1;"S2", 2;"S3", 3;"S4", 4;"S5", 5;"S6", 6;"S7", 7;"S8", 8;"S9", 9;"S10", 10;"S11", 11).',
'extends_to("S0", "S4").',
'extends_to("S4", "S8").',
]
self.assertListEqual(exp, hh_codesynthesis.generate_logic_rules())
def test_depth_more_than_nodes(self) -> None:
# In this case, the graph has no way to satisfy the min_depth requirement.
# The user, or the higher level wrapper should make sure given proper
# parameters. Otherwise, we will create the following output.
ClingoContext.number_of_nodes = 3
ClingoContext.min_depth = 5
with self.assertRaises(
expected_exception=RuntimeError, msg="Received unreasonable parameters."
):
hh_codesynthesis.generate_logic_rules()
def test_depth_equals_to_nodes(self) -> None:
ClingoContext.number_of_nodes = 7
ClingoContext.min_depth = 7
exp = [
'internal_symbols("S0", 0;"S1", 1;"S2", 2;"S3", 3;"S4", 4;"S5", 5;"S6", 6).',
'extends_to("S0", "S1").',
'extends_to("S1", "S2").',
'extends_to("S2", "S3").',
'extends_to("S3", "S4").',
'extends_to("S4", "S5").',
'extends_to("S5", "S6").',
]
self.assertListEqual(exp, hh_codesynthesis.generate_logic_rules())
def test_hack_code_gen(self) -> None:
ClingoContext.number_of_nodes = 12
ClingoContext.min_depth = 3
ClingoContext.min_classes = 3
ClingoContext.min_interfaces = 4
ClingoContext.lower_bound = 1
ClingoContext.higher_bound = 5
ClingoContext.avg_width = 0
exp = """\
<?hh
class S9 {}
class S10 {}
class S11 {}
interface S0 {}
interface S1 {}
interface S2 {}
interface S3 {}
interface S4 extends S0 {}
interface S5 {}
interface S6 {}
interface S7 {}
interface S8 extends S4 {}
"""
hack_codegen = hackGenerator.HackCodeGenerator()
hh_codesynthesis.do_reasoning(
additional_programs=hh_codesynthesis.generate_logic_rules(),
generator=hack_codegen,
)
self.assertEqual(str(hack_codegen), exp)
def test_hack_code_gen_with_partial_dependency_graph_given_by_user(self) -> None:
ClingoContext.number_of_nodes = 12
ClingoContext.min_depth = 3
ClingoContext.min_classes = 3
ClingoContext.min_interfaces = 4
ClingoContext.lower_bound = 1
ClingoContext.higher_bound = 5
ClingoContext.avg_width = 0
deps = """\
Extends A -> Type B
Extends I -> Type B
Extends T -> Type A
Type A -> Type B
Type I -> Type B
Type T -> Type A, Type B"""
exp = """\
<?hh
class S9 {}
class S10 {}
class S11 {}
interface A extends T {}
interface B extends A,I {}
interface I {}
interface T {}
interface S0 {}
interface S1 {}
interface S2 {}
interface S3 {}
interface S4 extends S0 {}
interface S5 {}
interface S6 {}
interface S7 {}
interface S8 extends S4 {}
"""
hack_codegen = hackGenerator.HackCodeGenerator()
combined_rules = (
hh_codesynthesis.generate_logic_rules()
+ hh_codesynthesis.extract_logic_rules(deps.split("\n"))
)
hh_codesynthesis.do_reasoning(
additional_programs=combined_rules,
generator=hack_codegen,
)
self.assertEqual(str(hack_codegen), exp)
def test_unsatisfiable_parameters(self) -> None:
# Given 5 nodes, but asking for 3 classes + 4 interfaces with
ClingoContext.number_of_nodes = 5
ClingoContext.min_classes = 3
ClingoContext.min_interfaces = 4
hack_codegen = hackGenerator.HackCodeGenerator()
with self.assertRaises(expected_exception=RuntimeError, msg="Unsatisfiable."):
hh_codesynthesis.do_reasoning(
additional_programs=hh_codesynthesis.generate_logic_rules(),
generator=hack_codegen,
)
class ExtractLogicRulesTest(unittest.TestCase):
def test_wrong_format(self) -> None:
exp = [
'extends_to("A", "B").',
'extends_to("I", "B").',
'extends_to("T", "A").',
'method("A", "foo", "B").',
'type("A", "B").',
'type("I", "B").',
'type("T", "A").',
'type("T", "B").',
'symbols("A";"B";"I";"T").',
]
deps = """\
Extends A -> Type B
Extends I -> Type B
Extends T -> Type A, Broke
Method A::foo -> Type B
Type A -> Type B
Type I -> Type B
Type T -> Type A, Type B"""
self.assertListEqual(
exp, hh_codesynthesis.extract_logic_rules(deps.split("\n"))
)
def test_multiple_lines(self) -> None:
exp = [
'extends_to("I1", "C1").',
'extends_to("I1", "C2").',
'extends_to("I1", "C3").',
'extends_to("I1", "I2").',
'extends_to("I3", "C4").',
'extends_to("I4", "C5").',
'symbols("C1";"C2";"C3";"C4";"C5";"I1";"I2";"I3";"I4").',
]
deps = """\
Extends I1 -> Type C1, Type C2, Type C3, Type I2
Extends I3 -> Type C4,
Type C6,
Type I5,
Type I6,
Type I7,
Type I8
Extends I4 -> Type C5,
Type C6,
Type I9,
Type I10,
Type I11,
Type I12,
Type I13,
Type I14"""
self.assertListEqual(
exp, hh_codesynthesis.extract_logic_rules(deps.split("\n"))
)
def test_multiple_lines_all(self) -> None:
# T92303034 Temporary handle for the multiple lines using replace(",\n", ","),
exp = [
'extends_to("I1", "C1").',
'extends_to("I1", "C2").',
'extends_to("I1", "C3").',
'extends_to("I1", "I2").',
'extends_to("I3", "C4").',
'extends_to("I3", "C6").',
'extends_to("I3", "I5").',
'extends_to("I3", "I6").',
'extends_to("I3", "I7").',
'extends_to("I3", "I8").',
'extends_to("I4", "C5").',
'extends_to("I4", "C6").',
'extends_to("I4", "I9").',
'extends_to("I4", "I10").',
'extends_to("I4", "I11").',
'extends_to("I4", "I12").',
'extends_to("I4", "I13").',
'extends_to("I4", "I14").',
'symbols("C1";"C2";"C3";"C4";"C5";"C6";"I1";"I10";"I11";"I12";"I13";"I14";"I2";"I3";"I4";"I5";"I6";"I7";"I8";"I9").',
]
deps = """\
Extends I1 -> Type C1, Type C2, Type C3, Type I2
Extends I3 -> Type C4,
Type C6,
Type I5,
Type I6,
Type I7,
Type I8
Extends I4 -> Type C5,
Type C6,
Type I9,
Type I10,
Type I11,
Type I12,
Type I13,
Type I14"""
self.assertListEqual(
exp,
hh_codesynthesis.extract_logic_rules(deps.replace(",\n", ",").split("\n")),
)
def test_extends_type_method_dependency(self) -> None:
exp = [
'extends_to("A", "B").',
'extends_to("I", "B").',
'extends_to("T", "A").',
'method("A", "foo", "B").',
'type("A", "B").',
'type("I", "B").',
'type("T", "A").',
'type("T", "B").',
'symbols("A";"B";"I";"T").',
]
deps = """\
Extends A -> Type B
Extends I -> Type B
Extends T -> Type A
Method A::foo -> Type B
Type A -> Type B
Type I -> Type B
Type T -> Type A, Type B"""
self.assertListEqual(
exp, hh_codesynthesis.extract_logic_rules(deps.split("\n"))
)
def test_unsupported_type_dependency(self) -> None:
# T94428437 Temporary skipping all built-in functions for now.
exp = [
'extends_to("A", "B").',
'type("A", "B").',
'symbols("A";"B").',
]
deps = r"""
Extends A -> Type B
Type A -> Type B
Type HH\Capabilities\AccessGlobals -> Type B
Type HH\Contexts\Unsafe\globals -> Type A"""
self.assertListEqual(
exp, hh_codesynthesis.extract_logic_rules(deps.split("\n"))
)
class DoReasoningTest(unittest.TestCase):
def extract_run_and_compare(
self, deps: str, exp: str, generator: hh_codesynthesis.CodeGenerator
) -> None:
additional_programs = hh_codesynthesis.extract_logic_rules(deps.split("\n"))
hh_codesynthesis.do_reasoning(
additional_programs=additional_programs, generator=generator
)
self.assertEqual(str(generator), exp)
def test_clingo_exception(self) -> None:
deps = ["rule_without_period(symbol1, symbol2)"]
raw_codegen = hh_codesynthesis.CodeGenerator()
with self.assertRaises(expected_exception=RuntimeError, msg="parsing failed"):
hh_codesynthesis.do_reasoning(
additional_programs=deps, generator=raw_codegen
)
def test_extends_dependency(self) -> None:
exp = [
'class("B")',
'class("I")',
'extends("A","T")',
'extends("B","I")',
'implements("B","A")',
'interface("A")',
'interface("T")',
]
rules = [
'extends_to("A", "B").',
'extends_to("I", "B").',
'extends_to("T", "A").',
'symbols("A";"B";"I";"T").',
]
raw_codegen = hh_codesynthesis.CodeGenerator()
hh_codesynthesis.do_reasoning(additional_programs=rules, generator=raw_codegen)
self.assertListEqual(sorted(str(raw_codegen).split()), exp)
def test_type_dependency(self) -> None:
# This one covered the 'has_method_with_parameter'.
exp = ['class("B")', 'has_method_with_parameter("C","B")', 'interface("C")']
rules = ['type("B", "C").' 'symbols("B"; "C").']
raw_codegen = hh_codesynthesis.CodeGenerator()
hh_codesynthesis.do_reasoning(additional_programs=rules, generator=raw_codegen)
self.assertListEqual(sorted(str(raw_codegen).split()), exp)
def test_method_dependency(self) -> None:
# This one covered the 'invokes_in_method', as well as the
# 'has_method_with_parameter', since we need to pass the object as parameter,
# then invoke its method.
exp = [
'add_method("B","Foo")',
'class("C")',
'has_method_with_parameter("C","B")',
'interface("B")',
'invokes_in_method("C","B","Foo")',
]
rules = ['method("B", "Foo", "C").' 'symbols("B"; "C").']
raw_codegen = hh_codesynthesis.CodeGenerator()
hh_codesynthesis.do_reasoning(additional_programs=rules, generator=raw_codegen)
self.assertListEqual(sorted(str(raw_codegen).split()), exp)
def test_method_type_extends_dependencies(self) -> None:
# This one covered the 'override' in the "Extend" and "Method" edge.
exp = [
'add_method("B","Foo")',
'add_method("C","Foo")',
'class("C")',
'implements("C","B")',
'interface("B")',
]
rules = [
'extends_to("B", "C").',
'method("B", "Foo", "C").',
'type("B", "C").',
'symbols("B"; "C").',
]
raw_codegen = hh_codesynthesis.CodeGenerator()
hh_codesynthesis.do_reasoning(additional_programs=rules, generator=raw_codegen)
self.assertListEqual(sorted(str(raw_codegen).split()), exp)
def test_extends_dependency_with_rule_extraction(self) -> None:
exp = [
'add_method("A","foo")',
'add_method("B","foo")',
'class("B")',
'class("I")',
'extends("A","T")',
'extends("B","I")',
'implements("B","A")',
'interface("A")',
'interface("T")',
]
deps = """\
Extends A -> Type B
Extends I -> Type B
Extends T -> Type A
Method A::foo -> Type B
Type A -> Type B
Type I -> Type B
Type T -> Type A, Type B
"""
raw_codegen = hh_codesynthesis.CodeGenerator()
additional_programs = hh_codesynthesis.extract_logic_rules(deps.split("\n"))
hh_codesynthesis.do_reasoning(
additional_programs=additional_programs, generator=raw_codegen
)
self.assertListEqual(sorted(str(raw_codegen).split()), exp)
def test_extends_dependency_hack_codegen(self) -> None:
exp = """\
<?hh
class B extends I implements A {}
class I {}
interface A extends T {}
interface T {}
"""
rules = [
'extends_to("A", "B").',
'extends_to("I", "B").',
'extends_to("T", "A").',
'symbols("A";"B";"I";"T").',
]
hack_codegen = hackGenerator.HackCodeGenerator()
hh_codesynthesis.do_reasoning(additional_programs=rules, generator=hack_codegen)
self.assertEqual(str(hack_codegen), exp)
def test_extends_dependency_with_rule_extraction_hack_codegen(self) -> None:
exp = """\
<?hh
class B extends I implements A {}
class I {}
interface A extends T {}
interface T {}
"""
deps = """\
Extends A -> Type B
Extends I -> Type B
Extends T -> Type A
Type A -> Type B
Type I -> Type B
Type T -> Type A, Type B
"""
self.extract_run_and_compare(deps, exp, hackGenerator.HackCodeGenerator())
def test_method_dependency_with_rule_extraction_hack_codegen_override(self) -> None:
exp = """\
<?hh
class B implements A {
public function foo(): void{}
}
interface A {
public function foo(): void;
}
"""
deps = """\
Extends A -> Type B
Method A::foo -> Type B
Type A -> Type B
"""
self.extract_run_and_compare(deps, exp, hackGenerator.HackCodeGenerator())
def test_type_dependency_with_rule_extraction_hack_codegen(self) -> None:
exp = """\
<?hh
class A {}
interface B {
public function dummy_B_method(A $A_obj): void;
}
"""
deps = """\
Type A -> Type B
"""
self.extract_run_and_compare(deps, exp, hackGenerator.HackCodeGenerator())
class ReadFromFileTest(unittest.TestCase):
def test_read(self) -> None:
exp = [
'extends_to("A", "B").',
'extends_to("I", "B").',
'extends_to("T", "A").',
'method("A", "foo", "B").',
'type("A", "B").',
'type("I", "B").',
'type("T", "A").',
'type("T", "B").',
'symbols("A";"B";"I";"T").',
]
deps = """\
Extends A -> Type B
Extends I -> Type B
Extends T -> Type A
Method A::foo -> Type B
Type A -> Type B
Type I -> Type B
Type T -> Type A, Type B
"""
with tempfile.NamedTemporaryFile(mode="w") as fp:
fp.write(deps)
fp.flush()
self.assertListEqual(
exp,
hh_codesynthesis.extract_logic_rules(
hh_codesynthesis.read_from_file_or_stdin(fp.name)
),
)
def test_non_exist(self) -> None:
test_file = "non_exist.in"
with self.assertRaises(expected_exception=FileNotFoundError):
hh_codesynthesis.extract_logic_rules(
hh_codesynthesis.read_from_file_or_stdin(test_file)
)
class WriteToFileTest(unittest.TestCase):
def test_hack_output(self) -> None:
exp = """\
<?hh
class C1 {}
class C2 extends C1 implements I1 {}
interface I1 {}
"""
generator = hackGenerator.HackCodeGenerator()
generator._add_class("C1")
generator._add_class("C2")
generator._add_interface("I1")
generator._add_extend("C2", "C1")
generator._add_implement("C2", "I1")
with tempfile.NamedTemporaryFile("r") as fp:
hh_codesynthesis.output_to_file_or_stdout(generator, fp.name)
lines = fp.readlines()
self.assertEqual("".join(lines), exp)
| 32.440777 | 133 | 0.553481 |
import tempfile
import unittest
from hphp.hack.src.hh_codesynthesis import hh_codesynthesis, hackGenerator
from hphp.hack.src.hh_codesynthesis.hh_codesynthesis import ClingoContext
class GenerateLogicRulesTest(unittest.TestCase):
def test_depth_less_than_nodes(self) -> None:
ClingoContext.number_of_nodes = 12
ClingoContext.min_depth = 3
exp = [
'internal_symbols("S0", 0;"S1", 1;"S2", 2;"S3", 3;"S4", 4;"S5", 5;"S6", 6;"S7", 7;"S8", 8;"S9", 9;"S10", 10;"S11", 11).',
'extends_to("S0", "S4").',
'extends_to("S4", "S8").',
]
self.assertListEqual(exp, hh_codesynthesis.generate_logic_rules())
def test_depth_more_than_nodes(self) -> None:
ClingoContext.number_of_nodes = 3
ClingoContext.min_depth = 5
with self.assertRaises(
expected_exception=RuntimeError, msg="Received unreasonable parameters."
):
hh_codesynthesis.generate_logic_rules()
def test_depth_equals_to_nodes(self) -> None:
ClingoContext.number_of_nodes = 7
ClingoContext.min_depth = 7
exp = [
'internal_symbols("S0", 0;"S1", 1;"S2", 2;"S3", 3;"S4", 4;"S5", 5;"S6", 6).',
'extends_to("S0", "S1").',
'extends_to("S1", "S2").',
'extends_to("S2", "S3").',
'extends_to("S3", "S4").',
'extends_to("S4", "S5").',
'extends_to("S5", "S6").',
]
self.assertListEqual(exp, hh_codesynthesis.generate_logic_rules())
def test_hack_code_gen(self) -> None:
ClingoContext.number_of_nodes = 12
ClingoContext.min_depth = 3
ClingoContext.min_classes = 3
ClingoContext.min_interfaces = 4
ClingoContext.lower_bound = 1
ClingoContext.higher_bound = 5
ClingoContext.avg_width = 0
exp = """\
<?hh
class S9 {}
class S10 {}
class S11 {}
interface S0 {}
interface S1 {}
interface S2 {}
interface S3 {}
interface S4 extends S0 {}
interface S5 {}
interface S6 {}
interface S7 {}
interface S8 extends S4 {}
"""
hack_codegen = hackGenerator.HackCodeGenerator()
hh_codesynthesis.do_reasoning(
additional_programs=hh_codesynthesis.generate_logic_rules(),
generator=hack_codegen,
)
self.assertEqual(str(hack_codegen), exp)
def test_hack_code_gen_with_partial_dependency_graph_given_by_user(self) -> None:
ClingoContext.number_of_nodes = 12
ClingoContext.min_depth = 3
ClingoContext.min_classes = 3
ClingoContext.min_interfaces = 4
ClingoContext.lower_bound = 1
ClingoContext.higher_bound = 5
ClingoContext.avg_width = 0
deps = """\
Extends A -> Type B
Extends I -> Type B
Extends T -> Type A
Type A -> Type B
Type I -> Type B
Type T -> Type A, Type B"""
exp = """\
<?hh
class S9 {}
class S10 {}
class S11 {}
interface A extends T {}
interface B extends A,I {}
interface I {}
interface T {}
interface S0 {}
interface S1 {}
interface S2 {}
interface S3 {}
interface S4 extends S0 {}
interface S5 {}
interface S6 {}
interface S7 {}
interface S8 extends S4 {}
"""
hack_codegen = hackGenerator.HackCodeGenerator()
combined_rules = (
hh_codesynthesis.generate_logic_rules()
+ hh_codesynthesis.extract_logic_rules(deps.split("\n"))
)
hh_codesynthesis.do_reasoning(
additional_programs=combined_rules,
generator=hack_codegen,
)
self.assertEqual(str(hack_codegen), exp)
def test_unsatisfiable_parameters(self) -> None:
ClingoContext.number_of_nodes = 5
ClingoContext.min_classes = 3
ClingoContext.min_interfaces = 4
hack_codegen = hackGenerator.HackCodeGenerator()
with self.assertRaises(expected_exception=RuntimeError, msg="Unsatisfiable."):
hh_codesynthesis.do_reasoning(
additional_programs=hh_codesynthesis.generate_logic_rules(),
generator=hack_codegen,
)
class ExtractLogicRulesTest(unittest.TestCase):
def test_wrong_format(self) -> None:
exp = [
'extends_to("A", "B").',
'extends_to("I", "B").',
'extends_to("T", "A").',
'method("A", "foo", "B").',
'type("A", "B").',
'type("I", "B").',
'type("T", "A").',
'type("T", "B").',
'symbols("A";"B";"I";"T").',
]
deps = """\
Extends A -> Type B
Extends I -> Type B
Extends T -> Type A, Broke
Method A::foo -> Type B
Type A -> Type B
Type I -> Type B
Type T -> Type A, Type B"""
self.assertListEqual(
exp, hh_codesynthesis.extract_logic_rules(deps.split("\n"))
)
def test_multiple_lines(self) -> None:
exp = [
'extends_to("I1", "C1").',
'extends_to("I1", "C2").',
'extends_to("I1", "C3").',
'extends_to("I1", "I2").',
'extends_to("I3", "C4").',
'extends_to("I4", "C5").',
'symbols("C1";"C2";"C3";"C4";"C5";"I1";"I2";"I3";"I4").',
]
deps = """\
Extends I1 -> Type C1, Type C2, Type C3, Type I2
Extends I3 -> Type C4,
Type C6,
Type I5,
Type I6,
Type I7,
Type I8
Extends I4 -> Type C5,
Type C6,
Type I9,
Type I10,
Type I11,
Type I12,
Type I13,
Type I14"""
self.assertListEqual(
exp, hh_codesynthesis.extract_logic_rules(deps.split("\n"))
)
def test_multiple_lines_all(self) -> None:
exp = [
'extends_to("I1", "C1").',
'extends_to("I1", "C2").',
'extends_to("I1", "C3").',
'extends_to("I1", "I2").',
'extends_to("I3", "C4").',
'extends_to("I3", "C6").',
'extends_to("I3", "I5").',
'extends_to("I3", "I6").',
'extends_to("I3", "I7").',
'extends_to("I3", "I8").',
'extends_to("I4", "C5").',
'extends_to("I4", "C6").',
'extends_to("I4", "I9").',
'extends_to("I4", "I10").',
'extends_to("I4", "I11").',
'extends_to("I4", "I12").',
'extends_to("I4", "I13").',
'extends_to("I4", "I14").',
'symbols("C1";"C2";"C3";"C4";"C5";"C6";"I1";"I10";"I11";"I12";"I13";"I14";"I2";"I3";"I4";"I5";"I6";"I7";"I8";"I9").',
]
deps = """\
Extends I1 -> Type C1, Type C2, Type C3, Type I2
Extends I3 -> Type C4,
Type C6,
Type I5,
Type I6,
Type I7,
Type I8
Extends I4 -> Type C5,
Type C6,
Type I9,
Type I10,
Type I11,
Type I12,
Type I13,
Type I14"""
self.assertListEqual(
exp,
hh_codesynthesis.extract_logic_rules(deps.replace(",\n", ",").split("\n")),
)
def test_extends_type_method_dependency(self) -> None:
exp = [
'extends_to("A", "B").',
'extends_to("I", "B").',
'extends_to("T", "A").',
'method("A", "foo", "B").',
'type("A", "B").',
'type("I", "B").',
'type("T", "A").',
'type("T", "B").',
'symbols("A";"B";"I";"T").',
]
deps = """\
Extends A -> Type B
Extends I -> Type B
Extends T -> Type A
Method A::foo -> Type B
Type A -> Type B
Type I -> Type B
Type T -> Type A, Type B"""
self.assertListEqual(
exp, hh_codesynthesis.extract_logic_rules(deps.split("\n"))
)
def test_unsupported_type_dependency(self) -> None:
exp = [
'extends_to("A", "B").',
'type("A", "B").',
'symbols("A";"B").',
]
deps = r"""
Extends A -> Type B
Type A -> Type B
Type HH\Capabilities\AccessGlobals -> Type B
Type HH\Contexts\Unsafe\globals -> Type A"""
self.assertListEqual(
exp, hh_codesynthesis.extract_logic_rules(deps.split("\n"))
)
class DoReasoningTest(unittest.TestCase):
def extract_run_and_compare(
self, deps: str, exp: str, generator: hh_codesynthesis.CodeGenerator
) -> None:
additional_programs = hh_codesynthesis.extract_logic_rules(deps.split("\n"))
hh_codesynthesis.do_reasoning(
additional_programs=additional_programs, generator=generator
)
self.assertEqual(str(generator), exp)
def test_clingo_exception(self) -> None:
deps = ["rule_without_period(symbol1, symbol2)"]
raw_codegen = hh_codesynthesis.CodeGenerator()
with self.assertRaises(expected_exception=RuntimeError, msg="parsing failed"):
hh_codesynthesis.do_reasoning(
additional_programs=deps, generator=raw_codegen
)
def test_extends_dependency(self) -> None:
exp = [
'class("B")',
'class("I")',
'extends("A","T")',
'extends("B","I")',
'implements("B","A")',
'interface("A")',
'interface("T")',
]
rules = [
'extends_to("A", "B").',
'extends_to("I", "B").',
'extends_to("T", "A").',
'symbols("A";"B";"I";"T").',
]
raw_codegen = hh_codesynthesis.CodeGenerator()
hh_codesynthesis.do_reasoning(additional_programs=rules, generator=raw_codegen)
self.assertListEqual(sorted(str(raw_codegen).split()), exp)
def test_type_dependency(self) -> None:
exp = ['class("B")', 'has_method_with_parameter("C","B")', 'interface("C")']
rules = ['type("B", "C").' 'symbols("B"; "C").']
raw_codegen = hh_codesynthesis.CodeGenerator()
hh_codesynthesis.do_reasoning(additional_programs=rules, generator=raw_codegen)
self.assertListEqual(sorted(str(raw_codegen).split()), exp)
def test_method_dependency(self) -> None:
exp = [
'add_method("B","Foo")',
'class("C")',
'has_method_with_parameter("C","B")',
'interface("B")',
'invokes_in_method("C","B","Foo")',
]
rules = ['method("B", "Foo", "C").' 'symbols("B"; "C").']
raw_codegen = hh_codesynthesis.CodeGenerator()
hh_codesynthesis.do_reasoning(additional_programs=rules, generator=raw_codegen)
self.assertListEqual(sorted(str(raw_codegen).split()), exp)
def test_method_type_extends_dependencies(self) -> None:
exp = [
'add_method("B","Foo")',
'add_method("C","Foo")',
'class("C")',
'implements("C","B")',
'interface("B")',
]
rules = [
'extends_to("B", "C").',
'method("B", "Foo", "C").',
'type("B", "C").',
'symbols("B"; "C").',
]
raw_codegen = hh_codesynthesis.CodeGenerator()
hh_codesynthesis.do_reasoning(additional_programs=rules, generator=raw_codegen)
self.assertListEqual(sorted(str(raw_codegen).split()), exp)
def test_extends_dependency_with_rule_extraction(self) -> None:
exp = [
'add_method("A","foo")',
'add_method("B","foo")',
'class("B")',
'class("I")',
'extends("A","T")',
'extends("B","I")',
'implements("B","A")',
'interface("A")',
'interface("T")',
]
deps = """\
Extends A -> Type B
Extends I -> Type B
Extends T -> Type A
Method A::foo -> Type B
Type A -> Type B
Type I -> Type B
Type T -> Type A, Type B
"""
raw_codegen = hh_codesynthesis.CodeGenerator()
additional_programs = hh_codesynthesis.extract_logic_rules(deps.split("\n"))
hh_codesynthesis.do_reasoning(
additional_programs=additional_programs, generator=raw_codegen
)
self.assertListEqual(sorted(str(raw_codegen).split()), exp)
def test_extends_dependency_hack_codegen(self) -> None:
exp = """\
<?hh
class B extends I implements A {}
class I {}
interface A extends T {}
interface T {}
"""
rules = [
'extends_to("A", "B").',
'extends_to("I", "B").',
'extends_to("T", "A").',
'symbols("A";"B";"I";"T").',
]
hack_codegen = hackGenerator.HackCodeGenerator()
hh_codesynthesis.do_reasoning(additional_programs=rules, generator=hack_codegen)
self.assertEqual(str(hack_codegen), exp)
def test_extends_dependency_with_rule_extraction_hack_codegen(self) -> None:
exp = """\
<?hh
class B extends I implements A {}
class I {}
interface A extends T {}
interface T {}
"""
deps = """\
Extends A -> Type B
Extends I -> Type B
Extends T -> Type A
Type A -> Type B
Type I -> Type B
Type T -> Type A, Type B
"""
self.extract_run_and_compare(deps, exp, hackGenerator.HackCodeGenerator())
def test_method_dependency_with_rule_extraction_hack_codegen_override(self) -> None:
exp = """\
<?hh
class B implements A {
public function foo(): void{}
}
interface A {
public function foo(): void;
}
"""
deps = """\
Extends A -> Type B
Method A::foo -> Type B
Type A -> Type B
"""
self.extract_run_and_compare(deps, exp, hackGenerator.HackCodeGenerator())
def test_type_dependency_with_rule_extraction_hack_codegen(self) -> None:
exp = """\
<?hh
class A {}
interface B {
public function dummy_B_method(A $A_obj): void;
}
"""
deps = """\
Type A -> Type B
"""
self.extract_run_and_compare(deps, exp, hackGenerator.HackCodeGenerator())
class ReadFromFileTest(unittest.TestCase):
def test_read(self) -> None:
exp = [
'extends_to("A", "B").',
'extends_to("I", "B").',
'extends_to("T", "A").',
'method("A", "foo", "B").',
'type("A", "B").',
'type("I", "B").',
'type("T", "A").',
'type("T", "B").',
'symbols("A";"B";"I";"T").',
]
deps = """\
Extends A -> Type B
Extends I -> Type B
Extends T -> Type A
Method A::foo -> Type B
Type A -> Type B
Type I -> Type B
Type T -> Type A, Type B
"""
with tempfile.NamedTemporaryFile(mode="w") as fp:
fp.write(deps)
fp.flush()
self.assertListEqual(
exp,
hh_codesynthesis.extract_logic_rules(
hh_codesynthesis.read_from_file_or_stdin(fp.name)
),
)
def test_non_exist(self) -> None:
test_file = "non_exist.in"
with self.assertRaises(expected_exception=FileNotFoundError):
hh_codesynthesis.extract_logic_rules(
hh_codesynthesis.read_from_file_or_stdin(test_file)
)
class WriteToFileTest(unittest.TestCase):
def test_hack_output(self) -> None:
exp = """\
<?hh
class C1 {}
class C2 extends C1 implements I1 {}
interface I1 {}
"""
generator = hackGenerator.HackCodeGenerator()
generator._add_class("C1")
generator._add_class("C2")
generator._add_interface("I1")
generator._add_extend("C2", "C1")
generator._add_implement("C2", "I1")
with tempfile.NamedTemporaryFile("r") as fp:
hh_codesynthesis.output_to_file_or_stdout(generator, fp.name)
lines = fp.readlines()
self.assertEqual("".join(lines), exp)
| true | true |
f734353463d9f084656926c5976d0185d41c41a7 | 7,041 | py | Python | container_service_extension/lib/pksclient/api/usage_api.py | arunmk/container-service-extension | 5e67df64fd5ed7fbb664d449356cb983cecbca12 | [
"BSD-3-Clause"
] | 81 | 2017-07-05T19:42:41.000Z | 2022-03-09T22:04:05.000Z | container_service_extension/lib/pksclient/api/usage_api.py | arunmk/container-service-extension | 5e67df64fd5ed7fbb664d449356cb983cecbca12 | [
"BSD-3-Clause"
] | 670 | 2017-07-05T16:48:02.000Z | 2022-03-31T13:40:53.000Z | container_service_extension/lib/pksclient/api/usage_api.py | arunmk/container-service-extension | 5e67df64fd5ed7fbb664d449356cb983cecbca12 | [
"BSD-3-Clause"
] | 64 | 2017-07-05T16:32:55.000Z | 2022-03-23T09:36:03.000Z | # coding: utf-8
"""
PKS
PKS API # noqa: E501
OpenAPI spec version: 1.1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
# python 2 and python 3 compatibility library
import six
from container_service_extension.lib.pksclient.api_client import ApiClient
class UsageApi(object):
    """Client wrapper for the PKS ``/usages`` endpoints.

    NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    Ref: https://github.com/swagger-api/swagger-codegen
    """

    def __init__(self, api_client=None):
        """Create the API wrapper.

        :param api_client: transport used to issue HTTP calls; a default
            ApiClient is constructed when none is supplied.
        """
        self.api_client = ApiClient() if api_client is None else api_client

    def get_usage(self, owner, **kwargs):  # noqa: E501
        """get_usage  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True

        >>> thread = api.get_usage(owner, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str owner: The owner name (required)
        :return: Usage
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Callers of this convenience wrapper only want the payload, not the
        # (data, status, headers) tuple the low-level call can return.
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            # Asynchronous: hand back the request thread untouched.
            return self.get_usage_with_http_info(owner, **kwargs)  # noqa: E501
        return self.get_usage_with_http_info(owner, **kwargs)  # noqa: E501

    def get_usage_with_http_info(self, owner, **kwargs):  # noqa: E501
        """get_usage  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True

        >>> thread = api.get_usage_with_http_info(owner, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str owner: The owner name (required)
        :return: Usage
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Names accepted in **kwargs: the operation parameter plus the
        # transport controls understood by ApiClient.call_api.
        accepted = ('owner', 'async_req', '_return_http_data_only',
                    '_preload_content', '_request_timeout')
        for name in kwargs:
            if name not in accepted:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_usage" % name
                )
        # 'owner' is a required path parameter and may not be None.
        if owner is None:
            raise ValueError("Missing the required parameter `owner` when calling `get_usage`")  # noqa: E501

        header_params = {
            # We expect a JSON response body.
            'Accept': self.api_client.select_header_accept(
                ['application/json']),  # noqa: E501
        }

        return self.api_client.call_api(
            '/usages/{owner}', 'GET',
            {'owner': owner},     # path params
            [],                   # query params
            header_params,
            body=None,
            post_params=[],
            files={},
            response_type='Usage',  # noqa: E501
            auth_settings=['basicAuth', 'uaa'],  # noqa: E501
            async_req=kwargs.get('async_req'),
            _return_http_data_only=kwargs.get('_return_http_data_only'),
            _preload_content=kwargs.get('_preload_content', True),
            _request_timeout=kwargs.get('_request_timeout'),
            collection_formats={})

    def list_usages(self, **kwargs):  # noqa: E501
        """List all usage  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True

        >>> thread = api.list_usages(async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :return: list[Usage]
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Same payload-only convenience behaviour as get_usage above.
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.list_usages_with_http_info(**kwargs)  # noqa: E501
        return self.list_usages_with_http_info(**kwargs)  # noqa: E501

    def list_usages_with_http_info(self, **kwargs):  # noqa: E501
        """List all usage  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True

        >>> thread = api.list_usages_with_http_info(async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :return: list[Usage]
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # This operation takes no parameters of its own; only the transport
        # controls understood by ApiClient.call_api are accepted.
        accepted = ('async_req', '_return_http_data_only',
                    '_preload_content', '_request_timeout')
        for name in kwargs:
            if name not in accepted:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method list_usages" % name
                )

        header_params = {
            # We expect a JSON response body.
            'Accept': self.api_client.select_header_accept(
                ['application/json']),  # noqa: E501
        }

        return self.api_client.call_api(
            '/usages', 'GET',
            {},                   # path params
            [],                   # query params
            header_params,
            body=None,
            post_params=[],
            files={},
            response_type='list[Usage]',  # noqa: E501
            auth_settings=['basicAuth', 'uaa'],  # noqa: E501
            async_req=kwargs.get('async_req'),
            _return_http_data_only=kwargs.get('_return_http_data_only'),
            _preload_content=kwargs.get('_preload_content', True),
            _request_timeout=kwargs.get('_request_timeout'),
            collection_formats={})
| 33.369668 | 109 | 0.592814 |
from __future__ import absolute_import
import six
from container_service_extension.lib.pksclient.api_client import ApiClient
class UsageApi(object):
    """Generated swagger-style client for the PKS ``/usages`` endpoints.

    NOTE(review): this is auto-generated client code whose comments were
    stripped; docstrings below were re-added for readability only.
    """
    def __init__(self, api_client=None):
        # Fall back to a default ApiClient when the caller does not inject one.
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client
    def get_usage(self, owner, **kwargs):
        """Fetch usage for a single `owner`.

        Returns the deserialized data directly, or the request thread when
        ``async_req=True`` is passed.
        """
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.get_usage_with_http_info(owner, **kwargs)
        else:
            (data) = self.get_usage_with_http_info(owner, **kwargs)
            return data
    def get_usage_with_http_info(self, owner, **kwargs):
        """GET /usages/{owner}; validates kwargs and delegates to ApiClient."""
        # Accepted keyword arguments: the one path parameter plus the
        # generated-client plumbing options.
        all_params = ['owner']
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # locals() is captured so validated kwargs can be looked up by name.
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_usage" % key
                )
            params[key] = val
        del params['kwargs']
        # `owner` is required and must be non-None.
        if ('owner' not in params or
                params['owner'] is None):
            raise ValueError("Missing the required parameter `owner` when calling `get_usage`")
        collection_formats = {}
        path_params = {}
        if 'owner' in params:
            path_params['owner'] = params['owner']
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])
        # Authentication setting
        auth_settings = ['basicAuth', 'uaa']
        return self.api_client.call_api(
            '/usages/{owner}', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='Usage',
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
    def list_usages(self, **kwargs):
        """List all usage.

        Returns the deserialized data directly, or the request thread when
        ``async_req=True`` is passed.
        """
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.list_usages_with_http_info(**kwargs)
        else:
            (data) = self.list_usages_with_http_info(**kwargs)
            return data
    def list_usages_with_http_info(self, **kwargs):
        """GET /usages; validates kwargs and delegates to ApiClient."""
        # This endpoint takes no endpoint-specific parameters.
        all_params = []
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method list_usages" % key
                )
            params[key] = val
        del params['kwargs']
        collection_formats = {}
        path_params = {}
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])
        # Authentication setting
        auth_settings = ['basicAuth', 'uaa']
        return self.api_client.call_api(
            '/usages', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='list[Usage]',
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
| true | true |
f73437decdc2b3be3f553e9d269d83a639111390 | 3,340 | py | Python | pogom/pgoapi/protos/POGOProtos/Inventory/InventoryDelta_pb2.py | tier4fusion/pogom-updated | 31c4db3dfc85b19abb39c2e43f5efa530c65159e | [
"MIT"
] | 2,557 | 2016-07-19T22:20:45.000Z | 2022-01-25T10:53:35.000Z | pogom/pgoapi/protos/POGOProtos/Inventory/InventoryDelta_pb2.py | tier4fusion/pogom-updated | 31c4db3dfc85b19abb39c2e43f5efa530c65159e | [
"MIT"
] | 1,360 | 2016-07-20T02:06:42.000Z | 2021-07-27T12:46:40.000Z | pogom/pgoapi/protos/POGOProtos/Inventory/InventoryDelta_pb2.py | tier4fusion/pogom-updated | 31c4db3dfc85b19abb39c2e43f5efa530c65159e | [
"MIT"
] | 607 | 2016-07-20T03:34:04.000Z | 2022-01-05T14:57:09.000Z | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: POGOProtos/Inventory/InventoryDelta.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from POGOProtos.Inventory import InventoryItem_pb2 as POGOProtos_dot_Inventory_dot_InventoryItem__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='POGOProtos/Inventory/InventoryDelta.proto',
package='POGOProtos.Inventory',
syntax='proto3',
serialized_pb=_b('\n)POGOProtos/Inventory/InventoryDelta.proto\x12\x14POGOProtos.Inventory\x1a(POGOProtos/Inventory/InventoryItem.proto\"\x87\x01\n\x0eInventoryDelta\x12\x1d\n\x15original_timestamp_ms\x18\x01 \x01(\x03\x12\x18\n\x10new_timestamp_ms\x18\x02 \x01(\x03\x12<\n\x0finventory_items\x18\x03 \x03(\x0b\x32#.POGOProtos.Inventory.InventoryItemb\x06proto3')
,
dependencies=[POGOProtos_dot_Inventory_dot_InventoryItem__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_INVENTORYDELTA = _descriptor.Descriptor(
name='InventoryDelta',
full_name='POGOProtos.Inventory.InventoryDelta',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='original_timestamp_ms', full_name='POGOProtos.Inventory.InventoryDelta.original_timestamp_ms', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='new_timestamp_ms', full_name='POGOProtos.Inventory.InventoryDelta.new_timestamp_ms', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='inventory_items', full_name='POGOProtos.Inventory.InventoryDelta.inventory_items', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=110,
serialized_end=245,
)
_INVENTORYDELTA.fields_by_name['inventory_items'].message_type = POGOProtos_dot_Inventory_dot_InventoryItem__pb2._INVENTORYITEM
DESCRIPTOR.message_types_by_name['InventoryDelta'] = _INVENTORYDELTA
InventoryDelta = _reflection.GeneratedProtocolMessageType('InventoryDelta', (_message.Message,), dict(
DESCRIPTOR = _INVENTORYDELTA,
__module__ = 'POGOProtos.Inventory.InventoryDelta_pb2'
# @@protoc_insertion_point(class_scope:POGOProtos.Inventory.InventoryDelta)
))
_sym_db.RegisterMessage(InventoryDelta)
# @@protoc_insertion_point(module_scope)
| 38.390805 | 365 | 0.78503 |
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
_sym_db = _symbol_database.Default()
from POGOProtos.Inventory import InventoryItem_pb2 as POGOProtos_dot_Inventory_dot_InventoryItem__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='POGOProtos/Inventory/InventoryDelta.proto',
package='POGOProtos.Inventory',
syntax='proto3',
serialized_pb=_b('\n)POGOProtos/Inventory/InventoryDelta.proto\x12\x14POGOProtos.Inventory\x1a(POGOProtos/Inventory/InventoryItem.proto\"\x87\x01\n\x0eInventoryDelta\x12\x1d\n\x15original_timestamp_ms\x18\x01 \x01(\x03\x12\x18\n\x10new_timestamp_ms\x18\x02 \x01(\x03\x12<\n\x0finventory_items\x18\x03 \x03(\x0b\x32#.POGOProtos.Inventory.InventoryItemb\x06proto3')
,
dependencies=[POGOProtos_dot_Inventory_dot_InventoryItem__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_INVENTORYDELTA = _descriptor.Descriptor(
name='InventoryDelta',
full_name='POGOProtos.Inventory.InventoryDelta',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='original_timestamp_ms', full_name='POGOProtos.Inventory.InventoryDelta.original_timestamp_ms', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='new_timestamp_ms', full_name='POGOProtos.Inventory.InventoryDelta.new_timestamp_ms', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='inventory_items', full_name='POGOProtos.Inventory.InventoryDelta.inventory_items', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=110,
serialized_end=245,
)
_INVENTORYDELTA.fields_by_name['inventory_items'].message_type = POGOProtos_dot_Inventory_dot_InventoryItem__pb2._INVENTORYITEM
DESCRIPTOR.message_types_by_name['InventoryDelta'] = _INVENTORYDELTA
InventoryDelta = _reflection.GeneratedProtocolMessageType('InventoryDelta', (_message.Message,), dict(
DESCRIPTOR = _INVENTORYDELTA,
__module__ = 'POGOProtos.Inventory.InventoryDelta_pb2'
# @@protoc_insertion_point(class_scope:POGOProtos.Inventory.InventoryDelta)
))
_sym_db.RegisterMessage(InventoryDelta)
# @@protoc_insertion_point(module_scope)
| true | true |
f73439a039a68664196ccfefac524b885ff036d1 | 1,663 | py | Python | accelerator/examples/printer.py | eBay/accelerator | 218d9a5e4451ac72b9e65df6c5b32e37d25136c8 | [
"Apache-2.0"
] | 143 | 2018-04-20T18:50:41.000Z | 2022-02-06T07:07:35.000Z | accelerator/examples/printer.py | exaxorg/accelerator | d6132f215585b98d2ad14c5d74d2c937fbd940e2 | [
"Apache-2.0"
] | null | null | null | accelerator/examples/printer.py | exaxorg/accelerator | d6132f215585b98d2ad14c5d74d2c937fbd940e2 | [
"Apache-2.0"
] | 29 | 2018-04-20T18:50:43.000Z | 2021-04-27T18:42:23.000Z | # This isn't intended as an example, it's just here to simplify all the
# output formating the examples do.
from accelerator import colour
class Printer:
    """Indentation-aware printer used by the examples for consistent output.

    Text is dedented by its longest common leading-whitespace prefix, then
    re-indented to the current level.  Entering the printer as a context
    manager indents subsequent output two columns past the last printed line.
    """

    def __init__(self, indent=0):
        self._indent = indent
        self._last_indent = 0
        self._unindent = []

    def _prefix(self, txt):
        # Yield the leading whitespace characters of txt, then a None sentinel.
        for ch in txt:
            if not ch.isspace():
                break
            yield ch
        yield None

    def _print(self, a, indent, *attrs):
        txt = ' '.join(str(v) for v in a)
        lines = txt.lstrip('\n').rstrip().split('\n')
        # Collect the whitespace prefix of every non-blank line.
        prefixes = [list(self._prefix(line)) for line in lines if line.strip()]
        prefix_len = 0
        if prefixes:
            # Drop the sentinel from the first prefix, then grow prefix_len
            # while every line shares the same character at that position.
            prefixes[0].pop()
            while len(prefixes[0]) > prefix_len and len(set(p[prefix_len] for p in prefixes)) == 1:
                prefix_len += 1
        # Strip the common prefix and re-indent to the current level.
        lines = [line[prefix_len:].rstrip() for line in lines]
        self._last_indent = max(indent + self._indent, 0)
        pad = ' ' * self._last_indent
        lines = [pad + line if line else '' for line in lines]
        txt = '\n'.join(lines)
        if attrs and txt:
            txt = colour(txt, *attrs)
        print(txt)
        return self

    def __call__(self, *a):
        return self._print(a, 0, 'brightblue')

    def header(self, *a):
        return self._print(a, 0, 'bold', 'brightblue')

    def command(self, *a):
        return self._print(a, 2, 'bold')

    def output(self, *a):
        return self._print(a, 2)

    def plain(self, *a):
        return self._print(a, 0)

    def source(self, filename):
        return self._print(('Source: ' + colour.bold(filename),), -1000)

    def __enter__(self):
        # Remember the current level; indent past the last printed line.
        self._unindent.append(self._indent)
        self._indent = self._last_indent + 2
        return self

    def __exit__(self, exc_type, exc, tb):
        self._indent = self._unindent.pop()
prt = Printer()
| 24.455882 | 90 | 0.659651 |
from accelerator import colour
class Printer:
    """Pretty-printer for example output.

    Dedents printed text by the common leading-whitespace prefix of its
    non-blank lines and re-indents it to the active indentation level; the
    context-manager protocol pushes the level two columns past whatever was
    printed last.
    """

    def __init__(self, indent=0):
        self._indent = indent
        self._last_indent = 0
        self._unindent = []

    def _prefix(self, txt):
        # Leading whitespace characters of txt, terminated by None.
        for character in txt:
            if not character.isspace():
                break
            yield character
        yield None

    def _print(self, a, indent, *attrs):
        joined = ' '.join(str(item) for item in a)
        lines = joined.lstrip('\n').rstrip().split('\n')
        prefixes = []
        for line in lines:
            if line.strip():
                prefixes.append(list(self._prefix(line)))
        common = 0
        if prefixes:
            # First prefix loses its sentinel; extend the shared run while
            # all prefixes agree on the character at position `common`.
            prefixes[0].pop()
            while len(prefixes[0]) > common and len(set(p[common] for p in prefixes)) == 1:
                common += 1
        stripped = [line[common:].rstrip() for line in lines]
        self._last_indent = max(indent + self._indent, 0)
        margin = ' ' * self._last_indent
        joined = '\n'.join(margin + line if line else '' for line in stripped)
        if attrs and joined:
            joined = colour(joined, *attrs)
        print(joined)
        return self

    def __call__(self, *a):
        return self._print(a, 0, 'brightblue')

    def header(self, *a):
        return self._print(a, 0, 'bold', 'brightblue')

    def command(self, *a):
        return self._print(a, 2, 'bold')

    def output(self, *a):
        return self._print(a, 2)

    def plain(self, *a):
        return self._print(a, 0)

    def source(self, filename):
        return self._print(('Source: ' + colour.bold(filename),), -1000)

    def __enter__(self):
        self._unindent.append(self._indent)
        self._indent = self._last_indent + 2
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        self._indent = self._unindent.pop()
prt = Printer()
| true | true |
f7343a622d7f533789cc7b05b9b75120bde054ca | 76,341 | py | Python | fluids/compressible.py | celikten/fluids | acfd9a06acef18817693f6296aadf8d1c7cabee4 | [
"MIT"
] | 218 | 2016-01-04T07:44:46.000Z | 2022-03-30T08:06:36.000Z | fluids/compressible.py | celikten/fluids | acfd9a06acef18817693f6296aadf8d1c7cabee4 | [
"MIT"
] | 47 | 2016-04-20T06:06:10.000Z | 2022-03-04T14:45:39.000Z | fluids/compressible.py | celikten/fluids | acfd9a06acef18817693f6296aadf8d1c7cabee4 | [
"MIT"
] | 65 | 2016-01-29T03:28:01.000Z | 2022-01-26T16:17:05.000Z | # -*- coding: utf-8 -*-
"""Chemical Engineering Design Library (ChEDL). Utilities for process modeling.
Copyright (C) 2016, 2017, 2018, 2019, 2020 Caleb Bell <Caleb.Andrew.Bell@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
This module contains equations for modeling flow where density changes
significantly during the process - compressible flow. Also included are
equations for choked flow - the phenomenon where the velocity of a fluid
reaches its speed of sound.
For reporting bugs, adding feature requests, or submitting pull requests,
please use the `GitHub issue tracker <https://github.com/CalebBell/fluids/>`_
or contact the author at Caleb.Andrew.Bell@gmail.com.
.. contents:: :local:
Compression Processes
---------------------
.. autofunction:: isothermal_work_compression
.. autofunction:: isentropic_work_compression
.. autofunction:: isentropic_T_rise_compression
.. autofunction:: isentropic_efficiency
.. autofunction:: polytropic_exponent
Compressible Flow
-----------------
.. autofunction:: isothermal_gas
Empirical Compressible Flow
---------------------------
.. autofunction:: Panhandle_A
.. autofunction:: Panhandle_B
.. autofunction:: Weymouth
.. autofunction:: Spitzglass_high
.. autofunction:: Spitzglass_low
.. autofunction:: Oliphant
.. autofunction:: Fritzsche
.. autofunction:: Muller
.. autofunction:: IGT
Critical Flow
-------------
.. autofunction:: T_critical_flow
.. autofunction:: P_critical_flow
.. autofunction:: is_critical_flow
.. autofunction:: P_isothermal_critical_flow
.. autofunction:: P_upstream_isothermal_critical_flow
Stagnation Point
----------------
.. autofunction:: stagnation_energy
.. autofunction:: P_stagnation
.. autofunction:: T_stagnation
.. autofunction:: T_stagnation_ideal
"""
from __future__ import division
from math import sqrt, log, pi, exp, isinf
from fluids.constants import R
from fluids.numerics import secant, newton, ridder, lambertw
__all__ = ['Panhandle_A', 'Panhandle_B', 'Weymouth', 'Spitzglass_high',
'Spitzglass_low', 'Oliphant', 'Fritzsche', 'Muller', 'IGT', 'isothermal_gas',
'isothermal_work_compression', 'polytropic_exponent',
'isentropic_work_compression', 'isentropic_efficiency',
'isentropic_T_rise_compression', 'T_critical_flow',
'P_critical_flow', 'P_isothermal_critical_flow',
'is_critical_flow', 'stagnation_energy', 'P_stagnation',
'T_stagnation', 'T_stagnation_ideal']
def isothermal_work_compression(P1, P2, T, Z=1.0):
    r'''Return the reversible work per mole for isothermal compression or
    expansion of a gas from `P1` to `P2`.

    .. math::
        W = zRT\ln\left(\frac{P_2}{P_1}\right)

    Parameters
    ----------
    P1 : float
        Inlet pressure, [Pa]
    P2 : float
        Outlet pressure, [Pa]
    T : float
        Temperature of the gas going through an isothermal process, [K]
    Z : float
        Constant compressibility factor of the gas, [-]

    Returns
    -------
    W : float
        Work performed per mole of gas compressed/expanded [J/mol]

    Notes
    -----
    Positive for compression, negative for expansion.  This is the best
    possible (reversible) case; all real compressors require more work.  Use
    an average `Z` where `Z` varies, or apply the expression over small
    pressure steps and sum the results for better accuracy.

    Examples
    --------
    >>> isothermal_work_compression(1E5, 1E6, 300)
    5743.427304244769

    References
    ----------
    .. [1] Couper, James R., W. Roy Penney, and James R. Fair. Chemical Process
       Equipment: Selection and Design. 2nd ed. Amsterdam ; Boston: Gulf
       Professional Publishing, 2009.
    '''
    pressure_ratio = P2/P1
    return Z*R*T*log(pressure_ratio)
def isentropic_work_compression(T1, k, Z=1.0, P1=None, P2=None, W=None, eta=None):
    r'''Solve the constant-heat-capacity isentropic (adiabatic) compression
    relation for whichever one of `W`, `P1`, `P2`, or `eta` was not given.
    Supplying the polytropic exponent `n` as `k` and a polytropic efficiency
    as `eta` turns this into the polytropic model.

    .. math::
        W = \left(\frac{k}{k-1}\right)ZRT_1\left[\left(\frac{P_2}{P_1}
        \right)^{(k-1)/k}-1\right]/\eta_{isentropic}

    Parameters
    ----------
    T1 : float
        Initial temperature of the gas, [K]
    k : float
        Isentropic exponent of the gas (Cp/Cv) or polytropic exponent `n` to
        use this as a polytropic model instead [-]
    Z : float, optional
        Constant compressibility factor of the gas, [-]
    P1 : float, optional
        Inlet pressure, [Pa]
    P2 : float, optional
        Outlet pressure, [Pa]
    W : float, optional
        Work performed per mole of gas compressed/expanded [J/mol]
    eta : float, optional
        Isentropic efficiency of the process or polytropic efficiency of the
        process to use this as a polytropic model instead [-]

    Returns
    -------
    W, P1, P2, or eta : float
        The missing input which was solved for [base SI]

    Raises
    ------
    ValueError
        If fewer (or more) than three of `W`, `P1`, `P2` and `eta` are given.

    Notes
    -----
    Returns negative work for expansion, positive for compression; always of
    larger magnitude than the isothermal case for the same pressure ratio.
    Use an average `Z` where `Z` varies.

    Examples
    --------
    >>> isentropic_work_compression(P1=1E5, P2=1E6, T1=300, k=1.4, eta=0.78)
    10416.876986384483

    References
    ----------
    .. [1] Couper, James R., W. Roy Penney, and James R. Fair. Chemical Process
       Equipment: Selection and Design. 2nd ed. Amsterdam ; Boston: Gulf
       Professional Publishing, 2009.
    '''
    # (k-1)/k appears in every form of the equation; hoist it.
    expnt = (k - 1.0)/k
    ZRT1 = Z*R*T1
    if W is None and eta is not None and P1 is not None and P2 is not None:
        return ZRT1*((P2/P1)**expnt - 1.0)/(expnt*eta)
    if eta is None and W is not None and P1 is not None and P2 is not None:
        return ZRT1*((P2/P1)**expnt - 1.0)/(expnt*W)
    if W is not None and eta is not None:
        # Shared base term when solving the equation for either pressure.
        base = 1.0 + W*eta*expnt/ZRT1
        if P1 is None and P2 is not None:
            return P2*base**(-1.0/expnt)
        if P2 is None and P1 is not None:
            return P1*base**(1.0/expnt)
    raise ValueError('Three of W, P1, P2, and eta must be specified.')
def isentropic_T_rise_compression(T1, P1, P2, k, eta=1):
    r'''Return the outlet temperature of a gas compressed or expanded under
    isentropic, adiabatic conditions with constant Cp and Cv.  Supplying the
    polytropic exponent `n` as `k` and a polytropic efficiency as `eta` turns
    this into the polytropic model.

    .. math::
        T_2 = T_1 + \frac{\Delta T_s}{\eta_s} = T_1 \left\{1 + \frac{1}
        {\eta_s}\left[\left(\frac{P_2}{P_1}\right)^{(k-1)/k}-1\right]\right\}

    Parameters
    ----------
    T1 : float
        Initial temperature of gas [K]
    P1 : float
        Initial pressure of gas [Pa]
    P2 : float
        Final pressure of gas [Pa]
    k : float
        Isentropic exponent of the gas (Cp/Cv) or polytropic exponent `n` to
        use this as a polytropic model instead [-]
    eta : float
        Isentropic efficiency of the process or polytropic efficiency of the
        process to use this as a polytropic model instead [-]

    Returns
    -------
    T2 : float
        Final temperature of gas [K]

    Notes
    -----
    With `eta` = 1 this reduces to the ideal relation
    :math:`T_2/T_1 = (P_2/P_1)^{(k-1)/k}`.

    Examples
    --------
    >>> isentropic_T_rise_compression(286.8, 54050, 432400, 1.4)
    519.5230938217768

    References
    ----------
    .. [1] Couper, James R., W. Roy Penney, and James R. Fair. Chemical Process
       Equipment: Selection and Design. 2nd ed. Amsterdam ; Boston: Gulf
       Professional Publishing, 2009.
    .. [2] GPSA. GPSA Engineering Data Book. 13th edition. Gas Processors
       Suppliers Association, Tulsa, OK, 2012.
    '''
    temperature_ratio = (P2/P1)**((k - 1.0)/k)
    return T1 + T1*(temperature_ratio - 1.0)/eta
def isentropic_efficiency(P1, P2, k, eta_s=None, eta_p=None):
    r'''Convert between isentropic (adiabatic) and polytropic efficiency:
    give one, get the other.

    .. math::
        \eta_s = \frac{(P_2/P_1)^{(k-1)/k}-1}
        {(P_2/P_1)^{\frac{k-1}{k\eta_p}}-1}

    .. math::
        \eta_p = \frac{\left(k - 1\right) \ln{\left (\frac{P_{2}}{P_{1}}
        \right )}}{k \ln{\left (\frac{1}{\eta_{s}} \left(\eta_{s}
        + \left(\frac{P_{2}}{P_{1}}\right)^{\frac{1}{k} \left(k - 1\right)}
        - 1\right) \right )}}

    Parameters
    ----------
    P1 : float
        Initial pressure of gas [Pa]
    P2 : float
        Final pressure of gas [Pa]
    k : float
        Isentropic exponent of the gas (Cp/Cv) [-]
    eta_s : float, optional
        Isentropic (adiabatic) efficiency of the process, [-]
    eta_p : float, optional
        Polytropic efficiency of the process, [-]

    Returns
    -------
    eta_s or eta_p : float
        Isentropic or polytropic efficiency, depending on input, [-]

    Raises
    ------
    ValueError
        If neither or both of `eta_s` and `eta_p` are given.

    Notes
    -----
    The `eta_p`-from-`eta_s` form was derived with SymPy.

    Examples
    --------
    >>> isentropic_efficiency(1E5, 1E6, 1.4, eta_p=0.78)
    0.7027614191263858

    References
    ----------
    .. [1] Couper, James R., W. Roy Penney, and James R. Fair. Chemical Process
       Equipment: Selection and Design. 2nd ed. Amsterdam ; Boston: Gulf
       Professional Publishing, 2009.
    '''
    # Exactly one of the two efficiencies must be supplied.
    if (eta_s is None) == (eta_p is None):
        raise ValueError('Either eta_s or eta_p is required')
    pr = P2/P1
    if eta_s is None:
        return (pr**((k - 1.0)/k) - 1.0)/(pr**((k - 1.0)/(k*eta_p)) - 1.0)
    numerator = (k - 1.0)*log(pr)
    denominator = k*log((eta_s + pr**((k - 1.0)/k) - 1.0)/eta_s)
    return numerator/denominator
def polytropic_exponent(k, n=None, eta_p=None):
    r'''Convert between the polytropic exponent and the polytropic
    efficiency: give one, get the other.

    .. math::
        n = \frac{k\eta_p}{1 - k(1-\eta_p)}

    .. math::
        \eta_p = \frac{\left(\frac{n}{n-1}\right)}{\left(\frac{k}{k-1}
        \right)} = \frac{n(k-1)}{k(n-1)}

    Parameters
    ----------
    k : float
        Isentropic exponent of the gas (Cp/Cv) [-]
    n : float, optional
        Polytropic exponent of the process [-]
    eta_p : float, optional
        Polytropic efficiency of the process, [-]

    Returns
    -------
    n or eta_p : float
        Polytropic exponent or polytropic efficiency, depending on input, [-]

    Raises
    ------
    ValueError
        If neither or both of `n` and `eta_p` are given.

    Examples
    --------
    >>> polytropic_exponent(1.4, eta_p=0.78)
    1.5780346820809246

    References
    ----------
    .. [1] Couper, James R., W. Roy Penney, and James R. Fair. Chemical Process
       Equipment: Selection and Design. 2nd ed. Amsterdam ; Boston: Gulf
       Professional Publishing, 2009.
    '''
    # Exactly one of n and eta_p must be supplied.
    if (n is None) == (eta_p is None):
        raise ValueError('Either n or eta_p is required')
    if n is None:
        return k*eta_p/(1.0 - k*(1.0 - eta_p))
    return n*(k - 1.0)/(k*(n - 1.0))
def T_critical_flow(T, k):
    r'''Return the critical-flow (throat, Ma=1) temperature for a fluid whose
    stagnation temperature and isentropic coefficient are known.  Normally
    used with converging/diverging nozzles.

    .. math::
        \frac{T^*}{T_0} = \frac{2}{k+1}

    Parameters
    ----------
    T : float
        Stagnation temperature of a fluid with Ma=1 [K]
    k : float
        Isentropic coefficient []

    Returns
    -------
    Tcf : float
        Critical flow temperature at Ma=1 [K]

    Notes
    -----
    Assumes isentropic flow.

    Examples
    --------
    Example 12.4 in [1]_:

    >>> T_critical_flow(473, 1.289)
    413.2809086937528

    References
    ----------
    .. [1] Cengel, Yunus, and John Cimbala. Fluid Mechanics: Fundamentals and
       Applications. Boston: McGraw Hill Higher Education, 2006.
    '''
    return 2.0*T/(k + 1.0)
def P_critical_flow(P, k):
    r'''Return the critical-flow (throat, Ma=1) pressure for a fluid whose
    stagnation pressure and isentropic coefficient are known.  Normally used
    with converging/diverging nozzles.

    .. math::
        \frac{P^*}{P_0} = \left(\frac{2}{k+1}\right)^{k/(k-1)}

    Parameters
    ----------
    P : float
        Stagnation pressure of a fluid with Ma=1 [Pa]
    k : float
        Isentropic coefficient []

    Returns
    -------
    Pcf : float
        Critical flow pressure at Ma=1 [Pa]

    Notes
    -----
    Assumes isentropic flow.

    Examples
    --------
    Example 12.4 in [1]_:

    >>> P_critical_flow(1400000, 1.289)
    766812.9022792266

    References
    ----------
    .. [1] Cengel, Yunus, and John Cimbala. Fluid Mechanics: Fundamentals and
       Applications. Boston: McGraw Hill Higher Education, 2006.
    '''
    critical_ratio = 2.0/(k + 1.0)
    return P*critical_ratio**(k/(k - 1.0))
def P_isothermal_critical_flow(P, fd, D, L):
    r'''Return the critical (choking) outlet pressure for isothermal gas flow
    in a pipe with friction, below which no further pressure drop is possible
    because a shock wave forms.

    .. math::
        P_2 = P_{1} e^{\frac{1}{2 D} \left(D \left(\operatorname{LambertW}
        {\left (- e^{\frac{1}{D} \left(- D - L f_d\right)} \right )} + 1\right)
        + L f_d\right)}

    Parameters
    ----------
    P : float
        Inlet pressure [Pa]
    fd : float
        Darcy friction factor for flow in pipe [-]
    D : float
        Diameter of pipe, [m]
    L : float
        Length of pipe, [m]

    Returns
    -------
    Pcf : float
        Critical flow pressure of a compressible gas flowing from `P1` to `Pcf`
        in a tube of length L and friction factor `fd` [Pa]

    Notes
    -----
    Assumes isothermal flow; obtained by solving the `isothermal_gas` model
    for maximum mass flow rate (derived with SymPy).

    Examples
    --------
    >>> P_isothermal_critical_flow(P=1E6, fd=0.00185, L=1000., D=0.5)
    389699.73176

    References
    ----------
    .. [1] Wilkes, James O. Fluid Mechanics for Chemical Engineers with
       Microfluidics and CFD. 2 edition. Upper Saddle River, NJ: Prentice Hall,
       2005.
    '''
    fdL_D = fd*L/D
    # The -1 branch of the Lambert W function gives the physical root
    # (found by trial and error in the original derivation).
    lambert_term = float(lambertw(-exp(-1.0 - fdL_D), -1).real)
    return P*exp(0.5*(lambert_term + 1.0 + fdL_D))
def P_upstream_isothermal_critical_flow(P, fd, D, L):
    """Not part of the public API. Reverses `P_isothermal_critical_flow`,
    recovering the upstream pressure from the critical outlet pressure.

    Examples
    --------
    >>> P_upstream_isothermal_critical_flow(P=389699.7317645518, fd=0.00185,
    ... L=1000., D=0.5)
    1000000.00000
    """
    fdL_D = fd*L/D
    # Same -1 branch of Lambert W as the forward calculation.
    lambert_term = float(lambertw(-exp(-1.0 - fdL_D), -1).real)
    return P*exp(-0.5*(lambert_term + fdL_D + 1.0))
def is_critical_flow(P1, P2, k):
    r'''Return True when flow driven by the pressure gradient `P1` - `P2` is
    critical (choked) for a fluid with the given isentropic coefficient, i.e.
    when the downstream pressure is below the critical-flow pressure.

    Parameters
    ----------
    P1 : float
        Higher, source pressure [Pa]
    P2 : float
        Lower, downstream pressure [Pa]
    k : float
        Isentropic coefficient []

    Returns
    -------
    flowtype : bool
        True if the flow is choked; otherwise False

    Notes
    -----
    Assumes isentropic flow; delegates to `P_critical_flow`.

    Examples
    --------
    Examples 1-2 from API 520.

    >>> is_critical_flow(670E3, 532E3, 1.11)
    False
    >>> is_critical_flow(670E3, 101E3, 1.11)
    True

    References
    ----------
    .. [1] API. 2014. API 520 - Part 1 Sizing, Selection, and Installation of
       Pressure-relieving Devices, Part I - Sizing and Selection, 9E.
    '''
    return P2 < P_critical_flow(P1, k)
def stagnation_energy(V):
    r'''Return the specific kinetic energy contributed to a fluid's enthalpy
    by its velocity `V`.

    .. math::
        \Delta H = \frac{V^2}{2}

    Parameters
    ----------
    V : float
        Velocity [m/s]

    Returns
    -------
    dH : float
        Incease in enthalpy [J/kg]

    Notes
    -----
    The units work out directly; the term is small but not trivial.

    Examples
    --------
    >>> stagnation_energy(125)
    7812.5

    References
    ----------
    .. [1] Cengel, Yunus, and John Cimbala. Fluid Mechanics: Fundamentals and
       Applications. Boston: McGraw Hill Higher Education, 2006.
    '''
    return V*V*0.5
def P_stagnation(P, T, Tst, k):
    r'''Return the stagnation pressure `Pst` of a fluid with isentropic
    coefficient `k`, given its normal pressure/temperature and its
    stagnation temperature. Normally used with converging/diverging nozzles.

    .. math::
        \frac{P_0}{P}=\left(\frac{T_0}{T}\right)^{\frac{k}{k-1}}

    Parameters
    ----------
    P : float
        Normal pressure of a fluid [Pa]
    T : float
        Normal temperature of a fluid [K]
    Tst : float
        Stagnation temperature of a fluid moving at a certain velocity [K]
    k : float
        Isentropic coefficient []

    Returns
    -------
    Pst : float
        Stagnation pressure of a fluid moving at a certain velocity [Pa]

    Notes
    -----
    Assumes isentropic flow.

    Examples
    --------
    Example 12-1 in [1]_.

    >>> P_stagnation(54050., 255.7, 286.8, 1.4)
    80772.80495900588

    References
    ----------
    .. [1] Cengel, Yunus, and John Cimbala. Fluid Mechanics: Fundamentals and
       Applications. Boston: McGraw Hill Higher Education, 2006.
    '''
    exponent = k/(k - 1.0)
    return P*(Tst/T)**exponent
def T_stagnation(T, P, Pst, k):
    r'''Return the stagnation temperature `Tst` of a fluid with isentropic
    coefficient `k`, given its normal pressure/temperature and its
    stagnation pressure. Normally used with converging/diverging nozzles.

    .. math::
        T=T_0\left(\frac{P}{P_0}\right)^{\frac{k-1}{k}}

    Parameters
    ----------
    T : float
        Normal temperature of a fluid [K]
    P : float
        Normal pressure of a fluid [Pa]
    Pst : float
        Stagnation pressure of a fluid moving at a certain velocity [Pa]
    k : float
        Isentropic coefficient []

    Returns
    -------
    Tst : float
        Stagnation temperature of a fluid moving at a certain velocity [K]

    Notes
    -----
    Assumes isentropic flow.

    Examples
    --------
    Example 12-1 in [1]_.

    >>> T_stagnation(286.8, 54050, 54050*8, 1.4)
    519.5230938217768

    References
    ----------
    .. [1] Cengel, Yunus, and John Cimbala. Fluid Mechanics: Fundamentals and
       Applications. Boston: McGraw Hill Higher Education, 2006.
    '''
    pressure_ratio = Pst/P
    return T*pressure_ratio**((k - 1.0)/k)
def T_stagnation_ideal(T, V, Cp):
    r'''Return the ideal stagnation temperature `Tst` of a fluid with
    constant heat capacity `Cp`, moving at velocity `V` with static
    temperature `T`.

    .. math::
        T^* = T + \frac{V^2}{2C_p}

    Parameters
    ----------
    T : float
        Temperature [K]
    V : float
        Velocity [m/s]
    Cp : float
        Ideal heat capacity [J/kg/K]

    Returns
    -------
    Tst : float
        Stagnation temperature [K]

    Examples
    --------
    Example 12-1 in [1]_.

    >>> T_stagnation_ideal(255.7, 250, 1005.)
    286.79452736318405

    References
    ----------
    .. [1] Cengel, Yunus, and John Cimbala. Fluid Mechanics: Fundamentals and
       Applications. Boston: McGraw Hill Higher Education, 2006.
    '''
    # The specific kinetic energy, converted to a temperature rise via Cp.
    kinetic = 0.5*V*V
    return T + kinetic/Cp
def isothermal_gas_err_P1(P1, fd, rho, P2, L, D, m):
    """Residual between the target mass flow `m` and the flow predicted by
    `isothermal_gas` for a guessed upstream pressure `P1`; used by solvers."""
    predicted = isothermal_gas(rho, fd, P1=P1, P2=P2, L=L, D=D)
    return m - predicted
def isothermal_gas_err_P2(P2, rho, fd, P1, L, D, m):
    """Residual between the target mass flow `m` and the flow predicted by
    `isothermal_gas` for a guessed outlet pressure `P2`; used by solvers."""
    predicted = isothermal_gas(rho, fd, P1=P1, P2=P2, L=L, D=D)
    return m - predicted
def isothermal_gas_err_P2_basis(P1, P2, rho, fd, m, L, D):
    """Absolute difference between the guessed outlet pressure `P2` and the
    outlet pressure implied analytically by `isothermal_gas` from `P1` and
    `m`; used when iterating on `P1`."""
    P2_implied = isothermal_gas(rho, fd, m=m, P1=P1, P2=None, L=L, D=D)
    return abs(P2 - P2_implied)
def isothermal_gas_err_D(D, m, rho, fd, P1, P2, L):
    """Residual between the target mass flow `m` and the flow predicted by
    `isothermal_gas` for a guessed pipe diameter `D`; used by solvers."""
    predicted = isothermal_gas(rho, fd, P1=P1, P2=P2, L=L, D=D)
    return m - predicted
def isothermal_gas(rho, fd, P1=None, P2=None, L=None, D=None, m=None):
    r'''Calculation function for dealing with flow of a compressible gas in a
    pipeline for the complete isothermal flow equation. Can calculate any of
    the following, given all other inputs:
    * Mass flow rate
    * Upstream pressure (numerical)
    * Downstream pressure (analytical or numerical if an overflow occurs)
    * Diameter of pipe (numerical)
    * Length of pipe
    A variety of forms of this equation have been presented, differing in their
    use of the ideal gas law and choice of gas constant. The form here uses
    density explicitly, allowing for non-ideal values to be used.
    .. math::
        \dot m^2 = \frac{\left(\frac{\pi D^2}{4}\right)^2 \rho_{avg}
        \left(P_1^2-P_2^2\right)}{P_1\left(f_d\frac{L}{D} + 2\ln\frac{P_1}{P_2}
        \right)}
    Parameters
    ----------
    rho : float
        Average density of gas in pipe, [kg/m^3]
    fd : float
        Darcy friction factor for flow in pipe [-]
    P1 : float, optional
        Inlet pressure to pipe, [Pa]
    P2 : float, optional
        Outlet pressure from pipe, [Pa]
    L : float, optional
        Length of pipe, [m]
    D : float, optional
        Diameter of pipe, [m]
    m : float, optional
        Mass flow rate of gas through pipe, [kg/s]
    Returns
    -------
    m, P1, P2, D, or L : float
        The missing input which was solved for [base SI]
    Notes
    -----
    The solution for P2 has the following closed form, derived using Maple:
    .. math::
        P_2={P_1 \left( {{ e}^{0.5\cdot{\frac {1}{{m}^{2}} \left( -C{m}^{2}
        +\text{ lambertW} \left(-{\frac {BP_1}{{m}^{2}}{{ e}^{-{\frac {-C{m}^{
        2}+BP_1}{{m}^{2}}}}}}\right){}{m}^{2}+BP_1 \right) }}} \right) ^{-1}}
    .. math::
        B = \frac{\pi^2 D^4}{4^2} \rho_{avg}
    .. math::
        C = f_d \frac{L}{D}
    A wide range of conditions are impossible due to choked flow. See
    `P_isothermal_critical_flow` for details. An exception is raised when
    they occur.
    The 2 multiplied by the logarithm is often shown as a power of the
    pressure ratio; this is only the case when the pressure ratio is raised to
    the power of 2 before its logarithm is taken.
    A number of limitations exist for this model:
    * Density dependence is that of an ideal gas.
    * If calculating the pressure drop, the average gas density cannot
      be known immediately; iteration must be used to correct this.
    * The friction factor depends on both the gas density and velocity,
      so it should be solved for iteratively as well. It changes throughout
      the pipe as the gas expands and velocity increases.
    * The model is not easily adapted to include elevation effects due to
      the acceleration term included in it.
    * As the gas expands, it will change temperature slightly, further
      altering the density and friction factor.
    There are many commercial packages which perform the actual direct
    integration of the flow, such as OLGA Dynamic Multiphase Flow Simulator,
    or ASPEN Hydraulics.
    This expression has also been presented with the ideal gas assumption
    directly incorporated into it [4]_ (note R is the specific gas constant, in
    units of J/kg/K):
    .. math::
        \dot m^2 = \frac{\left(\frac{\pi D^2}{4}\right)^2
        \left(P_1^2-P_2^2\right)}{RT\left(f_d\frac{L}{D} + 2\ln\frac{P_1}{P_2}
        \right)}
    Examples
    --------
    >>> isothermal_gas(rho=11.3, fd=0.00185, P1=1E6, P2=9E5, L=1000, D=0.5)
    145.4847572636031
    References
    ----------
    .. [1] Crane Co. Flow of Fluids Through Valves, Fittings, and Pipe. Crane,
       2009.
    .. [2] Kim, J. and Singh, N. "A Novel Equation for Isothermal Pipe Flow.".
       Chemical Engineering, June 2012, http://www.chemengonline.com/a-novel-equation-for-isothermal-pipe-flow/?printmode=1
    .. [3] Wilkes, James O. Fluid Mechanics for Chemical Engineers with
       Microfluidics and CFD. 2 edition. Upper Saddle River, NJ: Prentice Hall,
       2005.
    .. [4] Rennels, Donald C., and Hobart M. Hudson. Pipe Flow: A Practical
       and Comprehensive Guide. 1st edition. Hoboken, N.J: Wiley, 2012.
    '''
    # Dispatch on which single argument is None; each branch validates the
    # physically-possible range (choked flow) before computing.
    if m is None and P1 is not None and P2 is not None and L is not None and D is not None:
        # Solve for mass flow directly; reject outlet pressures below the
        # choked-flow limit or above the inlet pressure.
        Pcf = P_isothermal_critical_flow(P=P1, fd=fd, D=D, L=L)
        if P2 < Pcf:
            raise ValueError('Given outlet pressure is not physically possible ' # numba: delete
                             'due to the formation of choked flow at P2=%f, specified outlet pressure was %f' % (Pcf, P2)) # numba: delete
#            raise ValueError("Not possible") # numba: uncomment
        if P2 > P1:
            raise ValueError('Specified outlet pressure is larger than the '
                             'inlet pressure; fluid will flow backwards.')
        return sqrt(0.0625*pi*pi*D**4*rho/(P1*(fd*L/D + 2.0*log(P1/P2)))*(P1*P1 - P2*P2))
    elif L is None and P1 is not None and P2 is not None and D is not None and m is not None:
        # Length has a closed-form solution.
        return D*(pi*pi*D**4*rho*(P1*P1 - P2*P2) - 32.0*P1*m*m*log(P1/P2))/(16.0*P1*fd*m*m)
    elif P1 is None and L is not None and P2 is not None and D is not None and m is not None:
        # Upstream pressure: first try iterating on the explicit P2 solution,
        # then fall back to a bounded root-find between P2 and the choked limit.
        Pcf = P_upstream_isothermal_critical_flow(P=P2, fd=fd, D=D, L=L)
        try:
            # Use the explicit solution for P2 with different P1 guesses;
            # newton doesn't like solving for m.
            P1 = secant(isothermal_gas_err_P2_basis, (P2+Pcf)/2., args=(P2, rho, fd, m, L, D))
            if not (P2 <= P1):
                raise ValueError("Failed")
            return P1
        except:
            try:
                return ridder(isothermal_gas_err_P1, a=P2, b=Pcf, args=(fd, rho, P2, L, D, m))
            except:
                m_max = isothermal_gas(rho, fd, P1=Pcf, P2=P2, L=L, D=D) # numba: delete
                raise ValueError('The desired mass flow rate of %f kg/s cannot ' # numba: delete
                                 'be achieved with the specified downstream pressure; the maximum flowrate is ' # numba: delete
                                 '%f kg/s at an upstream pressure of %f Pa' %(m, m_max, Pcf)) # numba: delete
#                raise ValueError("Failed") # numba: uncomment
    elif P2 is None and L is not None and P1 is not None and D is not None and m is not None:
        # Downstream pressure: analytical lambertw solution first; on
        # overflow/invalid results, fall back to a bounded root-find.
        try:
            Pcf = P_isothermal_critical_flow(P=P1, fd=fd, D=D, L=L)
            m_max = isothermal_gas(rho, fd, P1=P1, P2=Pcf, L=L, D=D)
            if not (m <= m_max):
                raise ValueError("Failed")
            C = fd*L/D
            B = (pi/4*D**2)**2*rho
            arg = -B/m**2*P1*exp(-(-C*m**2+B*P1)/m**2)
            # Consider the two real branches of the lambertw function.
            # The k=-1 branch produces the higher P2 values; the k=0 branch is
            # physically impossible.
            lambert_ans = float(lambertw(arg, k=-1).real)
            # Large overflow problem here; also divide by zero problems!
            # Fail and try a numerical solution if it doesn't work.
            if isinf(lambert_ans):
                raise ValueError("Should not be infinity")
            P2 = P1/exp((-C*m**2+lambert_ans*m**2+B*P1)/m**2/2.)
            if not (P2 < P1):
                raise ValueError("Should not be the case")
            return P2
        except:
            Pcf = P_isothermal_critical_flow(P=P1, fd=fd, D=D, L=L)
            try:
                return ridder(isothermal_gas_err_P2, a=Pcf, b=P1, args=(rho, fd, P1, L, D, m))
            except:
                m_max = isothermal_gas(rho, fd, P1=P1, P2=Pcf, L=L, D=D)
                raise ValueError('The desired mass flow rate cannot be achieved ' # numba: delete
                                 'with the specified upstream pressure of %f Pa; the maximum flowrate is %f ' # numba: delete
                                 'kg/s at a downstream pressure of %f' %(P1, m_max, Pcf)) # numba: delete
#                raise ValueError("Failed") # numba: uncomment
            # A solver which respects its boundaries is required here.
            # ridder cuts the time down from 2 ms to 200 microseconds.
            # It is believed Pcf and P1 will always bracket the root, however
            # leave the commented code for testing
    elif D is None and P2 is not None and P1 is not None and L is not None and m is not None:
        # No closed form for diameter; solve numerically.
        return secant(isothermal_gas_err_D, 0.1, args=(m, rho, fd, P1, P2, L))
    else:
        raise ValueError('This function solves for either mass flow, upstream \
pressure, downstream pressure, diameter, or length; all other inputs \
must be provided.')
def Panhandle_A(SG, Tavg, L=None, D=None, P1=None, P2=None, Q=None, Ts=288.7,
                Ps=101325., Zavg=1.0, E=0.92):
    r'''Solve the Panhandle A equation for compressible gas flow in a
    pipeline. Exactly one of flow rate `Q`, diameter `D`, inlet pressure
    `P1`, outlet pressure `P2`, or length `L` must be omitted; it is
    computed from the others.

    This form is developed with all units in base SI, following the work of
    [1]_; many unit-specific variants of the constants appear in the
    literature.

    .. math::
        Q = 158.02053 E \left(\frac{T_s}{P_s}\right)^{1.0788}\left[\frac{P_1^2
        -P_2^2}{L \cdot {SG}^{0.8539} T_{avg}Z_{avg}}\right]^{0.5394}D^{2.6182}

    Parameters
    ----------
    SG : float
        Specific gravity of fluid with respect to air at the reference
        temperature and pressure `Ts` and `Ps`, [-]
    Tavg : float
        Average temperature of the fluid in the pipeline, [K]
    L : float, optional
        Length of pipe, [m]
    D : float, optional
        Diameter of pipe, [m]
    P1 : float, optional
        Inlet pressure to pipe, [Pa]
    P2 : float, optional
        Outlet pressure from pipe, [Pa]
    Q : float, optional
        Flow rate of gas through pipe at `Ts` and `Ps`, [m^3/s]
    Ts : float, optional
        Reference temperature for the specific gravity of the gas, [K]
    Ps : float, optional
        Reference pressure for the specific gravity of the gas, [Pa]
    Zavg : float, optional
        Average compressibility factor for gas, [-]
    E : float, optional
        Pipeline efficiency, a correction factor between 0 and 1

    Returns
    -------
    Q, P1, P2, D, or L : float
        The missing input which was solved for [base SI]

    Raises
    ------
    ValueError
        If more or fewer than one of `Q`, `D`, `P1`, `P2`, `L` is None.

    Examples
    --------
    >>> Panhandle_A(D=0.340, P1=90E5, P2=20E5, L=160E3, SG=0.693, Tavg=277.15)
    42.56082051195928

    References
    ----------
    .. [1] Menon, E. Shashi. Gas Pipeline Hydraulics. 1st edition. Boca Raton,
       FL: CRC Press, 2005.
    .. [2] GPSA. GPSA Engineering Data Book. 13th edition. Gas Processors
       Suppliers Association, Tulsa, OK, 2012.
    '''
    # Exponents and leading constant of the base-SI Panhandle A form.
    c1 = 1.0788
    c2 = 0.8539
    c3 = 0.5394
    c4 = 2.6182
    c5 = 158.0205328706957220332831680508433862787 # 45965*10**(591/1250)/864
    # Identify which single variable is to be solved for.
    missing = [name for name, value in
               (('Q', Q), ('D', D), ('P1', P1), ('P2', P2), ('L', L))
               if value is None]
    if missing == ['Q']:
        return c5*E*(Ts/Ps)**c1*((P1**2 - P2**2)/(L*SG**c2*Tavg*Zavg))**c3*D**c4
    elif missing == ['D']:
        return (Q*(Ts/Ps)**(-c1)*(SG**(-c2)*(P1**2 - P2**2)/(L*Tavg*Zavg))**(-c3)/(E*c5))**(1./c4)
    elif missing == ['P1']:
        return sqrt(L*SG**c2*Tavg*Zavg*(D**(-c4)*Q*(Ts/Ps)**(-c1)/(E*c5))**(1./c3) + P2**2)
    elif missing == ['P2']:
        return sqrt(-L*SG**c2*Tavg*Zavg*(D**(-c4)*Q*(Ts/Ps)**(-c1)/(E*c5))**(1./c3) + P1**2)
    elif missing == ['L']:
        return SG**(-c2)*(D**(-c4)*Q*(Ts/Ps)**(-c1)/(E*c5))**(-1./c3)*(P1**2 - P2**2)/(Tavg*Zavg)
    else:
        raise ValueError('This function solves for either flow, upstream \
pressure, downstream pressure, diameter, or length; all other inputs \
must be provided.')
def Panhandle_B(SG, Tavg, L=None, D=None, P1=None, P2=None, Q=None, Ts=288.7,
                Ps=101325., Zavg=1.0, E=0.92):
    r'''Solve the Panhandle B equation for compressible gas flow in a
    pipeline. Exactly one of flow rate `Q`, diameter `D`, inlet pressure
    `P1`, outlet pressure `P2`, or length `L` must be omitted; it is
    computed from the others.

    This form is developed with all units in base SI, following the work of
    [1]_; many unit-specific variants of the constants appear in the
    literature.

    .. math::
        Q = 152.88116 E \left(\frac{T_s}{P_s}\right)^{1.02}\left[\frac{P_1^2
        -P_2^2}{L \cdot {SG}^{0.961} T_{avg}Z_{avg}}\right]^{0.51}D^{2.53}

    Parameters
    ----------
    SG : float
        Specific gravity of fluid with respect to air at the reference
        temperature and pressure `Ts` and `Ps`, [-]
    Tavg : float
        Average temperature of the fluid in the pipeline, [K]
    L : float, optional
        Length of pipe, [m]
    D : float, optional
        Diameter of pipe, [m]
    P1 : float, optional
        Inlet pressure to pipe, [Pa]
    P2 : float, optional
        Outlet pressure from pipe, [Pa]
    Q : float, optional
        Flow rate of gas through pipe at `Ts` and `Ps`, [m^3/s]
    Ts : float, optional
        Reference temperature for the specific gravity of the gas, [K]
    Ps : float, optional
        Reference pressure for the specific gravity of the gas, [Pa]
    Zavg : float, optional
        Average compressibility factor for gas, [-]
    E : float, optional
        Pipeline efficiency, a correction factor between 0 and 1

    Returns
    -------
    Q, P1, P2, D, or L : float
        The missing input which was solved for [base SI]

    Raises
    ------
    ValueError
        If more or fewer than one of `Q`, `D`, `P1`, `P2`, `L` is None.

    Examples
    --------
    >>> Panhandle_B(D=0.340, P1=90E5, P2=20E5, L=160E3, SG=0.693, Tavg=277.15)
    42.35366178004172

    References
    ----------
    .. [1] Menon, E. Shashi. Gas Pipeline Hydraulics. 1st edition. Boca Raton,
       FL: CRC Press, 2005.
    .. [2] GPSA. GPSA Engineering Data Book. 13th edition. Gas Processors
       Suppliers Association, Tulsa, OK, 2012.
    '''
    c1 = 1.02 # reference condition power
    c2 = 0.961 # sg power
    c3 = 0.51 # main power
    c4 = 2.53 # diameter power
    c5 = 152.8811634298055458624385985866624419060 # 4175*10**(3/25)/36
    # Identify which single variable is to be solved for.
    missing = [name for name, value in
               (('Q', Q), ('D', D), ('P1', P1), ('P2', P2), ('L', L))
               if value is None]
    if missing == ['Q']:
        return c5*E*(Ts/Ps)**c1*((P1**2 - P2**2)/(L*SG**c2*Tavg*Zavg))**c3*D**c4
    elif missing == ['D']:
        return (Q*(Ts/Ps)**(-c1)*(SG**(-c2)*(P1**2 - P2**2)/(L*Tavg*Zavg))**(-c3)/(E*c5))**(1./c4)
    elif missing == ['P1']:
        return sqrt(L*SG**c2*Tavg*Zavg*(D**(-c4)*Q*(Ts/Ps)**(-c1)/(E*c5))**(1./c3) + P2**2)
    elif missing == ['P2']:
        return sqrt(-L*SG**c2*Tavg*Zavg*(D**(-c4)*Q*(Ts/Ps)**(-c1)/(E*c5))**(1./c3) + P1**2)
    elif missing == ['L']:
        return SG**(-c2)*(D**(-c4)*Q*(Ts/Ps)**(-c1)/(E*c5))**(-1./c3)*(P1**2 - P2**2)/(Tavg*Zavg)
    else:
        raise ValueError('This function solves for either flow, upstream \
pressure, downstream pressure, diameter, or length; all other inputs \
must be provided.')
def Weymouth(SG, Tavg, L=None, D=None, P1=None, P2=None, Q=None, Ts=288.7,
             Ps=101325., Zavg=1.0, E=0.92):
    r'''Solve the Weymouth equation for compressible gas flow in a
    pipeline. Exactly one of flow rate `Q`, diameter `D`, inlet pressure
    `P1`, outlet pressure `P2`, or length `L` must be omitted; it is
    computed from the others.

    This form is developed with all units in base SI, following the work of
    [1]_; many unit-specific variants of the constants appear in the
    literature.

    .. math::
        Q = 137.32958 E \frac{T_s}{P_s}\left[\frac{P_1^2
        -P_2^2}{L \cdot {SG} \cdot T_{avg}Z_{avg}}\right]^{0.5}D^{2.667}

    Parameters
    ----------
    SG : float
        Specific gravity of fluid with respect to air at the reference
        temperature and pressure `Ts` and `Ps`, [-]
    Tavg : float
        Average temperature of the fluid in the pipeline, [K]
    L : float, optional
        Length of pipe, [m]
    D : float, optional
        Diameter of pipe, [m]
    P1 : float, optional
        Inlet pressure to pipe, [Pa]
    P2 : float, optional
        Outlet pressure from pipe, [Pa]
    Q : float, optional
        Flow rate of gas through pipe at `Ts` and `Ps`, [m^3/s]
    Ts : float, optional
        Reference temperature for the specific gravity of the gas, [K]
    Ps : float, optional
        Reference pressure for the specific gravity of the gas, [Pa]
    Zavg : float, optional
        Average compressibility factor for gas, [-]
    E : float, optional
        Pipeline efficiency, a correction factor between 0 and 1

    Returns
    -------
    Q, P1, P2, D, or L : float
        The missing input which was solved for [base SI]

    Raises
    ------
    ValueError
        If more or fewer than one of `Q`, `D`, `P1`, `P2`, `L` is None.

    Examples
    --------
    >>> Weymouth(D=0.340, P1=90E5, P2=20E5, L=160E3, SG=0.693, Tavg=277.15)
    32.07729055913029

    References
    ----------
    .. [1] Menon, E. Shashi. Gas Pipeline Hydraulics. 1st edition. Boca Raton,
       FL: CRC Press, 2005.
    .. [2] GPSA. GPSA Engineering Data Book. 13th edition. Gas Processors
       Suppliers Association, Tulsa, OK, 2012.
    '''
    c3 = 0.5 # main power
    c4 = 2.667 # diameter power
    c5 = 137.3295809942512546732179684618143090992 # 37435*10**(501/1000)/864
    # Identify which single variable is to be solved for.
    missing = [name for name, value in
               (('Q', Q), ('D', D), ('P1', P1), ('P2', P2), ('L', L))
               if value is None]
    if missing == ['Q']:
        return c5*E*(Ts/Ps)*((P1**2 - P2**2)/(L*SG*Tavg*Zavg))**c3*D**c4
    elif missing == ['D']:
        return (Ps*Q*((P1**2 - P2**2)/(L*SG*Tavg*Zavg))**(-c3)/(E*Ts*c5))**(1./c4)
    elif missing == ['P1']:
        return sqrt(L*SG*Tavg*Zavg*(D**(-c4)*Ps*Q/(E*Ts*c5))**(1./c3) + P2**2)
    elif missing == ['P2']:
        return sqrt(-L*SG*Tavg*Zavg*(D**(-c4)*Ps*Q/(E*Ts*c5))**(1./c3) + P1**2)
    elif missing == ['L']:
        return (D**(-c4)*Ps*Q/(E*Ts*c5))**(-1./c3)*(P1**2 - P2**2)/(SG*Tavg*Zavg)
    else:
        raise ValueError('This function solves for either flow, upstream \
pressure, downstream pressure, diameter, or length; all other inputs \
must be provided.')
def _to_solve_Spitzglass_high(D, Q, SG, Tavg, L, P1, P2, Ts, Ps, Zavg, E):
    """Residual in flow rate used when solving `Spitzglass_high` for the
    pipe diameter `D` numerically."""
    Q_calc = Spitzglass_high(SG=SG, Tavg=Tavg, L=L, D=D, P1=P1, P2=P2,
                             Ts=Ts, Ps=Ps, Zavg=Zavg, E=E)
    return Q - Q_calc
def Spitzglass_high(SG, Tavg, L=None, D=None, P1=None, P2=None, Q=None, Ts=288.7,
                    Ps=101325., Zavg=1.0, E=1.):
    r'''Solve the Spitzglass (high pressure drop) equation for compressible
    gas flow in a pipeline. Exactly one of flow rate `Q`, diameter `D`,
    inlet pressure `P1`, outlet pressure `P2`, or length `L` must be
    omitted; it is computed from the others (`D` numerically, the rest
    analytically).

    The form as in [1]_ is used, but with a more precise metric conversion
    from inches to m.

    .. math::
        Q = 125.1060 E \left(\frac{T_s}{P_s}\right)\left[\frac{P_1^2
        -P_2^2}{L \cdot {SG} T_{avg}Z_{avg} (1 + 0.09144/D + \frac{150}{127}D)}
        \right]^{0.5}D^{2.5}

    Parameters
    ----------
    SG : float
        Specific gravity of fluid with respect to air at the reference
        temperature and pressure `Ts` and `Ps`, [-]
    Tavg : float
        Average temperature of the fluid in the pipeline, [K]
    L : float, optional
        Length of pipe, [m]
    D : float, optional
        Diameter of pipe, [m]
    P1 : float, optional
        Inlet pressure to pipe, [Pa]
    P2 : float, optional
        Outlet pressure from pipe, [Pa]
    Q : float, optional
        Flow rate of gas through pipe at `Ts` and `Ps`, [m^3/s]
    Ts : float, optional
        Reference temperature for the specific gravity of the gas, [K]
    Ps : float, optional
        Reference pressure for the specific gravity of the gas, [Pa]
    Zavg : float, optional
        Average compressibility factor for gas, [-]
    E : float, optional
        Pipeline efficiency, a correction factor between 0 and 1

    Returns
    -------
    Q, P1, P2, D, or L : float
        The missing input which was solved for [base SI]

    Raises
    ------
    ValueError
        If more or fewer than one of `Q`, `D`, `P1`, `P2`, `L` is None.

    Notes
    -----
    This equation is often presented without any correction for reference
    conditions for specific gravity.

    Examples
    --------
    >>> Spitzglass_high(D=0.340, P1=90E5, P2=20E5, L=160E3, SG=0.693, Tavg=277.15)
    29.42670246281681

    References
    ----------
    .. [1] Coelho, Paulo M., and Carlos Pinho. "Considerations about Equations
       for Steady State Flow in Natural Gas Pipelines." Journal of the
       Brazilian Society of Mechanical Sciences and Engineering 29, no. 3
       (September 2007): 262-73. doi:10.1590/S1678-58782007000300005.
    .. [2] Menon, E. Shashi. Gas Pipeline Hydraulics. 1st edition. Boca Raton,
       FL: CRC Press, 2005.
    '''
    c3 = 1.181102362204724409448818897637795275591 # 0.03/inch or 150/127
    c4 = 0.09144
    c5 = 125.1060
    # Identify which single variable is to be solved for.
    missing = [name for name, value in
               (('Q', Q), ('D', D), ('P1', P1), ('P2', P2), ('L', L))
               if value is None]
    if missing == ['Q']:
        return (c5*E*Ts/Ps*D**2.5*sqrt((P1**2-P2**2)
                /(L*SG*Zavg*Tavg*(1 + c4/D + c3*D))))
    elif missing == ['D']:
        # No closed form for diameter; solve the flow equation numerically.
        return secant(_to_solve_Spitzglass_high, 0.5, args=(Q, SG, Tavg, L, P1, P2, Ts, Ps, Zavg, E))
    elif missing == ['P1']:
        return sqrt((D**6*E**2*P2**2*Ts**2*c5**2
                     + D**2*L*Ps**2*Q**2*SG*Tavg*Zavg*c3
                     + D*L*Ps**2*Q**2*SG*Tavg*Zavg
                     + L*Ps**2*Q**2*SG*Tavg*Zavg*c4)/(D**6*E**2*Ts**2*c5**2))
    elif missing == ['P2']:
        return sqrt((D**6*E**2*P1**2*Ts**2*c5**2
                     - D**2*L*Ps**2*Q**2*SG*Tavg*Zavg*c3
                     - D*L*Ps**2*Q**2*SG*Tavg*Zavg
                     - L*Ps**2*Q**2*SG*Tavg*Zavg*c4)/(D**6*E**2*Ts**2*c5**2))
    elif missing == ['L']:
        return (D**6*E**2*Ts**2*c5**2*(P1**2 - P2**2)
                /(Ps**2*Q**2*SG*Tavg*Zavg*(D**2*c3 + D + c4)))
    else:
        raise ValueError('This function solves for either flow, upstream \
pressure, downstream pressure, diameter, or length; all other inputs \
must be provided.')
def _to_solve_Spitzglass_low(D, Q, SG, Tavg, L, P1, P2, Ts, Ps, Zavg, E):
    """Residual in flow rate used when solving `Spitzglass_low` for the
    pipe diameter `D` numerically."""
    Q_calc = Spitzglass_low(SG=SG, Tavg=Tavg, L=L, D=D, P1=P1, P2=P2,
                            Ts=Ts, Ps=Ps, Zavg=Zavg, E=E)
    return Q - Q_calc
def Spitzglass_low(SG, Tavg, L=None, D=None, P1=None, P2=None, Q=None, Ts=288.7,
                   Ps=101325., Zavg=1.0, E=1.):
    r'''Solve the Spitzglass (low pressure drop) equation for compressible
    gas flow in a pipeline. Exactly one of flow rate `Q`, diameter `D`,
    inlet pressure `P1`, outlet pressure `P2`, or length `L` must be
    omitted; it is computed from the others (`D` numerically, the rest
    analytically).

    The form as in [1]_ is used, but with a more precise metric conversion
    from inches to m.

    .. math::
        Q = 125.1060 E \left(\frac{T_s}{P_s}\right)\left[\frac{2(P_1
        -P_2)(P_s+1210)}{L \cdot {SG} \cdot T_{avg}Z_{avg} (1 + 0.09144/D
        + \frac{150}{127}D)}\right]^{0.5}D^{2.5}

    Parameters
    ----------
    SG : float
        Specific gravity of fluid with respect to air at the reference
        temperature and pressure `Ts` and `Ps`, [-]
    Tavg : float
        Average temperature of the fluid in the pipeline, [K]
    L : float, optional
        Length of pipe, [m]
    D : float, optional
        Diameter of pipe, [m]
    P1 : float, optional
        Inlet pressure to pipe, [Pa]
    P2 : float, optional
        Outlet pressure from pipe, [Pa]
    Q : float, optional
        Flow rate of gas through pipe at `Ts` and `Ps`, [m^3/s]
    Ts : float, optional
        Reference temperature for the specific gravity of the gas, [K]
    Ps : float, optional
        Reference pressure for the specific gravity of the gas, [Pa]
    Zavg : float, optional
        Average compressibility factor for gas, [-]
    E : float, optional
        Pipeline efficiency, a correction factor between 0 and 1

    Returns
    -------
    Q, P1, P2, D, or L : float
        The missing input which was solved for [base SI]

    Raises
    ------
    ValueError
        If more or fewer than one of `Q`, `D`, `P1`, `P2`, `L` is None.

    Notes
    -----
    This equation is often presented without any correction for reference
    conditions for specific gravity. The 1210 constant in [1]_ is an
    approximation needed for the reference correction to work without a
    square of the pressure difference.

    Examples
    --------
    >>> Spitzglass_low(D=0.154051, P1=6720.3199, P2=0, L=54.864, SG=0.6, Tavg=288.7)
    0.9488775242530617

    References
    ----------
    .. [1] Coelho, Paulo M., and Carlos Pinho. "Considerations about Equations
       for Steady State Flow in Natural Gas Pipelines." Journal of the
       Brazilian Society of Mechanical Sciences and Engineering 29, no. 3
       (September 2007): 262-73. doi:10.1590/S1678-58782007000300005.
    .. [2] GPSA. GPSA Engineering Data Book. 13th edition. Gas Processors
       Suppliers Association, Tulsa, OK, 2012.
    '''
    c3 = 1.181102362204724409448818897637795275591 # 0.03/inch or 150/127
    c4 = 0.09144
    c5 = 125.1060
    # Identify which single variable is to be solved for.
    missing = [name for name, value in
               (('Q', Q), ('D', D), ('P1', P1), ('P2', P2), ('L', L))
               if value is None]
    if missing == ['Q']:
        return c5*Ts/Ps*D**2.5*E*sqrt(((P1-P2)*2*(Ps+1210.))/(L*SG*Tavg*Zavg*(1 + c4/D + c3*D)))
    elif missing == ['D']:
        # No closed form for diameter; solve the flow equation numerically.
        return secant(_to_solve_Spitzglass_low, 0.5, args=(Q, SG, Tavg, L, P1, P2, Ts, Ps, Zavg, E))
    elif missing == ['P1']:
        return 0.5*(2.0*D**6*E**2*P2*Ts**2*c5**2*(Ps + 1210.0) + D**2*L*Ps**2*Q**2*SG*Tavg*Zavg*c3 + D*L*Ps**2*Q**2*SG*Tavg*Zavg + L*Ps**2*Q**2*SG*Tavg*Zavg*c4)/(D**6*E**2*Ts**2*c5**2*(Ps + 1210.0))
    elif missing == ['P2']:
        return 0.5*(2.0*D**6*E**2*P1*Ts**2*c5**2*(Ps + 1210.0) - D**2*L*Ps**2*Q**2*SG*Tavg*Zavg*c3 - D*L*Ps**2*Q**2*SG*Tavg*Zavg - L*Ps**2*Q**2*SG*Tavg*Zavg*c4)/(D**6*E**2*Ts**2*c5**2*(Ps + 1210.0))
    elif missing == ['L']:
        return 2.0*D**6*E**2*Ts**2*c5**2*(P1*Ps + 1210.0*P1 - P2*Ps - 1210.0*P2)/(Ps**2*Q**2*SG*Tavg*Zavg*(D**2*c3 + D + c4))
    else:
        raise ValueError('This function solves for either flow, upstream \
pressure, downstream pressure, diameter, or length; all other inputs \
must be provided.')
def _to_solve_Oliphant(D, Q, SG, Tavg, L, P1, P2, Ts, Ps, Zavg, E):
    """Residual for the diameter solve: target flow `Q` minus the Oliphant
    prediction at trial diameter `D`; zero at the solution."""
    predicted = Oliphant(SG=SG, Tavg=Tavg, L=L, D=D, P1=P1, P2=P2, Ts=Ts,
                         Ps=Ps, Zavg=Zavg, E=E)
    return Q - predicted
def Oliphant(SG, Tavg, L=None, D=None, P1=None, P2=None, Q=None, Ts=288.7,
             Ps=101325., Zavg=1.0, E=0.92):
    r'''Solve the Oliphant formula for compressible gas flow in a pipeline.

    Exactly one of `Q`, `P1`, `P2`, `D`, or `L` must be omitted; it is
    computed from the remaining inputs. All quantities are in base SI units.

    .. math::
        Q = 84.5872\left(D^{2.5} + 0.20915D^3\right)\frac{T_s}{P_s}\left(\frac
        {P_1^2 - P_2^2}{L\cdot {SG} \cdot T_{avg}}\right)^{0.5}

    Parameters
    ----------
    SG : float
        Specific gravity of the gas with respect to air at `Ts` and `Ps`, [-]
    Tavg : float
        Average temperature of the gas in the pipeline, [K]
    L : float, optional
        Pipe length, [m]
    D : float, optional
        Pipe diameter, [m]
    P1 : float, optional
        Inlet pressure to the pipe, [Pa]
    P2 : float, optional
        Outlet pressure from the pipe, [Pa]
    Q : float, optional
        Gas flow rate at `Ts` and `Ps`, [m^3/s]
    Ts : float, optional
        Reference temperature for `SG`, [K]
    Ps : float, optional
        Reference pressure for `SG`, [Pa]
    Zavg : float, optional
        Average compressibility factor; not used by the direct formula but
        accepted for interface consistency with the other correlations, [-]
    E : float, optional
        Pipeline efficiency, 0 to 1; not used by the direct formula but
        accepted for interface consistency with the other correlations, [-]

    Returns
    -------
    Q, P1, P2, D, or L : float
        The input which was not specified, [base SI]

    Notes
    -----
    Recommended in [1]_ for use between vacuum and 100 psi. The diameter
    case has no closed form and is found with a numerical solver; the other
    cases are closed-form rearrangements.

    Examples
    --------
    >>> Oliphant(D=0.340, P1=90E5, P2=20E5, L=160E3, SG=0.693, Tavg=277.15)
    28.851535408143057

    References
    ----------
    .. [1] GPSA. GPSA Engineering Data Book. 13th edition. Gas Processors
       Suppliers Association, Tulsa, OK, 2012.
    .. [2] F. N. Oliphant, "Production of Natural Gas," Report. USGS, 1902.
    '''
    # SI conversions of the original imperial constants.
    K1 = 84.587176139918568651410168968141078948974609375000
    K2 = 0.2091519350460528670065940559652517549694  # 1/(30.*0.0254**0.5)
    if L is not None and D is not None and P1 is not None and P2 is not None and Q is None:
        diameter_group = D**2.5 + K2*D**3
        return K1*diameter_group*Ts/Ps*sqrt((P1**2-P2**2)/(L*SG*Tavg))
    if L is not None and Q is not None and P1 is not None and P2 is not None and D is None:
        # No analytical rearrangement for D; solve numerically.
        return secant(_to_solve_Oliphant, 0.5, args=(Q, SG, Tavg, L, P1, P2, Ts, Ps, Zavg, E))
    if L is not None and Q is not None and D is not None and P2 is not None and P1 is None:
        return sqrt(L*Ps**2*Q**2*SG*Tavg/(Ts**2*K1**2*(D**3*K2 + D**2.5)**2) + P2**2)
    if L is not None and Q is not None and D is not None and P1 is not None and P2 is None:
        return sqrt(-L*Ps**2*Q**2*SG*Tavg/(Ts**2*K1**2*(D**3*K2 + D**2.5)**2) + P1**2)
    if P2 is not None and Q is not None and D is not None and P1 is not None and L is None:
        return Ts**2*K1**2*(P1**2 - P2**2)*(D**3*K2 + D**2.5)**2/(Ps**2*Q**2*SG*Tavg)
    raise ValueError('This function solves for either flow, upstream '
                     'pressure, downstream pressure, diameter, or length; '
                     'all other inputs must be provided.')
def Fritzsche(SG, Tavg, L=None, D=None, P1=None, P2=None, Q=None, Ts=288.7,
              Ps=101325., Zavg=1.0, E=1.0):
    r'''Solve the Fritzsche formula for compressible gas flow in a pipeline.

    Exactly one of `Q`, `P1`, `P2`, `D`, or `L` must be omitted; it is
    computed from the remaining inputs. All quantities are in base SI units,
    following the form given in [1]_.

    .. math::
        Q = 93.500 \frac{T_s}{P_s}\left(\frac{P_1^2 - P_2^2}
        {L\cdot {SG}^{0.8587} \cdot T_{avg}}\right)^{0.538}D^{2.69}

    Parameters
    ----------
    SG : float
        Specific gravity of the gas with respect to air at `Ts` and `Ps`, [-]
    Tavg : float
        Average temperature of the gas in the pipeline, [K]
    L : float, optional
        Pipe length, [m]
    D : float, optional
        Pipe diameter, [m]
    P1 : float, optional
        Inlet pressure to the pipe, [Pa]
    P2 : float, optional
        Outlet pressure from the pipe, [Pa]
    Q : float, optional
        Gas flow rate at `Ts` and `Ps`, [m^3/s]
    Ts : float, optional
        Reference temperature for `SG`, [K]
    Ps : float, optional
        Reference pressure for `SG`, [Pa]
    Zavg : float, optional
        Average compressibility factor of the gas, [-]
    E : float, optional
        Pipeline efficiency, a correction factor between 0 and 1, [-]

    Returns
    -------
    Q, P1, P2, D, or L : float
        The input which was not specified, [base SI]

    Notes
    -----
    Slightly different leading constants and exponents for this correlation
    appear in the literature ([1]_, [2]_); the differences are very small.

    Examples
    --------
    >>> Fritzsche(D=0.340, P1=90E5, P2=20E5, L=160E3, SG=0.693, Tavg=277.15)
    39.421535157535565

    References
    ----------
    .. [1] Menon, E. Shashi. Gas Pipeline Hydraulics. 1st edition. Boca Raton,
       FL: CRC Press, 2005.
    .. [2] Coelho, Paulo M., and Carlos Pinho. "Considerations about Equations
       for Steady State Flow in Natural Gas Pipelines." Journal of the
       Brazilian Society of Mechanical Sciences and Engineering 29, no. 3
       (September 2007): 262-73. doi:10.1590/S1678-58782007000300005.
    '''
    # Leading constant in base SI; correlation exponents.
    coeff = 93.50009798751128188757518688244137811221  # 14135*10**(57/125)/432
    SG_pow = 0.8587
    main_pow = 0.538
    D_pow = 2.69
    if L is not None and D is not None and P1 is not None and P2 is not None and Q is None:
        return coeff*E*(Ts/Ps)*((P1**2 - P2**2)/(SG**SG_pow*Tavg*L*Zavg))**main_pow*D**D_pow
    if L is not None and Q is not None and P1 is not None and P2 is not None and D is None:
        return (Ps*Q*(SG**(-SG_pow)*(P1**2 - P2**2)/(L*Tavg*Zavg))**(-main_pow)/(E*Ts*coeff))**(1./D_pow)
    if L is not None and Q is not None and D is not None and P2 is not None and P1 is None:
        flow_group = D**(-D_pow)*Ps*Q/(E*Ts*coeff)
        return sqrt(L*SG**SG_pow*Tavg*Zavg*flow_group**(1./main_pow) + P2**2)
    if L is not None and Q is not None and D is not None and P1 is not None and P2 is None:
        flow_group = D**(-D_pow)*Ps*Q/(E*Ts*coeff)
        return sqrt(-L*SG**SG_pow*Tavg*Zavg*flow_group**(1./main_pow) + P1**2)
    if P2 is not None and Q is not None and D is not None and P1 is not None and L is None:
        flow_group = D**(-D_pow)*Ps*Q/(E*Ts*coeff)
        return SG**(-SG_pow)*flow_group**(-1./main_pow)*(P1**2 - P2**2)/(Tavg*Zavg)
    raise ValueError('This function solves for either flow, upstream pressure, downstream pressure, diameter, or length; all other inputs must be provided.')
def Muller(SG, Tavg, mu, L=None, D=None, P1=None, P2=None, Q=None, Ts=288.7,
           Ps=101325., Zavg=1.0, E=1.0):
    r'''Solve the Muller formula for compressible gas flow in a pipeline.

    Exactly one of `Q`, `P1`, `P2`, `D`, or `L` must be omitted; it is
    computed from the remaining inputs. All quantities are in base SI units,
    following the form given in [1]_.

    .. math::
        Q = 15.7743\frac{T_s}{P_s}E\left(\frac{P_1^2 - P_2^2}{L \cdot Z_{avg}
        \cdot T_{avg}}\right)^{0.575} \left(\frac{D^{2.725}}{\mu^{0.15}
        SG^{0.425}}\right)

    Parameters
    ----------
    SG : float
        Specific gravity of the gas with respect to air at `Ts` and `Ps`, [-]
    Tavg : float
        Average temperature of the gas in the pipeline, [K]
    mu : float
        Average viscosity of the gas in the pipeline, [Pa*s]
    L : float, optional
        Pipe length, [m]
    D : float, optional
        Pipe diameter, [m]
    P1 : float, optional
        Inlet pressure to the pipe, [Pa]
    P2 : float, optional
        Outlet pressure from the pipe, [Pa]
    Q : float, optional
        Gas flow rate at `Ts` and `Ps`, [m^3/s]
    Ts : float, optional
        Reference temperature for `SG`, [K]
    Ps : float, optional
        Reference pressure for `SG`, [Pa]
    Zavg : float, optional
        Average compressibility factor of the gas, [-]
    E : float, optional
        Pipeline efficiency, a correction factor between 0 and 1, [-]

    Returns
    -------
    Q, P1, P2, D, or L : float
        The input which was not specified, [base SI]

    Notes
    -----
    Several unit systems and slightly rounded constants for this correlation
    appear in the literature; see [1]_, [2]_ and [3]_.

    Examples
    --------
    >>> Muller(D=0.340, P1=90E5, P2=20E5, L=160E3, SG=0.693, mu=1E-5,
    ... Tavg=277.15)
    60.45796698148659

    References
    ----------
    .. [1] Mohitpour, Mo, Golshan, and Allan Murray. Pipeline Design and
       Construction: A Practical Approach. 3rd edition. New York: Amer Soc
       Mechanical Engineers, 2006.
    .. [2] Menon, E. Shashi. Gas Pipeline Hydraulics. 1st edition. Boca Raton,
       FL: CRC Press, 2005.
    .. [3] Coelho, Paulo M., and Carlos Pinho. "Considerations about Equations
       for Steady State Flow in Natural Gas Pipelines." Journal of the
       Brazilian Society of Mechanical Sciences and Engineering 29, no. 3
       (September 2007): 262-73. doi:10.1590/S1678-58782007000300005.
    '''
    # Leading constant in base SI; correlation exponents.
    coeff = 15.77439908642077352939746374951659525108
    main_pow = 0.575   # pressure-group power
    D_pow = 2.725      # diameter power
    SG_pow = 0.425     # specific-gravity power
    mu_pow = 0.15      # viscosity power
    if L is not None and D is not None and P1 is not None and P2 is not None and Q is None:
        return coeff*Ts/Ps*E*((P1**2-P2**2)/Tavg/L/Zavg)**main_pow*D**D_pow/SG**SG_pow/mu**mu_pow
    if L is not None and Q is not None and P1 is not None and P2 is not None and D is None:
        return (Ps*Q*SG**SG_pow*mu**mu_pow*((P1**2 - P2**2)/(L*Tavg*Zavg))**(-main_pow)/(E*Ts*coeff))**(1./D_pow)
    if L is not None and Q is not None and D is not None and P2 is not None and P1 is None:
        flow_group = D**(-D_pow)*Ps*Q*SG**SG_pow*mu**mu_pow/(E*Ts*coeff)
        return sqrt(L*Tavg*Zavg*flow_group**(1/main_pow) + P2**2)
    if L is not None and Q is not None and D is not None and P1 is not None and P2 is None:
        flow_group = D**(-D_pow)*Ps*Q*SG**SG_pow*mu**mu_pow/(E*Ts*coeff)
        return sqrt(-L*Tavg*Zavg*flow_group**(1/main_pow) + P1**2)
    if P2 is not None and Q is not None and D is not None and P1 is not None and L is None:
        flow_group = D**(-D_pow)*Ps*Q*SG**SG_pow*mu**mu_pow/(E*Ts*coeff)
        return flow_group**(-1/main_pow)*(P1**2 - P2**2)/(Tavg*Zavg)
    raise ValueError('This function solves for either flow, upstream pressure, downstream pressure, diameter, or length; all other inputs must be provided.')
def IGT(SG, Tavg, mu, L=None, D=None, P1=None, P2=None, Q=None, Ts=288.7,
        Ps=101325., Zavg=1.0, E=1.0):
    r'''Solve the IGT formula for compressible gas flow in a pipeline.

    Exactly one of `Q`, `P1`, `P2`, `D`, or `L` must be omitted; it is
    computed from the remaining inputs. All quantities are in base SI units,
    following the form given in [1]_.

    .. math::
        Q = 24.6241\frac{T_s}{P_s}E\left(\frac{P_1^2 - P_2^2}{L \cdot Z_{avg}
        \cdot T_{avg}}\right)^{5/9} \left(\frac{D^{8/3}}{\mu^{1/9}
        SG^{4/9}}\right)

    Parameters
    ----------
    SG : float
        Specific gravity of the gas with respect to air at `Ts` and `Ps`, [-]
    Tavg : float
        Average temperature of the gas in the pipeline, [K]
    mu : float
        Average viscosity of the gas in the pipeline, [Pa*s]
    L : float, optional
        Pipe length, [m]
    D : float, optional
        Pipe diameter, [m]
    P1 : float, optional
        Inlet pressure to the pipe, [Pa]
    P2 : float, optional
        Outlet pressure from the pipe, [Pa]
    Q : float, optional
        Gas flow rate at `Ts` and `Ps`, [m^3/s]
    Ts : float, optional
        Reference temperature for `SG`, [K]
    Ps : float, optional
        Reference pressure for `SG`, [Pa]
    Zavg : float, optional
        Average compressibility factor of the gas, [-]
    E : float, optional
        Pipeline efficiency, a correction factor between 0 and 1, [-]

    Returns
    -------
    Q, P1, P2, D, or L : float
        The input which was not specified, [base SI]

    Notes
    -----
    This correlation is also presented in imperial units with rounded
    exponents in [2]_; the SI form in [3]_ uses a leading constant of
    24.6145 with the same exponents.

    Examples
    --------
    >>> IGT(D=0.340, P1=90E5, P2=20E5, L=160E3, SG=0.693, mu=1E-5, Tavg=277.15)
    48.92351786788815

    References
    ----------
    .. [1] Mohitpour, Mo, Golshan, and Allan Murray. Pipeline Design and
       Construction: A Practical Approach. 3rd edition. New York: Amer Soc
       Mechanical Engineers, 2006.
    .. [2] Menon, E. Shashi. Gas Pipeline Hydraulics. 1st edition. Boca Raton,
       FL: CRC Press, 2005.
    .. [3] Coelho, Paulo M., and Carlos Pinho. "Considerations about Equations
       for Steady State Flow in Natural Gas Pipelines." Journal of the
       Brazilian Society of Mechanical Sciences and Engineering 29, no. 3
       (September 2007): 262-73. doi:10.1590/S1678-58782007000300005.
    '''
    # Leading constant in base SI; correlation exponents.
    coeff = 24.62412451461407054875301709443930350550
    main_pow = 5/9.  # pressure-group power
    D_pow = 8/3.     # diameter power
    SG_pow = 4/9.    # specific-gravity power
    mu_pow = 1/9.    # viscosity power
    if L is not None and D is not None and P1 is not None and P2 is not None and Q is None:
        return coeff*Ts/Ps*E*((P1**2-P2**2)/Tavg/L/Zavg)**main_pow*D**D_pow/SG**SG_pow/mu**mu_pow
    if L is not None and Q is not None and P1 is not None and P2 is not None and D is None:
        return (Ps*Q*SG**SG_pow*mu**mu_pow*((P1**2 - P2**2)/(L*Tavg*Zavg))**(-main_pow)/(E*Ts*coeff))**(1./D_pow)
    if L is not None and Q is not None and D is not None and P2 is not None and P1 is None:
        flow_group = D**(-D_pow)*Ps*Q*SG**SG_pow*mu**mu_pow/(E*Ts*coeff)
        return sqrt(L*Tavg*Zavg*flow_group**(1/main_pow) + P2**2)
    if L is not None and Q is not None and D is not None and P1 is not None and P2 is None:
        flow_group = D**(-D_pow)*Ps*Q*SG**SG_pow*mu**mu_pow/(E*Ts*coeff)
        return sqrt(-L*Tavg*Zavg*flow_group**(1/main_pow) + P1**2)
    if P2 is not None and Q is not None and D is not None and P1 is not None and L is None:
        flow_group = D**(-D_pow)*Ps*Q*SG**SG_pow*mu**mu_pow/(E*Ts*coeff)
        return flow_group**(-1/main_pow)*(P1**2 - P2**2)/(Tavg*Zavg)
    raise ValueError('This function solves for either flow, upstream pressure, downstream pressure, diameter, or length; all other inputs must be provided.')
| 39.270062 | 198 | 0.62564 |
from __future__ import division
from math import sqrt, log, pi, exp, isinf
from fluids.constants import R
from fluids.numerics import secant, newton, ridder, lambertw
__all__ = ['Panhandle_A', 'Panhandle_B', 'Weymouth', 'Spitzglass_high',
'Spitzglass_low', 'Oliphant', 'Fritzsche', 'Muller', 'IGT', 'isothermal_gas',
'isothermal_work_compression', 'polytropic_exponent',
'isentropic_work_compression', 'isentropic_efficiency',
'isentropic_T_rise_compression', 'T_critical_flow',
'P_critical_flow', 'P_isothermal_critical_flow',
'is_critical_flow', 'stagnation_energy', 'P_stagnation',
'T_stagnation', 'T_stagnation_ideal']
def isothermal_work_compression(P1, P2, T, Z=1.0):
    """Ideal work of isothermal compression from `P1` to `P2` [Pa] at
    temperature `T` [K], per mole of gas: Z*R*T*ln(P2/P1), [J/mol]."""
    pressure_ratio = P2/P1
    return Z*R*T*log(pressure_ratio)
def isentropic_work_compression(T1, k, Z=1.0, P1=None, P2=None, W=None, eta=None):
    """Isentropic (adiabatic) compression-work relation; given three of
    `P1`, `P2`, `W`, `eta` (plus inlet temperature `T1`, isentropic
    exponent `k`, and compressibility `Z`), returns the fourth.
    Work is per mole, [J/mol]; pressures in [Pa]."""
    if W is None and eta is not None and P1 is not None and P2 is not None:
        return k/(k - 1.0)*Z*R*T1*((P2/P1)**((k-1.)/k) - 1.0)/eta
    if P1 is None and eta is not None and W is not None and P2 is not None:
        return P2*(1.0 + W*eta/(R*T1*Z) - W*eta/(R*T1*Z*k))**(-k/(k - 1.0))
    if P2 is None and eta is not None and W is not None and P1 is not None:
        return P1*(1.0 + W*eta/(R*T1*Z) - W*eta/(R*T1*Z*k))**(k/(k - 1.0))
    if eta is None and P1 is not None and P2 is not None and W is not None:
        return R*T1*Z*k*((P2/P1)**((k - 1.0)/k) - 1.0)/(W*(k - 1.0))
    raise ValueError('Three of W, P1, P2, and eta must be specified.')
def isentropic_T_rise_compression(T1, P1, P2, k, eta=1):
    """Outlet temperature [K] after compressing gas from `P1` to `P2`
    with isentropic exponent `k` and isentropic efficiency `eta`."""
    temperature_rise = T1*((P2/P1)**((k - 1.0)/k) - 1.0)/eta
    return T1 + temperature_rise
def isentropic_efficiency(P1, P2, k, eta_s=None, eta_p=None):
    """Convert between isentropic (`eta_s`) and polytropic (`eta_p`)
    compression efficiencies for a pressure change `P1` -> `P2` with
    isentropic exponent `k`; supply exactly one of the two."""
    if eta_s is None and eta_p is not None:
        # polytropic -> isentropic
        return ((P2/P1)**((k-1.0)/k)-1.0)/((P2/P1)**((k-1.0)/(k*eta_p))-1.0)
    if eta_p is None and eta_s is not None:
        # isentropic -> polytropic
        return (k - 1.0)*log(P2/P1)/(k*log(
            (eta_s + (P2/P1)**((k - 1.0)/k) - 1.0)/eta_s))
    raise ValueError('Either eta_s or eta_p is required')
def polytropic_exponent(k, n=None, eta_p=None):
    """Convert between polytropic exponent `n` and polytropic efficiency
    `eta_p` for a gas with isentropic exponent `k`; supply exactly one."""
    if n is None and eta_p is not None:
        return k*eta_p/(1.0 - k*(1.0 - eta_p))
    if eta_p is None and n is not None:
        return n*(k - 1.0)/(k*(n - 1.0))
    raise ValueError('Either n or eta_p is required')
def T_critical_flow(T, k):
    """Temperature at critical (choked) flow conditions for stagnation
    temperature `T` [K] and isentropic exponent `k`: T*2/(k+1)."""
    return 2.0*T/(k + 1.0)
def P_critical_flow(P, k):
    """Pressure at critical (choked) flow conditions for stagnation
    pressure `P` [Pa] and isentropic exponent `k`."""
    critical_ratio = (2.0/(k + 1.))**(k/(k - 1.0))
    return P*critical_ratio
def P_isothermal_critical_flow(P, fd, D, L):
    """Critical (choking) outlet pressure [Pa] for isothermal gas flow
    at inlet pressure `P` in a pipe of diameter `D`, length `L`, with
    Darcy friction factor `fd`."""
    # The physical solution lies on the k=-1 real branch of Lambert W.
    w_arg = -exp((-D - L*fd)/D)
    lambert_term = float((lambertw(w_arg, -1)).real)
    return P*exp((D*(lambert_term + 1.0) + L*fd)/(2.0*D))
def P_upstream_isothermal_critical_flow(P, fd, D, L):
    """Upstream pressure [Pa] at which isothermal gas flow becomes choked
    for outlet pressure `P`, Darcy friction factor `fd`, diameter `D`,
    and length `L`; inverse of `P_isothermal_critical_flow`."""
    # k=-1 real branch of Lambert W is the physical one.
    w_term = float(lambertw(-exp(-(fd*L+D)/D), -1).real)
    return exp(-0.5*(D*w_term+fd*L+D)/D)*P
def is_critical_flow(P1, P2, k):
    """True if ideal-gas flow expanding from `P1` to `P2` [Pa] with
    isentropic exponent `k` is choked (P2 below the critical pressure)."""
    return P2 < P_critical_flow(P1, k)
def stagnation_energy(V):
    """Kinetic energy per unit mass of a stream at velocity `V` [m/s],
    V^2/2, [J/kg]."""
    return 0.5*V*V
def P_stagnation(P, T, Tst, k):
    """Stagnation pressure [Pa] from static pressure `P`, static
    temperature `T`, stagnation temperature `Tst`, and exponent `k`."""
    temperature_ratio = Tst/T
    return P*temperature_ratio**(k/(k - 1.0))
def T_stagnation(T, P, Pst, k):
    """Stagnation temperature [K] from static temperature `T`, static
    pressure `P`, stagnation pressure `Pst`, and exponent `k`."""
    pressure_ratio = Pst/P
    return T*pressure_ratio**((k - 1.0)/k)
def T_stagnation_ideal(T, V, Cp):
    """Ideal-gas stagnation temperature [K]: static temperature `T` plus
    the kinetic-energy rise V^2/(2*Cp) for heat capacity `Cp` [J/kg/K]."""
    kinetic_rise = 0.5*V*V/Cp
    return T + kinetic_rise
def isothermal_gas_err_P1(P1, fd, rho, P2, L, D, m):
    """Mass-flow residual used when solving `isothermal_gas` for `P1`."""
    predicted = isothermal_gas(rho, fd, P1=P1, P2=P2, L=L, D=D)
    return m - predicted
def isothermal_gas_err_P2(P2, rho, fd, P1, L, D, m):
    """Mass-flow residual used when solving `isothermal_gas` for `P2`."""
    predicted = isothermal_gas(rho, fd, P1=P1, P2=P2, L=L, D=D)
    return m - predicted
def isothermal_gas_err_P2_basis(P1, P2, rho, fd, m, L, D):
    """Absolute outlet-pressure residual: solve `isothermal_gas` for P2
    at trial `P1` and compare with the target `P2`."""
    P2_implied = isothermal_gas(rho, fd, m=m, P1=P1, P2=None, L=L, D=D)
    return abs(P2 - P2_implied)
def isothermal_gas_err_D(D, m, rho, fd, P1, P2, L):
    """Mass-flow residual used when solving `isothermal_gas` for `D`."""
    predicted = isothermal_gas(rho, fd, P1=P1, P2=P2, L=L, D=D)
    return m - predicted
def isothermal_gas(rho, fd, P1=None, P2=None, L=None, D=None, m=None):
    r'''Solve the isothermal compressible gas flow equation in a pipe for
    one unknown among mass flow `m`, inlet pressure `P1`, outlet pressure
    `P2`, length `L`, or diameter `D`; the other four must be provided.

    Parameters
    ----------
    rho : float
        Average density of the gas in the pipe, [kg/m^3]
    fd : float
        Darcy friction factor, [-]
    P1 : float, optional
        Inlet pressure to the pipe, [Pa]
    P2 : float, optional
        Outlet pressure of the pipe, [Pa]
    L : float, optional
        Length of the pipe, [m]
    D : float, optional
        Diameter of the pipe, [m]
    m : float, optional
        Mass flow rate of gas through the pipe, [kg/s]

    Returns
    -------
    m, P1, P2, L, or D : float
        The input which was not specified, [base SI]

    Raises
    ------
    ValueError
        If the outlet pressure is below the choked-flow limit or above the
        inlet pressure, if the requested flow rate cannot be achieved, or
        if too few inputs were given.
    '''
    if m is None and P1 is not None and P2 is not None and L is not None and D is not None:
        # Direct solution for mass flow; first verify the flow is physical.
        Pcf = P_isothermal_critical_flow(P=P1, fd=fd, D=D, L=L)
        if P2 < Pcf:
            raise ValueError('Given outlet pressure is not physically possible '
                             'due to the formation of choked flow at P2=%f, specified outlet pressure was %f' % (Pcf, P2))
        elif P2 > P1:
            # BUG FIX: this guard line had been corrupted to `1:` in this
            # copy; restored the backwards-flow check.
            raise ValueError('Specified outlet pressure is larger than the '
                             'inlet pressure; fluid will flow backwards.')
        return sqrt(0.0625*pi*pi*D**4*rho/(P1*(fd*L/D + 2.0*log(P1/P2)))*(P1*P1 - P2*P2))
    elif L is None and P1 is not None and P2 is not None and D is not None and m is not None:
        # Closed-form rearrangement for length.
        return D*(pi*pi*D**4*rho*(P1*P1 - P2*P2) - 32.0*P1*m*m*log(P1/P2))/(16.0*P1*fd*m*m)
    elif P1 is None and L is not None and P2 is not None and D is not None and m is not None:
        Pcf = P_upstream_isothermal_critical_flow(P=P2, fd=fd, D=D, L=L)
        try:
            # Fast secant solve first; must land at P1 >= P2 to be physical.
            P1 = secant(isothermal_gas_err_P2_basis, (P2+Pcf)/2., args=(P2, rho, fd, m, L, D))
            if not (P2 <= P1):
                raise ValueError("Failed")
            return P1
        except:  # deliberate broad except: any solver failure falls through
            try:
                # Bracketed fallback between the outlet and choking pressures.
                return ridder(isothermal_gas_err_P1, a=P2, b=Pcf, args=(fd, rho, P2, L, D, m))
            except:
                m_max = isothermal_gas(rho, fd, P1=Pcf, P2=P2, L=L, D=D) # numba: delete
                raise ValueError('The desired mass flow rate of %f kg/s cannot ' # numba: delete
                                 'be achieved with the specified downstream pressure; the maximum flowrate is ' # numba: delete
                                 '%f kg/s at an upstream pressure of %f Pa' %(m, m_max, Pcf)) # numba: delete
                # raise ValueError("Failed") # numba: uncomment
    elif P2 is None and L is not None and P1 is not None and D is not None and m is not None:
        try:
            Pcf = P_isothermal_critical_flow(P=P1, fd=fd, D=D, L=L)
            m_max = isothermal_gas(rho, fd, P1=P1, P2=Pcf, L=L, D=D)
            if not (m <= m_max):
                raise ValueError("Failed")
            C = fd*L/D
            B = (pi/4*D**2)**2*rho
            arg = -B/m**2*P1*exp(-(-C*m**2+B*P1)/m**2)
            # Consider the two real branches of the lambertw function.
            # The k=-1 branch produces the higher P2 values; the k=0 branch is
            # physically impossible.
            lambert_ans = float(lambertw(arg, k=-1).real)
            # Large overflow problem here; also divide by zero problems!
            # Fail and try a numerical solution if it doesn't work.
            if isinf(lambert_ans):
                raise ValueError("Should not be infinity")
            P2 = P1/exp((-C*m**2+lambert_ans*m**2+B*P1)/m**2/2.)
            if not (P2 < P1):
                raise ValueError("Should not be the case")
            return P2
        except:  # analytical route failed (overflow etc.); solve numerically
            Pcf = P_isothermal_critical_flow(P=P1, fd=fd, D=D, L=L)
            try:
                return ridder(isothermal_gas_err_P2, a=Pcf, b=P1, args=(rho, fd, P1, L, D, m))
            except:
                m_max = isothermal_gas(rho, fd, P1=P1, P2=Pcf, L=L, D=D)
                raise ValueError('The desired mass flow rate cannot be achieved '
                                 'with the specified upstream pressure of %f Pa; the maximum flowrate is %f '
                                 'kg/s at a downstream pressure of %f' %(P1, m_max, Pcf))
    elif D is None and P2 is not None and P1 is not None and L is not None and m is not None:
        # No closed form for diameter; solve numerically.
        return secant(isothermal_gas_err_D, 0.1, args=(m, rho, fd, P1, P2, L))
    else:
        raise ValueError('This function solves for either mass flow, upstream '
                         'pressure, downstream pressure, diameter, or length; all other inputs '
                         'must be provided.')
def Panhandle_A(SG, Tavg, L=None, D=None, P1=None, P2=None, Q=None, Ts=288.7,
                Ps=101325., Zavg=1.0, E=0.92):
    """Panhandle A pipeline gas-flow correlation in base SI units; solves
    for whichever one of `Q`, `D`, `P1`, `P2`, or `L` was not supplied."""
    # Correlation exponents and SI leading coefficient.
    Ts_pow = 1.0788
    SG_pow = 0.8539
    main_pow = 0.5394
    D_pow = 2.6182
    coeff = 158.0205328706957220332831680508433862787
    if L is not None and D is not None and P1 is not None and P2 is not None and Q is None:
        return coeff*E*(Ts/Ps)**Ts_pow*((P1**2 - P2**2)/(L*SG**SG_pow*Tavg*Zavg))**main_pow*D**D_pow
    if L is not None and Q is not None and P1 is not None and P2 is not None and D is None:
        return (Q*(Ts/Ps)**(-Ts_pow)*(SG**(-SG_pow)*(P1**2 - P2**2)/(L*Tavg*Zavg))**(-main_pow)/(E*coeff))**(1./D_pow)
    if L is not None and Q is not None and D is not None and P2 is not None and P1 is None:
        flow_group = D**(-D_pow)*Q*(Ts/Ps)**(-Ts_pow)/(E*coeff)
        return sqrt(L*SG**SG_pow*Tavg*Zavg*flow_group**(1./main_pow) + P2**2)
    if L is not None and Q is not None and D is not None and P1 is not None and P2 is None:
        flow_group = D**(-D_pow)*Q*(Ts/Ps)**(-Ts_pow)/(E*coeff)
        return sqrt(-L*SG**SG_pow*Tavg*Zavg*flow_group**(1./main_pow) + P1**2)
    if P2 is not None and Q is not None and D is not None and P1 is not None and L is None:
        flow_group = D**(-D_pow)*Q*(Ts/Ps)**(-Ts_pow)/(E*coeff)
        return SG**(-SG_pow)*flow_group**(-1./main_pow)*(P1**2 - P2**2)/(Tavg*Zavg)
    raise ValueError('This function solves for either flow, upstream '
                     'pressure, downstream pressure, diameter, or length; '
                     'all other inputs must be provided.')
def Panhandle_B(SG, Tavg, L=None, D=None, P1=None, P2=None, Q=None, Ts=288.7,
                Ps=101325., Zavg=1.0, E=0.92):
    """Panhandle B pipeline gas-flow correlation in base SI units; solves
    for whichever one of `Q`, `D`, `P1`, `P2`, or `L` was not supplied."""
    # Correlation exponents and SI leading coefficient.
    Ts_pow = 1.02
    SG_pow = 0.961
    main_pow = 0.51
    D_pow = 2.53
    coeff = 152.8811634298055458624385985866624419060
    if L is not None and D is not None and P1 is not None and P2 is not None and Q is None:
        return coeff*E*(Ts/Ps)**Ts_pow*((P1**2 - P2**2)/(L*SG**SG_pow*Tavg*Zavg))**main_pow*D**D_pow
    if L is not None and Q is not None and P1 is not None and P2 is not None and D is None:
        return (Q*(Ts/Ps)**(-Ts_pow)*(SG**(-SG_pow)*(P1**2 - P2**2)/(L*Tavg*Zavg))**(-main_pow)/(E*coeff))**(1./D_pow)
    if L is not None and Q is not None and D is not None and P2 is not None and P1 is None:
        flow_group = D**(-D_pow)*Q*(Ts/Ps)**(-Ts_pow)/(E*coeff)
        return sqrt(L*SG**SG_pow*Tavg*Zavg*flow_group**(1./main_pow) + P2**2)
    if L is not None and Q is not None and D is not None and P1 is not None and P2 is None:
        flow_group = D**(-D_pow)*Q*(Ts/Ps)**(-Ts_pow)/(E*coeff)
        return sqrt(-L*SG**SG_pow*Tavg*Zavg*flow_group**(1./main_pow) + P1**2)
    if P2 is not None and Q is not None and D is not None and P1 is not None and L is None:
        flow_group = D**(-D_pow)*Q*(Ts/Ps)**(-Ts_pow)/(E*coeff)
        return SG**(-SG_pow)*flow_group**(-1./main_pow)*(P1**2 - P2**2)/(Tavg*Zavg)
    raise ValueError('This function solves for either flow, upstream '
                     'pressure, downstream pressure, diameter, or length; '
                     'all other inputs must be provided.')
def Weymouth(SG, Tavg, L=None, D=None, P1=None, P2=None, Q=None, Ts=288.7,
             Ps=101325., Zavg=1.0, E=0.92):
    """Weymouth pipeline gas-flow correlation in base SI units; solves
    for whichever one of `Q`, `D`, `P1`, `P2`, or `L` was not supplied."""
    # Correlation exponents and SI leading coefficient.
    main_pow = 0.5
    D_pow = 2.667
    coeff = 137.3295809942512546732179684618143090992
    if L is not None and D is not None and P1 is not None and P2 is not None and Q is None:
        return coeff*E*(Ts/Ps)*((P1**2 - P2**2)/(L*SG*Tavg*Zavg))**main_pow*D**D_pow
    if L is not None and Q is not None and P1 is not None and P2 is not None and D is None:
        return (Ps*Q*((P1**2 - P2**2)/(L*SG*Tavg*Zavg))**(-main_pow)/(E*Ts*coeff))**(1./D_pow)
    if L is not None and Q is not None and D is not None and P2 is not None and P1 is None:
        flow_group = D**(-D_pow)*Ps*Q/(E*Ts*coeff)
        return sqrt(L*SG*Tavg*Zavg*flow_group**(1./main_pow) + P2**2)
    if L is not None and Q is not None and D is not None and P1 is not None and P2 is None:
        flow_group = D**(-D_pow)*Ps*Q/(E*Ts*coeff)
        return sqrt(-L*SG*Tavg*Zavg*flow_group**(1./main_pow) + P1**2)
    if P2 is not None and Q is not None and D is not None and P1 is not None and L is None:
        flow_group = D**(-D_pow)*Ps*Q/(E*Ts*coeff)
        return flow_group**(-1./main_pow)*(P1**2 - P2**2)/(SG*Tavg*Zavg)
    raise ValueError('This function solves for either flow, upstream '
                     'pressure, downstream pressure, diameter, or length; '
                     'all other inputs must be provided.')
def _to_solve_Spitzglass_high(D, Q, SG, Tavg, L, P1, P2, Ts, Ps, Zavg, E):
    """Residual for the diameter solve: target flow `Q` minus the
    Spitzglass (high pressure) prediction at trial diameter `D`."""
    predicted = Spitzglass_high(SG=SG, Tavg=Tavg, L=L, D=D, P1=P1, P2=P2,
                                Ts=Ts, Ps=Ps, Zavg=Zavg, E=E)
    return Q - predicted
def Spitzglass_high(SG, Tavg, L=None, D=None, P1=None, P2=None, Q=None, Ts=288.7,
                    Ps=101325., Zavg=1.0, E=1.):
    """Spitzglass (high pressure) pipeline gas-flow correlation in base SI
    units; solves for whichever one of `Q`, `D`, `P1`, `P2`, or `L` was
    not supplied. The diameter case is solved numerically."""
    # Friction-term constants (SI conversions) and leading coefficient.
    slope = 1.181102362204724409448818897637795275591   # 0.03/inch, [1/m]
    offset = 0.09144                                    # 3.6 inch, [m]
    coeff = 125.1060
    if L is not None and D is not None and P1 is not None and P2 is not None and Q is None:
        return (coeff*E*Ts/Ps*D**2.5*sqrt((P1**2-P2**2)
                /(L*SG*Zavg*Tavg*(1 + offset/D + slope*D))))
    if L is not None and Q is not None and P1 is not None and P2 is not None and D is None:
        return secant(_to_solve_Spitzglass_high, 0.5, args=(Q, SG, Tavg, L, P1, P2, Ts, Ps, Zavg, E))
    if L is not None and Q is not None and D is not None and P2 is not None and P1 is None:
        return sqrt((D**6*E**2*P2**2*Ts**2*coeff**2
                     + D**2*L*Ps**2*Q**2*SG*Tavg*Zavg*slope
                     + D*L*Ps**2*Q**2*SG*Tavg*Zavg
                     + L*Ps**2*Q**2*SG*Tavg*Zavg*offset)/(D**6*E**2*Ts**2*coeff**2))
    if L is not None and Q is not None and D is not None and P1 is not None and P2 is None:
        return sqrt((D**6*E**2*P1**2*Ts**2*coeff**2
                     - D**2*L*Ps**2*Q**2*SG*Tavg*Zavg*slope
                     - D*L*Ps**2*Q**2*SG*Tavg*Zavg
                     - L*Ps**2*Q**2*SG*Tavg*Zavg*offset)/(D**6*E**2*Ts**2*coeff**2))
    if P2 is not None and Q is not None and D is not None and P1 is not None and L is None:
        return (D**6*E**2*Ts**2*coeff**2*(P1**2 - P2**2)
                /(Ps**2*Q**2*SG*Tavg*Zavg*(D**2*slope + D + offset)))
    raise ValueError('This function solves for either flow, upstream '
                     'pressure, downstream pressure, diameter, or length; '
                     'all other inputs must be provided.')
def _to_solve_Spitzglass_low(D, Q, SG, Tavg, L, P1, P2, Ts, Ps, Zavg, E):
    """Residual for the diameter solve: target flow `Q` minus the
    Spitzglass (low pressure) prediction at trial diameter `D`."""
    predicted = Spitzglass_low(SG=SG, Tavg=Tavg, L=L, D=D, P1=P1, P2=P2,
                               Ts=Ts, Ps=Ps, Zavg=Zavg, E=E)
    return Q - predicted
def Spitzglass_low(SG, Tavg, L=None, D=None, P1=None, P2=None, Q=None, Ts=288.7,
                   Ps=101325., Zavg=1.0, E=1.):
    """Spitzglass (low pressure) pipeline gas-flow correlation in base SI
    units; solves for whichever one of `Q`, `D`, `P1`, `P2`, or `L` was
    not supplied. The diameter case is solved numerically."""
    # Friction-term constants (SI conversions) and leading coefficient.
    slope = 1.181102362204724409448818897637795275591   # 0.03/inch, [1/m]
    offset = 0.09144                                    # 3.6 inch, [m]
    coeff = 125.1060
    if L is not None and D is not None and P1 is not None and P2 is not None and Q is None:
        return coeff*Ts/Ps*D**2.5*E*sqrt(((P1-P2)*2*(Ps+1210.))/(L*SG*Tavg*Zavg*(1 + offset/D + slope*D)))
    if L is not None and Q is not None and P1 is not None and P2 is not None and D is None:
        return secant(_to_solve_Spitzglass_low, 0.5, args=(Q, SG, Tavg, L, P1, P2, Ts, Ps, Zavg, E))
    if L is not None and Q is not None and D is not None and P2 is not None and P1 is None:
        return 0.5*(2.0*D**6*E**2*P2*Ts**2*coeff**2*(Ps + 1210.0) + D**2*L*Ps**2*Q**2*SG*Tavg*Zavg*slope + D*L*Ps**2*Q**2*SG*Tavg*Zavg + L*Ps**2*Q**2*SG*Tavg*Zavg*offset)/(D**6*E**2*Ts**2*coeff**2*(Ps + 1210.0))
    if L is not None and Q is not None and D is not None and P1 is not None and P2 is None:
        return 0.5*(2.0*D**6*E**2*P1*Ts**2*coeff**2*(Ps + 1210.0) - D**2*L*Ps**2*Q**2*SG*Tavg*Zavg*slope - D*L*Ps**2*Q**2*SG*Tavg*Zavg - L*Ps**2*Q**2*SG*Tavg*Zavg*offset)/(D**6*E**2*Ts**2*coeff**2*(Ps + 1210.0))
    if P2 is not None and Q is not None and D is not None and P1 is not None and L is None:
        return 2.0*D**6*E**2*Ts**2*coeff**2*(P1*Ps + 1210.0*P1 - P2*Ps - 1210.0*P2)/(Ps**2*Q**2*SG*Tavg*Zavg*(D**2*slope + D + offset))
    raise ValueError('This function solves for either flow, upstream '
                     'pressure, downstream pressure, diameter, or length; '
                     'all other inputs must be provided.')
def _to_solve_Oliphant(D, Q, SG, Tavg, L, P1, P2, Ts, Ps, Zavg, E):
    """Residual for the diameter solve: target flow `Q` minus the Oliphant
    prediction at trial diameter `D`."""
    predicted = Oliphant(SG=SG, Tavg=Tavg, L=L, D=D, P1=P1, P2=P2, Ts=Ts,
                         Ps=Ps, Zavg=Zavg, E=E)
    return Q - predicted
def Oliphant(SG, Tavg, L=None, D=None, P1=None, P2=None, Q=None, Ts=288.7,
             Ps=101325., Zavg=1.0, E=0.92):
    """Oliphant pipeline gas-flow correlation in base SI units; solves for
    whichever one of `Q`, `D`, `P1`, `P2`, or `L` was not supplied. The
    diameter case is solved numerically."""
    # SI conversions of the original imperial constants.
    K1 = 84.587176139918568651410168968141078948974609375000
    K2 = 0.2091519350460528670065940559652517549694  # 1/(30.*0.0254**0.5)
    if L is not None and D is not None and P1 is not None and P2 is not None and Q is None:
        diameter_group = D**2.5 + K2*D**3
        return K1*diameter_group*Ts/Ps*sqrt((P1**2-P2**2)/(L*SG*Tavg))
    if L is not None and Q is not None and P1 is not None and P2 is not None and D is None:
        return secant(_to_solve_Oliphant, 0.5, args=(Q, SG, Tavg, L, P1, P2, Ts, Ps, Zavg, E))
    if L is not None and Q is not None and D is not None and P2 is not None and P1 is None:
        return sqrt(L*Ps**2*Q**2*SG*Tavg/(Ts**2*K1**2*(D**3*K2 + D**2.5)**2) + P2**2)
    if L is not None and Q is not None and D is not None and P1 is not None and P2 is None:
        return sqrt(-L*Ps**2*Q**2*SG*Tavg/(Ts**2*K1**2*(D**3*K2 + D**2.5)**2) + P1**2)
    if P2 is not None and Q is not None and D is not None and P1 is not None and L is None:
        return Ts**2*K1**2*(P1**2 - P2**2)*(D**3*K2 + D**2.5)**2/(Ps**2*Q**2*SG*Tavg)
    raise ValueError('This function solves for either flow, upstream '
                     'pressure, downstream pressure, diameter, or length; '
                     'all other inputs must be provided.')
def Fritzsche(SG, Tavg, L=None, D=None, P1=None, P2=None, Q=None, Ts=288.7,
              Ps=101325., Zavg=1.0, E=1.0):
    """Fritzsche pipeline gas-flow correlation in base SI units; solves
    for whichever one of `Q`, `D`, `P1`, `P2`, or `L` was not supplied."""
    # Correlation exponents and SI leading coefficient.
    coeff = 93.50009798751128188757518688244137811221  # 14135*10**(57/125)/432
    SG_pow = 0.8587
    main_pow = 0.538
    D_pow = 2.69
    if L is not None and D is not None and P1 is not None and P2 is not None and Q is None:
        return coeff*E*(Ts/Ps)*((P1**2 - P2**2)/(SG**SG_pow*Tavg*L*Zavg))**main_pow*D**D_pow
    if L is not None and Q is not None and P1 is not None and P2 is not None and D is None:
        return (Ps*Q*(SG**(-SG_pow)*(P1**2 - P2**2)/(L*Tavg*Zavg))**(-main_pow)/(E*Ts*coeff))**(1./D_pow)
    if L is not None and Q is not None and D is not None and P2 is not None and P1 is None:
        flow_group = D**(-D_pow)*Ps*Q/(E*Ts*coeff)
        return sqrt(L*SG**SG_pow*Tavg*Zavg*flow_group**(1./main_pow) + P2**2)
    if L is not None and Q is not None and D is not None and P1 is not None and P2 is None:
        flow_group = D**(-D_pow)*Ps*Q/(E*Ts*coeff)
        return sqrt(-L*SG**SG_pow*Tavg*Zavg*flow_group**(1./main_pow) + P1**2)
    if P2 is not None and Q is not None and D is not None and P1 is not None and L is None:
        flow_group = D**(-D_pow)*Ps*Q/(E*Ts*coeff)
        return SG**(-SG_pow)*flow_group**(-1./main_pow)*(P1**2 - P2**2)/(Tavg*Zavg)
    raise ValueError('This function solves for either flow, upstream pressure, downstream pressure, diameter, or length; all other inputs must be provided.')
def Muller(SG, Tavg, mu, L=None, D=None, P1=None, P2=None, Q=None, Ts=288.7,
           Ps=101325., Zavg=1.0, E=1.0):
    """Solve the Muller natural-gas pipeline flow correlation.

    Exactly one of `L`, `D`, `P1`, `P2`, `Q` must be left as None; that
    unknown is computed from the remaining arguments and returned
    (SI units throughout).  `mu` is the gas viscosity.

    Raises
    ------
    ValueError
        If the supplied arguments do not leave exactly one unknown.
    """
    # Correlation constants: dimensional coefficient and exponents.
    c5 = 15.77439908642077352939746374951659525108
    c2 = 0.575
    c3 = 2.725
    c4 = 0.425
    c1 = 0.15
    # Identify the single quantity the caller wants solved for.
    unknowns = [name for name, value in
                (('Q', Q), ('D', D), ('P1', P1), ('P2', P2), ('L', L))
                if value is None]
    if unknowns == ['Q']:
        return c5*Ts/Ps*E*((P1**2-P2**2)/Tavg/L/Zavg)**c2*D**c3/SG**c4/mu**c1
    if unknowns == ['D']:
        return (Ps*Q*SG**c4*mu**c1*((P1**2 - P2**2)/(L*Tavg*Zavg))**(-c2)/(E*Ts*c5))**(1./c3)
    if unknowns == ['P1']:
        return sqrt(L*Tavg*Zavg*(D**(-c3)*Ps*Q*SG**c4*mu**c1/(E*Ts*c5))**(1/c2) + P2**2)
    if unknowns == ['P2']:
        return sqrt(-L*Tavg*Zavg*(D**(-c3)*Ps*Q*SG**c4*mu**c1/(E*Ts*c5))**(1/c2) + P1**2)
    if unknowns == ['L']:
        return (D**(-c3)*Ps*Q*SG**c4*mu**c1/(E*Ts*c5))**(-1/c2)*(P1**2 - P2**2)/(Tavg*Zavg)
    raise ValueError('This function solves for either flow, upstream pressure, downstream pressure, diameter, or length; all other inputs must be provided.')
def IGT(SG, Tavg, mu, L=None, D=None, P1=None, P2=None, Q=None, Ts=288.7,
        Ps=101325., Zavg=1.0, E=1.0):
    """Solve the IGT (Institute of Gas Technology) pipeline flow correlation.

    Exactly one of `L`, `D`, `P1`, `P2`, `Q` must be left as None; that
    unknown is computed from the remaining arguments and returned
    (SI units throughout).  `mu` is the gas viscosity.

    Raises
    ------
    ValueError
        If the supplied arguments do not leave exactly one unknown.
    """
    # Correlation constants: dimensional coefficient and exponents.
    c5 = 24.62412451461407054875301709443930350550
    c2 = 5/9.
    c3 = 8/3.
    c4 = 4/9.
    c1 = 1/9.
    # Identify the single quantity the caller wants solved for.
    unknowns = [name for name, value in
                (('Q', Q), ('D', D), ('P1', P1), ('P2', P2), ('L', L))
                if value is None]
    if unknowns == ['Q']:
        return c5*Ts/Ps*E*((P1**2-P2**2)/Tavg/L/Zavg)**c2*D**c3/SG**c4/mu**c1
    if unknowns == ['D']:
        return (Ps*Q*SG**c4*mu**c1*((P1**2 - P2**2)/(L*Tavg*Zavg))**(-c2)/(E*Ts*c5))**(1./c3)
    if unknowns == ['P1']:
        return sqrt(L*Tavg*Zavg*(D**(-c3)*Ps*Q*SG**c4*mu**c1/(E*Ts*c5))**(1/c2) + P2**2)
    if unknowns == ['P2']:
        return sqrt(-L*Tavg*Zavg*(D**(-c3)*Ps*Q*SG**c4*mu**c1/(E*Ts*c5))**(1/c2) + P1**2)
    if unknowns == ['L']:
        return (D**(-c3)*Ps*Q*SG**c4*mu**c1/(E*Ts*c5))**(-1/c2)*(P1**2 - P2**2)/(Tavg*Zavg)
    raise ValueError('This function solves for either flow, upstream pressure, downstream pressure, diameter, or length; all other inputs must be provided.')
| true | true |
f7343c31f576b59257d7b79941143a08aa3e5c0d | 7,758 | py | Python | code/supporting_functions.py | AhmedElshaarany/RoboND-Rover-Project | 9dad356d4585bb567ee436062afdd82d9d7eb4de | [
"MIT"
] | null | null | null | code/supporting_functions.py | AhmedElshaarany/RoboND-Rover-Project | 9dad356d4585bb567ee436062afdd82d9d7eb4de | [
"MIT"
] | null | null | null | code/supporting_functions.py | AhmedElshaarany/RoboND-Rover-Project | 9dad356d4585bb567ee436062afdd82d9d7eb4de | [
"MIT"
] | null | null | null | import numpy as np
import cv2
from PIL import Image
from io import BytesIO, StringIO
import base64
import time
# Define a function to convert telemetry strings to float independent of decimal convention
def convert_to_float(string_to_convert):
    """Convert a telemetry string to float, accepting either '.' or ','
    as the decimal separator.

    Fix: the original used ``np.float``, a deprecated alias removed in
    NumPy 1.24 (AttributeError on modern NumPy); the builtin ``float``
    behaves identically here.  ``replace`` is a no-op when there is no
    comma, so the two original branches collapse into one expression.
    """
    return float(string_to_convert.replace(',', '.'))
def update_rover(Rover, data):
    """Refresh ``Rover``'s state from one telemetry dictionary.

    On the first call (``Rover.start_time`` is None) the mission clock is
    started and the known sample positions/count are parsed; later calls
    only update the elapsed time.  Returns ``(Rover, image)`` where
    ``image`` is the decoded camera frame (also stored on ``Rover.img``).

    Fix: ``np.float``/``np.int`` are deprecated aliases removed in
    NumPy 1.24 -- replaced with the builtins (``np.int_`` for the
    coordinate arrays is still valid and kept).
    """
    # Initialize start time and sample positions on the first telemetry packet
    if Rover.start_time is None:
        Rover.start_time = time.time()
        Rover.total_time = 0
        # Sample coordinates arrive as ';'-separated strings, possibly with
        # ',' decimal separators depending on the system locale.
        samples_xpos = np.int_([convert_to_float(pos.strip()) for pos in data["samples_x"].split(';')])
        samples_ypos = np.int_([convert_to_float(pos.strip()) for pos in data["samples_y"].split(';')])
        Rover.samples_pos = (samples_xpos, samples_ypos)
        Rover.samples_to_find = int(data["sample_count"])
    # Or just update elapsed time
    else:
        tot_time = time.time() - Rover.start_time
        if np.isfinite(tot_time):
            Rover.total_time = tot_time
    # Print out the fields in the telemetry data dictionary
    print(data.keys())
    # The current speed of the rover in m/s
    Rover.vel = convert_to_float(data["speed"])
    # The current position of the rover
    Rover.pos = [convert_to_float(pos.strip()) for pos in data["position"].split(';')]
    # The current orientation of the rover (degrees, per the simulator)
    Rover.yaw = convert_to_float(data["yaw"])
    Rover.pitch = convert_to_float(data["pitch"])
    Rover.roll = convert_to_float(data["roll"])
    # The current throttle setting
    Rover.throttle = convert_to_float(data["throttle"])
    # The current steering angle
    Rover.steer = convert_to_float(data["steering_angle"])
    # Near sample flag
    Rover.near_sample = int(data["near_sample"])
    # Picking up flag
    Rover.picking_up = int(data["picking_up"])
    # Update number of rocks collected
    Rover.samples_collected = Rover.samples_to_find - int(data["sample_count"])

    print('speed =',Rover.vel, 'position =', Rover.pos, 'throttle =',
    Rover.throttle, 'steer_angle =', Rover.steer, 'near_sample:', Rover.near_sample,
    'picking_up:', data["picking_up"], 'sending pickup:', Rover.send_pickup,
    'total time:', Rover.total_time, 'samples remaining:', data["sample_count"],
    'samples collected:', Rover.samples_collected)
    # Get the current image from the center camera of the rover
    imgString = data["image"]
    image = Image.open(BytesIO(base64.b64decode(imgString)))
    Rover.img = np.asarray(image)
    # Return updated Rover and separate image for optional saving
    return Rover, image
# Define a function to create display output given worldmap results
def _encode_jpeg(frame):
    """JPEG-encode a uint8-convertible image array as a base64 string."""
    pil_img = Image.fromarray(frame.astype(np.uint8))
    buff = BytesIO()
    pil_img.save(buff, format="JPEG")
    return base64.b64encode(buff.getvalue()).decode("utf-8")


def create_output_images(Rover):
    """Build the two base64-encoded JPEG images sent back to the simulator.

    Returns ``(map_string, vision_string)``: the ground-truth map overlaid
    with the rover's worldmap plus score text, and the rover's current
    vision image.

    Fix: ``np.float`` is a deprecated alias removed in NumPy 1.24 --
    replaced with the builtin ``float``; also dropped the computed-but-
    unused ``bad_nav_pix`` statistic.
    """
    # Create a scaled map for plotting and clean up obs/nav pixels a bit
    if np.max(Rover.worldmap[:,:,2]) > 0:
        nav_pix = Rover.worldmap[:,:,2] > 0
        navigable = Rover.worldmap[:,:,2] * (255 / np.mean(Rover.worldmap[nav_pix, 2]))
    else:
        navigable = Rover.worldmap[:,:,2]
    if np.max(Rover.worldmap[:,:,0]) > 0:
        obs_pix = Rover.worldmap[:,:,0] > 0
        obstacle = Rover.worldmap[:,:,0] * (255 / np.mean(Rover.worldmap[obs_pix, 0]))
    else:
        obstacle = Rover.worldmap[:,:,0]
    # Where both channels fired, trust the navigable reading
    likely_nav = navigable >= obstacle
    obstacle[likely_nav] = 0
    plotmap = np.zeros_like(Rover.worldmap)
    plotmap[:, :, 0] = obstacle
    plotmap[:, :, 2] = navigable
    plotmap = plotmap.clip(0, 255)
    # Overlay obstacle and navigable terrain map with ground truth map
    map_add = cv2.addWeighted(plotmap, 1, Rover.ground_truth, 0.5, 0)
    # Check whether any rock detections are present in worldmap
    rock_world_pos = Rover.worldmap[:,:,1].nonzero()
    # If there are, step through the known sample positions
    # to confirm whether detections are real
    samples_located = 0
    if rock_world_pos[0].any():
        rock_size = 2
        for idx in range(len(Rover.samples_pos[0])):
            test_rock_x = Rover.samples_pos[0][idx]
            test_rock_y = Rover.samples_pos[1][idx]
            rock_sample_dists = np.sqrt((test_rock_x - rock_world_pos[1])**2 + \
                                        (test_rock_y - rock_world_pos[0])**2)
            # A detection within 3 meters of a known sample position counts
            # as a success; plot the known sample location on the map.
            if np.min(rock_sample_dists) < 3:
                samples_located += 1
                Rover.samples_located = samples_located
                map_add[test_rock_y-rock_size:test_rock_y+rock_size,
                        test_rock_x-rock_size:test_rock_x+rock_size, :] = 255
    # Calculate some statistics on the map results
    # Total number of pixels classified as navigable terrain
    tot_nav_pix = float(len(plotmap[:,:,2].nonzero()[0]))
    # Of those, how many correspond to ground truth pixels
    good_nav_pix = float(len(((plotmap[:,:,2] > 0) & (Rover.ground_truth[:,:,1] > 0)).nonzero()[0]))
    # Total number of map pixels in the ground truth
    tot_map_pix = float(len(Rover.ground_truth[:,:,1].nonzero()[0]))
    # Percentage of ground truth map successfully found
    perc_mapped = round(100*good_nav_pix/tot_map_pix, 1)
    # Good detections divided by all pixels called navigable
    if tot_nav_pix > 0:
        fidelity = round(100*good_nav_pix/tot_nav_pix, 1)
    else:
        fidelity = 0
    # Flip the map for plotting so that the y-axis points upward in the display
    map_add = np.flipud(map_add).astype(np.float32)
    # Add some text about map and rock sample detection results
    cv2.putText(map_add,"Time: "+str(np.round(Rover.total_time, 1))+' s', (0, 10),
                cv2.FONT_HERSHEY_COMPLEX, 0.4, (255, 255, 255), 1)
    cv2.putText(map_add,"Mapped: "+str(perc_mapped)+'%', (0, 25),
                cv2.FONT_HERSHEY_COMPLEX, 0.4, (255, 255, 255), 1)
    cv2.putText(map_add,"Fidelity: "+str(fidelity)+'%', (0, 40),
                cv2.FONT_HERSHEY_COMPLEX, 0.4, (255, 255, 255), 1)
    cv2.putText(map_add,"Rocks", (0, 55),
                cv2.FONT_HERSHEY_COMPLEX, 0.4, (255, 255, 255), 1)
    cv2.putText(map_add,"  Located: "+str(samples_located), (0, 70),
                cv2.FONT_HERSHEY_COMPLEX, 0.4, (255, 255, 255), 1)
    cv2.putText(map_add,"  Collected: "+str(Rover.samples_collected), (0, 85),
                cv2.FONT_HERSHEY_COMPLEX, 0.4, (255, 255, 255), 1)
    # Convert map and vision image to base64 strings for sending to server
    encoded_string1 = _encode_jpeg(map_add)
    encoded_string2 = _encode_jpeg(Rover.vision_image)

    return encoded_string1, encoded_string2
| 48.792453 | 107 | 0.62645 | import numpy as np
import cv2
from PIL import Image
from io import BytesIO, StringIO
import base64
import time
def convert_to_float(string_to_convert):
    """Convert a telemetry string to float, accepting either '.' or ','
    as the decimal separator.

    Fix: the original used ``np.float``, a deprecated alias removed in
    NumPy 1.24 (AttributeError on modern NumPy); the builtin ``float``
    behaves identically here.  ``replace`` is a no-op when there is no
    comma, so the two original branches collapse into one expression.
    """
    return float(string_to_convert.replace(',', '.'))
def update_rover(Rover, data):
    """Refresh ``Rover``'s state from one telemetry dictionary.

    On the first call (``Rover.start_time`` is None) the mission clock is
    started and the known sample positions/count are parsed; later calls
    only update the elapsed time.  Returns ``(Rover, image)`` where
    ``image`` is the decoded camera frame (also stored on ``Rover.img``).

    Fix: ``np.float``/``np.int`` are deprecated aliases removed in
    NumPy 1.24 -- replaced with the builtins (``np.int_`` for the
    coordinate arrays is still valid and kept).
    """
    # Initialize start time and sample positions on the first telemetry packet
    if Rover.start_time is None:
        Rover.start_time = time.time()
        Rover.total_time = 0
        # Sample coordinates arrive as ';'-separated strings, possibly with
        # ',' decimal separators depending on the system locale.
        samples_xpos = np.int_([convert_to_float(pos.strip()) for pos in data["samples_x"].split(';')])
        samples_ypos = np.int_([convert_to_float(pos.strip()) for pos in data["samples_y"].split(';')])
        Rover.samples_pos = (samples_xpos, samples_ypos)
        Rover.samples_to_find = int(data["sample_count"])
    # Or just update elapsed time
    else:
        tot_time = time.time() - Rover.start_time
        if np.isfinite(tot_time):
            Rover.total_time = tot_time
    # Print out the fields in the telemetry data dictionary
    print(data.keys())
    # Telemetry fields -> Rover state
    Rover.vel = convert_to_float(data["speed"])
    Rover.pos = [convert_to_float(pos.strip()) for pos in data["position"].split(';')]
    Rover.yaw = convert_to_float(data["yaw"])
    Rover.pitch = convert_to_float(data["pitch"])
    Rover.roll = convert_to_float(data["roll"])
    Rover.throttle = convert_to_float(data["throttle"])
    Rover.steer = convert_to_float(data["steering_angle"])
    Rover.near_sample = int(data["near_sample"])
    Rover.picking_up = int(data["picking_up"])
    # Update number of rocks collected
    Rover.samples_collected = Rover.samples_to_find - int(data["sample_count"])

    print('speed =',Rover.vel, 'position =', Rover.pos, 'throttle =',
    Rover.throttle, 'steer_angle =', Rover.steer, 'near_sample:', Rover.near_sample,
    'picking_up:', data["picking_up"], 'sending pickup:', Rover.send_pickup,
    'total time:', Rover.total_time, 'samples remaining:', data["sample_count"],
    'samples collected:', Rover.samples_collected)
    # Get the current image from the center camera of the rover
    imgString = data["image"]
    image = Image.open(BytesIO(base64.b64decode(imgString)))
    Rover.img = np.asarray(image)
    # Return updated Rover and separate image for optional saving
    return Rover, image
def _encode_jpeg(frame):
    """JPEG-encode a uint8-convertible image array as a base64 string."""
    pil_img = Image.fromarray(frame.astype(np.uint8))
    buff = BytesIO()
    pil_img.save(buff, format="JPEG")
    return base64.b64encode(buff.getvalue()).decode("utf-8")


def create_output_images(Rover):
    """Build the two base64-encoded JPEG images sent back to the simulator.

    Returns ``(map_string, vision_string)``: the ground-truth map overlaid
    with the rover's worldmap plus score text, and the rover's current
    vision image.

    Fix: ``np.float`` is a deprecated alias removed in NumPy 1.24 --
    replaced with the builtin ``float``; also dropped the computed-but-
    unused ``bad_nav_pix`` statistic.
    """
    # Create a scaled map for plotting and clean up obs/nav pixels a bit
    if np.max(Rover.worldmap[:,:,2]) > 0:
        nav_pix = Rover.worldmap[:,:,2] > 0
        navigable = Rover.worldmap[:,:,2] * (255 / np.mean(Rover.worldmap[nav_pix, 2]))
    else:
        navigable = Rover.worldmap[:,:,2]
    if np.max(Rover.worldmap[:,:,0]) > 0:
        obs_pix = Rover.worldmap[:,:,0] > 0
        obstacle = Rover.worldmap[:,:,0] * (255 / np.mean(Rover.worldmap[obs_pix, 0]))
    else:
        obstacle = Rover.worldmap[:,:,0]
    # Where both channels fired, trust the navigable reading
    likely_nav = navigable >= obstacle
    obstacle[likely_nav] = 0
    plotmap = np.zeros_like(Rover.worldmap)
    plotmap[:, :, 0] = obstacle
    plotmap[:, :, 2] = navigable
    plotmap = plotmap.clip(0, 255)
    # Overlay obstacle and navigable terrain map with ground truth map
    map_add = cv2.addWeighted(plotmap, 1, Rover.ground_truth, 0.5, 0)
    # Check whether any rock detections are present in worldmap
    rock_world_pos = Rover.worldmap[:,:,1].nonzero()
    # If there are, step through the known sample positions
    # to confirm whether detections are real
    samples_located = 0
    if rock_world_pos[0].any():
        rock_size = 2
        for idx in range(len(Rover.samples_pos[0])):
            test_rock_x = Rover.samples_pos[0][idx]
            test_rock_y = Rover.samples_pos[1][idx]
            rock_sample_dists = np.sqrt((test_rock_x - rock_world_pos[1])**2 + \
                                        (test_rock_y - rock_world_pos[0])**2)
            # A detection within 3 meters of a known sample position counts
            # as a success; plot the known sample location on the map.
            if np.min(rock_sample_dists) < 3:
                samples_located += 1
                Rover.samples_located = samples_located
                map_add[test_rock_y-rock_size:test_rock_y+rock_size,
                        test_rock_x-rock_size:test_rock_x+rock_size, :] = 255
    # Map statistics
    tot_nav_pix = float(len(plotmap[:,:,2].nonzero()[0]))
    good_nav_pix = float(len(((plotmap[:,:,2] > 0) & (Rover.ground_truth[:,:,1] > 0)).nonzero()[0]))
    tot_map_pix = float(len(Rover.ground_truth[:,:,1].nonzero()[0]))
    # Percentage of ground truth map successfully found
    perc_mapped = round(100*good_nav_pix/tot_map_pix, 1)
    # Good detections divided by all pixels called navigable
    if tot_nav_pix > 0:
        fidelity = round(100*good_nav_pix/tot_nav_pix, 1)
    else:
        fidelity = 0
    # Flip the map for plotting so that the y-axis points upward in the display
    map_add = np.flipud(map_add).astype(np.float32)
    # Add some text about map and rock sample detection results
    cv2.putText(map_add,"Time: "+str(np.round(Rover.total_time, 1))+' s', (0, 10),
                cv2.FONT_HERSHEY_COMPLEX, 0.4, (255, 255, 255), 1)
    cv2.putText(map_add,"Mapped: "+str(perc_mapped)+'%', (0, 25),
                cv2.FONT_HERSHEY_COMPLEX, 0.4, (255, 255, 255), 1)
    cv2.putText(map_add,"Fidelity: "+str(fidelity)+'%', (0, 40),
                cv2.FONT_HERSHEY_COMPLEX, 0.4, (255, 255, 255), 1)
    cv2.putText(map_add,"Rocks", (0, 55),
                cv2.FONT_HERSHEY_COMPLEX, 0.4, (255, 255, 255), 1)
    cv2.putText(map_add,"  Located: "+str(samples_located), (0, 70),
                cv2.FONT_HERSHEY_COMPLEX, 0.4, (255, 255, 255), 1)
    cv2.putText(map_add,"  Collected: "+str(Rover.samples_collected), (0, 85),
                cv2.FONT_HERSHEY_COMPLEX, 0.4, (255, 255, 255), 1)
    # Convert map and vision image to base64 strings for sending to server
    encoded_string1 = _encode_jpeg(map_add)
    encoded_string2 = _encode_jpeg(Rover.vision_image)

    return encoded_string1, encoded_string2
| true | true |
f7343d001b8ca38d7c2dca8af4bbbaa19f18099d | 12,159 | py | Python | Training/pytorch/train.py | QinchengZhang/PathologySegmentation | 7a2c21346739a79c33e7a7ccc081018821868eb7 | [
"MIT"
] | 3 | 2020-10-25T06:18:21.000Z | 2021-12-23T01:42:56.000Z | Training/pytorch/train.py | QinchengZhang/PathologySegmentation | 7a2c21346739a79c33e7a7ccc081018821868eb7 | [
"MIT"
] | null | null | null | Training/pytorch/train.py | QinchengZhang/PathologySegmentation | 7a2c21346739a79c33e7a7ccc081018821868eb7 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
'''
Author: TJUZQC
Date: 2020-10-26 10:26:51
LastEditors: TJUZQC
LastEditTime: 2020-11-20 19:23:55
Description: None
'''
import argparse
import logging
import os
import sys
import numpy as np
import torch
import torch.nn as nn
import yaml
from torch import optim
from torch.utils.data import DataLoader, random_split
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
from evaluation import eval_net
from models import ChooseModel, init_weights
from utils.dataset import BasicDataset
# Load the training configuration shipped next to this script.
# NOTE(review): yaml.load with FullLoader is acceptable for trusted local
# config, but yaml.safe_load would be the safer default.
conf = yaml.load(open(os.path.join(
    sys.path[0], 'config', 'config.yaml')), Loader=yaml.FullLoader)
dir_img = conf['DATASET']['IMGS_DIR']  # training images directory
dir_mask = conf['DATASET']['MASKS_DIR']  # ground-truth masks directory
dir_checkpoint = conf['MODEL']['CHECKPOINT_DIR']  # where checkpoints are written
def train_net(net,
              device,
              epochs=5,
              batch_size=16,
              lr=0.001,
              val_percent=0.1,
              save_cp=True,
              img_scale=0.5,
              use_apex=False,
              optimizer='adam',
              classes=2,
              lr_scheduler='steplr',
              lr_scheduler_cfgs: dict = None):
    """Train ``net`` on the dataset configured in config.yaml.

    Loss, validation metrics and sample images are logged to TensorBoard,
    and a checkpoint is written into ``dir_checkpoint`` after every epoch
    when ``save_cp`` is True.

    Args:
        net: model exposing ``n_channels`` and ``n_classes`` attributes.
        device: torch device to train on.
        epochs: number of passes over the training split.
        batch_size: mini-batch size for both loaders.
        lr: initial learning rate.
        val_percent: fraction (0-1) of the dataset held out for validation.
        save_cp: whether to save a checkpoint after each epoch.
        img_scale: image downscaling factor passed to ``BasicDataset``.
        use_apex: try NVIDIA apex O1 mixed precision; silently disabled
            when apex is not importable.
        optimizer: name of the torch optimizer to use (see ``optimizers``).
        classes: number of segmentation classes.
        lr_scheduler: name of the torch LR scheduler (case-insensitive).
        lr_scheduler_cfgs: keyword arguments forwarded to the scheduler
            constructor; defaults to ``{'step_size': 10}``.

    Raises:
        ValueError: if ``optimizer`` or ``lr_scheduler`` is not recognized.
    """
    # Avoid the mutable-default-argument pitfall while keeping the old default.
    if lr_scheduler_cfgs is None:
        lr_scheduler_cfgs = {'step_size': 10}

    dataset = BasicDataset(dir_img, dir_mask, img_scale,
                           train=True, classes=classes)
    n_val = int(len(dataset) * val_percent)
    n_train = len(dataset) - n_val
    train, val = random_split(dataset, [n_train, n_val])
    train_loader = DataLoader(
        train, batch_size=batch_size, shuffle=True, num_workers=8, pin_memory=True)
    val_loader = DataLoader(val, batch_size=batch_size,
                            shuffle=False, num_workers=8, pin_memory=True)

    writer = SummaryWriter(
        comment=f'LR_{lr}_BS_{batch_size}_SCALE_{img_scale}')
    global_step = 0

    logging.info(f'''Starting training:
        Epochs: {epochs}
        Batch size: {batch_size}
        Learning rate: {lr}
        Training size: {n_train}
        Validation size: {n_val}
        Checkpoints: {save_cp}
        Device: {device.type}
        Images scaling: {img_scale}
        Use apex: {use_apex}
    ''')
    optimizers = {
        'adadelta': optim.Adadelta,
        'adagrad': optim.Adagrad,
        'adam': optim.Adam,
        'adamw': optim.AdamW,
        'sparseadam': optim.SparseAdam,
        'adamax': optim.Adamax,
        'asgd': optim.ASGD,
        'lbfgs': optim.LBFGS,
        'rmsprop': optim.RMSprop,
        'rprop': optim.Rprop,
        'sgd': optim.SGD,
    }
    try:
        # Fail fast with a clear message; the previous
        # ``dict.get(..., None)(...)`` pattern raised an opaque TypeError
        # for unknown names.
        optimizer = optimizers[optimizer](
            net.parameters(), lr=lr, weight_decay=1e-8)
    except KeyError:
        raise ValueError(f'unknown optimizer type: {optimizer!r}') from None
    lr_scheduler_getter = {
        'lambdalr': torch.optim.lr_scheduler.LambdaLR,
        'multiplicativelr': torch.optim.lr_scheduler.MultiplicativeLR,
        'steplr': torch.optim.lr_scheduler.StepLR,
        'multisteplr': torch.optim.lr_scheduler.MultiStepLR,
        'exponentiallr': torch.optim.lr_scheduler.ExponentialLR,
        'cosineannealinglr': torch.optim.lr_scheduler.CosineAnnealingLR,
        'reducelronplateau': torch.optim.lr_scheduler.ReduceLROnPlateau,
        'cycliclr': torch.optim.lr_scheduler.CyclicLR,
        'onecyclelr': torch.optim.lr_scheduler.OneCycleLR,
    }
    try:
        lr_scheduler = lr_scheduler_getter[lr_scheduler.lower()](
            optimizer, **lr_scheduler_cfgs)
    except KeyError:
        raise ValueError(f'unknown lr scheduler type: {lr_scheduler!r}') from None
    if use_apex:
        try:
            from apex import amp
            net, optimizer = amp.initialize(net, optimizer, opt_level="O1")
        except ImportError as e:
            # apex is optional; fall back to full precision.
            print(e)
            use_apex = False

    # CrossEntropy for multi-class, BCE-with-logits for binary segmentation.
    if net.n_classes > 1:
        criterion = nn.CrossEntropyLoss()
    else:
        criterion = nn.BCEWithLogitsLoss()

    # Run validation roughly 10 times per epoch; max() guards against a
    # zero modulus (ZeroDivisionError) on small datasets.  This also
    # replaces the old unused dataset_len/a1/a2/b1/b2 dead code.
    val_interval = max(1, len(dataset) // (10 * batch_size))

    for epoch in range(epochs):
        net.train()

        epoch_loss = 0
        with tqdm(total=n_train, desc=f'Epoch {epoch + 1}/{epochs}', unit='img') as pbar:
            for batch in train_loader:
                imgs = batch['image']
                true_masks = batch['mask']
                assert imgs.shape[1] == net.n_channels, \
                    f'Network has been defined with {net.n_channels} input channels, ' \
                    f'but loaded images have {imgs.shape[1]} channels. Please check that ' \
                    'the images are loaded correctly.'

                imgs = imgs.to(device=device, dtype=torch.float32)
                mask_type = torch.float32 if net.n_classes == 1 else torch.long
                true_masks = true_masks.to(device=device, dtype=mask_type)
                if net.n_classes > 1:
                    # CrossEntropyLoss expects class indices of shape (B, H, W).
                    b, c, w, h = true_masks.shape
                    true_masks = true_masks.view(b, w, h)
                masks_pred = net(imgs)
                loss = criterion(masks_pred, true_masks)
                epoch_loss += loss.item()
                writer.add_scalar('Loss/train', loss.item(), global_step)

                pbar.set_postfix(**{'loss (batch)': loss.item()})

                optimizer.zero_grad()
                if not use_apex:
                    loss.backward()
                else:
                    # apex scales the loss for stable mixed-precision backprop.
                    with amp.scale_loss(loss, optimizer) as scaled_loss:
                        scaled_loss.backward()
                optimizer.step()

                pbar.update(imgs.shape[0])
                global_step += 1
                if global_step % val_interval == 0:
                    dice_coeff, pA, oA, precision, recall, f1score = eval_net(
                        net, val_loader, device, n_val)
                    if net.n_classes > 1:
                        logging.info(
                            'Validation cross entropy: {}'.format(dice_coeff))
                        writer.add_scalar('Loss/test', dice_coeff, global_step)
                    else:
                        logging.info(
                            'Validation Dice Coeff: {}'.format(dice_coeff))
                        writer.add_scalar('Dice/test', dice_coeff, global_step)
                        logging.info(
                            'Validation Pixel Accuracy: {}'.format(pA))
                        writer.add_scalar('pA/test', pA, global_step)
                        logging.info(
                            'Validation Overall Accuracy: {}'.format(oA))
                        writer.add_scalar('oA/test', oA, global_step)
                        logging.info(
                            'Validation Precision: {}'.format(precision))
                        writer.add_scalar('precision/test',
                                          precision, global_step)
                        logging.info(
                            'Validation Recall: {}'.format(recall))
                        writer.add_scalar('recall/test', recall, global_step)
                        logging.info(
                            'Validation F1-score: {}'.format(f1score))
                        writer.add_scalar(
                            'F1-score/test', f1score, global_step)
                    writer.add_images('images', imgs, global_step)
                    if net.n_classes == 1:
                        writer.add_images(
                            'masks/true', true_masks, global_step)
                        writer.add_images(
                            'masks/pred', torch.sigmoid(masks_pred) > 0.5, global_step)
        # NOTE(review): ReduceLROnPlateau.step() expects a metric argument;
        # selecting it via the config would crash here -- confirm intended.
        lr_scheduler.step()
        if save_cp:
            try:
                os.mkdir(dir_checkpoint)
                logging.info('Created checkpoint directory')
            except OSError:
                # Directory already exists -- keep going.
                pass
            torch.save(net.state_dict(),
                       os.path.join(dir_checkpoint, f'CP_epoch{epoch + 1}_loss_{str(loss.item())}.pth'))
            logging.info(
                f'Checkpoint {epoch + 1} saved ! loss (batch) = ' + str(loss.item()))
    writer.close()
def get_args():
    """Build and evaluate the command-line interface.

    Every option falls back to the corresponding value in config.yaml,
    so the script can run with no arguments at all.
    """
    parser = argparse.ArgumentParser(description='Train the UNet on images and target masks',
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # (flags, keyword arguments) for every supported option, in display order.
    option_table = [
        (('-n', '--network'),
         dict(metavar='NETWORK', type=str, default=conf['MODEL']['MODEL_NAME'],
              help='network type', dest='network')),
        (('-e', '--epochs'),
         dict(metavar='E', type=int, default=conf['NUM_EPOCHS'],
              help='Number of epochs', dest='epochs')),
        (('-b', '--batch-size'),
         dict(metavar='B', type=int, nargs='?', default=conf['BATCH_SIZE'],
              help='Batch size', dest='batchsize')),
        (('-l', '--learning-rate'),
         dict(metavar='LR', type=float, nargs='?', default=conf['LR'],
              help='Learning rate', dest='lr')),
        (('-f', '--load'),
         dict(dest='load', type=str, default=conf['MODEL']['PRETRAINED_MODEL_DIR'],
              help='Load model from a .pth file')),
        (('-s', '--scale'),
         dict(dest='scale', type=float, default=conf['SCALE'],
              help='Downscaling factor of the images')),
        (('-v', '--validation'),
         dict(dest='val', type=float, default=conf['VALIDATION'],
              help='Percent of the data that is used as validation (0-100)')),
        (('-t', '--init-type'),
         dict(dest='init_type', type=str, default=conf['INIT_TYPE'],
              help='Init weights type')),
        (('-a', '--use-apex'),
         dict(dest='use_apex', type=str, default=conf['APEX'],
              help='Automatic Mixed Precision')),
        (('-o', '--optimizer'),
         dict(dest='optimizer', type=str, default=conf['OPTIMIZER'],
              help='Optimizer type')),
        (('-ls', '--lr-scheduler'),
         dict(dest='lr_scheduler', type=str, default=conf['LR_SCHEDULER'],
              help='lr scheduler type')),
    ]
    for flags, kwargs in option_table:
        parser.add_argument(*flags, **kwargs)
    return parser.parse_args()
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO,
                        format='%(levelname)s: %(message)s')
    args = get_args()
    # Use CUDA only when it is both available and requested in config.yaml.
    device = torch.device('cuda' if torch.cuda.is_available(
    ) and conf['DEVICE'].lower() == 'cuda' else 'cpu')
    logging.info(f'Using device {device}')
    network = args.network.lower()
    # Change here to adapt to your data
    # n_channels=3 for RGB images
    # n_classes is the number of probabilities you want to get per pixel
    # - For 1 class and background, use n_classes=1
    # - For 2 classes, use n_classes=1
    # - For N > 2 classes, use n_classes=N
    net = ChooseModel(network)(
        n_channels=3, n_classes=conf['DATASET']['NUM_CLASSES'])
    assert net is not None, f'check your argument --network'

    logging.info(f'Network:\n'
                 f'\t{net.n_channels} input channels\n'
                 f'\t{net.n_classes} output channels (classes)\n'
                 f'\t{"Bilinear" if net.bilinear else "Dilated conv"} upscaling\n'
                 f'\tApex is {"using" if args.use_apex == "True" else "not using"}')
    init_weights(net, args.init_type)
    # Optionally warm-start from a pretrained .pth file.
    if args.load:
        net.load_state_dict(
            torch.load(args.load, map_location=device)
        )
        logging.info(f'Model loaded from {args.load}')

    net.to(device=device)
    # faster convolutions, but more memory
    # cudnn.benchmark = True

    try:
        train_net(net=net,
                  epochs=args.epochs,
                  batch_size=args.batchsize,
                  lr=args.lr,
                  device=device,
                  img_scale=args.scale,
                  val_percent=args.val / 100,
                  use_apex=(args.use_apex == "True"),
                  optimizer=args.optimizer.lower(),
                  classes=conf['DATASET']['NUM_CLASSES'],
                  lr_scheduler=args.lr_scheduler,
                  lr_scheduler_cfgs=conf['LR_SCHEDULER_CFGS'])
    except KeyboardInterrupt:
        # Ctrl-C: persist the partially trained weights before exiting.
        torch.save(net.state_dict(), 'INTERRUPTED.pth')
        logging.info('Saved interrupt')
        try:
            sys.exit(0)
        except SystemExit:
            os._exit(0)
| 41.640411 | 109 | 0.555062 |
import argparse
import logging
import os
import sys
import numpy as np
import torch
import torch.nn as nn
import yaml
from torch import optim
from torch.utils.data import DataLoader, random_split
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
from evaluation import eval_net
from models import ChooseModel, init_weights
from utils.dataset import BasicDataset
# Load the training configuration shipped next to this script.
conf = yaml.load(open(os.path.join(
    sys.path[0], 'config', 'config.yaml')), Loader=yaml.FullLoader)
dir_img = conf['DATASET']['IMGS_DIR']  # training images directory
dir_mask = conf['DATASET']['MASKS_DIR']  # ground-truth masks directory
dir_checkpoint = conf['MODEL']['CHECKPOINT_DIR']  # where checkpoints are written
def train_net(net,
              device,
              epochs=5,
              batch_size=16,
              lr=0.001,
              val_percent=0.1,
              save_cp=True,
              img_scale=0.5,
              use_apex=False,
              optimizer='adam',
              classes=2,
              lr_scheduler='steplr',
              lr_scheduler_cfgs: dict = None):
    """Train ``net`` on the dataset configured in config.yaml.

    Loss, validation metrics and sample images are logged to TensorBoard,
    and a checkpoint is written into ``dir_checkpoint`` after every epoch
    when ``save_cp`` is True.

    Raises:
        ValueError: if ``optimizer`` or ``lr_scheduler`` is not recognized.
    """
    # Avoid the mutable-default-argument pitfall while keeping the old default.
    if lr_scheduler_cfgs is None:
        lr_scheduler_cfgs = {'step_size': 10}

    dataset = BasicDataset(dir_img, dir_mask, img_scale,
                           train=True, classes=classes)
    n_val = int(len(dataset) * val_percent)
    n_train = len(dataset) - n_val
    train, val = random_split(dataset, [n_train, n_val])
    train_loader = DataLoader(
        train, batch_size=batch_size, shuffle=True, num_workers=8, pin_memory=True)
    val_loader = DataLoader(val, batch_size=batch_size,
                            shuffle=False, num_workers=8, pin_memory=True)

    writer = SummaryWriter(
        comment=f'LR_{lr}_BS_{batch_size}_SCALE_{img_scale}')
    global_step = 0

    logging.info(f'''Starting training:
        Epochs: {epochs}
        Batch size: {batch_size}
        Learning rate: {lr}
        Training size: {n_train}
        Validation size: {n_val}
        Checkpoints: {save_cp}
        Device: {device.type}
        Images scaling: {img_scale}
        Use apex: {use_apex}
    ''')
    optimizers = {
        'adadelta': optim.Adadelta,
        'adagrad': optim.Adagrad,
        'adam': optim.Adam,
        'adamw': optim.AdamW,
        'sparseadam': optim.SparseAdam,
        'adamax': optim.Adamax,
        'asgd': optim.ASGD,
        'lbfgs': optim.LBFGS,
        'rmsprop': optim.RMSprop,
        'rprop': optim.Rprop,
        'sgd': optim.SGD,
    }
    try:
        # Fail fast with a clear message instead of calling None for
        # unknown names.
        optimizer = optimizers[optimizer](
            net.parameters(), lr=lr, weight_decay=1e-8)
    except KeyError:
        raise ValueError(f'unknown optimizer type: {optimizer!r}') from None
    lr_scheduler_getter = {
        'lambdalr': torch.optim.lr_scheduler.LambdaLR,
        'multiplicativelr': torch.optim.lr_scheduler.MultiplicativeLR,
        'steplr': torch.optim.lr_scheduler.StepLR,
        'multisteplr': torch.optim.lr_scheduler.MultiStepLR,
        'exponentiallr': torch.optim.lr_scheduler.ExponentialLR,
        'cosineannealinglr': torch.optim.lr_scheduler.CosineAnnealingLR,
        'reducelronplateau': torch.optim.lr_scheduler.ReduceLROnPlateau,
        'cycliclr': torch.optim.lr_scheduler.CyclicLR,
        'onecyclelr': torch.optim.lr_scheduler.OneCycleLR,
    }
    try:
        lr_scheduler = lr_scheduler_getter[lr_scheduler.lower()](
            optimizer, **lr_scheduler_cfgs)
    except KeyError:
        raise ValueError(f'unknown lr scheduler type: {lr_scheduler!r}') from None
    if use_apex:
        try:
            from apex import amp
            net, optimizer = amp.initialize(net, optimizer, opt_level="O1")
        except ImportError as e:
            # apex is optional; fall back to full precision.
            print(e)
            use_apex = False

    # CrossEntropy for multi-class, BCE-with-logits for binary segmentation.
    if net.n_classes > 1:
        criterion = nn.CrossEntropyLoss()
    else:
        criterion = nn.BCEWithLogitsLoss()

    # Run validation roughly 10 times per epoch; max() guards against a
    # zero modulus (ZeroDivisionError) on small datasets.  This also
    # replaces the old unused dataset_len/a1/a2/b1/b2 dead code.
    val_interval = max(1, len(dataset) // (10 * batch_size))

    for epoch in range(epochs):
        net.train()

        epoch_loss = 0
        with tqdm(total=n_train, desc=f'Epoch {epoch + 1}/{epochs}', unit='img') as pbar:
            for batch in train_loader:
                imgs = batch['image']
                true_masks = batch['mask']
                assert imgs.shape[1] == net.n_channels, \
                    f'Network has been defined with {net.n_channels} input channels, ' \
                    f'but loaded images have {imgs.shape[1]} channels. Please check that ' \
                    'the images are loaded correctly.'

                imgs = imgs.to(device=device, dtype=torch.float32)
                mask_type = torch.float32 if net.n_classes == 1 else torch.long
                true_masks = true_masks.to(device=device, dtype=mask_type)
                if net.n_classes > 1:
                    # CrossEntropyLoss expects class indices of shape (B, H, W).
                    b, c, w, h = true_masks.shape
                    true_masks = true_masks.view(b, w, h)
                masks_pred = net(imgs)
                loss = criterion(masks_pred, true_masks)
                epoch_loss += loss.item()
                writer.add_scalar('Loss/train', loss.item(), global_step)

                pbar.set_postfix(**{'loss (batch)': loss.item()})

                optimizer.zero_grad()
                if not use_apex:
                    loss.backward()
                else:
                    # apex scales the loss for stable mixed-precision backprop.
                    with amp.scale_loss(loss, optimizer) as scaled_loss:
                        scaled_loss.backward()
                optimizer.step()

                pbar.update(imgs.shape[0])
                global_step += 1
                if global_step % val_interval == 0:
                    dice_coeff, pA, oA, precision, recall, f1score = eval_net(
                        net, val_loader, device, n_val)
                    if net.n_classes > 1:
                        logging.info(
                            'Validation cross entropy: {}'.format(dice_coeff))
                        writer.add_scalar('Loss/test', dice_coeff, global_step)
                    else:
                        logging.info(
                            'Validation Dice Coeff: {}'.format(dice_coeff))
                        writer.add_scalar('Dice/test', dice_coeff, global_step)
                        logging.info(
                            'Validation Pixel Accuracy: {}'.format(pA))
                        writer.add_scalar('pA/test', pA, global_step)
                        logging.info(
                            'Validation Overall Accuracy: {}'.format(oA))
                        writer.add_scalar('oA/test', oA, global_step)
                        logging.info(
                            'Validation Precision: {}'.format(precision))
                        writer.add_scalar('precision/test',
                                          precision, global_step)
                        logging.info(
                            'Validation Recall: {}'.format(recall))
                        writer.add_scalar('recall/test', recall, global_step)
                        logging.info(
                            'Validation F1-score: {}'.format(f1score))
                        writer.add_scalar(
                            'F1-score/test', f1score, global_step)
                    writer.add_images('images', imgs, global_step)
                    if net.n_classes == 1:
                        writer.add_images(
                            'masks/true', true_masks, global_step)
                        writer.add_images(
                            'masks/pred', torch.sigmoid(masks_pred) > 0.5, global_step)
        lr_scheduler.step()
        if save_cp:
            try:
                os.mkdir(dir_checkpoint)
                logging.info('Created checkpoint directory')
            except OSError:
                # Directory already exists -- keep going.
                pass
            torch.save(net.state_dict(),
                       os.path.join(dir_checkpoint, f'CP_epoch{epoch + 1}_loss_{str(loss.item())}.pth'))
            logging.info(
                f'Checkpoint {epoch + 1} saved ! loss (batch) = ' + str(loss.item()))
    writer.close()
def get_args():
    """Build and parse the training command line.

    Every option falls back to the corresponding entry of the module-level
    ``conf`` mapping, so the config file drives behaviour unless overridden
    on the command line.
    """
    cli = argparse.ArgumentParser(
        description='Train the UNet on images and target masks',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # (flag spellings, add_argument keyword arguments) -- one row per option.
    specs = [
        (('-n', '--network'),
         dict(metavar='NETWORK', type=str, dest='network',
              default=conf['MODEL']['MODEL_NAME'], help='network type')),
        (('-e', '--epochs'),
         dict(metavar='E', type=int, dest='epochs',
              default=conf['NUM_EPOCHS'], help='Number of epochs')),
        (('-b', '--batch-size'),
         dict(metavar='B', type=int, nargs='?', dest='batchsize',
              default=conf['BATCH_SIZE'], help='Batch size')),
        (('-l', '--learning-rate'),
         dict(metavar='LR', type=float, nargs='?', dest='lr',
              default=conf['LR'], help='Learning rate')),
        (('-f', '--load'),
         dict(type=str, dest='load',
              default=conf['MODEL']['PRETRAINED_MODEL_DIR'],
              help='Load model from a .pth file')),
        (('-s', '--scale'),
         dict(type=float, dest='scale', default=conf['SCALE'],
              help='Downscaling factor of the images')),
        (('-v', '--validation'),
         dict(type=float, dest='val', default=conf['VALIDATION'],
              help='Percent of the data that is used as validation (0-100)')),
        (('-t', '--init-type'),
         dict(type=str, dest='init_type', default=conf['INIT_TYPE'],
              help='Init weights type')),
        (('-a', '--use-apex'),
         dict(type=str, dest='use_apex', default=conf['APEX'],
              help='Automatic Mixed Precision')),
        (('-o', '--optimizer'),
         dict(type=str, dest='optimizer', default=conf['OPTIMIZER'],
              help='Optimizer type')),
        (('-ls', '--lr-scheduler'),
         dict(type=str, dest='lr_scheduler', default=conf['LR_SCHEDULER'],
              help='lr scheduler type')),
    ]
    for flags, kwargs in specs:
        cli.add_argument(*flags, **kwargs)
    return cli.parse_args()
if __name__ == '__main__':
    # Console-only logging; no file handlers are configured here.
    logging.basicConfig(level=logging.INFO,
                        format='%(levelname)s: %(message)s')
    args = get_args()
    # CUDA is used only when both the hardware and the config ask for it.
    device = torch.device('cuda' if torch.cuda.is_available(
    ) and conf['DEVICE'].lower() == 'cuda' else 'cpu')
    logging.info(f'Using device {device}')
    network = args.network.lower()
    # ChooseModel maps the network name to a model class; instantiate it
    # for 3-channel (RGB) input with the configured class count.
    net = ChooseModel(network)(
        n_channels=3, n_classes=conf['DATASET']['NUM_CLASSES'])
    # NOTE(review): this f-string has no placeholders; also ChooseModel
    # presumably returns None for unknown names -- confirm.
    assert net is not None, f'check your argument --network'
    logging.info(f'Network:\n'
                 f'\t{net.n_channels} input channels\n'
                 f'\t{net.n_classes} output channels (classes)\n'
                 f'\t{"Bilinear" if net.bilinear else "Dilated conv"} upscaling\n'
                 f'\tApex is {"using" if args.use_apex == "True" else "not using"}')
    init_weights(net, args.init_type)
    # Optionally warm-start from a checkpoint before moving to the device.
    if args.load:
        net.load_state_dict(
            torch.load(args.load, map_location=device)
        )
        logging.info(f'Model loaded from {args.load}')
    net.to(device=device)
    try:
        train_net(net=net,
                  epochs=args.epochs,
                  batch_size=args.batchsize,
                  lr=args.lr,
                  device=device,
                  img_scale=args.scale,
                  val_percent=args.val / 100,
                  use_apex=(args.use_apex == "True"),
                  optimizer=args.optimizer.lower(),
                  classes=conf['DATASET']['NUM_CLASSES'],
                  lr_scheduler=args.lr_scheduler,
                  lr_scheduler_cfgs=conf['LR_SCHEDULER_CFGS'])
    except KeyboardInterrupt:
        # On Ctrl-C: snapshot the weights, then force-exit with os._exit
        # so lingering (e.g. data-loader) threads cannot keep the process
        # alive after SystemExit.
        torch.save(net.state_dict(), 'INTERRUPTED.pth')
        logging.info('Saved interrupt')
        try:
            sys.exit(0)
        except SystemExit:
            os._exit(0)
| true | true |
f7343d1941a5745d36121531142f0fb145e90236 | 1,464 | py | Python | mr_database/databaseconnection.py | SorenSeeberg/MrDatabase | 07d39c1f227d9ceaffe7e409f0915776901dd3e9 | [
"MIT"
] | null | null | null | mr_database/databaseconnection.py | SorenSeeberg/MrDatabase | 07d39c1f227d9ceaffe7e409f0915776901dd3e9 | [
"MIT"
] | 1 | 2018-05-28T11:55:49.000Z | 2018-06-11T16:51:45.000Z | mr_database/databaseconnection.py | SorenSeeberg/MrDatabase | 07d39c1f227d9ceaffe7e409f0915776901dd3e9 | [
"MIT"
] | null | null | null | import sqlite3 as sqlite
class ConType:
    """Connection-scope identifiers consumed by DatabaseConnection.

    query closes without committing, mutation commits then closes, and
    batch keeps one connection open across nested contexts.
    """
    # Ordinal values are part of the stored/compared contract; keep stable.
    query, mutation, batch = 0, 1, 2
class DatabaseConnection:
    """Context manager that scopes a sqlite3 connection on a MrDatabase.

    Modes (see ConType):
      * query    -- open a connection; close on exit without committing.
      * mutation -- open a connection; commit, then close on exit.
      * batch    -- open a connection and keep it open across nested
                    query/mutation contexts; commit and close only when
                    the batch context itself exits.
    """

    # Class-wide flag: True while a batch context is active, so nested
    # query/mutation contexts reuse the batch's open connection.
    is_batching = False

    def __init__(self, database_object: 'MrDatabase', con_type: int = ConType.mutation):
        self.database_object = database_object
        self.con_type: int = con_type

    def __enter__(self):
        # query and mutation behave identically on entry: piggy-back on an
        # already-open batch connection, otherwise open their own.
        if self.con_type in (ConType.query, ConType.mutation):
            if not DatabaseConnection.is_batching:
                self.connect()
        elif self.con_type == ConType.batch:
            self.connect()
            DatabaseConnection.is_batching = True

    def __exit__(self, *args):
        if DatabaseConnection.is_batching and self.con_type != ConType.batch:
            # Nested inside a batch: leave the shared connection open; the
            # batch context will commit and close it.
            return
        if self.con_type != ConType.query:
            # mutation and batch persist their changes; query is read-only.
            self.commit()
        if self.con_type == ConType.batch:
            DatabaseConnection.is_batching = False
        self.close()

    def connect(self):
        """Open a connection/cursor and publish them on the database object."""
        self.database_object.con = sqlite.connect(self.database_object.database_path)
        self.database_object.cur = self.database_object.con.cursor()

    def commit(self):
        """Commit the current transaction on the shared connection."""
        self.database_object.con.commit()

    def close(self):
        """Close the shared connection (the stale cursor is left in place)."""
        self.database_object.con.close()
| 24.4 | 86 | 0.605874 | import sqlite3 as sqlite
class ConType:
    """Connection-scope identifiers consumed by DatabaseConnection."""
    query = 0  # read-only: close without committing
    mutation = 1  # single write scope: commit, then close
    batch = 2  # shared scope: stays open until the batch context exits
class DatabaseConnection:
    """Context manager scoping a sqlite3 connection on a MrDatabase object.

    Modes (ConType): query closes without committing, mutation commits then
    closes, and batch keeps a single connection open across nested
    query/mutation contexts until the batch context itself exits.
    """
    # Class-wide flag: True while a batch context is active.
    is_batching = False
    def __init__(self, database_object: 'MrDatabase', con_type: int=ConType.mutation):
        self.database_object = database_object
        self.con_type: int = con_type
    def __enter__(self):
        # query/mutation reuse an already-open batch connection when one exists.
        if self.con_type == ConType.query:
            if not DatabaseConnection.is_batching:
                self.connect()
        elif self.con_type == ConType.mutation:
            if not DatabaseConnection.is_batching:
                self.connect()
        elif self.con_type == ConType.batch:
            self.connect()
            DatabaseConnection.is_batching = True
    def __exit__(self, *args):
        # Inside an active batch, query/mutation leave the shared connection
        # untouched; the batch context commits and closes at the end.
        if self.con_type == ConType.query:
            if not self.is_batching:
                self.close()
        elif self.con_type == ConType.mutation:
            if not self.is_batching:
                self.commit()
                self.close()
        elif self.con_type == ConType.batch:
            DatabaseConnection.is_batching = False
            self.commit()
            self.close()
    def connect(self):
        # Open a fresh connection/cursor and store them on the database
        # object so other MrDatabase code can use them.
        self.database_object.con = sqlite.connect(self.database_object.database_path)
        self.database_object.cur = self.database_object.con.cursor()
    def commit(self):
        self.database_object.con.commit()
    def close(self):
        self.database_object.con.close()
| true | true |
f7344259f9ea929ba22c289ed7728299838a7401 | 1,394 | py | Python | 05-07-a2/sample_05_button.py | hiro345g/raspi_magazine_201610_toku1 | f5dde65409eaeef15e15e6e2d5c86cbf0ac88ef5 | [
"MIT"
] | null | null | null | 05-07-a2/sample_05_button.py | hiro345g/raspi_magazine_201610_toku1 | f5dde65409eaeef15e15e6e2d5c86cbf0ac88ef5 | [
"MIT"
] | null | null | null | 05-07-a2/sample_05_button.py | hiro345g/raspi_magazine_201610_toku1 | f5dde65409eaeef15e15e6e2d5c86cbf0ac88ef5 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import time
import wiringpi
# 定数宣言
LED_PIN = 4 # LEDのGPIO番号
BUTTON_PIN = 13 # スイッチのGPIO番号
BUTTON_DELAY = 0.5 # チャタリング防止用の遅延時間
INTERVAL = 0.1 # スイッチチェック間隔
def get_button_value(pin):
    """Read the button state on *pin*, debouncing after a press.

    Returns the raw wiringpi.digitalRead value (HIGH or LOW).
    """
    state = wiringpi.digitalRead(pin)
    if state == wiringpi.HIGH:
        # Delay the next poll after a press so contact bounce is not
        # counted as additional presses.
        time.sleep(BUTTON_DELAY)
    return state
def main():
    """Main routine: toggle the LED on each button press, four presses total."""
    wiringpi.wiringPiSetupGpio()  # initialise wiringpi (BCM GPIO numbering)
    wiringpi.pinMode(LED_PIN, wiringpi.OUTPUT)  # LED pin is an output
    wiringpi.pinMode(BUTTON_PIN, wiringpi.INPUT)  # button pin is an input
    cnt = 0
    current_led_value = wiringpi.LOW
    try:
        while cnt < 4:
            # Repeat until cnt reaches 4 (i.e. four button presses).
            button_value = get_button_value(BUTTON_PIN)
            if button_value == wiringpi.HIGH:
                cnt += 1
                print('cnt:{0}'.format(cnt))
                # Toggle the LED and remember its new state.
                if current_led_value == wiringpi.HIGH:
                    wiringpi.digitalWrite(LED_PIN, wiringpi.LOW)
                    current_led_value = wiringpi.LOW
                else:
                    wiringpi.digitalWrite(LED_PIN, wiringpi.HIGH)
                    current_led_value = wiringpi.HIGH
            time.sleep(INTERVAL)
    except KeyboardInterrupt:
        print('KeyboardInterrupt')
    # Always leave the LED switched off on exit.
    wiringpi.digitalWrite(LED_PIN, wiringpi.LOW)
if __name__ == '__main__':
main()
| 27.88 | 65 | 0.610473 |
import time
import wiringpi
LED_PIN = 4
BUTTON_PIN = 13
BUTTON_DELAY = 0.5
INTERVAL = 0.1
def get_button_value(pin):
    """Read the button state on *pin*; after a press, sleep to debounce."""
    v = wiringpi.digitalRead(pin)
    if v == wiringpi.HIGH:
        # Delay the next poll so contact bounce is not read as more presses.
        time.sleep(BUTTON_DELAY)
    return v
def main():
    """Toggle the LED on each button press; exit after four presses."""
    wiringpi.wiringPiSetupGpio()
    wiringpi.pinMode(LED_PIN, wiringpi.OUTPUT)
    wiringpi.pinMode(BUTTON_PIN, wiringpi.INPUT)
    cnt = 0
    current_led_value = wiringpi.LOW
    try:
        # Loop until four presses have been observed.
        while cnt < 4:
            button_value = get_button_value(BUTTON_PIN)
            if button_value == wiringpi.HIGH:
                cnt += 1
                print('cnt:{0}'.format(cnt))
                # Toggle the LED and remember its new state.
                if current_led_value == wiringpi.HIGH:
                    wiringpi.digitalWrite(LED_PIN, wiringpi.LOW)
                    current_led_value = wiringpi.LOW
                else:
                    wiringpi.digitalWrite(LED_PIN, wiringpi.HIGH)
                    current_led_value = wiringpi.HIGH
            time.sleep(INTERVAL)
    except KeyboardInterrupt:
        print('KeyboardInterrupt')
    # Always leave the LED switched off on exit.
    wiringpi.digitalWrite(LED_PIN, wiringpi.LOW)
if __name__ == '__main__':
main()
| true | true |
f73443186c3bcf41147c14278c5082c02e5001aa | 565 | py | Python | qu.py | oldshensheep/crawl-yande.re | ffb512d9ca98f666383f856f833b851bb0982a00 | [
"MIT"
] | 1 | 2021-04-12T06:29:43.000Z | 2021-04-12T06:29:43.000Z | qu.py | oldshensheep/crawl-yande.re | ffb512d9ca98f666383f856f833b851bb0982a00 | [
"MIT"
] | null | null | null | qu.py | oldshensheep/crawl-yande.re | ffb512d9ca98f666383f856f833b851bb0982a00 | [
"MIT"
] | null | null | null | import queue
import threading
import time
import concurrent.futures
q = queue.Queue(maxsize=10)
def Producer(name):
    """Endlessly make buns and put them on the shared queue ``q``.

    ``q.put`` blocks once the queue holds 10 items, which throttles
    production to consumption speed.
    """
    n = 0
    while True:
        n += 1
        q.put("包子 %s" % n)
        print("做了包子", n)
        time.sleep(0.5)
def Consumer(name):
    """Endlessly take buns off ``q`` (blocking when empty) and report eating them."""
    while True:
        bun = q.get()
        print("[%s] 取到[%s] 并且吃了它..." % (name, bun))
        time.sleep(1)
# One producer feeds two consumers through the bounded queue ``q``.
p = threading.Thread(target=Producer, args=("Lily",))
c = threading.Thread(target=Consumer, args=("Lilei",))
c1 = threading.Thread(target=Consumer, args=("Ahi",))
# NOTE(review): all three threads are non-daemon and loop forever, so the
# process never terminates on its own.
p.start()
c.start()
c1.start()
| 18.225806 | 55 | 0.60531 | import queue
import threading
import time
import concurrent.futures
q = queue.Queue(maxsize=10)
def Producer(name):
    """Endlessly make buns and put them on the shared queue ``q``.

    ``q.put`` blocks once the queue holds 10 items, throttling production.
    """
    count = 1
    while True:
        q.put("包子 %s" % count)
        print("做了包子", count)
        count += 1
        time.sleep(0.5)
def Consumer(name):
    """Endlessly take buns from ``q`` (blocking when empty) and report eating them."""
    while True:
        print("[%s] 取到[%s] 并且吃了它..." % (name, q.get()))
        time.sleep(1)
# One producer and two consumers share the bounded queue ``q``.
p = threading.Thread(target=Producer, args=("Lily",))
c = threading.Thread(target=Consumer, args=("Lilei",))
c1 = threading.Thread(target=Consumer, args=("Ahi",))
# NOTE(review): threads are non-daemon and loop forever; the process never
# exits on its own.
p.start()
c.start()
c1.start()
| true | true |
f73443a2f76a5004632b0b69d392edbe51fe587b | 73 | py | Python | wsgi.py | BruninLima/HotOrCold | d8d0ab92d949bfa2e3b62c14659e12a7ce821b1f | [
"MIT"
] | null | null | null | wsgi.py | BruninLima/HotOrCold | d8d0ab92d949bfa2e3b62c14659e12a7ce821b1f | [
"MIT"
] | null | null | null | wsgi.py | BruninLima/HotOrCold | d8d0ab92d949bfa2e3b62c14659e12a7ce821b1f | [
"MIT"
] | null | null | null | from hotorcold.main import app
# Run the app's built-in dev server when executed directly; a WSGI server
# imports the module-level ``app`` instead (presumably a Flask-style
# object -- confirm against hotorcold.main).
if __name__ == "__main__":
    app.run()
| 14.6 | 30 | 0.684932 | from hotorcold.main import app
if __name__ == "__main__":
app.run()
| true | true |
f73443b348d868b2813099929e72cbd8722638ba | 9,711 | py | Python | nipype/utils/misc.py | kastman/nipype | 15a8d6f57067494196fe639095253217a9235c3c | [
"Apache-2.0"
] | 3 | 2015-11-03T08:24:57.000Z | 2018-05-02T15:26:28.000Z | nipype/utils/misc.py | kastman/nipype | 15a8d6f57067494196fe639095253217a9235c3c | [
"Apache-2.0"
] | null | null | null | nipype/utils/misc.py | kastman/nipype | 15a8d6f57067494196fe639095253217a9235c3c | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""Miscellaneous utility functions
"""
from __future__ import (print_function, unicode_literals, division,
absolute_import)
from builtins import next, str
import os
import sys
import re
from collections import Iterator
from warnings import warn
from distutils.version import LooseVersion
import numpy as np
from future.utils import raise_from
from future import standard_library
# Python >= 3.3 provides textwrap.indent; fall back to a local shim on
# older interpreters.
try:
    from textwrap import indent as textwrap_indent
except ImportError:
    def textwrap_indent(text, prefix):
        """ A textwrap.indent replacement for Python < 3.3 """
        # NOTE(review): unlike textwrap.indent, this shim also prefixes
        # whitespace-only lines -- confirm the difference is acceptable.
        if not prefix:
            return text
        splittext = text.splitlines(True)
        return prefix + prefix.join(splittext)
# future: expose Python-2 stdlib modules under their Python 3 names.
standard_library.install_aliases()
def human_order_sorted(l):
    """Sort strings in human (natural) order, so 'stat2' precedes 'stat10'.

    Tuples are keyed by their first element.
    """
    def _natural_key(item):
        # For tuples, only the first element drives the ordering.
        text = item[0] if isinstance(item, tuple) else item
        # Split into alternating non-digit / digit runs; digit runs
        # compare numerically rather than lexically.
        return [int(tok) if tok.isdigit() else tok
                for tok in re.split(r'(\d+)', text)]
    return sorted(l, key=_natural_key)
def trim(docstring, marker=None):
    """Dedent *docstring* PEP-257 style.

    Tabs are expanded, the common indentation of the continuation lines
    (second line onward) is stripped, and leading/trailing blank lines are
    removed.  If *marker* is given, any continuation line consisting of a
    single repeated character (a reST underline such as ``----``, except
    ``:``) has that character replaced by *marker*.
    """
    if isinstance(docstring, bytes):
        docstring = str(docstring, 'utf-8')
    if not docstring:
        return ''
    lines = docstring.expandtabs().splitlines()
    # Common indentation is measured over non-blank continuation lines only;
    # the first line conventionally starts right after the quotes.
    body = lines[1:]
    indents = [len(ln) - len(ln.lstrip()) for ln in body if ln.lstrip()]
    out = [lines[0].strip()]
    if indents:
        margin = min(indents)
        for ln in body:
            bare = ln.strip()
            if (marker is not None and bare
                    and bare.count(bare[0]) == len(bare)
                    and bare[0] != ':'):
                # Re-style a reST underline with the requested marker char.
                ln = ln.replace(bare[0], marker)
            out.append(ln[margin:].rstrip())
    # Drop blank lines at either end of the result.
    while out and not out[-1]:
        out.pop()
    while out and not out[0]:
        out.pop(0)
    return '\n'.join(out)
def find_indices(condition):
    """Return the flat indices at which ravel(condition) is truthy."""
    flat = np.ravel(condition)
    (idx,) = np.nonzero(flat)
    return idx
def is_container(item):
    """Return True when *item* is an iterable container.

    Strings are deliberately treated as scalars, even though they are
    iterable; anything else qualifies if it exposes ``__iter__``.
    """
    if isinstance(item, str):
        return False
    return hasattr(item, '__iter__')
def container_to_string(cont):
    """Render *cont* as a command-line-friendly string.

    Iterable containers (except strings) are joined with single spaces;
    strings and non-iterables are simply passed through str().
    """
    joinable = hasattr(cont, '__iter__') and not isinstance(cont, str)
    return ' '.join(cont) if joinable else str(cont)
# Dependency checks. Copied this from Nipy, with some modificiations
# (added app as a parameter).
def package_check(pkg_name,
                  version=None,
                  app=None,
                  checker=LooseVersion,
                  exc_failed_import=ImportError,
                  exc_failed_check=RuntimeError):
    """Ensure *pkg_name* is importable, optionally at least at *version*.

    *app* names the component performing the check (the message reads
    'Nipype requires ...' when omitted).  *checker* is the version
    comparison class, and the two exception parameters let callers choose
    what is raised on import failure or on a too-old version.
    """
    requirer = app if app else 'Nipype'
    msg = '%s requires %s' % (requirer, pkg_name)
    if version:
        msg += ' with version >= %s' % (version, )
    try:
        mod = __import__(pkg_name)
    except ImportError as err:
        # raise_from preserves the original traceback on Py2 and Py3.
        raise_from(exc_failed_import(msg), err)
    if not version:
        return
    try:
        have_version = mod.__version__
    except AttributeError as err:
        raise_from(
            exc_failed_check('Cannot find version for %s' % pkg_name), err)
    if checker(have_version) < checker(version):
        raise exc_failed_check(msg)
def str2bool(v):
    """Coerce *v* (a bool, or a yes/no-style string) to a bool.

    Raises ValueError for unrecognised strings.
    """
    if isinstance(v, bool):
        return v
    token = v.lower()
    if token in ("yes", "true", "t", "1"):
        return True
    if token in ("no", "false", "n", "f", "0"):
        return False
    raise ValueError("%s cannot be converted to bool" % v)
def flatten(S):
    """Recursively flatten arbitrarily nested lists into one flat list.

    Only ``list`` instances are descended into; other element types are
    kept as-is.  The recursion assumes *S* itself is a list.
    """
    if S == []:
        return S
    head, tail = S[:1], S[1:]
    if isinstance(S[0], list):
        return flatten(S[0]) + flatten(tail)
    return head + flatten(tail)
def unflatten(in_list, prev_structure):
    """Inverse of ``flatten``: reshape the flat iterable *in_list* into the
    nested list structure of *prev_structure*.

    Non-list nodes in *prev_structure* are placeholders; each one consumes
    the next value from *in_list*.
    """
    # iter() returns iterators unchanged and wraps any other iterable, so
    # the original ``isinstance(in_list, collections.Iterator)`` guard is
    # unnecessary -- and that alias was removed from the collections
    # namespace in Python 3.10, so dropping it also keeps this working on
    # modern interpreters.
    in_list = iter(in_list)
    if not isinstance(prev_structure, list):
        return next(in_list)
    return [unflatten(in_list, item) for item in prev_structure]
def normalize_mc_params(params, source):
    """
    Normalize a single row of motion parameters to the SPM convention:
    [x, y, z] translations in mm followed by [pitch, roll, yaw] rotations
    in radians.

    *source* selects the input layout ('FSL', 'AFNI'/'FSFAST', or 'NIPY');
    unrecognised sources are returned unchanged.
    """
    fmt = source.upper()
    if fmt == 'FSL':
        # FSL stores the rotations before the translations.
        params = params[[3, 4, 5, 0, 1, 2]]
    elif fmt in ('AFNI', 'FSFAST'):
        # A row longer than 6 entries carries a leading extra column,
        # shifting every index by one; rotations arrive in degrees.
        shift = int(len(params) > 6)
        params = params[np.asarray([4, 5, 3, 1, 2, 0]) + shift]
        params[3:] = params[3:] * np.pi / 180.
    elif fmt == 'NIPY':
        from nipy.algorithms.registration import to_matrix44, aff2euler
        affine = to_matrix44(params)
        params = np.zeros(6)
        params[:3] = affine[:3, 3]
        params[-1:2:-1] = aff2euler(affine)
    return params
def dict_diff(dold, dnew, indent=0):
    """Render a human-readable diff between two dicts (or lists of pairs),
    as used to explain hashed-input mismatches.

    Returns a multi-line string indented by *indent* spaces, or '' when
    nothing differs.
    """
    # hashed_inputs usually arrive as lists of (key, value) tuples.
    if isinstance(dnew, list):
        dnew = dict(dnew)
    if isinstance(dold, list):
        dold = dict(dold)
    newk, oldk = set(dnew.keys()), set(dold.keys())
    lines = []
    if newk - oldk:
        lines.append(" * keys not previously seen: %s" % (newk - oldk))
    if oldk - newk:
        lines.append(" * keys not presently seen: %s" % (oldk - newk))
    if lines:
        lines.insert(0, "Dictionaries had differing keys:")
    nkeymsgs = len(lines)
    # Values under shared keys differ far more often than keys do.
    for key in newk.intersection(oldk):
        try:
            val_new, val_old = dnew[key], dold[key]
            equal = val_new == val_old
            if not equal:
                # JSON round-trips turn tuples into lists; as a last
                # resort compare after casting to the old value's type.
                equal = val_old.__class__(val_new) == val_old
        except Exception:
            equal = False
        if not equal:
            lines.append(" * %s: %r != %r" % (key, dnew[key], dold[key]))
    if len(lines) > nkeymsgs:
        lines.insert(nkeymsgs, "Some dictionary entries had differing values:")
    return textwrap_indent('\n'.join(lines), ' ' * indent)
def rgetcwd(error=True):
    """os.getcwd() that survives the working directory having been removed.

    With error=True (the default) this is exactly os.getcwd().  With
    error=False, a missing current directory falls back to the PWD
    environment variable (with a warning), and raises OSError only when
    that hint is absent too.
    """
    if error:
        return os.getcwd()
    try:
        return os.getcwd()
    except OSError as exc:
        fallback = os.getenv('PWD')
        if fallback is None:
            raise OSError((
                exc.errno, 'Current directory does not exist anymore, '
                'and nipype was not able to guess it from the environment'))
        warn('Current folder does not exist, replacing with "%s" instead.' % fallback)
        return fallback
| 29.697248 | 81 | 0.604675 |
from __future__ import (print_function, unicode_literals, division,
absolute_import)
from builtins import next, str
import os
import sys
import re
from collections import Iterator
from warnings import warn
from distutils.version import LooseVersion
import numpy as np
from future.utils import raise_from
from future import standard_library
try:
from textwrap import indent as textwrap_indent
except ImportError:
def textwrap_indent(text, prefix):
""" A textwrap.indent replacement for Python < 3.3 """
if not prefix:
return text
splittext = text.splitlines(True)
return prefix + prefix.join(splittext)
standard_library.install_aliases()
def human_order_sorted(l):
def atoi(text):
return int(text) if text.isdigit() else text
def natural_keys(text):
if isinstance(text, tuple):
text = text[0]
return [atoi(c) for c in re.split('(\d+)', text)]
return sorted(l, key=natural_keys)
def trim(docstring, marker=None):
if isinstance(docstring, bytes):
docstring = str(docstring, 'utf-8')
if not docstring:
return ''
lines = docstring.expandtabs().splitlines()
indent = sys.maxsize
for line in lines[1:]:
stripped = line.lstrip()
if stripped:
indent = min(indent, len(line) - len(stripped))
# Remove indentation (first line is special):
trimmed = [lines[0].strip()]
if indent < sys.maxsize:
for line in lines[1:]:
# replace existing REST marker with doc level marker
stripped = line.lstrip().strip().rstrip()
if marker is not None and stripped and \
all([s == stripped[0] for s in stripped]) and \
stripped[0] not in [':']:
line = line.replace(stripped[0], marker)
trimmed.append(line[indent:].rstrip())
# Strip off trailing and leading blank lines:
while trimmed and not trimmed[-1]:
trimmed.pop()
while trimmed and not trimmed[0]:
trimmed.pop(0)
# Return a single string:
return '\n'.join(trimmed)
def find_indices(condition):
res, = np.nonzero(np.ravel(condition))
return res
def is_container(item):
if isinstance(item, str):
return False
elif hasattr(item, '__iter__'):
return True
else:
return False
def container_to_string(cont):
if hasattr(cont, '__iter__') and not isinstance(cont, str):
cont = ' '.join(cont)
return str(cont)
# Dependency checks. Copied this from Nipy, with some modificiations
# (added app as a parameter).
def package_check(pkg_name,
version=None,
app=None,
checker=LooseVersion,
exc_failed_import=ImportError,
exc_failed_check=RuntimeError):
if app:
msg = '%s requires %s' % (app, pkg_name)
else:
msg = 'Nipype requires %s' % pkg_name
if version:
msg += ' with version >= %s' % (version, )
try:
mod = __import__(pkg_name)
except ImportError as e:
raise_from(exc_failed_import(msg), e)
if not version:
return
try:
have_version = mod.__version__
except AttributeError as e:
raise_from(
exc_failed_check('Cannot find version for %s' % pkg_name), e)
if checker(have_version) < checker(version):
raise exc_failed_check(msg)
def str2bool(v):
if isinstance(v, bool):
return v
lower = v.lower()
if lower in ("yes", "true", "t", "1"):
return True
elif lower in ("no", "false", "n", "f", "0"):
return False
else:
raise ValueError("%s cannot be converted to bool" % v)
def flatten(S):
if S == []:
return S
if isinstance(S[0], list):
return flatten(S[0]) + flatten(S[1:])
return S[:1] + flatten(S[1:])
def unflatten(in_list, prev_structure):
if not isinstance(in_list, Iterator):
in_list = iter(in_list)
if not isinstance(prev_structure, list):
return next(in_list)
out = []
for item in prev_structure:
out.append(unflatten(in_list, item))
return out
def normalize_mc_params(params, source):
if source.upper() == 'FSL':
params = params[[3, 4, 5, 0, 1, 2]]
elif source.upper() in ('AFNI', 'FSFAST'):
params = params[np.asarray([4, 5, 3, 1, 2, 0]) + (len(params) > 6)]
params[3:] = params[3:] * np.pi / 180.
elif source.upper() == 'NIPY':
from nipy.algorithms.registration import to_matrix44, aff2euler
matrix = to_matrix44(params)
params = np.zeros(6)
params[:3] = matrix[:3, 3]
params[-1:2:-1] = aff2euler(matrix)
return params
def dict_diff(dold, dnew, indent=0):
# First check inputs, since they usually are lists of tuples
# and dicts are required.
if isinstance(dnew, list):
dnew = dict(dnew)
if isinstance(dold, list):
dold = dict(dold)
# Compare against hashed_inputs
# Keys: should rarely differ
new_keys = set(dnew.keys())
old_keys = set(dold.keys())
diff = []
if new_keys - old_keys:
diff += [" * keys not previously seen: %s" % (new_keys - old_keys)]
if old_keys - new_keys:
diff += [" * keys not presently seen: %s" % (old_keys - new_keys)]
# Add topical message
if diff:
diff.insert(0, "Dictionaries had differing keys:")
diffkeys = len(diff)
# Values in common keys would differ quite often,
# so we need to join the messages together
for k in new_keys.intersection(old_keys):
try:
new, old = dnew[k], dold[k]
same = new == old
if not same:
# Since JSON does not discriminate between lists and
# tuples, we might need to cast them into the same type
# as the last resort. And lets try to be more generic
same = old.__class__(new) == old
except Exception:
same = False
if not same:
diff += [" * %s: %r != %r" % (k, dnew[k], dold[k])]
if len(diff) > diffkeys:
diff.insert(diffkeys, "Some dictionary entries had differing values:")
return textwrap_indent('\n'.join(diff), ' ' * indent)
def rgetcwd(error=True):
if error:
return os.getcwd()
try:
cwd = os.getcwd()
except OSError as exc:
# Changing back to cwd is probably not necessary
# but this makes sure there's somewhere to change to.
cwd = os.getenv('PWD')
if cwd is None:
raise OSError((
exc.errno, 'Current directory does not exist anymore, '
'and nipype was not able to guess it from the environment'))
warn('Current folder does not exist, replacing with "%s" instead.' % cwd)
return cwd
| true | true |
f7344479dd36c883ecee3ae2e8ca891b5e378565 | 4,706 | py | Python | unlock/util/runtime.py | NeuralProsthesisLab/unlock | 0c4d95abdab288d3e657ca2db867b06f755f26ff | [
"BSD-3-Clause"
] | 6 | 2017-05-05T01:08:55.000Z | 2021-08-03T21:50:07.000Z | unlock/util/runtime.py | NeuralProsthesisLab/unlock | 0c4d95abdab288d3e657ca2db867b06f755f26ff | [
"BSD-3-Clause"
] | 1 | 2015-05-21T01:02:50.000Z | 2015-05-21T16:03:43.000Z | unlock/util/runtime.py | NeuralProsthesisLab/unlock | 0c4d95abdab288d3e657ca2db867b06f755f26ff | [
"BSD-3-Clause"
] | 4 | 2015-05-21T12:38:42.000Z | 2022-03-28T15:47:58.000Z | # Copyright (c) James Percent, Byron Galbraith and Unlock contributors.
# All rights reserved.
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of Unlock nor the names of its contributors may be used
# to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from unlock.util.observable import *
from unlock.util.saferef import *
from unlock.util.injector import *
from optparse import OptionParser
import json
import logging
import logging.config
import os
import sys
import traceback
__author__ = 'jpercent'
class RuntimeAssistant(object):
    """Static helpers shared by Unlock runtimes: dependency-injection
    bootstrap, JSON config parsing, process priority, and exception dumps."""

    def __init__(self):
        super(RuntimeAssistant, self).__init__()

    @staticmethod
    def configure(config, fact_instance):
        """Wire up the application described by *config*, using
        *fact_instance* as the component factory, and return the configured
        root instance."""
        assert fact_instance
        dpi = DependencyInjector(fact_instance)
        instance = dpi.configure_application(config)
        assert instance
        return instance

    @staticmethod
    def parse_json_config(conf):
        """Load the JSON document at path *conf* and return the parsed object."""
        with open(conf, 'rt') as file_descriptor:
            return json.loads(file_descriptor.read())

    @staticmethod
    def make_high_priority():
        """Best effort: raise this process to high scheduling priority.

        Failures (psutil missing, insufficient privileges) are reported via
        print_last_exception and otherwise ignored.
        """
        try:
            import psutil
            import os
            p = psutil.Process(os.getpid())
            # NOTE(review): set_nice is the legacy psutil API; modern
            # psutil spells this p.nice(psutil.HIGH_PRIORITY_CLASS).
            p.set_nice(psutil.HIGH_PRIORITY_CLASS)
        except Exception:
            RuntimeAssistant.print_last_exception()

    @staticmethod
    def print_last_exception():
        """Dump the currently-handled exception's traceback to stderr."""
        # Local import: ``traceback`` was referenced here without any
        # visible module-level import, which would raise NameError at the
        # exact moment an error needed to be reported.
        import traceback
        exc_type, exc_value, exc_traceback = sys.exc_info()
        traceback.print_exception(exc_type, exc_value, exc_traceback, file=sys.stderr)
class JsonConfiguredRuntime(object):
    """Application runtime bootstrapped from a JSON config file.

    Construction builds the option parser (with a -c/--conf option
    defaulting to <path_to_default_config>/conf.json); init() then parses
    the command line, loads the JSON config, and wires the application via
    RuntimeAssistant.configure.
    """
    def __init__(self, factory, path_to_default_config):
        """Initializes a JsonConfiguredRuntime."""
        self.factory = factory
        self.conf = None
        self.logger = None
        self.loglevel = logging.INFO
        self.config = None
        self.runtime_instance = None
        self.args = None
        self.options = None
        self.parser = None
        self.usage = "usage: %prog [options]"
        conf_help = 'path to the configuration; if not set conf.json is used'
        try:
            self.parser = OptionParser(version="%prog 1.0", usage=self.usage)
            # NOTE(review): ``os`` is not imported by name in this module's
            # explicit imports; it presumably arrives via a wildcard
            # import -- confirm.
            self.default_conf = os.path.join(path_to_default_config, 'conf.json')
            self.parser.add_option('-c', '--conf', type=str, dest='conf', default=self.default_conf, metavar='CONF', help=conf_help)
        except Exception as e:
            # Logging is not set up yet, so report to stdout and re-raise.
            print(str(self.__class__.__name__)+': FATAL failed to parse program arguments')
            RuntimeAssistant.print_last_exception()
            raise e
    def init(self):
        """Parse argv, load the JSON config, and build the runtime instance.

        On any failure: report (stdout if logging is not yet configured),
        print usage, dump the traceback, and re-raise.
        """
        assert self.parser
        try:
            (self.options, self.args) = self.parser.parse_args()
            assert self.options.conf
            self.config = RuntimeAssistant.parse_json_config(self.options.conf)
            self.runtime_instance = RuntimeAssistant.configure(self.config, self.factory)
        except Exception as e:
            if not self.logger:
                print(str(self.__class__.__name__)+': FATAL failed to initialize correctly; did not complete logging setup')
            else:
                self.logger.fatal('failed to initialize correctly')
            if self.parser:
                self.parser.print_help()
            RuntimeAssistant.print_last_exception()
            raise e
        # Logger is created only after successful initialization.
        self.logger = logging.getLogger(__name__)
| 39.216667 | 132 | 0.68742 |
from unlock.util.observable import *
from unlock.util.saferef import *
from unlock.util.injector import *
from optparse import OptionParser
import json
import logging
import logging.config
import sys
__author__ = 'jpercent'
class RuntimeAssistant(object):
def __init__(self):
super(RuntimeAssistant, self).__init__()
@staticmethod
def configure(config, fact_instance):
assert fact_instance
dpi = DependencyInjector(fact_instance)
instance = dpi.configure_application(config)
assert instance
return instance
@staticmethod
def parse_json_config(conf):
with open(conf, 'rt') as file_descriptor:
json_string = file_descriptor.read()
config = json.loads(json_string)
return config
@staticmethod
def make_high_priority():
try:
import psutil
import os
p = psutil.Process(os.getpid())
p.set_nice(psutil.HIGH_PRIORITY_CLASS)
except Exception as e:
RuntimeAssistant.print_last_exception()
@staticmethod
def print_last_exception():
exc_type, exc_value, exc_traceback = sys.exc_info()
traceback.print_exception(exc_type, exc_value, exc_traceback, file=sys.stderr)
class JsonConfiguredRuntime(object):
def __init__(self, factory, path_to_default_config):
self.factory = factory
self.conf = None
self.logger = None
self.loglevel = logging.INFO
self.config = None
self.runtime_instance = None
self.args = None
self.options = None
self.parser = None
self.usage = "usage: %prog [options]"
conf_help = 'path to the configuration; if not set conf.json is used'
try:
self.parser = OptionParser(version="%prog 1.0", usage=self.usage)
self.default_conf = os.path.join(path_to_default_config, 'conf.json')
self.parser.add_option('-c', '--conf', type=str, dest='conf', default=self.default_conf, metavar='CONF', help=conf_help)
except Exception as e:
print(str(self.__class__.__name__)+': FATAL failed to parse program arguments')
RuntimeAssistant.print_last_exception()
raise e
def init(self):
assert self.parser
try:
(self.options, self.args) = self.parser.parse_args()
assert self.options.conf
self.config = RuntimeAssistant.parse_json_config(self.options.conf)
self.runtime_instance = RuntimeAssistant.configure(self.config, self.factory)
except Exception as e:
if not self.logger:
print(str(self.__class__.__name__)+': FATAL failed to initialize correctly; did not complete logging setup')
else:
self.logger.fatal('failed to initialize correctly')
if self.parser:
self.parser.print_help()
RuntimeAssistant.print_last_exception()
raise e
self.logger = logging.getLogger(__name__)
| true | true |
f734452a3d6c1f77a17ae0567fdcecc839f622e6 | 21,334 | py | Python | node/tests/k8st/tests/test_bgp_advert_v6.py | mikestephen/calico | 6c512191c05097dbfacbd18fb23d1ebff18961fd | [
"Apache-2.0"
] | 3,973 | 2015-07-29T21:13:46.000Z | 2022-03-31T09:27:38.000Z | node/tests/k8st/tests/test_bgp_advert_v6.py | mikestephen/calico | 6c512191c05097dbfacbd18fb23d1ebff18961fd | [
"Apache-2.0"
] | 4,584 | 2015-07-29T08:47:22.000Z | 2022-03-31T22:54:26.000Z | node/tests/k8st/tests/test_bgp_advert_v6.py | mikestephen/calico | 6c512191c05097dbfacbd18fb23d1ebff18961fd | [
"Apache-2.0"
] | 1,066 | 2015-07-30T06:29:18.000Z | 2022-03-31T20:01:47.000Z | # Copyright (c) 2020 Tigera, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import subprocess
import json
import sys
from tests.k8st.test_base import TestBaseV6
from tests.k8st.utils.utils import start_external_node_with_bgp, \
retry_until_success, run, curl, DiagsCollector, calicoctl, kubectl, node_info
# Module-level logger for this test module.
_log = logging.getLogger(__name__)

# How many times the connectivity checks in the tests below are repeated.
attempts = 10

# BIRD configuration installed on the external node ("kube-node-extra") when a
# subclass uses the full node-to-node mesh: one passive peering per cluster
# node.  The four %s slots are filled with the IPv6 addresses of the four
# cluster nodes (see TestBGPAdvertV6.get_bird_conf).
# NOTE(review): the indentation inside these config strings appears to have
# been stripped in this copy; BIRD's brace-delimited syntax still parses it,
# but upstream formats the bodies with a two-space indent -- confirm.
bird_conf = """
# Template for all BGP clients
template bgp bgp_template {
debug { states };
description "Connection to BGP peer";
local as 64512;
multihop;
gateway recursive; # This should be the default, but just in case.
import all; # Import all routes, since we don't know what the upstream
# topology is and therefore have to trust the ToR/RR.
export all;
source address ip@local; # The local address we use for the TCP connection
add paths on;
graceful restart; # See comment in kernel section about graceful restart.
connect delay time 2;
connect retry time 5;
error wait time 5,30;
}
# ------------- Node-to-node mesh -------------
protocol bgp Mesh_with_master_node from bgp_template {
neighbor %s as 64512;
passive on; # Mesh is unidirectional, peer will connect to us.
}
protocol bgp Mesh_with_node_1 from bgp_template {
neighbor %s as 64512;
passive on; # Mesh is unidirectional, peer will connect to us.
}
protocol bgp Mesh_with_node_2 from bgp_template {
neighbor %s as 64512;
passive on; # Mesh is unidirectional, peer will connect to us.
}
protocol bgp Mesh_with_node_3 from bgp_template {
neighbor %s as 64512;
passive on; # Mesh is unidirectional, peer will connect to us.
}
"""

# BIRD config for an external node to peer with
# the in-cluster route reflector on kube-node-2.
# The single %s slot is filled with the RR node's IPv6 address
# (see TestBGPAdvertV6RR.get_bird_conf).
bird_conf_rr = """
# Template for all BGP clients
template bgp bgp_template {
debug { states };
description "Connection to BGP peer";
local as 64512;
multihop;
gateway recursive; # This should be the default, but just in case.
import all; # Import all routes, since we don't know what the upstream
# topology is and therefore have to trust the ToR/RR.
export all;
source address ip@local; # The local address we use for the TCP connection
add paths on;
graceful restart; # See comment in kernel section about graceful restart.
connect delay time 2;
connect retry time 5;
error wait time 5,30;
}
protocol bgp Mesh_with_node_2 from bgp_template {
neighbor %s as 64512;
passive on; # Mesh is unidirectional, peer will connect to us.
}
"""
class _TestBGPAdvertV6(TestBaseV6):
    """Common fixture for the IPv6 BGP service-advertisement tests.

    Subclasses choose the BGP topology by implementing:

    * ``get_bird_conf()`` -- the BIRD config installed on the external
      node ("kube-node-extra"), i.e. the peerings *from* that node.
    * ``get_extra_peer_spec()`` -- the ``spec`` of the BGPPeer resource
      that configures the peering(s) *towards* the external node.

    (Fixed in review: the nested YAML keys in the embedded calicoctl
    manifests had lost their indentation, which made the manifests
    invalid; standard two-space nesting is restored.)
    """

    def setUp(self):
        """Create the test namespace, start the external BGP node and peer with it."""
        super(_TestBGPAdvertV6, self).setUp()

        # Create bgp test namespace
        self.ns = "bgp-test"
        self.create_namespace(self.ns)

        self.nodes, self.ipv4s, self.ipv6s = node_info()
        self.external_node_ip = start_external_node_with_bgp(
            "kube-node-extra",
            bird6_peer_config=self.get_bird_conf(),
        )

        # Enable debug logging
        self.update_ds_env("calico-node",
                           "kube-system",
                           {"BGP_LOGSEVERITYSCREEN": "debug"})

        # Establish BGPPeer from cluster nodes to node-extra.  The subclass
        # spec (which begins with a newline and a top-level "spec:" key) is
        # appended directly after the metadata name.
        calicoctl("""apply -f - << EOF
apiVersion: projectcalico.org/v3
kind: BGPPeer
metadata:
  name: node-extra.peer%s
EOF
""" % self.get_extra_peer_spec())

    def tearDown(self):
        """Remove the namespace, the external node and all BGP test configuration."""
        super(_TestBGPAdvertV6, self).tearDown()
        self.delete_and_confirm(self.ns, "ns")
        try:
            # Delete the extra node.
            run("docker rm -f kube-node-extra")
        except subprocess.CalledProcessError:
            # Best effort: the container may already be gone.
            pass

        # Delete BGPPeers.
        calicoctl("delete bgppeer node-extra.peer", allow_fail=True)
        calicoctl("delete bgppeer peer-with-rr", allow_fail=True)

        # Restore node-to-node mesh.
        calicoctl("""apply -f - << EOF
apiVersion: projectcalico.org/v3
kind: BGPConfiguration
metadata: {name: default}
spec:
  nodeToNodeMeshEnabled: true
  asNumber: 64512
EOF
""")

        # Remove node-2's route-reflector config (a no-op unless the RR
        # subclass promoted it during the test).
        json_str = calicoctl("get node %s -o json" % self.nodes[2])
        node_dict = json.loads(json_str)
        node_dict['metadata']['labels'].pop('i-am-a-route-reflector', '')
        node_dict['spec']['bgp'].pop('routeReflectorClusterID', '')
        calicoctl("""apply -f - << EOF
%s
EOF
""" % json.dumps(node_dict))

    def get_svc_cluster_ip(self, svc, ns):
        """Return the cluster IP of service ``svc`` in namespace ``ns``."""
        return kubectl("get svc %s -n %s -o json | jq -r .spec.clusterIP" %
                       (svc, ns)).strip()

    def assert_ecmp_routes(self, dst, via):
        """Assert the external node has an ECMP route to ``dst`` via every IP in ``via``."""
        matchStr = dst + " proto bird metric 1024 pref medium"
        # sort ips and construct match string for ECMP routes.
        for ip in sorted(via):
            matchStr += "\n\tnexthop via %s dev eth0 weight 1 " % ip
        retry_until_success(lambda: self.assertIn(matchStr, self.get_routes()))

    def get_svc_host_ipv6(self, svc, ns):
        """Return the IPv6 address of the node hosting the first pod of ``svc``."""
        ipv4 = kubectl("get po -l app=%s -n %s -o json | jq -r .items[0].status.hostIP" %
                       (svc, ns)).strip()
        for i in range(len(self.ipv4s)):
            if ipv4 == self.ipv4s[i]:
                return self.ipv6s[i]
        # The pod's host IP must belong to one of the known cluster nodes.
        assert False

    def add_svc_external_ips(self, svc, ns, ips):
        """Patch service ``svc`` in ``ns`` to carry the given list of external IPs."""
        ipsStr = ','.join('"{0}"'.format(ip) for ip in ips)
        patchStr = "{\"spec\": {\"externalIPs\": [%s]}}" % (ipsStr)
        return kubectl("patch svc %s -n %s --patch '%s'" % (svc, ns, patchStr)).strip()
class TestBGPAdvertV6(_TestBGPAdvertV6):
    """IPv6 service advertisement over a full BGP mesh.

    In the tests of this class we have a full BGP mesh between the
    cluster nodes (kube-control-plane, kube-node-1 and kube-node-2)
    and the external node (kube-node-extra):

    - The full mesh between the cluster nodes is configured by
      nodeToNodeMeshEnabled: true.

    - The peerings from each cluster node to the external node are
      configured by self.get_extra_peer_spec().

    - The peerings from the external node to each cluster node are
      configured by self.get_bird_conf().

    (Fixed in review: restored the lost YAML nesting in the embedded
    manifests, and changed the NetworkPolicy CIDR suffix from /32 to
    /128 -- a single IPv6 host is /128, as the commented-out policy in
    test_cluster_ip_advertisement already uses.)
    """

    def get_bird_conf(self):
        """Peerings from the external node: one per cluster node (full mesh)."""
        return bird_conf % (self.ipv6s[0], self.ipv6s[1],
                            self.ipv6s[2], self.ipv6s[3])

    def get_extra_peer_spec(self):
        """BGPPeer spec: every cluster node peers with the external node."""
        return """
spec:
  peerIP: %s
  asNumber: 64512
""" % self.external_node_ip

    def test_cluster_ip_advertisement(self):
        """
        Runs the tests for service cluster IPv6 advertisement
        - Create both a Local and a Cluster type NodePort service with a single replica.
        - assert only local and cluster CIDR routes are advertised.
        - assert /128 routes are used, source IP is preserved.
        - Scale the Local NP service so it is running on multiple nodes, assert ECMP routing, source IP is preserved.
        - Delete both services, assert only cluster CIDR route is advertised.
        """
        with DiagsCollector():
            calicoctl("""apply -f - << EOF
apiVersion: projectcalico.org/v3
kind: BGPConfiguration
metadata:
  name: default
spec:
  serviceClusterIPs:
  - cidr: fd00:10:96::/112
EOF
""")

            # Assert that a route to the service IP range is present.
            retry_until_success(lambda: self.assertIn("fd00:10:96::/112", self.get_routes()))

            # Create both a Local and a Cluster type NodePort service with a single replica.
            local_svc = "nginx-local"
            cluster_svc = "nginx-cluster"
            self.deploy("gcr.io/kubernetes-e2e-test-images/test-webserver:1.0", local_svc, self.ns, 80, ipv6=True)
            self.deploy("gcr.io/kubernetes-e2e-test-images/test-webserver:1.0", cluster_svc, self.ns, 80, traffic_policy="Cluster", ipv6=True)
            self.wait_until_exists(local_svc, "svc", self.ns)
            self.wait_until_exists(cluster_svc, "svc", self.ns)

            # Get clusterIPs.
            local_svc_ip = self.get_svc_cluster_ip(local_svc, self.ns)
            cluster_svc_ip = self.get_svc_cluster_ip(cluster_svc, self.ns)

            # Wait for the deployments to roll out.
            self.wait_for_deployment(local_svc, self.ns)
            self.wait_for_deployment(cluster_svc, self.ns)

            # Assert that both nginx services can be curled from the external node.
            retry_until_success(curl, function_args=[local_svc_ip])
            retry_until_success(curl, function_args=[cluster_svc_ip])

            # Assert that local clusterIP is an advertised route and cluster clusterIP is not.
            retry_until_success(lambda: self.assertIn(local_svc_ip, self.get_routes()))
            retry_until_success(lambda: self.assertNotIn(cluster_svc_ip, self.get_routes()))

            # TODO: This assertion is actually incorrect. Kubernetes performs
            # SNAT on all traffic destined to a service ClusterIP that doesn't
            # originate from within the cluster's pod CIDR. This assertion
            # would pass for External / LoadBalancer IPs, though.
            #
            # Create a network policy that only accepts traffic from the external node.
            # Applying this policy asserts that traffic is not being SNAT'd by kube-proxy
            # when it reaches the destination node.
            # kubectl("""apply -f - << EOF
            # apiVersion: networking.k8s.io/v1
            # kind: NetworkPolicy
            # metadata:
            #   name: allow-tcp-80-ex
            #   namespace: bgp-test
            # spec:
            #   podSelector: {}
            #   policyTypes:
            #   - Ingress
            #   ingress:
            #   - from:
            #     - ipBlock: { cidr: %s/128 }
            #     ports:
            #     - protocol: TCP
            #       port: 80
            # EOF
            # """ % self.external_node_ip)

            # Connectivity to nginx-local should always succeed.
            for i in range(attempts):
                retry_until_success(curl, retries=200, wait_time=5, function_args=[local_svc_ip])

            # NOTE: Unlike in the IPv4 case (in test_bgp_advert.py) we cannot successfully test that
            # connectivity to nginx-cluster is load-balanced across all nodes (and hence, with the
            # above policy in place, will sometimes fail and sometimes succeed), because our current
            # observation is that Linux's IPv6 ECMP route choice does _not_ depend on source port,
            # even though it is documented as such when fib_multipath_hash_policy == 1.

            # Scale the local_svc to 4 replicas.
            self.scale_deployment(local_svc, self.ns, 4)
            self.wait_for_deployment(local_svc, self.ns)
            self.assert_ecmp_routes(local_svc_ip, [self.ipv6s[1], self.ipv6s[2], self.ipv6s[3]])
            for i in range(attempts):
                retry_until_success(curl, function_args=[local_svc_ip])

            # Delete both services.
            self.delete_and_confirm(local_svc, "svc", self.ns)
            self.delete_and_confirm(cluster_svc, "svc", self.ns)

            # Assert that clusterIP is no longer an advertised route.
            retry_until_success(lambda: self.assertNotIn(local_svc_ip, self.get_routes()))

    def test_external_ip_advertisement(self):
        """
        Runs the tests for service external IPv6 advertisement:
        - whitelist two external IP CIDRs in the BGPConfiguration;
        - assert a Local service's external IP is advertised (via its
          host) while a Cluster service's is not;
        - assert ECMP routes appear when the Local service is scaled up;
        - assert the route disappears when the services are deleted.
        """
        with DiagsCollector():
            # Whitelist two IP ranges for the external IPs we'll test with.
            calicoctl("""apply -f - << EOF
apiVersion: projectcalico.org/v3
kind: BGPConfiguration
metadata:
  name: default
spec:
  serviceExternalIPs:
  - cidr: fd5f:1234:175:200::/112
  - cidr: fd5f:1234:200:255::/120
EOF
""")

            # Create both a Local and a Cluster type NodePort service with a single replica.
            local_svc = "nginx-local"
            cluster_svc = "nginx-cluster"
            self.deploy("gcr.io/kubernetes-e2e-test-images/test-webserver:1.0", local_svc, self.ns, 80, ipv6=True)
            self.deploy("gcr.io/kubernetes-e2e-test-images/test-webserver:1.0", cluster_svc, self.ns, 80, traffic_policy="Cluster", ipv6=True)
            self.wait_until_exists(local_svc, "svc", self.ns)
            self.wait_until_exists(cluster_svc, "svc", self.ns)

            # Get clusterIPs.
            local_svc_ip = self.get_svc_cluster_ip(local_svc, self.ns)
            cluster_svc_ip = self.get_svc_cluster_ip(cluster_svc, self.ns)

            # Wait for the deployments to roll out.
            self.wait_for_deployment(local_svc, self.ns)
            self.wait_for_deployment(cluster_svc, self.ns)

            # Assert that clusterIPs are not advertised.
            retry_until_success(lambda: self.assertNotIn(local_svc_ip, self.get_routes()))
            retry_until_success(lambda: self.assertNotIn(cluster_svc_ip, self.get_routes()))

            # Create a network policy that only accepts traffic from the external node.
            # (Review fix: /128 instead of /32 -- the peer address is IPv6, and the
            # intent is to admit that single host only.)
            kubectl("""apply -f - << EOF
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: allow-tcp-80-ex
  namespace: bgp-test
spec:
  podSelector: {}
  policyTypes:
  - Ingress
  ingress:
  - from:
    - ipBlock: { cidr: %s/128 }
    ports:
    - protocol: TCP
      port: 80
EOF
""" % self.external_node_ip)

            # Get host IPs for the nginx pods.
            local_svc_host_ip = self.get_svc_host_ipv6(local_svc, self.ns)
            cluster_svc_host_ip = self.get_svc_host_ipv6(cluster_svc, self.ns)

            # Select an IP from each external IP CIDR.
            local_svc_external_ip = "fd5f:1234:175:200::1"
            cluster_svc_external_ip = "fd5f:1234:200:255::1"

            # Add external IPs to the two services.
            self.add_svc_external_ips(local_svc, self.ns, [local_svc_external_ip])
            self.add_svc_external_ips(cluster_svc, self.ns, [cluster_svc_external_ip])

            # Verify that the external IP of the local service is advertised but
            # that of the cluster service is not.
            local_svc_externalips_route = "%s via %s" % (local_svc_external_ip, local_svc_host_ip)
            cluster_svc_externalips_route = "%s via %s" % (cluster_svc_external_ip, cluster_svc_host_ip)
            retry_until_success(lambda: self.assertIn(local_svc_externalips_route, self.get_routes()))
            retry_until_success(lambda: self.assertNotIn(cluster_svc_externalips_route, self.get_routes()))

            # Scale the local_svc to 4 replicas.
            self.scale_deployment(local_svc, self.ns, 4)
            self.wait_for_deployment(local_svc, self.ns)

            # Verify that we have ECMP routes for the external IP of the local service.
            retry_until_success(lambda: self.assert_ecmp_routes(local_svc_external_ip, [self.ipv6s[1], self.ipv6s[2], self.ipv6s[3]]))

            # Delete both services, assert only cluster CIDR route is advertised.
            self.delete_and_confirm(local_svc, "svc", self.ns)
            self.delete_and_confirm(cluster_svc, "svc", self.ns)

            # Assert that the external IP is no longer an advertised route.
            retry_until_success(lambda: self.assertNotIn(local_svc_externalips_route, self.get_routes()))

    def test_many_services(self):
        """
        Creates a lot of IPv6 services quickly and asserts their cluster IPs
        are advertised promptly, then withdrawn when the backing deployment
        is scaled to zero.
        """
        with DiagsCollector():
            calicoctl("""apply -f - << EOF
apiVersion: projectcalico.org/v3
kind: BGPConfiguration
metadata:
  name: default
spec:
  serviceClusterIPs:
  - cidr: fd00:10:96::/112
EOF
""")

            # Assert that a route to the service IP range is present.
            retry_until_success(lambda: self.assertIn("fd00:10:96::/112", self.get_routes()))

            # Create a local service and deployment.
            local_svc = "nginx-local"
            self.deploy("gcr.io/kubernetes-e2e-test-images/test-webserver:1.0", local_svc, self.ns, 80, ipv6=True)
            self.wait_for_deployment(local_svc, self.ns)

            # Get clusterIPs.
            cluster_ips = []
            cluster_ips.append(self.get_svc_cluster_ip(local_svc, self.ns))

            # Create many more services which select this deployment.
            num_svc = 300
            for i in range(num_svc):
                name = "nginx-svc-%s" % i
                self.create_service(name, local_svc, self.ns, 80, ipv6=True)

            # Get all of their IPs.
            for i in range(num_svc):
                name = "nginx-svc-%s" % i
                cluster_ips.append(self.get_svc_cluster_ip(name, self.ns))

            # Assert they are all advertised to the other node. This should happen
            # quickly enough that by the time we have queried all services from
            # the k8s API, they should be programmed on the remote node.
            def check_routes_advertised():
                routes = self.get_routes()
                for cip in cluster_ips:
                    self.assertIn(cip, routes)
            retry_until_success(check_routes_advertised, retries=3, wait_time=5)

            # Scale to 0 replicas, assert all routes are removed.
            self.scale_deployment(local_svc, self.ns, 0)
            self.wait_for_deployment(local_svc, self.ns)
            def check_routes_gone():
                routes = self.get_routes()
                for cip in cluster_ips:
                    self.assertNotIn(cip, routes)
            retry_until_success(check_routes_gone, retries=10, wait_time=5)
class TestBGPAdvertV6RR(_TestBGPAdvertV6):
    """IPv6 service advertisement via an in-cluster route reflector.

    In the tests of this class, kube-node-2 acts as a route reflector
    (RR) and all the other nodes peer with it only:

    * the cluster-node -> RR peerings are configured by BGPPeer
      "peer-with-rr" (applied in test_rr);
    * the RR -> external-node peering is configured by
      get_extra_peer_spec();
    * the external-node -> RR peering is configured by get_bird_conf().

    (Fixed in review: restored the lost YAML nesting in the embedded
    Deployment/Service/BGP manifests.)
    """

    def get_bird_conf(self):
        """External node peers only with the RR (kube-node-2)."""
        return bird_conf_rr % self.ipv6s[2]

    def get_extra_peer_spec(self):
        """BGPPeer spec: only the RR node peers with the external node."""
        return """
spec:
  node: %s
  peerIP: %s
  asNumber: 64512
""" % (self.nodes[2], self.external_node_ip)

    def test_rr(self):
        """End-to-end: cluster IP and external IP are advertised via the RR."""
        # Create ExternalTrafficPolicy Local service with one endpoint on node-1.
        kubectl("""apply -f - << EOF
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-rr
  namespace: bgp-test
  labels:
    app: nginx
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nginx
      run: nginx-rr
  template:
    metadata:
      labels:
        app: nginx
        run: nginx-rr
    spec:
      containers:
      - name: nginx-rr
        image: nginx:1.7.9
        ports:
        - containerPort: 80
      nodeSelector:
        beta.kubernetes.io/os: linux
        kubernetes.io/hostname: %s
---
apiVersion: v1
kind: Service
metadata:
  name: nginx-rr
  namespace: bgp-test
  labels:
    app: nginx
    run: nginx-rr
spec:
  ipFamilies:
  - IPv6
  externalIPs:
  - fd5f:1234:175:200::1
  ports:
  - port: 80
    targetPort: 80
  selector:
    app: nginx
    run: nginx-rr
  type: NodePort
  externalTrafficPolicy: Local
EOF
""" % self.nodes[1])

        # Dump current BGP state to the logs for debugging.
        calicoctl("get nodes -o yaml")
        calicoctl("get bgppeers -o yaml")
        calicoctl("get bgpconfigs -o yaml")

        # Update node-2 to behave as a route-reflector.
        json_str = calicoctl("get node %s -o json" % self.nodes[2])
        node_dict = json.loads(json_str)
        node_dict['metadata']['labels']['i-am-a-route-reflector'] = 'true'
        node_dict['spec']['bgp']['routeReflectorClusterID'] = '224.0.0.1'
        calicoctl("""apply -f - << EOF
%s
EOF
""" % json.dumps(node_dict))

        # Disable node-to-node mesh, add cluster and external IP CIDRs to
        # advertise, and configure BGP peering between the cluster nodes and the
        # RR. (The BGP peering from the external node to the RR is included in
        # get_bird_conf() above.)
        calicoctl("""apply -f - << EOF
apiVersion: projectcalico.org/v3
kind: BGPConfiguration
metadata:
  name: default
spec:
  nodeToNodeMeshEnabled: false
  asNumber: 64512
  serviceClusterIPs:
  - cidr: fd00:10:96::/112
  serviceExternalIPs:
  - cidr: fd5f:1234:175:200::/112
EOF
""")
        calicoctl("""apply -f - << EOF
apiVersion: projectcalico.org/v3
kind: BGPPeer
metadata: {name: peer-with-rr}
spec:
  peerIP: %s
  asNumber: 64512
EOF
""" % self.ipv6s[2])

        # Both the cluster IP and the external IP of the service should now be
        # advertised to the external node via the route reflector.
        svc_json = kubectl("get svc nginx-rr -n bgp-test -o json")
        svc_dict = json.loads(svc_json)
        cluster_ip = svc_dict['spec']['clusterIP']
        external_ip = svc_dict['spec']['externalIPs'][0]
        retry_until_success(lambda: self.assertIn(cluster_ip, self.get_routes()))
        retry_until_success(lambda: self.assertIn(external_ip, self.get_routes()))
| 36.220713 | 142 | 0.640386 |
import logging
import subprocess
import json
import sys
from tests.k8st.test_base import TestBaseV6
from tests.k8st.utils.utils import start_external_node_with_bgp, \
retry_until_success, run, curl, DiagsCollector, calicoctl, kubectl, node_info
_log = logging.getLogger(__name__)
attempts = 10
bird_conf = """
# Template for all BGP clients
template bgp bgp_template {
debug { states };
description "Connection to BGP peer";
local as 64512;
multihop;
gateway recursive; # This should be the default, but just in case.
import all; # Import all routes, since we don't know what the upstream
# topology is and therefore have to trust the ToR/RR.
export all;
source address ip@local; # The local address we use for the TCP connection
add paths on;
graceful restart; # See comment in kernel section about graceful restart.
connect delay time 2;
connect retry time 5;
error wait time 5,30;
}
# ------------- Node-to-node mesh -------------
protocol bgp Mesh_with_master_node from bgp_template {
neighbor %s as 64512;
passive on; # Mesh is unidirectional, peer will connect to us.
}
protocol bgp Mesh_with_node_1 from bgp_template {
neighbor %s as 64512;
passive on; # Mesh is unidirectional, peer will connect to us.
}
protocol bgp Mesh_with_node_2 from bgp_template {
neighbor %s as 64512;
passive on; # Mesh is unidirectional, peer will connect to us.
}
protocol bgp Mesh_with_node_3 from bgp_template {
neighbor %s as 64512;
passive on; # Mesh is unidirectional, peer will connect to us.
}
"""
# BIRD config for an external node to peer with
# the in-cluster route reflector on kube-node-2.
bird_conf_rr = """
# Template for all BGP clients
template bgp bgp_template {
debug { states };
description "Connection to BGP peer";
local as 64512;
multihop;
gateway recursive; # This should be the default, but just in case.
import all; # Import all routes, since we don't know what the upstream
# topology is and therefore have to trust the ToR/RR.
export all;
source address ip@local; # The local address we use for the TCP connection
add paths on;
graceful restart; # See comment in kernel section about graceful restart.
connect delay time 2;
connect retry time 5;
error wait time 5,30;
}
protocol bgp Mesh_with_node_2 from bgp_template {
neighbor %s as 64512;
passive on; # Mesh is unidirectional, peer will connect to us.
}
"""
class _TestBGPAdvertV6(TestBaseV6):
def setUp(self):
super(_TestBGPAdvertV6, self).setUp()
self.ns = "bgp-test"
self.create_namespace(self.ns)
self.nodes, self.ipv4s, self.ipv6s = node_info()
self.external_node_ip = start_external_node_with_bgp(
"kube-node-extra",
bird6_peer_config=self.get_bird_conf(),
)
self.update_ds_env("calico-node",
"kube-system",
{"BGP_LOGSEVERITYSCREEN": "debug"})
calicoctl("""apply -f - << EOF
apiVersion: projectcalico.org/v3
kind: BGPPeer
metadata:
name: node-extra.peer%s
EOF
""" % self.get_extra_peer_spec())
def tearDown(self):
super(_TestBGPAdvertV6, self).tearDown()
self.delete_and_confirm(self.ns, "ns")
try:
run("docker rm -f kube-node-extra")
except subprocess.CalledProcessError:
pass
calicoctl("delete bgppeer node-extra.peer", allow_fail=True)
calicoctl("delete bgppeer peer-with-rr", allow_fail=True)
calicoctl("""apply -f - << EOF
apiVersion: projectcalico.org/v3
kind: BGPConfiguration
metadata: {name: default}
spec:
nodeToNodeMeshEnabled: true
asNumber: 64512
EOF
""")
json_str = calicoctl("get node %s -o json" % self.nodes[2])
node_dict = json.loads(json_str)
node_dict['metadata']['labels'].pop('i-am-a-route-reflector', '')
node_dict['spec']['bgp'].pop('routeReflectorClusterID', '')
calicoctl("""apply -f - << EOF
%s
EOF
""" % json.dumps(node_dict))
def get_svc_cluster_ip(self, svc, ns):
return kubectl("get svc %s -n %s -o json | jq -r .spec.clusterIP" %
(svc, ns)).strip()
def assert_ecmp_routes(self, dst, via):
matchStr = dst + " proto bird metric 1024 pref medium"
# sort ips and construct match string for ECMP routes.
for ip in sorted(via):
matchStr += "\n\tnexthop via %s dev eth0 weight 1 " % ip
retry_until_success(lambda: self.assertIn(matchStr, self.get_routes()))
def get_svc_host_ipv6(self, svc, ns):
ipv4 = kubectl("get po -l app=%s -n %s -o json | jq -r .items[0].status.hostIP" %
(svc, ns)).strip()
for i in range(len(self.ipv4s)):
if ipv4 == self.ipv4s[i]:
return self.ipv6s[i]
assert False
def add_svc_external_ips(self, svc, ns, ips):
ipsStr = ','.join('"{0}"'.format(ip) for ip in ips)
patchStr = "{\"spec\": {\"externalIPs\": [%s]}}" % (ipsStr)
return kubectl("patch svc %s -n %s --patch '%s'" % (svc, ns, patchStr)).strip()
class TestBGPAdvertV6(_TestBGPAdvertV6):
# In the tests of this class we have a full BGP mesh between the
# cluster nodes (kube-control-plane, kube-node-1 and kube-node-2)
# and the external node (kube-node-extra):
#
# - The full mesh between the cluster nodes is configured by
# nodeToNodeMeshEnabled: true.
#
# - The peerings from each cluster node to the external node are
# configured by self.get_extra_peer_spec().
#
# - The peerings from the external node to each cluster node are
# configured by self.get_bird_conf().
def get_bird_conf(self):
return bird_conf % (self.ipv6s[0], self.ipv6s[1],
self.ipv6s[2], self.ipv6s[3])
def get_extra_peer_spec(self):
return """
spec:
peerIP: %s
asNumber: 64512
""" % self.external_node_ip
def test_cluster_ip_advertisement(self):
with DiagsCollector():
calicoctl("""apply -f - << EOF
apiVersion: projectcalico.org/v3
kind: BGPConfiguration
metadata:
name: default
spec:
serviceClusterIPs:
- cidr: fd00:10:96::/112
EOF
""")
# Assert that a route to the service IP range is present.
retry_until_success(lambda: self.assertIn("fd00:10:96::/112", self.get_routes()))
# Create both a Local and a Cluster type NodePort service with a single replica.
local_svc = "nginx-local"
cluster_svc = "nginx-cluster"
self.deploy("gcr.io/kubernetes-e2e-test-images/test-webserver:1.0", local_svc, self.ns, 80, ipv6=True)
self.deploy("gcr.io/kubernetes-e2e-test-images/test-webserver:1.0", cluster_svc, self.ns, 80, traffic_policy="Cluster", ipv6=True)
self.wait_until_exists(local_svc, "svc", self.ns)
self.wait_until_exists(cluster_svc, "svc", self.ns)
# Get clusterIPs.
local_svc_ip = self.get_svc_cluster_ip(local_svc, self.ns)
cluster_svc_ip = self.get_svc_cluster_ip(cluster_svc, self.ns)
# Wait for the deployments to roll out.
self.wait_for_deployment(local_svc, self.ns)
self.wait_for_deployment(cluster_svc, self.ns)
# Assert that both nginx service can be curled from the external node.
retry_until_success(curl, function_args=[local_svc_ip])
retry_until_success(curl, function_args=[cluster_svc_ip])
# Assert that local clusterIP is an advertised route and cluster clusterIP is not.
retry_until_success(lambda: self.assertIn(local_svc_ip, self.get_routes()))
retry_until_success(lambda: self.assertNotIn(cluster_svc_ip, self.get_routes()))
# TODO: This assertion is actually incorrect. Kubernetes performs
# SNAT on all traffic destined to a service ClusterIP that doesn't
# pass for External / LoadBalancer IPs, though.
#
# Create a network policy that only accepts traffic from the external node.
# Applying this policy asserts that traffic is not being SNAT'd by kube-proxy
# apiVersion: networking.k8s.io/v1
# kind: NetworkPolicy
# metadata:
# name: allow-tcp-80-ex
# namespace: bgp-test
# spec:
# podSelector: {}
# policyTypes:
# - Ingress
# ingress:
# - from:
# - ipBlock: { cidr: %s/128 }
# ports:
# - protocol: TCP
# port: 80
# EOF
# """ % self.external_node_ip)
for i in range(attempts):
retry_until_success(curl, retries=200, wait_time=5, function_args=[local_svc_ip])
# even though it is documented as such when fib_multipath_hash_policy == 1.
# Scale the local_svc to 4 replicas
self.scale_deployment(local_svc, self.ns, 4)
self.wait_for_deployment(local_svc, self.ns)
self.assert_ecmp_routes(local_svc_ip, [self.ipv6s[1], self.ipv6s[2], self.ipv6s[3]])
for i in range(attempts):
retry_until_success(curl, function_args=[local_svc_ip])
# Delete both services.
self.delete_and_confirm(local_svc, "svc", self.ns)
self.delete_and_confirm(cluster_svc, "svc", self.ns)
# Assert that clusterIP is no longer an advertised route.
retry_until_success(lambda: self.assertNotIn(local_svc_ip, self.get_routes()))
def test_external_ip_advertisement(self):
with DiagsCollector():
# Whitelist two IP ranges for the external IPs we'll test with
calicoctl("""apply -f - << EOF
apiVersion: projectcalico.org/v3
kind: BGPConfiguration
metadata:
name: default
spec:
serviceExternalIPs:
- cidr: fd5f:1234:175:200::/112
- cidr: fd5f:1234:200:255::/120
EOF
""")
local_svc = "nginx-local"
cluster_svc = "nginx-cluster"
self.deploy("gcr.io/kubernetes-e2e-test-images/test-webserver:1.0", local_svc, self.ns, 80, ipv6=True)
self.deploy("gcr.io/kubernetes-e2e-test-images/test-webserver:1.0", cluster_svc, self.ns, 80, traffic_policy="Cluster", ipv6=True)
self.wait_until_exists(local_svc, "svc", self.ns)
self.wait_until_exists(cluster_svc, "svc", self.ns)
local_svc_ip = self.get_svc_cluster_ip(local_svc, self.ns)
cluster_svc_ip = self.get_svc_cluster_ip(cluster_svc, self.ns)
self.wait_for_deployment(local_svc, self.ns)
self.wait_for_deployment(cluster_svc, self.ns)
retry_until_success(lambda: self.assertNotIn(local_svc_ip, self.get_routes()))
retry_until_success(lambda: self.assertNotIn(cluster_svc_ip, self.get_routes()))
kubectl("""apply -f - << EOF
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: allow-tcp-80-ex
namespace: bgp-test
spec:
podSelector: {}
policyTypes:
- Ingress
ingress:
- from:
- ipBlock: { cidr: %s/32 }
ports:
- protocol: TCP
port: 80
EOF
""" % self.external_node_ip)
local_svc_host_ip = self.get_svc_host_ipv6(local_svc, self.ns)
cluster_svc_host_ip = self.get_svc_host_ipv6(cluster_svc, self.ns)
local_svc_external_ip = "fd5f:1234:175:200::1"
cluster_svc_external_ip = "fd5f:1234:200:255::1"
self.add_svc_external_ips(local_svc, self.ns, [local_svc_external_ip])
self.add_svc_external_ips(cluster_svc, self.ns, [cluster_svc_external_ip])
local_svc_externalips_route = "%s via %s" % (local_svc_external_ip, local_svc_host_ip)
cluster_svc_externalips_route = "%s via %s" % (cluster_svc_external_ip, cluster_svc_host_ip)
retry_until_success(lambda: self.assertIn(local_svc_externalips_route, self.get_routes()))
retry_until_success(lambda: self.assertNotIn(cluster_svc_externalips_route, self.get_routes()))
self.scale_deployment(local_svc, self.ns, 4)
self.wait_for_deployment(local_svc, self.ns)
retry_until_success(lambda: self.assert_ecmp_routes(local_svc_external_ip, [self.ipv6s[1], self.ipv6s[2], self.ipv6s[3]]))
self.delete_and_confirm(local_svc, "svc", self.ns)
self.delete_and_confirm(cluster_svc, "svc", self.ns)
retry_until_success(lambda: self.assertNotIn(local_svc_externalips_route, self.get_routes()))
def test_many_services(self):
with DiagsCollector():
calicoctl("""apply -f - << EOF
apiVersion: projectcalico.org/v3
kind: BGPConfiguration
metadata:
name: default
spec:
serviceClusterIPs:
- cidr: fd00:10:96::/112
EOF
""")
retry_until_success(lambda: self.assertIn("fd00:10:96::/112", self.get_routes()))
local_svc = "nginx-local"
self.deploy("gcr.io/kubernetes-e2e-test-images/test-webserver:1.0", local_svc, self.ns, 80, ipv6=True)
self.wait_for_deployment(local_svc, self.ns)
cluster_ips = []
cluster_ips.append(self.get_svc_cluster_ip(local_svc, self.ns))
num_svc = 300
for i in range(num_svc):
name = "nginx-svc-%s" % i
self.create_service(name, local_svc, self.ns, 80, ipv6=True)
for i in range(num_svc):
name = "nginx-svc-%s" % i
cluster_ips.append(self.get_svc_cluster_ip(name, self.ns))
def check_routes_advertised():
routes = self.get_routes()
for cip in cluster_ips:
self.assertIn(cip, routes)
retry_until_success(check_routes_advertised, retries=3, wait_time=5)
self.scale_deployment(local_svc, self.ns, 0)
self.wait_for_deployment(local_svc, self.ns)
def check_routes_gone():
routes = self.get_routes()
for cip in cluster_ips:
self.assertNotIn(cip, routes)
retry_until_success(check_routes_gone, retries=10, wait_time=5)
class TestBGPAdvertV6RR(_TestBGPAdvertV6):
    """IPv6 BGP service advertisement through a route reflector: node 2 is
    labelled as the RR and routes reach the external peer via it instead of
    over a full node-to-node mesh."""
    def get_bird_conf(self):
        """BIRD config for the external node, peering with the RR (node 2)."""
        return bird_conf_rr % self.ipv6s[2]
    def get_extra_peer_spec(self):
        """BGPPeer spec fragment pointing node 2 at the external node."""
        return """
spec:
  node: %s
  peerIP: %s
  asNumber: 64512
""" % (self.nodes[2], self.external_node_ip)
    def test_rr(self):
        """Check that the cluster IP and external IP of a local-traffic
        NodePort service are advertised via the route reflector."""
        # Deployment pinned to node 1, plus an IPv6 NodePort service with a
        # fixed external IP and externalTrafficPolicy: Local.
        kubectl("""apply -f - << EOF
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-rr
  namespace: bgp-test
  labels:
    app: nginx
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nginx
      run: nginx-rr
  template:
    metadata:
      labels:
        app: nginx
        run: nginx-rr
    spec:
      containers:
      - name: nginx-rr
        image: nginx:1.7.9
        ports:
        - containerPort: 80
      nodeSelector:
        beta.kubernetes.io/os: linux
        kubernetes.io/hostname: %s
---
apiVersion: v1
kind: Service
metadata:
  name: nginx-rr
  namespace: bgp-test
  labels:
    app: nginx
    run: nginx-rr
spec:
  ipFamilies:
  - IPv6
  externalIPs:
  - fd5f:1234:175:200::1
  ports:
  - port: 80
    targetPort: 80
  selector:
    app: nginx
    run: nginx-rr
  type: NodePort
  externalTrafficPolicy: Local
EOF
""" % self.nodes[1])
        # Dump current calico state for debugging.
        calicoctl("get nodes -o yaml")
        calicoctl("get bgppeers -o yaml")
        calicoctl("get bgpconfigs -o yaml")
        # Turn node 2 into a route reflector: add the label the BGPPeer
        # below could select on, and set a reflector cluster ID.
        json_str = calicoctl("get node %s -o json" % self.nodes[2])
        node_dict = json.loads(json_str)
        node_dict['metadata']['labels']['i-am-a-route-reflector'] = 'true'
        node_dict['spec']['bgp']['routeReflectorClusterID'] = '224.0.0.1'
        calicoctl("""apply -f - << EOF
%s
EOF
""" % json.dumps(node_dict))
        # Disable the node-to-node mesh and advertise the service CIDRs.
        calicoctl("""apply -f - << EOF
apiVersion: projectcalico.org/v3
kind: BGPConfiguration
metadata:
  name: default
spec:
  nodeToNodeMeshEnabled: false
  asNumber: 64512
  serviceClusterIPs:
  - cidr: fd00:10:96::/112
  serviceExternalIPs:
  - cidr: fd5f:1234:175:200::/112
EOF
""")
        # Peer with the route reflector (node 2's IPv6 address).
        calicoctl("""apply -f - << EOF
apiVersion: projectcalico.org/v3
kind: BGPPeer
metadata: {name: peer-with-rr}
spec:
  peerIP: %s
  asNumber: 64512
EOF
""" % self.ipv6s[2])
        # Both the service's cluster IP and its external IP should now be
        # visible in the external peer's routing table.
        svc_json = kubectl("get svc nginx-rr -n bgp-test -o json")
        svc_dict = json.loads(svc_json)
        cluster_ip = svc_dict['spec']['clusterIP']
        external_ip = svc_dict['spec']['externalIPs'][0]
        retry_until_success(lambda: self.assertIn(cluster_ip, self.get_routes()))
        retry_until_success(lambda: self.assertIn(external_ip, self.get_routes()))
| true | true |
f73449826d629af809e16cb66903c50f62680e0c | 3,617 | py | Python | configs/representation/archive/walker/walker_r18_video_8x8x1_temp1e-2_right_50e_kinetics400_rgb.py | happywu/mmaction2-CycleContrast | 019734e471dffd1161b7a9c617ba862d2349a96c | [
"Apache-2.0"
] | null | null | null | configs/representation/archive/walker/walker_r18_video_8x8x1_temp1e-2_right_50e_kinetics400_rgb.py | happywu/mmaction2-CycleContrast | 019734e471dffd1161b7a9c617ba862d2349a96c | [
"Apache-2.0"
] | null | null | null | configs/representation/archive/walker/walker_r18_video_8x8x1_temp1e-2_right_50e_kinetics400_rgb.py | happywu/mmaction2-CycleContrast | 019734e471dffd1161b7a9c617ba862d2349a96c | [
"Apache-2.0"
] | null | null | null | # model settings
# Space-time walker model: ResNet-18 backbone, WalkerHead computing a
# temperature-scaled random walk over 7 steps in the 'right' direction.
model = dict(
    type='SpaceTimeWalker',
    backbone=dict(
        type='ResNet',
        pretrained=None,
        depth=18,
        out_indices=(3, ),
        norm_eval=False,
        zero_init_residual=True),
    cls_head=dict(
        type='WalkerHead',
        num_classes=400,
        in_channels=512,
        channels=128,
        spatial_type='avg',
        temperature=0.01,
        walk_len=7,
        walk_dir='right',
        init_std=0.01))
# model training and testing settings
train_cfg = dict(patch_size=64, patch_stride=32)
test_cfg = dict(
    precede_frames=7,
    topk=5,
    temperature=0.01,
    strides=(1, 2, 1, 1),
    out_indices=(2, ),
    output_dir='eval_results')
# dataset settings: train on Kinetics-400 videos, evaluate label
# propagation on DAVIS 2017.
dataset_type = 'VideoDataset'
dataset_type_val = 'DavisDataset'
data_prefix = 'data/kinetics400/videos_train'
ann_file_train = 'data/kinetics400/kinetics400_train_list_videos.txt'
data_prefix_val = 'data/davis/DAVIS/JPEGImages/480p'
anno_prefix_val = 'data/davis/DAVIS/Annotations/480p'
data_root_val = 'data/davis/DAVIS'
ann_file_val = 'data/davis/DAVIS/ImageSets/davis2017_val_list_rawframes.txt'
# ImageNet RGB statistics (to_bgr=False keeps RGB channel order).
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
# Training: 1 clip of 8 frames sampled every 8 frames, resized to 256x256.
train_pipeline = [
    dict(type='DecordInit'),
    dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=1),
    dict(type='DecordDecode'),
    # dict(type='Resize', scale=(-1, 256)),
    # dict(type='RandomResizedCrop'),
    dict(type='Resize', scale=(256, 256), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
# Validation: all frames in order, short side 480, no flipping.
val_pipeline = [
    dict(type='SequentialSampleFrames', frame_interval=1),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 480), keep_ratio=True),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(
        type='Collect',
        keys=['imgs', 'ref_seg_map'],
        meta_keys=('frame_dir', 'frame_inds', 'original_shape', 'seg_map')),
    dict(type='ToTensor', keys=['imgs', 'ref_seg_map'])
]
data = dict(
    videos_per_gpu=8,
    workers_per_gpu=4,
    train=dict(
        type=dataset_type,
        ann_file=ann_file_train,
        data_prefix=data_prefix,
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type_val,
        ann_file=ann_file_val,
        data_prefix=data_prefix_val,
        data_root=data_root_val,
        anno_prefix=anno_prefix_val,
        pipeline=val_pipeline,
        test_mode=True),
    test=dict(
        type=dataset_type_val,
        ann_file=ann_file_val,
        data_prefix=data_prefix_val,
        data_root=data_root_val,
        anno_prefix=anno_prefix_val,
        pipeline=val_pipeline,
        test_mode=True))
# optimizer
optimizer = dict(type='Adam', lr=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy: constant LR for all 50 epochs.
# lr_config = dict(policy='CosineAnnealing', min_lr=0)
lr_config = dict(policy='Fixed')
total_epochs = 50
checkpoint_config = dict(interval=1)
# DAVIS evaluation every epoch, selecting the best J&F-Mean.
evaluation = dict(
    interval=1, metrics='davis', key_indicator='J&F-Mean', rule='greater')
log_config = dict(
    interval=50,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook'),
    ])
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None
workflow = [('train', 1)]
find_unused_parameters = False
| 31.72807 | 78 | 0.66381 |
# Comment-stripped duplicate of the walker training config above
# (dataset dump's "content_no_comment" column); values are identical.
model = dict(
    type='SpaceTimeWalker',
    backbone=dict(
        type='ResNet',
        pretrained=None,
        depth=18,
        out_indices=(3, ),
        norm_eval=False,
        zero_init_residual=True),
    cls_head=dict(
        type='WalkerHead',
        num_classes=400,
        in_channels=512,
        channels=128,
        spatial_type='avg',
        temperature=0.01,
        walk_len=7,
        walk_dir='right',
        init_std=0.01))
train_cfg = dict(patch_size=64, patch_stride=32)
test_cfg = dict(
    precede_frames=7,
    topk=5,
    temperature=0.01,
    strides=(1, 2, 1, 1),
    out_indices=(2, ),
    output_dir='eval_results')
dataset_type = 'VideoDataset'
dataset_type_val = 'DavisDataset'
data_prefix = 'data/kinetics400/videos_train'
ann_file_train = 'data/kinetics400/kinetics400_train_list_videos.txt'
data_prefix_val = 'data/davis/DAVIS/JPEGImages/480p'
anno_prefix_val = 'data/davis/DAVIS/Annotations/480p'
data_root_val = 'data/davis/DAVIS'
ann_file_val = 'data/davis/DAVIS/ImageSets/davis2017_val_list_rawframes.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='DecordInit'),
    dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=1),
    dict(type='DecordDecode'),
    dict(type='Resize', scale=(256, 256), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(type='SequentialSampleFrames', frame_interval=1),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 480), keep_ratio=True),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(
        type='Collect',
        keys=['imgs', 'ref_seg_map'],
        meta_keys=('frame_dir', 'frame_inds', 'original_shape', 'seg_map')),
    dict(type='ToTensor', keys=['imgs', 'ref_seg_map'])
]
data = dict(
    videos_per_gpu=8,
    workers_per_gpu=4,
    train=dict(
        type=dataset_type,
        ann_file=ann_file_train,
        data_prefix=data_prefix,
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type_val,
        ann_file=ann_file_val,
        data_prefix=data_prefix_val,
        data_root=data_root_val,
        anno_prefix=anno_prefix_val,
        pipeline=val_pipeline,
        test_mode=True),
    test=dict(
        type=dataset_type_val,
        ann_file=ann_file_val,
        data_prefix=data_prefix_val,
        data_root=data_root_val,
        anno_prefix=anno_prefix_val,
        pipeline=val_pipeline,
        test_mode=True))
optimizer = dict(type='Adam', lr=0.0001)
optimizer_config = dict(grad_clip=None)
lr_config = dict(policy='Fixed')
total_epochs = 50
checkpoint_config = dict(interval=1)
evaluation = dict(
    interval=1, metrics='davis', key_indicator='J&F-Mean', rule='greater')
log_config = dict(
    interval=50,
    hooks=[
        dict(type='TextLoggerHook'),
    ])
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None
workflow = [('train', 1)]
find_unused_parameters = False
| true | true |
f7344a7f8c86b007aba6e7b603e7c9312336814c | 10,179 | py | Python | src/lib/models/networks/msra_resnet.py | jscsmk/CenterNet | d7c643bba2b373c15abfa3d25ffd5304a313fa49 | [
"MIT"
] | null | null | null | src/lib/models/networks/msra_resnet.py | jscsmk/CenterNet | d7c643bba2b373c15abfa3d25ffd5304a313fa49 | [
"MIT"
] | null | null | null | src/lib/models/networks/msra_resnet.py | jscsmk/CenterNet | d7c643bba2b373c15abfa3d25ffd5304a313fa49 | [
"MIT"
] | null | null | null | # ------------------------------------------------------------------------------
# Copyright (c) Microsoft
# Licensed under the MIT License.
# Written by Bin Xiao (Bin.Xiao@microsoft.com)
# Modified by Xingyi Zhou
# ------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import torch
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
# Momentum shared by every BatchNorm2d layer in this file.
BN_MOMENTUM = 0.1
# Download URLs of ImageNet-pretrained torchvision ResNet weights,
# keyed by depth; consumed by PoseResNet.init_weights.
model_urls = {
    'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
    'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
    'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
    'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
    'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1):
    """Return a bias-free 3x3 convolution with padding 1.

    With stride 1 this preserves the spatial size; the padding/kernel
    combination is the standard ResNet residual-branch convolution.
    """
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
class BasicBlock(nn.Module):
    """Two-conv residual block (ResNet-18/34 style): conv-bn-relu,
    conv-bn, identity add, relu."""
    # Output channels = planes * expansion.
    expansion = 1
    def __init__(self, inplanes, planes, stride=1, downsample=None):
        # inplanes/planes: input/output channel counts; stride applies to
        # the first conv; downsample (optional module) adapts the identity
        # branch when spatial size or channel count changes.
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
        self.downsample = downsample
        self.stride = stride
    def forward(self, x):
        """Residual forward pass; uses `downsample` on the skip path if set."""
        residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        if self.downsample is not None:
            residual = self.downsample(x)
        out += residual
        out = self.relu(out)
        return out
class Bottleneck(nn.Module):
    """Three-conv bottleneck residual block (ResNet-50/101/152 style):
    1x1 reduce, 3x3, 1x1 expand (x4), identity add, relu."""
    # Output channels = planes * expansion.
    expansion = 4
    def __init__(self, inplanes, planes, stride=1, downsample=None):
        # stride applies to the middle 3x3 conv; downsample (optional)
        # adapts the identity branch when the output shape changes.
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
        self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1,
                               bias=False)
        self.bn3 = nn.BatchNorm2d(planes * self.expansion,
                                  momentum=BN_MOMENTUM)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
    def forward(self, x):
        """Residual forward pass; uses `downsample` on the skip path if set."""
        residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)
        out = self.conv3(out)
        out = self.bn3(out)
        if self.downsample is not None:
            residual = self.downsample(x)
        out += residual
        out = self.relu(out)
        return out
class PoseResNet(nn.Module):
    """ResNet backbone + 3 deconvolution upsampling stages + one small conv
    head per entry in `heads` (head name -> number of output channels)."""
    def __init__(self, block, layers, heads, head_conv, **kwargs):
        # block: BasicBlock or Bottleneck; layers: blocks per stage;
        # head_conv: hidden channels of each head (0 -> single 1x1 conv).
        self.inplanes = 64
        self.deconv_with_bias = False
        self.heads = heads
        super(PoseResNet, self).__init__()
        # NOTE(review): the stem takes 1 input channel here (grayscale),
        # not the stock ResNet 3 — confirm against the data loader.
        self.conv1 = nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64, momentum=BN_MOMENTUM)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        # used for deconv layers
        self.deconv_layers = self._make_deconv_layer(
            3,
            [256, 256, 256],
            [4, 4, 4],
        )
        # self.final_layer = []
        # One output head per requested name, registered as an attribute
        # so __getattr__(head) retrieves it in forward().
        for head in sorted(self.heads):
            num_output = self.heads[head]
            if head_conv > 0:
                fc = nn.Sequential(
                    nn.Conv2d(256, head_conv,
                              kernel_size=3, padding=1, bias=True),
                    nn.ReLU(inplace=True),
                    nn.Conv2d(head_conv, num_output,
                              kernel_size=1, stride=1, padding=0))
            else:
                fc = nn.Conv2d(
                    in_channels=256,
                    out_channels=num_output,
                    kernel_size=1,
                    stride=1,
                    padding=0)
            self.__setattr__(head, fc)
        # self.final_layer = nn.ModuleList(self.final_layer)
    def _make_layer(self, block, planes, blocks, stride=1):
        """Build one residual stage of `blocks` blocks; the first block gets
        the stride and, if shapes change, a 1x1 downsample on the skip path.
        Side effect: advances self.inplanes to the stage's output width."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion, momentum=BN_MOMENTUM),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)
    def _get_deconv_cfg(self, deconv_kernel, index):
        """Return (kernel, padding, output_padding) so that a stride-2
        ConvTranspose2d exactly doubles the spatial size."""
        if deconv_kernel == 4:
            padding = 1
            output_padding = 0
        elif deconv_kernel == 3:
            padding = 1
            output_padding = 1
        elif deconv_kernel == 2:
            padding = 0
            output_padding = 0
        return deconv_kernel, padding, output_padding
    def _make_deconv_layer(self, num_layers, num_filters, num_kernels):
        """Build `num_layers` deconv-bn-relu upsampling stages (stride 2 each).
        Side effect: advances self.inplanes to the last stage's width."""
        assert num_layers == len(num_filters), \
            'ERROR: num_deconv_layers is different len(num_deconv_filters)'
        assert num_layers == len(num_kernels), \
            'ERROR: num_deconv_layers is different len(num_deconv_filters)'
        layers = []
        for i in range(num_layers):
            kernel, padding, output_padding = \
                self._get_deconv_cfg(num_kernels[i], i)
            planes = num_filters[i]
            layers.append(
                nn.ConvTranspose2d(
                    in_channels=self.inplanes,
                    out_channels=planes,
                    kernel_size=kernel,
                    stride=2,
                    padding=padding,
                    output_padding=output_padding,
                    bias=self.deconv_with_bias))
            layers.append(nn.BatchNorm2d(planes, momentum=BN_MOMENTUM))
            layers.append(nn.ReLU(inplace=True))
            self.inplanes = planes
        return nn.Sequential(*layers)
    def forward(self, x):
        """Run backbone + deconv stages, then every head; returns a
        single-element list containing the dict {head name: output tensor}."""
        x = x.float()
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.deconv_layers(x)
        ret = {}
        for head in self.heads:
            ret[head] = self.__getattr__(head)(x)
        return [ret]
    def init_weights(self, num_layers, pretrained=True):
        """Initialise deconv/head weights, then load ImageNet-pretrained
        ResNet weights (non-strict) for the backbone; raises ValueError
        when pretrained is falsy."""
        if pretrained:
            # print('=> init resnet deconv weights from normal distribution')
            for _, m in self.deconv_layers.named_modules():
                if isinstance(m, nn.ConvTranspose2d):
                    # print('=> init {}.weight as normal(0, 0.001)'.format(name))
                    # print('=> init {}.bias as 0'.format(name))
                    nn.init.normal_(m.weight, std=0.001)
                    if self.deconv_with_bias:
                        nn.init.constant_(m.bias, 0)
                elif isinstance(m, nn.BatchNorm2d):
                    # print('=> init {}.weight as 1'.format(name))
                    # print('=> init {}.bias as 0'.format(name))
                    nn.init.constant_(m.weight, 1)
                    nn.init.constant_(m.bias, 0)
            # print('=> init final conv weights from normal distribution')
            for head in self.heads:
                final_layer = self.__getattr__(head)
                for i, m in enumerate(final_layer.modules()):
                    if isinstance(m, nn.Conv2d):
                        # nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                        # print('=> init {}.weight as normal(0, 0.001)'.format(name))
                        # print('=> init {}.bias as 0'.format(name))
                        if m.weight.shape[0] == self.heads[head]:
                            # Heatmap heads get a negative bias prior; the
                            # other final convs get normal(0, 0.001) weights.
                            if 'hm' in head:
                                nn.init.constant_(m.bias, -2.19)
                            else:
                                nn.init.normal_(m.weight, std=0.001)
                                nn.init.constant_(m.bias, 0)
            #pretrained_state_dict = torch.load(pretrained)
            url = model_urls['resnet{}'.format(num_layers)]
            pretrained_state_dict = model_zoo.load_url(url)
            print('=> loading pretrained model {}'.format(url))
            self.load_state_dict(pretrained_state_dict, strict=False)
        else:
            # NOTE(review): "dose" below is a typo for "does" in this
            # user-facing message (left untouched here).
            print('=> imagenet pretrained model dose not exist')
            print('=> please download it first')
            raise ValueError('imagenet pretrained model does not exist')
# Maps ResNet depth -> (residual block class, per-stage block counts).
resnet_spec = {18: (BasicBlock, [2, 2, 2, 2]),
               34: (BasicBlock, [3, 4, 6, 3]),
               50: (Bottleneck, [3, 4, 6, 3]),
               101: (Bottleneck, [3, 4, 23, 3]),
               152: (Bottleneck, [3, 8, 36, 3])}
def get_pose_net(num_layers, heads, head_conv):
    """Build a PoseResNet of the given depth (key into resnet_spec) with the
    requested output heads. Pretrained-weight loading is disabled here."""
    block_class, layers = resnet_spec[num_layers]
    model = PoseResNet(block_class, layers, heads, head_conv=head_conv)
    #model.init_weights(num_layers, pretrained=True)
    return model
| 36.224199 | 94 | 0.558405 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import torch
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
# Comment-stripped duplicate of the constants above (dataset dump column).
BN_MOMENTUM = 0.1
model_urls = {
    'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
    'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
    'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
    'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
    'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1):
    """Bias-free 3x3 convolution with padding 1."""
    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                     padding=1, bias=False)
class BasicBlock(nn.Module):
    """Two-conv residual block (ResNet-18/34 style)."""
    expansion = 1
    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
        self.downsample = downsample
        self.stride = stride
    def forward(self, x):
        """conv-bn-relu, conv-bn, add (optionally downsampled) identity, relu."""
        residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        if self.downsample is not None:
            residual = self.downsample(x)
        out += residual
        out = self.relu(out)
        return out
class Bottleneck(nn.Module):
    """Three-conv bottleneck residual block (ResNet-50/101/152 style)."""
    expansion = 4
    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
        self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1,
                               bias=False)
        self.bn3 = nn.BatchNorm2d(planes * self.expansion,
                                  momentum=BN_MOMENTUM)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
    def forward(self, x):
        """1x1 reduce, 3x3, 1x1 expand, add (optionally downsampled) identity."""
        residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)
        out = self.conv3(out)
        out = self.bn3(out)
        if self.downsample is not None:
            residual = self.downsample(x)
        out += residual
        out = self.relu(out)
        return out
class PoseResNet(nn.Module):
    """ResNet backbone + deconv upsampling + per-head output convs
    (comment-stripped duplicate of the class above)."""
    def __init__(self, block, layers, heads, head_conv, **kwargs):
        self.inplanes = 64
        self.deconv_with_bias = False
        self.heads = heads
        super(PoseResNet, self).__init__()
        self.conv1 = nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64, momentum=BN_MOMENTUM)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.deconv_layers = self._make_deconv_layer(
            3,
            [256, 256, 256],
            [4, 4, 4],
        )
        for head in sorted(self.heads):
            num_output = self.heads[head]
            if head_conv > 0:
                fc = nn.Sequential(
                    nn.Conv2d(256, head_conv,
                              kernel_size=3, padding=1, bias=True),
                    nn.ReLU(inplace=True),
                    nn.Conv2d(head_conv, num_output,
                              kernel_size=1, stride=1, padding=0))
            else:
                fc = nn.Conv2d(
                    in_channels=256,
                    out_channels=num_output,
                    kernel_size=1,
                    stride=1,
                    padding=0)
            self.__setattr__(head, fc)
    def _make_layer(self, block, planes, blocks, stride=1):
        """Build one residual stage; advances self.inplanes as a side effect."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion, momentum=BN_MOMENTUM),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)
    def _get_deconv_cfg(self, deconv_kernel, index):
        """Padding config so a stride-2 deconv exactly doubles spatial size."""
        if deconv_kernel == 4:
            padding = 1
            output_padding = 0
        elif deconv_kernel == 3:
            padding = 1
            output_padding = 1
        elif deconv_kernel == 2:
            padding = 0
            output_padding = 0
        return deconv_kernel, padding, output_padding
    def _make_deconv_layer(self, num_layers, num_filters, num_kernels):
        """Build deconv-bn-relu upsampling stages; advances self.inplanes."""
        assert num_layers == len(num_filters), \
            'ERROR: num_deconv_layers is different len(num_deconv_filters)'
        assert num_layers == len(num_kernels), \
            'ERROR: num_deconv_layers is different len(num_deconv_filters)'
        layers = []
        for i in range(num_layers):
            kernel, padding, output_padding = \
                self._get_deconv_cfg(num_kernels[i], i)
            planes = num_filters[i]
            layers.append(
                nn.ConvTranspose2d(
                    in_channels=self.inplanes,
                    out_channels=planes,
                    kernel_size=kernel,
                    stride=2,
                    padding=padding,
                    output_padding=output_padding,
                    bias=self.deconv_with_bias))
            layers.append(nn.BatchNorm2d(planes, momentum=BN_MOMENTUM))
            layers.append(nn.ReLU(inplace=True))
            self.inplanes = planes
        return nn.Sequential(*layers)
    def forward(self, x):
        """Returns a single-element list holding {head name: output tensor}."""
        x = x.float()
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.deconv_layers(x)
        ret = {}
        for head in self.heads:
            ret[head] = self.__getattr__(head)(x)
        return [ret]
    def init_weights(self, num_layers, pretrained=True):
        """Init deconv/head weights and load pretrained backbone weights."""
        if pretrained:
            for _, m in self.deconv_layers.named_modules():
                if isinstance(m, nn.ConvTranspose2d):
                    nn.init.normal_(m.weight, std=0.001)
                    if self.deconv_with_bias:
                        nn.init.constant_(m.bias, 0)
                elif isinstance(m, nn.BatchNorm2d):
                    nn.init.constant_(m.weight, 1)
                    nn.init.constant_(m.bias, 0)
            for head in self.heads:
                final_layer = self.__getattr__(head)
                for i, m in enumerate(final_layer.modules()):
                    if isinstance(m, nn.Conv2d):
                        if m.weight.shape[0] == self.heads[head]:
                            if 'hm' in head:
                                nn.init.constant_(m.bias, -2.19)
                            else:
                                nn.init.normal_(m.weight, std=0.001)
                                nn.init.constant_(m.bias, 0)
            url = model_urls['resnet{}'.format(num_layers)]
            pretrained_state_dict = model_zoo.load_url(url)
            print('=> loading pretrained model {}'.format(url))
            self.load_state_dict(pretrained_state_dict, strict=False)
        else:
            print('=> imagenet pretrained model dose not exist')
            print('=> please download it first')
            raise ValueError('imagenet pretrained model does not exist')
# Depth -> (block class, per-stage block counts).
resnet_spec = {18: (BasicBlock, [2, 2, 2, 2]),
               34: (BasicBlock, [3, 4, 6, 3]),
               50: (Bottleneck, [3, 4, 6, 3]),
               101: (Bottleneck, [3, 4, 23, 3]),
               152: (Bottleneck, [3, 8, 36, 3])}
def get_pose_net(num_layers, heads, head_conv):
    """Build a PoseResNet of the given depth with the requested heads."""
    block_class, layers = resnet_spec[num_layers]
    model = PoseResNet(block_class, layers, heads, head_conv=head_conv)
    return model
| true | true |
f7344d91646bee887a33588a68e2c919c555a5d1 | 106 | py | Python | stack_overseer/question_monitor/apps.py | Superskyyy/StackOverseer | 195f73d372c581d947e9f6ed0883b40a4a74062d | [
"MIT"
] | 1 | 2020-08-14T16:33:10.000Z | 2020-08-14T16:33:10.000Z | stack_overseer/question_monitor/apps.py | Superskyyy/StackOverseer | 195f73d372c581d947e9f6ed0883b40a4a74062d | [
"MIT"
] | null | null | null | stack_overseer/question_monitor/apps.py | Superskyyy/StackOverseer | 195f73d372c581d947e9f6ed0883b40a4a74062d | [
"MIT"
] | null | null | null | from django.apps import AppConfig
class QuestionMonitorConfig(AppConfig):
    """Django application configuration for the question_monitor app."""
    name = 'question_monitor'
| 17.666667 | 39 | 0.792453 | from django.apps import AppConfig
class QuestionMonitorConfig(AppConfig):
    """Django application configuration (comment-stripped duplicate)."""
    name = 'question_monitor'
| true | true |
f7344e269516c6d5c79cfade85243463ddf17a6b | 212 | py | Python | pyamg/util/tests/test_warn.py | thomasjpfan/pyamg | b0904d31c8da0c29affcd7d0fcd2bb8cb910b42a | [
"MIT"
] | null | null | null | pyamg/util/tests/test_warn.py | thomasjpfan/pyamg | b0904d31c8da0c29affcd7d0fcd2bb8cb910b42a | [
"MIT"
] | null | null | null | pyamg/util/tests/test_warn.py | thomasjpfan/pyamg | b0904d31c8da0c29affcd7d0fcd2bb8cb910b42a | [
"MIT"
] | 2 | 2022-01-03T00:20:22.000Z | 2022-03-01T12:02:54.000Z | import warnings
from numpy.testing import TestCase
class TestWarn(TestCase):
    """Check that a warning matched by an 'ignore' filter can be raised
    without surfacing or erroring."""
    def test_f(self):
        # Scope the filter change with catch_warnings() so the global
        # warnings filter list is restored afterwards instead of leaking
        # the 'ignore' entry into every subsequent test.
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore", message="another warning")
            warnings.warn("another warning!")
| 23.555556 | 68 | 0.721698 | import warnings
from numpy.testing import TestCase
class TestWarn(TestCase):
    """Check that a matching 'ignore' filter suppresses the warning."""
    def test_f(self):
        # NOTE(review): this filter change is global and is not undone
        # after the test; consider warnings.catch_warnings().
        warnings.filterwarnings("ignore", message="another warning")
        warnings.warn("another warning!")
| true | true |
f7344e7b0bf68cc20035ce3b513932116c5a03cd | 5,882 | py | Python | pycommons/mqtt.py | jodabyte/bifrost | dfbfa85ceba30a8f540c1e110d44412771cc844f | [
"MIT"
] | null | null | null | pycommons/mqtt.py | jodabyte/bifrost | dfbfa85ceba30a8f540c1e110d44412771cc844f | [
"MIT"
] | null | null | null | pycommons/mqtt.py | jodabyte/bifrost | dfbfa85ceba30a8f540c1e110d44412771cc844f | [
"MIT"
] | null | null | null | import json
import logging
from typing import Any, Callable, List
import paho.mqtt.client as mqtt
from paho.mqtt.client import MQTTMessage, SubscribeOptions
from paho.mqtt.properties import Properties
from paho.mqtt.reasoncodes import PacketTypes, ReasonCodes
class MqttClient:
    """Thin wrapper around paho-mqtt's Client that wires up all callbacks
    and logs every outgoing command and incoming event on a logger named
    after the client id.

    Fix: the log messages for publish previously read "PUBLICH" /
    "ON_PUBLICH"; corrected to "PUBLISH" / "ON_PUBLISH".
    """
    def __init__(
        self,
        id: str,
        protocol: int = mqtt.MQTTv5,
        enable_logging: bool = False,
    ) -> None:
        """Create the underlying client and register all event callbacks.

        id: MQTT client id, also used as the logger name.
        protocol: MQTT protocol version (defaults to MQTT v5).
        enable_logging: forward paho's internal logging to this logger.
        """
        self.logger = logging.getLogger(id)
        self._client = mqtt.Client(client_id=id, protocol=protocol)
        if enable_logging:
            self._client.enable_logger()
        self._client.on_connect = self._on_connect
        self._client.on_disconnect = self._on_disconnect
        self._client.on_subscribe = self._on_subscribe
        self._client.on_unsubscribe = self._on_unsubscribe
        self._client.on_message = self._on_message
        self._client.on_publish = self._on_publish
    def connect(self, host: str, port: int = 1883, properties: Properties = None):
        """Open a clean-start connection to the broker and log the result."""
        rc: int = self._client.connect(
            host=host, port=port, clean_start=True, properties=properties
        )
        self.logger.debug(
            f"Sending CONNECT host='{host}' port='{port}' properties='{properties}' rc='{mqtt.error_string(rc)}'"
        )
    def disconnect(self, reasoncode: ReasonCodes = None, properties: Properties = None):
        """Send DISCONNECT to the broker and log the reason code."""
        rc: int = self._client.disconnect(reasoncode=reasoncode, properties=properties)
        self.logger.debug(
            f"Sending DISCONNECT reasoncode='{reasoncode}' properties='{properties}' rc='{ReasonCodes(PacketTypes.DISCONNECT, identifier=rc)}'"
        )
    def subscribe(
        self,
        topic: str,
        qos: int = 0,
        options: SubscribeOptions = None,
        properties: Properties = None,
    ) -> None:
        """Subscribe to a topic filter and log the message id / result."""
        rc, mid = self._client.subscribe(
            topic=topic, qos=qos, options=options, properties=properties
        )
        self.logger.debug(
            f"Sending SUBSCRIBE topic='{topic}' with qos='{qos}' options='{options}' properties='{properties}' mid='{mid}' rc='{mqtt.error_string(rc)}'"
        )
    def unsubscribe(self, topic: str, properties: Properties = None) -> None:
        """Unsubscribe from a topic filter and log the message id / result."""
        rc, mid = self._client.unsubscribe(topic, properties=properties)
        self.logger.debug(
            f"Sending UNSUBSCRIBE topic='{topic}' properties='{properties}' mid='{mid}' rc='{mqtt.error_string(rc)}'"
        )
    def publish(
        self,
        topic: str,
        payload: dict,
        qos: int = 0,
        retain: bool = False,
        properties: Properties = None,
    ) -> None:
        """JSON-encode `payload` and publish it to `topic`, logging the
        resulting message id and return code."""
        info: mqtt.MQTTMessageInfo = self._client.publish(
            topic=topic,
            payload=json.dumps(payload),
            qos=qos,
            retain=retain,
            properties=properties,
        )
        self.logger.debug(
            f"Sending PUBLISH topic='{topic}' payload='{payload}' with qos='{qos}' retain='{retain}' properties='{properties}' mid='{info.mid}' rc='{mqtt.error_string(info.rc)}'"
        )
    def loop_start(self) -> None:
        """Start paho's background network loop thread."""
        self._client.loop_start()
    def loop_stop(self) -> None:
        """Stop the background network loop thread."""
        self._client.loop_stop()
    def loop_forever(self) -> None:
        """Run the network loop on the calling thread (blocks)."""
        self._client.loop_forever()
    def register_callback(self, topic: str, callback: Callable):
        """Route messages matching `topic` to `callback` instead of the
        default _on_message handler."""
        self._client.message_callback_add(sub=topic, callback=callback)
        self.logger.info(
            f"Register CALLBACK topic='{topic}' callback='{callback.__name__}'"
        )
    def unregister_callback(self, topic: str):
        """Remove a per-topic callback registered via register_callback."""
        self._client.message_callback_remove(sub=topic)
        self.logger.info(f"Unregister CALLBACK topic='{topic}'")
    def _on_message(
        self, client: mqtt.Client, userdata: Any, message: MQTTMessage
    ) -> None:
        """Default handler for messages with no per-topic callback: log only."""
        self.logger.info(
            f"Received ON_MESSAGE client_id='{client._client_id.decode('utf-8')}' userdata='{userdata}' topic='{message.topic}' payload='{message.payload}' qos='{message.qos}' retain='{message.retain}' mid='{message.info.mid}' rc='{mqtt.error_string(message.info.rc)}'"
        )
    def _on_connect(
        self,
        client: mqtt.Client,
        userdata: Any,
        flags: dict,
        rc: ReasonCodes,
        properties: mqtt.Properties,
    ) -> None:
        """Log CONNACK details when the broker accepts the connection."""
        self.logger.info(
            f"Received ON_CONNECT client_id='{client._client_id.decode('utf-8')}' rc='{mqtt.connack_string(rc)}' userdata='{userdata}' flags='{flags}' properties='{properties}'"
        )
    def _on_disconnect(
        self,
        client: mqtt.Client,
        userdata: Any,
        rc: int,
    ) -> None:
        """Log the reason code when the connection drops."""
        self.logger.info(
            f"Received ON_DISCONNECT client_id='{client._client_id.decode('utf-8')}' rc='{ReasonCodes(PacketTypes.DISCONNECT, identifier=rc)}' userdata='{userdata}'"
        )
    def _on_subscribe(
        self,
        client: mqtt.Client,
        userdata: Any,
        mid: int,
        rc: List[ReasonCodes],
        properties: List[Properties],
    ) -> None:
        """Log the granted QoS levels for a completed SUBSCRIBE."""
        self.logger.info(
            f"Received ON_SUBSCRIBE client_id='{client._client_id.decode('utf-8')}' mid='{mid}' qos='{[qos.getName() for qos in rc]}' userdata='{userdata}' properties='{properties}'"
        )
    def _on_unsubscribe(
        self,
        client: mqtt.Client,
        userdata: Any,
        mid: int,
        properties: List[Properties],
        rc: List[ReasonCodes],
    ) -> None:
        """Log the reason codes for a completed UNSUBSCRIBE."""
        self.logger.info(
            f"Received ON_UNSUBSCRIBE client_id='{client._client_id.decode('utf-8')}' mid='{mid}' rc='{[qos.getName() for qos in rc]}' userdata='{userdata}' properties='{properties}'"
        )
    def _on_publish(self, client: mqtt.Client, userdata: Any, mid: int) -> None:
        """Log the message id once a PUBLISH has been handed to the broker."""
        self.logger.info(
            f"Received ON_PUBLISH client_id='{client._client_id.decode('utf-8')}' mid='{mid}' userdata='{userdata}'"
        )
| 37.227848 | 269 | 0.617647 | import json
import logging
from typing import Any, Callable, List
import paho.mqtt.client as mqtt
from paho.mqtt.client import MQTTMessage, SubscribeOptions
from paho.mqtt.properties import Properties
from paho.mqtt.reasoncodes import PacketTypes, ReasonCodes
class MqttClient:
    def __init__(
        self,
        id: str,
        protocol: int = mqtt.MQTTv5,
        enable_logging: bool = False,
    ) -> None:
        """Create the paho client and register all event callbacks."""
        self.logger = logging.getLogger(id)
        self._client = mqtt.Client(client_id=id, protocol=protocol)
        if enable_logging:
            self._client.enable_logger()
        self._client.on_connect = self._on_connect
        self._client.on_disconnect = self._on_disconnect
        self._client.on_subscribe = self._on_subscribe
        self._client.on_unsubscribe = self._on_unsubscribe
        self._client.on_message = self._on_message
        self._client.on_publish = self._on_publish
    def connect(self, host: str, port: int = 1883, properties: Properties = None):
        """Open a clean-start connection to the broker and log the result."""
        rc: int = self._client.connect(
            host=host, port=port, clean_start=True, properties=properties
        )
        self.logger.debug(
            f"Sending CONNECT host='{host}' port='{port}' properties='{properties}' rc='{mqtt.error_string(rc)}'"
        )
    def disconnect(self, reasoncode: ReasonCodes = None, properties: Properties = None):
        """Send DISCONNECT and log the reason code."""
        rc: int = self._client.disconnect(reasoncode=reasoncode, properties=properties)
        self.logger.debug(
            f"Sending DISCONNECT reasoncode='{reasoncode}' properties='{properties}' rc='{ReasonCodes(PacketTypes.DISCONNECT, identifier=rc)}'"
        )
    def subscribe(
        self,
        topic: str,
        qos: int = 0,
        options: SubscribeOptions = None,
        properties: Properties = None,
    ) -> None:
        """Subscribe to a topic filter and log message id / result."""
        rc, mid = self._client.subscribe(
            topic=topic, qos=qos, options=options, properties=properties
        )
        self.logger.debug(
            f"Sending SUBSCRIBE topic='{topic}' with qos='{qos}' options='{options}' properties='{properties}' mid='{mid}' rc='{mqtt.error_string(rc)}'"
        )
    def unsubscribe(self, topic: str, properties: Properties = None) -> None:
        """Unsubscribe from a topic filter and log message id / result."""
        rc, mid = self._client.unsubscribe(topic, properties=properties)
        self.logger.debug(
            f"Sending UNSUBSCRIBE topic='{topic}' properties='{properties}' mid='{mid}' rc='{mqtt.error_string(rc)}'"
        )
    def publish(
        self,
        topic: str,
        payload: dict,
        qos: int = 0,
        retain: bool = False,
        properties: Properties = None,
    ) -> None:
        """JSON-encode `payload` and publish it to `topic`."""
        info: mqtt.MQTTMessageInfo = self._client.publish(
            topic=topic,
            payload=json.dumps(payload),
            qos=qos,
            retain=retain,
            properties=properties,
        )
        # NOTE(review): "PUBLICH" below is a typo for "PUBLISH" in this
        # log message (left untouched in this duplicate copy).
        self.logger.debug(
            f"Sending PUBLICH topic='{topic}' payload='{payload}' with qos='{qos}' retain='{retain}' properties='{properties}' mid='{info.mid}' rc='{mqtt.error_string(info.rc)}'"
        )
    def loop_start(self) -> None:
        """Start paho's background network loop thread."""
        self._client.loop_start()
    def loop_stop(self) -> None:
        """Stop the background network loop thread."""
        self._client.loop_stop()
    def loop_forever(self) -> None:
        """Run the network loop on the calling thread (blocks)."""
        self._client.loop_forever()
    def register_callback(self, topic: str, callback: Callable):
        """Route messages matching `topic` to `callback`."""
        self._client.message_callback_add(sub=topic, callback=callback)
        self.logger.info(
            f"Register CALLBACK topic='{topic}' callback='{callback.__name__}'"
        )
    def unregister_callback(self, topic: str):
        """Remove a per-topic callback registered via register_callback."""
        self._client.message_callback_remove(sub=topic)
        self.logger.info(f"Unregister CALLBACK topic='{topic}'")
    def _on_message(
        self, client: mqtt.Client, userdata: Any, message: MQTTMessage
    ) -> None:
        """Default handler for messages with no per-topic callback: log only."""
        self.logger.info(
            f"Received ON_MESSAGE client_id='{client._client_id.decode('utf-8')}' userdata='{userdata}' topic='{message.topic}' payload='{message.payload}' qos='{message.qos}' retain='{message.retain}' mid='{message.info.mid}' rc='{mqtt.error_string(message.info.rc)}'"
        )
    def _on_connect(
        self,
        client: mqtt.Client,
        userdata: Any,
        flags: dict,
        rc: ReasonCodes,
        properties: mqtt.Properties,
    ) -> None:
        """Log CONNACK details when the broker accepts the connection."""
        self.logger.info(
            f"Received ON_CONNECT client_id='{client._client_id.decode('utf-8')}' rc='{mqtt.connack_string(rc)}' userdata='{userdata}' flags='{flags}' properties='{properties}'"
        )
def _on_disconnect(
self,
client: mqtt.Client,
userdata: Any,
rc: int,
) -> None:
self.logger.info(
f"Received ON_DISCONNECT client_id='{client._client_id.decode('utf-8')}' rc='{ReasonCodes(PacketTypes.DISCONNECT, identifier=rc)}' userdata='{userdata}'"
)
def _on_subscribe(
self,
client: mqtt.Client,
userdata: Any,
mid: int,
rc: List[ReasonCodes],
properties: List[Properties],
) -> None:
self.logger.info(
f"Received ON_SUBSCRIBE client_id='{client._client_id.decode('utf-8')}' mid='{mid}' qos='{[qos.getName() for qos in rc]}' userdata='{userdata}' properties='{properties}'"
)
def _on_unsubscribe(
self,
client: mqtt.Client,
userdata: Any,
mid: int,
properties: List[Properties],
rc: List[ReasonCodes],
) -> None:
self.logger.info(
f"Received ON_UNSUBSCRIBE client_id='{client._client_id.decode('utf-8')}' mid='{mid}' rc='{[qos.getName() for qos in rc]}' userdata='{userdata}' properties='{properties}'"
)
def _on_publish(self, client: mqtt.Client, userdata: Any, mid: int) -> None:
self.logger.info(
f"Received ON_PUBLICH client_id='{client._client_id.decode('utf-8')}' mid='{mid}' userdata='{userdata}'"
)
| true | true |
f7344e9bd9127ea21b63d85bb7d004b5c061cfab | 49,202 | py | Python | python/src/chirpstack_api/as_pb/external/api/multicastGroup_pb2.py | wisang1999/chirpstack-api | 905b310a883b2a27a5c6460d547c6eb7d92524e7 | [
"MIT"
] | null | null | null | python/src/chirpstack_api/as_pb/external/api/multicastGroup_pb2.py | wisang1999/chirpstack-api | 905b310a883b2a27a5c6460d547c6eb7d92524e7 | [
"MIT"
] | null | null | null | python/src/chirpstack_api/as_pb/external/api/multicastGroup_pb2.py | wisang1999/chirpstack-api | 905b310a883b2a27a5c6460d547c6eb7d92524e7 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: chirpstack-api/as_pb/external/api/multicastGroup.proto
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='chirpstack-api/as_pb/external/api/multicastGroup.proto',
package='api',
syntax='proto3',
serialized_options=b'Z:github.com/wisang1999/chirpstack-api/go/v3/as/external/api',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n6chirpstack-api/as_pb/external/api/multicastGroup.proto\x12\x03\x61pi\x1a\x1cgoogle/api/annotations.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1bgoogle/protobuf/empty.proto\"\x8a\x02\n\x0eMulticastGroup\x12\n\n\x02id\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x0f\n\x07mc_addr\x18\x03 \x01(\t\x12\x14\n\x0cmc_nwk_s_key\x18\x04 \x01(\t\x12\x14\n\x0cmc_app_s_key\x18\x05 \x01(\t\x12\r\n\x05\x66_cnt\x18\x06 \x01(\r\x12+\n\ngroup_type\x18\x07 \x01(\x0e\x32\x17.api.MulticastGroupType\x12\n\n\x02\x64r\x18\x08 \x01(\r\x12\x11\n\tfrequency\x18\t \x01(\r\x12\x18\n\x10ping_slot_period\x18\n \x01(\r\x12,\n\x12service_profile_id\x18\x0b \x01(\tR\x10serviceProfileID\"~\n\x16MulticastGroupListItem\x12\n\n\x02id\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12,\n\x12service_profile_id\x18\x03 \x01(\tR\x10serviceProfileID\x12\x1c\n\x14service_profile_name\x18\x04 \x01(\t\"K\n\x1b\x43reateMulticastGroupRequest\x12,\n\x0fmulticast_group\x18\x01 \x01(\x0b\x32\x13.api.MulticastGroup\"*\n\x1c\x43reateMulticastGroupResponse\x12\n\n\x02id\x18\x01 \x01(\t\"&\n\x18GetMulticastGroupRequest\x12\n\n\x02id\x18\x01 \x01(\t\"\xa9\x01\n\x19GetMulticastGroupResponse\x12,\n\x0fmulticast_group\x18\x01 \x01(\x0b\x32\x13.api.MulticastGroup\x12.\n\ncreated_at\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12.\n\nupdated_at\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"K\n\x1bUpdateMulticastGroupRequest\x12,\n\x0fmulticast_group\x18\x01 \x01(\x0b\x32\x13.api.MulticastGroup\")\n\x1b\x44\x65leteMulticastGroupRequest\x12\n\n\x02id\x18\x01 \x01(\t\"i\n AddDeviceToMulticastGroupRequest\x12,\n\x12multicast_group_id\x18\x01 \x01(\tR\x10multicastGroupID\x12\x17\n\x07\x64\x65v_eui\x18\x02 \x01(\tR\x06\x64\x65vEUI\"n\n%RemoveDeviceFromMulticastGroupRequest\x12,\n\x12multicast_group_id\x18\x01 \x01(\tR\x10multicastGroupID\x12\x17\n\x07\x64\x65v_eui\x18\x02 \x01(\tR\x06\x64\x65vEUI\"\xba\x01\n\x19ListMulticastGroupRequest\x12\r\n\x05limit\x18\x01 
\x01(\x03\x12\x0e\n\x06offset\x18\x02 \x01(\x03\x12\'\n\x0forganization_id\x18\x03 \x01(\x03R\x0eorganizationID\x12\x17\n\x07\x64\x65v_eui\x18\x04 \x01(\tR\x06\x64\x65vEUI\x12,\n\x12service_profile_id\x18\x05 \x01(\tR\x10serviceProfileID\x12\x0e\n\x06search\x18\x06 \x01(\t\"^\n\x1aListMulticastGroupResponse\x12\x13\n\x0btotal_count\x18\x01 \x01(\x03\x12+\n\x06result\x18\x02 \x03(\x0b\x32\x1b.api.MulticastGroupListItem\"o\n\x12MulticastQueueItem\x12,\n\x12multicast_group_id\x18\x01 \x01(\tR\x10multicastGroupID\x12\r\n\x05\x66_cnt\x18\x02 \x01(\r\x12\x0e\n\x06\x66_port\x18\x03 \x01(\r\x12\x0c\n\x04\x64\x61ta\x18\x04 \x01(\x0c\"Y\n EnqueueMulticastQueueItemRequest\x12\x35\n\x14multicast_queue_item\x18\x01 \x01(\x0b\x32\x17.api.MulticastQueueItem\"2\n!EnqueueMulticastQueueItemResponse\x12\r\n\x05\x66_cnt\x18\x01 \x01(\r\"T\n$FlushMulticastGroupQueueItemsRequest\x12,\n\x12multicast_group_id\x18\x01 \x01(\tR\x10multicastGroupID\"S\n#ListMulticastGroupQueueItemsRequest\x12,\n\x12multicast_group_id\x18\x01 \x01(\tR\x10multicastGroupID\"^\n$ListMulticastGroupQueueItemsResponse\x12\x36\n\x15multicast_queue_items\x18\x01 \x03(\x0b\x32\x17.api.MulticastQueueItem*.\n\x12MulticastGroupType\x12\x0b\n\x07\x43LASS_C\x10\x00\x12\x0b\n\x07\x43LASS_B\x10\x01\x32\xba\n\n\x15MulticastGroupService\x12o\n\x06\x43reate\x12 .api.CreateMulticastGroupRequest\x1a!.api.CreateMulticastGroupResponse\" \x82\xd3\xe4\x93\x02\x1a\"\x15/api/multicast-groups:\x01*\x12h\n\x03Get\x12\x1d.api.GetMulticastGroupRequest\x1a\x1e.api.GetMulticastGroupResponse\"\"\x82\xd3\xe4\x93\x02\x1c\x12\x1a/api/multicast-groups/{id}\x12y\n\x06Update\x12 .api.UpdateMulticastGroupRequest\x1a\x16.google.protobuf.Empty\"5\x82\xd3\xe4\x93\x02/\x1a*/api/multicast-groups/{multicast_group.id}:\x01*\x12\x66\n\x06\x44\x65lete\x12 
.api.DeleteMulticastGroupRequest\x1a\x16.google.protobuf.Empty\"\"\x82\xd3\xe4\x93\x02\x1c*\x1a/api/multicast-groups/{id}\x12\x66\n\x04List\x12\x1e.api.ListMulticastGroupRequest\x1a\x1f.api.ListMulticastGroupResponse\"\x1d\x82\xd3\xe4\x93\x02\x17\x12\x15/api/multicast-groups\x12\x89\x01\n\tAddDevice\x12%.api.AddDeviceToMulticastGroupRequest\x1a\x16.google.protobuf.Empty\"=\x82\xd3\xe4\x93\x02\x37\"2/api/multicast-groups/{multicast_group_id}/devices:\x01*\x12\x98\x01\n\x0cRemoveDevice\x12*.api.RemoveDeviceFromMulticastGroupRequest\x1a\x16.google.protobuf.Empty\"D\x82\xd3\xe4\x93\x02>*</api/multicast-groups/{multicast_group_id}/devices/{dev_eui}\x12\xaa\x01\n\x07\x45nqueue\x12%.api.EnqueueMulticastQueueItemRequest\x1a&.api.EnqueueMulticastQueueItemResponse\"P\x82\xd3\xe4\x93\x02J\"E/api/multicast-groups/{multicast_queue_item.multicast_group_id}/queue:\x01*\x12\x89\x01\n\nFlushQueue\x12).api.FlushMulticastGroupQueueItemsRequest\x1a\x16.google.protobuf.Empty\"8\x82\xd3\xe4\x93\x02\x32*0/api/multicast-groups/{multicast_group_id}/queue\x12\x9a\x01\n\tListQueue\x12(.api.ListMulticastGroupQueueItemsRequest\x1a).api.ListMulticastGroupQueueItemsResponse\"8\x82\xd3\xe4\x93\x02\x32\x12\x30/api/multicast-groups/{multicast_group_id}/queueB<Z:github.com/wisang1999/chirpstack-api/go/v3/as/external/apib\x06proto3'
,
dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,google_dot_protobuf_dot_empty__pb2.DESCRIPTOR,])
# Enum descriptor for api.MulticastGroupType (protoc-generated; do not hand-edit).
_MULTICASTGROUPTYPE = _descriptor.EnumDescriptor(
  name='MulticastGroupType',
  full_name='api.MulticastGroupType',
  filename=None,
  file=DESCRIPTOR,
  create_key=_descriptor._internal_create_key,
  values=[
    _descriptor.EnumValueDescriptor(
      name='CLASS_C', index=0, number=0,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='CLASS_B', index=1, number=1,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
  ],
  containing_type=None,
  serialized_options=None,
  serialized_start=2032,
  serialized_end=2078,
)
_sym_db.RegisterEnumDescriptor(_MULTICASTGROUPTYPE)

# Module-level enum wrapper and value aliases exported to users of this module.
MulticastGroupType = enum_type_wrapper.EnumTypeWrapper(_MULTICASTGROUPTYPE)
CLASS_C = 0
CLASS_B = 1
# Message descriptor for api.MulticastGroup (protoc-generated; do not hand-edit).
_MULTICASTGROUP = _descriptor.Descriptor(
  name='MulticastGroup',
  full_name='api.MulticastGroup',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='id', full_name='api.MulticastGroup.id', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='name', full_name='api.MulticastGroup.name', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='mc_addr', full_name='api.MulticastGroup.mc_addr', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='mc_nwk_s_key', full_name='api.MulticastGroup.mc_nwk_s_key', index=3,
      number=4, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='mc_app_s_key', full_name='api.MulticastGroup.mc_app_s_key', index=4,
      number=5, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='f_cnt', full_name='api.MulticastGroup.f_cnt', index=5,
      number=6, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='group_type', full_name='api.MulticastGroup.group_type', index=6,
      number=7, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='dr', full_name='api.MulticastGroup.dr', index=7,
      number=8, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='frequency', full_name='api.MulticastGroup.frequency', index=8,
      number=9, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='ping_slot_period', full_name='api.MulticastGroup.ping_slot_period', index=9,
      number=10, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='service_profile_id', full_name='api.MulticastGroup.service_profile_id', index=10,
      number=11, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, json_name='serviceProfileID', file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=156,
  serialized_end=422,
)

# Message descriptor for api.MulticastGroupListItem (protoc-generated; do not hand-edit).
_MULTICASTGROUPLISTITEM = _descriptor.Descriptor(
  name='MulticastGroupListItem',
  full_name='api.MulticastGroupListItem',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='id', full_name='api.MulticastGroupListItem.id', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='name', full_name='api.MulticastGroupListItem.name', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='service_profile_id', full_name='api.MulticastGroupListItem.service_profile_id', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, json_name='serviceProfileID', file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='service_profile_name', full_name='api.MulticastGroupListItem.service_profile_name', index=3,
      number=4, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=424,
  serialized_end=550,
)
# Message descriptor for api.CreateMulticastGroupRequest (protoc-generated; do not hand-edit).
_CREATEMULTICASTGROUPREQUEST = _descriptor.Descriptor(
  name='CreateMulticastGroupRequest',
  full_name='api.CreateMulticastGroupRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='multicast_group', full_name='api.CreateMulticastGroupRequest.multicast_group', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=552,
  serialized_end=627,
)

# Message descriptor for api.CreateMulticastGroupResponse (protoc-generated; do not hand-edit).
_CREATEMULTICASTGROUPRESPONSE = _descriptor.Descriptor(
  name='CreateMulticastGroupResponse',
  full_name='api.CreateMulticastGroupResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='id', full_name='api.CreateMulticastGroupResponse.id', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=629,
  serialized_end=671,
)

# Message descriptor for api.GetMulticastGroupRequest (protoc-generated; do not hand-edit).
_GETMULTICASTGROUPREQUEST = _descriptor.Descriptor(
  name='GetMulticastGroupRequest',
  full_name='api.GetMulticastGroupRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='id', full_name='api.GetMulticastGroupRequest.id', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=673,
  serialized_end=711,
)
# Message descriptor for api.GetMulticastGroupResponse (protoc-generated; do not hand-edit).
_GETMULTICASTGROUPRESPONSE = _descriptor.Descriptor(
  name='GetMulticastGroupResponse',
  full_name='api.GetMulticastGroupResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='multicast_group', full_name='api.GetMulticastGroupResponse.multicast_group', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='created_at', full_name='api.GetMulticastGroupResponse.created_at', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='updated_at', full_name='api.GetMulticastGroupResponse.updated_at', index=2,
      number=3, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=714,
  serialized_end=883,
)

# Message descriptor for api.UpdateMulticastGroupRequest (protoc-generated; do not hand-edit).
_UPDATEMULTICASTGROUPREQUEST = _descriptor.Descriptor(
  name='UpdateMulticastGroupRequest',
  full_name='api.UpdateMulticastGroupRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='multicast_group', full_name='api.UpdateMulticastGroupRequest.multicast_group', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=885,
  serialized_end=960,
)

# Message descriptor for api.DeleteMulticastGroupRequest (protoc-generated; do not hand-edit).
_DELETEMULTICASTGROUPREQUEST = _descriptor.Descriptor(
  name='DeleteMulticastGroupRequest',
  full_name='api.DeleteMulticastGroupRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='id', full_name='api.DeleteMulticastGroupRequest.id', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=962,
  serialized_end=1003,
)
# Message descriptor for api.AddDeviceToMulticastGroupRequest (protoc-generated; do not hand-edit).
_ADDDEVICETOMULTICASTGROUPREQUEST = _descriptor.Descriptor(
  name='AddDeviceToMulticastGroupRequest',
  full_name='api.AddDeviceToMulticastGroupRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='multicast_group_id', full_name='api.AddDeviceToMulticastGroupRequest.multicast_group_id', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, json_name='multicastGroupID', file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='dev_eui', full_name='api.AddDeviceToMulticastGroupRequest.dev_eui', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, json_name='devEUI', file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1005,
  serialized_end=1110,
)

# Message descriptor for api.RemoveDeviceFromMulticastGroupRequest (protoc-generated; do not hand-edit).
_REMOVEDEVICEFROMMULTICASTGROUPREQUEST = _descriptor.Descriptor(
  name='RemoveDeviceFromMulticastGroupRequest',
  full_name='api.RemoveDeviceFromMulticastGroupRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='multicast_group_id', full_name='api.RemoveDeviceFromMulticastGroupRequest.multicast_group_id', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, json_name='multicastGroupID', file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='dev_eui', full_name='api.RemoveDeviceFromMulticastGroupRequest.dev_eui', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, json_name='devEUI', file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1112,
  serialized_end=1222,
)
# Message descriptor for api.ListMulticastGroupRequest (protoc-generated; do not hand-edit).
_LISTMULTICASTGROUPREQUEST = _descriptor.Descriptor(
  name='ListMulticastGroupRequest',
  full_name='api.ListMulticastGroupRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='limit', full_name='api.ListMulticastGroupRequest.limit', index=0,
      number=1, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='offset', full_name='api.ListMulticastGroupRequest.offset', index=1,
      number=2, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='organization_id', full_name='api.ListMulticastGroupRequest.organization_id', index=2,
      number=3, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, json_name='organizationID', file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='dev_eui', full_name='api.ListMulticastGroupRequest.dev_eui', index=3,
      number=4, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, json_name='devEUI', file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='service_profile_id', full_name='api.ListMulticastGroupRequest.service_profile_id', index=4,
      number=5, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, json_name='serviceProfileID', file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='search', full_name='api.ListMulticastGroupRequest.search', index=5,
      number=6, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1225,
  serialized_end=1411,
)

# Message descriptor for api.ListMulticastGroupResponse (protoc-generated; do not hand-edit).
_LISTMULTICASTGROUPRESPONSE = _descriptor.Descriptor(
  name='ListMulticastGroupResponse',
  full_name='api.ListMulticastGroupResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='total_count', full_name='api.ListMulticastGroupResponse.total_count', index=0,
      number=1, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='result', full_name='api.ListMulticastGroupResponse.result', index=1,
      number=2, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1413,
  serialized_end=1507,
)
# Message descriptor for api.MulticastQueueItem (protoc-generated; do not hand-edit).
_MULTICASTQUEUEITEM = _descriptor.Descriptor(
  name='MulticastQueueItem',
  full_name='api.MulticastQueueItem',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='multicast_group_id', full_name='api.MulticastQueueItem.multicast_group_id', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, json_name='multicastGroupID', file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='f_cnt', full_name='api.MulticastQueueItem.f_cnt', index=1,
      number=2, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='f_port', full_name='api.MulticastQueueItem.f_port', index=2,
      number=3, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='data', full_name='api.MulticastQueueItem.data', index=3,
      number=4, type=12, cpp_type=9, label=1,
      has_default_value=False, default_value=b"",
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1509,
  serialized_end=1620,
)

# Message descriptor for api.EnqueueMulticastQueueItemRequest (protoc-generated; do not hand-edit).
_ENQUEUEMULTICASTQUEUEITEMREQUEST = _descriptor.Descriptor(
  name='EnqueueMulticastQueueItemRequest',
  full_name='api.EnqueueMulticastQueueItemRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='multicast_queue_item', full_name='api.EnqueueMulticastQueueItemRequest.multicast_queue_item', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1622,
  serialized_end=1711,
)

# Message descriptor for api.EnqueueMulticastQueueItemResponse (protoc-generated; do not hand-edit).
_ENQUEUEMULTICASTQUEUEITEMRESPONSE = _descriptor.Descriptor(
  name='EnqueueMulticastQueueItemResponse',
  full_name='api.EnqueueMulticastQueueItemResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='f_cnt', full_name='api.EnqueueMulticastQueueItemResponse.f_cnt', index=0,
      number=1, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1713,
  serialized_end=1763,
)
_FLUSHMULTICASTGROUPQUEUEITEMSREQUEST = _descriptor.Descriptor(
name='FlushMulticastGroupQueueItemsRequest',
full_name='api.FlushMulticastGroupQueueItemsRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='multicast_group_id', full_name='api.FlushMulticastGroupQueueItemsRequest.multicast_group_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='multicastGroupID', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1765,
serialized_end=1849,
)
_LISTMULTICASTGROUPQUEUEITEMSREQUEST = _descriptor.Descriptor(
name='ListMulticastGroupQueueItemsRequest',
full_name='api.ListMulticastGroupQueueItemsRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='multicast_group_id', full_name='api.ListMulticastGroupQueueItemsRequest.multicast_group_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='multicastGroupID', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1851,
serialized_end=1934,
)
_LISTMULTICASTGROUPQUEUEITEMSRESPONSE = _descriptor.Descriptor(
name='ListMulticastGroupQueueItemsResponse',
full_name='api.ListMulticastGroupQueueItemsResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='multicast_queue_items', full_name='api.ListMulticastGroupQueueItemsResponse.multicast_queue_items', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1936,
serialized_end=2030,
)
_MULTICASTGROUP.fields_by_name['group_type'].enum_type = _MULTICASTGROUPTYPE
_CREATEMULTICASTGROUPREQUEST.fields_by_name['multicast_group'].message_type = _MULTICASTGROUP
_GETMULTICASTGROUPRESPONSE.fields_by_name['multicast_group'].message_type = _MULTICASTGROUP
_GETMULTICASTGROUPRESPONSE.fields_by_name['created_at'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_GETMULTICASTGROUPRESPONSE.fields_by_name['updated_at'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_UPDATEMULTICASTGROUPREQUEST.fields_by_name['multicast_group'].message_type = _MULTICASTGROUP
_LISTMULTICASTGROUPRESPONSE.fields_by_name['result'].message_type = _MULTICASTGROUPLISTITEM
_ENQUEUEMULTICASTQUEUEITEMREQUEST.fields_by_name['multicast_queue_item'].message_type = _MULTICASTQUEUEITEM
_LISTMULTICASTGROUPQUEUEITEMSRESPONSE.fields_by_name['multicast_queue_items'].message_type = _MULTICASTQUEUEITEM
DESCRIPTOR.message_types_by_name['MulticastGroup'] = _MULTICASTGROUP
DESCRIPTOR.message_types_by_name['MulticastGroupListItem'] = _MULTICASTGROUPLISTITEM
DESCRIPTOR.message_types_by_name['CreateMulticastGroupRequest'] = _CREATEMULTICASTGROUPREQUEST
DESCRIPTOR.message_types_by_name['CreateMulticastGroupResponse'] = _CREATEMULTICASTGROUPRESPONSE
DESCRIPTOR.message_types_by_name['GetMulticastGroupRequest'] = _GETMULTICASTGROUPREQUEST
DESCRIPTOR.message_types_by_name['GetMulticastGroupResponse'] = _GETMULTICASTGROUPRESPONSE
DESCRIPTOR.message_types_by_name['UpdateMulticastGroupRequest'] = _UPDATEMULTICASTGROUPREQUEST
DESCRIPTOR.message_types_by_name['DeleteMulticastGroupRequest'] = _DELETEMULTICASTGROUPREQUEST
DESCRIPTOR.message_types_by_name['AddDeviceToMulticastGroupRequest'] = _ADDDEVICETOMULTICASTGROUPREQUEST
DESCRIPTOR.message_types_by_name['RemoveDeviceFromMulticastGroupRequest'] = _REMOVEDEVICEFROMMULTICASTGROUPREQUEST
DESCRIPTOR.message_types_by_name['ListMulticastGroupRequest'] = _LISTMULTICASTGROUPREQUEST
DESCRIPTOR.message_types_by_name['ListMulticastGroupResponse'] = _LISTMULTICASTGROUPRESPONSE
DESCRIPTOR.message_types_by_name['MulticastQueueItem'] = _MULTICASTQUEUEITEM
DESCRIPTOR.message_types_by_name['EnqueueMulticastQueueItemRequest'] = _ENQUEUEMULTICASTQUEUEITEMREQUEST
DESCRIPTOR.message_types_by_name['EnqueueMulticastQueueItemResponse'] = _ENQUEUEMULTICASTQUEUEITEMRESPONSE
DESCRIPTOR.message_types_by_name['FlushMulticastGroupQueueItemsRequest'] = _FLUSHMULTICASTGROUPQUEUEITEMSREQUEST
DESCRIPTOR.message_types_by_name['ListMulticastGroupQueueItemsRequest'] = _LISTMULTICASTGROUPQUEUEITEMSREQUEST
DESCRIPTOR.message_types_by_name['ListMulticastGroupQueueItemsResponse'] = _LISTMULTICASTGROUPQUEUEITEMSRESPONSE
DESCRIPTOR.enum_types_by_name['MulticastGroupType'] = _MULTICASTGROUPTYPE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
MulticastGroup = _reflection.GeneratedProtocolMessageType('MulticastGroup', (_message.Message,), {
'DESCRIPTOR' : _MULTICASTGROUP,
'__module__' : 'chirpstack_api.as_pb.external.api.multicastGroup_pb2'
# @@protoc_insertion_point(class_scope:api.MulticastGroup)
})
_sym_db.RegisterMessage(MulticastGroup)
MulticastGroupListItem = _reflection.GeneratedProtocolMessageType('MulticastGroupListItem', (_message.Message,), {
'DESCRIPTOR' : _MULTICASTGROUPLISTITEM,
'__module__' : 'chirpstack_api.as_pb.external.api.multicastGroup_pb2'
# @@protoc_insertion_point(class_scope:api.MulticastGroupListItem)
})
_sym_db.RegisterMessage(MulticastGroupListItem)
CreateMulticastGroupRequest = _reflection.GeneratedProtocolMessageType('CreateMulticastGroupRequest', (_message.Message,), {
'DESCRIPTOR' : _CREATEMULTICASTGROUPREQUEST,
'__module__' : 'chirpstack_api.as_pb.external.api.multicastGroup_pb2'
# @@protoc_insertion_point(class_scope:api.CreateMulticastGroupRequest)
})
_sym_db.RegisterMessage(CreateMulticastGroupRequest)
CreateMulticastGroupResponse = _reflection.GeneratedProtocolMessageType('CreateMulticastGroupResponse', (_message.Message,), {
'DESCRIPTOR' : _CREATEMULTICASTGROUPRESPONSE,
'__module__' : 'chirpstack_api.as_pb.external.api.multicastGroup_pb2'
# @@protoc_insertion_point(class_scope:api.CreateMulticastGroupResponse)
})
_sym_db.RegisterMessage(CreateMulticastGroupResponse)
GetMulticastGroupRequest = _reflection.GeneratedProtocolMessageType('GetMulticastGroupRequest', (_message.Message,), {
'DESCRIPTOR' : _GETMULTICASTGROUPREQUEST,
'__module__' : 'chirpstack_api.as_pb.external.api.multicastGroup_pb2'
# @@protoc_insertion_point(class_scope:api.GetMulticastGroupRequest)
})
_sym_db.RegisterMessage(GetMulticastGroupRequest)
GetMulticastGroupResponse = _reflection.GeneratedProtocolMessageType('GetMulticastGroupResponse', (_message.Message,), {
'DESCRIPTOR' : _GETMULTICASTGROUPRESPONSE,
'__module__' : 'chirpstack_api.as_pb.external.api.multicastGroup_pb2'
# @@protoc_insertion_point(class_scope:api.GetMulticastGroupResponse)
})
_sym_db.RegisterMessage(GetMulticastGroupResponse)
UpdateMulticastGroupRequest = _reflection.GeneratedProtocolMessageType('UpdateMulticastGroupRequest', (_message.Message,), {
'DESCRIPTOR' : _UPDATEMULTICASTGROUPREQUEST,
'__module__' : 'chirpstack_api.as_pb.external.api.multicastGroup_pb2'
# @@protoc_insertion_point(class_scope:api.UpdateMulticastGroupRequest)
})
_sym_db.RegisterMessage(UpdateMulticastGroupRequest)
DeleteMulticastGroupRequest = _reflection.GeneratedProtocolMessageType('DeleteMulticastGroupRequest', (_message.Message,), {
'DESCRIPTOR' : _DELETEMULTICASTGROUPREQUEST,
'__module__' : 'chirpstack_api.as_pb.external.api.multicastGroup_pb2'
# @@protoc_insertion_point(class_scope:api.DeleteMulticastGroupRequest)
})
_sym_db.RegisterMessage(DeleteMulticastGroupRequest)
AddDeviceToMulticastGroupRequest = _reflection.GeneratedProtocolMessageType('AddDeviceToMulticastGroupRequest', (_message.Message,), {
'DESCRIPTOR' : _ADDDEVICETOMULTICASTGROUPREQUEST,
'__module__' : 'chirpstack_api.as_pb.external.api.multicastGroup_pb2'
# @@protoc_insertion_point(class_scope:api.AddDeviceToMulticastGroupRequest)
})
_sym_db.RegisterMessage(AddDeviceToMulticastGroupRequest)
RemoveDeviceFromMulticastGroupRequest = _reflection.GeneratedProtocolMessageType('RemoveDeviceFromMulticastGroupRequest', (_message.Message,), {
'DESCRIPTOR' : _REMOVEDEVICEFROMMULTICASTGROUPREQUEST,
'__module__' : 'chirpstack_api.as_pb.external.api.multicastGroup_pb2'
# @@protoc_insertion_point(class_scope:api.RemoveDeviceFromMulticastGroupRequest)
})
_sym_db.RegisterMessage(RemoveDeviceFromMulticastGroupRequest)
ListMulticastGroupRequest = _reflection.GeneratedProtocolMessageType('ListMulticastGroupRequest', (_message.Message,), {
'DESCRIPTOR' : _LISTMULTICASTGROUPREQUEST,
'__module__' : 'chirpstack_api.as_pb.external.api.multicastGroup_pb2'
# @@protoc_insertion_point(class_scope:api.ListMulticastGroupRequest)
})
_sym_db.RegisterMessage(ListMulticastGroupRequest)
ListMulticastGroupResponse = _reflection.GeneratedProtocolMessageType('ListMulticastGroupResponse', (_message.Message,), {
'DESCRIPTOR' : _LISTMULTICASTGROUPRESPONSE,
'__module__' : 'chirpstack_api.as_pb.external.api.multicastGroup_pb2'
# @@protoc_insertion_point(class_scope:api.ListMulticastGroupResponse)
})
_sym_db.RegisterMessage(ListMulticastGroupResponse)
MulticastQueueItem = _reflection.GeneratedProtocolMessageType('MulticastQueueItem', (_message.Message,), {
'DESCRIPTOR' : _MULTICASTQUEUEITEM,
'__module__' : 'chirpstack_api.as_pb.external.api.multicastGroup_pb2'
# @@protoc_insertion_point(class_scope:api.MulticastQueueItem)
})
_sym_db.RegisterMessage(MulticastQueueItem)
EnqueueMulticastQueueItemRequest = _reflection.GeneratedProtocolMessageType('EnqueueMulticastQueueItemRequest', (_message.Message,), {
'DESCRIPTOR' : _ENQUEUEMULTICASTQUEUEITEMREQUEST,
'__module__' : 'chirpstack_api.as_pb.external.api.multicastGroup_pb2'
# @@protoc_insertion_point(class_scope:api.EnqueueMulticastQueueItemRequest)
})
_sym_db.RegisterMessage(EnqueueMulticastQueueItemRequest)
EnqueueMulticastQueueItemResponse = _reflection.GeneratedProtocolMessageType('EnqueueMulticastQueueItemResponse', (_message.Message,), {
'DESCRIPTOR' : _ENQUEUEMULTICASTQUEUEITEMRESPONSE,
'__module__' : 'chirpstack_api.as_pb.external.api.multicastGroup_pb2'
# @@protoc_insertion_point(class_scope:api.EnqueueMulticastQueueItemResponse)
})
_sym_db.RegisterMessage(EnqueueMulticastQueueItemResponse)
FlushMulticastGroupQueueItemsRequest = _reflection.GeneratedProtocolMessageType('FlushMulticastGroupQueueItemsRequest', (_message.Message,), {
'DESCRIPTOR' : _FLUSHMULTICASTGROUPQUEUEITEMSREQUEST,
'__module__' : 'chirpstack_api.as_pb.external.api.multicastGroup_pb2'
# @@protoc_insertion_point(class_scope:api.FlushMulticastGroupQueueItemsRequest)
})
_sym_db.RegisterMessage(FlushMulticastGroupQueueItemsRequest)
ListMulticastGroupQueueItemsRequest = _reflection.GeneratedProtocolMessageType('ListMulticastGroupQueueItemsRequest', (_message.Message,), {
'DESCRIPTOR' : _LISTMULTICASTGROUPQUEUEITEMSREQUEST,
'__module__' : 'chirpstack_api.as_pb.external.api.multicastGroup_pb2'
# @@protoc_insertion_point(class_scope:api.ListMulticastGroupQueueItemsRequest)
})
_sym_db.RegisterMessage(ListMulticastGroupQueueItemsRequest)
ListMulticastGroupQueueItemsResponse = _reflection.GeneratedProtocolMessageType('ListMulticastGroupQueueItemsResponse', (_message.Message,), {
'DESCRIPTOR' : _LISTMULTICASTGROUPQUEUEITEMSRESPONSE,
'__module__' : 'chirpstack_api.as_pb.external.api.multicastGroup_pb2'
# @@protoc_insertion_point(class_scope:api.ListMulticastGroupQueueItemsResponse)
})
_sym_db.RegisterMessage(ListMulticastGroupQueueItemsResponse)
DESCRIPTOR._options = None
_MULTICASTGROUPSERVICE = _descriptor.ServiceDescriptor(
name='MulticastGroupService',
full_name='api.MulticastGroupService',
file=DESCRIPTOR,
index=0,
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_start=2081,
serialized_end=3419,
methods=[
_descriptor.MethodDescriptor(
name='Create',
full_name='api.MulticastGroupService.Create',
index=0,
containing_service=None,
input_type=_CREATEMULTICASTGROUPREQUEST,
output_type=_CREATEMULTICASTGROUPRESPONSE,
serialized_options=b'\202\323\344\223\002\032\"\025/api/multicast-groups:\001*',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='Get',
full_name='api.MulticastGroupService.Get',
index=1,
containing_service=None,
input_type=_GETMULTICASTGROUPREQUEST,
output_type=_GETMULTICASTGROUPRESPONSE,
serialized_options=b'\202\323\344\223\002\034\022\032/api/multicast-groups/{id}',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='Update',
full_name='api.MulticastGroupService.Update',
index=2,
containing_service=None,
input_type=_UPDATEMULTICASTGROUPREQUEST,
output_type=google_dot_protobuf_dot_empty__pb2._EMPTY,
serialized_options=b'\202\323\344\223\002/\032*/api/multicast-groups/{multicast_group.id}:\001*',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='Delete',
full_name='api.MulticastGroupService.Delete',
index=3,
containing_service=None,
input_type=_DELETEMULTICASTGROUPREQUEST,
output_type=google_dot_protobuf_dot_empty__pb2._EMPTY,
serialized_options=b'\202\323\344\223\002\034*\032/api/multicast-groups/{id}',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='List',
full_name='api.MulticastGroupService.List',
index=4,
containing_service=None,
input_type=_LISTMULTICASTGROUPREQUEST,
output_type=_LISTMULTICASTGROUPRESPONSE,
serialized_options=b'\202\323\344\223\002\027\022\025/api/multicast-groups',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='AddDevice',
full_name='api.MulticastGroupService.AddDevice',
index=5,
containing_service=None,
input_type=_ADDDEVICETOMULTICASTGROUPREQUEST,
output_type=google_dot_protobuf_dot_empty__pb2._EMPTY,
serialized_options=b'\202\323\344\223\0027\"2/api/multicast-groups/{multicast_group_id}/devices:\001*',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='RemoveDevice',
full_name='api.MulticastGroupService.RemoveDevice',
index=6,
containing_service=None,
input_type=_REMOVEDEVICEFROMMULTICASTGROUPREQUEST,
output_type=google_dot_protobuf_dot_empty__pb2._EMPTY,
serialized_options=b'\202\323\344\223\002>*</api/multicast-groups/{multicast_group_id}/devices/{dev_eui}',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='Enqueue',
full_name='api.MulticastGroupService.Enqueue',
index=7,
containing_service=None,
input_type=_ENQUEUEMULTICASTQUEUEITEMREQUEST,
output_type=_ENQUEUEMULTICASTQUEUEITEMRESPONSE,
serialized_options=b'\202\323\344\223\002J\"E/api/multicast-groups/{multicast_queue_item.multicast_group_id}/queue:\001*',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='FlushQueue',
full_name='api.MulticastGroupService.FlushQueue',
index=8,
containing_service=None,
input_type=_FLUSHMULTICASTGROUPQUEUEITEMSREQUEST,
output_type=google_dot_protobuf_dot_empty__pb2._EMPTY,
serialized_options=b'\202\323\344\223\0022*0/api/multicast-groups/{multicast_group_id}/queue',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='ListQueue',
full_name='api.MulticastGroupService.ListQueue',
index=9,
containing_service=None,
input_type=_LISTMULTICASTGROUPQUEUEITEMSREQUEST,
output_type=_LISTMULTICASTGROUPQUEUEITEMSRESPONSE,
serialized_options=b'\202\323\344\223\0022\0220/api/multicast-groups/{multicast_group_id}/queue',
create_key=_descriptor._internal_create_key,
),
])
_sym_db.RegisterServiceDescriptor(_MULTICASTGROUPSERVICE)
DESCRIPTOR.services_by_name['MulticastGroupService'] = _MULTICASTGROUPSERVICE
# @@protoc_insertion_point(module_scope)
| 45.015554 | 5,094 | 0.78068 |
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='chirpstack-api/as_pb/external/api/multicastGroup.proto',
package='api',
syntax='proto3',
serialized_options=b'Z:github.com/wisang1999/chirpstack-api/go/v3/as/external/api',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n6chirpstack-api/as_pb/external/api/multicastGroup.proto\x12\x03\x61pi\x1a\x1cgoogle/api/annotations.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1bgoogle/protobuf/empty.proto\"\x8a\x02\n\x0eMulticastGroup\x12\n\n\x02id\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x0f\n\x07mc_addr\x18\x03 \x01(\t\x12\x14\n\x0cmc_nwk_s_key\x18\x04 \x01(\t\x12\x14\n\x0cmc_app_s_key\x18\x05 \x01(\t\x12\r\n\x05\x66_cnt\x18\x06 \x01(\r\x12+\n\ngroup_type\x18\x07 \x01(\x0e\x32\x17.api.MulticastGroupType\x12\n\n\x02\x64r\x18\x08 \x01(\r\x12\x11\n\tfrequency\x18\t \x01(\r\x12\x18\n\x10ping_slot_period\x18\n \x01(\r\x12,\n\x12service_profile_id\x18\x0b \x01(\tR\x10serviceProfileID\"~\n\x16MulticastGroupListItem\x12\n\n\x02id\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12,\n\x12service_profile_id\x18\x03 \x01(\tR\x10serviceProfileID\x12\x1c\n\x14service_profile_name\x18\x04 \x01(\t\"K\n\x1b\x43reateMulticastGroupRequest\x12,\n\x0fmulticast_group\x18\x01 \x01(\x0b\x32\x13.api.MulticastGroup\"*\n\x1c\x43reateMulticastGroupResponse\x12\n\n\x02id\x18\x01 \x01(\t\"&\n\x18GetMulticastGroupRequest\x12\n\n\x02id\x18\x01 \x01(\t\"\xa9\x01\n\x19GetMulticastGroupResponse\x12,\n\x0fmulticast_group\x18\x01 \x01(\x0b\x32\x13.api.MulticastGroup\x12.\n\ncreated_at\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12.\n\nupdated_at\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"K\n\x1bUpdateMulticastGroupRequest\x12,\n\x0fmulticast_group\x18\x01 \x01(\x0b\x32\x13.api.MulticastGroup\")\n\x1b\x44\x65leteMulticastGroupRequest\x12\n\n\x02id\x18\x01 \x01(\t\"i\n AddDeviceToMulticastGroupRequest\x12,\n\x12multicast_group_id\x18\x01 \x01(\tR\x10multicastGroupID\x12\x17\n\x07\x64\x65v_eui\x18\x02 \x01(\tR\x06\x64\x65vEUI\"n\n%RemoveDeviceFromMulticastGroupRequest\x12,\n\x12multicast_group_id\x18\x01 \x01(\tR\x10multicastGroupID\x12\x17\n\x07\x64\x65v_eui\x18\x02 \x01(\tR\x06\x64\x65vEUI\"\xba\x01\n\x19ListMulticastGroupRequest\x12\r\n\x05limit\x18\x01 
\x01(\x03\x12\x0e\n\x06offset\x18\x02 \x01(\x03\x12\'\n\x0forganization_id\x18\x03 \x01(\x03R\x0eorganizationID\x12\x17\n\x07\x64\x65v_eui\x18\x04 \x01(\tR\x06\x64\x65vEUI\x12,\n\x12service_profile_id\x18\x05 \x01(\tR\x10serviceProfileID\x12\x0e\n\x06search\x18\x06 \x01(\t\"^\n\x1aListMulticastGroupResponse\x12\x13\n\x0btotal_count\x18\x01 \x01(\x03\x12+\n\x06result\x18\x02 \x03(\x0b\x32\x1b.api.MulticastGroupListItem\"o\n\x12MulticastQueueItem\x12,\n\x12multicast_group_id\x18\x01 \x01(\tR\x10multicastGroupID\x12\r\n\x05\x66_cnt\x18\x02 \x01(\r\x12\x0e\n\x06\x66_port\x18\x03 \x01(\r\x12\x0c\n\x04\x64\x61ta\x18\x04 \x01(\x0c\"Y\n EnqueueMulticastQueueItemRequest\x12\x35\n\x14multicast_queue_item\x18\x01 \x01(\x0b\x32\x17.api.MulticastQueueItem\"2\n!EnqueueMulticastQueueItemResponse\x12\r\n\x05\x66_cnt\x18\x01 \x01(\r\"T\n$FlushMulticastGroupQueueItemsRequest\x12,\n\x12multicast_group_id\x18\x01 \x01(\tR\x10multicastGroupID\"S\n#ListMulticastGroupQueueItemsRequest\x12,\n\x12multicast_group_id\x18\x01 \x01(\tR\x10multicastGroupID\"^\n$ListMulticastGroupQueueItemsResponse\x12\x36\n\x15multicast_queue_items\x18\x01 \x03(\x0b\x32\x17.api.MulticastQueueItem*.\n\x12MulticastGroupType\x12\x0b\n\x07\x43LASS_C\x10\x00\x12\x0b\n\x07\x43LASS_B\x10\x01\x32\xba\n\n\x15MulticastGroupService\x12o\n\x06\x43reate\x12 .api.CreateMulticastGroupRequest\x1a!.api.CreateMulticastGroupResponse\" \x82\xd3\xe4\x93\x02\x1a\"\x15/api/multicast-groups:\x01*\x12h\n\x03Get\x12\x1d.api.GetMulticastGroupRequest\x1a\x1e.api.GetMulticastGroupResponse\"\"\x82\xd3\xe4\x93\x02\x1c\x12\x1a/api/multicast-groups/{id}\x12y\n\x06Update\x12 .api.UpdateMulticastGroupRequest\x1a\x16.google.protobuf.Empty\"5\x82\xd3\xe4\x93\x02/\x1a*/api/multicast-groups/{multicast_group.id}:\x01*\x12\x66\n\x06\x44\x65lete\x12 
.api.DeleteMulticastGroupRequest\x1a\x16.google.protobuf.Empty\"\"\x82\xd3\xe4\x93\x02\x1c*\x1a/api/multicast-groups/{id}\x12\x66\n\x04List\x12\x1e.api.ListMulticastGroupRequest\x1a\x1f.api.ListMulticastGroupResponse\"\x1d\x82\xd3\xe4\x93\x02\x17\x12\x15/api/multicast-groups\x12\x89\x01\n\tAddDevice\x12%.api.AddDeviceToMulticastGroupRequest\x1a\x16.google.protobuf.Empty\"=\x82\xd3\xe4\x93\x02\x37\"2/api/multicast-groups/{multicast_group_id}/devices:\x01*\x12\x98\x01\n\x0cRemoveDevice\x12*.api.RemoveDeviceFromMulticastGroupRequest\x1a\x16.google.protobuf.Empty\"D\x82\xd3\xe4\x93\x02>*</api/multicast-groups/{multicast_group_id}/devices/{dev_eui}\x12\xaa\x01\n\x07\x45nqueue\x12%.api.EnqueueMulticastQueueItemRequest\x1a&.api.EnqueueMulticastQueueItemResponse\"P\x82\xd3\xe4\x93\x02J\"E/api/multicast-groups/{multicast_queue_item.multicast_group_id}/queue:\x01*\x12\x89\x01\n\nFlushQueue\x12).api.FlushMulticastGroupQueueItemsRequest\x1a\x16.google.protobuf.Empty\"8\x82\xd3\xe4\x93\x02\x32*0/api/multicast-groups/{multicast_group_id}/queue\x12\x9a\x01\n\tListQueue\x12(.api.ListMulticastGroupQueueItemsRequest\x1a).api.ListMulticastGroupQueueItemsResponse\"8\x82\xd3\xe4\x93\x02\x32\x12\x30/api/multicast-groups/{multicast_group_id}/queueB<Z:github.com/wisang1999/chirpstack-api/go/v3/as/external/apib\x06proto3'
,
dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,google_dot_protobuf_dot_empty__pb2.DESCRIPTOR,])
_MULTICASTGROUPTYPE = _descriptor.EnumDescriptor(
name='MulticastGroupType',
full_name='api.MulticastGroupType',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='CLASS_C', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='CLASS_B', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=2032,
serialized_end=2078,
)
_sym_db.RegisterEnumDescriptor(_MULTICASTGROUPTYPE)
MulticastGroupType = enum_type_wrapper.EnumTypeWrapper(_MULTICASTGROUPTYPE)
CLASS_C = 0
CLASS_B = 1
_MULTICASTGROUP = _descriptor.Descriptor(
name='MulticastGroup',
full_name='api.MulticastGroup',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='api.MulticastGroup.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='name', full_name='api.MulticastGroup.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='mc_addr', full_name='api.MulticastGroup.mc_addr', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='mc_nwk_s_key', full_name='api.MulticastGroup.mc_nwk_s_key', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='mc_app_s_key', full_name='api.MulticastGroup.mc_app_s_key', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='f_cnt', full_name='api.MulticastGroup.f_cnt', index=5,
number=6, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='group_type', full_name='api.MulticastGroup.group_type', index=6,
number=7, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='dr', full_name='api.MulticastGroup.dr', index=7,
number=8, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='frequency', full_name='api.MulticastGroup.frequency', index=8,
number=9, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='ping_slot_period', full_name='api.MulticastGroup.ping_slot_period', index=9,
number=10, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='service_profile_id', full_name='api.MulticastGroup.service_profile_id', index=10,
number=11, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='serviceProfileID', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=156,
serialized_end=422,
)
_MULTICASTGROUPLISTITEM = _descriptor.Descriptor(
name='MulticastGroupListItem',
full_name='api.MulticastGroupListItem',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='api.MulticastGroupListItem.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='name', full_name='api.MulticastGroupListItem.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='service_profile_id', full_name='api.MulticastGroupListItem.service_profile_id', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='serviceProfileID', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='service_profile_name', full_name='api.MulticastGroupListItem.service_profile_name', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=424,
serialized_end=550,
)
_CREATEMULTICASTGROUPREQUEST = _descriptor.Descriptor(
name='CreateMulticastGroupRequest',
full_name='api.CreateMulticastGroupRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='multicast_group', full_name='api.CreateMulticastGroupRequest.multicast_group', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=552,
serialized_end=627,
)
_CREATEMULTICASTGROUPRESPONSE = _descriptor.Descriptor(
name='CreateMulticastGroupResponse',
full_name='api.CreateMulticastGroupResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='api.CreateMulticastGroupResponse.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=629,
serialized_end=671,
)
_GETMULTICASTGROUPREQUEST = _descriptor.Descriptor(
name='GetMulticastGroupRequest',
full_name='api.GetMulticastGroupRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='api.GetMulticastGroupRequest.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=673,
serialized_end=711,
)
_GETMULTICASTGROUPRESPONSE = _descriptor.Descriptor(
name='GetMulticastGroupResponse',
full_name='api.GetMulticastGroupResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='multicast_group', full_name='api.GetMulticastGroupResponse.multicast_group', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='created_at', full_name='api.GetMulticastGroupResponse.created_at', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='updated_at', full_name='api.GetMulticastGroupResponse.updated_at', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=714,
serialized_end=883,
)
_UPDATEMULTICASTGROUPREQUEST = _descriptor.Descriptor(
name='UpdateMulticastGroupRequest',
full_name='api.UpdateMulticastGroupRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='multicast_group', full_name='api.UpdateMulticastGroupRequest.multicast_group', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=885,
serialized_end=960,
)
_DELETEMULTICASTGROUPREQUEST = _descriptor.Descriptor(
name='DeleteMulticastGroupRequest',
full_name='api.DeleteMulticastGroupRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='api.DeleteMulticastGroupRequest.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=962,
serialized_end=1003,
)
_ADDDEVICETOMULTICASTGROUPREQUEST = _descriptor.Descriptor(
name='AddDeviceToMulticastGroupRequest',
full_name='api.AddDeviceToMulticastGroupRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='multicast_group_id', full_name='api.AddDeviceToMulticastGroupRequest.multicast_group_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='multicastGroupID', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='dev_eui', full_name='api.AddDeviceToMulticastGroupRequest.dev_eui', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='devEUI', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1005,
serialized_end=1110,
)
_REMOVEDEVICEFROMMULTICASTGROUPREQUEST = _descriptor.Descriptor(
name='RemoveDeviceFromMulticastGroupRequest',
full_name='api.RemoveDeviceFromMulticastGroupRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='multicast_group_id', full_name='api.RemoveDeviceFromMulticastGroupRequest.multicast_group_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='multicastGroupID', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='dev_eui', full_name='api.RemoveDeviceFromMulticastGroupRequest.dev_eui', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='devEUI', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1112,
serialized_end=1222,
)
_LISTMULTICASTGROUPREQUEST = _descriptor.Descriptor(
name='ListMulticastGroupRequest',
full_name='api.ListMulticastGroupRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='limit', full_name='api.ListMulticastGroupRequest.limit', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='offset', full_name='api.ListMulticastGroupRequest.offset', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='organization_id', full_name='api.ListMulticastGroupRequest.organization_id', index=2,
number=3, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='organizationID', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='dev_eui', full_name='api.ListMulticastGroupRequest.dev_eui', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='devEUI', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='service_profile_id', full_name='api.ListMulticastGroupRequest.service_profile_id', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='serviceProfileID', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='search', full_name='api.ListMulticastGroupRequest.search', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1225,
serialized_end=1411,
)
_LISTMULTICASTGROUPRESPONSE = _descriptor.Descriptor(
name='ListMulticastGroupResponse',
full_name='api.ListMulticastGroupResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='total_count', full_name='api.ListMulticastGroupResponse.total_count', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='result', full_name='api.ListMulticastGroupResponse.result', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1413,
serialized_end=1507,
)
_MULTICASTQUEUEITEM = _descriptor.Descriptor(
name='MulticastQueueItem',
full_name='api.MulticastQueueItem',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='multicast_group_id', full_name='api.MulticastQueueItem.multicast_group_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='multicastGroupID', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='f_cnt', full_name='api.MulticastQueueItem.f_cnt', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='f_port', full_name='api.MulticastQueueItem.f_port', index=2,
number=3, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='data', full_name='api.MulticastQueueItem.data', index=3,
number=4, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1509,
serialized_end=1620,
)
_ENQUEUEMULTICASTQUEUEITEMREQUEST = _descriptor.Descriptor(
name='EnqueueMulticastQueueItemRequest',
full_name='api.EnqueueMulticastQueueItemRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='multicast_queue_item', full_name='api.EnqueueMulticastQueueItemRequest.multicast_queue_item', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1622,
serialized_end=1711,
)
_ENQUEUEMULTICASTQUEUEITEMRESPONSE = _descriptor.Descriptor(
name='EnqueueMulticastQueueItemResponse',
full_name='api.EnqueueMulticastQueueItemResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='f_cnt', full_name='api.EnqueueMulticastQueueItemResponse.f_cnt', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1713,
serialized_end=1763,
)
_FLUSHMULTICASTGROUPQUEUEITEMSREQUEST = _descriptor.Descriptor(
name='FlushMulticastGroupQueueItemsRequest',
full_name='api.FlushMulticastGroupQueueItemsRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='multicast_group_id', full_name='api.FlushMulticastGroupQueueItemsRequest.multicast_group_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='multicastGroupID', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1765,
serialized_end=1849,
)
_LISTMULTICASTGROUPQUEUEITEMSREQUEST = _descriptor.Descriptor(
name='ListMulticastGroupQueueItemsRequest',
full_name='api.ListMulticastGroupQueueItemsRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='multicast_group_id', full_name='api.ListMulticastGroupQueueItemsRequest.multicast_group_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='multicastGroupID', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1851,
serialized_end=1934,
)
_LISTMULTICASTGROUPQUEUEITEMSRESPONSE = _descriptor.Descriptor(
name='ListMulticastGroupQueueItemsResponse',
full_name='api.ListMulticastGroupQueueItemsResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='multicast_queue_items', full_name='api.ListMulticastGroupQueueItemsResponse.multicast_queue_items', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1936,
serialized_end=2030,
)
_MULTICASTGROUP.fields_by_name['group_type'].enum_type = _MULTICASTGROUPTYPE
_CREATEMULTICASTGROUPREQUEST.fields_by_name['multicast_group'].message_type = _MULTICASTGROUP
_GETMULTICASTGROUPRESPONSE.fields_by_name['multicast_group'].message_type = _MULTICASTGROUP
_GETMULTICASTGROUPRESPONSE.fields_by_name['created_at'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_GETMULTICASTGROUPRESPONSE.fields_by_name['updated_at'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_UPDATEMULTICASTGROUPREQUEST.fields_by_name['multicast_group'].message_type = _MULTICASTGROUP
_LISTMULTICASTGROUPRESPONSE.fields_by_name['result'].message_type = _MULTICASTGROUPLISTITEM
_ENQUEUEMULTICASTQUEUEITEMREQUEST.fields_by_name['multicast_queue_item'].message_type = _MULTICASTQUEUEITEM
_LISTMULTICASTGROUPQUEUEITEMSRESPONSE.fields_by_name['multicast_queue_items'].message_type = _MULTICASTQUEUEITEM
DESCRIPTOR.message_types_by_name['MulticastGroup'] = _MULTICASTGROUP
DESCRIPTOR.message_types_by_name['MulticastGroupListItem'] = _MULTICASTGROUPLISTITEM
DESCRIPTOR.message_types_by_name['CreateMulticastGroupRequest'] = _CREATEMULTICASTGROUPREQUEST
DESCRIPTOR.message_types_by_name['CreateMulticastGroupResponse'] = _CREATEMULTICASTGROUPRESPONSE
DESCRIPTOR.message_types_by_name['GetMulticastGroupRequest'] = _GETMULTICASTGROUPREQUEST
DESCRIPTOR.message_types_by_name['GetMulticastGroupResponse'] = _GETMULTICASTGROUPRESPONSE
DESCRIPTOR.message_types_by_name['UpdateMulticastGroupRequest'] = _UPDATEMULTICASTGROUPREQUEST
DESCRIPTOR.message_types_by_name['DeleteMulticastGroupRequest'] = _DELETEMULTICASTGROUPREQUEST
DESCRIPTOR.message_types_by_name['AddDeviceToMulticastGroupRequest'] = _ADDDEVICETOMULTICASTGROUPREQUEST
DESCRIPTOR.message_types_by_name['RemoveDeviceFromMulticastGroupRequest'] = _REMOVEDEVICEFROMMULTICASTGROUPREQUEST
DESCRIPTOR.message_types_by_name['ListMulticastGroupRequest'] = _LISTMULTICASTGROUPREQUEST
DESCRIPTOR.message_types_by_name['ListMulticastGroupResponse'] = _LISTMULTICASTGROUPRESPONSE
DESCRIPTOR.message_types_by_name['MulticastQueueItem'] = _MULTICASTQUEUEITEM
DESCRIPTOR.message_types_by_name['EnqueueMulticastQueueItemRequest'] = _ENQUEUEMULTICASTQUEUEITEMREQUEST
DESCRIPTOR.message_types_by_name['EnqueueMulticastQueueItemResponse'] = _ENQUEUEMULTICASTQUEUEITEMRESPONSE
DESCRIPTOR.message_types_by_name['FlushMulticastGroupQueueItemsRequest'] = _FLUSHMULTICASTGROUPQUEUEITEMSREQUEST
DESCRIPTOR.message_types_by_name['ListMulticastGroupQueueItemsRequest'] = _LISTMULTICASTGROUPQUEUEITEMSREQUEST
DESCRIPTOR.message_types_by_name['ListMulticastGroupQueueItemsResponse'] = _LISTMULTICASTGROUPQUEUEITEMSRESPONSE
DESCRIPTOR.enum_types_by_name['MulticastGroupType'] = _MULTICASTGROUPTYPE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
MulticastGroup = _reflection.GeneratedProtocolMessageType('MulticastGroup', (_message.Message,), {
'DESCRIPTOR' : _MULTICASTGROUP,
'__module__' : 'chirpstack_api.as_pb.external.api.multicastGroup_pb2'
# @@protoc_insertion_point(class_scope:api.MulticastGroup)
})
_sym_db.RegisterMessage(MulticastGroup)
MulticastGroupListItem = _reflection.GeneratedProtocolMessageType('MulticastGroupListItem', (_message.Message,), {
'DESCRIPTOR' : _MULTICASTGROUPLISTITEM,
'__module__' : 'chirpstack_api.as_pb.external.api.multicastGroup_pb2'
# @@protoc_insertion_point(class_scope:api.MulticastGroupListItem)
})
_sym_db.RegisterMessage(MulticastGroupListItem)
CreateMulticastGroupRequest = _reflection.GeneratedProtocolMessageType('CreateMulticastGroupRequest', (_message.Message,), {
'DESCRIPTOR' : _CREATEMULTICASTGROUPREQUEST,
'__module__' : 'chirpstack_api.as_pb.external.api.multicastGroup_pb2'
# @@protoc_insertion_point(class_scope:api.CreateMulticastGroupRequest)
})
_sym_db.RegisterMessage(CreateMulticastGroupRequest)
CreateMulticastGroupResponse = _reflection.GeneratedProtocolMessageType('CreateMulticastGroupResponse', (_message.Message,), {
'DESCRIPTOR' : _CREATEMULTICASTGROUPRESPONSE,
'__module__' : 'chirpstack_api.as_pb.external.api.multicastGroup_pb2'
# @@protoc_insertion_point(class_scope:api.CreateMulticastGroupResponse)
})
_sym_db.RegisterMessage(CreateMulticastGroupResponse)
GetMulticastGroupRequest = _reflection.GeneratedProtocolMessageType('GetMulticastGroupRequest', (_message.Message,), {
'DESCRIPTOR' : _GETMULTICASTGROUPREQUEST,
'__module__' : 'chirpstack_api.as_pb.external.api.multicastGroup_pb2'
# @@protoc_insertion_point(class_scope:api.GetMulticastGroupRequest)
})
_sym_db.RegisterMessage(GetMulticastGroupRequest)
GetMulticastGroupResponse = _reflection.GeneratedProtocolMessageType('GetMulticastGroupResponse', (_message.Message,), {
'DESCRIPTOR' : _GETMULTICASTGROUPRESPONSE,
'__module__' : 'chirpstack_api.as_pb.external.api.multicastGroup_pb2'
# @@protoc_insertion_point(class_scope:api.GetMulticastGroupResponse)
})
_sym_db.RegisterMessage(GetMulticastGroupResponse)
UpdateMulticastGroupRequest = _reflection.GeneratedProtocolMessageType('UpdateMulticastGroupRequest', (_message.Message,), {
'DESCRIPTOR' : _UPDATEMULTICASTGROUPREQUEST,
'__module__' : 'chirpstack_api.as_pb.external.api.multicastGroup_pb2'
# @@protoc_insertion_point(class_scope:api.UpdateMulticastGroupRequest)
})
_sym_db.RegisterMessage(UpdateMulticastGroupRequest)
DeleteMulticastGroupRequest = _reflection.GeneratedProtocolMessageType('DeleteMulticastGroupRequest', (_message.Message,), {
'DESCRIPTOR' : _DELETEMULTICASTGROUPREQUEST,
'__module__' : 'chirpstack_api.as_pb.external.api.multicastGroup_pb2'
# @@protoc_insertion_point(class_scope:api.DeleteMulticastGroupRequest)
})
_sym_db.RegisterMessage(DeleteMulticastGroupRequest)
AddDeviceToMulticastGroupRequest = _reflection.GeneratedProtocolMessageType('AddDeviceToMulticastGroupRequest', (_message.Message,), {
'DESCRIPTOR' : _ADDDEVICETOMULTICASTGROUPREQUEST,
'__module__' : 'chirpstack_api.as_pb.external.api.multicastGroup_pb2'
# @@protoc_insertion_point(class_scope:api.AddDeviceToMulticastGroupRequest)
})
_sym_db.RegisterMessage(AddDeviceToMulticastGroupRequest)
RemoveDeviceFromMulticastGroupRequest = _reflection.GeneratedProtocolMessageType('RemoveDeviceFromMulticastGroupRequest', (_message.Message,), {
'DESCRIPTOR' : _REMOVEDEVICEFROMMULTICASTGROUPREQUEST,
'__module__' : 'chirpstack_api.as_pb.external.api.multicastGroup_pb2'
# @@protoc_insertion_point(class_scope:api.RemoveDeviceFromMulticastGroupRequest)
})
_sym_db.RegisterMessage(RemoveDeviceFromMulticastGroupRequest)
ListMulticastGroupRequest = _reflection.GeneratedProtocolMessageType('ListMulticastGroupRequest', (_message.Message,), {
'DESCRIPTOR' : _LISTMULTICASTGROUPREQUEST,
'__module__' : 'chirpstack_api.as_pb.external.api.multicastGroup_pb2'
# @@protoc_insertion_point(class_scope:api.ListMulticastGroupRequest)
})
_sym_db.RegisterMessage(ListMulticastGroupRequest)
ListMulticastGroupResponse = _reflection.GeneratedProtocolMessageType('ListMulticastGroupResponse', (_message.Message,), {
'DESCRIPTOR' : _LISTMULTICASTGROUPRESPONSE,
'__module__' : 'chirpstack_api.as_pb.external.api.multicastGroup_pb2'
# @@protoc_insertion_point(class_scope:api.ListMulticastGroupResponse)
})
_sym_db.RegisterMessage(ListMulticastGroupResponse)
MulticastQueueItem = _reflection.GeneratedProtocolMessageType('MulticastQueueItem', (_message.Message,), {
'DESCRIPTOR' : _MULTICASTQUEUEITEM,
'__module__' : 'chirpstack_api.as_pb.external.api.multicastGroup_pb2'
# @@protoc_insertion_point(class_scope:api.MulticastQueueItem)
})
_sym_db.RegisterMessage(MulticastQueueItem)
EnqueueMulticastQueueItemRequest = _reflection.GeneratedProtocolMessageType('EnqueueMulticastQueueItemRequest', (_message.Message,), {
'DESCRIPTOR' : _ENQUEUEMULTICASTQUEUEITEMREQUEST,
'__module__' : 'chirpstack_api.as_pb.external.api.multicastGroup_pb2'
# @@protoc_insertion_point(class_scope:api.EnqueueMulticastQueueItemRequest)
})
_sym_db.RegisterMessage(EnqueueMulticastQueueItemRequest)
EnqueueMulticastQueueItemResponse = _reflection.GeneratedProtocolMessageType('EnqueueMulticastQueueItemResponse', (_message.Message,), {
'DESCRIPTOR' : _ENQUEUEMULTICASTQUEUEITEMRESPONSE,
'__module__' : 'chirpstack_api.as_pb.external.api.multicastGroup_pb2'
# @@protoc_insertion_point(class_scope:api.EnqueueMulticastQueueItemResponse)
})
_sym_db.RegisterMessage(EnqueueMulticastQueueItemResponse)
FlushMulticastGroupQueueItemsRequest = _reflection.GeneratedProtocolMessageType('FlushMulticastGroupQueueItemsRequest', (_message.Message,), {
'DESCRIPTOR' : _FLUSHMULTICASTGROUPQUEUEITEMSREQUEST,
'__module__' : 'chirpstack_api.as_pb.external.api.multicastGroup_pb2'
# @@protoc_insertion_point(class_scope:api.FlushMulticastGroupQueueItemsRequest)
})
_sym_db.RegisterMessage(FlushMulticastGroupQueueItemsRequest)
ListMulticastGroupQueueItemsRequest = _reflection.GeneratedProtocolMessageType('ListMulticastGroupQueueItemsRequest', (_message.Message,), {
'DESCRIPTOR' : _LISTMULTICASTGROUPQUEUEITEMSREQUEST,
'__module__' : 'chirpstack_api.as_pb.external.api.multicastGroup_pb2'
# @@protoc_insertion_point(class_scope:api.ListMulticastGroupQueueItemsRequest)
})
_sym_db.RegisterMessage(ListMulticastGroupQueueItemsRequest)
ListMulticastGroupQueueItemsResponse = _reflection.GeneratedProtocolMessageType('ListMulticastGroupQueueItemsResponse', (_message.Message,), {
'DESCRIPTOR' : _LISTMULTICASTGROUPQUEUEITEMSRESPONSE,
'__module__' : 'chirpstack_api.as_pb.external.api.multicastGroup_pb2'
# @@protoc_insertion_point(class_scope:api.ListMulticastGroupQueueItemsResponse)
})
_sym_db.RegisterMessage(ListMulticastGroupQueueItemsResponse)
DESCRIPTOR._options = None
_MULTICASTGROUPSERVICE = _descriptor.ServiceDescriptor(
name='MulticastGroupService',
full_name='api.MulticastGroupService',
file=DESCRIPTOR,
index=0,
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_start=2081,
serialized_end=3419,
methods=[
_descriptor.MethodDescriptor(
name='Create',
full_name='api.MulticastGroupService.Create',
index=0,
containing_service=None,
input_type=_CREATEMULTICASTGROUPREQUEST,
output_type=_CREATEMULTICASTGROUPRESPONSE,
serialized_options=b'\202\323\344\223\002\032\"\025/api/multicast-groups:\001*',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='Get',
full_name='api.MulticastGroupService.Get',
index=1,
containing_service=None,
input_type=_GETMULTICASTGROUPREQUEST,
output_type=_GETMULTICASTGROUPRESPONSE,
serialized_options=b'\202\323\344\223\002\034\022\032/api/multicast-groups/{id}',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='Update',
full_name='api.MulticastGroupService.Update',
index=2,
containing_service=None,
input_type=_UPDATEMULTICASTGROUPREQUEST,
output_type=google_dot_protobuf_dot_empty__pb2._EMPTY,
serialized_options=b'\202\323\344\223\002/\032*/api/multicast-groups/{multicast_group.id}:\001*',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='Delete',
full_name='api.MulticastGroupService.Delete',
index=3,
containing_service=None,
input_type=_DELETEMULTICASTGROUPREQUEST,
output_type=google_dot_protobuf_dot_empty__pb2._EMPTY,
serialized_options=b'\202\323\344\223\002\034*\032/api/multicast-groups/{id}',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='List',
full_name='api.MulticastGroupService.List',
index=4,
containing_service=None,
input_type=_LISTMULTICASTGROUPREQUEST,
output_type=_LISTMULTICASTGROUPRESPONSE,
serialized_options=b'\202\323\344\223\002\027\022\025/api/multicast-groups',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='AddDevice',
full_name='api.MulticastGroupService.AddDevice',
index=5,
containing_service=None,
input_type=_ADDDEVICETOMULTICASTGROUPREQUEST,
output_type=google_dot_protobuf_dot_empty__pb2._EMPTY,
serialized_options=b'\202\323\344\223\0027\"2/api/multicast-groups/{multicast_group_id}/devices:\001*',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='RemoveDevice',
full_name='api.MulticastGroupService.RemoveDevice',
index=6,
containing_service=None,
input_type=_REMOVEDEVICEFROMMULTICASTGROUPREQUEST,
output_type=google_dot_protobuf_dot_empty__pb2._EMPTY,
serialized_options=b'\202\323\344\223\002>*</api/multicast-groups/{multicast_group_id}/devices/{dev_eui}',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='Enqueue',
full_name='api.MulticastGroupService.Enqueue',
index=7,
containing_service=None,
input_type=_ENQUEUEMULTICASTQUEUEITEMREQUEST,
output_type=_ENQUEUEMULTICASTQUEUEITEMRESPONSE,
serialized_options=b'\202\323\344\223\002J\"E/api/multicast-groups/{multicast_queue_item.multicast_group_id}/queue:\001*',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='FlushQueue',
full_name='api.MulticastGroupService.FlushQueue',
index=8,
containing_service=None,
input_type=_FLUSHMULTICASTGROUPQUEUEITEMSREQUEST,
output_type=google_dot_protobuf_dot_empty__pb2._EMPTY,
serialized_options=b'\202\323\344\223\0022*0/api/multicast-groups/{multicast_group_id}/queue',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='ListQueue',
full_name='api.MulticastGroupService.ListQueue',
index=9,
containing_service=None,
input_type=_LISTMULTICASTGROUPQUEUEITEMSREQUEST,
output_type=_LISTMULTICASTGROUPQUEUEITEMSRESPONSE,
serialized_options=b'\202\323\344\223\0022\0220/api/multicast-groups/{multicast_group_id}/queue',
create_key=_descriptor._internal_create_key,
),
])
_sym_db.RegisterServiceDescriptor(_MULTICASTGROUPSERVICE)
DESCRIPTOR.services_by_name['MulticastGroupService'] = _MULTICASTGROUPSERVICE
# @@protoc_insertion_point(module_scope)
| true | true |
f7344ebc17b2922dc48ea522bcfbff576ca33829 | 319 | py | Python | todo/urls.py | gleecoders/django-crash-course | 2f565b6c9e599b5e5f3411058727b5fccbdbfa6d | [
"MIT"
] | null | null | null | todo/urls.py | gleecoders/django-crash-course | 2f565b6c9e599b5e5f3411058727b5fccbdbfa6d | [
"MIT"
] | null | null | null | todo/urls.py | gleecoders/django-crash-course | 2f565b6c9e599b5e5f3411058727b5fccbdbfa6d | [
"MIT"
] | null | null | null | from django.urls import path
from .views import todo_list, todo_detail, todo_create, todo_update, todo_delete
# URL namespace for reversing these routes, e.g. reverse('todos:...').
app_name = 'todos'
# CRUD routes for the todo app; <id> is captured as an untyped (string)
# path parameter.  'create/' must precede '<id>/' so it is not swallowed
# by the <id> pattern.
urlpatterns = [
    path('', todo_list),
    path('create/', todo_create),
    path('<id>/', todo_detail),
    path('<id>/update/', todo_update),
    path('<id>/delete/', todo_delete),
]
| 22.785714 | 80 | 0.667712 | from django.urls import path
from .views import todo_list, todo_detail, todo_create, todo_update, todo_delete
app_name = 'todos'
urlpatterns = [
path('', todo_list),
path('create/', todo_create),
path('<id>/', todo_detail),
path('<id>/update/', todo_update),
path('<id>/delete/', todo_delete),
]
| true | true |
f7344f57262cf26e28149aafdf8a6e25dc9a22d9 | 10,113 | py | Python | src/blade/util.py | noanswer/blade-build | 647921d283c101ee27edbc0fc27495e4fc5db35d | [
"BSD-3-Clause"
] | 817 | 2015-01-06T02:43:58.000Z | 2019-03-20T02:19:30.000Z | src/blade/util.py | salyty/blade-build | 9d8c396ff2d27616369722410ec801e217bbcfd0 | [
"BSD-3-Clause"
] | 160 | 2015-01-08T06:16:54.000Z | 2019-03-20T06:51:56.000Z | src/blade/util.py | salyty/blade-build | 9d8c396ff2d27616369722410ec801e217bbcfd0 | [
"BSD-3-Clause"
] | 253 | 2015-01-04T17:02:30.000Z | 2019-03-19T18:25:40.000Z | # Copyright (c) 2011 Tencent Inc.
# All rights reserved.
#
# Author: Huan Yu <huanyu@tencent.com>
# Feng chen <phongchen@tencent.com>
# Yi Wang <yiwang@tencent.com>
# Chong peng <michaelpeng@tencent.com>
# Date: October 20, 2011
"""
This is the util module which provides some helper functions.
"""
from __future__ import absolute_import
from __future__ import print_function
import ast
import errno
import fcntl
import hashlib
import inspect
import json
import os
import signal
import string
import subprocess
import sys
import zipfile
_IN_PY3 = sys.version_info[0] == 3
# In python 2, cPickle is much faster than pickle, but in python 3, pickle is
# reimplemented in C extension and then the standardalone cPickle is removed.
if _IN_PY3:
import pickle # pylint: disable=unused-import
else:
# pyright: reportMissingImports=false
import cPickle as pickle # pylint: disable=import-error, unused-import
def md5sum_bytes(content):
    """Return the hex md5 digest of a byte string.

    Raises AssertionError when *content* is not ``bytes``.
    """
    assert isinstance(content, bytes), 'Invalid type %s' % type(content)
    return hashlib.md5(content).hexdigest()
def md5sum_str(content):
    """Return the hex md5 digest of a text string (UTF-8 encoded first)."""
    assert isinstance(content, str), 'Invalid type %s' % type(content)
    encoded = content.encode('utf-8')
    return md5sum_bytes(encoded)
def md5sum_file(file_name):
    """Return the hex md5 digest of the contents of the named file."""
    with open(file_name, 'rb') as source:
        return md5sum_bytes(source.read())
def md5sum(obj):
    """Calculate md5sum of a string-like object.

    Args:
        obj: either ``bytes`` or ``str``.

    Returns:
        The hex md5 digest string.

    Raises:
        TypeError: when *obj* is neither bytes nor str.
    """
    if isinstance(obj, bytes):
        return md5sum_bytes(obj)
    if isinstance(obj, str):
        return md5sum_str(obj)
    # Bug fix: report the type of the actual argument; the original used
    # `type(str)`, which always printed "<class 'type'>"-style noise.
    raise TypeError('Invalid type %s' % type(obj))
def lock_file(filename):
    """Acquire an exclusive, non-blocking flock on *filename*.

    The file is created if it does not exist.

    Returns:
        (fd, 0) on success, or (-1, errno) when opening or locking fails
        (e.g. the lock is already held elsewhere).
    """
    fd = -1
    try:
        fd = os.open(filename, os.O_CREAT | os.O_RDWR)
        # Close-on-exec so child processes do not inherit the lock fd.
        old_fd_flags = fcntl.fcntl(fd, fcntl.F_GETFD)
        fcntl.fcntl(fd, fcntl.F_SETFD, old_fd_flags | fcntl.FD_CLOEXEC)
        # LOCK_NB: fail immediately instead of blocking when already locked.
        fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        return fd, 0
    except IOError as ex_value:
        # Bug fix: don't leak the descriptor when fcntl/flock fails after
        # os.open succeeded.
        if fd != -1:
            try:
                os.close(fd)
            except OSError:
                pass
        return -1, ex_value.errno
def unlock_file(fd):
    """Release the flock held on *fd* and close the descriptor.

    Errors are deliberately swallowed: unlocking is best-effort cleanup.
    """
    try:
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)
    except IOError:
        pass
def var_to_list(var):
    """Normalize a scalar or list into a new list.

    None becomes []; a list is shallow-copied; anything else is wrapped
    in a single-element list.
    """
    if var is None:
        return []
    if isinstance(var, list):
        return list(var)
    return [var]
def var_to_list_or_none(var):
    """Like var_to_list, but None passes through unchanged."""
    return None if var is None else var_to_list(var)
def stable_unique(seq):
    """Remove duplicates from *seq*, keeping the first-seen order."""
    seen = set()
    result = []
    for item in seq:
        if item not in seen:
            seen.add(item)
            result.append(item)
    return result
def to_string(text):
    """Coerce *text* to str: bytes are UTF-8 decoded, None passes through.

    Raises:
        TypeError: for any other input type.
    """
    if isinstance(text, str):
        return text
    if isinstance(text, bytes):
        return text.decode('utf-8')
    if text is None:
        return None
    raise TypeError('Unknown type %s' % type(text))
def get_cwd():
    """Return the logical current working directory as a string.

    os.getcwd() resolves symbolic links, and os.environ.get('PWD') does not
    reflect os.chdir(), so the external `pwd` command is used instead.
    """
    # Fix: with shell=True the command must be a single string; the original
    # passed a list, which makes any extra elements arguments to the shell
    # itself rather than to the command.
    p = subprocess.Popen('pwd', stdout=subprocess.PIPE, shell=True)
    return to_string(p.communicate()[0].strip())
def find_file_bottom_up(name, from_dir=None):
    """Search for *name* from *from_dir* upward to the filesystem root.

    Returns the absolute path of the first match, or '' when not found.
    *from_dir* defaults to the current working directory.
    """
    current = os.path.abspath(from_dir if from_dir is not None else get_cwd())
    while True:
        candidate = os.path.join(current, name)
        if os.path.exists(candidate):
            return candidate
        if current == '/':
            return ''
        current = os.path.dirname(current)
def path_under_dir(path, dir):
    """Check whether *path* is under *dir*.

    Both arguments must be normalized and either both relative or both
    absolute.  dir == '.' matches everything; a path equal to dir counts
    as being under it.
    """
    if dir == '.' or path == dir:
        return True
    return path.startswith(dir) and path[len(dir)] == os.path.sep
def mkdir_p(path):
    """Create *path* (and any missing parents) if it does not exist."""
    if os.path.isdir(path):
        return
    try:
        os.makedirs(path)
    except OSError as exc:
        # Another process may have created it between the check and makedirs.
        if not (exc.errno == errno.EEXIST and os.path.isdir(path)):
            raise
def _echo(stdout, stderr):
"""Echo messages to stdout and stderr."""
if stdout:
sys.stdout.write(stdout)
if stderr:
sys.stderr.write(stderr)
def shell(cmd, env=None):
    """Run *cmd* through the shell and return its exit code.

    *cmd* may be a string or a list of strings (joined with spaces).
    The child's stdout/stderr are captured and echoed back to this
    process's streams, except when the child was killed by SIGINT.
    """
    if isinstance(cmd, list):
        cmdline = ' '.join(cmd)
    else:
        cmdline = cmd
    p = subprocess.Popen(cmdline,
                         env=env,
                         stderr=subprocess.PIPE,
                         stdout=subprocess.PIPE,
                         shell=True)
    stdout, stderr = p.communicate()
    if p.returncode:
        if p.returncode != -signal.SIGINT:
            # Real failure: surface the child's output.
            _echo(stdout, stderr)
    else:
        # Success: still echo any warnings the child printed.
        _echo(stdout, stderr)
    return p.returncode
def run_command(args, **kwargs):
    """Run a command without echo; return (returncode, stdout, stderr).

    stdout/stderr are captured as text (universal newlines) unless the
    caller overrides the corresponding keyword arguments.
    """
    kwargs.setdefault('stdout', subprocess.PIPE)
    kwargs.setdefault('stderr', subprocess.PIPE)
    if _IN_PY3:
        # subprocess.run exists only on Python 3.
        r = subprocess.run(args, universal_newlines=True, **kwargs)
        return r.returncode, r.stdout, r.stderr
    else:
        p = subprocess.Popen(args, universal_newlines=True, **kwargs)
        stdout, stderr = p.communicate()
        return p.returncode, stdout, stderr
def load_scm(build_dir):
    """Read (revision, url) from <build_dir>/scm.json.

    Returns ('unknown', 'unknown') when the file does not exist.
    """
    scm_path = os.path.join(build_dir, 'scm.json')
    if not os.path.exists(scm_path):
        return 'unknown', 'unknown'
    with open(scm_path) as f:
        scm = json.load(f)
    return scm['revision'], scm['url']
def environ_add_path(env, key, path):
    """Prepend *path* to a PATH-style variable *key* in mapping *env*.

    Applies to PATH, LD_LIBRARY_PATH and similar colon-separated variables.
    """
    existing = env.get(key)
    env[key] = (path + ':' + existing) if existing else path
def cpu_count():
    """Return the number of online CPUs."""
    try:
        import multiprocessing  # pylint: disable=import-outside-toplevel
    except ImportError:
        # Fall back to the POSIX sysconf value.
        return int(os.sysconf('SC_NPROCESSORS_ONLN'))
    return multiprocessing.cpu_count()
# On Python 2 `str` has no maketrans(); fall back to the `string` module.
# Maps characters common in target names (',-/:.+*') to underscores.
_TRANS_TABLE = (str if _IN_PY3 else string).maketrans(',-/:.+*', '_______')
def regular_variable_name(name):
    """Convert a name to a valid identifier (special chars become '_')."""
    return name.translate(_TRANS_TABLE)
# Some python 2/3 compatibility helpers.
if _IN_PY3:
    def iteritems(d):
        """Iterate over (key, value) pairs of dict *d* (py2/py3 shim)."""
        return d.items()
    def itervalues(d):
        """Iterate over values of dict *d* (py2/py3 shim)."""
        return d.values()
else:
    def iteritems(d):
        """Iterate over (key, value) pairs of dict *d* (py2/py3 shim)."""
        return d.iteritems()
    def itervalues(d):
        """Iterate over values of dict *d* (py2/py3 shim)."""
        return d.itervalues()
def exec_file_content(filename, content, globals, locals):
    """Execute code *content* as if it had been loaded from *filename*.

    *filename* is used only for tracebacks and diagnostics.
    """
    # pylint: disable=exec-used
    exec(compile(content, filename, 'exec'), globals, locals)
def exec_file(filename, globals, locals):
    """Python-2 ``execfile`` equivalent (Python 3 removed the builtin)."""
    # pylint: disable=exec-used
    with open(filename, 'rb') as source:
        exec_file_content(filename, source.read(), globals, locals)
def eval_file(filepath):
    """Load a Python literal value from a file.

    The file may only contain Python literal structures: strings, bytes,
    numbers, tuples, lists, dicts, sets, booleans, and None (see
    ``ast.literal_eval``), so loading untrusted files is safe.
    """
    # Fix: close the file deterministically with a context manager instead
    # of leaking the handle until garbage collection.
    with open(filepath) as f:
        return ast.literal_eval(f.read())
def source_location(filename):
    """Return 'path:lineno' of the innermost call-stack frame whose code
    file ends with *filename*, or '<filename>:1' when no frame matches."""
    # Walking frames directly is much faster than inspect.stack().
    frame = inspect.currentframe()
    while frame is not None:
        code = frame.f_code
        if code.co_filename.endswith(filename):
            return '%s:%s' % (code.co_filename, frame.f_lineno)
        frame = frame.f_back
    return '%s:%s' % (filename, 1)
def calling_source_location(skip=0):
    """Return 'path:lineno' of a caller, skipping *skip* extra levels.

    skip=0 yields the immediate caller of this function.

    Raises:
        ValueError: when the stack is shallower than requested.
    """
    frame = inspect.currentframe()
    # Step past this function's own frame plus `skip` caller frames.
    for _ in range(skip + 1):
        if frame is None:
            break
        frame = frame.f_back
    if frame is not None:
        return '%s:%s' % (frame.f_code.co_filename, frame.f_lineno)
    raise ValueError('Invalid value "%d" for "skip"' % (skip + 1))
def parse_command_line(argv):
    """Minimal command line parsing.

    Only '--name=value' tokens become options; everything else, including
    a bare '--flag' without '=', is treated as a positional argument.

    Returns:
        tuple(options: dict, args: list)
    """
    options = {}
    args = []
    for token in argv:
        if token.startswith('--') and '=' in token[2:]:
            name, _, value = token[2:].partition('=')
            options[name] = value
        else:
            args.append(token)
    return options, args
def open_zip_file_for_write(filename, compression_level):
    """Open *filename* for writing as a zip with the given compression level.

    Args:
        filename: output zip file path.
        compression_level: zlib level 0-9, as int or str; 0 means store
            entries uncompressed.

    Returns:
        An open ``zipfile.ZipFile`` in write mode with Zip64 enabled.
    """
    compression = zipfile.ZIP_DEFLATED
    if sys.version_info < (3, 7):
        # ZipFile gained `compresslevel` in Python 3.7; before that,
        # approximate level 0 with ZIP_STORED.  Fix: compare via str() so
        # both the int 0 and the string "0" are recognized.
        if str(compression_level) == '0':
            compression = zipfile.ZIP_STORED
        return zipfile.ZipFile(filename, 'w', compression, allowZip64=True)
    # pylint: disable=unexpected-keyword-arg
    return zipfile.ZipFile(filename, 'w', compression,
                           compresslevel=int(compression_level),
                           allowZip64=True)
| 28.407303 | 126 | 0.641748 |
from __future__ import absolute_import
from __future__ import print_function
import ast
import errno
import fcntl
import hashlib
import inspect
import json
import os
import signal
import string
import subprocess
import sys
import zipfile
_IN_PY3 = sys.version_info[0] == 3
if _IN_PY3:
import pickle
else:
import cPickle as pickle
def md5sum_bytes(content):
assert isinstance(content, bytes), 'Invalid type %s' % type(content)
m = hashlib.md5()
m.update(content)
return m.hexdigest()
def md5sum_str(content):
assert isinstance(content, str), 'Invalid type %s' % type(content)
return md5sum_bytes(content.encode('utf-8'))
def md5sum_file(file_name):
with open(file_name, 'rb') as f:
digest = md5sum_bytes(f.read())
return digest
def md5sum(obj):
if isinstance(obj, bytes):
return md5sum_bytes(obj)
if isinstance(obj, str):
return md5sum_str(obj)
raise TypeError('Invalid type %s' % type(str))
def lock_file(filename):
try:
fd = os.open(filename, os.O_CREAT | os.O_RDWR)
old_fd_flags = fcntl.fcntl(fd, fcntl.F_GETFD)
fcntl.fcntl(fd, fcntl.F_SETFD, old_fd_flags | fcntl.FD_CLOEXEC)
fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
return fd, 0
except IOError as ex_value:
return -1, ex_value.errno
def unlock_file(fd):
try:
fcntl.flock(fd, fcntl.LOCK_UN)
os.close(fd)
except IOError:
pass
def var_to_list(var):
if isinstance(var, list):
return var[:]
if var is None:
return []
return [var]
def var_to_list_or_none(var):
if var is None:
return var
return var_to_list(var)
def stable_unique(seq):
seen = set()
seen_add = seen.add
return [x for x in seq if not (x in seen or seen_add(x))]
def to_string(text):
if text is None:
return text
if isinstance(text, str):
return text
if isinstance(text, bytes):
return text.decode('utf-8')
raise TypeError('Unknown type %s' % type(text))
def get_cwd():
p = subprocess.Popen(['pwd'], stdout=subprocess.PIPE, shell=True)
return to_string(p.communicate()[0].strip())
def find_file_bottom_up(name, from_dir=None):
if from_dir is None:
from_dir = get_cwd()
finding_dir = os.path.abspath(from_dir)
while True:
path = os.path.join(finding_dir, name)
if os.path.exists(path):
return path
if finding_dir == '/':
break
finding_dir = os.path.dirname(finding_dir)
return ''
def path_under_dir(path, dir):
return dir == '.' or path == dir or path.startswith(dir) and path[len(dir)] == os.path.sep
def mkdir_p(path):
try:
if not os.path.isdir(path):
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def _echo(stdout, stderr):
if stdout:
sys.stdout.write(stdout)
if stderr:
sys.stderr.write(stderr)
def shell(cmd, env=None):
if isinstance(cmd, list):
cmdline = ' '.join(cmd)
else:
cmdline = cmd
p = subprocess.Popen(cmdline,
env=env,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
shell=True)
stdout, stderr = p.communicate()
if p.returncode:
if p.returncode != -signal.SIGINT:
_echo(stdout, stderr)
else:
_echo(stdout, stderr)
return p.returncode
def run_command(args, **kwargs):
kwargs.setdefault('stdout', subprocess.PIPE)
kwargs.setdefault('stderr', subprocess.PIPE)
if _IN_PY3:
r = subprocess.run(args, universal_newlines=True, **kwargs)
return r.returncode, r.stdout, r.stderr
else:
p = subprocess.Popen(args, universal_newlines=True, **kwargs)
stdout, stderr = p.communicate()
return p.returncode, stdout, stderr
def load_scm(build_dir):
revision = url = 'unknown'
path = os.path.join(build_dir, 'scm.json')
if os.path.exists(path):
with open(path) as f:
scm = json.load(f)
revision, url = scm['revision'], scm['url']
return revision, url
def environ_add_path(env, key, path):
old = env.get(key)
if old:
env[key] = path + ':' + old
else:
env[key] = path
def cpu_count():
try:
import multiprocessing
return multiprocessing.cpu_count()
except ImportError:
return int(os.sysconf('SC_NPROCESSORS_ONLN'))
_TRANS_TABLE = (str if _IN_PY3 else string).maketrans(',-/:.+*', '_______')
def regular_variable_name(name):
return name.translate(_TRANS_TABLE)
if _IN_PY3:
def iteritems(d):
return d.items()
def itervalues(d):
return d.values()
else:
def iteritems(d):
return d.iteritems()
def itervalues(d):
return d.itervalues()
def exec_file_content(filename, content, globals, locals):
exec(compile(content, filename, 'exec'), globals, locals)
def exec_file(filename, globals, locals):
with open(filename, 'rb') as f:
exec_file_content(filename, f.read(), globals, locals)
def eval_file(filepath):
return ast.literal_eval(open(filepath).read())
def source_location(filename):
full_filename = filename
lineno = 1
frame = inspect.currentframe()
while frame:
if frame.f_code.co_filename.endswith(filename):
full_filename = frame.f_code.co_filename
lineno = frame.f_lineno
break
frame = frame.f_back
return '%s:%s' % (full_filename, lineno)
def calling_source_location(skip=0):
skip += 1
skipped = 0
frame = inspect.currentframe()
while frame:
if skipped == skip:
return '%s:%s' % (frame.f_code.co_filename, frame.f_lineno)
frame = frame.f_back
skipped += 1
raise ValueError('Invalid value "%d" for "skip"' % skip)
def parse_command_line(argv):
options = {}
args = []
for arg in argv:
if arg.startswith('--'):
pos = arg.find('=')
if pos < 0:
args.append(arg)
continue
name = arg[2:pos]
value = arg[pos+1:]
options[name] = value
else:
args.append(arg)
return options, args
def open_zip_file_for_write(filename, compression_level):
compression = zipfile.ZIP_DEFLATED
if sys.version_info.major < 3 or sys.version_info.major == 3 and sys.version_info.minor < 7:
if compression_level == "0":
compression = zipfile.ZIP_STORED
return zipfile.ZipFile(filename, 'w', compression, allowZip64=True)
return zipfile.ZipFile(filename, 'w', compression, compresslevel=int(compression_level), allowZip64=True)
| true | true |
f734515e90618acecca23b18403af764db215dc5 | 1,684 | py | Python | objifier/data_loader.py | 1chimaruGin/Oject_classifier | d27ca8f47d2d0af107582c25a0756dda15361c2e | [
"Apache-2.0"
] | 4 | 2020-08-19T14:50:29.000Z | 2021-06-03T05:22:12.000Z | objifier/data_loader.py | 1chimaruGin/Oject_classifier | d27ca8f47d2d0af107582c25a0756dda15361c2e | [
"Apache-2.0"
] | null | null | null | objifier/data_loader.py | 1chimaruGin/Oject_classifier | d27ca8f47d2d0af107582c25a0756dda15361c2e | [
"Apache-2.0"
] | null | null | null | import torch
from torchvision import datasets, transforms
import os
# Image preprocessing pipelines keyed by phase.
# 'train' applies random crop/flip augmentation; 'val' is deterministic
# (resize then center crop).  Both produce 224x224 normalized tensors.
# NOTE(review): the Normalize mean/std values differ slightly between the
# two phases -- presumably per-split statistics; confirm against the data.
transform = {
    "train": transforms.Compose(
        [
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize(
                [0.4914, 0.4821, 0.4465], [0.2470, 0.2435, 0.2616]
            ),
        ]
    ),
    "val": transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(
                [0.4940, 0.4849, 0.4502], [0.2467, 0.2430, 0.2616]
            ),
        ]
    ),
}
def get_loader(root, batch_size, num_workers):
    """Build ImageFolder train/val loaders rooted at <root>/{train,val}.

    Only the training loader shuffles.

    Returns:
        (data_loader, dataset_size): dicts keyed by 'train' and 'val'.
    """
    dataset = {}
    data_loader = {}
    dataset_size = {}
    for phase in ("train", "val"):
        dataset[phase] = datasets.ImageFolder(
            os.path.join(root, phase), transform=transform[phase]
        )
        data_loader[phase] = torch.utils.data.DataLoader(
            dataset[phase],
            batch_size=batch_size,
            shuffle=(phase == "train"),
            num_workers=num_workers,
        )
        dataset_size[phase] = len(dataset[phase])
    return data_loader, dataset_size
def CIFAR10(batch_size, root="data/"):
    """Build CIFAR-10 train/val loaders, downloading the data if needed.

    Only the training loader shuffles.

    Returns:
        (data_loader, dataset_size): dicts keyed by 'train' and 'val'.
    """
    dataset = {}
    data_loader = {}
    dataset_size = {}
    for phase in ("train", "val"):
        dataset[phase] = datasets.CIFAR10(
            root, train=(phase == "train"), download=True,
            transform=transform[phase]
        )
        data_loader[phase] = torch.utils.data.DataLoader(
            dataset[phase], batch_size=batch_size,
            shuffle=(phase == "train")
        )
        dataset_size[phase] = len(dataset[phase])
    return data_loader, dataset_size
| 25.134328 | 78 | 0.538599 | import torch
from torchvision import datasets, transforms
import os
transform = {
"train": transforms.Compose(
[
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(
[0.4914, 0.4821, 0.4465], [0.2470, 0.2435, 0.2616]
),
]
),
"val": transforms.Compose(
[
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(
[0.4940, 0.4849, 0.4502], [0.2467, 0.2430, 0.2616]
),
]
),
}
def get_loader(root, batch_size, num_workers):
dataset = {
x: datasets.ImageFolder(os.path.join(root, x), transform=transform[x])
for x in ["train", "val"]
}
data_loader = {
x: torch.utils.data.DataLoader(
dataset[x], batch_size=batch_size, shuffle=(x == "train"),
num_workers=num_workers,
)
for x in ["train", "val"]
}
dataset_size = {x: len(dataset[x]) for x in ["train", "val"]}
return data_loader, dataset_size
def CIFAR10(batch_size, root="data/"):
dataset = {
x: datasets.CIFAR10(
root, train=(x == "train"), download=True, transform=transform[x]
)
for x in ["train", "val"]
}
data_loader = {
x: torch.utils.data.DataLoader(
dataset[x], batch_size=batch_size, shuffle=(x == "train")
)
for x in ["train", "val"]
}
dataset_size = {x: len(dataset[x]) for x in ["train", "val"]}
return data_loader, dataset_size
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.