| id | content |
|---|---|
11539422
|
def count(data, var=None, globvar=None):
    """TTP match variable function: increment the per-template variable `var`
    and/or the global variable `globvar` each time the match is processed.
    Returns the data unchanged."""
    if var:
        try:
            _ttp_["parser_object"].vars[var] += 1
        except KeyError:
            _ttp_["parser_object"].vars[var] = 1
    if globvar:
        try:
            _ttp_["global_vars"][globvar] += 1
        except KeyError:
            _ttp_["global_vars"][globvar] = 1
    return data, None
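# Usage sketch (hedged; variable names are illustrative): in a TTP template this
# macro is attached to a match variable so every processed match bumps a counter:
#
#   interface {{ interface | count(var="intf_count", globvar="total_count") }}
#
# After parsing, the per-template variable "intf_count" and the global variable
# "total_count" hold the number of matched lines.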
|
11539428
|
import unittest
from socketlabs.injectionapi.message.customheader import CustomHeader
from socketlabs.injectionapi.message.bulkrecipient import BulkRecipient
from socketlabs.injectionapi.message.bulkmessage import BulkMessage
from socketlabs.injectionapi.core.sendvalidator import *
from socketlabs.injectionapi.message.basicmessage import BasicMessage
from socketlabs.injectionapi.message.emailaddress import EmailAddress
from tests.random_helper import RandomHelper
class TestSendValidator(unittest.TestCase):
"""
Testing the SendValidator
"""
    def setUp(self):
        self.random_helper = RandomHelper()
""" validate_base_message """
def test_validate_base_message_ReturnsMessageValidationEmptySubject_WhenSubjectIsEmpty(self):
# Arrange
message = BasicMessage()
message.subject = None
# Act
actual = validate_base_message(message)
# Assert
self.assertEqual(SendResult.MessageValidationEmptySubject, actual)
def test_validate_base_message_ReturnsEmailAddressValidationMissingFrom_WhenFromRecipientIsNone(self):
# Arrange
message = BasicMessage()
message.subject = self.random_helper.random_string()
message.from_email_address = None
# Act
actual = validate_base_message(message)
# Assert
self.assertEqual(SendResult.EmailAddressValidationMissingFrom, actual)
def test_validate_base_message_ReturnsEmailAddressValidationMissingFrom_WhenFromRecipientIsObjWithNone(self):
# Arrange
message = BasicMessage()
message.subject = self.random_helper.random_string()
message.from_email_address = EmailAddress(None)
# Act
actual = validate_base_message(message)
# Assert
self.assertEqual(SendResult.EmailAddressValidationMissingFrom, actual)
def test_validate_base_message_ReturnsEmailAddressValidationMissingFrom_WhenFromRecipientIsEmpty(self):
# Arrange
message = BasicMessage()
message.subject = self.random_helper.random_string()
message.from_email_address = EmailAddress('')
# Act
actual = validate_base_message(message)
# Assert
self.assertEqual(SendResult.EmailAddressValidationMissingFrom, actual)
def test_validate_base_message_ReturnsEmailAddressValidationInvalidFrom_WhenFromRecipientIsInvalid(self):
# Arrange
message = BasicMessage()
message.subject = self.random_helper.random_string()
message.from_email_address = EmailAddress("$$##%%")
# Act
actual = validate_base_message(message)
# Assert
self.assertEqual(SendResult.EmailAddressValidationInvalidFrom, actual)
def test_validate_base_message_ReturnsMessageValidationEmptyMessage_WhenAllBodyFieldsAreEmpty(self):
# Arrange
message = BasicMessage()
message.subject = self.random_helper.random_string()
message.from_email_address = self.random_helper.random_email_address()
message.html_body = None
message.plain_text_body = None
message.api_template = None
# Act
actual = validate_base_message(message)
# Assert
self.assertEqual(SendResult.MessageValidationEmptyMessage, actual)
def test_validate_base_message_ReturnsMessageValidationInvalidCustomHeaders_WhenCustomHeadersAreInvalid(self):
# Arrange
message = BasicMessage()
message.subject = self.random_helper.random_string()
message.from_email_address = self.random_helper.random_email_address()
message.html_body = self.random_helper.random_string()
message.add_custom_header("", "")
# Act
actual = validate_base_message(message)
# Assert
self.assertEqual(SendResult.MessageValidationInvalidCustomHeaders, actual)
def test_validate_base_message_ReturnsSuccess_WhenSubjectAndFromRecipientAndHtmlBodyIsNotEmpty(self):
# Arrange
message = BasicMessage()
message.subject = self.random_helper.random_string()
message.from_email_address = self.random_helper.random_email_address()
message.html_body = self.random_helper.random_string()
# Act
actual = validate_base_message(message)
# Assert
self.assertEqual(SendResult.Success, actual)
def test_validate_base_message_ReturnsSuccess_WhenSubjectAndFromRecipientAndPlainTextBodyIsNotEmpty(self):
# Arrange
message = BasicMessage()
message.subject = self.random_helper.random_string()
message.from_email_address = self.random_helper.random_email_address()
message.plain_text_body = self.random_helper.random_string()
# Act
actual = validate_base_message(message)
# Assert
self.assertEqual(SendResult.Success, actual)
def test_validate_base_message_ReturnsSuccess_WhenSubjectAndFromRecipientAndApiTemplateIsNotEmpty(self):
# Arrange
message = BasicMessage()
message.subject = self.random_helper.random_string()
message.from_email_address = self.random_helper.random_email_address()
message.api_template = self.random_helper.random_int()
# Act
actual = validate_base_message(message)
# Assert
self.assertEqual(SendResult.Success, actual)
""" has_message_body """
def test_has_message_body_ReturnsFalse_WhenHtmlBodyAndPlainTextBodyAndApiTemplateIsEmpty(self):
# Arrange
message = BasicMessage()
message.html_body = None
message.plain_text_body = None
message.api_template = None
# Act
actual = has_message_body(message)
# Assert
self.assertFalse(actual)
def test_has_message_body_ReturnsTrue_WhenHtmlBodyAndApiTemplateIsEmptyAndPlainTextBodyIsNotEmpty(self):
# Arrange
message = BasicMessage()
message.html_body = None
message.plain_text_body = self.random_helper.random_string()
message.api_template = None
# Act
actual = has_message_body(message)
# Assert
self.assertTrue(actual)
    def test_has_message_body_ReturnsTrue_WhenHtmlBodyIsNotEmptyAndPlainTextBodyAndApiTemplateIsEmpty(self):
# Arrange
message = BasicMessage()
message.html_body = self.random_helper.random_string()
message.plain_text_body = None
message.api_template = None
# Act
actual = has_message_body(message)
# Assert
self.assertTrue(actual)
def test_has_message_body_ReturnsTrue_WhenApiTemplateIsNotEmptyAndPlainTextBodyAndHtmlBodyIsEmpty(self):
# Arrange
message = BasicMessage()
message.html_body = None
message.plain_text_body = None
message.api_template = self.random_helper.random_int()
# Act
actual = has_message_body(message)
# Assert
self.assertTrue(actual)
""" has_api_template """
def test_has_api_template_ReturnsTrue_WhenApiTemplateIsNotZero(self):
# Arrange
message = BasicMessage()
message.api_template = self.random_helper.random_int(1, 10)
# Act
actual = has_api_template(message)
# Assert
self.assertTrue(actual)
def test_has_api_template_ReturnsTrue_WhenApiTemplateIsNotMinValue(self):
# Arrange
message = BasicMessage()
message.api_template = self.random_helper.random_int(1, 10)
# Act
actual = has_api_template(message)
# Assert
self.assertTrue(actual)
def test_has_api_template_ReturnsFalse_WhenApiTemplateIsZero(self):
# Arrange
message = BasicMessage()
message.api_template = 0
# Act
actual = has_api_template(message)
# Assert
self.assertFalse(actual)
def test_has_api_template_ReturnsFalse_WhenApiTemplateIsLessThanZero(self):
# Arrange
message = BasicMessage()
message.api_template = -1
# Act
actual = has_api_template(message)
# Assert
self.assertFalse(actual)
""" validate_email_addresses (BasicMessage) """
def test_validate_email_addresses_BasicMessage_ReturnsNoRecipients_WhenToAndCcAndBccIsNone(self):
# Arrange
message = BasicMessage()
# Act
actual = validate_email_addresses(message)
# Assert
self.assertEqual(SendResult.RecipientValidationNoneInMessage, actual.result)
def test_validate_email_addresses_BasicMessage_ReturnsSuccess_WhenToIsNotEmpty(self):
# Arrange
message = BasicMessage()
message.to_email_address = [self.random_helper.random_email_address()]
# Act
actual = validate_email_addresses(message)
# Assert
self.assertEqual(SendResult.Success, actual.result)
def test_validate_email_addresses_BasicMessage_ReturnsSuccess_WhenCcIsNotEmpty(self):
# Arrange
message = BasicMessage()
message.cc_email_address = [self.random_helper.random_email_address()]
# Act
actual = validate_email_addresses(message)
# Assert
self.assertEqual(SendResult.Success, actual.result)
def test_validate_email_addresses_BasicMessage_ReturnsSuccess_WhenBccIsNotEmpty(self):
# Arrange
message = BasicMessage()
message.bcc_email_address = [self.random_helper.random_email_address()]
# Act
actual = validate_email_addresses(message)
# Assert
self.assertEqual(SendResult.Success, actual.result)
    def test_validate_email_addresses_BasicMessage_ReturnsTooManyRecipients_WhenEmailListHasTooManyRecipients(self):
# Arrange
num_in_list = int(maximumRecipientsPerMessage / 2)
message = BasicMessage()
message.to_email_address = self.random_helper.random_list_of_email_addresses(num_in_list)
message.cc_email_address = self.random_helper.random_list_of_email_addresses(num_in_list)
message.bcc_email_address = self.random_helper.random_list_of_email_addresses(num_in_list)
# Act
actual = validate_email_addresses(message)
# Assert
self.assertEqual(SendResult.RecipientValidationMaxExceeded, actual.result)
    def test_validate_email_addresses_BasicMessage_ReturnsTooManyRecipients_WhenToHasTooManyRecipients(self):
# Arrange
num_in_list = int(maximumRecipientsPerMessage + 1)
message = BasicMessage()
message.to_email_address = self.random_helper.random_list_of_email_addresses(num_in_list)
# Act
actual = validate_email_addresses(message)
# Assert
self.assertEqual(SendResult.RecipientValidationMaxExceeded, actual.result)
""" validate_recipients (BulkMessage) """
def test_validate_recipients_BulkMessage_ReturnsNoRecipients_WhenToIsNone(self):
# Arrange
message = BulkMessage()
message.to_recipient = None
# Act
actual = validate_recipients(message)
# Assert
self.assertEqual(SendResult.RecipientValidationMissingTo, actual.result)
def test_validate_recipients_BulkMessage_ReturnsNoRecipients_WhenToIsEmpty(self):
# Arrange
message = BulkMessage()
message.to_recipient = []
# Act
actual = validate_recipients(message)
# Assert
self.assertEqual(SendResult.RecipientValidationMissingTo, actual.result)
    def test_validate_recipients_BulkMessage_ReturnsTooManyRecipients_WhenToHasTooManyRecipients(self):
# Arrange
num_in_list = maximumRecipientsPerMessage + 1
message = BulkMessage()
message.to_recipient = self.random_helper.random_list_of_bulk_recipients(num_in_list)
# Act
actual = validate_recipients(message)
# Assert
self.assertEqual(SendResult.RecipientValidationMaxExceeded, actual.result)
""" has_subject """
def test_has_subject_ReturnsFalse_WhenSubjectIsEmpty(self):
message = BasicMessage()
message.subject = None
actual = has_subject(message)
self.assertFalse(actual)
def test_has_subject_ReturnsTrue_WhenSubjectIsNotEmpty(self):
message = BasicMessage()
message.subject = self.random_helper.random_string()
actual = has_subject(message)
self.assertTrue(actual)
""" has_from_email_address """
def test_has_from_email_address_ReturnsFalse_WhenFromEmailIsNone(self):
# Arrange
message = BasicMessage()
message.from_email_address = None
# Act
actual = has_from_email_address(message)
# Assert
self.assertFalse(actual)
def test_has_from_email_address_ReturnsFalse_WhenFromEmailIsInvalid(self):
# Arrange
message = BasicMessage()
message.from_email_address = EmailAddress(None)
# Act
actual = has_from_email_address(message)
# Assert
self.assertFalse(actual)
def test_has_from_email_address_ReturnsFalse_WhenFromEmailIsEmpty(self):
# Arrange
message = BasicMessage()
message.from_email_address = EmailAddress('')
# Act
actual = has_from_email_address(message)
# Assert
self.assertFalse(actual)
def test_has_from_email_address_ReturnsTrue_WhenFromEmailIsNotEmpty(self):
# Arrange
message = BasicMessage()
message.from_email_address = self.random_helper.random_email_address()
# Act
actual = has_from_email_address(message)
# Assert
self.assertTrue(actual)
""" has_valid_reply_to_email_address """
def test_has_valid_reply_to_email_address_ReturnsTrue_WhenReplyToEmailIsNone(self):
# Arrange
message = BasicMessage()
message.reply_to_email_address = None
# Act
actual = has_valid_reply_to_email_address(message)
# Assert
self.assertTrue(actual)
def test_has_valid_reply_to_email_address_ReturnsFalse_WhenReplyToEmailIsInvalid(self):
# Arrange
message = BasicMessage()
message.reply_to_email_address = EmailAddress(None)
# Act
actual = has_valid_reply_to_email_address(message)
# Assert
self.assertFalse(actual)
def test_has_valid_reply_to_email_address_ReturnsFalse_WhenReplyToEmailIsEmpty(self):
# Arrange
message = BasicMessage()
message.reply_to_email_address = EmailAddress('')
# Act
actual = has_valid_reply_to_email_address(message)
# Assert
self.assertFalse(actual)
def test_has_valid_reply_to_email_address_ReturnsTrue_WhenReplyToEmailIsNotEmpty(self):
# Arrange
message = BasicMessage()
message.reply_to_email_address = self.random_helper.random_email_address()
# Act
actual = has_valid_reply_to_email_address(message)
# Assert
self.assertTrue(actual)
""" get_full_recipient_count """
def test_get_full_recipient_count_BasicMessage_ReturnsGT0_WhenOnlyToRecipientsHasOneValue(self):
# Arrange
message = BasicMessage()
message.to_email_address = self.random_helper.random_list_of_email_addresses(1)
# Act
actual = get_full_recipient_count(message)
# Assert
self.assertTrue(actual > 0)
def test_get_full_recipient_count_BasicMessage_ReturnsGT0_WhenOnlyCcRecipientsHasOneValue(self):
# Arrange
message = BasicMessage()
message.cc_email_address = self.random_helper.random_list_of_email_addresses(1)
# Act
actual = get_full_recipient_count(message)
# Assert
self.assertTrue(actual > 0)
def test_get_full_recipient_count_BasicMessage_ReturnsGT0_WhenOnlyBccRecipientsHasOneValue(self):
# Arrange
message = BasicMessage()
message.bcc_email_address = self.random_helper.random_list_of_email_addresses(1)
# Act
actual = get_full_recipient_count(message)
# Assert
self.assertTrue(actual > 0)
def test_get_full_recipient_count_BasicMessage_Returns3_WhenEachRecipientsHasValue(self):
# Arrange
message = BasicMessage()
message.to_email_address = self.random_helper.random_list_of_email_addresses(1)
message.cc_email_address = self.random_helper.random_list_of_email_addresses(1)
message.bcc_email_address = self.random_helper.random_list_of_email_addresses(1)
# Act
actual = get_full_recipient_count(message)
# Assert
        self.assertEqual(3, actual)
def test_get_full_recipient_count_BasicMessage_Returns0_WhenNoRecipientsAdded(self):
# Arrange
message = BasicMessage()
# Act
actual = get_full_recipient_count(message)
# Assert
self.assertEqual(0, actual)
""" has_invalid_email_addresses (BasicMessage) """
def test_has_invalid_email_addresses_BasicMessage_ReturnsListOfOne_WhenToHasOneInvalid(self):
# Arrange
message = BasicMessage()
message.to_email_address = [EmailAddress(self.random_helper.random_string())]
# Act
actual = has_invalid_email_addresses(message)
# Assert
self.assertEqual(1, len(actual))
def test_has_invalid_email_addresses_BasicMessage_ReturnsListOfOne_WhenCcHasOneInvalid(self):
# Arrange
message = BasicMessage()
message.cc_email_address = [EmailAddress(self.random_helper.random_string())]
# Act
actual = has_invalid_email_addresses(message)
# Assert
self.assertEqual(1, len(actual))
def test_has_invalid_email_addresses_BasicMessage_ReturnsListOfOne_WhenBccHasOneInvalid(self):
# Arrange
message = BasicMessage()
message.bcc_email_address = [EmailAddress(self.random_helper.random_string())]
# Act
actual = has_invalid_email_addresses(message)
# Assert
self.assertEqual(1, len(actual))
def test_has_invalid_email_addresses_BasicMessage_ReturnsListOfThree_WhenEachRecipientHasOneInvalid(self):
# Arrange
message = BasicMessage()
message.to_email_address = [EmailAddress(self.random_helper.random_string())]
message.cc_email_address = [EmailAddress(self.random_helper.random_string())]
message.bcc_email_address = [EmailAddress(self.random_helper.random_string())]
# Act
actual = has_invalid_email_addresses(message)
# Assert
self.assertEqual(3, len(actual))
def test_has_invalid_email_addresses_BasicMessage_ReturnsNull_WhenNoInvalidRecipientsFound(self):
# Arrange
message = BasicMessage()
message.to_email_address = self.random_helper.random_list_of_email_addresses(1)
message.cc_email_address = self.random_helper.random_list_of_email_addresses(1)
message.bcc_email_address = self.random_helper.random_list_of_email_addresses(1)
# Act
actual = has_invalid_email_addresses(message)
# Assert
self.assertIsNone(actual)
""" has_invalid_recipients(BulkMessage) """
def test_has_invalid_recipients_BulkMessage_ReturnsListOfOne_WhenToHasOneInvalid(self):
# Arrange
message = BulkMessage()
message.to_recipient = [BulkRecipient(self.random_helper.random_string())]
# Act
actual = has_invalid_recipients(message)
# Assert
self.assertEqual(1, len(actual))
    def test_has_invalid_recipients_BulkMessage_ReturnsListOfThree_WhenToHasThreeInvalid(self):
# Arrange
message = BulkMessage()
message.to_recipient = [
BulkRecipient(self.random_helper.random_string()),
BulkRecipient(self.random_helper.random_string()),
BulkRecipient(self.random_helper.random_string())
]
# Act
actual = has_invalid_recipients(message)
# Assert
self.assertEqual(3, len(actual))
def test_has_invalid_recipients_BulkMessage_ReturnsNull_WhenNoInvalidRecipientsFound(self):
# Arrange
message = BulkMessage()
message.to_recipient = self.random_helper.random_list_of_bulk_recipients(3)
# Act
actual = has_invalid_recipients(message)
# Assert
self.assertIsNone(actual)
""" find_invalid_email_addresses(list of EmailAddress) """
def test_find_invalid_email_addresses_ListOfEmailAddress_ReturnsNull_WhenRecipientsIsNone(self):
# Arrange
addresses = None
# Act
# noinspection PyTypeChecker
actual = find_invalid_email_addresses(addresses)
# Assert
self.assertIsNone(actual)
def test_find_invalid_email_addresses_ListOfEmailAddress_ReturnsNull_WhenRecipientsIsEmpty(self):
# Arrange
addresses = []
# Act
actual = find_invalid_email_addresses(addresses)
# Assert
self.assertIsNone(actual)
def test_find_invalid_email_addresses_ListOfEmailAddress_ReturnsNull_WhenNoInvalidRecipientsFound(self):
# Arrange
addresses = [self.random_helper.random_email_address()]
# Act
actual = find_invalid_email_addresses(addresses)
# Assert
self.assertIsNone(actual)
def test_find_invalid_email_addresses_ListOfEmailAddress_ReturnsList_WhenInvalidRecipientsFound(self):
# Arrange
addresses = [EmailAddress(self.random_helper.random_string())]
# Act
actual = find_invalid_email_addresses(addresses)
# Assert
self.assertEqual(1, len(actual))
""" find_invalid_recipients(BulkRecipient message) """
def test_find_invalid_recipients_ListOfBulkRecipient_ReturnsNull_WhenRecipientsIsNone(self):
# Arrange
addresses = None
# Act
# noinspection PyTypeChecker
actual = find_invalid_recipients(addresses)
# Assert
self.assertIsNone(actual)
def test_find_invalid_recipients_ListOfBulkRecipient_ReturnsNull_WhenRecipientsIsEmpty(self):
# Arrange
addresses = []
# Act
actual = find_invalid_recipients(addresses)
# Assert
self.assertIsNone(actual)
def test_find_invalid_recipients_ListOfBulkRecipient_ReturnsNull_WhenNoInvalidRecipientsFound(self):
# Arrange
addresses = self.random_helper.random_list_of_bulk_recipients(1)
# Act
actual = find_invalid_recipients(addresses)
# Assert
self.assertIsNone(actual)
def test_find_invalid_recipients_ListOfBulkRecipient_ReturnsList_WhenInvalidRecipientsFound(self):
# Arrange
addresses = [BulkRecipient(self.random_helper.random_string())]
# Act
actual = find_invalid_recipients(addresses)
# Assert
self.assertEqual(1, len(actual))
""" has_valid_custom_headers """
def test_has_valid_custom_headers_ReturnsFalse_WhenKeyAndValueAreEmpty(self):
# Arrange
headers = [CustomHeader("", "")]
# Act
actual = has_valid_custom_headers(headers)
# Assert
self.assertFalse(actual)
def test_has_valid_custom_headers_ReturnsFalse_WhenKeyIsNotEmptyAndValueIsEmpty(self):
# Arrange
headers = [CustomHeader(self.random_helper.random_string(), "")]
# Act
actual = has_valid_custom_headers(headers)
# Assert
self.assertFalse(actual)
def test_has_valid_custom_headers_ReturnsFalse_WhenKeyIsEmptyAndValueIsNotEmpty(self):
# Arrange
headers = [CustomHeader("", self.random_helper.random_string())]
# Act
actual = has_valid_custom_headers(headers)
# Assert
self.assertFalse(actual)
def test_has_valid_custom_headers_ReturnsTrue_WhenListIsNone(self):
# Arrange
headers = None
# Act
# noinspection PyTypeChecker
actual = has_valid_custom_headers(headers)
# Assert
self.assertTrue(actual)
def test_has_valid_custom_headers_ReturnsTrue_WhenListIsEmpty(self):
# Arrange
headers = []
# Act
actual = has_valid_custom_headers(headers)
# Assert
self.assertTrue(actual)
def test_has_valid_custom_headers_ReturnsTrue_WhenListIsValid(self):
# Arrange
headers = [CustomHeader(self.random_helper.random_string(), self.random_helper.random_string())]
# Act
actual = has_valid_custom_headers(headers)
# Assert
self.assertTrue(actual)
""" validate_credentials """
def test_validate_credentials_ReturnsAuthenticationError_WhenServerIdAndApiKeyIsEmpty(self):
# Arrange
server_id = None
api_key = None
validator = SendValidator()
# Act
# noinspection PyTypeChecker
actual = validator.validate_credentials(server_id, api_key)
# Assert
self.assertEqual(SendResult.AuthenticationValidationFailed, actual.result)
def test_validate_credentials_ReturnsAuthenticationError_WhenServerIdIsNotEmptyAndApiKeyIsEmpty(self):
# Arrange
server_id = self.random_helper.random_server_id()
api_key = None
validator = SendValidator()
# Act
# noinspection PyTypeChecker
actual = validator.validate_credentials(server_id, api_key)
# Assert
self.assertEqual(SendResult.AuthenticationValidationFailed, actual.result)
def test_validate_credentials_ReturnsAuthenticationError_WhenApiKeyIsNotEmptyAndServerIdIsEmpty(self):
# Arrange
server_id = None
api_key = self.random_helper.random_string()
validator = SendValidator()
# Act
# noinspection PyTypeChecker
actual = validator.validate_credentials(server_id, api_key)
# Assert
self.assertEqual(SendResult.AuthenticationValidationFailed, actual.result)
def test_validate_credentials_ReturnsSuccess_WhenApiKeyAndServerIdIsNotEmpty(self):
# Arrange
server_id = self.random_helper.random_server_id()
api_key = self.random_helper.random_string()
validator = SendValidator()
# Act
actual = validator.validate_credentials(server_id, api_key)
# Assert
self.assertEqual(SendResult.Success, actual.result)
if __name__ == '__main__':
unittest.main()
|
11539439
|
from pathlib import Path
from alfasim_sdk._internal.alfacase import case_description
def generate_alfacase_file(
alfacase_description: case_description.CaseDescription, alfacase_file: Path
) -> None:
"""
Dump the case_description to the given alfacase_file, using YAML format.
    PvtModels whose mode is constants.PVT_MODEL_TABLE are dumped into separate files (.alfatable).
"""
_generate_alfatable_file_for_pvt_models_description(
alfacase_description.pvt_models, alfacase_file
)
alfacase_file_content = convert_description_to_alfacase(alfacase_description)
alfacase_file.write_text(alfacase_file_content, encoding="utf-8")
def _generate_alfatable_file_for_pvt_models_description(
pvt_models: case_description.PvtModelsDescription, alfacase_file: Path
) -> None:
"""
    Create a `.alfatable` file for each pvt_model whose mode is constants.PVT_MODEL_TABLE.
"""
from alfasim_sdk import generate_alfatable_file
for pvt_name, pvt_table_description in pvt_models.table_parameters.items():
alfatable_file = generate_alfatable_file(
alfacase_file=alfacase_file,
alfatable_filename=pvt_name,
description=pvt_table_description,
)
pvt_models.tables[pvt_name] = alfatable_file.name
pvt_models.table_parameters.clear()
def convert_description_to_alfacase(
alfacase_description: case_description.CaseDescription,
*,
enable_flow_style_on_numpy: bool = False,
remove_redundant_input_type_data: bool = True,
) -> str:
"""
Convert a given case (decorated with attrs) to YAML representation.
    The strictyaml conversion ("as_yaml") requires that all dict items are strings.
:param alfacase_description:
Alfasim case description.
:param enable_flow_style_on_numpy:
        Signal that numpy arrays should be dumped with flow style enabled.
enable_flow_style_on_numpy=False
.. code-block:: python
pressure:
- 1
- 2
enable_flow_style_on_numpy=True
.. code-block:: python
pressure: [1, 2]
:param remove_redundant_input_type_data:
For transient entries remove input type selector, and the unused constant or curve entries.
"""
import attr
from strictyaml import YAML
from .case_to_alfacase import convert_dict_to_valid_alfacase_format
case_description_dict = convert_dict_to_valid_alfacase_format(
attr.asdict(alfacase_description, recurse=False),
enable_flow_style_on_numpy=enable_flow_style_on_numpy,
remove_redundant_input_type_data=remove_redundant_input_type_data,
)
return YAML(case_description_dict).as_yaml()
def convert_alfacase_to_description(
file_alfacase: Path,
) -> case_description.CaseDescription:
"""
    Return a :class:`alfasim_sdk._internal.alfacase.case_description.CaseDescription` with all the information provided in file_alfacase.
"""
from alfasim_sdk._internal.alfacase.alfacase_to_case import load_case_description
from alfasim_sdk._internal.alfacase.alfacase_to_case import DescriptionDocument
return load_case_description(DescriptionDocument.from_file(file_alfacase))
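# Usage sketch (paths are hypothetical; assumes `case` is a valid CaseDescription):
#
#   case_file = Path("my_simulation.alfacase")
#   generate_alfacase_file(case, case_file)  # writes the YAML plus any .alfatable files
#   loaded = convert_alfacase_to_description(case_file)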
|
11539481
|
from collections import namedtuple
from durations.parser import extract_tokens
from durations.scales import Scale
from durations.exceptions import ScaleFormatError
from durations.constants import *
DurationRepresentation = namedtuple(
'DurationRepresentation',
['value', 'scale']
)
class Duration(object):
"""Duration representation class
    A Duration object parses an input duration representation
    string and provides a set of methods to retrieve and
    convert its value to other units.
Example:
>>> d = Duration('1m')
>>> d.to_seconds()
60.0
>>> d.to_hours()
0.02
>>> d = Duration('2d 3 hours')
>>> d.to_minutes()
3060.0
"""
def __init__(self, representation, *args, **kwargs):
self.representation = representation
self.parsed_durations = self.parse(self.representation)
self.seconds = self._compute_seconds_value()
def __str__(self):
return '<Duration {0}>'.format(self.representation)
def __repr__(self):
return self.__str__()
def _compute_seconds_value(self):
seconds = 0
for duration in self.parsed_durations:
seconds += duration.value * duration.scale.conversion_unit
return seconds
def parse(self, representation):
"""Parses a duration string representation
:param representation: duration as a string, example: '1d' (day),
'34minutes' (minutes), '485s' (seconds)...
:type representation: string
        :returns: the parsed duration representations
        :rtype: list of DurationRepresentation
"""
elements = extract_tokens(representation)
try:
scales = [DurationRepresentation(float(p[0]), Scale(p[1])) for p in elements]
except ValueError:
raise ScaleFormatError("Malformed duration representation: {0}".format(representation))
return scales
def to_centuries(self):
return round(self.seconds / float(SCALE_CENTURY_CONVERSION_UNIT), 2)
def to_decades(self):
return round(self.seconds / float(SCALE_DECADE_CONVERSION_UNIT), 2)
def to_years(self):
return round(self.seconds / float(SCALE_YEAR_CONVERSION_UNIT), 2)
def to_months(self):
return round(self.seconds / float(SCALE_MONTH_CONVERSION_UNIT), 2)
def to_weeks(self):
return round(self.seconds / float(SCALE_WEEK_CONVERSION_UNIT), 2)
def to_days(self):
return round(self.seconds / float(SCALE_DAY_CONVERSION_UNIT), 2)
def to_hours(self):
return round(self.seconds / float(SCALE_HOUR_CONVERSION_UNIT), 2)
def to_minutes(self):
return round(self.seconds / float(SCALE_MINUTE_CONVERSION_UNIT), 2)
def to_seconds(self):
return round(self.seconds / float(SCALE_SECOND_CONVERSION_UNIT), 2)
def to_miliseconds(self):
return round(self.seconds / float(SCALE_MILISECOND_CONVERSION_UNIT), 2)
|
11539494
|
import pytest
import os
dbm = pytest.importorskip('dbm')
def test_get(tmpdir):
path = str(tmpdir.join('test_dbm_extra.test_get'))
d = dbm.open(path, 'c')
x = d.get("42")
assert x is None
d.close()
def test_delitem(tmpdir):
path = str(tmpdir.join('test_dbm_extra.test_delitem'))
d = dbm.open(path, 'c')
with pytest.raises(KeyError):
del d['xyz']
def test_nonstring(tmpdir):
path = str(tmpdir.join('test_dbm_extra.test_nonstring'))
d = dbm.open(path, 'c')
with pytest.raises(TypeError):
d[123] = 'xyz'
with pytest.raises(TypeError):
d['xyz'] = 123
with pytest.raises(TypeError):
d['xyz'] = None
with pytest.raises(TypeError):
del d[123]
with pytest.raises(TypeError):
d[123]
with pytest.raises(TypeError):
123 in d
with pytest.raises(TypeError):
d.has_key(123)
with pytest.raises(TypeError):
d.setdefault(123, 'xyz')
with pytest.raises(TypeError):
d.setdefault('xyz', 123)
with pytest.raises(TypeError):
d.get(123)
assert dict(d) == {}
d.setdefault('xyz', '123')
assert dict(d) == {'xyz': '123'}
d.close()
def test_multiple_sets(tmpdir):
path = str(tmpdir.join('test_dbm_extra.test_multiple_sets'))
d = dbm.open(path, 'c')
d['xyz'] = '12'
d['xyz'] = '3'
d['xyz'] = '546'
assert dict(d) == {'xyz': '546'}
assert d['xyz'] == '546'
@pytest.mark.skipif("'__pypy__' not in sys.modules")
def test_extra():
with pytest.raises(TypeError):
dbm.datum(123)
with pytest.raises(TypeError):
dbm.datum(False)
def test_null():
db = dbm.open('test', 'c')
db['1'] = 'a\x00b'
db.close()
db = dbm.open('test', 'r')
assert db['1'] == 'a\x00b'
db.close()
def test_key_with_empty_value(tmpdir):
# this test fails on CPython too (at least on tannit), and the
# case shows up when gdbm is not installed and test_anydbm.py
    # falls back to dbm.
pytest.skip("test may fail on CPython too")
path = str(tmpdir.join('test_dbm_extra.test_key_with_empty_value'))
d = dbm.open(path, 'c')
assert 'key_with_empty_value' not in d
d['key_with_empty_value'] = ''
assert 'key_with_empty_value' in d
assert d['key_with_empty_value'] == ''
d.close()
def test_unicode_filename(tmpdir):
path = str(tmpdir) + os.sep + u'test_dbm_extra.test_unicode_filename'
d = dbm.open(path, 'c')
d.close()
|
11539496
|
import maya.cmds as mc
import maya.mel as mm
def ui():
    '''
    Build and display the Graph Editor Filter window.
    '''
# Window
win = 'graphFilterUI'
if mc.window(win,q=True,ex=True): mc.deleteUI(win)
win = mc.window(win,t='Graph Editor Filter',mxb=True,mnb=True,s=True,wh=[248,210])
# Layout
fl = mc.formLayout(numberOfDivisions=100)
# UI Elements
graphFilterAttrListTSL = mc.textScrollList('graphFilter_attrListTSL',w=120,nr=8,ams=True)
graphFilterModeRBG = mc.radioButtonGrp('graphFilter_modeRBG',label='Mode',labelArray2=['Replace','Append'],nrb=2,sl=1)
graphEditorB = mc.button(l='Graph Editor',c='mm.eval("GraphEditor")')
allCurveB = mc.button(l='All Curves',c='displayAllCurves()')
clearViewB = mc.button(l='Clear View',c='mc.selectionConnection("graphEditor1FromOutliner",e=True,clear=True)')
graphFilterFilterB = mc.button('graphFilter_filterB',l='Filter Selected',c='glTools.tools.graphFilter.filterCurves()')
graphFilterSelectB = mc.button('graphFilter_selectB',l='Select All',c='glTools.tools.graphFilter.selectAll()')
graphFilterClearB = mc.button('graphFilter_clearB',l='Clear list',c='mc.textScrollList("graphFilter_attrListTSL",e=True,ra=True)')
graphFilterUpdateB = mc.button('graphFilter_updateB',l='Update List',c='glTools.tools.graphFilter.updateAttrList()')
# Form Layout
mc.formLayout(fl,e=True,af=[(graphFilterAttrListTSL,'left',5),(graphFilterAttrListTSL,'bottom',5)],ap=[(graphFilterAttrListTSL,'right',5,50)],ac=[(graphFilterAttrListTSL,'top',5,graphFilterModeRBG)])
mc.formLayout(fl,e=True,af=[(graphFilterModeRBG,'left',5),(graphFilterModeRBG,'right',5)],ac=[(graphFilterModeRBG,'top',5,graphEditorB)])
mc.formLayout(fl,e=True,af=[(graphEditorB,'left',5),(graphEditorB,'top',5)],ap=[(graphEditorB,'right',5,33)])
mc.formLayout(fl,e=True,af=[(allCurveB,'top',5)],ap=[(allCurveB,'left',5,33),(allCurveB,'right',5,66)])
mc.formLayout(fl,e=True,af=[(clearViewB,'right',5),(clearViewB,'top',5)],ap=[(clearViewB,'left',5,66)])
mc.formLayout(fl,e=True,af=[(graphFilterFilterB,'right',5)],ap=[(graphFilterFilterB,'left',5,50)],ac=[(graphFilterFilterB,'top',5,graphFilterModeRBG)])
mc.formLayout(fl,e=True,af=[(graphFilterSelectB,'right',5)],ap=[(graphFilterSelectB,'left',5,50)],ac=[(graphFilterSelectB,'top',5,graphFilterFilterB)])
mc.formLayout(fl,e=True,af=[(graphFilterClearB,'right',5)],ap=[(graphFilterClearB,'left',5,50)],ac=[(graphFilterClearB,'top',5,graphFilterSelectB)])
mc.formLayout(fl,e=True,af=[(graphFilterUpdateB,'right',5)],ap=[(graphFilterUpdateB,'left',5,50)],ac=[(graphFilterUpdateB,'top',5,graphFilterClearB)])
# Update keyable attribute list
updateAttrList()
# Show window
mc.showWindow(win)
def updateAttrList():
    '''
    Rebuild the attribute list from the keyable attributes of the current selection.
    Returns the sorted attribute list.
    '''
# Clear attribute list
mc.textScrollList('graphFilter_attrListTSL',e=True,ra=True)
# Get current selection
sel = mc.ls(sl=True)
if not sel: return
# List all keyable attributes
    attrList = list(set(mc.listAttr(sel,k=True) or []))  # guard against None when no keyable attrs exist
attrList.sort()
# Update textScrollList
for attr in attrList: mc.textScrollList('graphFilter_attrListTSL',e=True,a=attr)
# Return result
return attrList
def selectAll():
    '''
    Select all attributes in the attribute list.
    '''
# Select all attributes in the list
for i in range(mc.textScrollList('graphFilter_attrListTSL',q=True,ni=True)):
mc.textScrollList('graphFilter_attrListTSL',e=True,sii=(i+1))
def displayAllCurves():
    '''
    Display anim curves for all objects in the graph editor list.
    '''
# Display all attribute curves
sel = mc.selectionConnection('graphEditorList',q=True,object=True)
for obj in sel: mc.selectionConnection('graphEditor1FromOutliner',e=True,select=obj)
def addCurveToEditor(attr):
    '''
    Add the given attribute of each selected object to the graph editor view.
    '''
# Get current selection
sel = mc.ls(sl=True)
for obj in sel:
objAttr = obj+'.'+attr
# Check attr
if mc.objExists(objAttr):
# Add to graphEditor
mc.selectionConnection('graphEditor1FromOutliner',e=True,select=objAttr)
def filterCurves():
    '''
    Filter the graph editor view to the attributes selected in the list,
    replacing or appending based on the mode setting.
    '''
# Check attribute list selection
if mc.textScrollList('graphFilter_attrListTSL',q=True,nsi=True):
# Check mode
if mc.radioButtonGrp('graphFilter_modeRBG',q=True,sl=True) == 1:
mc.selectionConnection('graphEditor1FromOutliner',e=True,clear=True)
attrs = mc.textScrollList('graphFilter_attrListTSL',q=True,si=True)
for attr in attrs: addCurveToEditor(attr)
# Update UI
mm.eval('GraphEditor')
mm.eval('SelectAllMarkingMenu')
mm.eval('buildSelectAllMM')
mm.eval('SelectAllMarkingMenuPopDown')
|
11539500
|
from jitcache import Cache
import time
import multiprocessing as mp
cache = Cache()
@cache.memoize
def slow_fn(input_1, input_2):
print("Slow Function Called")
time.sleep(1)
return input_1 * input_2
n_processes = 10
process_list = []
# Create a set of processes who will request the same value
for i in range(n_processes):
p = mp.Process(target=slow_fn, args=(10, 4))
process_list.append(p)
# Start each process
for p in process_list:
p.start()
# Wait for completion
for p in process_list:
p.join()
# Print the value that they tried to compute
print(slow_fn(10, 4))
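# Expected behavior (an assumption about jitcache's cross-process memoization):
# "Slow Function Called" should be printed only once, or at most a handful of
# times, because processes requesting the same (10, 4) key share one
# computation, and the final print shows the cached result 40.
# Note: on platforms that use the 'spawn' start method (Windows, recent macOS),
# this module-level process creation needs an `if __name__ == '__main__':` guard.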
|
11539581
|
import logging
import os
from typing import List
from xml.etree import ElementTree as ET
from dug import utils as utils
from ._base import DugElement, FileParser, Indexable, InputFile
logger = logging.getLogger('dug')
class NIDAParser(FileParser):
    # Class for parsing a NIDA data dictionary into a set of Dug Elements
@staticmethod
def parse_study_name_from_filename(filename: str):
        # Parse the study name from the xml filename, if present. Return None if the filename isn't in the right format to extract an id.
stemname = os.path.splitext( os.path.basename(filename) )[0]
if stemname.startswith("NIDA-"):
sn = stemname
for s in ["-Dictionary", "_DD"]:
                sn = sn.removesuffix(s)  # removesuffix is a no-op when the suffix is absent
return sn
return None
def __call__(self, input_file: InputFile) -> List[Indexable]:
logger.debug(input_file)
tree = ET.parse(input_file)
root = tree.getroot()
study_id = root.attrib['study_id']
participant_set = root.get('participant_set','0')
# Parse study name from file handle
study_name = self.parse_study_name_from_filename(str(input_file))
if study_name is None:
err_msg = f"Unable to parse NIDA study name from data dictionary: {input_file}!"
logger.error(err_msg)
raise IOError(err_msg)
elements = []
for variable in root.iter('variable'):
elem = DugElement(elem_id=f"{variable.attrib['id']}.p{participant_set}",
name=variable.find('name').text,
desc=variable.find('description').text.lower(),
elem_type="DbGaP",
collection_id=f"{study_id}.p{participant_set}",
collection_name=study_name)
# Create NIDA links as study/variable actions
elem.collection_action = utils.get_nida_study_link(study_id=study_id)
# Add to set of variables
logger.debug(elem)
elements.append(elem)
        # This parser produces only variable elements; no concepts are created here
return elements
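# Illustrative input (inferred from the parsing logic above; names and values
# are made up): a file named NIDA-CSP-1020-Dictionary.xml containing
#
#   <dictionary study_id="NIDA-CSP-1020" participant_set="0">
#     <variable id="v001">
#       <name>age</name>
#       <description>Age in years</description>
#     </variable>
#   </dictionary>
#
# would produce one DugElement with elem_id "v001.p0" and collection_id
# "NIDA-CSP-1020.p0", linked via utils.get_nida_study_link.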
|
11539631
|
from pygame import Rect
from albow.resource import get_image
class ImageArray(object):
def __init__(self, image, shape):
self.image = image
self.shape = shape
if isinstance(shape, tuple):
self.nrows, self.ncols = shape
else:
self.nrows = 1
self.ncols = shape
iwidth, iheight = image.get_size()
self.size = iwidth // self.ncols, iheight // self.nrows
    def __len__(self):
        # total number of sub-images; for an int shape this equals shape (1 * ncols),
        # and for a tuple shape it avoids returning a non-int from __len__
        return self.nrows * self.ncols
def __getitem__(self, index):
image = self.image
nrows = self.nrows
if nrows == 1:
row = 0
col = index
else:
row, col = index
        # compute the sub-image rect from the precomputed cell size
        width, height = self.size
        left = width * col
        top = height * row
        return image.subsurface(left, top, width, height)
def get_rect(self):
return Rect((0, 0), self.size)
image_array_cache = {}
def get_image_array(name, shape, **kwds):
result = image_array_cache.get(name)
if not result:
result = ImageArray(get_image(name, **kwds), shape)
image_array_cache[name] = result
return result
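# Usage sketch (image names and shapes are hypothetical): for a sheet laid out
# as 2 rows x 4 columns,
#
#   frames = get_image_array("explosion.png", (2, 4))
#   top_left = frames[0, 0]      # 2-D shape: index with (row, col)
#   strip = get_image_array("buttons.png", 3)
#   first = strip[0]             # 1-D shape: index with a single int
#
# Note the cache is keyed by name only, so requesting the same name with a
# different shape returns the first cached array.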
|
11539632
|
import pathlib
from . import config
from cassandra.cluster import Cluster
from cassandra.auth import PlainTextAuthProvider
from cassandra.cqlengine import connection
BASE_DIR = pathlib.Path(__file__).resolve().parent
SOURCE_DIR = BASE_DIR / 'ignored'
if not SOURCE_DIR.exists():
SOURCE_DIR = BASE_DIR / 'decrypted'
CLUSTER_BUNDLE = str( SOURCE_DIR / 'astradb_connect.zip')
settings = config.get_settings()
ASTRA_DB_CLIENT_ID = settings.db_client_id
ASTRA_DB_CLIENT_SECRET = settings.db_client_secret
def get_cluster():
cloud_config= {
'secure_connect_bundle': CLUSTER_BUNDLE
}
auth_provider = PlainTextAuthProvider(ASTRA_DB_CLIENT_ID, ASTRA_DB_CLIENT_SECRET)
return Cluster(cloud=cloud_config, auth_provider=auth_provider)
def get_session():
cluster = get_cluster()
session = cluster.connect()
connection.register_connection(str(session), session=session)
connection.set_default_connection(str(session))
return session
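# Usage sketch (assumes a valid secure-connect bundle and credentials):
#
#   session = get_session()
#   row = session.execute("SELECT release_version FROM system.local").one()
#   print(row.release_version)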
|
11539661
|
import json
from datetime import datetime
from std_bounties.constants import STANDARD_BOUNTIES_V1, STANDARD_BOUNTIES_V2, STANDARD_BOUNTIES_V2_1, STANDARD_BOUNTIES_V2_2, STANDARD_BOUNTIES_V2_3, STANDARD_BOUNTIES_V2_4
def to_serializable(val):
"""JSON serializer for objects not serializable by default"""
if isinstance(val, datetime):
return val.isoformat()
elif hasattr(val, '__dict__'):
return val.__dict__
return val
class Message:
receipt_handle = ''
event = ''
bounty_id = -1
fulfillment_id = -1
message_deduplication_id = ''
transaction_from = ''
transaction_hash = ''
event_timestamp = -1
    event_date = None
    contract_method_inputs = {}
    contract_event_data = {}
    contract_version = ''
@staticmethod
def from_event(event):
if not event:
raise ValueError('Can\'t create message without event')
elif event.__class__ != dict:
raise TypeError('Event argument was not a dict')
message_attributes = event['MessageAttributes']
event_timestamp = message_attributes['TimeStamp']['StringValue']
version = ''
if message_attributes['ContractVersion']['StringValue'] == 'v2':
version = STANDARD_BOUNTIES_V2
elif message_attributes['ContractVersion']['StringValue'] == 'v2.1':
version = STANDARD_BOUNTIES_V2_1
elif message_attributes['ContractVersion']['StringValue'] == 'v2.2':
version = STANDARD_BOUNTIES_V2_2
elif message_attributes['ContractVersion']['StringValue'] == 'v2.3':
version = STANDARD_BOUNTIES_V2_3
elif message_attributes['ContractVersion']['StringValue'] == 'v2.4':
version = STANDARD_BOUNTIES_V2_4
else:
version = STANDARD_BOUNTIES_V1
return Message(
receipt_handle=event['ReceiptHandle'],
event=message_attributes['Event']['StringValue'],
bounty_id=int(message_attributes['BountyId']['StringValue']),
fulfillment_id=int(
message_attributes['FulfillmentId']['StringValue']),
message_deduplication_id=message_attributes['MessageDeduplicationId']['StringValue'],
transaction_from=message_attributes['TransactionFrom']['StringValue'],
transaction_hash=message_attributes['TransactionHash']['StringValue'],
event_timestamp=event_timestamp,
event_date=datetime.fromtimestamp(int(event_timestamp)),
contract_method_inputs=json.loads(
message_attributes['ContractMethodInputs']['StringValue']),
contract_event_data=json.loads(
message_attributes['ContractEventData']['StringValue']),
contract_version=version
)
@staticmethod
def from_string(string):
if not string:
raise ValueError('Can\'t create message without string')
elif string.__class__ != str:
raise TypeError('Event argument was not a string')
dictionary = json.loads(string)
dictionary['event_date'] = datetime.strptime(
dictionary['event_date'], '%Y-%m-%dT%H:%M:%S')
return Message.from_dict(dictionary)
@staticmethod
def from_dict(dictionary):
message = Message()
message.__dict__.update(dictionary)
return message
def __init__(self, *args, **kwargs):
if kwargs:
self.__dict__.update(kwargs)
def __str__(self):
return json.dumps(self.__dict__, indent=4, default=to_serializable)
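# Round-trip sketch: str(message) dumps __dict__ as JSON (event_date via
# isoformat), and Message.from_string() re-parses it. This works because
# event_date is built with datetime.fromtimestamp(int(...)), so it carries no
# microseconds and matches the '%Y-%m-%dT%H:%M:%S' format used in from_string.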
|
11539703
|
import pytest # type: ignore
from typing import Any
from trio_typing import TaskStatus
import trio
import trio.testing
from .. import open_service_nursery
async def test_basic(autojump_clock: trio.testing.MockClock) -> None:
record = []
async with open_service_nursery() as nursery:
@nursery.start_soon
async def background_task() -> None:
try:
await trio.sleep_forever()
finally:
record.append("background_task exiting")
(task,) = nursery.child_tasks
assert "background_task" in task.name
nursery.cancel_scope.cancel()
with trio.CancelScope(shield=True):
await trio.sleep(1)
record.append("body exiting")
await trio.sleep(0)
pytest.fail("should've been cancelled") # pragma: no cover
assert nursery.cancel_scope.cancelled_caught
assert record == ["body exiting", "background_task exiting"]
async def test_start(autojump_clock: trio.testing.MockClock) -> None:
record = []
async def sleep_then_start(val: int, *, task_status: TaskStatus[int]) -> None:
await trio.sleep(1)
task_status.started(val)
try:
await trio.sleep(10)
record.append("background task finished") # pragma: no cover
finally:
record.append("background task exiting")
async def shielded_sleep_then_start(*, task_status: TaskStatus[None]) -> None:
with trio.CancelScope(shield=True):
await trio.sleep(1)
task_status.started()
await trio.sleep(10)
async with open_service_nursery() as nursery:
# Child can be cancelled normally while it's starting
with trio.move_on_after(0.5) as scope:
await nursery.start(sleep_then_start, 1)
assert scope.cancelled_caught
assert not nursery.child_tasks
# If started() is the first thing to notice a cancellation, the task
# stays in the old nursery and remains unshielded
with trio.move_on_after(0.5) as scope:
await nursery.start(shielded_sleep_then_start)
assert scope.cancelled_caught
assert not nursery.child_tasks
assert trio.current_time() == 1.5
# Otherwise, once started() is called the child is shielded until
# the 'async with' block exits.
assert 42 == await nursery.start(sleep_then_start, 42)
assert trio.current_time() == 2.5
nursery.cancel_scope.cancel()
with trio.CancelScope(shield=True):
await trio.sleep(1)
record.append("parent task finished")
assert trio.current_time() == 3.5
assert record == ["parent task finished", "background task exiting"]
async def test_problems() -> None:
async with open_service_nursery() as nursery:
with pytest.raises(TypeError) as info:
nursery.start_soon(trio.sleep)
assert "missing 1 required positional argument" in str(info.value)
with pytest.raises(TypeError) as info:
nursery.start_soon(trio.sleep(1)) # type: ignore
assert "Trio was expecting an async function" in str(info.value)
with pytest.raises(TypeError) as info:
nursery.start_soon(int, 42) # type: ignore
assert "appears to be synchronous" in str(info.value)
first_call = True
def evil() -> Any:
nonlocal first_call
if first_call:
first_call = False
return 42
else:
return trio.sleep(0)
with pytest.raises(trio.TrioInternalError) as info:
nursery.start_soon(evil)
assert "all bets are off at this point" in str(info.value)
|
11539709
|
import logging, os, sys, json, torch
import torch.nn as nn
from torch.utils.data.dataset import Dataset
from torch.utils.data import DataLoader
from torch.nn import MSELoss, CrossEntropyLoss
import pytorch_lightning as pl
from transformers import AutoTokenizer, AutoModelForTokenClassification, AutoConfig, Trainer, TrainingArguments
from pytorch_lightning.callbacks import EarlyStopping
from nervaluate import Evaluator
import numpy as np
class TransformerModel(pl.LightningModule):
def __init__(self, model_name="dumitrescustefan/bert-base-romanian-cased-v1", tokenizer_name=None, lr=2e-05,
model_max_length=512, bio2tag_list=[], tag_list=[]):
super().__init__()
if tokenizer_name is None or tokenizer_name == "":
tokenizer_name = model_name
print("Loading AutoModel [{}] ...".format(model_name))
self.model_name = model_name
self.tokenizer = AutoTokenizer.from_pretrained(tokenizer_name, strip_accents=False)
self.model = AutoModelForTokenClassification.from_pretrained(model_name, num_labels=len(bio2tag_list))
self.dropout = nn.Dropout(0.2)
self.lr = lr
self.model_max_length = model_max_length
self.bio2tag_list = bio2tag_list
self.tag_list = tag_list
self.num_labels = len(bio2tag_list)
self.train_loss = []
self.valid_y_hat = []
self.valid_y = []
self.valid_loss = []
self.test_y_hat = []
self.test_y = []
self.test_loss = []
# check cls, sep and pad tokens
if self.tokenizer.cls_token_id is None:
print(f"*** Warning, tokenizer {tokenizer_name} has no defined CLS token: sequences will not be marked with special chars! ***")
if self.tokenizer.sep_token_id is None:
print(f"*** Warning, tokenizer {tokenizer_name} has no defined SEP token: sequences will not be marked with special chars! ***")
if self.tokenizer.pad_token_id is None:
print(f"*** Warning, tokenizer {tokenizer_name} has no defined PAD token: sequences will be padded with 0 by default! ***")
def forward(self, input_ids, attention_mask, labels):
output = self.model(
input_ids=input_ids,
attention_mask=attention_mask,
labels=labels,
return_dict=True
)
return output["loss"], output["logits"]
def training_step(self, batch, batch_idx):
input_ids = batch["input_ids"]
attention_mask = batch["attention_mask"]
labels = batch["labels"]
loss, logits = self(input_ids, attention_mask, labels)
self.train_loss.append(loss.detach().cpu().numpy())
return {"loss": loss}
def validation_step(self, batch, batch_idx):
input_ids = batch["input_ids"]
attention_mask = batch["attention_mask"]
labels = batch["labels"]
token_idx = batch["token_idx"]
loss, logits = self(input_ids, attention_mask, labels) # logits is [batch_size, seq_len, num_classes]
batch_size = logits.size()[0]
batch_pred = torch.argmax(logits.detach().cpu(), dim=-1).tolist() # reduce to [batch_size, seq_len] as list
batch_gold = labels.detach().cpu().tolist() # [batch_size, seq_len] as list
batch_token_idx = token_idx.detach().cpu().tolist()
for batch_idx in range(batch_size):
pred, gold, idx = batch_pred[batch_idx], batch_gold[batch_idx], batch_token_idx[batch_idx]
y_hat, y = [], []
for i in range(0, max(idx) + 1): # for each sentence
pos = idx.index(i) # find next token index and get pred and gold
y_hat.append(pred[pos])
y.append(gold[pos])
self.valid_y_hat.append(y_hat)
self.valid_y.append(y)
self.valid_loss.append(loss.detach().cpu().numpy())
return {"loss": loss}
def validation_epoch_end(self, outputs):
print()
mean_val_loss = sum(self.valid_loss) / len(self.valid_loss)
gold, pred = [], []
for y, y_hat in zip(self.valid_y, self.valid_y_hat):
gold.append([self.bio2tag_list[token_id] for token_id in y])
pred.append([self.bio2tag_list[token_id] for token_id in y_hat])
evaluator = Evaluator(gold, pred, tags=self.tag_list, loader="list")
results, results_by_tag = evaluator.evaluate()
self.log("valid/avg_loss", mean_val_loss, prog_bar=True)
self.log("valid/ent_type", results["ent_type"]["f1"])
self.log("valid/partial", results["partial"]["f1"])
self.log("valid/strict", results["strict"]["f1"])
self.log("valid/exact", results["exact"]["f1"])
self.valid_y_hat = []
self.valid_y = []
self.valid_loss = []
def test_step(self, batch, batch_idx):
input_ids = batch["input_ids"]
attention_mask = batch["attention_mask"]
labels = batch["labels"]
token_idx = batch["token_idx"]
loss, logits = self(input_ids, attention_mask, labels) # logits is [batch_size, seq_len, num_classes]
batch_size = logits.size()[0]
batch_pred = torch.argmax(logits.detach().cpu(), dim=-1).tolist() # reduce to [batch_size, seq_len] as list
batch_gold = labels.detach().cpu().tolist() # [batch_size, seq_len] as list
batch_token_idx = token_idx.detach().cpu().tolist()
for batch_idx in range(batch_size):
pred, gold, idx = batch_pred[batch_idx], batch_gold[batch_idx], batch_token_idx[batch_idx]
y_hat, y = [], []
for i in range(0, max(idx) + 1): # for each sentence
pos = idx.index(i) # find next token index and get pred and gold
y_hat.append(pred[pos])
y.append(gold[pos])
self.test_y_hat.append(y_hat)
self.test_y.append(y)
self.test_loss.append(loss.detach().cpu().numpy())
def test_epoch_end(self, outputs):
mean_val_loss = sum(self.test_loss) / len(self.test_loss)
gold, pred = [], []
for y, y_hat in zip(self.test_y, self.test_y_hat):
gold.append([self.bio2tag_list[token_id] for token_id in y])
pred.append([self.bio2tag_list[token_id] for token_id in y_hat])
evaluator = Evaluator(gold, pred, tags=self.tag_list, loader="list")
results, results_by_tag = evaluator.evaluate()
self.log("test/avg_loss", mean_val_loss, prog_bar=True)
self.log("test/ent_type", results["ent_type"]["f1"])
self.log("test/partial", results["partial"]["f1"])
self.log("test/strict", results["strict"]["f1"])
self.log("test/exact", results["exact"]["f1"])
import pprint
print("_" * 120)
print("\n\n Test results: \n")
pprint.pprint(results["strict"])
print("\n Per class Strict-F1 values:")
for cls in self.tag_list:
print(f'\t {cls} : \t{results_by_tag[cls]["strict"]["f1"]:.3f}')
self.test_y_hat = []
self.test_y = []
self.test_loss = []
def configure_optimizers(self):
return torch.optim.AdamW([p for p in self.parameters() if p.requires_grad], lr=self.lr, eps=1e-08)
    def predict(self, input_string):
        input_ids = self.tokenizer.encode(input_string, add_special_tokens=False)
        attention_mask = [1] * len(input_ids)
        # convert to tensors with a batch dimension and run the model
        output = self.model(
            input_ids=torch.LongTensor(input_ids).unsqueeze(0),
            attention_mask=torch.LongTensor(attention_mask).unsqueeze(0),
            return_dict=True
        )
        logits = output["logits"]
        # extract results
        indices = torch.argmax(logits.detach().cpu(), dim=-1).squeeze(dim=0).tolist()  # reduce to [seq_len] as list
        for id, ind in zip(input_ids, indices):
            print(f"\t[{self.tokenizer.decode(id)}] -> {ind}")
class MyDataset(Dataset):
def __init__(self, instances):
self.instances = []
# run check
for instance in instances:
ok = True
if len(instance["ner_ids"]) != len(instance["tokens"]):
print("Different length ner_tags found")
ok = False
else:
for tag, token in zip(instance["ner_ids"], instance["tokens"]):
if token.strip() == "":
ok = False
print("Empty token found")
if ok:
self.instances.append(instance)
def __len__(self):
return len(self.instances)
def __getitem__(self, i):
return self.instances[i]
class MyCollator(object):
def __init__(self, tokenizer, max_seq_len):
self.tokenizer = tokenizer
self.max_seq_len = max_seq_len
def __call__(self, input_batch):
batch_input_ids, batch_labels, batch_attention, batch_token_idx = [], [], [], []
max_len = 0
for instance in input_batch:
instance_ids, instance_labels, instance_attention, instance_token_idx = [], [], [], []
for i in range(len(instance["tokens"])):
subids = self.tokenizer.encode(instance["tokens"][i], add_special_tokens=False)
sublabels = [instance["ner_ids"][i]]
if len(subids) > 1: # we have a word split in more than 1 subids, fill appropriately
filler_sublabel = sublabels[0] if sublabels[0] % 2 == 0 else sublabels[0] + 1
sublabels.extend([filler_sublabel] * (len(subids) - 1))
instance_ids.extend(subids) # extend with the number of subids
instance_labels.extend(sublabels) # extend with the number of subtags
instance_token_idx.extend([i] * len(subids)) # extend with the id of the token
assert len(subids) == len(sublabels) # check for possible errors in the dataset
if len(instance_ids) != len(instance_labels):
print(len(instance_ids))
print(len(instance_labels))
print(instance_ids)
print(instance_labels)
assert len(instance_ids) == len(instance_labels)
# cut to max sequence length, if needed
if len(instance_ids) > self.max_seq_len - 2:
instance_ids = instance_ids[:self.max_seq_len - 2]
instance_labels = instance_labels[:self.max_seq_len - 2]
instance_token_idx = instance_token_idx[:self.max_seq_len - 2]
# prepend and append special tokens, if needed
#print()
#print(instance_ids)
if self.tokenizer.cls_token_id and self.tokenizer.sep_token_id:
instance_ids = [self.tokenizer.cls_token_id] + instance_ids + [self.tokenizer.sep_token_id]
instance_labels = [0] + instance_labels + [0]
instance_token_idx = [-1] + instance_token_idx # no need to pad the last, will do so automatically at return
#print(instance_ids)
instance_attention = [1] * len(instance_ids)
# update max_len for later padding
max_len = max(max_len, len(instance_ids))
# add to batch
batch_input_ids.append(torch.LongTensor(instance_ids))
batch_labels.append(torch.LongTensor(instance_labels))
batch_attention.append(torch.LongTensor(instance_attention))
batch_token_idx.append(torch.LongTensor(instance_token_idx))
return {
"input_ids": torch.nn.utils.rnn.pad_sequence(batch_input_ids, batch_first=True,
padding_value=self.tokenizer.pad_token_id if self.tokenizer.pad_token_id else 0),
"attention_mask": torch.nn.utils.rnn.pad_sequence(batch_attention, batch_first=True, padding_value=0),
"labels": torch.nn.utils.rnn.pad_sequence(batch_labels, batch_first=True, padding_value=0),
"token_idx": torch.nn.utils.rnn.pad_sequence(batch_token_idx, batch_first=True, padding_value=-1)
}
def run_evaluation(
automodel_name: str,
tokenizer_name: str,
train_file: str = None,
validation_file: str = None,
test_file: str = None,
dataset_name: str = None,
gpus: int = 1,
batch_size: int = 8,
accumulate_grad_batches: int = 1,
lr: float = 3e-5,
model_max_length: int = 512,
experiment_iterations: int = 1,
results_file: str = "results_ronec_v2.json"
):
print(f"Running {experiment_iterations} experiment(s) with model / tokenizer {automodel_name} / {tokenizer_name}")
if dataset_name != "":
print(f"\t with dataset {dataset_name}")
if train_file != "":
print(f"\t with training file {train_file}")
if validation_file != "":
print(f"\t with validation file {validation_file}")
if test_file != "":
print(f"\t with test file {test_file}")
if dataset_name == "" and (train_file == "" or validation_file == "" or test_file == ""):
print("\n Either a dataset or train/validation/test files must be given.")
return
print("\t batch size is {}, accumulate grad batches is {}, final batch_size is {}\n".format(
batch_size,
accumulate_grad_batches,
batch_size * accumulate_grad_batches)
)
# load data
if dataset_name == "":
import random
with open(train_file, "r", encoding="utf8") as f:
train_data = json.load(f)#[:100]
with open(validation_file, "r", encoding="utf8") as f:
validation_data = json.load(f)
with open(test_file, "r", encoding="utf8") as f:
test_data = json.load(f)
else:
from datasets import load_dataset
dataset = load_dataset(dataset_name)
print(dataset)
sys.exit(0)
    # deduce the bio2 tag list (ordered to match the dataset's ner_ids) and the
    # simple tag list required by nervaluate; building the list from the paired
    # ner_ids/ner_tags keeps label indices aligned with the training labels
    id2tag = {}  # ner_id -> BIO2 tag
    for instance in train_data + validation_data + test_data:
        for tag_id, tag in zip(instance["ner_ids"], instance["ner_tags"]):
            id2tag[tag_id] = tag
    bio2tags = [id2tag[i] for i in sorted(id2tag)]  # index in this list == ner_id
    print(f"Dataset contains {len(bio2tags)} BIO2 classes: {bio2tags}.")
    tags = sorted(set(tag[2:] if tag.startswith(("B-", "I-")) else tag for tag in bio2tags))  # strip B-/I- prefixes
    print(f"\nThere are {len(tags)} classes: {tags}")
# init tokenizer and start loading data
tokenizer = AutoTokenizer.from_pretrained(tokenizer_name, strip_accents=False)
print("Loading data...")
train_dataset = MyDataset(train_data)
val_dataset = MyDataset(validation_data)
test_dataset = MyDataset(test_data)
my_collator = MyCollator(tokenizer=tokenizer, max_seq_len=model_max_length)
train_dataloader = DataLoader(train_dataset, batch_size=batch_size, num_workers=1, shuffle=True,
collate_fn=my_collator, pin_memory=True)
val_dataloader = DataLoader(val_dataset, batch_size=batch_size, num_workers=1, shuffle=False,
collate_fn=my_collator, pin_memory=True)
test_dataloader = DataLoader(test_dataset, batch_size=batch_size, num_workers=1, shuffle=False,
collate_fn=my_collator, pin_memory=True)
print("Train dataset has {} instances.".format(len(train_dataset)))
print("Valid dataset has {} instances.".format(len(val_dataset)))
print("Test dataset has {} instances.\n".format(len(test_dataset)))
itt = 0
valid_loss = []
valid_ent_type = []
valid_partial = []
valid_strict = []
valid_exact = []
test_loss = []
test_ent_type = []
test_partial = []
test_strict = []
test_exact = []
while itt < experiment_iterations:
print("Running experiment {}/{}".format(itt + 1, experiment_iterations))
model = TransformerModel(
model_name=automodel_name,
lr=lr,
model_max_length=model_max_length,
            bio2tag_list=sorted(bio2tags),
tag_list=tags
)
early_stop = EarlyStopping(
monitor='valid/strict',
min_delta=0.001,
patience=5,
verbose=True,
mode='max'
)
trainer = pl.Trainer(
gpus=gpus,
callbacks=[early_stop],
# limit_train_batches=10,
# limit_val_batches=2,
accumulate_grad_batches=accumulate_grad_batches,
gradient_clip_val=1.0,
checkpoint_callback=False
)
trainer.fit(model, train_dataloader, val_dataloader)
print("\nEvaluating model on the VALIDATION dataset:")
result_valid = trainer.test(model, val_dataloader)
print("\nEvaluating model on the TEST dataset:")
result_test = trainer.test(model, test_dataloader)
with open("results_ronec_{}_of_{}.json".format(itt + 1, experiment_iterations), "w") as f:
json.dump(result_test[0], f, indent=4, sort_keys=True)
valid_loss.append(result_valid[0]['test/avg_loss'])
valid_ent_type.append(result_valid[0]['test/ent_type'])
valid_partial.append(result_valid[0]['test/partial'])
valid_strict.append(result_valid[0]['test/strict'])
valid_exact.append(result_valid[0]['test/exact'])
test_loss.append(result_test[0]['test/avg_loss'])
test_ent_type.append(result_test[0]['test/ent_type'])
test_partial.append(result_test[0]['test/partial'])
test_strict.append(result_test[0]['test/strict'])
test_exact.append(result_test[0]['test/exact'])
itt += 1
print("Done, writing results...\n")
result = {
"valid_loss": sum(valid_loss) / experiment_iterations,
"valid_ent_type": sum(valid_ent_type) / experiment_iterations,
"valid_partial": sum(valid_partial) / experiment_iterations,
"valid_strict": sum(valid_strict) / experiment_iterations,
"valid_exact": sum(valid_exact) / experiment_iterations,
"test_loss": sum(test_loss) / experiment_iterations,
"test_ent_type": sum(test_ent_type) / experiment_iterations,
"test_partial": sum(test_partial) / experiment_iterations,
"test_strict": sum(test_strict) / experiment_iterations,
"test_exact": sum(test_exact) / experiment_iterations
}
with open(results_file, "w") as f:
json.dump(result, f, indent=4, sort_keys=True)
print("\nFinal averaged results on TEST data: ")
print(result)
if __name__ == "__main__":
from argparse import ArgumentParser
parser = ArgumentParser() # todo redo defaults
parser.add_argument('--gpus', type=int, default=1)
parser.add_argument('--batch_size', type=int, default=8)
parser.add_argument('--accumulate_grad_batches', type=int, default=1)
parser.add_argument('--model_name', type=str,
default="dumitrescustefan/bert-base-romanian-cased-v1")
parser.add_argument('--tokenizer_name', type=str, default="")
parser.add_argument("--dataset_name", type=str, default="")
parser.add_argument("--train_file", type=str, default="../data/train.json")
parser.add_argument("--validation_file", type=str, default="../data/valid.json")
parser.add_argument("--test_file", type=str, default="../data/test.json")
parser.add_argument('--lr', type=float, default=3e-05)
parser.add_argument('--model_max_length', type=int, default=512)
parser.add_argument('--experiment_iterations', type=int, default=1)
parser.add_argument('--results_file', type=str, default="ronec_v2_results.json")
args = parser.parse_args()
if args.tokenizer_name == "":
args.tokenizer_name = args.model_name
run_evaluation(
automodel_name=args.model_name,
tokenizer_name=args.tokenizer_name,
train_file=args.train_file,
validation_file=args.validation_file,
test_file=args.test_file,
dataset_name=args.dataset_name,
gpus=args.gpus,
batch_size=args.batch_size,
accumulate_grad_batches=args.accumulate_grad_batches,
        lr=args.lr,
        model_max_length=args.model_max_length,
        experiment_iterations=args.experiment_iterations,
        results_file=args.results_file
)
|
11539739
|
import numpy as np
import tensorflow as tf
import elbow.util as util
from elbow.structure import unpackRV
from elbow.conditional_dist import ConditionalDistribution
from elbow.transforms import DeterministicTransform
from elbow.elementary import Gaussian,BernoulliMatrix
def layer(inp, w, b):
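    # Affine layer that also supports a leading batch axis: 2-D inputs use a
    # single matmul, 3-D inputs apply the same weights slice by slice.
    # Note: tf.pack/tf.unpack are the pre-1.0 names of tf.stack/tf.unstack,
    # so this module targets an old TensorFlow release.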
if len(inp.get_shape()) == 2:
return tf.matmul(inp, w) + b
else:
return tf.pack([tf.matmul(inp_slice, w) + b for inp_slice in tf.unpack(inp)])
def init_weights(shape, stddev=0.01):
return tf.Variable(tf.random_normal(shape, stddev=stddev, dtype=tf.float32))
def init_const(shape, val=1.):
return tf.Variable(tf.ones(shape, dtype=tf.float32)*val)
def init_zero_vector(shape):
assert(len(shape)==1)
n_out = shape[0]
return tf.Variable(tf.zeros((n_out,), dtype=tf.float32))
init_biases = init_zero_vector
def neural_gaussian(X, d_hidden, d_out, shape=None, name=None, **kwargs):
augmented_shape = (2,) + shape if shape is not None else None
encoder = NeuralGaussianTransform(X, d_hidden, d_out, shape=augmented_shape, name=None, **kwargs)
means, stds = unpackRV(encoder)
shape = means.shape
return Gaussian(mean=means, std=stds, shape=shape, name=name)
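# Example usage (illustrative; assumes `X` is an existing elbow random variable
# whose last dimension is the input size):
#   z = neural_gaussian(X, d_hidden=64, d_out=2, name="z")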
def neural_bernoulli(X, d_hidden, d_out, shape=None, local=False, name=None, **kwargs):
encoder = NeuralBernoulliTransform(X, d_hidden, d_out, shape=shape, **kwargs)
return BernoulliMatrix(p=encoder, shape=shape, local=local, name=name)
class NeuralGaussianTransform(DeterministicTransform):
def __init__(self, X, d_hidden, d_z, w3=None, w4=None, w5=None, b3=None, b4=None, b5=None, **kwargs):
self.d_hidden = d_hidden
self.d_z = d_z
super(NeuralGaussianTransform, self).__init__(X=X, w3=w3, w4=w4, w5=w5, b3=b3, b4=b4, b5=b5, **kwargs)
def inputs(self):
return {"X": None, "w3": init_weights, "w4": init_weights, "w5": init_weights, "b3": init_zero_vector, "b4": init_zero_vector, "b5": init_zero_vector}
def _input_shape(self, param, **other_shapes):
assert (param in self.inputs().keys())
d_x = other_shapes["X"][-1]
if param == "w3":
return (d_x, self.d_hidden)
elif param in ("w4", "w5"):
return (self.d_hidden, self.d_z)
elif param == "b3":
return (self.d_hidden,)
elif param in ("b4", "b5"):
return (self.d_z,)
else:
raise Exception("don't know how to produce a shape for param %s at %s" % (param, self))
def _compute_shape(self, X_shape, w3_shape, w4_shape, w5_shape, b3_shape, b4_shape, b5_shape):
base_shape = X_shape[:-1] + (self.d_z,)
augmented_shape = (2,) + base_shape
return augmented_shape
def _sample(self, X, w3, w4, w5, b3, b4, b5):
h1 = tf.nn.tanh(layer(X, w3, b3))
mean = layer(h1, w4, b4)
std = tf.exp(layer(h1, w5, b5))
return tf.pack([mean, std])
def default_q(self):
return super(NeuralGaussianTransform, self).default_q(d_hidden=self.d_hidden, d_z=self.d_z)
class NeuralBernoulliTransform(DeterministicTransform):
def __init__(self, z, d_hidden, d_x, w1=None, w2=None, b1=None, b2=None, **kwargs):
        z_shape = util.extract_shape(z) if isinstance(z, tf.Tensor) else z.shape
        self.d_z = z_shape[-1]
self.d_hidden = d_hidden
self.d_x = d_x
super(NeuralBernoulliTransform, self).__init__(z=z, w1=w1, w2=w2, b1=b1, b2=b2, **kwargs)
def inputs(self):
return {"z": None, "w1": init_weights, "w2": init_weights, "b1": init_zero_vector, "b2": init_zero_vector}
def _input_shape(self, param, **kwargs):
assert (param in self.inputs().keys())
if param == "w1":
return (self.d_z, self.d_hidden)
elif param == "w2":
return (self.d_hidden, self.d_x)
elif param == "b1":
return (self.d_hidden,)
elif param == "b2":
return (self.d_x,)
else:
raise Exception("don't know how to produce a shape for param %s at %s" % (param, self))
def _compute_shape(self, z_shape, w1_shape, w2_shape, b1_shape, b2_shape):
return z_shape[:-1] + (self.d_x,)
def _sample(self, z, w1, w2, b1, b2):
h1 = tf.nn.tanh(layer(z, w1, b1))
probs = tf.nn.sigmoid(layer(h1, w2, b2))
return probs
def default_q(self):
return super(NeuralBernoulliTransform, self).default_q(d_hidden=self.d_hidden, d_x=self.d_x)
|
11539750
|
from typing import List, Optional, Union, overload, Tuple, Type
import warnings
import torch
import xitorch as xt
import dqc.hamilton.intor as intor
from dqc.df.base_df import BaseDF
from dqc.df.dfmol import DFMol
from dqc.hamilton.base_hamilton import BaseHamilton
from dqc.hamilton.orbconverter import OrbitalOrthogonalizer, IdentityOrbConverter
from dqc.hamilton.orbparams import BaseOrbParams, QROrbParams, MatExpOrbParams
from dqc.utils.datastruct import AtomCGTOBasis, ValGrad, SpinParam, DensityFitInfo
from dqc.grid.base_grid import BaseGrid
from dqc.xc.base_xc import BaseXC
from dqc.utils.cache import Cache
from dqc.utils.mem import chunkify, get_dtype_memsize
from dqc.utils.config import config
from dqc.utils.misc import logger
class HamiltonCGTO(BaseHamilton):
"""
Hamiltonian object of contracted Gaussian type-orbital.
This class orthogonalizes the basis by taking the weighted eigenvectors of
the overlap matrix, i.e. the eigenvectors divided by square root of the
eigenvalues.
The advantage of doing this is making the overlap matrix in Roothan's equation
identity and it could handle overcomplete basis.
"""
def __init__(self, atombases: List[AtomCGTOBasis], spherical: bool = True,
df: Optional[DensityFitInfo] = None,
efield: Optional[Tuple[torch.Tensor, ...]] = None,
vext: Optional[torch.Tensor] = None,
cache: Optional[Cache] = None,
orthozer: bool = True,
aoparamzer: str = "qr") -> None:
self.atombases = atombases
self.spherical = spherical
self.libcint_wrapper = intor.LibcintWrapper(atombases, spherical)
self.dtype = self.libcint_wrapper.dtype
self.device = self.libcint_wrapper.device
# set up the orbital converter
ovlp = intor.overlap(self.libcint_wrapper)
if orthozer:
self._orthozer = OrbitalOrthogonalizer(ovlp)
else:
self._orthozer = IdentityOrbConverter(ovlp)
        # set up the orbital parameterizer
if aoparamzer == "qr":
self._orbparam: Type[BaseOrbParams] = QROrbParams
elif aoparamzer == "matexp":
warnings.warn("Parametrization with matrix exponential is still at the experimental stage.")
self._orbparam = MatExpOrbParams
else:
aoparam_opts = ["qr", "matexp"]
raise RuntimeError(
f"Unknown ao parameterizer: {aoparamzer}. Available options are: {aoparam_opts}")
# set up the density matrix
self._dfoptions = df
if df is None:
self._df: Optional[DFMol] = None
else:
self._df = DFMol(df, wrapper=self.libcint_wrapper, orthozer=self._orthozer)
self._efield = efield
self._vext = vext
self.is_grid_set = False
self.is_ao_set = False
self.is_grad_ao_set = False
self.is_lapl_ao_set = False
self.xc: Optional[BaseXC] = None
self.xcfamily = 1
self.is_built = False
# initialize cache
self._cache = cache if cache is not None else Cache.get_dummy()
self._cache.add_cacheable_params(["overlap", "kinetic", "nuclattr", "efield0"])
if self._df is None:
self._cache.add_cacheable_params(["elrep"])
@property
def nao(self) -> int:
return self._orthozer.nao()
@property
def kpts(self) -> torch.Tensor:
raise TypeError("Isolated molecule Hamiltonian does not have kpts property")
@property
def df(self) -> Optional[BaseDF]:
return self._df
############# setups #############
def build(self) -> BaseHamilton:
# get the matrices (all (nao, nao), except el_mat)
# these matrices have already been normalized
with self._cache.open():
# check the signature
self._cache.check_signature({
"atombases": self.atombases,
"spherical": self.spherical,
"dfoptions": self._dfoptions,
})
logger.log("Calculating the overlap matrix")
self.olp_mat = self._cache.cache("overlap", lambda: intor.overlap(self.libcint_wrapper))
logger.log("Calculating the kinetic matrix")
kin_mat = self._cache.cache("kinetic", lambda: intor.kinetic(self.libcint_wrapper))
logger.log("Calculating the nuclear attraction matrix")
nucl_mat = self._cache.cache("nuclattr", lambda: intor.nuclattr(self.libcint_wrapper))
self.nucl_mat = nucl_mat
self.kinnucl_mat = kin_mat + nucl_mat
# electric field integral
if self._efield is not None:
# (ndim, nao, nao)
fac: float = 1.0
for i in range(len(self._efield)):
fac *= i + 1
intor_fcn = lambda: intor.int1e("r0" * (i + 1), self.libcint_wrapper)
efield_mat_f = self._cache.cache(f"efield{i}", intor_fcn)
efield_mat = torch.einsum("dab,d->ab", efield_mat_f, self._efield[i])
self.kinnucl_mat = self.kinnucl_mat + efield_mat / fac
if self._df is None:
logger.log("Calculating the electron repulsion matrix")
self.el_mat = self._cache.cache("elrep", lambda: intor.elrep(self.libcint_wrapper)) # (nao^4)
# TODO: decide whether to precompute the 2-eris in the new basis
# based on the memory
self.el_mat = self._orthozer.convert4(self.el_mat)
else:
logger.log("Building the density fitting matrices")
self._df.build()
self.is_built = True
# orthogonalize the matrices
self.olp_mat = self._orthozer.convert2(self.olp_mat) # (nao2, nao2)
self.kinnucl_mat = self._orthozer.convert2(self.kinnucl_mat)
self.nucl_mat = self._orthozer.convert2(self.nucl_mat)
# external potential
if self._vext is not None:
vext_mat = self.get_vext(self._vext).fullmatrix()
self.kinnucl_mat = self.kinnucl_mat + vext_mat
logger.log("Setting up the Hamiltonian done")
return self
def setup_grid(self, grid: BaseGrid, xc: Optional[BaseXC] = None) -> None:
# save the family and save the xc
self.xc = xc
if xc is None:
self.xcfamily = 1
else:
self.xcfamily = xc.family
# save the grid
self.grid = grid
self.rgrid = grid.get_rgrid()
assert grid.coord_type == "cart"
# setup the basis as a spatial function
logger.log("Calculating the basis values in the grid")
self.is_ao_set = True
self.basis = intor.eval_gto(self.libcint_wrapper, self.rgrid, to_transpose=True) # (ngrid, nao)
self.dvolume = self.grid.get_dvolume()
self.basis_dvolume = self.basis * self.dvolume.unsqueeze(-1) # (ngrid, nao)
if self.xcfamily == 1: # LDA
return
# setup the gradient of the basis
logger.log("Calculating the basis gradient values in the grid")
self.is_grad_ao_set = True
# (ndim, nao, ngrid)
self.grad_basis = intor.eval_gradgto(self.libcint_wrapper, self.rgrid, to_transpose=True)
if self.xcfamily == 2: # GGA
return
# setup the laplacian of the basis
self.is_lapl_ao_set = True
logger.log("Calculating the basis laplacian values in the grid")
self.lapl_basis = intor.eval_laplgto(self.libcint_wrapper, self.rgrid, to_transpose=True) # (nao, ngrid)
############ fock matrix components ############
def get_nuclattr(self) -> xt.LinearOperator:
# nucl_mat: (nao, nao)
# return: (nao, nao)
return xt.LinearOperator.m(self.nucl_mat, is_hermitian=True)
def get_kinnucl(self) -> xt.LinearOperator:
# kinnucl_mat: (nao, nao)
# return: (nao, nao)
return xt.LinearOperator.m(self.kinnucl_mat, is_hermitian=True)
def get_overlap(self) -> xt.LinearOperator:
# olp_mat: (nao, nao)
# return: (nao, nao)
return xt.LinearOperator.m(self.olp_mat, is_hermitian=True)
def get_elrep(self, dm: torch.Tensor) -> xt.LinearOperator:
# dm: (*BD, nao, nao)
# elrep_mat: (nao, nao, nao, nao)
# return: (*BD, nao, nao)
if self._df is None:
mat = torch.einsum("...ij,ijkl->...kl", dm, self.el_mat)
mat = (mat + mat.transpose(-2, -1)) * 0.5 # reduce numerical instability
return xt.LinearOperator.m(mat, is_hermitian=True)
else:
elrep = self._df.get_elrep(dm)
return elrep
@overload
def get_exchange(self, dm: torch.Tensor) -> xt.LinearOperator:
...
@overload
def get_exchange(self, dm: SpinParam[torch.Tensor]) -> SpinParam[xt.LinearOperator]:
...
def get_exchange(self, dm):
# get the exchange operator
# dm: (*BD, nao, nao)
# el_mat: (nao, nao, nao, nao)
# return: (*BD, nao, nao)
if self._df is not None:
raise RuntimeError("Exact exchange cannot be computed with density fitting")
elif isinstance(dm, torch.Tensor):
# the einsum form below is to hack PyTorch's bug #57121
# mat = -0.5 * torch.einsum("...jk,ijkl->...il", dm, self.el_mat) # slower
mat = -0.5 * torch.einsum("...il,ijkl->...ijk", dm, self.el_mat).sum(dim=-3) # faster
mat = (mat + mat.transpose(-2, -1)) * 0.5 # reduce numerical instability
return xt.LinearOperator.m(mat, is_hermitian=True)
else: # dm is SpinParam
# using the spin-scaling property of exchange energy
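            # i.e. E_x[n_up, n_dn] = (E_x[2 n_up] + E_x[2 n_dn]) / 2, so each
            # spin channel is evaluated with twice its density matrix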
return SpinParam(u=self.get_exchange(2 * dm.u),
d=self.get_exchange(2 * dm.d))
def get_vext(self, vext: torch.Tensor) -> xt.LinearOperator:
# vext: (*BR, ngrid)
if not self.is_ao_set:
raise RuntimeError("Please call `setup_grid(grid, xc)` to call this function")
mat = torch.einsum("...r,rb,rc->...bc", vext, self.basis_dvolume, self.basis) # (*BR, nao, nao)
mat = self._orthozer.convert2(mat)
mat = (mat + mat.transpose(-2, -1)) * 0.5 # ensure the symmetricity and reduce numerical instability
return xt.LinearOperator.m(mat, is_hermitian=True)
@overload
def get_vxc(self, dm: SpinParam[torch.Tensor]) -> SpinParam[xt.LinearOperator]:
...
@overload
def get_vxc(self, dm: torch.Tensor) -> xt.LinearOperator:
...
def get_vxc(self, dm):
# dm: (*BD, nao, nao)
assert self.xc is not None, "Please call .setup_grid with the xc object"
densinfo = SpinParam.apply_fcn(
lambda dm_: self._dm2densinfo(dm_), dm) # value: (*BD, nr)
potinfo = self.xc.get_vxc(densinfo) # value: (*BD, nr)
vxc_linop = SpinParam.apply_fcn(
lambda potinfo_: self._get_vxc_from_potinfo(potinfo_), potinfo)
return vxc_linop
############### interface to dm ###############
def ao_orb2dm(self, orb: torch.Tensor, orb_weight: torch.Tensor) -> torch.Tensor:
# convert the atomic orbital to the density matrix
# in CGTO, it is U.W.U^T
# orb: (*BO, nao, norb)
# orb_weight: (*BW, norb)
# return: (*BOW, nao, nao)
orb_w = orb * orb_weight.unsqueeze(-2) # (*BOW, nao, norb)
return torch.matmul(orb, orb_w.transpose(-2, -1)) # (*BOW, nao, nao)
def aodm2dens(self, dm: torch.Tensor, xyz: torch.Tensor) -> torch.Tensor:
# xyz: (*BR, ndim)
# dm: (*BD, nao, nao)
# returns: (*BRD)
dm = self._orthozer.unconvert_dm(dm)
nao = dm.shape[-1]
xyzshape = xyz.shape
# basis: (nao, *BR)
basis = intor.eval_gto(self.libcint_wrapper, xyz.reshape(-1, xyzshape[-1])).reshape((nao, *xyzshape[:-1]))
basis = torch.movedim(basis, 0, -1) # (*BR, nao)
# torch.einsum("...ij,...i,...j->...", dm, basis, basis)
dens = torch.matmul(dm, basis.unsqueeze(-1)) # (*BRD, nao, 1)
dens = torch.matmul(basis.unsqueeze(-2), dens).squeeze(-1).squeeze(-1) # (*BRD)
return dens
############### energy of the Hamiltonian ###############
def get_e_hcore(self, dm: torch.Tensor) -> torch.Tensor:
# get the energy from one electron operator
return torch.einsum("...ij,...ji->...", self.kinnucl_mat, dm)
def get_e_elrep(self, dm: torch.Tensor) -> torch.Tensor:
# get the energy from two electron repulsion operator
elrep_mat = self.get_elrep(dm).fullmatrix()
return 0.5 * torch.einsum("...ij,...ji->...", elrep_mat, dm)
def get_e_exchange(self, dm: Union[torch.Tensor, SpinParam[torch.Tensor]]) -> torch.Tensor:
# get the energy from two electron exchange operator
exc_mat = self.get_exchange(dm)
ene = SpinParam.apply_fcn(
lambda exc_mat, dm: 0.5 * torch.einsum("...ij,...ji->...", exc_mat.fullmatrix(), dm),
exc_mat, dm)
enetot = SpinParam.sum(ene)
return enetot
def get_e_xc(self, dm: Union[torch.Tensor, SpinParam[torch.Tensor]]) -> torch.Tensor:
assert self.xc is not None, "Please call .setup_grid with the xc object"
# obtain the energy density per unit volume
densinfo = SpinParam.apply_fcn(
lambda dm_: self._dm2densinfo(dm_), dm) # (spin) value: (*BD, nr)
edens = self.xc.get_edensityxc(densinfo) # (*BD, nr)
return torch.sum(self.grid.get_dvolume() * edens, dim=-1)
############### free parameters for variational method ###############
@overload
def ao_orb_params2dm(self, ao_orb_params: torch.Tensor, ao_orb_coeffs: torch.Tensor,
orb_weight: torch.Tensor,
with_penalty: None) -> torch.Tensor:
...
@overload
def ao_orb_params2dm(self, ao_orb_params: torch.Tensor, ao_orb_coeffs: torch.Tensor,
orb_weight: torch.Tensor,
                         with_penalty: float) -> Tuple[torch.Tensor, torch.Tensor]:
...
def ao_orb_params2dm(self, ao_orb_params, ao_orb_coeffs, orb_weight, with_penalty=None):
# convert from atomic orbital parameters to density matrix
# the atomic orbital parameter is the inverse QR of the orbital
# ao_orb_params: (*BD, nao, norb)
out = self._orbparam.params2orb(ao_orb_params, ao_orb_coeffs, with_penalty=with_penalty)
if with_penalty is None:
ao_orbq = out
else:
ao_orbq, penalty = out
ao_orb = self._orthozer.convert_ortho_orb(ao_orbq)
dm = self.ao_orb2dm(ao_orb, orb_weight)
if with_penalty is None:
return dm
else:
return dm, penalty
def dm2ao_orb_params(self, dm: torch.Tensor, norb: int) -> Tuple[torch.Tensor, torch.Tensor]:
        # convert the density matrix back to one solution in the parameter space
        # NOTE: this assumes that the orbital weights are always in decreasing order
mdmm = self._orthozer.unconvert_to_ortho_dm(dm)
w, orbq = torch.linalg.eigh(mdmm)
# w is ordered increasingly, so we take the last parts
orbq_params = orbq[..., -norb:] # (nao, norb)
orbq_params = torch.flip(orbq_params, dims=(-1,))
return self._orbparam.orb2params(orbq_params)
################ misc ################
def _dm2densinfo(self, dm: torch.Tensor) -> ValGrad:
# dm: (*BD, nao, nao), Hermitian
# family: 1 for LDA, 2 for GGA, 4 for MGGA
# self.basis: (ngrid, nao)
# self.grad_basis: (ndim, ngrid, nao)
ngrid = self.basis.shape[-2]
batchshape = dm.shape[:-2]
# dm @ ao will be used in every case
dmdmt = (dm + dm.transpose(-2, -1)) * 0.5 # (*BD, nao2, nao2)
# convert it back to dm in the cgto basis
dmdmt = self._orthozer.unconvert_dm(dmdmt)
# prepare the densinfo components
dens = torch.empty((*batchshape, ngrid), dtype=self.dtype, device=self.device)
gdens: Optional[torch.Tensor] = None
lapldens: Optional[torch.Tensor] = None
kindens: Optional[torch.Tensor] = None
if self.xcfamily == 2 or self.xcfamily == 4: # GGA or MGGA
gdens = torch.empty((*dm.shape[:-2], 3, ngrid),
dtype=self.dtype, device=self.device) # (..., ndim, ngrid)
if self.xcfamily == 4: # MGGA
lapldens = torch.empty((*batchshape, ngrid), dtype=self.dtype, device=self.device)
kindens = torch.empty((*batchshape, ngrid), dtype=self.dtype, device=self.device)
        # It is faster to split into chunks than to evaluate a single big chunk
maxnumel = config.CHUNK_MEMORY // get_dtype_memsize(self.basis)
for basis, ioff, iend in chunkify(self.basis, dim=0, maxnumel=maxnumel):
# basis: (ngrid2, nao)
dmao = torch.matmul(basis, dmdmt) # (ngrid2, nao)
dens[..., ioff:iend] = torch.einsum("...ri,ri->...r", dmao, basis)
if self.xcfamily == 2 or self.xcfamily == 4: # GGA or MGGA
assert gdens is not None
if not self.is_grad_ao_set:
msg = "Please call `setup_grid(grid, gradlevel>=1)` to calculate the density gradient"
raise RuntimeError(msg)
# summing it 3 times is faster than applying the d-axis directly
grad_basis0 = self.grad_basis[0, ioff:iend, :] # (ngrid2, nao)
grad_basis1 = self.grad_basis[1, ioff:iend, :]
grad_basis2 = self.grad_basis[2, ioff:iend, :]
gdens[..., 0, ioff:iend] = torch.einsum("...ri,ri->...r", dmao, grad_basis0) * 2
gdens[..., 1, ioff:iend] = torch.einsum("...ri,ri->...r", dmao, grad_basis1) * 2
gdens[..., 2, ioff:iend] = torch.einsum("...ri,ri->...r", dmao, grad_basis2) * 2
if self.xcfamily == 4:
assert lapldens is not None
assert kindens is not None
# calculate the laplacian of the density and kinetic energy density at the grid
if not self.is_lapl_ao_set:
msg = "Please call `setup_grid(grid, gradlevel>=2)` to calculate the density gradient"
raise RuntimeError(msg)
lapl_basis_cat = self.lapl_basis[ioff:iend, :]
lapl_basis = torch.einsum("...ri,ri->...r", dmao, lapl_basis_cat)
grad_grad = torch.einsum("...ri,ri->...r", torch.matmul(grad_basis0, dmdmt), grad_basis0)
grad_grad += torch.einsum("...ri,ri->...r", torch.matmul(grad_basis1, dmdmt), grad_basis1)
grad_grad += torch.einsum("...ri,ri->...r", torch.matmul(grad_basis2, dmdmt), grad_basis2)
# pytorch's "...ij,ir,jr->...r" is really slow for large matrix
# grad_grad = torch.einsum("...ij,ir,jr->...r", dmdmt, self.grad_basis[0], self.grad_basis[0])
# grad_grad += torch.einsum("...ij,ir,jr->...r", dmdmt, self.grad_basis[1], self.grad_basis[1])
# grad_grad += torch.einsum("...ij,ir,jr->...r", dmdmt, self.grad_basis[2], self.grad_basis[2])
lapldens[..., ioff:iend] = (lapl_basis + grad_grad) * 2
kindens[..., ioff:iend] = grad_grad * 0.5
# dens: (*BD, ngrid)
# gdens: (*BD, ndim, ngrid)
res = ValGrad(value=dens, grad=gdens, lapl=lapldens, kin=kindens)
return res
def _get_vxc_from_potinfo(self, potinfo: ValGrad) -> xt.LinearOperator:
# obtain the vxc operator from the potential information
# potinfo.value: (*BD, nr)
# potinfo.grad: (*BD, ndim, nr)
# potinfo.lapl: (*BD, nr)
# potinfo.kin: (*BD, nr)
# self.basis: (nr, nao)
# self.grad_basis: (ndim, nr, nao)
# prepare the fock matrix component from vxc
nao = self.basis.shape[-1]
mat = torch.zeros((*potinfo.value.shape[:-1], nao, nao), dtype=self.dtype, device=self.device)
# Split the r-dimension into several parts, it is usually faster than
# evaluating all at once
maxnumel = config.CHUNK_MEMORY // get_dtype_memsize(self.basis)
for basis, ioff, iend in chunkify(self.basis, dim=0, maxnumel=maxnumel):
# basis: (nr, nao)
vb = potinfo.value[..., ioff:iend].unsqueeze(-1) * basis # (*BD, nr, nao)
if self.xcfamily in [2, 4]: # GGA or MGGA
assert potinfo.grad is not None # (..., ndim, nr)
vgrad = potinfo.grad[..., ioff:iend] * 2
grad_basis0 = self.grad_basis[0, ioff:iend, :] # (nr, nao)
grad_basis1 = self.grad_basis[1, ioff:iend, :]
grad_basis2 = self.grad_basis[2, ioff:iend, :]
vb += torch.einsum("...r,ra->...ra", vgrad[..., 0, :], grad_basis0)
vb += torch.einsum("...r,ra->...ra", vgrad[..., 1, :], grad_basis1)
vb += torch.einsum("...r,ra->...ra", vgrad[..., 2, :], grad_basis2)
if self.xcfamily == 4: # MGGA
assert potinfo.lapl is not None # (..., nrgrid)
assert potinfo.kin is not None
lapl = potinfo.lapl[..., ioff:iend]
kin = potinfo.kin[..., ioff:iend]
vb += 2 * lapl.unsqueeze(-1) * self.lapl_basis[ioff:iend, :]
# calculating the matrix from multiplication with the basis
mat += torch.matmul(self.basis_dvolume[ioff:iend, :].transpose(-2, -1), vb)
if self.xcfamily == 4: # MGGA
assert potinfo.lapl is not None # (..., nrgrid)
assert potinfo.kin is not None
lapl_kin_dvol = (2 * lapl + 0.5 * kin) * self.dvolume[..., ioff:iend]
mat += torch.einsum("...r,rb,rc->...bc", lapl_kin_dvol, grad_basis0, grad_basis0)
mat += torch.einsum("...r,rb,rc->...bc", lapl_kin_dvol, grad_basis1, grad_basis1)
mat += torch.einsum("...r,rb,rc->...bc", lapl_kin_dvol, grad_basis2, grad_basis2)
# construct the Hermitian linear operator
mat = self._orthozer.convert2(mat)
mat = (mat + mat.transpose(-2, -1)) * 0.5
vxc_linop = xt.LinearOperator.m(mat, is_hermitian=True)
return vxc_linop
def getparamnames(self, methodname: str, prefix: str = "") -> List[str]:
if methodname == "get_kinnucl":
return [prefix + "kinnucl_mat"]
elif methodname == "get_nuclattr":
return [prefix + "nucl_mat"]
elif methodname == "get_overlap":
return [prefix + "olp_mat"]
elif methodname == "get_elrep":
if self._df is None:
return [prefix + "el_mat"]
else:
return self._df.getparamnames("get_elrep", prefix=prefix + "_df.")
elif methodname == "get_exchange":
return [prefix + "el_mat"]
elif methodname == "ao_orb2dm":
return []
elif methodname == "ao_orb_params2dm":
return self.getparamnames("ao_orb2dm", prefix=prefix) + \
self._orthozer.getparamnames("convert_ortho_orb", prefix=prefix + "_orthozer.")
elif methodname == "get_e_hcore":
return [prefix + "kinnucl_mat"]
elif methodname == "get_e_elrep":
return self.getparamnames("get_elrep", prefix=prefix)
elif methodname == "get_e_exchange":
return self.getparamnames("get_exchange", prefix=prefix)
elif methodname == "get_e_xc":
assert self.xc is not None
return self.getparamnames("_dm2densinfo", prefix=prefix) + \
self.xc.getparamnames("get_edensityxc", prefix=prefix + "xc.") + \
self.grid.getparamnames("get_dvolume", prefix=prefix + "grid.")
elif methodname == "get_vext":
return [prefix + "basis_dvolume", prefix + "basis"] + \
self._orthozer.getparamnames("convert2", prefix=prefix + "_orthozer.")
elif methodname == "get_grad_vext":
return [prefix + "basis_dvolume", prefix + "grad_basis"]
elif methodname == "get_lapl_kin_vext":
return [prefix + "dvolume", prefix + "basis", prefix + "grad_basis",
prefix + "lapl_basis"]
elif methodname == "get_vxc":
assert self.xc is not None
return self.getparamnames("_dm2densinfo", prefix=prefix) + \
self.getparamnames("_get_vxc_from_potinfo", prefix=prefix) + \
self.xc.getparamnames("get_vxc", prefix=prefix + "xc.")
elif methodname == "_dm2densinfo":
params = [prefix + "basis"] + \
self._orthozer.getparamnames("unconvert_dm", prefix=prefix + "_orthozer.")
if self.xcfamily == 2 or self.xcfamily == 4:
params += [prefix + "grad_basis"]
if self.xcfamily == 4:
params += [prefix + "lapl_basis"]
return params
elif methodname == "_get_vxc_from_potinfo":
params = [prefix + "basis", prefix + "basis_dvolume"] + \
self._orthozer.getparamnames("convert2", prefix=prefix + "_orthozer.")
if self.xcfamily in [2, 4]:
params += [prefix + "grad_basis"]
if self.xcfamily == 4:
params += [prefix + "lapl_basis", prefix + "dvolume"]
return params
else:
raise KeyError("getparamnames has no %s method" % methodname)
# TODO: complete this
|
11539757
|
import sys
import ppp4py.protocol.base
class Protocol (ppp4py.protocol.base.Protocol):
Protocol = 0x404F
ProtocolID = 'P4'
ProtocolName = 'PppPrintf'
    @classmethod
    def process (cls, information):
        sys.stdout.write(cls.Decode(information))
@classmethod
def Decode (cls, information):
return 'LOG: ' + information[1:]
ppp4py.protocol.Registry[Protocol.Protocol] = Protocol
|
11539849
|
import numpy as np
class parameterRobot(object):
def __init__(self, defaultFolder = ""):
self.defaultFolder = defaultFolder
self.setReadStat()
self.setGenomeStat()
self.setThresholdPara()
self.genome = False
self.longOnly = False
def setReadStat(self, Nshort= 5000, Nlong= 1250, Lshort=100, Llong=240, p=0.1 , longOnly = False):
self.Nshort = Nshort
self.Nlong = Nlong
self.Lshort = Lshort
self.Llong = Llong
self.p = p
self.longOnly = longOnly
def setRealGenome(self ,start1 = 3423513 ,start2 = 3689852, lengthOfRep = 5182):
self.start1, self.start2, self.lengthOfRep, self.genome = start1, start2, lengthOfRep, True
def setGenomeStat(self,G = 10000, lrep=500, lsnp=300, lint=50 ):
self.G, self.lrep, self.lsnp, self.lint = G, lrep, lsnp, lint
self.lengthOfRep = lsnp
def setThresholdPara(self, liid = 30, thresForRandom= 0.5,thresForins =0.4, thresFordel=0.4, insMin=4, delMin=4,thresholdForSupport= 0.15, subthreshold= 9, editsub= -10, editins= -1, editdel= -1, editmatch = 1, lookRange =15):
self.liid, self.thresForRandom,self.thresForins , self.thresFordel, self.insMin, self.delMin, self.editsub, self.editins, self.editdel, self.editmatch = liid, thresForRandom,thresForins , thresFordel, insMin, delMin, editsub, editins, editdel, editmatch
self.thresholdForSupport = thresholdForSupport
self.subthreshold = subthreshold
self.lookRange = lookRange
def tunePara(self):
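        # Scale the count-based thresholds by the short-read coverage relative
        # to a reference coverage of 30x (coverage = Nshort * Lshort / G).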
covRatio = self.Nshort*self.Lshort/float(30*self.G)
self.insMin, self.delMin, self.subthreshold = self.insMin*covRatio, self.delMin*covRatio, self.subthreshold*covRatio
class voteTable(object):
def __init__(self, index, eachlongread):
self.index = index
self.longread = eachlongread
self.SNPlocList = []
self.segmentList = []
self.leftSegList = []
self.rightSegList = []
def initVoteTable(self):
L = len(self.longread)
self.delCount = [0 for i in range(L)]
self.confirmCount =[0 for i in range(L)]
        # 1, 2, 3, 4 -> 0, 1, 2, 3
        self.subCount = [[0, 0, 0, 0] for i in range(L)]
        self.insCount = [[] for i in range(L)]
        self.confirmNoIns = [0 for i in range(L)]
def logSNPloc(self, newSNPList):
self.SNPlocList = self.SNPlocList + newSNPList
def filterSNP(self):
self.SNPlocList = sorted(self.SNPlocList)
index = 0
while ( index < len(self.SNPlocList) -1 ):
if self.SNPlocList[index] == self.SNPlocList[index+1]:
self.SNPlocList.pop(index)
else:
index += 1
def matchedIndex(self, seg1, seg2):
matchedCount = 0
minimumMatchingLength = 5
for test1 in seg1:
for test2 in seg2:
for i in range(len(test1) - minimumMatchingLength):
if test1[i:len(test1)] == test2[0:len(test1) - i]:
matchedCount += 1
if matchedCount ==2 :
return True
else:
return False
def filterSNPNeighbor(self):
toRemoveList = []
for i in range(len(self.segmentList) -1 ):
if self.matchedIndex(self.segmentList[i], self.segmentList[i+1]):
toRemoveList.append(i+1)
        for j in reversed(toRemoveList):
self.segmentList.pop(j)
self.SNPlocList.pop(j)
def filterSNPOutOfTrustRange(self,trustRange):
toRemoveList = []
for i in range(len(self.segmentList)):
            LOfSegment = len(self.segmentList[i][0])
            tmp1 = self.segmentList[i][0][max(0, LOfSegment // 2 - trustRange): min(LOfSegment, LOfSegment // 2 + trustRange)]
            tmp2 = self.segmentList[i][1][max(0, LOfSegment // 2 - trustRange): min(LOfSegment, LOfSegment // 2 + trustRange)]
            print(max(0, LOfSegment // 2 - trustRange), min(LOfSegment, LOfSegment // 2 + trustRange))
            print(LOfSegment)
            print(tmp1 == tmp2)
if tmp1 == tmp2:
toRemoveList.append(i)
print "toRemoveList", toRemoveList
for j in toRemoveList[-1:-len(toRemoveList)-1:-1]:
self.segmentList.pop(j)
self.SNPlocList.pop(j)
print "self.SNPlocList", self.SNPlocList
def trimendSNP(self,trustRange):
        toRemoveList = []
for i in range(len(self.segmentList)):
if len(self.longread) - self.SNPlocList[i] < trustRange :
toRemoveList.append(i)
elif self.SNPlocList[i] < trustRange:
toRemoveList.append(i)
        for j in reversed(toRemoveList):
            self.segmentList.pop(j)
            self.SNPlocList.pop(j)
        print("self.SNPlocList", self.SNPlocList)
def filterSegmentList(self, parameterRobot):
print "Filter segment List"
        if parameterRobot.Llong > 1000:
            trustRange = parameterRobot.liid
        else:
            trustRange = parameterRobot.lookRange // 3
#self.filterSNPOutOfTrustRange(trustRange)
self.filterSNPNeighbor()
self.trimendSNP(trustRange)
if len(self.segmentList) >=1 :
self.leftSegList = self.segmentList[0]
self.rightSegList = self.segmentList[-1]
else:
self.leftSegList = []
self.rightSegList = []
def dist(self):
if len(self.SNPlocList) == 0:
return len(self.longread)
else:
return max( self.SNPlocList[0], len(self.longread) - self.SNPlocList[-1] )
|
11539875
|
import unittest
from typing import cast
from unittest.mock import MagicMock
import logging
from waves_gateway.service import AddressValidationService, ChainQueryService, TransactionConsumer
from waves_gateway.storage import MapStorage, WalletStorage, TransactionAttemptListStorage
from waves_gateway.common import WavesAddressInvalidError, InvalidTransactionIdentifier
from waves_gateway.controller import GatewayControllerImpl
from waves_gateway.factory import CoinAddressFactory
from waves_gateway.model import MappingEntry, AttemptListQuery, AttemptListTrigger, KeyPair
class GatewayControllerImplTest(unittest.TestCase):
def setUp(self):
self._coin_address_factory = MagicMock()
self._map_storage = MagicMock()
self._wallet_storage = MagicMock()
self._logger = MagicMock()
self._attempt_list_storage = MagicMock()
self._waves_address_validation_service = MagicMock()
self._coin_chain_query_service = MagicMock()
self._coin_transaction_consumer = MagicMock()
self._waves_chain_query_service = MagicMock()
self._waves_transaction_consumer = MagicMock()
self._gateway_controller = GatewayControllerImpl(
coin_address_factory=cast(CoinAddressFactory, self._coin_address_factory),
map_storage=cast(MapStorage, self._map_storage),
wallet_storage=cast(WalletStorage, self._wallet_storage),
logger=cast(logging.Logger, self._logger),
attempt_list_storage=cast(TransactionAttemptListStorage, self._attempt_list_storage),
waves_address_validation_service=cast(AddressValidationService, self._waves_address_validation_service),
coin_chain_query_service=cast(ChainQueryService, self._coin_chain_query_service),
coin_transaction_consumer=cast(TransactionConsumer, self._coin_transaction_consumer),
waves_chain_query_service=cast(ChainQueryService, self._waves_chain_query_service),
waves_transaction_consumer=cast(TransactionConsumer, self._waves_transaction_consumer))
def test_create_address_already_exists(self):
mock_waves_address = "72936587"
mock_coin_address = "8120743689"
self._waves_address_validation_service.validate_address.return_value = True
self._map_storage.waves_address_exists.return_value = True
self._map_storage.get_coin_address_by_waves_address.return_value = mock_coin_address
res = self._gateway_controller.create_address(mock_waves_address)
self._map_storage.waves_address_exists.assert_called_once_with(mock_waves_address)
self._map_storage.get_coin_address_by_waves_address.assert_called_once_with(mock_waves_address)
self.assertEqual(res, mock_coin_address)
def test_create_address_invalid_waves_address(self):
mock_waves_address = "72936587"
mock_coin_address = "8120743689"
self._waves_address_validation_service.validate_address.return_value = False
self._map_storage.waves_address_exists.return_value = True
self._map_storage.get_coin_address_by_waves_address.return_value = mock_coin_address
with self.assertRaises(WavesAddressInvalidError):
self._gateway_controller.create_address(mock_waves_address)
        self._waves_address_validation_service.validate_address.assert_called_once_with(mock_waves_address)
def test_create_address_not_exists_is_string(self):
mock_waves_address = "72936587"
mock_coin_address = "8120743689"
expected_mapping = MappingEntry(coin_address=mock_coin_address, waves_address=mock_waves_address)
self._waves_address_validation_service.validate_address.return_value = True
self._map_storage.waves_address_exists.return_value = False
self._coin_address_factory.create_address.return_value = mock_coin_address
res = self._gateway_controller.create_address(mock_waves_address)
self._map_storage.safely_save_mapping.assert_called_once_with(expected_mapping)
self.assertEqual(res, mock_coin_address)
def test_create_address_not_exists_is_key_pair(self):
mock_waves_address = "72936587"
mock_coin_address = "8120743689"
mock_coin_secret = "2736984"
mock_key_pair = KeyPair(public=mock_coin_address, secret=mock_coin_secret)
expected_mapping = MappingEntry(coin_address=mock_coin_address, waves_address=mock_waves_address)
self._waves_address_validation_service.validate_address.return_value = True
self._map_storage.waves_address_exists.return_value = False
self._coin_address_factory.create_address.return_value = mock_key_pair
res = self._gateway_controller.create_address(mock_waves_address)
self._wallet_storage.safely_save_address_secret.assert_called_once_with(mock_key_pair)
self._map_storage.safely_save_mapping.assert_called_once_with(expected_mapping)
self.assertEqual(res, mock_coin_address)
def test_get_attempt_list_by_id(self):
mock_find_by_attempt_list_id_result = MagicMock()
mock_attempt_list_id = "27891623857"
self._attempt_list_storage.find_by_attempt_list_id.return_value = mock_find_by_attempt_list_id_result
res = self._gateway_controller.get_attempt_list_by_id(mock_attempt_list_id)
self.assertEqual(res, mock_find_by_attempt_list_id_result)
def test_query_attempt_lists(self):
mock_query_attempt_lists_result = MagicMock()
mock_attempt_list_query = AttemptListQuery(anything="7192835")
self._attempt_list_storage.query_attempt_lists.return_value = mock_query_attempt_lists_result
res = self._gateway_controller.query_attempt_lists(mock_attempt_list_query)
self.assertEqual(res, mock_query_attempt_lists_result)
self._attempt_list_storage.query_attempt_lists.assert_called_once_with(mock_attempt_list_query)
def test_get_attempt_list_by_trigger(self):
mock_find_by_trigger_result = MagicMock()
mock_trigger = AttemptListTrigger(tx="2396487", receiver=3, currency="coin")
self._attempt_list_storage.find_by_trigger.return_value = mock_find_by_trigger_result
res = self._gateway_controller.get_attempt_list_by_trigger(mock_trigger)
self.assertEqual(res, mock_find_by_trigger_result)
self._attempt_list_storage.find_by_trigger.assert_called_once_with(mock_trigger)
def test_check_coin_transaction_not_found(self):
mock_tx = "867452378"
self._coin_chain_query_service.get_transaction_by_tx.return_value = None
with self.assertRaises(InvalidTransactionIdentifier):
self._gateway_controller.check_coin_transaction(mock_tx)
self._coin_chain_query_service.get_transaction_by_tx.assert_called_once_with(mock_tx)
self._coin_transaction_consumer.handle_transaction.assert_not_called()
def test_check_coin_transaction_found_but_not_relevant(self):
mock_tx = "867452378"
mock_transaction = MagicMock()
self._coin_chain_query_service.get_transaction_by_tx.return_value = mock_transaction
self._coin_transaction_consumer.filter_transaction.return_value = False
self._gateway_controller.check_coin_transaction(mock_tx)
self._coin_chain_query_service.get_transaction_by_tx.assert_called_once_with(mock_tx)
self._coin_transaction_consumer.handle_transaction.assert_not_called()
def test_check_coin_transaction_found(self):
mock_tx = "867452378"
mock_transaction = MagicMock()
self._coin_chain_query_service.get_transaction_by_tx.return_value = mock_transaction
self._coin_transaction_consumer.filter_transaction.return_value = True
self._gateway_controller.check_coin_transaction(mock_tx)
self._coin_chain_query_service.get_transaction_by_tx.assert_called_once_with(mock_tx)
self._coin_transaction_consumer.handle_transaction.assert_called_once_with(mock_transaction)
def test_check_waves_transaction_not_found(self):
mock_tx = "867452378"
self._waves_chain_query_service.get_transaction_by_tx.return_value = None
with self.assertRaises(InvalidTransactionIdentifier):
self._gateway_controller.check_waves_transaction(mock_tx)
self._waves_chain_query_service.get_transaction_by_tx.assert_called_once_with(mock_tx)
def test_check_waves_transaction_found_but_not_relevant(self):
mock_tx = "867452378"
mock_transaction = MagicMock()
self._waves_chain_query_service.get_transaction_by_tx.return_value = mock_transaction
self._waves_transaction_consumer.filter_transaction.return_value = False
self._gateway_controller.check_waves_transaction(mock_tx)
self._waves_chain_query_service.get_transaction_by_tx.assert_called_once_with(mock_tx)
self._waves_transaction_consumer.handle_transaction.assert_not_called()
def test_check_waves_transaction_found(self):
mock_tx = "867452378"
mock_transaction = MagicMock()
self._waves_chain_query_service.get_transaction_by_tx.return_value = mock_transaction
self._waves_transaction_consumer.filter_transaction.return_value = True
self._gateway_controller.check_waves_transaction(mock_tx)
self._waves_chain_query_service.get_transaction_by_tx.assert_called_once_with(mock_tx)
self._waves_transaction_consumer.handle_transaction.assert_called_once_with(mock_transaction)
|
11539885
|
import pytest
from typing import List
from tests.globals.constants import NUMBER_OF_DOCUMENTS
from tests.globals.document import pandas_document
@pytest.fixture(scope="session")
def pandas_documents() -> List:
return [pandas_document() for _ in range(NUMBER_OF_DOCUMENTS)]
|
11539917
|
from urllib.parse import urlparse
class UrlNormalizer:
@staticmethod
    def _parse_scheme(parse, base_parse):
        if not parse.scheme:
            scheme = base_parse.scheme
        else:
            scheme = parse.scheme
        return scheme + '://'
@staticmethod
def _parse_netloc(parse, base_parse):
if not parse.netloc:
uri = base_parse.netloc
else:
uri = parse.netloc
return uri
@staticmethod
def _test_path_netloc(parse):
if parse.path.find('://') == 0:
return urlparse('http' + parse.path).path
return parse.path
@staticmethod
def __parse_rel_path(parse, base_parse):
path = ''
if base_parse.path.rfind('/') > 0:
path = base_parse.path[0:base_parse.path.rfind('/')]
return path.rstrip('/') + '/' + parse.path.lstrip('/')
@staticmethod
def _parse_path(parse, base_parse):
if parse.netloc:
return parse.path
_path = UrlNormalizer._test_path_netloc(parse)
if _path:
if _path.find('/') == 0:
return _path
else:
return UrlNormalizer.__parse_rel_path(parse, base_parse)
else:
return base_parse.path
@staticmethod
def _parse_query(parse):
if parse.query:
return '?' + parse.query
return ''
@staticmethod
def _parse_fragment(parse):
if parse.fragment:
return '#' + parse.fragment
return ''
@staticmethod
def url_helper(url: str, base_url: str) -> str:
parse = urlparse(url)
base_parse = urlparse(base_url)
un = UrlNormalizer
        scheme = un._parse_scheme(parse, base_parse)
netloc = un._parse_netloc(parse, base_parse)
path = un._parse_path(parse, base_parse)
query = un._parse_query(parse)
fragment = un._parse_fragment(parse)
        return scheme + netloc + path + query + fragment
normalize_uri = UrlNormalizer.url_helper
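# Example usage (illustrative):
#   normalize_uri('page2.html', 'http://example.com/docs/page1.html')
#   -> 'http://example.com/docs/page2.html'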
|
11539983
|
import torch
import numpy as np
from deepflow.mrst_coupling import PytorchMRSTCoupler, load_production_data, load_gradients
from deepflow.storage import create_dataset
from deepflow.utils import set_seed, load_generator, print_header
from deepflow.utils import slerp, get_latent_vector
from deepflow.losses import compute_prior_loss, compute_prior_loss_kl_divergence, compute_well_loss
import os
import argparse
import sys
import logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler = logging.StreamHandler()
handler.setLevel(logging.INFO)
handler.setFormatter(formatter)
logger.addHandler(handler)
def parse_args(argv):
parser = argparse.ArgumentParser()
parser.add_argument("--working_dir", type=str, default="./", help="Working directory")
parser.add_argument("--output_dir", type=str, default="output_mrst", help="Output directory")
parser.add_argument("--matlab_dir", type=str, default="./mrst/mrst-2018a/modules/optimization/examples/model2Dtest/", help="Matlab files directory")
parser.add_argument("--mrst_dir", type=str, default="./mrst/mrst-2018a", help="Matlab files directory")
parser.add_argument("--checkpoints_dir", type=str, default="checkpoints", help="Checkpoints directory")
parser.add_argument("--reference_model", type=str, default="reference/model_67_x.npy", help="Reference Model")
parser.add_argument("--seed", type=int, default=0, help="Random Seed")
parser.add_argument("--iterations", type=int, default=200, help="Number of gradient steps")
parser.add_argument("--optimize_wells", action="store_true", help="Match wells")
parser.add_argument("--optimize_flow", action="store_true", help="Match flow behaviour")
parser.add_argument("--use_prior_loss", action="store_true", help="Regularize latent variables to be Gaussian. Same as weight decay but uses pytorch distributions. Set Weight Decay to 0!")
parser.add_argument("--use_kl_loss", action="store_true", help="Regularize latent variables to be Gaussian using an empirical KL-Divergence")
parser.add_argument('--well_locations', nargs='+', type=int, default=[8, 120], help="Well locations are hardcoded right now.")
parser.add_argument("--wells_only", action="store_true", help="Optimize wells only.")
logger.info('Parsing CMD Line Arguments')
args = parser.parse_args(argv)
logger.info('Completed Parsing CMD Line Arguments')
return args
def interpolate(args, zs, generator, output_path):
z_prior = zs[0].clone()
logger.info('Setup Paths')
working_dir = os.path.expandvars(args.working_dir)
matlab_path = os.path.join(working_dir, args.matlab_dir)
mrst_path = os.path.join(working_dir, args.mrst_dir)
well_loss = torch.from_numpy(np.array([-999.]))
flow_loss = torch.from_numpy(np.array([-999.]))
prior_loss = torch.from_numpy(np.array([-999.]))
well_acc = -999.
case_name = "vertcase3_noise"
os.environ["case_name"] = case_name
matlab_command = ["matlab", "-nodisplay", "-nosplash", "-nodesktop", "-r"]
fcall = ['run("'+os.path.join(mrst_path, "startup.m")+'"), run("'+os.path.join(matlab_path, "run_adjoint.m")+'"), exit']
external_commands = {"command": matlab_command, "call": fcall, "matlab_path": matlab_path}
ref_fname = os.path.join(matlab_path, "utils/"+case_name+"/ws_ref.mat")
syn_fname = os.path.join(matlab_path, "utils/synthetic/ws.mat")
grad_name = os.path.join(matlab_path, "utils/synthetic/grad.mat")
logger.info('Load Reference Case Data')
ref_data = load_production_data(ref_fname, "ws_ref")
np.save(os.path.join(output_path, "prod_data_ref.npy"), ref_data)
logger.info('Load Reference Geological Model')
x_gt = np.load(os.path.join(working_dir, args.reference_model))
x_gt = torch.from_numpy(x_gt).float()
logger.info('Starting Optimization Loop')
for i, z in enumerate(zs):
logger.info('Started Iteration %1.2i'%i)
logger.info('Forward Pass GAN Generator Iteration %1.2i'%i)
k, poro, x = generator(z)
logger.info('Computing Well Loss')
well_loss, well_acc = compute_well_loss(i, x, x_gt, args.well_locations)
logger.info('[Well Loss]: %1.3f [Well Accuracy]: %1.2f' % (well_loss.item(), well_acc))
logger.info('Computing Gaussian Prior Loss')
prior_loss_l2 = compute_prior_loss(z, alpha=1.)
logger.info('[Gaussian Prior Loss]: %1.3f '%prior_loss_l2.item())
logger.info('Computing KL-Divergence Loss')
prior_loss_kl = compute_prior_loss_kl_divergence(z, alpha=1.)
logger.info('[KL-Divergence Loss]: %1.3f '%prior_loss_kl.item())
logger.info('Using Flow Loss, Performing Forward Pass')
coupler = PytorchMRSTCoupler()
layer = coupler.apply
flow_loss = layer(k, poro, external_commands).float()
logger.info('[Flow Loss]: %1.3f '%flow_loss.item())
logger.info('Loading Gradients and Production History')
grads = load_gradients(grad_name)
syn_data = load_production_data(syn_fname, "ws")
logger.info('Storing Iteration Output')
ds = create_dataset(syn_data, syn_data,
np.array([poro.detach().numpy()[0, 0].T, k.detach().numpy()[0, 0].T]),
grads,
z.view(1, 50, 2, 1).detach().numpy(),
z_prior.view(1, 50, 2, 1).detach().numpy(),
z_prior.view(1, 50, 2, 1).numpy(),
np.array([[flow_loss.item(), well_loss.item(), well_acc, prior_loss_l2.item(), prior_loss_kl.item()]]))
ds.to_netcdf(os.path.join(output_path, "iteration_"+str(i)+".nc"))
logger.info('Completed Iteration Output')
logger.info('Completed Iteration %1.2i'%i)
return None
def main(args):
logger.info('Starting DeepFlow')
logger.info('')
print_header()
logger.info('')
logger.info('Setting Random Seed: %1.2i' % args.seed)
set_seed(args.seed)
working_dir = os.path.expandvars(args.working_dir)
checkpoints_path = os.path.join(working_dir, args.checkpoints_dir, "generator_facies_multichannel_4_6790.pth")
    logger.info('Initializing GAN Generator')
generator = load_generator(checkpoints_path)
z1_file = "./results/interpolations/run_1/iteration_233.nc"
z2_file = "./results/interpolations/run_4/iteration_253.nc"
z3_file = "./results/interpolations/run_5/iteration_475.nc"
output_path_1 = os.path.expandvars("./results/interpolations/interpolation_1_4")
output_path_2 = os.path.expandvars("./results/interpolations/interpolation_4_5")
output_path_3 = os.path.expandvars("./results/interpolations/interpolation_5_1")
z_files = [[z1_file, z2_file], [z2_file, z3_file], [z3_file, z1_file]]
paths = [output_path_1, output_path_2, output_path_3]
z1 = get_latent_vector(z_files[args.seed][0])
z2 = get_latent_vector(z_files[args.seed][1])
vals = np.linspace(0, 1, 101)
z_int = [torch.from_numpy(slerp(val, z1, z2)).view(1, 50, 1, 2) for val in vals]
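    # 101 spherical-interpolation (slerp) steps between the two latent vectors,
    # each reshaped to the generator's expected (1, 50, 1, 2) input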
interpolate(args, z_int, generator, paths[args.seed])
if __name__ == '__main__':
args = parse_args(sys.argv[1:])
main(args)
|
11539984
|
import logging
from anytree import RenderTree
from colorama import Fore, init
from .analyzer import TableNode, ViewAnalyzer
log = logging.getLogger("bqva.analyzer")
init(autoreset=True)
def color(color: str, text: str) -> str:
return color + text + Fore.RESET
def format_key() -> str:
return f"""
Key:
{color(Fore.CYAN, '◉')} Project
{color(Fore.YELLOW, '◉')} Dataset
{color(Fore.RED, '◉')} Table
{color(Fore.GREEN, '◉')} View
"""
def format_tree(view: ViewAnalyzer, show_key=False, show_status=False):
log.info("Formatting tree...")
output = list()
if show_key:
output.append(format_key())
for pre, _, node in RenderTree(view.tree):
output.append(pre + format_node(node, show_status=show_status))
return "\n".join(output)
def format_node(node: TableNode, show_status=False) -> str:
project = color(Fore.CYAN, node.project)
dataset = color(Fore.YELLOW, node.dataset_id)
if node.table.table_type == "VIEW":
table = color(Fore.GREEN, node.table_id)
else:
table = color(Fore.RED, node.table_id)
name = f"{project}:{dataset}.{table}"
if show_status:
if node.is_authorized():
status = color(Fore.GREEN, "✓")
else:
status = color(Fore.RED, "⨯")
name += " " + status
return name
|
11540014
|
from continual import rehearsal
from continual import classifier
from continual import vit
from continual import convit
from continual import utils
from continual import scaler
from continual import cnn
from continual import factory
from continual import sam
from continual import samplers
from continual import mixup
|
11540033
|
import numpy as np
a = np.arange(12).reshape(3, 4)
print(a)
'''
[[ 0 1 2 3]
[ 4 5 6 7]
[ 8 9 10 11]]
'''
print(a.T) # transpose
'''
[[ 0 4 8]
[ 1 5 9]
[ 2 6 10]
[ 3 7 11]]
'''
b = np.arange(24).reshape(2, 3, 4)
print(b) # 3-dimensional array
'''
[[[ 0 1 2 3]
[ 4 5 6 7]
[ 8 9 10 11]]
[[12 13 14 15]
[16 17 18 19]
[20 21 22 23]]]
'''
print(b.transpose(0, 2, 1)) # swap axes
'''
[[[ 0 4 8]
[ 1 5 9]
[ 2 6 10]
[ 3 7 11]]
[[12 16 20]
[13 17 21]
[14 18 22]
[15 19 23]]]
'''
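# Note: for a 2-D array, a.T is equivalent to a.transpose(1, 0), and both
# return a view of the same data (no copy is made).
print(a.transpose(1, 0).shape) # (4, 3)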
|
11540036
|
import argparse
import collections
import pandas
import numpy as np
import os
import gym
from keras.models import Sequential, Model
from keras.layers import Dense, Activation, Flatten, Input, Concatenate
from keras.optimizers import Adam
import tensorflow as tf
from rl.agents import NAFAgent
from rl.memory import SequentialMemory
from rl.random import OrnsteinUhlenbeckProcess
from rl.core import Processor
from noise_estimator import *
parser = argparse.ArgumentParser()
parser.add_argument('--log_dir', default='logs',
help='Log dir [default: logs]')
parser.add_argument('--reward', default='normal',
help='reward choice: normal/noisy/surrogate [default: normal]')
parser.add_argument('--weight', type=float, default=0.6,
help='Weight of random confusion matrix [default: 0.6]')
parser.add_argument('--noise_type', type=str, default='norm_all',
help='Type of noise added: norm_all/norm_one/anti_iden/max_one [default: norm_all]')
FLAGS = parser.parse_args()
REWARD = FLAGS.reward
WEIGHT = FLAGS.weight
NOISE_TYPE = FLAGS.noise_type
assert (NOISE_TYPE in ["norm_all", "norm_one", "anti_iden", "max_one"])
if REWARD == "normal":
LOG_DIR = os.path.join(os.path.join(FLAGS.log_dir, "naf_pendulum"), "normal")
else:
LOG_DIR = os.path.join(os.path.join(os.path.join(FLAGS.log_dir, "naf_pendulum"), NOISE_TYPE), str(WEIGHT))
ENV_NAME = 'Pendulum-v0'
# gym.undo_logger_setup()
if not os.path.exists(LOG_DIR):
os.makedirs(LOG_DIR)
os.system('cp naf_pendulum.py %s' % (LOG_DIR)) # bkp of train procedure
def train():
# Get the environment and extract the number of actions.
env = gym.make(ENV_NAME)
np.random.seed(123)
env.seed(123)
assert len(env.action_space.shape) == 1
nb_actions = env.action_space.shape[0]
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
from keras import backend as K
K.set_session(sess)
# Build all necessary models: V, mu, and L networks.
V_model = Sequential()
V_model.add(Flatten(input_shape=(1,) + env.observation_space.shape))
V_model.add(Dense(16))
V_model.add(Activation('relu'))
V_model.add(Dense(16))
V_model.add(Activation('relu'))
V_model.add(Dense(16))
V_model.add(Activation('relu'))
V_model.add(Dense(1))
V_model.add(Activation('linear'))
V_model.summary()
mu_model = Sequential()
mu_model.add(Flatten(input_shape=(1,) + env.observation_space.shape))
mu_model.add(Dense(16))
mu_model.add(Activation('relu'))
mu_model.add(Dense(16))
mu_model.add(Activation('relu'))
mu_model.add(Dense(16))
mu_model.add(Activation('relu'))
mu_model.add(Dense(nb_actions))
mu_model.add(Activation('linear'))
mu_model.summary()
action_input = Input(shape=(nb_actions,), name='action_input')
observation_input = Input(shape=(1,) + env.observation_space.shape, name='observation_input')
x = Concatenate()([action_input, Flatten()(observation_input)])
x = Dense(32)(x)
x = Activation('relu')(x)
x = Dense(32)(x)
x = Activation('relu')(x)
x = Dense(32)(x)
x = Activation('relu')(x)
x = Dense(((nb_actions * nb_actions + nb_actions) // 2))(x)
x = Activation('linear')(x)
L_model = Model(inputs=[action_input, observation_input], outputs=x)
L_model.summary()
# Finally, we configure and compile our agent. You can use every built-in Keras optimizer and
# even the metrics!
memory = SequentialMemory(limit=100000, window_length=1)
random_process = OrnsteinUhlenbeckProcess(theta=.15, mu=0., sigma=.3, size=nb_actions)
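    # Ornstein-Uhlenbeck noise produces temporally correlated exploration,
    # which suits inertial control tasks like Pendulum better than
    # uncorrelated (white) noise.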
if REWARD == "normal":
processor = NAFPendulumProcessor()
naf_normal = NAFAgent(nb_actions=nb_actions, V_model=V_model, L_model=L_model, mu_model=mu_model,
memory=memory, nb_steps_warmup=100, random_process=random_process,
gamma=.99, target_model_update=1e-3, processor=processor)
naf_normal.compile(Adam(lr=.00025, clipnorm=1.), metrics=['mae'])
history_normal = naf_normal.fit(env, nb_steps=150000, visualize=False, verbose=2, nb_max_episode_steps=200)
naf_normal.save_weights(os.path.join(LOG_DIR, 'naf_normal_{}_weights.h5f'.format(ENV_NAME)), overwrite=True)
naf_normal.test(env, nb_episodes=10, visualize=False, nb_max_episode_steps=200)
pandas.DataFrame(history_normal.history).to_csv(os.path.join(LOG_DIR, "normal.csv"))
elif REWARD == "noisy":
# processor_noisy = PendulumSurrogateProcessor(weight=WEIGHT, surrogate=False, noise_type=NOISE_TYPE)
processor_noisy = PendulumProcessor(weight=WEIGHT, surrogate=False, noise_type=NOISE_TYPE)
naf_noisy = NAFAgent(nb_actions=nb_actions, V_model=V_model, L_model=L_model, mu_model=mu_model,
memory=memory, nb_steps_warmup=100, random_process=random_process,
gamma=.99, target_model_update=1e-3, processor=processor_noisy)
naf_noisy.compile(Adam(lr=.00025, clipnorm=1.), metrics=['mae'])
history_noisy = naf_noisy.fit(env, nb_steps=150000, visualize=False, verbose=2, nb_max_episode_steps=200)
naf_noisy.save_weights(os.path.join(LOG_DIR, 'naf_noisy_{}_weights.h5f'.format(ENV_NAME)), overwrite=True)
naf_noisy.test(env, nb_episodes=10, visualize=False, nb_max_episode_steps=200)
pandas.DataFrame(history_noisy.history).to_csv(os.path.join(LOG_DIR, "noisy.csv"))
elif REWARD == "surrogate":
# processor_surrogate = PendulumSurrogateProcessor(weight=WEIGHT, surrogate=True, noise_type=NOISE_TYPE)
processor_surrogate = PendulumProcessor(weight=WEIGHT, surrogate=True, noise_type=NOISE_TYPE)
naf_surrogate = NAFAgent(nb_actions=nb_actions, V_model=V_model, L_model=L_model, mu_model=mu_model,
memory=memory, nb_steps_warmup=100, random_process=random_process,
gamma=.99, target_model_update=1e-3, processor=processor_surrogate)
naf_surrogate.compile(Adam(lr=.00025, clipnorm=1.), metrics=['mae'])
history_surrogate = naf_surrogate.fit(env, nb_steps=150000, visualize=False, verbose=2, nb_max_episode_steps=200)
naf_surrogate.save_weights(os.path.join(LOG_DIR, 'naf_surrogate_{}_weights.h5f'.format(ENV_NAME)), overwrite=True)
naf_surrogate.test(env, nb_episodes=10, visualize=False, nb_max_episode_steps=200)
pandas.DataFrame(history_surrogate.history).to_csv(os.path.join(LOG_DIR, "surrogate.csv"))
else:
raise NotImplementedError
if __name__ == "__main__":
train()
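# Usage sketch (flags as defined by the argparse setup above):
#   python naf_pendulum.py --reward surrogate --weight 0.4 --noise_type norm_one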
|
11540037
|
from abc import abstractmethod, ABC
from typing import Tuple, Optional
from torch.utils.data import Dataset, ConcatDataset
# Hydra and OmegaConf
from hydra.conf import dataclass
from omegaconf import MISSING
# Project Imports
from slam.common.projection import SphericalProjector
from slam.dataset.sequence_dataset import DatasetOfSequences
@dataclass
class DatasetConfig:
"""A DatasetConfig contains the configuration values used to define a DatasetConfiguration"""
dataset: str = MISSING
# The length of the sequence returned
sequence_len: int = 2
# ----------------------------------
# Default item keys in the data_dict
vertex_map_key: str = "vertex_map"
numpy_pc_key: str = "<KEY>"
absolute_gt_key: str = "absolute_pose_gt"
with_numpy_pc: bool = True # Whether to add the numpy pc to the data_dict
class DatasetLoader(ABC):
"""
    A DatasetLoader builds the pytorch train/eval/test Datasets described by a DatasetConfig
"""
@classmethod
def max_num_workers(cls):
"""Returns the maximum number of workers allowed by this dataset
Note: Not respecting this constraint can lead to undefined behaviour for
Datasets which do not support Random Access
"""
return 20
@staticmethod
def absolute_gt_key():
"""The key (in data_dict) for the absolute_pose_gt"""
return "absolute_pose_gt"
@staticmethod
def numpy_pc_key():
"""The key (in data_dict) for xyz pointcloud"""
return "numpy_pc"
def __init__(self, config: DatasetConfig):
self.config = config
@abstractmethod
def projector(self) -> SphericalProjector:
"""
Returns the Default Spherical Image projector associated to the dataset_config
"""
raise NotImplementedError("")
@abstractmethod
def sequences(self):
"""
        Returns the train, eval and test datasets and the corresponding sequence names
        Returns: (train, eval, test, transform)
            train (Optional[Tuple[List[Dataset], List[str]]]): An optional pair of a list of datasets
                                                               and the corresponding sequence names
            eval (Optional[Tuple[List[Dataset], List[str]]]): Idem
            test (Optional[Tuple[List[Dataset], List[str]]]): Idem
            transform (callable): The function applied on the data from the given datasets
"""
raise NotImplementedError("")
def get_dataset(self) -> Tuple[Dataset, Dataset, Dataset, callable]:
"""
Returns:
(train_dataset, eval_dataset, test_dataset)
A tuple of `DatasetOfSequences` consisting of concatenated datasets
"""
train_dataset, eval_datasets, test_datasets, transform = self.sequences()
        def __swap(dataset):
            # `dataset` is an optional (datasets, sequence_names) pair
            if dataset is not None and dataset[0] is not None:
                return ConcatDataset(dataset[0])
            return None
train_dataset = __swap(train_dataset)
eval_datasets = __swap(eval_datasets)
test_datasets = __swap(test_datasets)
return train_dataset, eval_datasets, test_datasets, transform
def get_sequence_dataset(self) -> Tuple[Optional[DatasetOfSequences],
Optional[DatasetOfSequences],
Optional[DatasetOfSequences]]:
"""
Returns:
(train_dataset, eval_dataset, test_dataset) : A tuple of `DatasetOfSequences`
"""
sequence_len = self.config.sequence_len
train_dataset, eval_datasets, test_datasets, transform = self.sequences()
def __to_sequence_dataset(dataset_pair):
if dataset_pair is None or dataset_pair[0] is None:
return None
return DatasetOfSequences(sequence_len, dataset_pair[0], dataset_pair[1], transform=transform)
return tuple(map(__to_sequence_dataset, [train_dataset, eval_datasets, test_datasets]))
@abstractmethod
def get_ground_truth(self, sequence_name):
"""Returns the ground truth for the dataset_config for a given sequence"""
return None
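
# ---------------------------------------------------------------------------
# Illustrative sketch (hypothetical, not part of the project): a minimal
# DatasetLoader subclass showing the (datasets, sequence_names) contract of
# `sequences()`. `_ToyDataset`, `ToyDatasetLoader` and the no-argument
# SphericalProjector() call are assumptions made for demonstration only.
# ---------------------------------------------------------------------------
class _ToyDataset(Dataset):
    """A tiny in-memory dataset used only by the sketch below."""

    def __len__(self):
        return 4

    def __getitem__(self, idx):
        # A placeholder frame; a real dataset would return point clouds, poses, etc.
        return {"numpy_pc": [[0.0, 0.0, 0.0]], "index": idx}


class ToyDatasetLoader(DatasetLoader):
    def projector(self) -> SphericalProjector:
        # Assumes SphericalProjector exposes a no-argument constructor.
        return SphericalProjector()

    def sequences(self):
        train = ([_ToyDataset()], ["toy_00"])  # one training sequence named "toy_00"
        return train, None, None, (lambda data: data)

    def get_ground_truth(self, sequence_name):
        return None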
|
11540045
|
from waflib import TaskGen  # feature, taskgen_method, before_method, task_gen
from waflib import Node, Task, Utils, Build
import subprocess
from waflib import Options
import shellcmd
#shellcmd.subprocess = pproc # the WAF version of the subprocess module is supposedly less buggy
from waflib.Logs import debug, error
shellcmd.debug = debug
import re
arg_rx = re.compile(r"(?P<dollar>\$\$)|(?P<subst>\$\{(?P<var>\w+)(?P<code>.*?)\})", re.M)
class command_task(Task.Task):
color = "BLUE"
def __init__(self, env, generator):
Task.Task.__init__(self, env=env, normal=1, generator=generator)
def __str__(self):
"string to display to the user"
env = self.env
src_str = ' '.join([a.nice_path(env) for a in self.inputs])
tgt_str = ' '.join([a.nice_path(env) for a in self.outputs])
if self.outputs:
sep = ' -> '
else:
sep = ''
pipeline = shellcmd.Pipeline()
pipeline.parse(self.generator.command)
cmd = pipeline.get_abbreviated_command()
return 'command (%s): %s%s%s\n' % (cmd, src_str, sep, tgt_str)
def _subst_arg(self, arg, direction, namespace):
"""
@param arg: the command argument (or stdin/stdout/stderr) to substitute
        @param direction: direction of the argument: 'in', 'out', or None
        @param namespace: the dict of names available to ${...} substitutions
        """
def repl(match):
if match.group('dollar'):
return "$"
elif match.group('subst'):
var = match.group('var')
code = match.group('code')
result = eval(var+code, namespace)
if isinstance(result, Node.Node):
if var == 'TGT':
return result.get_bld().abspath()
elif var == 'SRC':
return result.srcpath()
else:
raise ValueError("Bad subst variable %r" % var)
elif result is self.inputs:
if len(self.inputs) == 1:
return result[0].srcpath()
else:
raise ValueError("${SRC} requested but have multiple sources; which one?")
elif result is self.outputs:
if len(self.outputs) == 1:
return result[0].get_bld().abspath()
else:
raise ValueError("${TGT} requested but have multiple targets; which one?")
elif isinstance(result, list):
assert len(result) == 1
return result[0]
else:
return result
return None
return arg_rx.sub(repl, arg)
def run(self):
pipeline = shellcmd.Pipeline()
pipeline.parse(self.generator.command)
namespace = self.env.get_merged_dict()
if self.generator.variables is not None:
namespace.update(self.generator.variables)
namespace.update(env=self.env, SRC=self.inputs, TGT=self.outputs)
for cmd in pipeline.pipeline:
if isinstance(cmd, shellcmd.Command):
if isinstance(cmd.stdin, str):
cmd.stdin = self._subst_arg(cmd.stdin, 'in', namespace)
if isinstance(cmd.stdout, str):
cmd.stdout = self._subst_arg(cmd.stdout, 'out', namespace)
if isinstance(cmd.stderr, str):
cmd.stderr = self._subst_arg(cmd.stderr, 'out', namespace)
for argI in range(len(cmd.argv)):
cmd.argv[argI] = self._subst_arg(cmd.argv[argI], None, namespace)
if cmd.env_vars is not None:
env_vars = dict()
for name, value in list(cmd.env_vars.items()):
env_vars[name] = self._subst_arg(value, None, namespace)
cmd.env_vars = env_vars
elif isinstance(cmd, shellcmd.Chdir):
cmd.dir = self._subst_arg(cmd.dir, None, namespace)
return pipeline.run(verbose=(Options.options.verbose > 0))
@TaskGen.taskgen_method
@TaskGen.feature('command')
def init_command(self):
Utils.def_attrs(self,
# other variables that can be used in the command: ${VARIABLE}
variables = None,
rule='')
@TaskGen.taskgen_method
@TaskGen.feature('command')
@TaskGen.before_method('process_source')
def process_rule(self):
if not 'command' in self.features:
return
# now create one instance
tsk = self.create_task('command')
if getattr(self, 'target', None):
if isinstance(self.target, str):
self.target = self.target.split()
if not isinstance(self.target, list):
self.target = [self.target]
for x in self.target:
if isinstance(x, str):
tsk.outputs.append(self.path.find_or_declare(x))
else:
x.parent.mkdir() # if a node was given, create the required folders
tsk.outputs.append(x)
if getattr(self, 'install_path', None):
# from waf 1.5
# although convenient, it does not 1. allow to name the target file and 2. symlinks
# TODO remove in waf 1.7
self.bld.install_files(self.install_path, tsk.outputs)
if getattr(self, 'source', None):
tsk.inputs = self.to_nodes(self.source)
# bypass the execution of process_source by setting the source to an empty list
self.source = []
elif getattr(self, 'deps', None):
def scan(self):
nodes = []
for x in self.generator.to_list(self.generator.deps):
node = self.generator.path.find_resource(x)
if not node:
self.generator.bld.fatal('Could not find %r (was it declared?)' % x)
nodes.append(node)
return [nodes, []]
        # Bind the scanner to this task instance so dependency scanning can
        # call it as a method (the original `cls.scan = scan` referenced an
        # undefined name).
        tsk.scan = scan.__get__(tsk, tsk.__class__)
setattr(tsk, "dep_vars", getattr(self, "dep_vars", None))
|
11540054
|
import numpy as np
import pandas as pd
import yfinance as yf
import matplotlib.pyplot as plt
from sklearn.mixture import GaussianMixture
import datetime
ticker = input('Enter a ticker: ')
index = 'SPY'
num_of_years = 5
start = datetime.date.today() - datetime.timedelta(days = int(365.25*num_of_years))
yf_prices = yf.download([ticker], start=start)
prices = yf_prices['Adj Close']
subplots_ratio = dict(width_ratios=[3,2], height_ratios=[1])
fig, ax = plt.subplots(1,2, gridspec_kw=subplots_ratio, figsize=(15,10))
prices.plot(title=f'{ticker.upper()} Price', ax=ax[0], grid=True, linewidth=2)
prices.plot.hist(title=f'{ticker.upper()} Price Distribution', ax=ax[1], grid=True, bins=30)
plt.tight_layout()
plt.show()
rs = prices.apply(np.log).diff(1)
subplots_ratio = dict(width_ratios=[3,2], height_ratios=[1])
fig, ax = plt.subplots(1,2, gridspec_kw=subplots_ratio, figsize=(15,10))
rs.plot(title=f'{ticker.upper()} Returns', ax=ax[0], grid=True, linewidth=2)
rs.plot.hist(title=f'{ticker.upper()} Returns Distribution', ax=ax[1], grid=True, bins=30)
plt.tight_layout()
plt.show()
w = 22
s1 = rs.rolling(w).mean()
s2 = rs.rolling(w).std()
s3 = rs.rolling(w).skew()
s4 = rs.rolling(w).kurt()
signals = pd.concat([s1, s2, s3, s4], axis=1)
signals.columns = ['mean', 'std dev', 'skew', 'kurtosis']
signals.plot(subplots=True, figsize=(15,10))
plt.tight_layout()
plt.show()
yf_prices = yf.download([index], start=start)
prices = yf_prices['Adj Close']
rs = prices.apply(np.log).diff(1)
w = 22
vol = rs.rolling(w).std()
vol = vol.dropna()
labels = GaussianMixture(2).fit_predict(vol.values.reshape(-1,1))
prices = prices.reindex(vol.index)
plt.subplots()
prices[labels==0].plot(style='bo', alpha=0.2)
prices[labels==1].plot(style='ro', alpha=0.2)
plt.title(f'{index} Volatility Regimes (Gaussian Mixture)')
plt.tight_layout()
plt.show()
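# Optional extension (sketch): summarize daily log-returns within each
# volatility regime found above; `labels` is aligned with `vol.index`.
regimes = pd.Series(labels, index=vol.index, name='regime')
regime_summary = rs.reindex(vol.index).groupby(regimes).agg(['mean', 'std'])
print(regime_summary)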
|
11540112
|
import os
import argparse
import json
import numpy as np
from collections import defaultdict
def generate_answer_nl(in_dir, dtype):
with open(os.path.join(in_dir, '{}_answer.src'.format(dtype)),'r') as f:
subgraph_answer = f.readlines()
ans_list = []
    for line in subgraph_answer:
        triples = line.split()
        tmp = []
        find_answer = False
        for each in triples:
            if '│A' in each:
                # answer tokens are tagged with a '│A' suffix
                tmp.append(each.split('│A')[0])
                find_answer = True
            else:
                # answer tokens are expected only at the end of the line
                assert find_answer == False
tmp = ' '.join(tmp)
ans_list.append([tmp])
print('generate_answer_nl')
return ans_list
def build_subgraph_nl(in_dir, dtype):
with open(os.path.join(in_dir, '{}.src'.format(dtype)),'r') as f2:
subgraphf = f2.readlines()
all_ents = set()
all_rels = set()
num_triples = []
formatted_subgraphs = []
for line in subgraphf:
path_list = line.strip().split()
g_node_names = {}
# g_node_types = {}
g_edge_types = {}
g_adj = defaultdict(dict)
assert len(path_list) % 2 == 1
num_triples.append((len(path_list) - 1) // 2)
triple = []
for idx, element in enumerate(path_list):
if idx % 2 == 0:
all_ents.add(element)
g_node_names[element] = ' '.join(element.split('_'))
else:
element = element.replace('__', '/')
all_rels.add(element)
g_edge_types[element] = element
triple.append(element)
if idx > 0 and idx % 2 == 0:
triple = triple[-3:]
if triple[2] in g_adj[triple[0]]:
g_adj[triple[0]][triple[2]].append(triple[1])
else:
g_adj[triple[0]][triple[2]] = [triple[1]]
subgraph = {'g_node_names': g_node_names,
'g_edge_types': g_edge_types,
'g_adj': g_adj}
assert len(g_adj) > 0
formatted_subgraphs.append(subgraph)
print('build_subgraph_nl')
print('all_ents', len(all_ents))
print('all_rels', len(all_rels))
print('# of triples: min: {}, max: {}, mean: {}'.format(np.min(num_triples), np.max(num_triples), np.mean(num_triples)))
return formatted_subgraphs
def process_queries(in_dir, dtype):
    with open(os.path.join(in_dir, '{}.tgt'.format(dtype)), 'r') as f:
        queries = f.readlines()
    new_queries = []
    for line in queries:
        new_queries.append(line.lower().replace('_', ' ').strip())
    return new_queries
def prepare_output_data(subgraphs, answer_lists, queries, dtype, out_dir):
count = 0
with open(os.path.join(out_dir, '{}.json'.format(dtype)), 'w') as outf:
for i in range(len(subgraphs)):
example = {}
example['answers'] = answer_lists[i]
example['outSeq'] = queries[i]
example['qId'] = count + 1
# example['topicEntityID'] = None
example['inGraph'] = subgraphs[i]
outf.write(json.dumps(example) + '\n')
count += 1
return count
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input_dir', required=True, type=str, help='path to the input dir')
parser.add_argument('-o', '--output_dir', required=True, type=str, help='path to the output dir')
opt = vars(parser.parse_args())
for dtype in ['train', 'test', 'dev']:
new_subgraph = build_subgraph_nl(opt['input_dir'], dtype)
new_ans_list = generate_answer_nl(opt['input_dir'], dtype)
        new_queries = process_queries(opt['input_dir'], dtype)
assert len(new_subgraph) == len(new_ans_list) == len(new_queries)
prepare_output_data(new_subgraph, new_ans_list, new_queries, dtype, opt['output_dir'])
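# Worked example (hypothetical data) of the *.src line format consumed by
# build_subgraph_nl: tokens alternate entity / relation, and '__' inside a
# relation token is rewritten to '/'. The line
#   "Barack_Obama born__in Honolulu located__in Hawaii"
# yields g_adj = {'Barack_Obama': {'Honolulu': ['born/in']},
#                 'Honolulu': {'Hawaii': ['located/in']}}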
|
11540128
|
import subprocess

# Intel MKL number of threads
numThreads = '16'
baseCommand = ''  # was appended to before being defined (NameError); initialize it here
baseCommand += 'export MKL_NUM_THREADS=' + numThreads + '\nexport OMP_NUM_THREADS=' + numThreads + '\nexport VECLIB_MAXIMUM_THREADS=' + numThreads + '\n'

# run: normal-flow jobs as (meshName, smoothIntensity, magnitude, frameNum),
# in the same order as the original per-mesh blocks
normalFlowJobs = [
    ('cat',            '0.5', '5e-3',  '10'),
    ('hand',           '0.5', '5e-3',  '3'),
    ('walnut71K',      '0.1', '5e-3',  '8'),
    ('bunny3K',        '0.5', '-5e-3', '50'),
    ('feline',         '1',   '-5e-3', '50'),
    ('font_Tao',       '0.5', '5e-3',  '10'),
    ('font_Peng',      '0.5', '5e-3',  '5'),
    ('font_delicious', '0.5', '5e-3',  '12'),
    ('font_seriously', '10',  '5e-3',  '25'),
]
script = '12-14_normal_flow.py'
for meshName, smoothIntensity, magnitude, frameNum in normalFlowJobs:
    runCommand = baseCommand + 'python3 ' + script + ' ' + meshName + ' ' + smoothIntensity + ' ' + magnitude + ' ' + frameNum
    if subprocess.call([runCommand], shell=True):
        continue

# character-sequence fixing jobs
script = '16_fix_char_seq.py'
for seqName in ['Rumba_Dancing_unfixed', 'Kick_unfixed']:
    runCommand = baseCommand + 'python3 ' + script + ' ' + seqName
    if subprocess.call([runCommand], shell=True):
        continue
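# To queue another mesh, append a row to normalFlowJobs above, e.g.
#   ('spot', '0.5', '5e-3', '20'),  # 'spot' is a hypothetical mesh name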
|
11540138
|
import sys
sys.path.append('.')
import cfg
import async_session
# Repeatedly open a fresh session and issue a single GET-NEXT on the system
# subtree; SNMP timeouts are expected here and are silently ignored.
while True:
    sess = async_session.AsyncSession(cfg.timeout_read)
    sess.open_session()
    oid = async_session.oid_str_to_tuple('1.3.6.1.2.1.1.1')
    try:
        sess.get_next(oid)
    except async_session.SNMPTimeoutError:
        pass
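# Note: '1.3.6.1.2.1.1.1' addresses the standard sysDescr object (SNMPv2-MIB);
# oid_str_to_tuple is expected to return it as (1, 3, 6, 1, 2, 1, 1, 1).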
|
11540142
|
from framework.deprecated.controllers import AdvCommScheduler, CheckpointDriving, MathScheduler, AudioRewardLogic, VisualSearchTask
from framework.latentmodule import LatentModule
from framework.convenience import ConvenienceFunctions
from framework.ui_elements.EventWatcher import EventWatcher
from framework.ui_elements.ScrollPresenter import ScrollPresenter
from framework.ui_elements.AudioPresenter import AudioPresenter
from framework.ui_elements.TextPresenter import TextPresenter
from panda3d.core import TextProperties,TextPropertiesManager
from direct.gui.DirectGui import *
import framework.speech
import random
import time
import copy
# =======================
# === Subtask classes ===
# =======================
class WarningLightTask(LatentModule):
"""
A Warning light class (red/blue/green) that sporadically turns on/off and demands a
response that can be configured (press a button when turning on / turning off / stopping to blink).
Has some support for a cue status object/task which is currently unused here (was for MBF2A).
"""
def __init__(self,
# general properties
rewardlogic, # reward handling logic
watcher = None, # optional event watcher
focused = True, # whether this task is currently focused
                 markerbase = 1, # markers markerbase..markerbase+11 are used
event_interval=lambda: random.uniform(45,85), # interval between two successive events
# cueing control
cueobj = None, # an object that might have .iscued set to true
# graphics parameters
pic_off='light_off.png', # picture to display for the disabled light
pic_on='light_on.png', # picture to display for the enabled light
screen_offset=0, # offset to position this icon on one of the three screens
pic_params={'pos':[0,0],'scale':0.15}, # parameters for the picture() command
snd_params={'volume':0.3,'direction':0.0}, # parameters for the sound() command
# response handling
snd_hit='click2s.wav', # sound when the user correctly detected the warning state
snd_wrongcue='xBuzz01.wav', # the sound that is overlaid with the buzzer when the response was wrong due to incorrect cueing
response_key='sysmonv-check', # key to press in case of an event
timeout=2.5, # response timeout for the user
hit_reward=0, # reward if hit
miss_penalty=-20, # penalty if missed
false_penalty=-5, # penalty for false positives
# ticking support
pic_tick_off=None, # optional blinking in off status
pic_tick_on=None, # optional blinking in on status
tick_rate = None, # tick rate (duration in non-tick status, duration in tick status)
):
LatentModule.__init__(self)
self.rewardlogic = rewardlogic
self.focused = focused
self.markerbase = markerbase
self.event_interval = event_interval
self.pic_off = pic_off
self.pic_on = pic_on
self.pic_params = copy.deepcopy(pic_params)
self.snd_wrongcue = snd_wrongcue
self.snd_params = snd_params
self.snd_hit = snd_hit
self.response_key = response_key
self.timeout = timeout
self.hit_reward = hit_reward
self.miss_penalty = miss_penalty
self.false_penalty = false_penalty
self.screen_offset = screen_offset
self.cueobj = cueobj
self.control = False
self.pic_tick_off = pic_tick_off
self.pic_tick_on = pic_tick_on
if self.pic_tick_on is None:
self.pic_tick_on = self.pic_on
if self.pic_tick_off is None:
self.pic_tick_off = self.pic_off
self.tick_rate = tick_rate
self.watcher = watcher
def run(self):
self.pic_params['pos'][0] += self.screen_offset
# pre-cache the media files...
self.precache_picture(self.pic_on)
self.precache_picture(self.pic_off)
self.precache_picture(self.pic_tick_on)
self.precache_picture(self.pic_tick_off)
self.precache_sound(self.snd_wrongcue)
self.precache_sound(self.snd_hit)
self.accept('control',self.oncontrol,[True])
self.accept('control-up',self.oncontrol,[False])
# set up an event watcher (taking care of timeouts and inappropriate responses)
if self.watcher is None:
self.watcher = EventWatcher(eventtype=self.response_key,
handleduration=self.timeout,
defaulthandler=self.false_detection)
while True:
# show the "off" picture for the inter-event interval
if self.tick_rate is not None:
t_end = time.time()+self.event_interval()
while time.time() < t_end:
self.marker(self.markerbase+10)
# show the off/tic pic
self.picture(self.pic_tick_off, self.tick_rate[1], **self.pic_params)
# show the off pic
self.picture(self.pic_off, self.tick_rate[0], **self.pic_params)
else:
                # just show the off pic
self.picture(self.pic_off, self.event_interval(), **self.pic_params)
# start watching for a response
self.watcher.watch_for(self.correct, self.timeout, self.missed)
self.marker(self.markerbase if self.focused else (self.markerbase+1))
if self.tick_rate is not None:
t_end = time.time()+self.timeout
while time.time() < t_end:
self.marker(self.markerbase+11)
# show the on/tic pic
self.picture(self.pic_tick_on, self.tick_rate[1], **self.pic_params)
                    # show the on pic
self.picture(self.pic_on, self.tick_rate[0], **self.pic_params)
else:
# just show the "on" picture
self.picture(self.pic_on, self.timeout, **self.pic_params)
self.marker(self.markerbase+2)
# reset the cue status
if self.cueobj is not None:
self.cueobj.iscued = False
def oncontrol(self,status):
self.control = status
def missed(self):
if self.focused:
self.marker(self.markerbase+3)
self.rewardlogic.score_event(self.miss_penalty)
def false_detection(self):
self.marker(self.markerbase+4)
self.rewardlogic.score_event(self.false_penalty)
def correct(self):
if self.focused:
if ((self.cueobj is not None) and self.cueobj.iscued):
self.marker(self.markerbase+5 if self.control else self.markerbase+6)
else:
self.marker(self.markerbase+7 if self.control else self.markerbase+8)
if self.control == ((self.cueobj is not None) and self.cueobj.iscued):
# the user correctly spots the warning event
self.sound(self.snd_hit,**self.snd_params)
self.rewardlogic.score_event(self.hit_reward)
else:
# the user spotted it, but didn't get the cue right
self.sound(self.snd_wrongcue,**self.snd_params)
self.rewardlogic.score_event(self.false_penalty)
else:
self.marker(self.markerbase+9)
# the user spotted it, but was not tasked to do so...
self.rewardlogic.score_event(self.false_penalty)
def flash(self,status,duration=1):
self.picture(self.pic_on if status else self.pic_off,duration=duration, **self.pic_params)
class WarningSoundTask(LatentModule):
"""
A warning sound class that turns on sporadically. Demands that the subject responds in some way
when the sound goes on / off or stops "ticking" (if a tick sound).
Has some support for a cue status object/task which is currently unused here (was for MBF2A).
"""
def __init__(self,
# general properties
rewardlogic, # reward handling logic
watcher = None, # response event watcher
focused = True, # whether this task is currently focused
                 markerbase = 1, # markers markerbase..markerbase+11 are used
event_interval=lambda: random.uniform(45,85), # interval between two successive events
# cueing control
cueobj = None, # an object that might have .iscued set to true
# audio parameters
screen_offset=0, # offset to position this source on one of the three screens
snd_on='xHyprBlip.wav', # sound to play in case of an event
snd_params={'volume':0.25,'direction':0.0}, # parameters for the sound() command
# response handling
snd_hit='click2s.wav', # sound when the user correctly detected the warning state
snd_wrongcue='xBuzz01.wav', # the sound that is overlaid with the buzzer when the response was wrong due to incorrect cueing
response_key='sysmona-check', # key to press in case of an event
timeout=5.5, # response timeout for the user
hit_reward=0, # reward if hit
miss_penalty=-20, # penalty if missed
false_penalty=-5, # penalty for false positives
# ticking support
snd_tick_off=None, # optional ticking in off status
snd_tick_on=None, # optional ticking in on status
tick_rate = None, # tick rate (duration in non-tick status, duration in tick status)
):
LatentModule.__init__(self)
self.rewardlogic = rewardlogic
self.focused = focused
self.markerbase = markerbase
self.event_interval = event_interval
self.snd_on = snd_on
self.snd_params = snd_params
self.snd_wrongcue = snd_wrongcue
self.snd_hit = snd_hit
self.response_key = response_key
self.timeout = timeout
self.hit_reward = hit_reward
self.miss_penalty = miss_penalty
self.false_penalty = false_penalty
self.screen_offset = screen_offset
self.snd_params = copy.deepcopy(snd_params)
self.cueobj = cueobj
self.control = False
self.snd_tick_off = snd_tick_off
self.snd_tick_on = snd_tick_on
self.tick_rate = tick_rate
self.watcher = watcher
def run(self):
self.snd_params['direction'] += self.screen_offset
# pre-cache the media files...
self.precache_sound(self.snd_on)
self.precache_sound(self.snd_tick_on)
self.precache_sound(self.snd_tick_off)
self.precache_sound(self.snd_wrongcue)
self.precache_sound(self.snd_hit)
self.accept('control',self.oncontrol,[True])
self.accept('control-up',self.oncontrol,[False])
# set up an event watcher (taking care of timeouts and inappropriate responses)
if self.watcher is None:
self.watcher = EventWatcher(eventtype=self.response_key,
handleduration=self.timeout,
defaulthandler=self.false_detection)
while True:
# off status
if self.tick_rate is not None:
t_end = time.time()+self.event_interval()
while time.time() < t_end:
self.marker(self.markerbase+10)
# play the off/tic snd
self.sound(self.snd_tick_off, **self.snd_params)
self.sleep(self.tick_rate[1])
# wait
self.sleep(self.tick_rate[0])
else:
# wait
self.sleep(self.event_interval())
# start watching for a response
self.watcher.watch_for(self.correct, self.timeout, self.missed)
self.marker(self.markerbase if self.focused else (self.markerbase+1))
if self.tick_rate is not None:
t_end = time.time()+self.timeout
while time.time() < t_end:
self.marker(self.markerbase+11)
# play the on/tic sound
if self.snd_tick_on is not None:
self.sound(self.snd_tick_on,**self.snd_params)
self.sleep(self.tick_rate[1])
# wait
self.sleep(self.tick_rate[0])
else:
# just play the "on" sound
if self.snd_on is not None:
self.sound(self.snd_on, **self.snd_params)
self.sleep(self.timeout)
self.marker(self.markerbase+2)
# reset the cue status
if self.cueobj is not None:
self.cueobj.iscued = False
def oncontrol(self,status):
self.control = status
def missed(self):
if self.focused:
self.marker(self.markerbase+3)
self.rewardlogic.score_event(self.miss_penalty)
def false_detection(self):
self.marker(self.markerbase+4)
self.rewardlogic.score_event(self.false_penalty)
def correct(self):
if self.focused:
if ((self.cueobj is not None) and self.cueobj.iscued):
self.marker(self.markerbase+5 if self.control else self.markerbase+6)
else:
self.marker(self.markerbase+7 if self.control else self.markerbase+8)
if self.control == ((self.cueobj is not None) and self.cueobj.iscued):
# the user correctly spots the warning event
self.sound(self.snd_hit,**self.snd_params)
self.rewardlogic.score_event(self.hit_reward)
else:
# the user spotted it, but didn't get the cue right
self.sound(self.snd_wrongcue,**self.snd_params)
self.rewardlogic.score_event(self.false_penalty)
else:
self.marker(self.markerbase+9)
# the user spotted it, but was not tasked to do so...
self.rewardlogic.score_event(self.false_penalty)
def flash(self,filename):
self.sound(filename, **self.snd_params)
# ============================
# === Main task definition ===
# ============================
class Main(LatentModule):
def __init__(self):
LatentModule.__init__(self)
# ===============================
# === block design parameters ===
# ===============================
self.randseed = 11463 # some initial randseed for the experiment; note that this should be different for each subject (None = random)
self.uiblocks = 24 # number of blocks with different UI permutation: should be a multiple of 6
self.focus_per_layout = 8 # number of focus conditions within a UI layout block
self.rest_every = 3 # insert a rest period every k UI blocks
self.focus_duration = lambda: random.uniform(30,50) # duration of a focus block (was: 30-50)
self.initial_rest_time = 5 # initial rest time at the beginning of a new UI layout block
        self.tasknames = {'sysmonv':'visual system monitoring','sysmona':'auditory system monitoring','comma':'auditory communications','commv':'text communications','math':'mathematics','satmap':'satellite map','drive':'driving task'}
self.conditions = ['sysmonv-sysmona','math-satmap','math-drive','sysmona-drive','sysmona-satmap','sysmonv','sysmona','satmap','drive','math']
self.bottom_up_probability = 0.5 # probability that the switch stimulus is bottom-up
# (this is the full set of conditions that we're not using any more)
# self.conditions = ['sysmonv-sysmona','commv-comma','math-satmap','math-drive','comma-satmap','comma-drive','comma-sysmona','sysmona-drive','sysmona-satmap','sysmonv','sysmona','commv','comma','satmap','drive','math']
# ==============================
# === score logic parameters ===
# ==============================
self.score_params = {'initial_score':0, # the initial score
'sound_params':{'direction':-0.7}, # properties of the score response sound
'gain_file':'ding.wav', # sound file per point
'loss_file':'xBuzz01-rev.wav', # sound file for losses
'none_file':'click.wav', # file to play if no reward
'buzz_volume':0.4, # volume of the buzz (multiplied by the amount of loss)
'gain_volume':0.5, # volume of the gain sound
'ding_interval':0.1, # interval at which successive gain sounds are played... (if score is > 1)
                             'scorefile':'C:\\Studies\\DAS\\scoretable.txt'} # this is where the scores are logged
self.false_response_penalty = -1 # penalty due to false response in visual/auditory system monitoring
# ===========================================
# === visual system monitoring parameters ===
# ===========================================
self.sysmonv_rect = [-0.4,0.4,0.55,0.9]
self.sysmonv_timeout = 3
self.light_scale = 0.1
self.light_offset = 0.175
self.light_x = 0.09
        self.redlight_params = {'markerbase':1, # markers markerbase..markerbase+11 are used
'event_interval':lambda: random.uniform(15,35), # interval between two successive events
'focused':False,
'pic_off':'buzzer-grey.png', # picture to display for the disabled light
'pic_on':'buzzer-red-real.png', # picture to display for the enabled light
'snd_hit':'xClick01.wav', # sound when the user correctly detected the warning state
'pic_params':{'pos':[self.light_x-2*self.light_offset,0.8],'scale':self.light_scale}, # parameters for the picture() command
'response_key':'sysmonv-check', # key to press in case of an event
'timeout':2.5, # response timeout for the user
'hit_reward':4, # reward if hit
'miss_penalty':-2, # penalty if missed
'false_penalty':-1, # penalty for false positives
}
        self.greenlight_params = {'markerbase':20, # markers markerbase..markerbase+11 are used
'event_interval':lambda: random.uniform(21,41),# interval between two successive events
'focused':False,
'pic_off':'buzzer.png', # picture to display for the disabled light
'pic_on':'buzzer-grey.png', # picture to display for the enabled light
'snd_hit':'xClick01.wav', # sound when the user correctly detected the warning state
'pic_params':{'pos':[self.light_x-1*self.light_offset,0.8],'scale':self.light_scale}, # parameters for the picture() command
'response_key':'sysmonv-check', # key to press in case of an event
'timeout':2.5, # response timeout for the user
'hit_reward':4, # reward if hit
'miss_penalty':-2, # penalty if missed
'false_penalty':-1, # penalty for false positives
}
        self.bluelight_params = {'markerbase':40, # markers markerbase..markerbase+11 are used
'event_interval':lambda: random.uniform(19,44),# interval between two successive events
'focused':False,
'pic_off':'buzzer-grey.png', # picture to display for the disabled light
'pic_on':'buzzer-grey.png', # picture to display for the enabled light
'snd_hit':'xClick01.wav', # sound when the user correctly detected the warning state
'pic_params':{'pos':[self.light_x+0*self.light_offset,0.8],'scale':self.light_scale}, # parameters for the picture() command
'response_key':'sysmonv-check', # key to press in case of an event
'timeout':2.75, # response timeout for the user
'hit_reward':4, # reward if hit
'miss_penalty':-2, # penalty if missed
'false_penalty':-1, # penalty for false positives
'pic_tick_off':'buzzer-blue.png', # picture to display for the disabled light
'tick_rate':[1.2,0.1],
}
        self.yellowlight_params = {'markerbase':60, # markers markerbase..markerbase+11 are used
'event_interval':lambda: random.uniform(40,70),# interval between two successive events
'focused':False,
'pic_off':'buzzer-grey.png', # picture to display for the disabled light
'pic_on':'buzzer-yellow.png', # picture to display for the enabled light
'snd_hit':'xClick01.wav', # sound when the user correctly detected the warning state
'pic_params':{'pos':[self.light_x+1*self.light_offset,0.8],'scale':self.light_scale}, # parameters for the picture() command
'response_key':'sysmonv-check', # key to press in case of an event
'timeout':2.5, # response timeout for the user
'hit_reward':4, # reward if hit
'miss_penalty':-2, # penalty if missed
'false_penalty':-1 # penalty for false positives
}
self.button_sysmonv_par = {'frameSize':(-4.5,4.5,-0.45,0.95),'text':"Check",'scale':.075,'text_font':loader.loadFont('arial.ttf')}
self.button_sysmonv_pos = [0,0.63]
# =============================================
# === auditory system monitoring parameters ===
# =============================================
self.sysmona_timeout = 3
self.sysmona_rect = [0.1,0.4,-0.34,-0.64]
        self.warnsound_params = {'markerbase':80, # markers markerbase..markerbase+11 are used
'event_interval':lambda: random.uniform(15,35), # interval between two successive events
'focused':False,
                                 'snd_on':'buzzz.wav', # sound to play in case of an event
'response_key':'sysmona-check', # key to press in case of an event
'timeout':5.5, # response timeout for the user
'hit_reward':4, # reward if hit
'miss_penalty':-2, # penalty if missed
'false_penalty':-1, # penalty for false positives
}
        self.ticksound_params = {'markerbase':100, # markers markerbase..markerbase+11 are used
'event_interval':lambda: random.uniform(19,40), # interval between two successive events
'snd_params':{'volume':0.2,'direction':0.0}, # parameters for the sound() command
'focused':False,
'snd_on':None,
                                 'snd_tick_off':'xTick.wav', # optional ticking sound in off status
'response_key':'sysmona-check', # key to press in case of an event
'timeout':6.5, # response timeout for the user
'hit_reward':6, # reward if hit
'miss_penalty':-2, # penalty if missed
'false_penalty':-1, # penalty for false positives
'tick_rate':[0.7,0.1], # rate of the ticking...
}
self.button_sysmona_par = {'frameSize':(-2,2,-0.5,1),'text':'"Check"','scale':.075,'text_font':loader.loadFont('arial.ttf')}
self.button_sysmona_pos = [0.25,-0.34]
# ==============================
# === auditory comm elements ===
# ==============================
self.voice_params = {'direction':0,'volume':1}
self.commaud_params = {'markerbase':400, # base marker offset
'message_interval': lambda: random.uniform(7,8), # interval between message presentations
'response_timeout':6, # response timeout...
'lull_time': lambda: random.uniform(30,90), # duration of lulls, in seconds (drawn per lull)
'situation_time': lambda: random.uniform(25,45), # duration of developing situations, in seconds (drawn per situation)
'clearafter': 5, # clear the presenter after this many messages
'message_interval': lambda: random.uniform(5,8), # message interval, in s (drawn per message)
'other_callsign_fraction': lambda: random.uniform(0.3,0.5), # fraction of messages that are for other callsigns (out of all messages presented) (drawn per situation)
'no_callsign_fraction': lambda: random.uniform(0.25,0.35), # fraction, out of the messages for "other callsigns", of messages that have no callsign (drawn per situation)
'time_fraction_until_questions': lambda: random.uniform(0.1,0.2), # the fraction of time into the situation until the first question comes up (drawn per situation)
# in the tutorial mode, this should probably be close to zero
'questioned_fraction': lambda: random.uniform(0.6,0.8), # fraction of targeted messages that incur questions
}
self.button_comma_par = {'frameSize':(-2,2,-0.5,1),'text':'"Roger"','scale':.075,'text_font':loader.loadFont('arial.ttf')}
self.button_comma_pos = [-0.25,-0.34]
# ============================
# === visual comm elements ===
# ============================
self.scroll_pos = [-0.475,-0.4,-0.18]
self.scroll_params = {'width':28,'scale':0.035,'numlines':4,'height':4}
self.commvis_params = {'markerbase':300, # base marker offset
'clearafter': 5, # clear the presenter after this many messages
'message_interval': lambda: random.uniform(5,8), # message interval, in s (drawn per message)
'response_timeout':5, # response timeout...
'lull_time': lambda: random.uniform(30,90), # duration of lulls, in seconds (drawn per lull)
'situation_time': lambda: random.uniform(25,45), # duration of developing situations, in seconds (drawn per situation)
'message_interval': lambda: random.uniform(4,6), # message interval, in s (drawn per message)
'other_callsign_fraction': lambda: random.uniform(0.3,0.5), # fraction of messages that are for other callsigns (out of all messages presented) (drawn per situation)
'no_callsign_fraction': lambda: random.uniform(0.25,0.35), # fraction, out of the messages for "other callsigns", of messages that have no callsign (drawn per situation)
'time_fraction_until_questions': lambda: random.uniform(0.1,0.2), # the fraction of time into the situation until the first question comes up (drawn per situation)
# in the tutorial mode, this should probably be close to zero
'questioned_fraction': lambda: random.uniform(0.6,0.8), # fraction of targeted messages that incur questions
}
self.button_commv_par_y = {'frameSize':(-1.2,1.2,-0.35,0.85),'text':"Yes",'scale':.075,'text_font':loader.loadFont('arial.ttf')}
self.button_commv_par_n = {'frameSize':(-1,1,-0.35,0.85),'text':"No",'scale':.075,'text_font':loader.loadFont('arial.ttf')}
self.button_commv_par_s = {'frameSize':(-1.65,1.65,-0.35,0.85),'text':"Skip",'scale':.075,'text_font':loader.loadFont('arial.ttf')}
self.button_commv_pos_y = [-0.05,-0.44,-0.22]
self.button_commv_pos_n = [0.15,-0.44,-0.22]
self.button_commv_pos_s = [0.375,-0.44,-0.22]
# =======================
# === math task setup ===
# =======================
self.numpad_topleft = [-0.4,0.7] # top-left corner of the numpad
self.math_rect = [-0.52,0.52,0.9,0.15]
self.math_params = {'difficulty': 2, # difficulty level of the problems (determines the size of involved numbers)
'focused':True,
'problem_interval': lambda: random.uniform(3,12), # delay before a new problem appears after the previous one has been solved
'response_timeout': 10.0, # time within which the subject may respond to a problem
'gain_correct':5,
'loss_incorrect':-2,
'numpad_gridspacing': [0.16,-0.16], # spacing of the button grid
'numpad_buttonsize': [0.75,0.75], # size of the buttons
'numpad_textscale': 0.15 # scale of the text
}
self.math_display_par = {'scale':0.04, 'textcolor':[1,1,1,1],'framecolor':[0,0,0,1],'width':9,'height':10}
self.math_display_pos = [0.12,0.67]
# ================================
# === satellite map task setup ===
# ================================
self.satmap_frame = [0.35,0.65,0.57,0.925] # the display region in which to draw everything
self.satmap_rect = [-0.54,0.54,0.9,0.12] # the display region in which to draw everything
self.satmap_params = {'background':'satellite_baseline.png', # background image to use
'frame_boundary':0.2, # (invisible) zone around the display region in which things can move around and spawn
'focused':False,
# parameters of the target/non-target item processes
'clutter_params':{'pixelated':True,
'num_items':30}, # parameters for the clutter process
'target_params':{'pixelated':True,
'num_items':1,
'item_speed':lambda: random.uniform(0.1,0.25),
'item_spiral':lambda: [random.uniform(0,3.14),random.uniform(0.0075,0.0095),random.uniform(0.06,0.07)], # perform a spiraling motion with the given radius and angular velocity
}, # parameters for the target process
'intro_text':'Find the spiraling object!', # the text that should be displayed before the script starts
# situational control
'target_probability':0.5, # probability of a new situation being a target situation (vs. non-target situation)
'target_duration':lambda: random.uniform(3,6), # duration of a target situation
'nontarget_duration':lambda: random.uniform(5,15),# duration of a non-target situation
# end conditions
'end_trials':1000000, # number of situations to produce (note: this is not the number of targets)
'end_timeout':1000000, # lifetime of this stream, in seconds (the stream ends if the trials are exhausted)
# response control
'response_event':'satmap-target', # the event that is generated when the user presses the response button
'loss_misstarget':0, # the loss incurred by missing a target
'loss_nontarget':-1, # the loss incurred by a false detection
'gain_target':4, # the gain incurred by correctly spotting a target
}
# this button is drawn into the satmap and can currently not be clicked
self.button_satmap_par = {'pos':(0.31,0,0.4),'frameSize':(-2.4,2.4,-0.6,1.1),'sortOrder':10,'text':"Target",'scale':.075,'text_font':loader.loadFont('arial.ttf'),'command':messenger.send,'extraArgs':['satmap-target'],'rolloverSound':None,'clickSound':None}
self.button_satmap_pos = [0,0]
# this button is in 3-screen space and can be clicked; it is behind the other button
self.button_satmap2_par = {'frameSize':(-2.5,2.5,-0.4,0.9),'text':"",'scale':.075,'text_font':loader.loadFont('arial.ttf'),'command':messenger.send,'extraArgs':['satmap-target'],'rolloverSound':None,'clickSound':None}
self.button_satmap2_pos = [0.31,0.77]
# ===============================
# === city driving task setup ===
# ===============================
self.drive_frame = [0.35,0.65,0.2,0.55]
self.drive_rect = [-0.54,0.54,0.12,-0.65]
self.drive_params = {'focused':False,
'show_checkpoints':False,
# media
'envmodel':'big\\citty.egg', # the environment model to use
'trucksound':"Diesel_Truck_idle2.wav",# loopable truck sound....
'trucksound_volume':0.25, # volume of the sound
'trucksound_direction':0, # direction relative to listener
'target_model':"moneybag-rev.egg", # model of the target object
'target_scale':0.01, # scale of the target model
'target_offset':0.2, # y offset for the target object
# checkpoint logic
'points':[[-248.91,-380.77,4.812],[0,0,0]], # the sequence of nav targets...
'radius':10, # proximity to checkpoint at which it is considered reached... (meters)
# end conditions
'end_timeout':100000, # end the task after this time
# movement parameters
'acceleration':0.5, # acceleration during manual driving
'friction':0.95, # friction coefficient
'torque':1, # actually angular velocity during turning
'height':0.7}
self.button_drive_par = {'frameSize':(-2.5,2.5,-0.4,0.9),'text':"Report",'scale':.075,'text_font':loader.loadFont('arial.ttf')}
self.button_drive_pos = [0.31,0.025]
# ============================
# === main task parameters ===
# ============================
# focus stimuli
self.bu_drive_img = {'image':'salient_warning.png', # bottom-up driving task
'scale':0.25}
self.bu_satmap_img = {'image':'salient_warning.png', # bottom-up satmap task
'scale':0.25}
self.bu_math_img = {'image':'salient_warning.png', # bottom-up math task
'scale':0.15}
self.bu_sysv_img = {'image':'salient_warning.png', # bottom-up sysmonv task
'scale':0.15}
self.bu_sysmona_img = {'image':'salient_warning.png', # bottom-up sysmona task
'scale':0.15}
self.bu_comma_img = {'image':'salient_warning.png', # bottom-up comma task
'scale':0.15}
self.initial_layout_time = 5 # initial time after layout switch
# callsign setup
self.callsign_file = 'callsigns.txt'
self.numcallsigns = 6
# misc parameters
self.screen_offsets = [-1.13,0,1.13] # the three screen offsets for UI permutation...
self.developer = True
# voice control
self.voice_icon_enlarge_duration = 0.5
self.voice_icon_enlarge_size = 0.12
self.allow_speech = True
# set up some global text highlighting functionality
tpHighlight = TextProperties()
tpHighlight.setTextColor(1, 0, 0, 1)
tpHighlight.setSlant(0.3)
tpMgr = TextPropertiesManager.getGlobalPtr()
tpMgr.setProperties("highlight", tpHighlight)
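        # (Text submitted as "\1highlight\1...\2" is rendered with the red,
        # slanted properties registered above; see the ATTENTION messages sent
        # to the comm boxes in run().)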
# bci control
self.notification_cutoff = 0.2 # if the probability that a message was noticed is smaller than this, fire off a message
self.notice_probability = 0.5 # this is the bci variable
        self.notice_probability_cumulant = 0.5 # this is a smoothed version of the bci variable
self.notice_probability_history_mixin = 0.6 # this is an update factor that mixes in previous notice-probability estimates (from earlier messages) to get a smoothed update for the current one
self.notification_snd = 'xBleep.wav'
# inter-block pauses
self.pause_duration = lambda: random.uniform(40,60)
# ambience sound setup
self.ambience_sound = 'media\\ambience\\nyc_amb2.wav'
self.ambience_volume = 0.1
self.frames = []
def run(self):
try:
# init the randseed
if self.randseed is not None:
print "WARNING: Randomization of the experiment is currently bypassed."
random.seed(self.randseed)
self.marker(30000+self.randseed)
# =================================
# === Block schedule generation ===
# =================================
# generate the UI block schedule
layouts = [[0,1,2],[0,2,1],[1,0,2],[1,2,0],[2,0,1],[2,1,0]]
if self.uiblocks % len(layouts) > 0:
raise Exception('The # of UI blocks should be a multiple of 6')
layouts = layouts*(self.uiblocks/len(layouts))
random.shuffle(layouts)
# determine the sequence of focus conditions for each layout block
conditions = self.conditions*(1+self.uiblocks*self.focus_per_layout/len(self.conditions))
conditions = conditions[:self.uiblocks*self.focus_per_layout]
random.shuffle(conditions)
# re-group them by layout
focus_conditions = []
for k in range(len(layouts)):
focus_conditions.append(conditions[k*self.focus_per_layout : (1+k)*self.focus_per_layout])
if (k+1) % self.rest_every == 0:
focus_conditions[k].append('') # append resting...
# pre-pend rest to the first block
focus_conditions[0].insert(0,'')
# ================
# === Tutorial ===
# ================
if not self.developer:
self.write('Welcome to the MBF2 experiment B.')
self.write('Press the space bar when you are ready.','space')
# ===============================
# === One-time initialization ===
# ===============================
# set up the reward logic
self.rewardlogic = AudioRewardLogic(**self.score_params)
# load callsign table
self.callsigns = []
with open('media\\'+self.callsign_file,'r') as f:
for line in f:
self.callsigns.append(line.strip())
self.callsigns = self.callsigns[:self.numcallsigns]
# start some ambience sound loop
self.ambience = self.sound(self.ambience_sound,looping=True,volume=self.ambience_volume,direction=0)
# init speech control
if self.allow_speech:
try:
framework.speech.listenfor(['roger','check','yes','no','skip'],self.onspeech)
                except Exception:
                    print "Could not initialize speech control; falling back to touch screen only."
# initialize question counters
self.num_question_uv = [0]
self.num_question_lv = [0]
self.num_question_au = [0]
# =======================
# === block main loop ===
# =======================
# for each UI layout block...
for k in range(len(layouts)):
if (k+1) % self.rest_every == 0:
# insert pause
self.marker(1701)
self.write("You may now rest for a while...",3,scale=0.04,pos=[0,0.4])
self.show_score()
# main rest block: just sleep and let the center task do the rest
duration = self.pause_duration()
if self.waitfor('f9', duration):
self.rewardlogic.paused = True
self.marker(900)
self.write("Pausing now. Please press f9 again to continue.",10,scale=0.04,pos=[0,0.4],block=False)
self.waitfor('f9', 10000)
self.rewardlogic.paused = False
self.marker(19)
self.sound('nice_bell.wav')
self.write("The rest block has now ended.",2,scale=0.04,pos=[0,0.4])
# =======================================
# === New layout block initialization ===
# =======================================
if not self.developer:
for i in [3,2,1]:
self.write('New block begins in '+str(i))
self.marker(400+k)
layout = layouts[k]
# WARNING -- these are abstract & subject to layout permutation (names referring to some reference unpermuted layout)
left = self.screen_offsets[layout[0]]
center = self.screen_offsets[layout[1]]
right = self.screen_offsets[layout[2]]
# instantiate the center drive task
frameofs = center/3.35
drive_frame = [self.drive_frame[0] + frameofs,self.drive_frame[1] + frameofs,self.drive_frame[2],self.drive_frame[3]]
drive_rect = [self.drive_rect[0] + center,self.drive_rect[1] + center,self.drive_rect[2],self.drive_rect[3]]
self.drive = self.launch(CheckpointDriving(frame=drive_frame,text_pos=[center,-0.55],**self.drive_params))
self.button_drive = DirectButton(command=messenger.send,extraArgs=['drive-report'],rolloverSound=None,clickSound=None,
pos=(self.button_drive_pos[0]+center,0,self.button_drive_pos[1]),**self.button_drive_par)
# instantiate the satmap task
frameofs = center/3.35
satmap_frame = [self.satmap_frame[0] + frameofs,self.satmap_frame[1] + frameofs,self.satmap_frame[2],self.satmap_frame[3]]
satmap_rect = [self.satmap_rect[0] + center,self.satmap_rect[1] + center,self.satmap_rect[2],self.satmap_rect[3]]
self.satmap = self.launch(VisualSearchTask(self.rewardlogic,
frame=satmap_frame,
button_params=self.button_satmap_par,**self.satmap_params))
self.button_satmap2 = DirectButton(pos=(self.button_satmap2_pos[0]+center,0,self.button_satmap2_pos[1]),**self.button_satmap2_par)
# instantiate visual monitoring task
sysmonv_rect = [self.sysmonv_rect[0] + right,self.sysmonv_rect[1] + right,self.sysmonv_rect[2],self.sysmonv_rect[3]]
self.vismonwatcher = EventWatcher(eventtype='sysmonv-check',
handleduration=self.sysmonv_timeout,
defaulthandler=self.sysmonv_false_detection)
self.redlight = self.launch(WarningLightTask(self.rewardlogic,screen_offset=right,watcher=self.vismonwatcher,**self.redlight_params))
self.greenlight = self.launch(WarningLightTask(self.rewardlogic,screen_offset=right,watcher=self.vismonwatcher,**self.greenlight_params))
self.bluelight = self.launch(WarningLightTask(self.rewardlogic,screen_offset=right,watcher=self.vismonwatcher,**self.bluelight_params))
self.yellowlight = self.launch(WarningLightTask(self.rewardlogic,screen_offset=right,**self.yellowlight_params))
self.button_sysmonv = DirectButton(command=messenger.send,extraArgs=['sysmonv-check'],rolloverSound=None,clickSound=None,
pos=(self.button_sysmonv_pos[0]+right,0,self.button_sysmonv_pos[1]),**self.button_sysmonv_par)
# instantiate the auditory monitoring task
sysmona_rect = [self.sysmona_rect[0] + right,self.sysmona_rect[1] + right,self.sysmona_rect[2],self.sysmona_rect[3]]
self.audmonwatcher = EventWatcher(eventtype='sysmona-check',
handleduration=self.sysmona_timeout,
defaulthandler=self.sysmona_false_detection)
self.warnsound = self.launch(WarningSoundTask(self.rewardlogic,screen_offset=right,watcher=self.audmonwatcher,**self.warnsound_params))
self.ticksound = self.launch(WarningSoundTask(self.rewardlogic,screen_offset=right,watcher=self.audmonwatcher,**self.ticksound_params))
self.icon_sysmona = self.picture('sysmon-speaker.png',100000,block=False,pos=[self.button_sysmona_pos[0]+right,self.button_sysmona_pos[1]-0.15],scale=0.1)
# determine callsign
targetsignidx = random.choice(xrange(len(self.callsigns)))
self.marker(600+targetsignidx)
targetsign = self.callsigns[targetsignidx]
# and display it
self.csign = self.write('Callsign: '+targetsign,10000,block=False,pos=[self.scroll_pos[0]+self.screen_offsets[layout[0]],self.scroll_pos[2]+0.06],scale=0.04,align='left',fg=[1,1,1,1])
# instantiate the vis comm task
self.commbox1 = ScrollPresenter(pos=[self.scroll_pos[0]+self.screen_offsets[layout[0]],self.scroll_pos[1]],**self.scroll_params)
self.commvis1 = self.launch(AdvCommScheduler(self.commbox1,self.rewardlogic,targetsign=targetsign,numcallsigns=self.numcallsigns,callsigns=self.callsign_file,commands='sentences_with_answers1.txt',events=['v1_y','v1_n','v1_s'],callback_func=lambda: self.check_bci("lower visual"),num_question=self.num_question_lv,**self.commvis_params))
self.button_commv1_y = DirectButton(command=messenger.send,extraArgs=['v1_y'],rolloverSound=None,clickSound=None,
pos=(self.button_commv_pos_y[0]+left,0,self.button_commv_pos_y[1]),**self.button_commv_par_y)
self.button_commv1_n = DirectButton(command=messenger.send,extraArgs=['v1_n'],rolloverSound=None,clickSound=None,
pos=(self.button_commv_pos_n[0]+left,0,self.button_commv_pos_n[1]),**self.button_commv_par_n)
self.button_commv1_s = DirectButton(command=messenger.send,extraArgs=['v1_s'],rolloverSound=None,clickSound=None,
pos=(self.button_commv_pos_s[0]+left,0,self.button_commv_pos_s[1]),**self.button_commv_par_s)
self.commbox2 = ScrollPresenter(pos=[self.scroll_pos[0]+self.screen_offsets[layout[0]],self.scroll_pos[2]],**self.scroll_params)
self.commvis2 = self.launch(AdvCommScheduler(self.commbox2,self.rewardlogic,targetsign=targetsign,numcallsigns=self.numcallsigns,callsigns=self.callsign_file,commands='sentences_with_answers2.txt',events=['v2_y','v2_n','v2_s'],callback_func=lambda: self.check_bci("upper visual"),num_question=self.num_question_uv,**self.commvis_params))
self.button_commv2_y = DirectButton(command=messenger.send,extraArgs=['v2_y'],rolloverSound=None,clickSound=None,
pos=(self.button_commv_pos_y[0]+left,0,self.button_commv_pos_y[2]),**self.button_commv_par_y)
self.button_commv2_n = DirectButton(command=messenger.send,extraArgs=['v2_n'],rolloverSound=None,clickSound=None,
pos=(self.button_commv_pos_n[0]+left,0,self.button_commv_pos_n[2]),**self.button_commv_par_n)
self.button_commv2_s = DirectButton(command=messenger.send,extraArgs=['v2_s'],rolloverSound=None,clickSound=None,
pos=(self.button_commv_pos_s[0]+left,0,self.button_commv_pos_s[2]),**self.button_commv_par_s)
# instantiate the aud comm task
self.commsnd = AudioPresenter(**self.voice_params)
self.commaud = self.launch(AdvCommScheduler(self.commsnd,self.rewardlogic,targetsign=targetsign,numcallsigns=self.numcallsigns,callsigns=self.callsign_file,commands='sentences_with_answers3.txt',callback_func=lambda: self.check_bci("audio"),num_question=self.num_question_au,**self.commaud_params))
self.icon_comma = self.picture('comma-speaker.png',100000,block=False,pos=[self.button_comma_pos[0]+right,self.button_comma_pos[1]-0.15],scale=0.1)
# instantiate the math task
math_rect = [self.math_rect[0] + left,self.math_rect[1] + left,self.math_rect[2],self.math_rect[3]]
self.mathdisplay = TextPresenter(pos=[self.math_display_pos[0]+left,self.math_display_pos[1]],**self.math_display_par)
self.math = self.launch(MathScheduler(self.rewardlogic,self.mathdisplay,
numpad_topleft=[self.numpad_topleft[0] + self.screen_offsets[layout[0]],self.numpad_topleft[1]],**self.math_params))
# wait until the layout has sunk in...
self.sleep(self.initial_layout_time)
# for each focus condition
prevfocus = ''
for focus in focus_conditions[k]:
# =======================
# === New focus block ===
# =======================
# reconfigure focused state for each object
self.drive.focused = focus.find('drive')>=0
self.satmap.focused = focus.find('satmap')>=0
self.redlight.focused = focus.find('sysmonv')>=0
self.greenlight.focused = focus.find('sysmonv')>=0
self.bluelight.focused = focus.find('sysmonv')>=0
self.yellowlight.focused = focus.find('sysmonv')>=0
self.warnsound.focused = focus.find('sysmona')>=0
self.ticksound.focused = focus.find('sysmona')>=0
self.math.focused = focus.find('math')>=0
# present a switch stimulus
if prevfocus is None or prevfocus == '' or random.random() < self.bottom_up_probability:
# bottom-up stimulus
if focus.find('drive')>=0:
self.picture(block=False,pos=[center,-0.1],**self.bu_drive_img)
if focus.find('satmap')>=0:
self.picture(block=False,pos=[0,0],parent=self.satmap.renderviewport,**self.bu_satmap_img)
if focus.find('commv')>=0:
self.commbox1.submit_wait("\1highlight\1ATTENTION ATTENTION ATTENTION\2", self)
self.commbox2.submit_wait("\1highlight\1ATTENTION ATTENTION ATTENTION\2", self)
if focus.find('math')>=0:
self.picture(block=False,pos=[left,0.6],**self.bu_math_img)
if focus.find('sysmonv')>=0:
self.picture(block=False,pos=[right,0.65],**self.bu_sysv_img)
if focus.find('sysmona')>=0:
self.sound('xHyprBlip.wav',volume=0.3)
self.picture(block=False,pos=[self.button_sysmona_pos[0]+right,self.button_sysmona_pos[1]-0.15],**self.bu_sysmona_img)
if focus.find('comma')>=0:
self.picture(block=False,pos=[self.button_comma_pos[0]+right,self.button_comma_pos[1]-0.15],**self.bu_comma_img)
self.commsnd.submit_wait("ATTENTION COMMUNICATIONS\2", self)
else:
# top-down stimulus; build a text instruction
instruction = "Please continue with"
spl = focus.split('-')
if len(spl) == 1:
articles = [' the ']
elif len(spl) == 2:
articles = [' the ',' and the ']
else:
articles = [' the '] + [', the ']*(len(spl)-2) + [' and the ']
# use a dedicated index variable so the enclosing layout index k is not clobbered
for idx in xrange(len(spl)):
instruction += articles[idx] + self.tasknames[spl[idx]]
instruction += '.'
# ... and insert it on the respective displays
if prevfocus.find('math')>=0:
self.write(instruction,5,block=False,pos=[left,0.9],scale=0.04,wordwrap=25)
if prevfocus.find('commv')>=0:
self.commbox1.submit_wait(instruction,self,3,3)
self.commbox2.submit_wait(instruction,self,3,3)
if prevfocus.find('comma')>=0:
self.commsnd.submit_wait(instruction,self,6,6)
if prevfocus.find('sysmona')>=0:
self.commsnd.submit_wait(instruction,self,6,6)
if prevfocus.find('sysmonv')>=0:
self.write(instruction,5,block=False,pos=[right,0.95],scale=0.04,wordwrap=25)
if prevfocus.find('drive')>=0:
self.write(instruction,5,block=False,pos=[center,-0.25],scale=0.04,wordwrap=25)
if prevfocus.find('satmap')>=0:
self.write(instruction,5,block=False,pos=[center,0.35],scale=0.04,wordwrap=25)
# ================================================
# === wait for the duration of the focus block ===
# ================================================
duration = self.focus_duration()
# smoothly fade frames in around the hot spots
# not the finest way to do it, but gets the job done for now
self.sleep(3)
for alpha in [j/10.0 for j in range(1,11)]:
if focus.find('drive') >= 0:
self.frame(drive_rect,duration=duration-8,block=False,color=[1,1,1,alpha])
if focus.find('satmap') >= 0:
self.frame(satmap_rect,duration=duration-8,block=False,color=[1,1,1,alpha])
if focus.find('math') >= 0:
self.frame(math_rect,duration=duration-8,block=False,color=[1,1,1,alpha])
if focus.find('sysmonv') >= 0:
self.frame(sysmonv_rect,duration=duration-8,block=False,color=[1,1,1,alpha])
if focus.find('sysmona') >= 0:
self.frame(sysmona_rect,duration=duration-8,block=False,color=[1,1,1,alpha])
self.sleep(0.1)
self.sleep(duration-5-3)
prevfocus = focus
# ======================================
# === end of the screen layout block ===
# ======================================
self.redlight.cancel()
self.greenlight.cancel()
self.bluelight.cancel()
self.yellowlight.cancel()
self.warnsound.cancel()
self.ticksound.cancel()
self.commvis1.cancel()
self.commvis2.cancel()
self.commaud.cancel()
self.math.cancel()
self.satmap.cancel()
self.drive.cancel()
self.sleep(0.1)
# and clear display objects
self.clear_objects()
finally:
# ==========================
# === main task shutdown ===
# ==========================
try:
self.clear_objects()
except Exception:
pass
def sysmonv_false_detection(self):
""" Event handler for false system-monitoring responses (if not focused). """
self.marker(701)
self.rewardlogic.score_event(self.false_response_penalty)
def sysmona_false_detection(self):
""" Event handler for false system-monitoring responses (if not focused). """
self.marker(702)
self.rewardlogic.score_event(self.false_response_penalty)
def onspeech(self,phrase,listener):
"""Dispatch speech commands into regular messages."""
if phrase.lower() == 'roger':
self.send_message('comma-roger')
self.icon_comma.setScale(self.voice_icon_enlarge_size)
self.icon_comma_reset_scale_at = time.time() + self.voice_icon_enlarge_duration
taskMgr.doMethodLater(self.voice_icon_enlarge_duration, self.reset_comma, 'reset_comma()')
if phrase.lower() == 'check':
self.send_message('sysmona-check')
self.icon_sysmona.setScale(self.voice_icon_enlarge_size)
self.icon_sysmona_reset_scale_at = time.time() + self.voice_icon_enlarge_duration
taskMgr.doMethodLater(self.voice_icon_enlarge_duration, self.reset_sysmona, 'reset_sysmona()')
if phrase.lower() == 'yes':
self.send_message('y')
if phrase.lower() == 'no':
self.send_message('n')
if phrase.lower() == 'skip':
self.send_message('s')
def reset_comma(self,task):
"""Part of a graphical gimmick."""
if time.time() >= self.icon_comma_reset_scale_at-0.1:
self.icon_comma.setScale(0.1)
return task.done
def reset_sysmona(self,task):
"""Part of a graphical gimmick."""
if time.time() >= self.icon_sysmona_reset_scale_at-0.1:
self.icon_sysmona.setScale(0.1)
return task.done
def clear_objects(self):
""" Destroy on-screen objects for shutdown / reset. """
# remove event watchers
self.vismonwatcher.destroy()
self.audmonwatcher.destroy()
# remove buttons
self.icon_sysmona.destroy()
self.icon_comma.destroy()
self.button_commv1_y.destroy()
self.button_commv1_n.destroy()
self.button_commv1_s.destroy()
self.button_commv2_y.destroy()
self.button_commv2_n.destroy()
self.button_commv2_s.destroy()
self.button_sysmonv.destroy()
self.button_satmap2.destroy()
self.button_drive.destroy()
# remove presenters
self.mathdisplay.destroy()
self.commbox1.destroy()
self.commbox2.destroy()
self.commsnd.destroy()
self.csign.destroy()
def check_bci(self,which):
""" Query the BCI to determine whether the subject noticed the message. """
self.notice_probability_cumulant = (self.notice_probability_cumulant * self.notice_probability_history_mixin +
self.notice_probability * (1 - self.notice_probability_history_mixin))
if self.notice_probability_cumulant < self.notification_cutoff:
self.write("Please don't forget to pay attention to your " +which+ " messages.", 1, False, [0,-0.75])
self.sound(self.notification_snd, False, 0.5, 0)
def show_score(self):
""" Display the score to the subject & log it."""
self.write("Your score is: " + str(self.rewardlogic.score*10),5,scale=0.1,pos=[0,0.8])
self.rewardlogic.log_score()
|
11540147
|
from django.db import models
class ReportingAgencyTas(models.Model):
"""
Model representing reporting data for appropriation and object class program activity values grouped by TAS and
period
"""
reporting_agency_tas_id = models.AutoField(primary_key=True)
toptier_code = models.TextField()
fiscal_year = models.IntegerField()
fiscal_period = models.IntegerField()
tas_rendering_label = models.TextField()
appropriation_obligated_amount = models.DecimalField(max_digits=23, decimal_places=2)
object_class_pa_obligated_amount = models.DecimalField(max_digits=23, decimal_places=2)
diff_approp_ocpa_obligated_amounts = models.DecimalField(max_digits=23, decimal_places=2)
class Meta:
db_table = "reporting_agency_tas"
indexes = [
models.Index(fields=["fiscal_year", "fiscal_period", "toptier_code"], name="reporting_agency_tas_group_idx")
]
class ReportingAgencyMissingTas(models.Model):
"""
Model representing missing reporting data for appropriation and object class program activity values grouped by TAS and
period
"""
reporting_agency_missing_tas_id = models.AutoField(primary_key=True)
toptier_code = models.TextField()
fiscal_year = models.IntegerField()
fiscal_period = models.IntegerField()
tas_rendering_label = models.TextField()
obligated_amount = models.DecimalField(max_digits=23, decimal_places=2)
class Meta:
db_table = "reporting_agency_missing_tas"
indexes = [
models.Index(fields=["fiscal_year", "fiscal_period", "toptier_code"], name="rpt_agency_missing_tas_grp_idx")
]
class ReportingAgencyOverview(models.Model):
"""
Model representing overview reporting data for appropriation and object class program activity values, grouped
by agency and period
"""
reporting_agency_overview_id = models.AutoField(primary_key=True)
toptier_code = models.TextField()
fiscal_year = models.IntegerField()
fiscal_period = models.IntegerField()
total_dollars_obligated_gtas = models.DecimalField(max_digits=23, decimal_places=2, null=True)
total_budgetary_resources = models.DecimalField(max_digits=23, decimal_places=2, null=True)
total_diff_approp_ocpa_obligated_amounts = models.DecimalField(max_digits=23, decimal_places=2, null=True)
unlinked_procurement_c_awards = models.IntegerField(null=True)
unlinked_assistance_c_awards = models.IntegerField(null=True)
unlinked_procurement_d_awards = models.IntegerField(null=True)
unlinked_assistance_d_awards = models.IntegerField(null=True)
linked_procurement_awards = models.IntegerField(null=True)
linked_assistance_awards = models.IntegerField(null=True)
class Meta:
db_table = "reporting_agency_overview"
indexes = [
models.Index(fields=["fiscal_year", "fiscal_period", "toptier_code"], name="reporting_agency_ovr_group_idx")
]
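# A minimal query sketch (assumption: a configured Django project with these
# models migrated; the filter values are illustrative). The composite index
# declared above covers exactly this fiscal year/period/agency lookup.
def example_overview_lookup(fiscal_year=2021, fiscal_period=12, toptier_code="012"):
    return ReportingAgencyOverview.objects.filter(
        fiscal_year=fiscal_year, fiscal_period=fiscal_period, toptier_code=toptier_code
    )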
|
11540151
|
import json
import io
from PythonBridge.object_registry import registry
mapper = {}
def addMapping(key_type, mapping_function):
mapper[key_type] = mapping_function
class JsonEncoder(json.JSONEncoder):
def __init__(self, *args, **kwargs):
json.JSONEncoder.__init__(self, *args, **kwargs)
self.mapper = mapper
def default(self, obj):
if type(obj) in self.mapper:
return self.mapper[type(obj)](obj)
return {
'__pyclass__': type(obj).__name__,
'__pyid__': registry().register(obj)
}
class JsonSerializer:
def serialize(self, obj):
return json.dumps(obj, cls=JsonEncoder)
def deserialize(self, text):
return json.loads(text)
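# A minimal demonstration (assumption: run as a script; `complex` stands in for
# any custom type you want a JSON mapping for). Mapped types serialize via their
# registered function; unmapped objects fall back to the registry proxy dict.
if __name__ == '__main__':
    addMapping(complex, lambda c: {'re': c.real, 'im': c.imag})
    serializer = JsonSerializer()
    print(serializer.serialize(3 + 4j))    # -> {"re": 3.0, "im": 4.0}
    print(serializer.serialize(object()))  # -> {"__pyclass__": ..., "__pyid__": ...}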
|
11540163
|
import glycowork
from glycowork.motif.annotate import *
glycans = ['Man(a1-3)[Man(a1-6)][Xyl(b1-2)]Man(b1-4)GlcNAc(b1-4)[Fuc(a1-3)]GlcNAc',
'Man(a1-2)Man(a1-2)Man(a1-3)[Man(a1-3)Man(a1-6)]Man(b1-4)GlcNAc(b1-4)GlcNAc',
'GalNAc(a1-4)GlcNAcA(a1-4)[GlcN(b1-7)]Kdo(a2-5)[Kdo(a2-4)]Kdo(a2-6)GlcOPN(b1-6)GlcOPN',
'Man(a1-3)[Man(a1-6)][Xyl(b1-2)]Man(b1-4)GlcNAc(bond)[Fuc(a1-3)]GlcNAc']
print("Annotate Test")
print(annotate_dataset(glycans))
print("Annotate Test with Graph Features")
print(annotate_dataset(glycans, feature_set = ['known', 'graph']))
print("Annotate Test with Everything")
print(annotate_dataset(glycans, feature_set = ['known', 'graph', 'exhaustive']))
print("Annotate Test with Wildcard Matching")
print(annotate_dataset(glycans, extra = 'wildcards', wildcard_list = ['bond'],
condense = True))
print("Annotate Test with Position Matching")
print(annotate_dataset(glycans, extra = 'termini',
condense = True))
|
11540197
|
from neuron import h, crxd as rxd
import numpy
import __main__
name = __main__.__file__
if name[-3:] == ".py":
name = name[:-3]
h.load_file("stdrun.hoc")
dend1 = h.Section()
dend1.diam = 2
dend1.nseg = 101
dend1.L = 50
dend2 = h.Section()
dend2.diam = 2
dend2.nseg = 101
dend2.L = 50
dend2.connect(dend1)
diff_constant = 1
h.cvode_active(True)
r = rxd.Region(h.allsec(), dx=0.25)
rxd.set_solve_type([dend1], dimension=3)
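# hybrid model: set_solve_type puts dend1 on a 3D voxel grid while dend2
# keeps the default 1D discretization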
ca = rxd.Species(
r,
d=diff_constant,
atolscale=0.1,
initial=lambda node: 1
if (0.8 < node.x and node.segment in dend1)
or (node.x < 0.2 and node.segment in dend2)
else 0,
)
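# Bistable kinetics: d[ca]/dt = -ca*(1-ca)*(0.01-ca) has stable fixed points at
# ca=0 and ca=1 and an unstable threshold at ca=0.01, so the regions initialized
# to 1 launch traveling wavefronts into the rest of the dendrites.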
bistable_reaction = rxd.Rate(ca, -ca * (1 - ca) * (0.01 - ca))
h.finitialize()
if __name__ == "__main__":
from matplotlib import pyplot
for t in [25, 50, 75, 100, 125]:
h.continuerun(t)
pyplot.plot(
[nd.x for nd in ca.nodes if nd.segment in dend1]
+ [1 + nd.x for nd in ca.nodes if nd.segment in dend2],
[nd.concentration for nd in ca.nodes if nd.segment in dend1]
+ [nd.concentration for nd in ca.nodes if nd.segment in dend2],
".",
)
pyplot.tight_layout()
pyplot.savefig("{0}.png".format(name))
pyplot.show()
else:
for t in [25, 50, 75, 100, 125]:
h.continuerun(t)
|
11540198
|
import os
from sys import stdout
STYLE = {'None' : '0', 'bold' : '1', 'disable' : '2',
'underline' : '4', 'blink' : '5', 'reverse' : '7',
'invisible' : '8', 'strike' : '9'}
FG = {'None' : '', 'gray' : ';30', 'red' : ';31',
'green' : ';32', 'yellow': ';33', 'blue' : ';34',
'purple': ';35', 'cyan' : ';36', 'white' : ';39'}
BG = {'None' : '', 'black' : ';40', 'red' : ';41',
'green' : ';42', 'orange': ';43', 'blue' : ';44',
'purple': ';45', 'cyan' : ';46', 'gray' : ';47',}
def code_gen(data, style, color, highlight, windows=False):
return data if windows else '\033[0{}{}{}m{}\033[0m'.format(STYLE[style], FG[color], BG[highlight], data)
def highlight(data, fg='blue', style='bold', bg='None', windows=False):
return code_gen(data, style, fg, bg, windows=windows or os.name == 'nt')
def colored(data, fg='None', style='None', bg='None'):
stdout.write("{}\n".format(code_gen(data, style, fg, bg)))
def bullet(data, bullet='[*] ', bullet_fg='blue', bullet_style='bold', bullet_bg='None', fg='None', style='None', bg='None'):
stdout.write("{}{}\n".format(code_gen(bullet, bullet_style, bullet_fg, bullet_bg), code_gen(data, fg, style, bg)))
|
11540217
|
import logging
import os
# Skip bson in requirements , pymongo provides
# noinspection PyPackageRequirements
from bson import decode_file_iter
from bson.codec_options import CodecOptions
from mongodb_consistent_backup.Errors import Error
from mongodb_consistent_backup.Oplog import Oplog
from mongodb_consistent_backup.Pipeline import PoolThread
class ResolverThread(PoolThread):
def __init__(self, config, state, uri, tailed_oplog, mongodump_oplog, max_end_ts, compression='none'):
super(ResolverThread, self).__init__(self.__class__.__name__, compression)
self.config = config
self.state = state
self.uri = uri
self.tailed_oplog = tailed_oplog
self.mongodump_oplog = mongodump_oplog
self.max_end_ts = max_end_ts
self.compression_method = compression
# Pool threads break self.config unless flattened to a normal dict:
self.flush_docs = self.config['oplog']['flush']['max_docs']
self.flush_secs = self.config['oplog']['flush']['max_secs']
self.oplogs = {}
self.last_ts = None
self.changes = 0
self.stopped = False
def run(self):
try:
self.oplogs['backup'] = Oplog(self.mongodump_oplog['file'], self.do_gzip(), 'a+', self.flush_docs, self.flush_secs)
self.oplogs['tailed'] = Oplog(self.tailed_oplog['file'], self.do_gzip())
logging.info("Resolving oplog for %s to max ts: %s" % (self.uri, self.max_end_ts))
self.state.set('running', True)
self.state.set('first_ts', self.mongodump_oplog['first_ts'])
if not self.state.get('first_ts'):
self.state.set('first_ts', self.tailed_oplog['first_ts'])
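# Replay tailed-oplog entries newer than the mongodump oplog's last ts but no
# later than the shared max end ts, so the resolved backup oplog stops at a
# point consistent across the backup set.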
for change in decode_file_iter(self.oplogs['tailed'], CodecOptions(unicode_decode_error_handler="ignore")):
self.last_ts = change['ts']
if not self.mongodump_oplog['last_ts'] or self.last_ts > self.mongodump_oplog['last_ts']:
if self.last_ts < self.max_end_ts:
self.oplogs['backup'].add(change)
self.changes += 1
elif self.last_ts > self.max_end_ts:
break
self.state.set('count', self.mongodump_oplog['count'] + self.changes)
self.state.set('last_ts', self.last_ts)
self.state.set('running', False)
self.exit_code = 0
except Exception as e:
raise Error("Resolving of oplogs failed! Error: %s" % e)
finally:
self.close()
if self.exit_code == 0:
logging.info("Applied %i oplog changes to %s oplog, end ts: %s" % (self.changes, self.uri, self.last_ts))
return self.uri.str()
def close(self):
if len(self.oplogs) > 0 and not self.stopped:
logging.debug("Closing oplog file handles")
for oplog in self.oplogs:
self.oplogs[oplog].close()
self.stopped = True
if 'file' in self.tailed_oplog and os.path.isfile(self.tailed_oplog['file']):
logging.debug("Removing temporary/tailed oplog file: %s" % self.tailed_oplog['file'])
os.remove(self.tailed_oplog['file'])
|
11540235
|
from __future__ import print_function
import sys, os
import argparse
import json
from random import random
from conrob_pybullet.ss_pybullet.pybullet_tools.utils import connect, disconnect, wait_for_user, LockRenderer, \
has_gui, remove_body, set_camera_pose, get_movable_joints, set_joint_positions, \
wait_for_duration, point_from_pose, get_link_pose, link_from_name, add_line, user_input,\
HideOutput, load_pybullet, create_obj, draw_pose, add_body_name, get_pose, \
pose_from_tform, invert, multiply, set_pose, plan_joint_motion, get_joint_positions, \
add_fixed_constraint, remove_fixed_constraint, Attachment, create_attachment, \
pairwise_collision, set_color
from choreo.choreo_utils import parse_transform
try:
from py_vhacd import compute_convex_decomp
except ImportError as e:
print('\x1b[6;30;43m' + '{}'.format(e) + '\x1b[0m')
raise
PICKNPLACE_DIR = 'C:\\Users\\harry\\Documents\\pb-construction\\pychoreo\\assembly_instances\\picknplace'
PICKNPLACE_FILENAMES = {
'toggle_rebar_cage_1': 'toggle_rebar_cage_1.json'
}
scale_map = {
'millimeter' : 1e-3,
'meter' : 1.0,
}
def add_sub_id_tag(obj_key, sub_mesh_id, suffix='.obj'):
return obj_key + '_' + str(sub_mesh_id) + suffix
def extract_file_name(str_key):
key_sep = str_key.split('.')
return key_sep[0]
def rebuild_pkg_w_convex_collision_objects(instance_name, decomp_res=300000, verbose=True, write_log=False):
instance_directory = os.path.join(PICKNPLACE_DIR, instance_name)
print('Name: {}'.format(instance_name))
json_file_path = os.path.join(instance_directory, 'json', PICKNPLACE_FILENAMES[instance_name])
with open(json_file_path, 'r') as f:
json_data = json.loads(f.read())
mesh_dir = os.path.join(instance_directory, 'meshes', 'collision')
# decompose element geometries
for e_id, e in json_data['sequenced_elements'].items():
for so_id, so in e['element_geometry_file_names'].items():
so_full = so['full_obj']
obj_name = extract_file_name(so_full)
input_path = os.path.join(mesh_dir, so_full)
output_path = input_path
log_path = os.path.join(mesh_dir, obj_name + '.log') if write_log else ''
print('computing: {}'.format(obj_name))
success, mesh_verts, _ = compute_convex_decomp(input_path, output_path, log_path, resolution=decomp_res, verbose=verbose)
assert(0 == success)
n_convex_hulls = len(mesh_verts)
print('V-HACD done! # of convex hulls: {}'.format(n_convex_hulls))
so['convex_decomp'] = []
for i in range(n_convex_hulls):
so['convex_decomp'].append(add_sub_id_tag(obj_name, i))
# decompose static collision objects
for so_name, so_dict in json_data['static_obstacles'].items():
for so_id, so in so_dict.items():
so_full = so['full_obj']
obj_name = extract_file_name(so_full)
input_path = os.path.join(mesh_dir, so_full)
output_path = input_path
log_path = os.path.join(mesh_dir, obj_name + '.log') if write_log else ''
print('computing: {}'.format(obj_name))
success, mesh_verts, _ = compute_convex_decomp(input_path, output_path, log_path, resolution=decomp_res, verbose=verbose)
assert(0 == success)
n_convex_hulls = len(mesh_verts)
print('V-HACD done! # of convex hulls: {}'.format(n_convex_hulls))
so['convex_decomp'] = []
for i in range(n_convex_hulls):
so['convex_decomp'].append(add_sub_id_tag(obj_name, i))
# overwrite data
with open(json_file_path, 'w') as outfile:
json.dump(json_data, outfile, indent=4)
return json_data
def decompose_collision_object(instance_name, object_id, object_type='so', decomp_res=300000, verbose=True, write_log=False):
instance_directory = os.path.join(PICKNPLACE_DIR, instance_name)
print('Name: {}'.format(instance_name))
json_file_path = os.path.join(instance_directory, 'json', PICKNPLACE_FILENAMES[instance_name])
with open(json_file_path, 'r') as f:
json_data = json.loads(f.read())
mesh_dir = os.path.join(instance_directory, 'meshes', 'collision')
# decompose element geometries
if object_type == 'e':
e = json_data['sequenced_elements']['e_' + str(object_id)]
for so_id, so in e['element_geometry_file_names'].items():
so_full = so['full_obj']
obj_name = extract_file_name(so_full)
input_path = os.path.join(mesh_dir, so_full)
output_path = input_path
log_path = os.path.join(mesh_dir, obj_name + '.log') if write_log else ''
print('computing: {}'.format(obj_name))
success, mesh_verts, _ = compute_convex_decomp(input_path, output_path, log_path, resolution=decomp_res, verbose=verbose)
assert(0 == success)
n_convex_hulls = len(mesh_verts)
print('V-HACD done! # of convex hulls: {}'.format(n_convex_hulls))
so['convex_decomp'] = []
for i in range(n_convex_hulls):
so['convex_decomp'].append(add_sub_id_tag(obj_name, i))
# decompose static collision objects
if object_type == 'so':
so_dict = json_data['static_obstacles']['static_obstacle_' + str(object_id)]
for so_id, so in so_dict.items():
so_full = so['full_obj']
obj_name = extract_file_name(so_full)
input_path = os.path.join(mesh_dir, so_full)
output_path = input_path
log_path = os.path.join(mesh_dir, obj_name + '.log') if write_log else ''
print('computing: {}'.format(obj_name))
success, mesh_verts, _ = compute_convex_decomp(input_path, output_path, log_path, resolution=decomp_res, verbose=verbose)
assert(0 == success)
n_convex_hulls = len(mesh_verts)
print('V-HACD done! # of convex hulls: {}'.format(n_convex_hulls))
so['convex_decomp'] = []
for i in range(n_convex_hulls):
so['convex_decomp'].append(add_sub_id_tag(obj_name, i))
# overwrite data
with open(json_file_path, 'w') as outfile:
json.dump(json_data, outfile, indent=4)
return json_data
################################
def main():
parser = argparse.ArgumentParser()
# toggle_rebar_cage_1 |
parser.add_argument('-p', '--problem', default='toggle_rebar_cage_1', help='The name of the problem to rebuild')
parser.add_argument('-res', '--res', default=100000, help='voxel resolution for V-HACD, default=100000')
parser.add_argument('-ot', '--object_type', default='', help="decompose a specific object type: 'e' (element) or 'so' (static obstacle); default: none")
parser.add_argument('-id', '--object_id', default=-1, help='decompose a specific object id; default: -1 (disabled)')
parser.add_argument('-v', '--viewer', action='store_true', help='Enables the pybullet viewer')
parser.add_argument('-nrb', '--not_rebuild', action='store_false', help='do not rebuild the pkg; parse an existing one')
parser.add_argument('-nq', '--not_quiet', action='store_true', help='verbose output')
args = parser.parse_args()
print('Arguments:', args)
instance_directory = os.path.join(PICKNPLACE_DIR, args.problem)
if args.not_rebuild:
if args.object_type and int(args.object_id) >= 0:
json_data = decompose_collision_object(args.problem, object_id=int(args.object_id), object_type=args.object_type, decomp_res=int(args.res), verbose=args.not_quiet)
else:
json_data = rebuild_pkg_w_convex_collision_objects(args.problem, decomp_res=int(args.res), verbose=args.not_quiet)
else:
json_file_path = os.path.join(instance_directory, 'json', PICKNPLACE_FILENAMES[args.problem])
with open(json_file_path, 'r') as f:
json_data = json.loads(f.read())
# visualization
connect(use_gui=args.viewer)
if args.viewer:
obj_directory = os.path.join(instance_directory, 'meshes', 'collision')
scale = scale_map[json_data['unit']]
# element geometry
for e_id, json_element in json_data['sequenced_elements'].items():
index = json_element['object_id']
# TODO: transform geometry based on json_element['parent_frame']
obj_from_ee_grasp_poses = [pose_from_tform(parse_transform(json_tf)) \
for json_tf in json_element['grasps']['ee_poses']]
# pick_grasp_plane is at the top of the object with z facing downwards
# ee_from_obj = invert(world_from_obj_pick) # Using pick frame
pick_parent_frame = \
pose_from_tform(parse_transform(json_element['assembly_process']['pick']['parent_frame']))
world_from_obj_pick = \
multiply(pick_parent_frame, pose_from_tform(parse_transform(json_element['assembly_process']['pick']['object_target_pose'])))
place_parent_frame = \
pose_from_tform(parse_transform(json_element['assembly_process']['place']['parent_frame']))
world_from_obj_place = \
multiply(place_parent_frame, pose_from_tform(parse_transform(json_element['assembly_process']['place']['object_target_pose'])))
draw_pose(world_from_obj_pick, length=0.04)
draw_pose(world_from_obj_place, length=0.04)
so_dict = json_element['element_geometry_file_names']
for sub_id, so in so_dict.items():
pick_full_body = create_obj(os.path.join(obj_directory, so['full_obj']), scale=scale, color=(0, 0, 1, 0.4))
add_body_name(pick_full_body, 'e_' + str(index))
set_pose(pick_full_body, world_from_obj_place)
for cvd_obj in so['convex_decomp']:
obj_name = extract_file_name(cvd_obj)
pick_cvd_body = create_obj(os.path.join(obj_directory, cvd_obj), scale=scale, color=(random(), random(), random(), 0.6))
# add_body_name(obstacle_from_name[obj_name], obj_name)
set_pose(pick_cvd_body, world_from_obj_place)
# static collision
obstacle_from_name = {}
for so_name, so_dict in json_data['static_obstacles'].items():
for sub_id, so in so_dict.items():
obj_name = so_name + '_' + sub_id + '_full'
obstacle_from_name[obj_name] = create_obj(os.path.join(obj_directory, so['full_obj']),
scale=scale, color=(0, 0, 1, 0.4))
add_body_name(obstacle_from_name[obj_name], obj_name)
for cvd_obj in so['convex_decomp']:
obj_name = extract_file_name(cvd_obj)
obstacle_from_name[obj_name] = create_obj(os.path.join(obj_directory, cvd_obj), scale=scale, color=(random(), random(), random(), 0.6))
# add_body_name(obstacle_from_name[obj_name], obj_name)
wait_for_user()
if __name__ == '__main__':
main()
|
11540247
|
import py
from rpython.jit.backend.test.runner_test import LLtypeBackendTest
from rpython.jit.backend.llgraph.runner import LLGraphCPU
class TestLLTypeLLGraph(LLtypeBackendTest):
# for individual tests see:
# ====> ../../test/runner_test.py
def get_cpu(self):
return LLGraphCPU(None)
def test_memoryerror(self):
py.test.skip("does not make much sense on the llgraph backend")
def test_call_release_gil_variable_function_and_arguments(self):
py.test.skip("the arguments seem not correctly casted")
|
11540461
|
import unittest
from pixray import *
class TestPixrayMethods(unittest.TestCase):
def setup_test_parser(self):
parser = argparse.ArgumentParser()
return setup_parser(parser)
def get_args_for_apply_overlay_test(self, overlay_image, overlay_every, overlay_offset, overlay_until):
settings = {
'--overlay_image': overlay_image,
'--overlay_every': overlay_every,
'--overlay_offset': overlay_offset,
'--overlay_until': overlay_until
}
return self.parse_dictionary_to_args(settings)
def parse_dictionary_to_args(self, settings_dict):
parser = self.setup_test_parser()
args = []
for key, value in settings_dict.items():
if value is not None:
args.append(key)
args.append(value)
args = parser.parse_args(args)
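# parse_unit resolves unit strings such as '10i' against args.iterations
# (matching the '<n>i' samples used in the tests below)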
args.overlay_offset = parse_unit(args.overlay_offset, args.iterations, "overlay_offset")
args.overlay_until = parse_unit(args.overlay_until, args.iterations, "overlay_until")
args.overlay_every = parse_unit(args.overlay_every, args.iterations, "overlay_every")
return args
#region apply_overlay
def test_apply_overlay_all_true(self):
args = self.get_args_for_apply_overlay_test('image.png', '1i', '0i', '100i')
self.assertEqual(apply_overlay(args, 10), True)
def test_apply_overlay_no_overlay_image(self):
args = self.get_args_for_apply_overlay_test(None, '1i', '0i', '100i')
self.assertEqual(apply_overlay(args, 10), False)
def test_apply_overlay_not_at_offset(self):
args = self.get_args_for_apply_overlay_test('image.png', '5i', '10i', '100i')
self.assertEqual(apply_overlay(args, 10), False)
def test_apply_overlay_overlay_until_none(self):
args = self.get_args_for_apply_overlay_test('image.png', '5i', '10i', None)
self.assertEqual(apply_overlay(args, 10), False)
def test_apply_overlay_less_than_overlay_until(self):
args = self.get_args_for_apply_overlay_test('image.png', '1i', '0i', '5i')
self.assertEqual(apply_overlay(args, 10), False)
#endregion apply_overlay
#region get_learning_rate_drops
def test_get_learning_rate_drops_empty(self):
self.assertEqual(get_learning_rate_drops(None, 300), [])
def test_get_learning_rate_drops_single(self):
self.assertEqual(get_learning_rate_drops([75], 300), [224])
def test_get_learning_rate_drops_multi(self):
self.assertEqual(get_learning_rate_drops([50, 22.5], 300), [149, 67])
#endregion get_learning_rate_drops
if __name__ == '__main__':
unittest.main()
|
11540465
|
from typing import Optional, Union
from .SchemaOption import SchemaOption
from .SchemaOptionDefs import CustomTemplateDefs, SchemaOptionDefs
class BooleanSchemaOption(SchemaOption):
def __init__(self):
self.when_true: Union[SchemaOptionDefs, CustomTemplateDefs, None] = None
self.when_false: Union[SchemaOptionDefs, CustomTemplateDefs, None] = None
self.default: Union[bool, None] = None
def add(self, flag: bool, value: SchemaOptionDefs):
if flag:
self.when_true = value
else:
self.when_false = value
def set_default(self, flag: bool):
self.default = flag
def get(
self, flag: Optional[bool] = None
) -> Union[SchemaOptionDefs, CustomTemplateDefs]:
if flag is None and self.default is None:
raise ValueError("No default option set")
real_flag = self.default if flag is None else flag
return self.when_true if real_flag else self.when_false
def names(self):
return []
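# A minimal usage sketch (assumption: SchemaOptionDefs() is constructible
# without arguments here; substitute the real constructor arguments):
#
#   option = BooleanSchemaOption()
#   option.add(True, SchemaOptionDefs())
#   option.add(False, SchemaOptionDefs())
#   option.set_default(True)
#   defs = option.get()  # no flag given, so the default (when_true) is used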
|
11540495
|
import torch
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP
from dataloader import build_dataloader
from utils import Config, setup_seed, configure_hardware
from model.MMT4Caption import MMT4Caption
from eval import v2t_batch, make_coco_sample, COCOScorer
from tqdm import tqdm
from utils import EarlyStopping
import os
import argparse
import random
from tensorboardX import SummaryWriter
os.environ["TOKENIZERS_PARALLELISM"] = "false"
def build_stuffs(train_cfg: dict, model, local_args):
# optimizer
if train_cfg['optimizer']['name'] == 'adam':
if train_cfg['optimizer']['weight_decay'] == 0:
optimizer = torch.optim.Adam(filter(lambda param: param.requires_grad, model.parameters()),
lr=train_cfg['optimizer']['learning_rate'],
betas=train_cfg['optimizer']['beta'])
else:
optimizer = torch.optim.AdamW(filter(lambda param: param.requires_grad, model.parameters()),
lr=train_cfg['optimizer']['learning_rate'],
betas=train_cfg['optimizer']['beta'],
weight_decay=train_cfg['optimizer']['weight_decay'])
elif train_cfg['optimizer']['name'] == 'sgd':
optimizer = torch.optim.SGD(filter(lambda param: param.requires_grad, model.parameters()),
lr=train_cfg['optimizer']['learning_rate'],
momentum=train_cfg['optimizer']['momentum'])
else:
raise ValueError("Do not support optimizer: {}".format(train_cfg['optimizer']['name']))
# lr_scheduler
sche_cfg = train_cfg['optimizer']['lr_scheduler']
if sche_cfg['name'] == 'CosineAnnealingLR':
lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
optimizer, T_max=sche_cfg['T_max'], eta_min=sche_cfg['eta_min']
)
elif sche_cfg['name'] == 'ReduceLROnPlateau':
lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
optimizer, verbose=True, patience=sche_cfg['patience']
)
else:
raise ValueError("Do not support lr_scheduler: {}".format(sche_cfg['name']))
# early stop
early_stopping = EarlyStopping(
patience=train_cfg['earlystop'],
verbose=True,
# path=os.path.join(train_cfg['save_dir'], train_cfg['tag'] + str(local_args.local_rank) + "_earlystop.pth"),
path=os.path.join(train_cfg['save_dir'], train_cfg['tag'] + "_earlystop.pth"),
)
# writer
writer = None
if local_args.is_main_rank:
writer = SummaryWriter(os.path.join(train_cfg['log_dir'], train_cfg['tag']))
return optimizer, lr_scheduler, early_stopping, writer
def logging(writer, epoch, task, train_loss, val_loss, **kwargs):
def _log_metric():
print(f"Bleu@4: {round(kwargs['metrics'][0] * 100, 1)}", end='\t')
print(f"METEOR: {round(kwargs['metrics'][1] * 100, 1)}", end='\t')
print(f"ROUGE_L: {round(kwargs['metrics'][2] * 100, 1)}", end='\t')
print(f"CIDEr: {round(kwargs['metrics'][3] * 100, 1)}")
writer.add_scalar("Bleu@4", kwargs['metrics'][0] * 100, epoch)
writer.add_scalar("METEOR", kwargs['metrics'][1] * 100, epoch)
writer.add_scalar("ROUGE_L", kwargs['metrics'][2] * 100, epoch)
writer.add_scalar("CIDEr", kwargs['metrics'][3] * 100, epoch)
if writer is None:
return
print(f"Epoch: {epoch}")
if task == "cross":
print(f" Train: train loss: {train_loss[0]:.3f}\t"
f" train_cap_loss: {train_loss[1]:.3f}\t"
f" train_match_loss: {train_loss[2]:.3f}")
print(f" Val: val loss: {val_loss[0]:.3f}\t"
f" val_cap_loss: {val_loss[1]:.3f}\t"
f" val_match_loss: {val_loss[2]:.3f}")
if kwargs.get('metrics', None) is not None:
_log_metric()
writer.add_scalar("train_loss", train_loss[0], epoch)
writer.add_scalar("train_cap_loss", train_loss[1], epoch)
writer.add_scalar("train_match_loss", train_loss[2], epoch)
writer.add_scalar("val_loss", val_loss[0], epoch)
writer.add_scalar("val_cap_loss", val_loss[1], epoch)
writer.add_scalar("val_match_loss", val_loss[2], epoch)
elif task == "caption":
print(f" train loss: {train_loss[0]:.3f}")
print(f" val loss: {val_loss[0]:.3f}")
if kwargs.get('metrics', None) is not None:
_log_metric()
writer.add_scalar("train_cap_loss", train_loss[0], epoch)
writer.add_scalar("val_cap_loss", val_loss[0], epoch)
elif task == "match":
print(f" train loss: {train_loss[0]:.3f}")
print(f" val loss: {val_loss[0]:.3f}")
writer.add_scalar("train_match_loss", train_loss[0], epoch)
writer.add_scalar("val_match_loss", val_loss[0], epoch)
if 'lr' in kwargs:
writer.add_scalar('lr', kwargs['lr'], epoch)
if 'sample' in kwargs:
truth_caption, pred_caption, vid = kwargs['sample']
print(f"{vid} truth\t: {truth_caption} \n {vid} pred\t: {pred_caption}")
def train_epoch(model: MMT4Caption, optimizer, dataloader, mode, local_args):
model.train()
model.module.mode(mode) if local_args.multi_gpu else model.mode(mode)
running_loss, running_cap_loss, running_match_loss = 0, 0, 0
loader_len = len(dataloader)
# feat_ts, feat_mask_ts, batch_captions, batch_vids
for v_feats, v_masks, captions, vids in tqdm(dataloader):
v_feats = [i.to(local_args.device) for i in v_feats]
v_masks = [i.to(local_args.device) for i in v_masks]
if mode != 'cross':
loss = model(v_feats, v_masks, captions)
optimizer.zero_grad()
loss.backward()
optimizer.step()
if local_args.multi_gpu:
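# all_reduce sums the scalar loss across ranks; dividing by world_size below
# turns it into the global mean so logged losses match single-GPU runs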
dist.all_reduce(loss, op=dist.ReduceOp.SUM)
loss /= local_args.world_size
running_loss += loss.item()
else:
loss, cap_loss, match_loss = model(v_feats, v_masks, captions)
optimizer.zero_grad()
loss.backward()
optimizer.step()
if local_args.multi_gpu:
dist.all_reduce(loss, op=dist.ReduceOp.SUM)
dist.all_reduce(cap_loss, op=dist.ReduceOp.SUM)
dist.all_reduce(match_loss, op=dist.ReduceOp.SUM)
loss /= local_args.world_size
cap_loss /= local_args.world_size
match_loss /= local_args.world_size
running_loss += loss.item()
running_cap_loss += cap_loss.item()
running_match_loss += match_loss.item()
return running_loss / loader_len, running_cap_loss / loader_len, running_match_loss / loader_len
@torch.no_grad()
def val_epoch(model: MMT4Caption, dataloader, mode, local_args):
model.eval()
model_core = model.module if local_args.multi_gpu else model
model_core.mode(mode)
loader_len = len(dataloader)
running_loss, running_cap_loss, running_match_loss = 0, 0, 0
for v_feats, v_masks, captions, vids in dataloader:
v_feats = [i.to(local_args.device) for i in v_feats]
v_masks = [i.to(local_args.device) for i in v_masks]
if mode != 'cross':
loss = model_core(v_feats, v_masks, captions)
running_loss += loss.item()
else:
loss, cap_loss, match_loss = model_core(v_feats, v_masks, captions)
running_loss += loss.item()
running_cap_loss += cap_loss.item()
running_match_loss += match_loss.item()
return running_loss / loader_len, running_cap_loss / loader_len, running_match_loss / loader_len
@torch.no_grad()
def eval_epoch(model: MMT4Caption, data_iter, dataloader, max_len, local_args):
# evaluate
model_core = model.module if local_args.multi_gpu else model
model.eval()
model_core.mode("caption")
vid2result, video2caption = {}, data_iter.video2caption
for v_feats, v_masks, _, vids in tqdm(dataloader):
pred_captions = v2t_batch(model_core, v_feats, v_masks, max_len=max_len, local_args=local_args)
vid2result.update(list(zip(vids, pred_captions)))
# Coco eval
gts, samples, IDs = make_coco_sample(vid2result, video2caption)
scorer = COCOScorer(verbose=False)
scorer.score(gts, samples, IDs)
return scorer.eval['Bleu_4'], scorer.eval['METEOR'], scorer.eval['ROUGE_L'], scorer.eval['CIDEr']
# # syn the data
# metrics_ts = torch.Tensor([scorer.eval['Bleu_4'], scorer.eval['METEOR'], scorer.eval['ROUGE_L'], scorer.eval['CIDEr']])
# if local_args.multi_gpu:
# metrics_ts = metrics_ts.to(local_args.device)
# tensor_list = [torch.zeros(4, device=local_args.device) for _ in range(local_args.world_size)]
# # print(tensor_list[0].device, metrics_ts.device)
# dist.all_gather(tensor_list, metrics_ts) # tensor_list: all rank is same
# return tensor_list
# else:
# return metrics_ts
@torch.no_grad()
def v2t_single(model: MMT4Caption, video_feat, max_len, local_args):
model.eval()
video_feat = [i.unsqueeze(0).to(local_args.device) for i in video_feat]
result = model.greedy_decode(video_feat, max_len=max_len)[0]
result = result.replace("[CLS]", "").replace("[SEP]", "")
return result
def mmt4caption_train(cfg: dict, local_args):
# build model
model = MMT4Caption(cfg['model'], device=local_args.device).to(local_args.device)
model.mode(cfg['train']['task'])
if 'univl' in cfg['model']['caption_decoder'] and cfg['model']['caption_decoder']['univl'] is not None:
model.load_cap_decoder_from_univl(cfg['model']['caption_decoder']['univl'])
if cfg['model']['pretrained_model'] is not None:
model.load_state_dict(torch.load(cfg['model']['pretrained_model'], map_location=local_args.device),
strict=False)
if local_args.multi_gpu:
model = DDP(model, device_ids=[local_args.local_rank], output_device=local_args.local_rank)
model_core = model.module
else:
model_core = model
# build stuffs
optimizer, lr_scheduler, early_stopping, writer = build_stuffs(cfg['train'], model, local_args)
# build dataloaders
train_iter, train_dataloader, train_sampler = build_dataloader(cfg['data']['train'], local_args.multi_gpu)
val_iter, val_dataloader, _ = build_dataloader(cfg['data']['validation'], local_args.multi_gpu)
eval_iter, eval_dataloader, _ = build_dataloader(cfg['data']['eval'], local_args.multi_gpu)
# START
for epoch in range(cfg['train']['epoch']):
# Set epoch for sampler
if train_sampler is not None:
# print("train_sampler set epoch!!")
train_sampler.set_epoch(epoch)
# Start training
train_loss = train_epoch(model, optimizer, train_dataloader, mode=cfg['train']['task'], local_args=local_args)
lr_scheduler.step()
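# NOTE: ReduceLROnPlateau requires the monitored metric in step(metrics); the
# bare step() call above only works for schedulers such as CosineAnnealingLR.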
# Do many validations (only in rank:0)
val_loss, metrics = None, None
# calculate val loss
if local_args.is_main_rank:
val_loss = val_epoch(model, val_dataloader, mode=cfg['train']['task'], local_args=local_args)
if local_args.multi_gpu: dist.barrier()  # sync ranks
# calculate metrics
if local_args.is_main_rank and cfg['train'].get('metric_earlystop', True) is True:
metrics = eval_epoch(model, eval_iter, eval_dataloader, max_len=cfg['test']['max_length'], local_args=local_args)
if local_args.multi_gpu: dist.barrier()  # sync ranks
# predict a sample
pred_caption, truth_caption, vid = None, None, None
if local_args.is_main_rank:
video_feat, truth_caption, vid = val_iter[random.randint(0, len(val_iter) - 1)]
pred_caption = v2t_single(model_core, video_feat, max_len=cfg['test']['max_length'], local_args=local_args)
if local_args.multi_gpu: dist.barrier()  # sync ranks
# logging (only in rank:0)
logging(writer, epoch, cfg['train']['task'], train_loss, val_loss,
lr=optimizer.state_dict()['param_groups'][0]['lr'],
sample=(truth_caption, pred_caption, vid),
metrics=metrics)
# early stopping
if cfg['train'].get('metric_earlystop', True) is True:
# get metric score data from rank:0 to update the early_stopping
met_score = torch.zeros([1], dtype=torch.float) if metrics is None else torch.Tensor([sum(metrics)])
met_score = met_score.to(local_args.device)
if local_args.multi_gpu: dist.all_reduce(met_score, op=dist.ReduceOp.SUM)
early_stopping(-met_score.cpu().item(), model_core, do_save=local_args.is_main_rank)
else:
if val_loss is None:
val_loss = 0.0
elif type(val_loss) is tuple:
val_loss = val_loss[0]
# get metric score data from rank:0 to update the early_stopping
val_loss = torch.Tensor([val_loss]).to(local_args.device)
if local_args.multi_gpu: dist.all_reduce(val_loss, op=dist.ReduceOp.SUM)
early_stopping(val_loss.cpu().item(), model_core, do_save=local_args.is_main_rank)
if early_stopping.early_stop:
print("Early stopping")
break
# save
if epoch % cfg['train']['save_frequency'] == 0 and epoch != 0 and local_args.is_main_rank:
print("Saving checkpoint...")
torch.save(model_core.state_dict(),
os.path.join(cfg['train']['save_dir'], f"{cfg['train']['tag']}_epoch{epoch}.pth"))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--config", required=True, type=str,
help="The path of '.json' config file")
parser.add_argument("-ws", "--world_size", type=int, default=4,
help="The number of GPUs(Only need when --multi_gpu is on)")
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument("--cpu", action="store_true", help="use cpu")
group.add_argument("--gpu", action="store_true", help="use gpu")
group.add_argument("--multi_gpu", action="store_true", help="use multiple gpu")
args_ = parser.parse_args()
# configure hardware
args_ = configure_hardware(args_)
# set seed
setup_seed(666)
# load config
cfg_ = Config(args_.config)
if args_.is_main_rank:
cfg_.display()
mmt4caption_train(cfg_.data, args_)
|
11540508
|
from datetime import datetime, date, timedelta
import unittest
import moment
import moment_parse
# Helpful strftime() format that includes all parts of the date, including the time zone.
fmt = "%Y-%m-%d %H:%M:%S %Z"
class TestMoment(unittest.TestCase):
new_york = [
# - 1918 -
[datetime(1918, 3, 31, 6, 59, 59), -1633280401000, "EST", 300, 1, 59],
[datetime(1918, 3, 31, 7, 0, 0), -1633280400000, "EDT", 240, 3, 0],
[datetime(1918, 10, 27, 5, 59, 59), -1615140001000, "EDT", 240, 1, 59],
[datetime(1918, 10, 27, 6, 0, 0), -1615140000000, "EST", 300, 1, 0],
# - 1979 -
[datetime(1979, 4, 29, 6, 59, 59), 294217199000, "EST", 300, 1, 59],
[datetime(1979, 4, 29, 7, 0, 0), 294217200000, "EDT", 240, 3, 0],
[datetime(1979, 10, 28, 5, 59, 59), 309938399000, "EDT", 240, 1, 59],
[datetime(1979, 10, 28, 6, 0, 0), 309938400000, "EST", 300, 1, 0],
# - 2037 -
[datetime(2037, 3, 8, 6, 59, 59), 2120108399000, "EST", 300, 1, 59],
[datetime(2037, 3, 8, 7, 0, 0), 2120108400000, "EDT", 240, 3, 0],
[datetime(2037, 11, 1, 5, 59, 59), 2140667999000, "EDT", 240, 1, 59]
]
new_york_errors = [
["America/New_York", "2037-3-8 6:59:59", TypeError],
["America/New_York", [2037, 3, 8, 6, 59, 59], TypeError],
["America/new_york", datetime(1979, 4, 29, 6, 59, 59), KeyError]
]
los_angeles = [
# - 1918 -
# Spanning non-existent hour
[datetime(1918, 3, 31, 1, 59, 59, 0), -1633269601000, "PST", 480, 1, 59],
[datetime(1918, 3, 31, 2, 0, 0, 0), -1633273200000, "PST", 480, 1, 0],
[datetime(1918, 3, 31, 2, 59, 59, 0), -1633269601000, "PST", 480, 1, 59],
[datetime(1918, 3, 31, 3, 0, 0, 0), -1633269600000, "PDT", 420, 3, 0],
# Spanning doubly-existent hour
[datetime(1918, 10, 27, 0, 59, 59, 0), -1615132801000, "PDT", 420, 0, 59],
[datetime(1918, 10, 27, 1, 0, 0, 0), -1615132800000, "PDT", 420, 1, 0],
[datetime(1918, 10, 27, 1, 59, 59, 0), -1615129201000, "PDT", 420, 1, 59],
[datetime(1918, 10, 27, 2, 0, 0, 0), -1615125600000, "PST", 480, 2, 0],
# - 2008 -
# Spanning non-existent hour
[datetime(2008, 3, 9, 1, 59, 59, 0), 1205056799000, "PST", 480, 1, 59],
[datetime(2008, 3, 9, 2, 0, 0, 0), 1205053200000, "PST", 480, 1, 0],
[datetime(2008, 3, 9, 2, 59, 59, 0), 1205056799000, "PST", 480, 1, 59],
[datetime(2008, 3, 9, 3, 0, 0, 0), 1205056800000, "PDT", 420, 3, 0],
# Spanning doubly-existent hour
[datetime(2008, 11, 2, 0, 59, 59, 0), 1225612799000, "PDT", 420, 0, 59],
[datetime(2008, 11, 2, 1, 0, 0, 0), 1225612800000, "PDT", 420, 1, 0],
[datetime(2008, 11, 2, 1, 59, 59, 0), 1225616399000, "PDT", 420, 1, 59],
[datetime(2008, 11, 2, 2, 0, 0, 0), 1225620000000, "PST", 480, 2, 0],
# - 2037 -
[datetime(2037, 3, 8, 1, 59, 59, 0), 2120119199000, "PST", 480, 1, 59],
[datetime(2037, 3, 8, 2, 0, 0, 0), 2120115600000, "PST", 480, 1, 0],
[datetime(2037, 11, 1, 0, 59, 59, 0), 2140675199000, "PDT", 420, 0, 59],
[datetime(2037, 11, 1, 1, 0, 0, 0), 2140675200000, "PDT", 420, 1, 0],
]
parse_samples = [
# Basic set
['MM-DD-YYYY', '12-02-1999', 944092800.000000],
['DD-MM-YYYY', '12-02-1999', 918777600.000000],
['DD/MM/YYYY', '12/02/1999', 918777600.000000],
['DD_MM_YYYY', '12_02_1999', 918777600.000000],
['DD:MM:YYYY', '12:02:1999', 918777600.000000],
['D-M-YY', '2-2-99', 917913600.000000],
['YY', '99', 922060800.000000],
['DD-MM-YYYY h:m:s', '12-02-1999 2:45:10', 918787510.000000],
['DD-MM-YYYY h:m:s a', '12-02-1999 2:45:10 am', 918787510.000000],
['DD-MM-YYYY h:m:s a', '12-02-1999 2:45:10 pm', 918830710.000000],
['h:mm a', '12:00 pm', 1458648000.000000],
['h:mm a', '12:30 pm', 1458649800.000000],
['h:mm a', '12:00 am', 1458604800.000000],
['h:mm a', '12:30 am', 1458606600.000000],
['HH:mm', '12:00', 1458648000.000000],
['YYYY-MM-DDTHH:mm:ss', '2011-11-11T11:11:11', 1321009871.000000],
['ddd MMM DD HH:mm:ss YYYY', 'Tue Apr 07 22:52:51 2009', 1239144771.000000],
['ddd MMMM DD HH:mm:ss YYYY', 'Tue April 07 22:52:51 2009', 1239144771.000000],
['HH:mm:ss', '12:00:00', 1458648000.000000],
['HH:mm:ss', '12:30:00', 1458649800.000000],
['HH:mm:ss', '00:00:00', 1458604800.000000],
['HH:mm:ss S', '00:30:00 1', 1458606600.100000],
['HH:mm:ss SS', '00:30:00 12', 1458606600.120000],
['HH:mm:ss SSS', '00:30:00 123', 1458606600.123000],
['HH:mm:ss S', '00:30:00 7', 1458606600.700000],
['HH:mm:ss SS', '00:30:00 78', 1458606600.780000],
['HH:mm:ss SSS', '00:30:00 789', 1458606600.789000],
# Dropped m
['MM/DD/YYYY h:m:s a', '05/1/2012 12:25:00 p', 1335875100.000000],
['MM/DD/YYYY h:m:s a', '05/1/2012 12:25:00 a', 1335831900.000000],
# 2 digit year with YYYY
['D/M/YYYY', '9/2/99', 918518400.000000],
['D/M/YYYY', '9/2/1999', 918518400.000000],
['D/M/YYYY', '9/2/66', -122860800.000000],
['D/M/YYYY', '9/2/65', 3001363200.000000],
# No separators
['MMDDYYYY', '12021999', 944092800.000000],
['DDMMYYYY', '12021999', 918777600.000000],
['YYYYMMDD', '19991202', 944092800.000000],
['DDMMMYYYY', '10Sep2001', 1000080000.000000],
# Error forgiveness
['MM/DD/YYYY', '12-02-1999', 944092800.000000],
['DD/MM/YYYY', '12/02 /1999', 918777600.000000],
['DD:MM:YYYY', '12:02 :1999', 918777600.000000],
['D-M-YY', '2 2 99', 917913600.000000],
['DD-MM-YYYY h:m:s', '12-02-1999 2:45:10.00', 918787510.000000],
['h:mm a', '12:00pm', 1458648000.000000],
['HH:mm', '1200', 1458648000.000000],
['dddd MMMM DD HH:mm:ss YYYY', 'Tue Apr 7 22:52:51 2009', 1239144771.000000],
['ddd MMM DD HH:mm:ss YYYY', 'Tuesday April 7 22:52:51 2009', 1239144771.000000],
['ddd MMM Do HH:mm:ss YYYY', 'Tuesday April 7th 22:52:51 2009', 1239144771.000000]
]
parse_timezone_samples = [
# Timezone corner cases
['MM-DD-YYYY h:ma', '3-13-2016 1:59am', 'America/New_York', 1457852340], # EST
['MM-DD-YYYY h:ma', '3-13-2016 2:00am', 'America/New_York', 1457848800], # Invalid, -1hr
['MM-DD-YYYY h:ma', '3-13-2016 2:59am', 'America/New_York', 1457852340], # Invalid, -1hr
['MM-DD-YYYY h:ma', '3-13-2016 3:00am', 'America/New_York', 1457852400], # EDT
['MM-DD-YYYY h:ma', '3-13-2016 1:59am', 'America/Los_Angeles', 1457863140], # PST
['MM-DD-YYYY h:ma', '3-13-2016 2:00am', 'America/Los_Angeles', 1457859600], # Invalid, -1hr
['MM-DD-YYYY h:ma', '3-13-2016 2:59am', 'America/Los_Angeles', 1457863140], # Invalid, -1hr
['MM-DD-YYYY h:ma', '3-13-2016 3:00am', 'America/Los_Angeles', 1457863200] # PDT
]
def assertMatches(self, data_entry, moment_obj):
date, timestamp, abbr, offset, hour, minute = data_entry
dt = moment_obj.datetime()
self.assertEqual(moment_obj.timestamp, timestamp)
self.assertEqual(moment_obj.zoneAbbr(), abbr)
self.assertEqual(moment_obj.zoneOffset(), timedelta(minutes=-offset))
self.assertEqual(dt.hour, hour)
self.assertEqual(dt.minute, minute)
# For each UTC date, convert to New York time and compare with expected values
def test_standard_entry(self):
name = "America/New_York"
data = self.new_york
for entry in data:
date = entry[0]
timestamp = entry[1]
m = moment.tz(date).tz(name)
mts = moment.tz(timestamp, name)
self.assertMatches(entry, m)
self.assertMatches(entry, mts)
error_data = self.new_york_errors
for entry in error_data:
name = entry[0]
date = entry[1]
error = entry[2]
self.assertRaises(error, moment.tz, date, name)
# For each Los Angeles date, check that the returned date matches expected values
def test_zone_entry(self):
name = "America/Los_Angeles"
data = self.los_angeles
for entry in data:
date = entry[0]
timestamp = entry[1]
m = moment.tz(date, name)
self.assertMatches(entry, m)
def test_zone(self):
name = "America/New_York"
tzinfo = moment.tzinfo(name)
data = self.new_york
for entry in data:
date = entry[0]
ts = entry[1]
abbr = entry[2]
offset = entry[3]
dt = moment.tz(ts, name).datetime()
self.assertEqual(dt.tzname(), abbr)
self.assertEqual(dt.utcoffset(), timedelta(minutes=-offset))
def test_parse(self):
for s in self.parse_samples:
self.assertEqual(moment_parse.parse(s[1], s[0], 'UTC', date(2016, 3, 22)), s[2])
for s in self.parse_timezone_samples:
self.assertEqual(moment_parse.parse(s[1], s[0], s[2], date(2016, 3, 22)), s[3])
def test_ts_to_dt(self):
# Verify that ts_to_dt works as expected.
value_sec = 1426291200 # 2015-03-14 00:00:00 in UTC
value_dt_utc = moment.ts_to_dt(value_sec, moment.get_zone('UTC'))
value_dt_aware = moment.ts_to_dt(value_sec, moment.get_zone('America/New_York'))
self.assertEqual(value_dt_utc.strftime("%Y-%m-%d %H:%M:%S %Z"), '2015-03-14 00:00:00 UTC')
self.assertEqual(value_dt_aware.strftime("%Y-%m-%d %H:%M:%S %Z"), '2015-03-13 20:00:00 EDT')
def test_dst_switches(self):
# Verify that conversions around DST switches happen correctly. (This is tested in other tests
# as well, but this test case is more focused and easier to debug.)
dst_before = -1633280401
dst_begin = -1633280400
dst_end = -1615140001
dst_after = -1615140000
# Should have no surprises in converting to UTC, since there are no DST differences.
def ts_to_dt_utc(dt):
return moment.ts_to_dt(dt, moment.get_zone('UTC'))
self.assertEqual(ts_to_dt_utc(dst_before).strftime(fmt), "1918-03-31 06:59:59 UTC")
self.assertEqual(ts_to_dt_utc(dst_begin ).strftime(fmt), "1918-03-31 07:00:00 UTC")
self.assertEqual(ts_to_dt_utc(dst_end ).strftime(fmt), "1918-10-27 05:59:59 UTC")
self.assertEqual(ts_to_dt_utc(dst_after ).strftime(fmt), "1918-10-27 06:00:00 UTC")
# Converting to America/New_York should produce correct jumps.
def ts_to_dt_nyc(dt):
return moment.ts_to_dt(dt, moment.get_zone('America/New_York'))
self.assertEqual(ts_to_dt_nyc(dst_before).strftime(fmt), "1918-03-31 01:59:59 EST")
self.assertEqual(ts_to_dt_nyc(dst_begin ).strftime(fmt), "1918-03-31 03:00:00 EDT")
self.assertEqual(ts_to_dt_nyc(dst_end ).strftime(fmt), "1918-10-27 01:59:59 EDT")
self.assertEqual(ts_to_dt_nyc(dst_after ).strftime(fmt), "1918-10-27 01:00:00 EST")
self.assertEqual(ts_to_dt_nyc(dst_after + 3599).strftime(fmt), "1918-10-27 01:59:59 EST")
def test_tzinfo(self):
# Verify that tzinfo works correctly.
ts1 = 294217199000 # In EST
ts2 = 294217200000 # In EDT (spring forward, we skip ahead by 1 hour)
utc_dt1 = datetime(1979, 4, 29, 6, 59, 59)
utc_dt2 = datetime(1979, 4, 29, 7, 0, 0)
self.assertEqual(moment.tz(ts1).datetime().strftime(fmt), '1979-04-29 06:59:59 UTC')
self.assertEqual(moment.tz(ts2).datetime().strftime(fmt), '1979-04-29 07:00:00 UTC')
# Verify that we get correct time zone variation depending on DST status.
nyc_dt1 = moment.tz(ts1, 'America/New_York').datetime()
nyc_dt2 = moment.tz(ts2, 'America/New_York').datetime()
self.assertEqual(nyc_dt1.strftime(fmt), '1979-04-29 01:59:59 EST')
self.assertEqual(nyc_dt2.strftime(fmt), '1979-04-29 03:00:00 EDT')
# Make sure we can get timestamps back from these datetimes.
self.assertEqual(moment.dt_to_ts(nyc_dt1)*1000, ts1)
self.assertEqual(moment.dt_to_ts(nyc_dt2)*1000, ts2)
# Verify that the datetime objects we get produce correct time zones in terms of DST when we
# manipulate them. NOTE: it may look odd that we add 1hr + 1sec rather than just 1sec, but
# Python datetime arithmetic is wall-clock arithmetic: the timedelta is applied to the
# calendar fields and ignores the DST jump. Note that the timezone does get switched
# correctly between EDT and EST.
self.assertEqual(nyc_dt1 + timedelta(seconds=3601), nyc_dt2)
self.assertEqual(nyc_dt2 - timedelta(seconds=3601), nyc_dt1)
self.assertEqual((nyc_dt1 + timedelta(seconds=3601)).strftime(fmt), '1979-04-29 03:00:00 EDT')
self.assertEqual((nyc_dt2 - timedelta(seconds=3601)).strftime(fmt), '1979-04-29 01:59:59 EST')
def test_dt_to_ts(self):
# Verify that dt_to_ts works for both naive and aware datetime objects.
value_dt = datetime(2015, 3, 14, 0, 0) # In UTC
value_sec = 1426291200
tzla = moment.get_zone('America/Los_Angeles')
def format_utc(ts):
return moment.ts_to_dt(ts, moment.get_zone('UTC')).strftime(fmt)
# Check that a naive datetime is interpreted in UTC.
self.assertEqual(value_dt.strftime("%Y-%m-%d %H:%M:%S %Z"), '2015-03-14 00:00:00 ')
self.assertEqual(moment.dt_to_ts(value_dt), value_sec) # Interpreted in UTC
# Get an explicit UTC version and make sure that also works.
value_dt_utc = value_dt.replace(tzinfo=moment.TZ_UTC)
self.assertEqual(value_dt_utc.strftime(fmt), '2015-03-14 00:00:00 UTC')
self.assertEqual(moment.dt_to_ts(value_dt_utc), value_sec)
# Get an aware datetime, and make sure that works too.
value_dt_aware = moment.ts_to_dt(value_sec, moment.get_zone('America/New_York'))
self.assertEqual(value_dt_aware.strftime(fmt), '2015-03-13 20:00:00 EDT')
self.assertEqual(moment.dt_to_ts(value_dt_aware), value_sec)
# Check that dt_to_ts pays attention to the timezone.
# If we interpret midnight in LA time, it's a later timestamp.
self.assertEqual(format_utc(moment.dt_to_ts(value_dt, tzla)), '2015-03-14 07:00:00 UTC')
# The second argument is ignored if the datetime is aware.
self.assertEqual(format_utc(moment.dt_to_ts(value_dt_utc, tzla)), '2015-03-14 00:00:00 UTC')
self.assertEqual(format_utc(moment.dt_to_ts(value_dt_aware, tzla)), '2015-03-14 00:00:00 UTC')
# If we modify an aware datetime, we may get a new timezone abbreviation.
value_dt_aware -= timedelta(days=28)
self.assertEqual(value_dt_aware.strftime(fmt), '2015-02-13 20:00:00 EST')
def test_date_to_ts(self):
d = date(2015, 3, 14)
tzla = moment.get_zone('America/Los_Angeles')
def format_utc(ts):
return moment.ts_to_dt(ts, moment.get_zone('UTC')).strftime(fmt)
self.assertEqual(format_utc(moment.date_to_ts(d)), '2015-03-14 00:00:00 UTC')
self.assertEqual(format_utc(moment.date_to_ts(d, tzla)), '2015-03-14 07:00:00 UTC')
self.assertEqual(moment.ts_to_dt(moment.date_to_ts(d, tzla), tzla).strftime(fmt),
'2015-03-14 00:00:00 PDT')
def test_parse_iso(self):
tzny = moment.get_zone('America/New_York')
iso = moment.parse_iso
self.assertEqual(iso('2011-11-11T11:11:11'), 1321009871.000000)
self.assertEqual(iso('2019-01-22T00:47:39.219071-05:00'), 1548136059.219071)
self.assertEqual(iso('2019-01-22T00:47:39.219071-0500'), 1548136059.219071)
self.assertEqual(iso('2019-01-22T00:47:39.219071', timezone=tzny), 1548136059.219071)
self.assertEqual(iso('2019-01-22T00:47:39.219071'), 1548118059.219071)
self.assertEqual(iso('2019-01-22T00:47:39.219071Z'), 1548118059.219071)
self.assertEqual(iso('2019-01-22T00:47:39.219071Z', timezone=tzny), 1548118059.219071)
self.assertEqual(iso('2019-01-22T00:47:39.219'), 1548118059.219)
self.assertEqual(iso('2019-01-22T00:47:39'), 1548118059)
self.assertEqual(iso('2019-01-22 00:47:39.219071'), 1548118059.219071)
self.assertEqual(iso('2019-01-22 00:47:39'), 1548118059)
self.assertEqual(iso('2019-01-22'), 1548115200)
def test_parse_iso_date(self):
tzny = moment.get_zone('America/New_York')
iso = moment.parse_iso_date
# Note that time components and time zone do NOT affect the returned timestamp.
self.assertEqual(iso('2019-01-22'), 1548115200)
self.assertEqual(iso('2019-01-22T00:47:39.219071'), 1548115200)
self.assertEqual(iso('2019-01-22 00:47:39Z'), 1548115200)
self.assertEqual(iso('2019-01-22T00:47:39.219071-05:00'), 1548115200)
self.assertEqual(iso('2019-01-22T00:47:39.219071+05:00'), 1548115200)
if __name__ == "__main__":
unittest.main()
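# A minimal standalone sketch (assuming pytz is available) of the DST behavior
# noted in the tests above: arithmetic on aware datetimes is plain wall-clock
# arithmetic, and pytz needs normalize() to repair the tzinfo across the
# EDT/EST boundary.
def _dst_arithmetic_sketch():
    import pytz
    from datetime import datetime, timedelta
    nyc = pytz.timezone('America/New_York')
    # 1 second before the 1979 spring-forward (2:00 -> 3:00 on April 29)
    before = nyc.localize(datetime(1979, 4, 29, 1, 59, 59))
    after = nyc.normalize(before + timedelta(seconds=1))  # EST -> EDT
    assert after.strftime('%Y-%m-%d %H:%M:%S %Z') == '1979-04-29 03:00:00 EDT'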
|
11540554
|
from __future__ import absolute_import, division, print_function
import telnyx
TEST_RESOURCE_ID = "6a09cdc3-8948-47f0-aa62-74ac943d6c58"
class TestCredentialConnection(object):
def test_is_listable(self, request_mock):
resources = telnyx.CredentialConnection.list()
request_mock.assert_requested("get", "/v2/credential_connections")
assert isinstance(resources.data, list)
assert isinstance(resources.data[0], telnyx.CredentialConnection)
def test_is_retrievable(self, request_mock):
resource = telnyx.CredentialConnection.retrieve(TEST_RESOURCE_ID)
request_mock.assert_requested(
"get", "/v2/credential_connections/%s" % TEST_RESOURCE_ID
)
assert isinstance(resource, telnyx.CredentialConnection)
def test_is_creatable(self, request_mock):
resource = telnyx.CredentialConnection.create(
active=True,
user_name="some-user-name",
connection_name="some-connection",
password="<PASSWORD>",
)
request_mock.assert_requested("post", "/v2/credential_connections")
assert isinstance(resource, telnyx.CredentialConnection)
def test_is_saveable(self, request_mock):
credential_connection = telnyx.CredentialConnection.retrieve(TEST_RESOURCE_ID)
credential_connection.active = False
resource = credential_connection.save()
request_mock.assert_requested(
"patch", "/v2/credential_connections/%s" % TEST_RESOURCE_ID
)
assert isinstance(resource, telnyx.CredentialConnection)
assert resource is credential_connection
def test_is_modifiable(self, request_mock):
resource = telnyx.CredentialConnection.modify(TEST_RESOURCE_ID, active=False)
request_mock.assert_requested(
"patch", "/v2/credential_connections/%s" % TEST_RESOURCE_ID
)
assert isinstance(resource, telnyx.CredentialConnection)
def test_is_deletable(self, request_mock):
resource = telnyx.CredentialConnection.retrieve(TEST_RESOURCE_ID)
resource.delete()
request_mock.assert_requested(
"delete", "/v2/credential_connections/%s" % TEST_RESOURCE_ID
)
|
11540562
|
from requests import Session
from .lib import IN, web, zonekey
URL = "https://core.ap.gov.in/CMDashBoard/UserInterface/Power/PowerReport.aspx"
ZONE_KEY = "IN-AP"
TIME_FORMAT = "DD-MM-YYYY HH:mm"
SOURCE = "core.ap.gov.in"
def fetch_production(
zone_key=ZONE_KEY, session=None, target_datetime=None, logger=None
) -> dict:
"""Fetch Andhra Pradesh production"""
if target_datetime:
raise NotImplementedError("This parser is not yet able to parse past dates")
zonekey.assert_zone_key(zone_key, ZONE_KEY)
html = web.get_response_soup(zone_key, URL, session)
india_date = IN.read_datetime_from_span_id(
html, "MainContent_lblPowerStatusDate", TIME_FORMAT
)
hydro_value = IN.read_value_from_span_id(html, "MainContent_lblHydel")
gas_value = IN.read_value_from_span_id(html, "MainContent_lblGas")
wind_value = IN.read_value_from_span_id(html, "MainContent_lblWind")
solar_value = IN.read_value_from_span_id(html, "MainContent_lblSolar")
# All thermal centrals are considered coal based production
# https://en.wikipedia.org/wiki/Power_sector_of_Andhra_Pradesh
thermal_value = IN.read_value_from_span_id(html, "MainContent_lblThermal")
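    # CGS (Central Generating Stations) and IPPs (Independent Power Producers)
    # have no per-fuel breakdown on this dashboard, so their combined output is
    # reported under "unknown" below.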
cgs_value = IN.read_value_from_span_id(html, "MainContent_lblCGS")
ipp_value = IN.read_value_from_span_id(html, "MainContent_lblIPPS")
return {
"zoneKey": zone_key,
"datetime": india_date.datetime,
"production": {
"biomass": 0.0,
"coal": thermal_value,
"gas": gas_value,
"hydro": hydro_value,
"nuclear": 0.0,
"oil": 0.0,
"solar": solar_value,
"wind": wind_value,
"geothermal": 0.0,
"unknown": round(cgs_value + ipp_value, 2),
},
"storage": {"hydro": 0.0},
"source": SOURCE,
}
def fetch_consumption(
zone_key=ZONE_KEY, session=None, target_datetime=None, logger=None
) -> dict:
"""Fetch Andhra Pradesh consumption"""
if target_datetime:
raise NotImplementedError("This parser is not yet able to parse past dates")
zonekey.assert_zone_key(zone_key, ZONE_KEY)
html = web.get_response_soup(zone_key, URL, session)
india_date = IN.read_datetime_from_span_id(
html, "MainContent_lblPowerStatusDate", TIME_FORMAT
)
demand_value = IN.read_value_from_span_id(html, "MainContent_lblGridDemand")
return {
"zoneKey": zone_key,
"datetime": india_date.datetime,
"consumption": demand_value,
"source": SOURCE,
}
if __name__ == "__main__":
session = Session()
print(fetch_production(ZONE_KEY, session))
print(fetch_consumption(ZONE_KEY, session))
|
11540586
|
import pytest
from silver.fixtures.pytest_fixtures import * # NOQA
pytest.register_assert_rewrite('silver.tests.api.specs.document_entry')
pytest.register_assert_rewrite('silver.tests.api.specs.utils')
|
11540617
|
import pytest
import uvicore
from typing import List
from uvicore.support.dumper import dump
# This is all failing due to my provider refactors
@pytest.mark.asyncio
async def test_package(app1):
from uvicore.package.package import Package
assert Package.__module__ + '.' + Package.__name__ == 'app1.overrides.package.Package'
assert Package.__annotations__.get('custom1') is not None
# Should be able to pull the original via _BASE
original = uvicore.ioc.make('uvicore.package.package.Package_BASE')
assert original.__module__ + '.' + original.__name__ == 'uvicore.package.package.Package'
@pytest.mark.asyncio
async def test_provider(app1):
from uvicore.package.provider import ServiceProvider
assert ServiceProvider.__module__ + '.' + ServiceProvider.__name__ == 'app1.overrides.provider.ServiceProvider'
assert ServiceProvider.__annotations__.get('custom1') is not None
# Should be able to pull the original via _BASE
original = uvicore.ioc.make('uvicore.package.provider.ServiceProvider_BASE')
assert original.__module__ + '.' + original.__name__ == 'uvicore.package.provider.ServiceProvider'
@pytest.mark.asyncio
async def test_application(app1):
package = uvicore.app.package('uvicore.configuration')
assert package.custom1 == 'custom1 override here!!!'
# Should be able to pull the original via _BASE
original = uvicore.ioc.make('uvicore.foundation.application.Application_BASE')
assert original.__module__ + '.' + original.__name__ == 'uvicore.foundation.application.Application'
@pytest.mark.asyncio
async def test_user_model(app1):
# Should return the same class (not an instance, not a singleton)
from uvicore.auth.models.user import User
from app1.models.user import User as Override
assert User == Override
# Should be able to pull the original via _BASE
original = uvicore.ioc.make('uvicore.auth.models.user.User_BASE')
assert original.__module__ + '.' + original.__name__ == 'uvicore.auth.models.user.User'
@pytest.mark.asyncio
async def test_users_table(app1):
# These are singletons and should match the same single instance
from uvicore.auth.database.tables.users import Users
from app1.database.tables.users import Users as Override
assert Users == Override
# Should be able to pull the original via _BASE
original = uvicore.ioc.make('uvicore.auth.database.tables.users.Users_BASE')
assert original.__module__ + '.' + original.__name__ == 'uvicore.auth.database.tables.users.Users'
|
11540640
|
from flask_restful import Resource
from datetime import datetime
from model.FuzzingJobState import FuzzingJobState
from model.FuzzingJob import FuzzingJob
from model.FuzzingHost import FuzzingHost
from model.FuzzingCrash import FuzzingCrash
class StatusCtrl(Resource):
def get(self):
status_active = FuzzingJobState.query.filter_by(name='Active').first()
status_completed = FuzzingJobState.query.filter_by(name='Completed').first()
status_queued = FuzzingJobState.query.filter_by(name='Queued').first()
total_job_count = FuzzingJob.query.count()
active_job_count = FuzzingJob.query.filter_by(state_id=status_active.id).count()
completed_job_count = FuzzingJob.query.filter_by(state_id=status_completed.id).count()
queued_job_count = FuzzingJob.query.filter_by(state_id=status_queued.id).count()
crash_count = FuzzingCrash.query.count()
node_count = FuzzingHost.query.count()
return {
'total_job_count': total_job_count,
'active_job_count': active_job_count,
'completed_job_count': completed_job_count,
'queued_job_count': queued_job_count,
'crash_count': crash_count,
'node_count': node_count,
            'serverTime': str(datetime.now())
}, 200
|
11540663
|
if __name__ == '__main__':
x = int(input())
y = int(input())
z = int(input())
n = int(input())
    k = [[i, j, l]
         for i in range(x + 1)
         for j in range(y + 1)
         for l in range(z + 1)
         if i + j + l != n]
print(k)
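# An equivalent, arguably clearer formulation using itertools.product
# (a standalone sketch; builds the same list as the comprehension above):
from itertools import product

def coordinate_cuboid(x, y, z, n):
    """Return every [i, j, l] on the (x, y, z) grid whose coordinate sum != n."""
    return [list(p)
            for p in product(range(x + 1), range(y + 1), range(z + 1))
            if sum(p) != n]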
|
11540683
|
from .base import *
from .box_space import *
from .tuple_space import *
from .dict_space import *
from .concatenation_space import *
from .axis_angle_space import *
from .translation_axis_angle_space import *
|
11540712
|
from __future__ import unicode_literals
from httoop import URI
def test_simple_uri_comparison(uri):
u1 = URI(b'http://abc.com:80/~smith/home.html')
u2 = URI(b'http://ABC.com/%7Esmith/home.html')
u3 = URI(b'http://ABC.com:/%7esmith/home.html')
u4 = URI(b'http://ABC.com:/%7esmith/./home.html')
u5 = URI(b'http://ABC.com:/%7esmith/foo/../home.html')
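    # All five spellings are equivalent under RFC 3986 normalization: the host
    # compares case-insensitively, ':80' is http's default port (and an empty
    # port is ignored), '%7E'/'%7e' decode to the unreserved '~', and the
    # dot-segments './' and 'foo/../' are resolved away.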
assert u1 == u2
assert u2 == u3
assert u1 == u3
assert u1 == u4
assert u1 == u5
def test_request_uri_maxlength():
pass
def test_request_uri_is_star():
pass
def test_request_uri_containing_fragment():
pass
def test_invalid_uri_scheme():
pass
def test_invalid_port():
pass
def test_normalized_uri_redirects():
pass
def test_uri_composing_username_and_password():
assert bytes(URI(b'http://username@example.com')) == b'http://username@example.com'
assert bytes(URI(b'http://username:password@example.com')) == b'http://username:password@example.com'
|
11540718
|
from sqlobject.dbconnection import DBAPI
from sqlobject import col
import re
class MSSQLConnection(DBAPI):
supportTransactions = True
dbName = 'mssql'
schemes = [dbName]
    limit_re = re.compile(r'^\s*(select )(.*)', re.IGNORECASE)
def __init__(self, db, user, password='', host='localhost', port=None,
autoCommit=0, **kw):
drivers = kw.pop('driver', None) or 'adodb,pymssql'
for driver in drivers.split(','):
driver = driver.strip()
if not driver:
continue
try:
if driver in ('adodb', 'adodbapi'):
import adodbapi as sqlmodule
elif driver == 'pymssql':
import pymssql as sqlmodule
else:
raise ValueError('Unknown MSSQL driver "%s", expected adodb or pymssql' % driver)
except ImportError:
pass
else:
break
else:
raise ImportError('Cannot find an MSSQL driver, tried %s' % drivers)
self.module = sqlmodule
if sqlmodule.__name__ == 'adodbapi':
self.dbconnection = sqlmodule.connect
# ADO uses unicode only (AFAIK)
self.usingUnicodeStrings = True
# Need to use SQLNCLI provider for SQL Server Express Edition
if kw.get("ncli"):
conn_str = "Provider=SQLNCLI;"
else:
conn_str = "Provider=SQLOLEDB;"
conn_str += "Data Source=%s;Initial Catalog=%s;"
# MSDE does not allow SQL server login
if kw.get("sspi"):
conn_str += "Integrated Security=SSPI;Persist Security Info=False"
self.make_conn_str = lambda keys: conn_str % (keys.host, keys.db)
else:
conn_str += "User Id=%s;Password=%s"
self.make_conn_str = lambda keys: conn_str % (keys.host, keys.db, keys.user, keys.password)
kw.pop("sspi", None)
kw.pop("ncli", None)
else: # pymssql
self.dbconnection = sqlmodule.connect
sqlmodule.Binary = lambda st: str(st)
# don't know whether pymssql uses unicode
self.usingUnicodeStrings = False
def _make_conn_str(keys):
keys_dict = {}
for attr, value in (
('user', keys.user),
('password', keys.password),
('host', keys.host),
('port', keys.port),
('database', keys.db),
):
if value: keys_dict[attr] = value
return keys_dict
self.make_conn_str = _make_conn_str
self.autoCommit=int(autoCommit)
self.user = user
self.password = password
self.host = host
self.port = port
self.db = db
self._server_version = None
self._can_use_max_types = None
DBAPI.__init__(self, **kw)
@classmethod
def _connectionFromParams(cls, user, password, host, port, path, args):
path = path.strip('/')
return cls(user=user, password=password,
host=host or 'localhost', port=port, db=path, **args)
    def insert_id(self, conn):
        """
        Return the identity value generated by the last INSERT on this connection.
        """
c = conn.cursor()
# converting the identity to an int is ugly, but it gets returned
# as a decimal otherwise :S
c.execute('SELECT CONVERT(INT, @@IDENTITY)')
return c.fetchone()[0]
def makeConnection(self):
conn_descr = self.make_conn_str(self)
if isinstance(conn_descr, dict):
con = self.dbconnection(**conn_descr)
else:
con = self.dbconnection(conn_descr)
cur = con.cursor()
cur.execute('SET ANSI_NULLS ON')
cur.execute("SELECT CAST('12345.21' AS DECIMAL(10, 2))")
self.decimalSeparator = str(cur.fetchone()[0])[-3]
cur.close()
return con
HAS_IDENTITY = """
select 1
from INFORMATION_SCHEMA.COLUMNS
where TABLE_NAME = '%s'
and COLUMNPROPERTY(object_id(TABLE_NAME), COLUMN_NAME, 'IsIdentity') = 1
"""
def _hasIdentity(self, conn, table):
query = self.HAS_IDENTITY % table
c = conn.cursor()
c.execute(query)
r = c.fetchone()
return r is not None
def _queryInsertID(self, conn, soInstance, id, names, values):
"""
Insert the Initial with names and values, using id.
"""
table = soInstance.sqlmeta.table
idName = soInstance.sqlmeta.idName
c = conn.cursor()
has_identity = self._hasIdentity(conn, table)
if id is not None:
names = [idName] + names
values = [id] + values
elif has_identity and idName in names:
try:
i = names.index( idName )
if i:
del names[i]
del values[i]
except ValueError:
pass
if has_identity:
if id is not None:
c.execute('SET IDENTITY_INSERT %s ON' % table)
else:
c.execute('SET IDENTITY_INSERT %s OFF' % table)
q = self._insertSQL(table, names, values)
if self.debug:
self.printDebug(conn, q, 'QueryIns')
c.execute(q)
if has_identity:
c.execute('SET IDENTITY_INSERT %s OFF' % table)
if id is None:
id = self.insert_id(conn)
if self.debugOutput:
self.printDebug(conn, id, 'QueryIns', 'result')
return id
@classmethod
def _queryAddLimitOffset(cls, query, start, end):
if end and not start:
limit_str = "SELECT TOP %i" % end
match = cls.limit_re.match(query)
if match and len(match.groups()) == 2:
return ' '.join([limit_str, match.group(2)])
else:
return query
def createReferenceConstraint(self, soClass, col):
return col.mssqlCreateReferenceConstraint()
def createColumn(self, soClass, col):
return col.mssqlCreateSQL(self)
def createIDColumn(self, soClass):
key_type = {int: "INT", str: "TEXT"}[soClass.sqlmeta.idType]
return '%s %s IDENTITY UNIQUE' % (soClass.sqlmeta.idName, key_type)
def createIndexSQL(self, soClass, index):
return index.mssqlCreateIndexSQL(soClass)
def joinSQLType(self, join):
return 'INT NOT NULL'
SHOW_TABLES="SELECT name FROM sysobjects WHERE type='U'"
def tableExists(self, tableName):
for (table,) in self.queryAll(self.SHOW_TABLES):
if table.lower() == tableName.lower():
return True
return False
def addColumn(self, tableName, column):
self.query('ALTER TABLE %s ADD %s' %
(tableName,
column.mssqlCreateSQL(self)))
def delColumn(self, sqlmeta, column):
        self.query('ALTER TABLE %s DROP COLUMN %s' % (sqlmeta.table, column.dbName))
    # precision and scale are taken from the column table so that we can
    # create decimal columns if needed
SHOW_COLUMNS = """
select
name,
length,
( select name
from systypes
where cast(xusertype as int)= cast(sc.xtype as int)
) datatype,
prec,
scale,
isnullable,
cdefault,
m.text default_text,
isnull(len(autoval),0) is_identity
from syscolumns sc
LEFT OUTER JOIN syscomments m on sc.cdefault = m.id
AND m.colid = 1
where
sc.id in (select id
from sysobjects
where name = '%s')
order by
colorder"""
def columnsFromSchema(self, tableName, soClass):
colData = self.queryAll(self.SHOW_COLUMNS
% tableName)
results = []
for field, size, t, precision, scale, nullAllowed, default, defaultText, is_identity in colData:
if field == soClass.sqlmeta.idName:
continue
# precision is needed for decimal columns
colClass, kw = self.guessClass(t, size, precision, scale)
kw['name'] = str(soClass.sqlmeta.style.dbColumnToPythonAttr(field))
kw['dbName'] = str(field)
kw['notNone'] = not nullAllowed
if (defaultText):
# Strip ( and )
defaultText = defaultText[1:-1]
if defaultText[0] == "'":
defaultText = defaultText[1:-1]
else:
if t == "int" : defaultText = int(defaultText)
if t == "float" : defaultText = float(defaultText)
if t == "numeric": defaultText = float(defaultText)
# TODO need to access the "column" to_python method here--but the object doesn't exists yet
# @@ skip key...
kw['default'] = defaultText
results.append(colClass(**kw))
return results
def _setAutoCommit(self, conn, auto):
#raise Exception(repr(auto))
return
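        # NOTE: the statements below are intentionally unreachable; they are
        # kept only as a reference for a possible future implementation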
#conn.auto_commit = auto
option = "ON"
if auto == 0:
option = "OFF"
c = conn.cursor()
c.execute("SET AUTOCOMMIT " + option)
conn.setconnectoption(SQL.AUTOCOMMIT, option)
    # precision and scale are needed for decimal columns
def guessClass(self, t, size, precision, scale):
"""
Here we take raw values coming out of syscolumns and map to SQLObject class types.
"""
if t.startswith('int'):
return col.IntCol, {}
elif t.startswith('varchar'):
if self.usingUnicodeStrings:
return col.UnicodeCol, {'length': size}
return col.StringCol, {'length': size}
elif t.startswith('char'):
if self.usingUnicodeStrings:
return col.UnicodeCol, {'length': size,
'varchar': False}
return col.StringCol, {'length': size,
'varchar': False}
elif t.startswith('datetime'):
return col.DateTimeCol, {}
elif t.startswith('decimal'):
return col.DecimalCol, {'size': precision, # be careful for awkward naming
'precision': scale}
else:
return col.Col, {}
def server_version(self):
if self._server_version is not None:
return self._server_version
try:
server_version = self.queryOne("SELECT SERVERPROPERTY('productversion')")[0]
server_version = server_version.split('.')[0]
server_version = int(server_version)
except:
server_version = None # unknown
self._server_version = server_version
return server_version
def can_use_max_types(self):
if self._can_use_max_types is not None:
return self._can_use_max_types
server_version = self.server_version()
self._can_use_max_types = can_use_max_types = \
(server_version is not None) and (server_version >= 9)
return can_use_max_types
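# A small standalone sketch (names hypothetical, not part of the driver) of the
# TOP-based LIMIT emulation in _queryAddLimitOffset: MSSQL versions without
# OFFSET/FETCH have no LIMIT clause, so "first N rows" is spelled SELECT TOP N,
# and a query with a non-zero start offset is returned unchanged.
def _demo_top_rewrite():
    limit_re = re.compile(r'^\s*(select )(.*)', re.IGNORECASE)
    match = limit_re.match('SELECT name FROM users')
    rewritten = ' '.join(['SELECT TOP 10', match.group(2)])
    assert rewritten == 'SELECT TOP 10 name FROM users'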
|
11540721
|
from django.core.management.base import BaseCommand
from 臺灣言語服務.models import 訓練過渡格式
class 匯入枋模(BaseCommand):  # "import template" management command
    def handle(self, *args, **參數):
        # 訓練過渡格式 is the "training transitional format" model; report the
        # record count (資料數量) before and after bulk-adding (加一堆資料) the
        # rows returned by 全部資料().
        self.stdout.write('資料數量:{}'.format(訓練過渡格式.資料數量()))
        訓練過渡格式.加一堆資料(self.全部資料(*args, **參數))
        self.stdout.write('資料數量:{}'.format(訓練過渡格式.資料數量()))
|
11540732
|
from mock import patch
from pytest import mark
from invocations.environment import in_ci
@mark.parametrize(
"environ,expected",
[
(dict(), False),
(dict(WHATEVS="true", SURE_WHYNOT=""), False),
(dict(CIRCLECI=""), False),
(dict(TRAVIS=""), False),
(dict(CIRCLECI="", WHATEVS="yo"), False),
(dict(CIRCLECI="", TRAVIS=""), False),
(dict(CIRCLECI="true"), True),
(dict(CIRCLECI="false"), True), # yup
(dict(CIRCLECI="no"), True),
(dict(CIRCLECI="1"), True),
(dict(CIRCLECI="0"), True),
(dict(TRAVIS="true"), True),
(dict(CIRCLECI="true", TRAVIS=""), True),
(dict(CIRCLECI="", TRAVIS="true"), True),
(dict(CIRCLECI="true", TRAVIS="true"), True),
(dict(CIRCLECI="false", TRAVIS="no"), True),
(dict(CIRCLECI="true", WHATEVS=""), True),
(dict(CIRCLECI="true", WHATEVS="huh?"), True),
],
)
def in_ci_true_when_any_expected_vars_nonempty(environ, expected):
with patch("invocations.environment.os.environ", environ):
assert in_ci() is expected
|
11540802
|
from abc import ABC, abstractmethod
from redbot.core import Config
from redbot.core.bot import Red
class MixinMeta(ABC):
"""Base class for well behaved type hint detection with composite class.
Basically, to keep developers sane when not all attributes are defined in each mixin.
"""
def __init__(self, *_args):
self.config: Config
self.bot: Red
|
11540836
|
import unittest
from unittest import SkipTest
import six
from codetools.contexts.data_context import DataContext
from codetools.contexts.multi_context import MultiContext
class Events2TestCase(unittest.TestCase):
""" Test events with the new contexts.
"""
def setUp(self):
self.event_count = 0
self.last_event = None
def event_listener(self, event):
self.event_count += 1
self.last_event = event
def test_assign_value(self):
context = DataContext()
context.on_trait_change(self.event_listener, 'items_modified')
context['a'] = 'foo'
self.assertEqual(self.event_count, 1)
self.assertEqual(self.last_event.added, ['a'])
self.assertEqual(self.last_event.modified, [])
self.assertEqual(self.last_event.removed, [])
def test_change_value(self):
context = DataContext()
context.on_trait_change(self.event_listener, 'items_modified')
context['a'] = 'foo'
context['a'] = 'foo2'
self.assertEqual(self.event_count, 2)
self.assertEqual(self.last_event.added, [])
self.assertEqual(self.last_event.modified, ['a'])
self.assertEqual(self.last_event.removed, [])
def test_defer_add_event(self):
context = DataContext()
context.on_trait_change(self.event_listener, 'items_modified')
context.defer_events = True
context['a'] = 'foo'
context.defer_events = False
self.assertEqual(self.event_count, 1)
self.assertEqual(self.last_event.added, ['a'])
self.assertEqual(self.last_event.modified, [])
self.assertEqual(self.last_event.removed, [])
def test_defer_multiple_events(self):
context = DataContext()
context.on_trait_change(self.event_listener, 'items_modified')
context.defer_events = True
self.assertEqual(self.event_count, 0)
context['a'] = 'foo'
self.assertEqual(self.event_count, 0)
context['a'] = 'foo2'
self.assertEqual(self.event_count, 0)
context['b'] = 'bar'
self.assertEqual(self.event_count, 0)
context.defer_events = False
# the modified will be empty, because it was also added
self.assertEqual(self.event_count, 1)
self.assertEqual(set(self.last_event.added), set(['a', 'b']))
self.assertEqual(self.last_event.modified, [])
self.assertEqual(self.last_event.removed, [])
def test_delete_after_add(self):
context = DataContext()
context.on_trait_change(self.event_listener, 'items_modified')
self.assertEqual(self.event_count, 0)
context.defer_events = True
self.assertEqual(self.event_count, 0)
context['a'] = 'foo'
self.assertEqual(self.event_count, 0)
del context['a']
self.assertEqual(self.event_count, 0)
context.defer_events = False
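        # the add and the delete cancel out while events are deferred,
        # so no event is fired at all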
self.assertEqual(self.event_count, 0)
def test_delete_after_modify(self):
context = DataContext()
context['a'] = 'foo'
context.on_trait_change(self.event_listener, 'items_modified')
self.assertEqual(self.event_count, 0)
context.defer_events = True
self.assertEqual(self.event_count, 0)
context['a'] = 'foo2'
self.assertEqual(self.event_count, 0)
del context['a']
self.assertEqual(self.event_count, 0)
context.defer_events = False
self.assertEqual(self.event_count, 1)
self.assertEqual(self.last_event.added, [])
self.assertEqual(self.last_event.modified, [])
self.assertEqual(self.last_event.removed, ['a'])
def test_block_events(self):
if six.PY3:
raise SkipTest("skipping Block-using tests on Python 3")
import numpy
from codetools.blocks.api import Block
context = DataContext(name="data")
context.on_trait_change(self.event_listener, 'items_modified')
context.defer_events = True
context['a'] = 4
context['b'] = numpy.array((1,2,3))
context.defer_events = False
self.assertEqual(self.event_count, 1)
multi_context = MultiContext(context, name="multi")
multi_context.on_trait_change(self.event_listener, 'items_modified')
block = Block("c = a * b")
block.execute(multi_context)
# we expect one event from data context, one from multi context
self.assertEqual(self.event_count, 3)
|
11540843
|
import random
import argparse
import json
from common import EMPTY, AllKeyValueFactory, IntKeyValueFactory
from dictinfo import dump_py_dict
from dict_reimplementation import PyDictReimplementation32, dump_reimpl_dict
from js_reimplementation_interface import Dict32JsImpl, AlmostPythonDictRecyclingJsImpl, AlmostPythonDictNoRecyclingJsImpl
import hash_chapter3_class_impl
import build_autogenerated_chapter3_chapter4
def dict_factory(pairs=None):
if not pairs:
return {}
# quick&dirty
def to_string(x):
return json.dumps(x) if x is not None else "None"
d = eval("{" + ", ".join("{}:{}".format(to_string(k), to_string(v)) for [k, v] in pairs) + "}")
return d
IMPLEMENTATIONS = {
"dict_actual": (dict_factory, dump_py_dict),
"dict32_reimpl_py": (PyDictReimplementation32, dump_reimpl_dict),
"dict32_reimpl_js": (Dict32JsImpl, dump_reimpl_dict),
"dict32_reimpl_py_extracted": (build_autogenerated_chapter3_chapter4.Dict32Extracted, dump_reimpl_dict),
"almost_python_dict_recycling_py": (hash_chapter3_class_impl.AlmostPythonDictImplementationRecycling, dump_reimpl_dict),
"almost_python_dict_no_recycling_py": (hash_chapter3_class_impl.AlmostPythonDictImplementationNoRecycling, dump_reimpl_dict),
"almost_python_dict_no_recycling_py_simpler": (hash_chapter3_class_impl.AlmostPythonDictImplementationNoRecyclingSimplerVersion, dump_reimpl_dict),
"almost_python_dict_recycling_js": (AlmostPythonDictRecyclingJsImpl, dump_reimpl_dict),
"almost_python_dict_no_recycling_js": (AlmostPythonDictNoRecyclingJsImpl, dump_reimpl_dict),
"almost_python_dict_recycling_py_extracted": (build_autogenerated_chapter3_chapter4.HashClassRecyclingExtracted, dump_reimpl_dict),
"almost_python_dict_no_recycling_py_extracted": (build_autogenerated_chapter3_chapter4.HashClassNoRecyclingExtracted, dump_reimpl_dict),
}
def verify_same(d, dump_d_func, dreimpl, dump_dreimpl_func):
dump_d = dump_d_func(d)
dump_reimpl = dump_dreimpl_func(dreimpl)
if dump_d != dump_reimpl:
hashes_orig, keys_orig, values_orig, fill_orig, used_orig = dump_d
hashes_new, keys_new, values_new, fill_new, used_new = dump_reimpl
print("ORIG SIZE", len(hashes_orig))
print("NEW SIZE", len(hashes_new))
print("ORIG fill/used: ", fill_orig, used_orig)
print("NEW fill/used: ", fill_new, used_new)
if len(hashes_orig) == len(hashes_new):
size = len(hashes_orig)
print("NEW | ORIG")
for i in range(size):
if hashes_new[i] is not EMPTY or hashes_orig[i] is not EMPTY:
print(i, " " * 3,
hashes_new[i], keys_new[i], values_new[i], " " * 3,
hashes_orig[i], keys_orig[i], values_orig[i])
assert dump_d == dump_reimpl
def run(ref_impl_factory, ref_impl_dump, test_impl_factory, test_impl_dump, n_inserts, extra_checks, key_value_factory, initial_state, verbose):
SINGLE_REMOVE_CHANCE = 0.3
MASS_REMOVE_CHANCE = 0.002
MASS_REMOVE_COEFF = 0.8
removed = set()
if initial_state:
d = ref_impl_factory(initial_state)
else:
d = ref_impl_factory()
if initial_state:
dreimpl = test_impl_factory(initial_state)
else:
dreimpl = test_impl_factory()
if verbose:
print("Starting test")
for i in range(n_inserts):
should_remove = (random.random() < SINGLE_REMOVE_CHANCE)
if should_remove and d and d.keys(): # TODO: ugly, written while on a plane
to_remove = random.choice(list(d.keys()))
if verbose:
print("Removing {}".format(to_remove))
del d[to_remove]
del dreimpl[to_remove]
if verbose:
print(d)
verify_same(d, ref_impl_dump, dreimpl, test_impl_dump)
removed.add(to_remove)
should_mass_remove = (random.random() < MASS_REMOVE_CHANCE)
if should_mass_remove and len(d) > 10:
to_remove_list = random.sample(list(d.keys()), int(MASS_REMOVE_COEFF * len(d)))
if verbose:
print("Mass-Removing {} elements".format(len(to_remove_list)))
for k in to_remove_list:
del d[k]
del dreimpl[k]
removed.add(k)
if extra_checks:
for k in d.keys():
assert d[k] == dreimpl[k]
for r in removed:
try:
dreimpl[r]
assert False
except KeyError:
pass
key_to_insert = key_value_factory.generate_key()
value_to_insert = key_value_factory.generate_value()
_keys_set = getattr(d, '_keys_set', None)
# TODO: ugly code written on a plane
# TODO: properly implement in/not in when I land
if _keys_set is not None:
key_present = key_to_insert in _keys_set
else:
key_present = key_to_insert in d
if not key_present:
if verbose:
print("Inserting ({key}, {value})".format(key=key_to_insert, value=value_to_insert))
try:
dreimpl[key_to_insert]
assert False
except KeyError:
pass
else:
if verbose:
print("Replacing ({key}, {value1}) with ({key}, {value2})".format(key=key_to_insert, value1=d[key_to_insert], value2=value_to_insert))
removed.discard(key_to_insert)
d[key_to_insert] = value_to_insert
dreimpl[key_to_insert] = value_to_insert
if verbose:
print(d)
verify_same(d, ref_impl_dump, dreimpl, test_impl_dump)
assert dreimpl[key_to_insert] == value_to_insert
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Stress-test dict-like reimplementations')
parser.add_argument('--reference-implementation', choices=IMPLEMENTATIONS.keys(), required=True)
parser.add_argument('--test-implementation', choices=IMPLEMENTATIONS.keys(), required=True)
parser.add_argument('--no-extra-getitem-checks', dest='extra_checks', action='store_false')
parser.add_argument('--num-inserts', type=int, default=500)
parser.add_argument('--forever', action='store_true')
parser.add_argument('--kv', choices=["numbers", "all"], required=True)
parser.add_argument('--initial-size', type=int, default=-1)
parser.add_argument('--verbose', action='store_true')
args = parser.parse_args()
if args.kv == "numbers":
kv_factory = IntKeyValueFactory(args.num_inserts)
elif args.kv == "all":
kv_factory = AllKeyValueFactory(args.num_inserts)
ref_impl = IMPLEMENTATIONS[args.reference_implementation]
test_impl = IMPLEMENTATIONS[args.test_implementation]
def test_iteration():
initial_size = args.initial_size if args.initial_size >= 0 else random.randint(0, 100)
initial_state = [(kv_factory.generate_key(), kv_factory.generate_value()) for _ in range(initial_size)]
run(*(ref_impl + test_impl),
n_inserts=args.num_inserts,
extra_checks=args.extra_checks,
key_value_factory=kv_factory,
initial_state=initial_state,
verbose=args.verbose)
if args.forever:
while True:
test_iteration()
else:
test_iteration()
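# Example invocation (a sketch; the script name is illustrative):
#   python stress_test.py --reference-implementation dict_actual \
#       --test-implementation dict32_reimpl_py --kv numbers --num-inserts 500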
|
11540845
|
import polars as pl
__all__ = [
'col', 'exclude', 'lit', 'Expr', 'Series',
# dtypes
'Int8', 'Int16', 'Int32', 'Int64',
'UInt8', 'UInt16', 'UInt32', 'UInt64',
'Float32', 'Float64', 'Boolean', 'Utf8',
'List', 'Date', 'Datetime', 'Object'
]
col = pl.col
exclude = pl.exclude
lit = pl.lit
Expr = pl.Expr
Series = pl.Series
# dtypes
Int8 = pl.Int8
Int16 = pl.Int16
Int32 = pl.Int32
Int64 = pl.Int64
UInt8 = pl.UInt8
UInt16 = pl.UInt16
UInt32 = pl.UInt32
UInt64 = pl.UInt64
Float32 = pl.Float32
Float64 = pl.Float64
Boolean = pl.Boolean
Utf8 = pl.Utf8
List = pl.List
Date = pl.Date
Datetime = pl.Datetime
Object = pl.Object
|
11540862
|
import os
import io
import yaml
from tcfcli.common.user_exceptions import ContextException
from tcfcli.libs.utils.yaml_parser import yaml_parse
class Template(object):
@staticmethod
def get_template_data(template_file):
if not os.path.exists(template_file):
return {}
with io.open(template_file, mode='r', encoding='utf-8') as f:
try:
return yaml_parse(f.read())
except (ValueError, yaml.YAMLError) as ex:
raise ContextException("Parse template failed: {}".format(str(ex)))
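# Minimal usage sketch ('template.yaml' is a hypothetical path):
#   data = Template.get_template_data('template.yaml')  # {} if the file is absent
#   resources = data.get('Resources', {})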
|
11540878
|
import os
from behave.matchers import Match, ParseMatcher, RegexMatcher, MatchWithError
from behave.matchers import matcher_mapping
from collections import defaultdict
import six
from functools import partial
class TransformerBase(object):
"""
Defines the basic functions of a Transformer
As implemented, it does effectively nothing. You are meant to subclass and override the methods.
Don't forget to call ``super`` when extending ``__init__``
"""
def __init__(self, context=None, func=None, **kwargs):
"""
:param context: behave context
:param func: the matched step function currently being executed
:param kwargs: Not doing anything with these, but allowing us to swallow them.
"""
self.context = context
self.func = func
def transform_value(self, value):
return value
def transform_args(self, args):
return [self.transform_value(arg) for arg in args]
def transform_kwargs(self, kwargs):
return {key: self.transform_value(value) for key, value in kwargs.items()}
def transform(self, args, kwargs):
return self.transform_args(args), self.transform_kwargs(kwargs), self.func
class FormatTransformer(TransformerBase):
"""
    Implements a basic interpolation transformation strategy.
    Parameter values are transformed through the .format method,
    using named placeholders and values supplied as
    keyword arguments at the time of initialization.
"""
def __init__(self, context=None, func=None, **kwargs):
"""
:param context: behave context
:param func: the matched step function currently being executed
:param kwargs: keyword-value pairs used for formatting step strings.
"""
suppress_missing = kwargs.pop('suppress_missing', False)
if context is not None:
kwargs.update(context=context)
if func is not None:
kwargs.update(func=func)
super(FormatTransformer, self).__init__(**kwargs)
self.transformations = kwargs
        if suppress_missing:
            # default_factory is called with no arguments, so it must be a
            # zero-argument callable
            self.transformations = defaultdict(lambda: '', self.transformations)
def transform_value(self, value):
if not isinstance(value, six.string_types):
return value # non-string arguments should be returned unadulterated
return value.format(**self.transformations)
class EnvironmentTransformer(FormatTransformer):
"""
Like FormatTransformer, but additionally provides items from ``os.environ`` as keyword arguments
"""
def __init__(self, *args, **kwargs):
kwargs.update(os.environ)
super(EnvironmentTransformer, self).__init__(*args, **kwargs)
class FuncTransformer(TransformerBase):
"""
Replaces the step function with a supplied new function!
"""
def __init__(self, new_func, *args, **kwargs):
self.new_func = new_func
super(FuncTransformer, self).__init__(*args, **kwargs)
def transform(self, *transform_args, **transform_kwargs):
args, kwargs, _old_func = super(FuncTransformer, self).transform(*transform_args, **transform_kwargs)
return args, kwargs, self.new_func
class TransformingMatch(Match):
"""
Tweak of the normal Match object
    When the ``transformer_class`` attribute, a subclass of ``behave_webdriver.transformers.TransformerBase``,
is present on the context, that class will be called with the context and decorated step function for the step
currently being executed. This class has the ability to 'transform' the parsed arguments and the function itself.
"""
def run(self, context):
args = []
kwargs = {}
for arg in self.arguments:
if arg.name is not None:
kwargs[arg.name] = arg.value
else:
args.append(arg.value)
with context.use_with_user_mode():
            # the above is a COPY/PASTE of the original `run` implementation;
            # the lines below add the transformation hook
transformer_class = context.transformer_class if 'transformer_class' in context else None
            if transformer_class and (
                    (isinstance(transformer_class, partial)
                     and issubclass(transformer_class.func, TransformerBase))
                    or issubclass(transformer_class, TransformerBase)):
transformer = transformer_class(context=context, func=self.func)
args, kwargs, func = transformer.transform(args, kwargs)
else:
func = self.func
func(context, *args, **kwargs)
class TransformMixin(object):
"""
Replaces the usual Match object with a TransformingMatch
This can be mixed in with any matcher class and added to the mapping; you could even override existing matchers
>>> from behave.matchers import RegexMatcher, matcher_mapping # any matcher will work
>>> class TransformRegexMatcher(TransformMixin, RegexMatcher): pass
>>> matcher_mapping['re'] = TransformRegexMatcher
"""
def match(self, step):
# -- PROTECT AGAINST: Type conversion errors (with ParseMatcher).
try:
result = self.check_match(step)
except Exception as e: # pylint: disable=broad-except
return MatchWithError(self.func, e)
if result is None:
return None # -- NO-MATCH
# the above is a COPY/PASTE of original implementation; only the following line is changed
return TransformingMatch(self.func, result)
# behave-webdriver uses both ParseMatcher ('parse') and RegexMatcher ('re'); so we need a transforming version of each
class TransformParseMatcher(TransformMixin, ParseMatcher):
pass
class TransformRegexMatcher(TransformMixin, RegexMatcher):
pass
# add the transforming matchers to the mapping so they can be used by ``use_step_matcher``.
matcher_mapping['transform-parse'] = TransformParseMatcher
matcher_mapping['transform-re'] = TransformRegexMatcher
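# A minimal usage sketch (names illustrative): FormatTransformer interpolates
# placeholders in string step arguments from the keyword arguments it was
# constructed with, leaving non-string values untouched.
def _demo_format_transformer():
    t = FormatTransformer(username='alice')
    assert t.transform_value('hello {username}') == 'hello alice'
    assert t.transform_value(42) == 42
    args, kwargs, func = t.transform(['{username}'], {'greeting': 'hi {username}'})
    assert args == ['alice'] and kwargs == {'greeting': 'hi alice'} and func is None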
|
11540885
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
import time
import pickle
import numpy as np
import math
from config import args
from loss_funcs.keypoints_loss import batch_kp_2d_l2_loss, calc_mpjpe, calc_pampjpe
class Learnable_Loss(nn.Module):
"""docstring for Learnable_Loss"""
def __init__(self, ID_num=0):
super(Learnable_Loss, self).__init__()
self.loss_class = {'det':['CenterMap'],'reg':['MPJPE','PAMPJPE','P_KP2D','Pose','Shape','Prior']}
self.all_loss_names = np.concatenate([loss_list for task_name, loss_list in self.loss_class.items()]).tolist()
if args().learn_2dpose:
self.loss_class['reg'].append('heatmap')
if args().learn_AE:
self.loss_class['reg'].append('AE')
def forward(self, outputs):
loss_dict = outputs['loss_dict']
if args().model_return_loss:
if args().PAMPJPE_weight>0 and outputs['detection_flag']:
try:
kp3d_mask = outputs['meta_data']['valid_masks'][:,1]
kp3d_gt = outputs['meta_data']['kp_3d'][kp3d_mask].contiguous().to(outputs['j3d'].device)
preds_kp3d = outputs['j3d'][kp3d_mask, :kp3d_gt.shape[1]].contiguous()
if len(preds_kp3d)>0:
loss_dict['PAMPJPE'] = calc_pampjpe(kp3d_gt.contiguous(), preds_kp3d.contiguous()).mean() * args().PAMPJPE_weight
except Exception as exp_error:
print('PA_MPJPE calculation failed!', exp_error)
loss_dict = {key:value.mean() for key, value in loss_dict.items() if not isinstance(value, int)}
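        # Clamp each loss term at args().loss_thresh: any value above the
        # threshold is rescaled to exactly the threshold
        # (value / (value.item() / thresh) == thresh), so a single exploding
        # term cannot dominate the total gradient.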
loss = sum([value if value.item()<args().loss_thresh else value/(value.item()/args().loss_thresh) for key, value in loss_dict.items()])
det_loss = sum([loss_dict[item] for item in self.loss_class['det'] if item in loss_dict])
reg_loss = sum([loss_dict[item] for item in self.loss_class['reg'] if item in loss_dict])
loss_tasks = {'reg': reg_loss, 'det': det_loss}
left_loss = sum([loss_dict[loss_item] for loss_item in loss_dict if loss_item not in self.all_loss_names])
if left_loss!=0:
loss_tasks.update({'Others': left_loss})
outputs['loss_dict'] = dict(loss_tasks, **loss_dict)
return loss, outputs
|
11540918
|
from __future__ import print_function
import io
import os
import re
import stat
import sys
import zipfile
from textwrap import dedent, fill
import six
from click import UsageError
from click.testing import CliRunner
from humanize import naturalsize
from twisted.internet import endpoints, reactor
from twisted.internet.defer import gatherResults, inlineCallbacks, returnValue, CancelledError
from twisted.internet.error import ConnectionRefusedError
from twisted.internet.utils import getProcessOutputAndValue
from twisted.python import log, procutils
from twisted.trial import unittest
from zope.interface import implementer
import mock
from .. import __version__
from .._interfaces import ITorManager
from ..cli import cli, cmd_receive, cmd_send, welcome
from ..errors import (ServerConnectionError, TransferError,
UnsendableFileError, WelcomeError, WrongPasswordError)
from .common import ServerBase, config
def build_offer(args):
s = cmd_send.Sender(args, None)
return s._build_offer()
class OfferData(unittest.TestCase):
def setUp(self):
self._things_to_delete = []
self.cfg = cfg = config("send")
cfg.stdout = io.StringIO()
cfg.stderr = io.StringIO()
def tearDown(self):
for fn in self._things_to_delete:
if os.path.exists(fn):
os.unlink(fn)
del self.cfg
def test_text(self):
self.cfg.text = message = "blah blah blah ponies"
d, fd_to_send = build_offer(self.cfg)
self.assertIn("message", d)
self.assertNotIn("file", d)
self.assertNotIn("directory", d)
self.assertEqual(d["message"], message)
self.assertEqual(fd_to_send, None)
def test_file(self):
self.cfg.what = filename = "my file"
message = b"yay ponies\n"
send_dir = self.mktemp()
os.mkdir(send_dir)
abs_filename = os.path.join(send_dir, filename)
with open(abs_filename, "wb") as f:
f.write(message)
self.cfg.cwd = send_dir
d, fd_to_send = build_offer(self.cfg)
self.assertNotIn("message", d)
self.assertIn("file", d)
self.assertNotIn("directory", d)
self.assertEqual(d["file"]["filesize"], len(message))
self.assertEqual(d["file"]["filename"], filename)
self.assertEqual(fd_to_send.tell(), 0)
self.assertEqual(fd_to_send.read(), message)
def _create_broken_symlink(self):
if not hasattr(os, 'symlink'):
raise unittest.SkipTest("host OS does not support symlinks")
parent_dir = self.mktemp()
os.mkdir(parent_dir)
send_dir = "dirname"
os.mkdir(os.path.join(parent_dir, send_dir))
os.symlink('/non/existent/file',
os.path.join(parent_dir, send_dir, 'linky'))
send_dir_arg = send_dir
self.cfg.what = send_dir_arg
self.cfg.cwd = parent_dir
def test_broken_symlink_raises_err(self):
self._create_broken_symlink()
self.cfg.ignore_unsendable_files = False
e = self.assertRaises(UnsendableFileError, build_offer, self.cfg)
        # On English distributions of Linux, this will be
# "linky: No such file or directory", but the error may be
# different on Windows and other locales and/or Unix variants, so
# we'll just assert the part we know about.
self.assertIn("linky: ", str(e))
def test_broken_symlink_is_ignored(self):
self._create_broken_symlink()
self.cfg.ignore_unsendable_files = True
d, fd_to_send = build_offer(self.cfg)
self.assertIn('(ignoring error)', self.cfg.stderr.getvalue())
self.assertEqual(d['directory']['numfiles'], 0)
self.assertEqual(d['directory']['numbytes'], 0)
def test_missing_file(self):
self.cfg.what = filename = "missing"
send_dir = self.mktemp()
os.mkdir(send_dir)
self.cfg.cwd = send_dir
e = self.assertRaises(TransferError, build_offer, self.cfg)
self.assertEqual(
str(e), "Cannot send: no file/directory named '%s'" % filename)
def _do_test_directory(self, addslash):
parent_dir = self.mktemp()
os.mkdir(parent_dir)
send_dir = "dirname"
os.mkdir(os.path.join(parent_dir, send_dir))
ponies = [str(i) for i in range(5)]
for p in ponies:
with open(os.path.join(parent_dir, send_dir, p), "wb") as f:
f.write(("%s ponies\n" % p).encode("ascii"))
send_dir_arg = send_dir
if addslash:
send_dir_arg += os.sep
self.cfg.what = send_dir_arg
self.cfg.cwd = parent_dir
d, fd_to_send = build_offer(self.cfg)
self.assertNotIn("message", d)
self.assertNotIn("file", d)
self.assertIn("directory", d)
self.assertEqual(d["directory"]["dirname"], send_dir)
self.assertEqual(d["directory"]["mode"], "zipfile/deflated")
self.assertEqual(d["directory"]["numfiles"], 5)
self.assertIn("numbytes", d["directory"])
self.assertIsInstance(d["directory"]["numbytes"], six.integer_types)
self.assertEqual(fd_to_send.tell(), 0)
zdata = fd_to_send.read()
self.assertEqual(len(zdata), d["directory"]["zipsize"])
fd_to_send.seek(0, 0)
with zipfile.ZipFile(fd_to_send, "r", zipfile.ZIP_DEFLATED) as zf:
zipnames = zf.namelist()
self.assertEqual(list(sorted(ponies)), list(sorted(zipnames)))
for name in zipnames:
contents = zf.open(name, "r").read()
self.assertEqual(("%s ponies\n" % name).encode("ascii"),
contents)
def test_directory(self):
return self._do_test_directory(addslash=False)
def test_directory_addslash(self):
return self._do_test_directory(addslash=True)
def test_unknown(self):
self.cfg.what = filename = "unknown"
send_dir = self.mktemp()
os.mkdir(send_dir)
abs_filename = os.path.abspath(os.path.join(send_dir, filename))
self.cfg.cwd = send_dir
try:
os.mkfifo(abs_filename)
except AttributeError:
raise unittest.SkipTest("is mkfifo supported on this platform?")
# Delete the named pipe for the sake of users who might run "pip
# wheel ." in this directory later. That command wants to copy
# everything into a tempdir before building a wheel, and the
        # shutil.copy_tree() it uses can't handle the named pipe.
self._things_to_delete.append(abs_filename)
self.assertFalse(os.path.isfile(abs_filename))
self.assertFalse(os.path.isdir(abs_filename))
e = self.assertRaises(TypeError, build_offer, self.cfg)
self.assertEqual(
str(e), "'%s' is neither file nor directory" % filename)
def test_symlink(self):
if not hasattr(os, 'symlink'):
raise unittest.SkipTest("host OS does not support symlinks")
# build A/B1 -> B2 (==A/B2), and A/B2/C.txt
parent_dir = self.mktemp()
os.mkdir(parent_dir)
os.mkdir(os.path.join(parent_dir, "B2"))
with open(os.path.join(parent_dir, "B2", "C.txt"), "wb") as f:
f.write(b"success")
os.symlink("B2", os.path.join(parent_dir, "B1"))
# now send "B1/C.txt" from A, and it should get the right file
self.cfg.cwd = parent_dir
self.cfg.what = os.path.join("B1", "C.txt")
d, fd_to_send = build_offer(self.cfg)
self.assertEqual(d["file"]["filename"], "C.txt")
self.assertEqual(fd_to_send.read(), b"success")
def test_symlink_collapse(self):
if not hasattr(os, 'symlink'):
raise unittest.SkipTest("host OS does not support symlinks")
# build A/B1, A/B1/D.txt
# A/B2/C2, A/B2/D.txt
# symlink A/B1/C1 -> A/B2/C2
parent_dir = self.mktemp()
os.mkdir(parent_dir)
os.mkdir(os.path.join(parent_dir, "B1"))
with open(os.path.join(parent_dir, "B1", "D.txt"), "wb") as f:
f.write(b"fail")
os.mkdir(os.path.join(parent_dir, "B2"))
os.mkdir(os.path.join(parent_dir, "B2", "C2"))
with open(os.path.join(parent_dir, "B2", "D.txt"), "wb") as f:
f.write(b"success")
os.symlink(
os.path.abspath(os.path.join(parent_dir, "B2", "C2")),
os.path.join(parent_dir, "B1", "C1"))
# Now send "B1/C1/../D.txt" from A. The correct traversal will be:
# * start: A
# * B1: A/B1
# * C1: follow symlink to A/B2/C2
# * ..: climb to A/B2
# * D.txt: open A/B2/D.txt, which contains "success"
# If the code mistakenly uses normpath(), it would do:
# * normpath turns B1/C1/../D.txt into B1/D.txt
# * start: A
# * B1: A/B1
# * D.txt: open A/B1/D.txt , which contains "fail"
self.cfg.cwd = parent_dir
self.cfg.what = os.path.join("B1", "C1", os.pardir, "D.txt")
d, fd_to_send = build_offer(self.cfg)
self.assertEqual(d["file"]["filename"], "D.txt")
self.assertEqual(fd_to_send.read(), b"success")
if os.name == "nt":
test_symlink_collapse.todo = "host OS has broken os.path.realpath()"
# ntpath.py's realpath() is built out of normpath(), and does not
# follow symlinks properly, so this test always fails. "wormhole send
# PATH" on windows will do the wrong thing. See
    # https://bugs.python.org/issue9949 for details. I'm making this a
# TODO instead of a SKIP because 1: this causes an observable
# misbehavior (albeit in rare circumstances), 2: it probably used to
# work (sometimes, but not in #251). See cmd_send.py for more notes.
class LocaleFinder:
def __init__(self):
self._run_once = False
@inlineCallbacks
def find_utf8_locale(self):
if sys.platform == "win32":
returnValue("en_US.UTF-8")
if self._run_once:
returnValue(self._best_locale)
self._best_locale = yield self._find_utf8_locale()
self._run_once = True
returnValue(self._best_locale)
@inlineCallbacks
def _find_utf8_locale(self):
# Click really wants to be running under a unicode-capable locale,
# especially on python3. macOS has en-US.UTF-8 but not C.UTF-8, and
# most linux boxes have C.UTF-8 but not en-US.UTF-8 . For tests,
# figure out which one is present and use that. For runtime, it's a
# mess, as really the user must take responsibility for setting their
# locale properly. I'm thinking of abandoning Click and going back to
# twisted.python.usage to avoid this problem in the future.
(out, err, rc) = yield getProcessOutputAndValue("locale", ["-a"])
if rc != 0:
log.msg("error running 'locale -a', rc=%s" % (rc, ))
log.msg("stderr: %s" % (err, ))
returnValue(None)
out = out.decode("utf-8") # make sure we get a string
utf8_locales = {}
for locale in out.splitlines():
locale = locale.strip()
if locale.lower().endswith((".utf-8", ".utf8")):
utf8_locales[locale.lower()] = locale
for wanted in ["C.utf8", "C.UTF-8", "en_US.utf8", "en_US.UTF-8"]:
if wanted.lower() in utf8_locales:
returnValue(utf8_locales[wanted.lower()])
if utf8_locales:
returnValue(list(utf8_locales.values())[0])
returnValue(None)
locale_finder = LocaleFinder()
class ScriptsBase:
def find_executable(self):
# to make sure we're running the right executable (in a virtualenv),
# we require that our "wormhole" lives in the same directory as our
# "python"
locations = procutils.which("wormhole")
if not locations:
raise unittest.SkipTest("unable to find 'wormhole' in $PATH")
wormhole = locations[0]
if (os.path.dirname(os.path.abspath(wormhole)) != os.path.dirname(
sys.executable)):
log.msg("locations: %s" % (locations, ))
log.msg("sys.executable: %s" % (sys.executable, ))
raise unittest.SkipTest(
"found the wrong 'wormhole' in $PATH: %s %s" %
(wormhole, sys.executable))
return wormhole
@inlineCallbacks
def is_runnable(self):
# One property of Versioneer is that many changes to the source tree
# (making a commit, dirtying a previously-clean tree) will change the
# version string. Entrypoint scripts frequently insist upon importing
# a library version that matches the script version (whatever was
# reported when 'pip install' was run), and throw a
# DistributionNotFound error when they don't match. This is really
# annoying in a workspace created with "pip install -e .", as you
# must re-run pip after each commit.
# So let's report just one error in this case (from test_version),
# and skip the other tests that we know will fail.
# Setting LANG/LC_ALL to a unicode-capable locale is necessary to
# convince Click to not complain about a forced-ascii locale. My
# apologies to folks who want to run tests on a machine that doesn't
# have the C.UTF-8 locale installed.
locale = yield locale_finder.find_utf8_locale()
if not locale:
raise unittest.SkipTest("unable to find UTF-8 locale")
locale_env = dict(LC_ALL=locale, LANG=locale)
wormhole = self.find_executable()
res = yield getProcessOutputAndValue(
wormhole, ["--version"], env=locale_env)
out, err, rc = res
if rc != 0:
log.msg("wormhole not runnable in this tree:")
log.msg("out", out)
log.msg("err", err)
log.msg("rc", rc)
raise unittest.SkipTest("wormhole is not runnable in this tree")
returnValue(locale_env)
class ScriptVersion(ServerBase, ScriptsBase, unittest.TestCase):
# we need Twisted to run the server, but we run the sender and receiver
# with deferToThread()
@inlineCallbacks
def test_version(self):
# "wormhole" must be on the path, so e.g. "pip install -e ." in a
# virtualenv. This guards against an environment where the tests
# below might run the wrong executable.
self.maxDiff = None
wormhole = self.find_executable()
# we must pass on the environment so that "something" doesn't
# get sad about UTF8 vs. ascii encodings
out, err, rc = yield getProcessOutputAndValue(
wormhole, ["--version"], env=os.environ)
err = err.decode("utf-8")
if "DistributionNotFound" in err:
log.msg("stderr was %s" % err)
last = err.strip().split("\n")[-1]
self.fail("wormhole not runnable: %s" % last)
ver = out.decode("utf-8") or err
self.failUnlessEqual(ver.strip(),
"magic-wormhole {}".format(__version__))
self.failUnlessEqual(rc, 0)
@implementer(ITorManager)
class FakeTor:
# use normal endpoints, but record the fact that we were asked
def __init__(self):
self.endpoints = []
def stream_via(self, host, port, tls=False):
self.endpoints.append((host, port, tls))
return endpoints.HostnameEndpoint(reactor, host, port)
def strip_deprecations(stderr, NL):
lines = [line
for line in stderr.split(NL)
if not ("Python 2 is no longer supported" in line or
"from cryptography import utils" in line or
"support will be dropped in the next release of cryptography" in line
)
]
return NL.join(lines)
class PregeneratedCode(ServerBase, ScriptsBase, unittest.TestCase):
# we need Twisted to run the server, but we run the sender and receiver
# with deferToThread()
@inlineCallbacks
def setUp(self):
self._env = yield self.is_runnable()
yield ServerBase.setUp(self)
@inlineCallbacks
def _do_test(self,
as_subprocess=False,
mode="text",
addslash=False,
override_filename=False,
fake_tor=False,
overwrite=False,
mock_accept=False,
verify=False):
assert mode in ("text", "file", "empty-file", "directory", "slow-text",
"slow-sender-text")
if fake_tor:
assert not as_subprocess
send_cfg = config("send")
recv_cfg = config("receive")
message = "blah blah blah ponies"
for cfg in [send_cfg, recv_cfg]:
cfg.hide_progress = True
cfg.relay_url = self.relayurl
cfg.transit_helper = ""
cfg.listen = True
cfg.code = u"1-abc"
cfg.stdout = io.StringIO()
cfg.stderr = io.StringIO()
cfg.verify = verify
send_dir = self.mktemp()
os.mkdir(send_dir)
receive_dir = self.mktemp()
os.mkdir(receive_dir)
if mode in ("text", "slow-text", "slow-sender-text"):
send_cfg.text = message
elif mode in ("file", "empty-file"):
if mode == "empty-file":
message = ""
send_filename = u"testfil\u00EB" # e-with-diaeresis
with open(os.path.join(send_dir, send_filename), "w") as f:
f.write(message)
send_cfg.what = send_filename
receive_filename = send_filename
recv_cfg.accept_file = False if mock_accept else True
if override_filename:
recv_cfg.output_file = receive_filename = u"outfile"
if overwrite:
recv_cfg.output_file = receive_filename
existing_file = os.path.join(receive_dir, receive_filename)
with open(existing_file, 'w') as f:
f.write('pls overwrite me')
elif mode == "directory":
# $send_dir/
# $send_dir/middle/
# $send_dir/middle/$dirname/
# $send_dir/middle/$dirname/[12345]
# cd $send_dir && wormhole send middle/$dirname
# cd $receive_dir && wormhole receive
# expect: $receive_dir/$dirname/[12345]
send_dirname = u"testdir"
def message(i):
return "test message %d\n" % i
os.mkdir(os.path.join(send_dir, u"middle"))
source_dir = os.path.join(send_dir, u"middle", send_dirname)
os.mkdir(source_dir)
modes = {}
for i in range(5):
path = os.path.join(source_dir, str(i))
with open(path, "w") as f:
f.write(message(i))
if i == 3:
os.chmod(path, 0o755)
modes[i] = stat.S_IMODE(os.stat(path).st_mode)
send_dirname_arg = os.path.join(u"middle", send_dirname)
if addslash:
send_dirname_arg += os.sep
send_cfg.what = send_dirname_arg
receive_dirname = send_dirname
recv_cfg.accept_file = False if mock_accept else True
if override_filename:
recv_cfg.output_file = receive_dirname = u"outdir"
if overwrite:
recv_cfg.output_file = receive_dirname
os.mkdir(os.path.join(receive_dir, receive_dirname))
if as_subprocess:
wormhole_bin = self.find_executable()
if send_cfg.text:
content_args = ['--text', send_cfg.text]
elif send_cfg.what:
content_args = [send_cfg.what]
# raise the rx KEY_TIMER to some large number here, to avoid
# spurious test failures on hosts that are slow enough to trigger
# the "Waiting for sender..." pacifier message. We can do in
# not-as_subprocess, because we can directly patch the value before
# running the receiver. But we can't patch across the subprocess
# boundary, so we use an environment variable.
env = self._env.copy()
env["_MAGIC_WORMHOLE_TEST_KEY_TIMER"] = "999999"
env["_MAGIC_WORMHOLE_TEST_VERIFY_TIMER"] = "999999"
send_args = [
'--relay-url',
self.relayurl,
'--transit-helper',
'',
'send',
'--hide-progress',
'--code',
send_cfg.code,
] + content_args
send_d = getProcessOutputAndValue(
wormhole_bin,
send_args,
path=send_dir,
env=env,
)
recv_args = [
'--relay-url',
self.relayurl,
'--transit-helper',
'',
'receive',
'--hide-progress',
'--accept-file',
recv_cfg.code,
]
if override_filename:
recv_args.extend(['-o', receive_filename])
receive_d = getProcessOutputAndValue(
wormhole_bin,
recv_args,
path=receive_dir,
env=env,
)
(send_res, receive_res) = yield gatherResults([send_d, receive_d],
True)
send_stdout = send_res[0].decode("utf-8")
send_stderr = send_res[1].decode("utf-8")
send_rc = send_res[2]
receive_stdout = receive_res[0].decode("utf-8")
receive_stderr = receive_res[1].decode("utf-8")
receive_rc = receive_res[2]
NL = os.linesep
send_stderr = strip_deprecations(send_stderr, NL)
receive_stderr = strip_deprecations(receive_stderr, NL)
self.assertEqual((send_rc, receive_rc), (0, 0),
(send_res, receive_res))
else:
send_cfg.cwd = send_dir
recv_cfg.cwd = receive_dir
if fake_tor:
send_cfg.tor = True
send_cfg.transit_helper = self.transit
tx_tm = FakeTor()
with mock.patch(
"wormhole.tor_manager.get_tor",
return_value=tx_tm,
) as mtx_tm:
send_d = cmd_send.send(send_cfg)
recv_cfg.tor = True
recv_cfg.transit_helper = self.transit
rx_tm = FakeTor()
with mock.patch(
"wormhole.tor_manager.get_tor",
return_value=rx_tm,
) as mrx_tm:
receive_d = cmd_receive.receive(recv_cfg)
else:
KEY_TIMER = 0 if mode == "slow-sender-text" else 99999
rxw = []
with mock.patch.object(cmd_receive, "KEY_TIMER", KEY_TIMER):
send_d = cmd_send.send(send_cfg)
receive_d = cmd_receive.receive(
recv_cfg, _debug_stash_wormhole=rxw)
# we need to keep KEY_TIMER patched until the receiver
# gets far enough to start the timer, which happens after
# the code is set
if mode == "slow-sender-text":
yield rxw[0].get_unverified_key()
# The sender might fail, leaving the receiver hanging, or vice
# versa. Make sure we don't wait on one side exclusively
VERIFY_TIMER = 0 if mode == "slow-text" else 99999
with mock.patch.object(cmd_receive, "VERIFY_TIMER", VERIFY_TIMER):
with mock.patch.object(cmd_send, "VERIFY_TIMER", VERIFY_TIMER):
if mock_accept or verify:
with mock.patch.object(
cmd_receive.six.moves, 'input',
return_value='yes') as i:
yield gatherResults([send_d, receive_d], True)
if verify:
s = i.mock_calls[0][1][0]
mo = re.search(r'^Verifier (\w+)\. ok\?', s)
self.assertTrue(mo, s)
sender_verifier = mo.group(1)
else:
yield gatherResults([send_d, receive_d], True)
if fake_tor:
expected_endpoints = [("127.0.0.1", self.rdv_ws_port, False)]
if mode in ("file", "directory"):
expected_endpoints.append(("127.0.0.1", self.transitport, False))
tx_timing = mtx_tm.call_args[1]["timing"]
self.assertEqual(tx_tm.endpoints, expected_endpoints)
self.assertEqual(
mtx_tm.mock_calls,
[mock.call(reactor, False, None, timing=tx_timing)])
rx_timing = mrx_tm.call_args[1]["timing"]
self.assertEqual(rx_tm.endpoints, expected_endpoints)
self.assertEqual(
mrx_tm.mock_calls,
[mock.call(reactor, False, None, timing=rx_timing)])
send_stdout = send_cfg.stdout.getvalue()
send_stderr = send_cfg.stderr.getvalue()
receive_stdout = recv_cfg.stdout.getvalue()
receive_stderr = recv_cfg.stderr.getvalue()
# all output here comes from a StringIO, which uses \n for
# newlines, even if we're on windows
NL = "\n"
self.maxDiff = None # show full output for assertion failures
key_established = ""
if mode == "slow-text":
key_established = "Key established, waiting for confirmation...\n"
self.assertEqual(send_stdout, "")
# check sender
if mode == "text" or mode == "slow-text":
expected = ("Sending text message ({bytes:d} Bytes){NL}"
"Wormhole code is: {code}{NL}"
"On the other computer, please run:{NL}{NL}"
"wormhole receive {verify}{code}{NL}{NL}"
"{KE}"
"text message sent{NL}").format(
bytes=len(message),
verify="--verify " if verify else "",
code=send_cfg.code,
NL=NL,
KE=key_established)
self.failUnlessEqual(send_stderr, expected)
elif mode == "file":
self.failUnlessIn(u"Sending {size:s} file named '{name}'{NL}"
.format(
size=naturalsize(len(message)),
name=send_filename,
NL=NL), send_stderr)
self.failUnlessIn(u"Wormhole code is: {code}{NL}"
"On the other computer, please run:{NL}{NL}"
"wormhole receive {code}{NL}{NL}".format(
code=send_cfg.code, NL=NL), send_stderr)
self.failUnlessIn(
u"File sent.. waiting for confirmation{NL}"
"Confirmation received. Transfer complete.{NL}".format(NL=NL),
send_stderr)
elif mode == "directory":
self.failUnlessIn(u"Sending directory", send_stderr)
self.failUnlessIn(u"named 'testdir'", send_stderr)
self.failUnlessIn(u"Wormhole code is: {code}{NL}"
"On the other computer, please run:{NL}{NL}"
"wormhole receive {code}{NL}{NL}".format(
code=send_cfg.code, NL=NL), send_stderr)
self.failUnlessIn(
u"File sent.. waiting for confirmation{NL}"
"Confirmation received. Transfer complete.{NL}".format(NL=NL),
send_stderr)
# check receiver
if mode in ("text", "slow-text", "slow-sender-text"):
self.assertEqual(receive_stdout, message + NL)
if mode == "text":
if verify:
mo = re.search(r'^Verifier (\w+)\.\s*$', receive_stderr)
self.assertTrue(mo, receive_stderr)
receiver_verifier = mo.group(1)
self.assertEqual(sender_verifier, receiver_verifier)
else:
self.assertEqual(receive_stderr, "")
elif mode == "slow-text":
self.assertEqual(receive_stderr, key_established)
elif mode == "slow-sender-text":
self.assertEqual(receive_stderr, "Waiting for sender...\n")
elif mode == "file":
self.failUnlessEqual(receive_stdout, "")
self.failUnlessIn(u"Receiving file ({size:s}) into: {name}".format(
size=naturalsize(len(message)), name=receive_filename),
receive_stderr)
self.failUnlessIn(u"Received file written to ", receive_stderr)
fn = os.path.join(receive_dir, receive_filename)
self.failUnless(os.path.exists(fn))
with open(fn, "r") as f:
self.failUnlessEqual(f.read(), message)
elif mode == "directory":
self.failUnlessEqual(receive_stdout, "")
want = (r"Receiving directory \(\d+ \w+\) into: {name}/"
.format(name=receive_dirname))
self.failUnless(
re.search(want, receive_stderr), (want, receive_stderr))
self.failUnlessIn(
u"Received files written to {name}"
.format(name=receive_dirname),
receive_stderr)
fn = os.path.join(receive_dir, receive_dirname)
self.failUnless(os.path.exists(fn), fn)
for i in range(5):
fn = os.path.join(receive_dir, receive_dirname, str(i))
with open(fn, "r") as f:
self.failUnlessEqual(f.read(), message(i))
self.failUnlessEqual(modes[i], stat.S_IMODE(
os.stat(fn).st_mode))
def test_text(self):
return self._do_test()
def test_text_subprocess(self):
return self._do_test(as_subprocess=True)
def test_text_tor(self):
return self._do_test(fake_tor=True)
def test_text_verify(self):
return self._do_test(verify=True)
def test_file(self):
return self._do_test(mode="file")
def test_file_override(self):
return self._do_test(mode="file", override_filename=True)
def test_file_overwrite(self):
return self._do_test(mode="file", overwrite=True)
def test_file_overwrite_mock_accept(self):
return self._do_test(mode="file", overwrite=True, mock_accept=True)
def test_file_tor(self):
return self._do_test(mode="file", fake_tor=True)
def test_empty_file(self):
return self._do_test(mode="empty-file")
def test_directory(self):
return self._do_test(mode="directory")
def test_directory_addslash(self):
return self._do_test(mode="directory", addslash=True)
def test_directory_override(self):
return self._do_test(mode="directory", override_filename=True)
def test_directory_overwrite(self):
return self._do_test(mode="directory", overwrite=True)
def test_directory_overwrite_mock_accept(self):
return self._do_test(
mode="directory", overwrite=True, mock_accept=True)
def test_slow_text(self):
return self._do_test(mode="slow-text")
def test_slow_sender_text(self):
return self._do_test(mode="slow-sender-text")
@inlineCallbacks
def _do_test_fail(self, mode, failmode):
assert mode in ("file", "directory")
assert failmode in ("noclobber", "toobig")
send_cfg = config("send")
recv_cfg = config("receive")
for cfg in [send_cfg, recv_cfg]:
cfg.hide_progress = True
cfg.relay_url = self.relayurl
cfg.transit_helper = ""
cfg.listen = False
cfg.code = u"1-abc"
cfg.stdout = io.StringIO()
cfg.stderr = io.StringIO()
send_dir = self.mktemp()
os.mkdir(send_dir)
receive_dir = self.mktemp()
os.mkdir(receive_dir)
recv_cfg.accept_file = True # don't ask for permission
if mode == "file":
message = "test message\n"
send_cfg.what = receive_name = send_filename = "testfile"
fn = os.path.join(send_dir, send_filename)
with open(fn, "w") as f:
f.write(message)
size = os.stat(fn).st_size
elif mode == "directory":
# $send_dir/
# $send_dir/$dirname/
# $send_dir/$dirname/[12345]
# cd $send_dir && wormhole send $dirname
# cd $receive_dir && wormhole receive
# expect: $receive_dir/$dirname/[12345]
size = 0
send_cfg.what = receive_name = send_dirname = "testdir"
os.mkdir(os.path.join(send_dir, send_dirname))
for i in range(5):
path = os.path.join(send_dir, send_dirname, str(i))
with open(path, "w") as f:
f.write("test message %d\n" % i)
size += os.stat(path).st_size
if failmode == "noclobber":
PRESERVE = "don't clobber me\n"
clobberable = os.path.join(receive_dir, receive_name)
with open(clobberable, "w") as f:
f.write(PRESERVE)
send_cfg.cwd = send_dir
send_d = cmd_send.send(send_cfg)
recv_cfg.cwd = receive_dir
receive_d = cmd_receive.receive(recv_cfg)
# both sides will fail
if failmode == "noclobber":
free_space = 10000000
else:
free_space = 0
with mock.patch(
"wormhole.cli.cmd_receive.estimate_free_space",
return_value=free_space):
f = yield self.assertFailure(send_d, TransferError)
self.assertEqual(
str(f), "remote error, transfer abandoned: transfer rejected")
f = yield self.assertFailure(receive_d, TransferError)
self.assertEqual(str(f), "transfer rejected")
send_stdout = send_cfg.stdout.getvalue()
send_stderr = send_cfg.stderr.getvalue()
receive_stdout = recv_cfg.stdout.getvalue()
receive_stderr = recv_cfg.stderr.getvalue()
# all output here comes from a StringIO, which uses \n for
# newlines, even if we're on windows
NL = "\n"
self.maxDiff = None # show full output for assertion failures
self.assertEqual(send_stdout, "")
self.assertEqual(receive_stdout, "")
# check sender
if mode == "file":
self.failUnlessIn("Sending {size:s} file named '{name}'{NL}"
.format(
size=naturalsize(size),
name=send_filename,
NL=NL), send_stderr)
self.failUnlessIn("Wormhole code is: {code}{NL}"
"On the other computer, please run:{NL}{NL}"
"wormhole receive {code}{NL}".format(
code=send_cfg.code, NL=NL), send_stderr)
self.failIfIn(
"File sent.. waiting for confirmation{NL}"
"Confirmation received. Transfer complete.{NL}".format(NL=NL),
send_stderr)
elif mode == "directory":
self.failUnlessIn("Sending directory", send_stderr)
self.failUnlessIn("named 'testdir'", send_stderr)
self.failUnlessIn("Wormhole code is: {code}{NL}"
"On the other computer, please run:{NL}{NL}"
"wormhole receive {code}{NL}".format(
code=send_cfg.code, NL=NL), send_stderr)
self.failIfIn(
"File sent.. waiting for confirmation{NL}"
"Confirmation received. Transfer complete.{NL}".format(NL=NL),
send_stderr)
# check receiver
if mode == "file":
self.failIfIn("Received file written to ", receive_stderr)
if failmode == "noclobber":
self.failUnlessIn(
"Error: "
"refusing to overwrite existing 'testfile'{NL}"
.format(NL=NL),
receive_stderr)
else:
self.failUnlessIn(
"Error: "
"insufficient free space (0B) for file ({size:d}B){NL}"
.format(NL=NL, size=size), receive_stderr)
elif mode == "directory":
self.failIfIn(
"Received files written to {name}".format(name=receive_name),
receive_stderr)
# want = (r"Receiving directory \(\d+ \w+\) into: {name}/"
# .format(name=receive_name))
# self.failUnless(re.search(want, receive_stderr),
# (want, receive_stderr))
if failmode == "noclobber":
self.failUnlessIn(
"Error: "
"refusing to overwrite existing 'testdir'{NL}"
.format(NL=NL),
receive_stderr)
else:
self.failUnlessIn(("Error: "
"insufficient free space (0B) for directory"
" ({size:d}B){NL}").format(
NL=NL, size=size), receive_stderr)
if failmode == "noclobber":
fn = os.path.join(receive_dir, receive_name)
self.failUnless(os.path.exists(fn))
with open(fn, "r") as f:
self.failUnlessEqual(f.read(), PRESERVE)
def test_fail_file_noclobber(self):
return self._do_test_fail("file", "noclobber")
def test_fail_directory_noclobber(self):
return self._do_test_fail("directory", "noclobber")
def test_fail_file_toobig(self):
return self._do_test_fail("file", "toobig")
def test_fail_directory_toobig(self):
return self._do_test_fail("directory", "toobig")
class ZeroMode(ServerBase, unittest.TestCase):
@inlineCallbacks
def test_text(self):
send_cfg = config("send")
recv_cfg = config("receive")
message = "textponies"
for cfg in [send_cfg, recv_cfg]:
cfg.hide_progress = True
cfg.relay_url = self.relayurl
cfg.transit_helper = ""
cfg.listen = True
cfg.zeromode = True
cfg.stdout = io.StringIO()
cfg.stderr = io.StringIO()
send_cfg.text = message
# send_cfg.cwd = send_dir
# recv_cfg.cwd = receive_dir
send_d = cmd_send.send(send_cfg)
receive_d = cmd_receive.receive(recv_cfg)
yield gatherResults([send_d, receive_d], True)
send_stdout = send_cfg.stdout.getvalue()
send_stderr = send_cfg.stderr.getvalue()
receive_stdout = recv_cfg.stdout.getvalue()
receive_stderr = recv_cfg.stderr.getvalue()
# all output here comes from a StringIO, which uses \n for
# newlines, even if we're on windows
NL = "\n"
self.maxDiff = None # show full output for assertion failures
self.assertEqual(send_stdout, "")
# check sender
expected = ("Sending text message ({bytes:d} Bytes){NL}"
"On the other computer, please run:{NL}"
"{NL}"
"wormhole receive -0{NL}"
"{NL}"
"text message sent{NL}").format(
bytes=len(message), NL=NL)
self.failUnlessEqual(send_stderr, expected)
# check receiver
self.assertEqual(receive_stdout, message + NL)
self.assertEqual(receive_stderr, "")
class NotWelcome(ServerBase, unittest.TestCase):
@inlineCallbacks
def setUp(self):
yield self._setup_relay(error="please upgrade XYZ")
self.cfg = cfg = config("send")
cfg.hide_progress = True
cfg.listen = False
cfg.relay_url = self.relayurl
cfg.transit_helper = ""
cfg.stdout = io.StringIO()
cfg.stderr = io.StringIO()
@inlineCallbacks
def test_sender(self):
self.cfg.text = "hi"
self.cfg.code = u"1-abc"
send_d = cmd_send.send(self.cfg)
f = yield self.assertFailure(send_d, WelcomeError)
self.assertEqual(str(f), "please upgrade XYZ")
@inlineCallbacks
def test_receiver(self):
self.cfg.code = u"1-abc"
receive_d = cmd_receive.receive(self.cfg)
f = yield self.assertFailure(receive_d, WelcomeError)
self.assertEqual(str(f), "please upgrade XYZ")
class NoServer(ServerBase, unittest.TestCase):
@inlineCallbacks
def setUp(self):
yield self._setup_relay(None)
yield self._relay_server.disownServiceParent()
@inlineCallbacks
def test_sender(self):
cfg = config("send")
cfg.hide_progress = True
cfg.listen = False
cfg.relay_url = self.relayurl
cfg.transit_helper = ""
cfg.stdout = io.StringIO()
cfg.stderr = io.StringIO()
cfg.text = "hi"
cfg.code = u"1-abc"
send_d = cmd_send.send(cfg)
e = yield self.assertFailure(send_d, ServerConnectionError)
self.assertIsInstance(e.reason, ConnectionRefusedError)
@inlineCallbacks
def test_sender_allocation(self):
cfg = config("send")
cfg.hide_progress = True
cfg.listen = False
cfg.relay_url = self.relayurl
cfg.transit_helper = ""
cfg.stdout = io.StringIO()
cfg.stderr = io.StringIO()
cfg.text = "hi"
send_d = cmd_send.send(cfg)
e = yield self.assertFailure(send_d, ServerConnectionError)
self.assertIsInstance(e.reason, ConnectionRefusedError)
@inlineCallbacks
def test_receiver(self):
cfg = config("receive")
cfg.hide_progress = True
cfg.listen = False
cfg.relay_url = self.relayurl
cfg.transit_helper = ""
cfg.stdout = io.StringIO()
cfg.stderr = io.StringIO()
cfg.code = u"1-abc"
receive_d = cmd_receive.receive(cfg)
e = yield self.assertFailure(receive_d, ServerConnectionError)
self.assertIsInstance(e.reason, ConnectionRefusedError)
class Cleanup(ServerBase, unittest.TestCase):
def make_config(self):
cfg = config("send")
# common options for all tests in this suite
cfg.hide_progress = True
cfg.relay_url = self.relayurl
cfg.transit_helper = ""
cfg.stdout = io.StringIO()
cfg.stderr = io.StringIO()
return cfg
@inlineCallbacks
@mock.patch('sys.stdout')
def test_text(self, stdout):
# the rendezvous channel should be deleted after success
cfg = self.make_config()
cfg.text = "hello"
cfg.code = u"1-abc"
send_d = cmd_send.send(cfg)
receive_d = cmd_receive.receive(cfg)
yield send_d
yield receive_d
cids = self._rendezvous.get_app(cmd_send.APPID).get_nameplate_ids()
self.assertEqual(len(cids), 0)
@inlineCallbacks
def test_text_wrong_password(self):
# if the password was wrong, the rendezvous channel should still be
# deleted
send_cfg = self.make_config()
send_cfg.text = "secret message"
send_cfg.code = u"1-abc"
send_d = cmd_send.send(send_cfg)
rx_cfg = self.make_config()
rx_cfg.code = u"1-WRONG"
receive_d = cmd_receive.receive(rx_cfg)
# both sides should be capable of detecting the mismatch
yield self.assertFailure(send_d, WrongPasswordError)
yield self.assertFailure(receive_d, WrongPasswordError)
cids = self._rendezvous.get_app(cmd_send.APPID).get_nameplate_ids()
self.assertEqual(len(cids), 0)
class ExtractFile(unittest.TestCase):
def test_filenames(self):
args = mock.Mock()
args.relay_url = u""
ef = cmd_receive.Receiver(args)._extract_file
extract_dir = os.path.abspath(self.mktemp())
zf = mock.Mock()
zi = mock.Mock()
zi.filename = "ok"
zi.external_attr = 5 << 16
expected = os.path.join(extract_dir, "ok")
with mock.patch.object(cmd_receive.os, "chmod") as chmod:
ef(zf, zi, extract_dir)
self.assertEqual(zf.extract.mock_calls,
[mock.call(zi.filename, path=extract_dir)])
self.assertEqual(chmod.mock_calls, [mock.call(expected, 5)])
zf = mock.Mock()
zi = mock.Mock()
zi.filename = "../haha"
e = self.assertRaises(ValueError, ef, zf, zi, extract_dir)
self.assertIn("malicious zipfile", str(e))
zf = mock.Mock()
zi = mock.Mock()
zi.filename = "haha//root" # abspath squashes this, hopefully zipfile
# does too
zi.external_attr = 5 << 16
expected = os.path.join(extract_dir, "haha", "root")
with mock.patch.object(cmd_receive.os, "chmod") as chmod:
ef(zf, zi, extract_dir)
self.assertEqual(zf.extract.mock_calls,
[mock.call(zi.filename, path=extract_dir)])
self.assertEqual(chmod.mock_calls, [mock.call(expected, 5)])
zf = mock.Mock()
zi = mock.Mock()
zi.filename = "/etc/passwd"
e = self.assertRaises(ValueError, ef, zf, zi, extract_dir)
self.assertIn("malicious zipfile", str(e))
class AppID(ServerBase, unittest.TestCase):
@inlineCallbacks
def setUp(self):
yield super(AppID, self).setUp()
self.cfg = cfg = config("send")
# common options for all tests in this suite
cfg.hide_progress = True
cfg.relay_url = self.relayurl
cfg.transit_helper = ""
cfg.stdout = io.StringIO()
cfg.stderr = io.StringIO()
@inlineCallbacks
def test_override(self):
# make sure we use the overridden appid, not the default
self.cfg.text = "hello"
self.cfg.appid = u"appid2"
self.cfg.code = u"1-abc"
send_d = cmd_send.send(self.cfg)
receive_d = cmd_receive.receive(self.cfg)
yield send_d
yield receive_d
used = self._usage_db.execute("SELECT DISTINCT `app_id`"
" FROM `nameplates`").fetchall()
self.assertEqual(len(used), 1, used)
self.assertEqual(used[0]["app_id"], u"appid2")
class Welcome(unittest.TestCase):
def do(self, welcome_message, my_version="2.0"):
stderr = io.StringIO()
welcome.handle_welcome(welcome_message, "url", my_version, stderr)
return stderr.getvalue()
def test_empty(self):
stderr = self.do({})
self.assertEqual(stderr, "")
def test_version_current(self):
stderr = self.do({"current_cli_version": "2.0"})
self.assertEqual(stderr, "")
def test_version_old(self):
stderr = self.do({"current_cli_version": "3.0"})
expected = ("Warning: errors may occur unless both sides are"
" running the same version\n"
"Server claims 3.0 is current, but ours is 2.0\n")
self.assertEqual(stderr, expected)
def test_version_unreleased(self):
stderr = self.do(
{
"current_cli_version": "3.0"
}, my_version="2.5+middle.something")
self.assertEqual(stderr, "")
def test_motd(self):
stderr = self.do({"motd": "hello"})
self.assertEqual(stderr, "Server (at url) says:\n hello\n")
class Dispatch(unittest.TestCase):
@inlineCallbacks
def test_success(self):
cfg = config("send")
cfg.stderr = io.StringIO()
called = []
def fake():
called.append(1)
yield cli._dispatch_command(reactor, cfg, fake)
self.assertEqual(called, [1])
self.assertEqual(cfg.stderr.getvalue(), "")
@inlineCallbacks
def test_timing(self):
cfg = config("send")
cfg.stderr = io.StringIO()
cfg.timing = mock.Mock()
cfg.dump_timing = "filename"
def fake():
pass
yield cli._dispatch_command(reactor, cfg, fake)
self.assertEqual(cfg.stderr.getvalue(), "")
self.assertEqual(cfg.timing.mock_calls[-1],
mock.call.write("filename", cfg.stderr))
def test_debug_state_invalid_machine(self):
cfg = cli.Config()
with self.assertRaises(UsageError):
cfg.debug_state = "ZZZ"
@inlineCallbacks
def test_debug_state_send(self):
args = config("send")
args.debug_state = "B,N,M,S,O,K,SK,R,RC,L,C,T"
args.stdout = io.StringIO()
s = cmd_send.Sender(args, reactor)
d = s.go()
d.cancel()
try:
yield d
except CancelledError:
pass
# just check for at least one state-transition we expected to
# get logged due to the --debug-state option
self.assertIn(
"send.B[S0_empty].close",
args.stdout.getvalue(),
)
@inlineCallbacks
def test_debug_state_receive(self):
args = config("receive")
args.debug_state = "B,N,M,S,O,K,SK,R,RC,L,C,T"
args.stdout = io.StringIO()
s = cmd_receive.Receiver(args, reactor)
d = s.go()
d.cancel()
try:
yield d
except CancelledError:
pass
# just check for at least one state-transition we expected to
# get logged due to the --debug-state option
self.assertIn(
"recv.B[S0_empty].close",
args.stdout.getvalue(),
)
@inlineCallbacks
def test_wrong_password_error(self):
cfg = config("send")
cfg.stderr = io.StringIO()
def fake():
raise WrongPasswordError("abcd")
yield self.assertFailure(
cli._dispatch_command(reactor, cfg, fake), SystemExit)
expected = fill("ERROR: " + dedent(WrongPasswordError.__doc__)) + "\n"
self.assertEqual(cfg.stderr.getvalue(), expected)
@inlineCallbacks
def test_welcome_error(self):
cfg = config("send")
cfg.stderr = io.StringIO()
def fake():
raise WelcomeError("abcd")
yield self.assertFailure(
cli._dispatch_command(reactor, cfg, fake), SystemExit)
expected = (
fill("ERROR: " + dedent(WelcomeError.__doc__)) + "\n\nabcd\n")
self.assertEqual(cfg.stderr.getvalue(), expected)
@inlineCallbacks
def test_transfer_error(self):
cfg = config("send")
cfg.stderr = io.StringIO()
def fake():
raise TransferError("abcd")
yield self.assertFailure(
cli._dispatch_command(reactor, cfg, fake), SystemExit)
expected = "TransferError: abcd\n"
self.assertEqual(cfg.stderr.getvalue(), expected)
@inlineCallbacks
def test_server_connection_error(self):
cfg = config("send")
cfg.stderr = io.StringIO()
def fake():
raise ServerConnectionError("URL", ValueError("abcd"))
yield self.assertFailure(
cli._dispatch_command(reactor, cfg, fake), SystemExit)
expected = fill(
"ERROR: " + dedent(ServerConnectionError.__doc__)) + "\n"
expected += "(relay URL was URL)\n"
expected += "abcd\n"
self.assertEqual(cfg.stderr.getvalue(), expected)
@inlineCallbacks
def test_other_error(self):
cfg = config("send")
cfg.stderr = io.StringIO()
def fake():
raise ValueError("abcd")
# I'm seeing unicode problems with the Failure().printTraceback, and
# the output would be kind of unpredictable anyways, so we'll mock it
# out here.
f = mock.Mock()
def mock_print(file):
file.write(u"<TRACEBACK>\n")
f.printTraceback = mock_print
with mock.patch("wormhole.cli.cli.Failure", return_value=f):
yield self.assertFailure(
cli._dispatch_command(reactor, cfg, fake), SystemExit)
expected = "<TRACEBACK>\nERROR: abcd\n"
self.assertEqual(cfg.stderr.getvalue(), expected)
class Help(unittest.TestCase):
def _check_top_level_help(self, got):
# the main wormhole.cli.cli.wormhole docstring should be in the
# output, but formatted differently
self.assertIn("Create a Magic Wormhole and communicate through it.",
got)
self.assertIn("--relay-url", got)
self.assertIn("Receive a text message, file, or directory", got)
def test_help(self):
result = CliRunner().invoke(cli.wormhole, ["help"])
self._check_top_level_help(result.output)
self.assertEqual(result.exit_code, 0)
def test_dash_dash_help(self):
result = CliRunner().invoke(cli.wormhole, ["--help"])
self._check_top_level_help(result.output)
self.assertEqual(result.exit_code, 0)
|
11540929
|
import time

import serial


class Display:
def showTemperature(self, temp_str):
pass
def showDutyCycle(self, duty_cycle):
pass
def showAutoMode(self, set_point):
pass
def showBoilMode(self):
pass
def showManualMode(self):
pass
def showOffMode(self):
pass
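# The LCD class below drives a serial character display using "?"-prefixed
# control sequences ("?yN" selects a row, "?xNN" a column, "?f" clears, "?Dn"
# defines a custom glyph). This looks like an LCD117-style serial backpack
# protocol, but the exact command set is an assumption, not stated in the
# source.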
class LCD(Display):
    def __init__(self, tempUnits):
        self.tempUnits = tempUnits
        # keep the port on the instance; a local variable here would be
        # invisible to the other methods
        self.ser = serial.Serial("/dev/ttyAMA0", 9600)
        self.ser.write("?BFF")
        time.sleep(.1)  # wait 100msec
        self.ser.write("?f?a")
        self.ser.write("?y0?x00PID off      ")
        self.ser.write("?y1?x00HLT:")
        self.ser.write("?y3?x00Heat: off ")
        self.ser.write("?D70609090600000000")  # define degree symbol
        time.sleep(.1)  # wait 100msec

    def showTemperature(self, temp_str):
        # write to LCD
        self.ser.write("?y1?x05")
        self.ser.write(temp_str)
        self.ser.write("?7")  # degree
        time.sleep(.005)  # wait 5msec
        if self.tempUnits == 'F':
            self.ser.write("F ")
        else:
            self.ser.write("C ")

    def showDutyCycle(self, duty_cycle):
        # write to LCD
        self.ser.write("?y2?x00Duty: ")
        self.ser.write("%3.1f" % duty_cycle)
        self.ser.write("% ")

    def showAutoMode(self, set_point):
        self.ser.write("?y0?x00Auto Mode     ")
        self.ser.write("?y1?x00HLT:")
        self.ser.write("?y3?x00Set To: ")
        self.ser.write("%3.1f" % set_point)
        self.ser.write("?7")  # degree
        time.sleep(.005)  # wait 5msec
        if self.tempUnits == 'F':
            self.ser.write("F ")
        else:
            self.ser.write("C ")

    def showBoilMode(self):
        self.ser.write("?y0?x00Boil Mode     ")
        self.ser.write("?y1?x00BK: ")
        self.ser.write("?y3?x00Heat: on ")

    def showManualMode(self):
        self.ser.write("?y0?x00Manual Mode   ")
        self.ser.write("?y1?x00BK: ")
        self.ser.write("?y3?x00Heat: on ")

    def showOffMode(self):
        self.ser.write("?y0?x00PID off      ")
        self.ser.write("?y1?x00HLT:")
        self.ser.write("?y3?x00Heat: off ")
class NoDisplay(Display):
def __init__(self):
pass
|
11540953
|
import numpy as np
from mpi4py import MPI
from distdl.backends.mpi.partition import MPIPartition
from distdl.nn.mixins.halo_mixin import HaloMixin
from distdl.nn.mixins.pooling_mixin import PoolingMixin
class MockPoolLayer(HaloMixin, PoolingMixin):
pass
def test_mixin():
P_world = MPIPartition(MPI.COMM_WORLD)
ranks = np.arange(P_world.size)
shape = [1, 1, 4]
P_size = np.prod(shape)
use_ranks = ranks[:P_size]
P_x_base = P_world.create_partition_inclusive(use_ranks)
P_x = P_x_base.create_cartesian_topology_partition(shape)
rank = P_x.rank
layer = MockPoolLayer()
x_global_shape = np.array([1, 1, 10])
kernel_size = np.array([2])
stride = np.array([2])
padding = np.array([0])
dilation = np.array([1])
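    # With a global length of 10 split across 4 ranks (local sizes 3, 3, 2, 2)
    # and kernel 2 / stride 2, only rank 0's last pooling window straddles a
    # partition boundary, so it receives a one-element halo from rank 1, which
    # correspondingly reports a one-element send buffer on its left edge.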
halo_shape, recv_buffer_shape, send_buffer_shape, needed_ranges = \
layer._compute_exchange_info(x_global_shape,
kernel_size,
stride,
padding,
dilation,
P_x.active,
P_x.shape,
P_x.index)
if P_x.active:
if rank == 0:
expected_halo_shape = np.array([[0, 0], [0, 0], [0, 1]])
expected_recv_buffer_shape = np.array([[0, 0], [0, 0], [0, 1]])
expected_send_buffer_shape = np.array([[0, 0], [0, 0], [0, 0]])
expected_needed_ranges = np.array([[0, 1], [0, 1], [0, 4]])
assert(np.array_equal(halo_shape, expected_halo_shape))
assert(np.array_equal(recv_buffer_shape, expected_recv_buffer_shape))
assert(np.array_equal(send_buffer_shape, expected_send_buffer_shape))
assert(np.array_equal(needed_ranges, expected_needed_ranges))
elif rank == 1:
expected_halo_shape = np.array([[0, 0], [0, 0], [0, 0]])
expected_recv_buffer_shape = np.array([[0, 0], [0, 0], [0, 0]])
expected_send_buffer_shape = np.array([[0, 0], [0, 0], [1, 0]])
expected_needed_ranges = np.array([[0, 1], [0, 1], [1, 3]])
assert(np.array_equal(halo_shape, expected_halo_shape))
assert(np.array_equal(recv_buffer_shape, expected_recv_buffer_shape))
assert(np.array_equal(send_buffer_shape, expected_send_buffer_shape))
assert(np.array_equal(needed_ranges, expected_needed_ranges))
elif rank == 2:
expected_halo_shape = np.array([[0, 0], [0, 0], [0, 0]])
expected_recv_buffer_shape = np.array([[0, 0], [0, 0], [0, 0]])
expected_send_buffer_shape = np.array([[0, 0], [0, 0], [0, 0]])
expected_needed_ranges = np.array([[0, 1], [0, 1], [0, 2]])
assert(np.array_equal(halo_shape, expected_halo_shape))
assert(np.array_equal(recv_buffer_shape, expected_recv_buffer_shape))
assert(np.array_equal(send_buffer_shape, expected_send_buffer_shape))
assert(np.array_equal(needed_ranges, expected_needed_ranges))
elif rank == 3:
expected_halo_shape = np.array([[0, 0], [0, 0], [0, 0]])
expected_recv_buffer_shape = np.array([[0, 0], [0, 0], [0, 0]])
expected_send_buffer_shape = np.array([[0, 0], [0, 0], [0, 0]])
expected_needed_ranges = np.array([[0, 1], [0, 1], [0, 2]])
assert(np.array_equal(halo_shape, expected_halo_shape))
assert(np.array_equal(recv_buffer_shape, expected_recv_buffer_shape))
assert(np.array_equal(send_buffer_shape, expected_send_buffer_shape))
assert(np.array_equal(needed_ranges, expected_needed_ranges))
# Inactive ranks should get null results
else:
assert(halo_shape is None)
assert(recv_buffer_shape is None)
assert(send_buffer_shape is None)
assert(needed_ranges is None)
P_world.deactivate()
P_x_base.deactivate()
P_x.deactivate()
|
11541008
|
from pprint import pprint
from fuzzy_logic.terms import Term
from fuzzy_logic.variables import FuzzyVariable, SugenoVariable, LinearSugenoFunction
from fuzzy_logic.sugeno_fs import SugenoFuzzySystem
from fuzzy_logic.mf import TriangularMF
t1: Term = Term('mf1', TriangularMF(0, 0, 0.5))
t2: Term = Term('mf2', TriangularMF(0, 0.5, 1))
t3: Term = Term('mf3', TriangularMF(0.5, 1, 1))
input1: FuzzyVariable = FuzzyVariable('input1', 0, 1, t1, t2, t3)
input2: FuzzyVariable = FuzzyVariable(
'input2', 0, 1,
Term('mf1', TriangularMF(0, 0, 0.5)),
Term('mf2', TriangularMF(0, 0.5, 1)),
Term('mf3', TriangularMF(0.5, 1, 1))
)
output: SugenoVariable = SugenoVariable(
'output',
LinearSugenoFunction('mf1', {input1: 0.1, input2: 0.3}, 0.5),
LinearSugenoFunction('mf2', {input1: 0.4, input2: 0.2}, 0.7)
)
mf: SugenoFuzzySystem = SugenoFuzzySystem([input1, input2], [output])
mf.rules.append(mf.parse_rule('if (input1 is mf1) and (input2 is mf1) then (output is mf1)'))
mf.rules.append(mf.parse_rule('if (input1 is mf2) and (input2 is mf2) then (output is mf2)'))
result = mf.calculate({input1: 0.45, input2: 0.45})
pprint(result)
|
11541056
|
from abc import ABC, abstractmethod
from typing import Dict, Any
import contextvars
from esphome.types import ConfigFragmentType, ID, ConfigPathType
import esphome.config_validation as cv
class FinalValidateConfig(ABC):
@property
@abstractmethod
def data(self) -> Dict[str, Any]:
"""A dictionary that can be used by post validation functions to store
global data during the validation phase. Each component should store its
data under a unique key
"""
@abstractmethod
def get_path_for_id(self, id: ID) -> ConfigPathType:
"""Get the config path a given ID has been declared in.
This is the location under the _validated_ config (for example, with cv.ensure_list applied)
Raises KeyError if the id was not declared in the configuration.
"""
@abstractmethod
def get_config_for_path(self, path: ConfigPathType) -> ConfigFragmentType:
"""Get the config fragment for the given global path.
Raises KeyError if a key in the path does not exist.
"""
FinalValidateConfig.register(dict)
# Context variable tracking the full config for some final validation functions.
full_config: contextvars.ContextVar[FinalValidateConfig] = contextvars.ContextVar(
"full_config"
)
def id_declaration_match_schema(schema):
"""A final-validation schema function that applies a schema to the outer config fragment of an
ID declaration.
This validator must be applied to ID values.
"""
if not isinstance(schema, cv.Schema):
schema = cv.Schema(schema, extra=cv.ALLOW_EXTRA)
def validator(value):
fconf = full_config.get()
path = fconf.get_path_for_id(value)[:-1]
declaration_config = fconf.get_config_for_path(path)
with cv.prepend_path([cv.ROOT_CONFIG_PATH] + path):
return schema(declaration_config)
return validator
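# Hypothetical usage sketch (the names below are illustrative, not from this
# module): a component could validate, at final-validation time, the config
# block in which a referenced ID was declared, e.g.
#
#   FINAL_VALIDATE_SCHEMA = cv.Schema(
#       {cv.Required("uart_id"): id_declaration_match_schema(
#           {cv.Required("baud_rate"): cv.valid})},
#       extra=cv.ALLOW_EXTRA,
#   )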
|
11541063
|
from rodan.models import WorkflowJobGroup
from rodan.serializers.workflow import version_map
from django.conf import settings
from rest_framework import serializers
from rodan.serializers import TransparentField
class WorkflowJobGroupSerializer(serializers.HyperlinkedModelSerializer):
appearance = TransparentField(required=False)
class Meta:
model = WorkflowJobGroup
read_only_fields = ("created", "updated", "origin", "workflow")
fields = (
"url",
"uuid",
"name",
"description",
"workflow",
"origin",
"workflow_jobs",
"created",
"updated",
"appearance",
)
def validate_workflow_jobs(self, wfjs):
if len(wfjs) == 0:
raise serializers.ValidationError("Empty WorkflowJobGroup is not allowed.")
else:
first_wf = wfjs[0].workflow
for wfj in wfjs[1:]:
if wfj.workflow != first_wf: # not in the same workflow
raise serializers.ValidationError(
"All WorkflowJobs should belong to the same Workflow."
)
return wfjs
def save(self, **kwargs):
"""
Update `workflow` field in database.
"""
if self.validated_data.get("workflow_jobs"):
self.validated_data["workflow"] = self.validated_data["workflow_jobs"][
0
].workflow
return super(WorkflowJobGroupSerializer, self).save(**kwargs)
class WorkflowJobGroupImportCreateSerializer(serializers.HyperlinkedModelSerializer):
"""
For importing workflow as workflowjobgroup. Check `workflow` and `origin` fields.
"""
class Meta:
model = WorkflowJobGroup
read_only_fields = (
"name",
"description",
"workflow_jobs",
"created",
"updated",
) # workflow and origin fields are not read-only.
fields = (
"url",
"uuid",
"name",
"description",
"workflow",
"origin",
"workflow_jobs",
"created",
"updated",
)
def validate_origin(self, origin):
if origin.valid is False:
raise serializers.ValidationError("Origin workflow must be valid.")
return origin
def save(self, **kwargs):
"""
Set up `name`, `description`, `workflow_jobs`, copy workflow jobs.
"""
self.validated_data["name"] = "From Workflow {0}".format(
self.validated_data["origin"].name[:80]
)
self.validated_data["description"] = self.validated_data["origin"].description
# dump origin workflow and load as a workflowjobgroup
dumped_workflow = version_map[
settings.RODAN_WORKFLOW_SERIALIZATION_FORMAT_VERSION
].dump(self.validated_data["origin"])
loaded_wfjs = version_map[
settings.RODAN_WORKFLOW_SERIALIZATION_FORMAT_VERSION
].load(dumped_workflow, self.validated_data["workflow"])
self.validated_data["workflow_jobs"] = loaded_wfjs
# wfjgroup = super(WorkflowJobGroupImportCreateSerializer, self).save(**kwargs)
super(WorkflowJobGroupImportCreateSerializer, self).save(**kwargs)
|
11541074
|
import torch
from torch import nn
class BaseEmbedding(nn.Module):
def get_loss(self):
return None
def forward(self, **kwargs):
raise NotImplementedError
def train(self, mode=True):
self.training = mode
if self.trainable and mode:
super().train()
return self
def _set_trainable(self):
if not self.trainable:
for pn, p in self.named_parameters():
p.requires_grad = False
self.eval()
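# Note: BaseEmbedding assumes subclasses define a `trainable` attribute (it is
# never set here); train() and _set_trainable() read it to freeze parameters
# and keep the module in eval mode when the embedding is not trainable.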
|
11541081
|
import webbrowser
class Tabla():
def write(self,entorno,tree):
input = "<html>" + '\n' + "<head>" + '\n' + "<title>Reporte Gramatical</title>" + '\n' + "</head>" + '\n'
input = input + "<body bgcolor=\"white\">" + '\n' + "<center><Font size=12 >" + "Tabla de simbolos" + "</Font></center>" + '\n'
input = input + "<hr >" + '\n' + "<font color=black>" + '\n' + "<center>" + '\n'
input = input + "<table " + '\n'
input = input + "<TR bgcolor=silver>" + "\n"
input = input + "<TH style=\"font-size: 14px; width:15%; \"\" align=center>Tipo de Dato</TH>" + '\n'
input = input + "<TH style=\"font-size: 14px; width:20%; \" align=center>ID</TH>" + '\n'
input = input + "<TH style=\"font-size: 14px; width:15%; \" align=center>Ambito</TH>" + '\n'
input = input + "<TH style=\"font-size: 14px; width:15%; \" align=center>Valor</TH>" + '\n'
input = input + "<TH style=\"font-size: 14px; width:10%; \" align=center>Linea</TH>" + '\n'
input = input + "<TH style=\"font-size: 14px; width:10%; \" align=center>Columna</TH>" + '\n'
for key in entorno.tableofSymbols:
try:
s = entorno.tableofSymbols[key]
print("tableofSymbols con y= "+s.value)
print("tableofSymbols con id= "+s.id)
print("tableofSymbols con ambito= "+s.ambito)
# print("tableofSymbols con linea= "+s.line)
# print("tableofSymbols con col= "+s.column)
line=""
column=""
type1=""
ambito =""
id1 =""
try:
line=s.line
except:
pass
try:
column=s.column
except:
pass
try:
value=str(s.value)
except:
pass
try:
ambito=str(s.ambito)
except:
pass
try:
type1=str(s.type1)
except:
pass
try:
id1=str(s.id)
except:
pass
input = input + "<TR>"
input = input + "<TD style=\"font-size: 15px; ;\" align=center>" + type1+ "</TD>" + '\n'
input = input + "<TD style=\"font-size: 15px; ;\" align=center>" + id1+ "</TD>" + '\n'
input = input + "<TD style=\"font-size: 15px; ;\" align=center>" + ambito+ "</TD>" + '\n'
input = input + "<TD style=\"font-size: 15px; ;\" align=center>" + value+ "</TD>" + '\n'
input = input + "<TD style=\"font-size: 15px; ;\" align=center>" + line+ "</TD>" + '\n'
input = input + "<TD style=\"font-size: 15px; ;\" align=center>" + column+ "</TD>" + '\n'
input = input + "</TR>" + '\n'
input = input + "<TR>------------------------------------------------</TR>" + '\n'
except:
pass
for sent in tree.sentencias:
line=""
column=""
id=""
value=""
ambito=""
type1=""
            try:
                line = str(sent.line)
            except:
                pass
            try:
                column = str(sent.column)
            except:
                pass
try:
id=str(sent.id)
except:
pass
try:
value=str(sent.value)
except:
pass
try:
ambito=str(sent.ambito)
except:
pass
try:
type1=str(sent.type)
except:
pass
print("sentvalue con y= "+id)
input = input + "<TR>"
# input = input + "<TD style=\"font-size: 15px; color:white;\" color:white align=center>"+sent.getTipo()+"</TD>" + '\n'
input = input + "<TD style=\"font-size: 15px; color:black;\" align=center>" + type1 + "</TD>" + '\n'
input = input + "<TD style=\"font-size: 15px; ;\" align=center>" + id+ "</TD>" + '\n'
input = input + "<TD style=\"font-size: 15px; ;\" align=center>" + ambito+ "</TD>" + '\n'
input = input + "<TD style=\"font-size: 15px; ;\" align=center>" + value+ "</TD>" + '\n'
input = input + "<TD style=\"font-size: 15px; ;\" align=center>" + line+ "</TD>" + '\n'
input = input + "<TD style=\"font-size: 15px; ;\" align=center>" + column + "</TD>" + '\n'
input = input + "</TR>" + '\n'
        input = input + '\n' + "</table>" + '\n' + "</center>" + '\n' + "</body>" + '\n' + "</html>"
f = open ('Tabla.html','w')
f.write(input)
f.close()
webbrowser.open_new_tab('Tabla.html')
|
11541096
|
import numpy as np
import matplotlib.pyplot as plt
def log_barrier_aux_eval_constraints(eval_f_const_inequality):
"""
Auxiliary function for evaluation of constraint inequalities
in logarithmic barrier
"""
#get values that are nonnegative through indexes
idx_zeros = np.logical_and(eval_f_const_inequality < np.nextafter(0,1),
eval_f_const_inequality > -np.nextafter(0,1))
idx_negative = eval_f_const_inequality < 0
idx = np.logical_or(idx_zeros, idx_negative)
#eval constraint inequality functions
#next line produces warning if a value of constraint
#is nonpositive
eval_f_const_inequality = np.log(eval_f_const_inequality)
#assign large negative value for constraints
#that have values negative or equal to 0
eval_f_const_inequality[idx] = -1e10
return eval_f_const_inequality
def constraint_inequalities_funcs_generator(constraint_inequalities):
"""
    Generator for the functional form of the inequality constraints.
    Yields a different set of functions for each example.
"""
for k, v in constraint_inequalities.items():
yield v
def constraint_inequalities_funcs_eval(x,
constraint_inequalities):
"""
Auxiliary function for the evaluation of constraint inequalities
in logarithmic barrier function
"""
const_ineq_funcs_eval = np.array([const(x) for const in \
constraint_inequalities_funcs_generator(constraint_inequalities)])
return const_ineq_funcs_eval
def phi(x, constraint_inequalities):
"""
Implementation of phi function for logarithmic barrier.
"""
constraint_ineq_funcs_eval = -constraint_inequalities_funcs_eval(x,constraint_inequalities)
log_barrier_const_eval = log_barrier_aux_eval_constraints(constraint_ineq_funcs_eval)
return -np.sum(log_barrier_const_eval)
def logarithmic_barrier(f,x, t_B, constraint_inequalities):
"""
Implementation of Logarithmic barrier function.
"""
return t_B*f(x)+ phi(x, constraint_inequalities)
def line_search_for_log_barrier_by_backtracking(f,dir_desc,
x,t_B,
constraint_inequalities,
der_direct,
alpha=.15, beta=.5):
"""
Line search that sufficiently decreases f restricted to a
ray in the direction dir_desc.
Args:
f (lambda expression): definition of function f.
dir_desc (array): descent direction.
x (array): numpy array that holds values where line search
will be performed.
t_B (float): barrier parameter.
constraint_inequalities (dict): dictionary of inequalities constraints
in "<= 0" form.
der_direct (float): directional derivative of f.
        alpha (float): parameter in line search with backtracking,
            typically .15
        beta (float): parameter in line search with backtracking,
            typically .5
Returns:
t (float): positive number for stepsize along dir_desc that
sufficiently decreases f.
"""
t = 1
if alpha > 1/2:
print('alpha must be less than or equal to 1/2')
t = -1
if beta>1:
print('beta must be less than 1')
t = -1
if t != -1:
eval1 = logarithmic_barrier(f, x + t*dir_desc, t_B, constraint_inequalities)
eval2 = logarithmic_barrier(f, x, t_B, constraint_inequalities) + alpha*t*der_direct
while eval1 > eval2:
t = beta*t
eval1 = logarithmic_barrier(f, x + t*dir_desc, t_B, constraint_inequalities)
eval2 = logarithmic_barrier(f, x, t_B, constraint_inequalities) + alpha*t*der_direct
return t
def plot_inner_iterations(err):
"""
Auxiliary function for plotting inner iterations error.
"""
plt.yscale('log') #logarithmic scale for y axis
plt.plot(np.arange(err.size),err,'.-')
plt.ylabel("Log relative error: $f_o(x^k)$ y $p^*$",size=12)
plt.xlabel("Inner iterations",size=12)
plt.grid()
plt.show()
def plot_central_path(x_iterations):
"""
Auxiliary function for plotting central points of
central path.
"""
plt.plot(x_iterations[0,:],
x_iterations[1, :], "-*")
plt.ylabel("$x_2$")
plt.xlabel("$x_1$")
plt.annotate("$x^{(0)}$",(x_iterations[0,0],
x_iterations[1,0]),fontsize=12)
plt.title("Primal-dual BL method iterations")
plt.grid()
plt.legend(["Trayectoria central"], bbox_to_anchor=(1,1))
plt.show()
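# Minimal usage sketch (all values below are illustrative, not from the
# source): minimize f(x) = x1^2 + x2^2 subject to x1 + x2 - 1 <= 0, taking one
# backtracking step along the steepest-descent direction of t_B*f. A full
# barrier method would also fold the gradient of phi into der_direct.
if __name__ == "__main__":
    f = lambda x: x[0] ** 2 + x[1] ** 2
    constraints = {"c1": lambda x: x[0] + x[1] - 1.0}  # "<= 0" form
    x0 = np.array([0.25, 0.25])  # strictly feasible starting point
    t_B = 10.0
    grad = 2 * t_B * x0  # gradient of t_B*f at x0
    dir_desc = -grad  # steepest-descent direction
    der_direct = float(grad @ dir_desc)  # directional derivative (negative)
    t = line_search_for_log_barrier_by_backtracking(
        f, dir_desc, x0, t_B, constraints, der_direct)
    print("step size:", t)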
|
11541098
|
from random import shuffle
import os
import sys
def patch_path(path):
return os.path.join(os.path.dirname(__file__), path)
def load_audio_path_label_pairs(max_allowed_pairs=None):
from mxnet_audio.library.utility.gtzan_loader import download_gtzan_genres_if_not_found
download_gtzan_genres_if_not_found(patch_path('very_large_data/gtzan'))
audio_paths = []
with open(patch_path('data/lists/test_songs_gtzan_list.txt'), 'rt') as file:
for line in file:
audio_path = patch_path('very_large_data/' + line.strip())
audio_paths.append(audio_path)
pairs = []
with open(patch_path('data/lists/test_gt_gtzan_list.txt'), 'rt') as file:
for line in file:
label = int(line)
if max_allowed_pairs is None or len(pairs) < max_allowed_pairs:
pairs.append((audio_paths[len(pairs)], label))
else:
break
return pairs
def main():
sys.path.append(patch_path('..'))
from mxnet_audio.library.cifar10 import Cifar10AudioRecommender
music_recommender = Cifar10AudioRecommender()
music_recommender.load_model(model_dir_path=patch_path('models'))
music_archive = load_audio_path_label_pairs()
for path, _ in music_archive:
music_recommender.index_audio(path)
    # create a fake history of songs the user has listened to
shuffle(music_archive)
for i in range(30):
song_i_am_listening = music_archive[i][0]
music_recommender.track(song_i_am_listening)
for idx, similar_audio in enumerate(music_recommender.recommend(limits=10)):
print('result #%s: %s' % (idx+1, similar_audio))
if __name__ == '__main__':
main()
|
11541111
|
import random
import string
def vault_from_glacier_url(full_url):
return full_url.split("/")[-1]
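# Glacier job IDs are 92-character strings of uppercase letters and digits.
# random.choice (not cryptographically secure) suffices if, as it appears,
# this is mock/test support code rather than a token generator that must be
# unguessable.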
def get_job_id():
return "".join(
random.choice(string.ascii_uppercase + string.digits) for _ in range(92)
)
|
11541115
|
from typing import Any, List, Tuple
from pytest import fixture, mark
from pytest_lazyfixture import lazy_fixture as lz
from arger.typing_utils import get_origin, match_types
@fixture(params=[list, List, List[str], List[int], List[Any]])
def list_type(request):
return request.param
@fixture(
params=[tuple, Tuple, Tuple[str], Tuple[str, str], Tuple[int], Tuple[int, ...]]
)
def tuple_type(request):
return request.param
@mark.parametrize(
"tp, tps, expected",
[
(lz("list_type"), lz("list_type"), True),
(lz("list_type"), lz("tuple_type"), False),
(lz("tuple_type"), lz("tuple_type"), True),
(lz("tuple_type"), lz("list_type"), False),
],
)
def test_match_types(tp, tps, expected):
assert match_types(tp, tps) == expected
@mark.parametrize(
"tp, expected",
[
(lz("list_type"), list),
(lz("tuple_type"), tuple),
],
)
def test_get_origin(tp, expected):
assert get_origin(tp) == expected
|
11541116
|
import subprocess
import os.path
import json
import argparse
import pathlib
import logging
import errno
import getpass
import datetime
import time
class LoopDevice:
__lo_name: str = ""
__offset: int = 0
__target: str = ""
__has_setup: bool = False
def __init__(self, target: str, offset: int):
self.__target = target
self.__offset = offset
pass
def __enter__(self):
self.__lo_name = subprocess.getoutput("losetup -f")
offset_option = ("-o {}".format(self.__offset) if self.__offset != 0 else "")
try:
cmd: str = "sudo losetup {} {} {}".format(offset_option, self.__lo_name, self.__target)
subprocess.run(cmd, check=True, shell=True)
self.__has_setup = True
logging.info("Setting up loop device {} with {}".format(self.__lo_name, self.__target))
return self
except subprocess.CalledProcessError as err:
logging.critical(
"Can't setup loop device {} with file {} at offset {}.".format(self.__lo_name, self.__target,
self.__offset))
raise
except Exception as err:
logging.critical("Exception {} says {}.".format(type(err), err))
raise
def __exit__(self, exc_type, exc_val, exc_tb):
if self.__has_setup:
try:
subprocess.run("sudo losetup -d {}".format(self.__lo_name), check=True, shell=True)
self.__has_setup = False
logging.info("Detach loop device {} with {}".format(self.__lo_name, self.__target))
except subprocess.CalledProcessError as err:
logging.critical(
"can't remove loop device {} with file {} at offset {}.".format(self.__lo_name, self.__target,
self.__offset))
raise
except Exception as err:
logging.critical("exception {} says {}.".format(type(err), err))
raise
@property
def is_ready(self) -> bool:
return self.__has_setup
@property
def name(self) -> str:
return self.__lo_name
class MountPoint:
__path: str
__dev: str = ""
__has_mounted: bool = False
def __command(self):
cmd: str = "mount" if not self.__has_mounted else "umount"
return "sudo {} {} {}".format(cmd, self.__dev if not self.__has_mounted else "", self.__path)
def __init__(self, dev: str, path: str):
self.__dev = dev
self.__path = path
pass
def __enter__(self):
try:
subprocess.run(self.__command(), check=True, shell=True)
self.__has_mounted = True
logging.info("Mount {} on {}".format(self.__dev, self.__path))
return self
except subprocess.CalledProcessError as e:
logging.critical("Can't mount {} on {}".format(self.__dev, self.__path))
raise
except Exception as err:
logging.critical("Exception {} says {}.".format(type(err), err))
raise
def __exit__(self, exc_type, exc_val, exc_tb):
try:
subprocess.run(self.__command(), check=True, shell=True)
self.__has_mounted = False
logging.info("Umount {} from {}".format(self.__dev, self.__path))
except subprocess.CalledProcessError as e:
logging.critical("Can't umount {} on {}".format(self.__dev, self.__path))
raise
except Exception as err:
logging.critical("Exception {} says {}.".format(type(err), err))
raise
@property
def is_ready(self) -> bool:
return self.__has_mounted
@property
def path(self) -> str:
return self.__path
class MappingItem:
__from: str
__to: str
def __command(self) -> str:
return "sudo cp -f {} {}".format(self.__from, self.__to)
def __init__(self, _fr: str, _to: str, parent: str, mount: str):
fr: str = os.path.join(parent, _fr)
to: str = os.path.join(mount, _to)
if not os.path.exists(fr):
raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), fr)
self.__from = fr
self.__to = to
pass
def apply(self):
try:
cmd: str = self.__command()
subprocess.run(cmd, check=True, shell=True)
logging.info("Mapping {} -> {} :\n Run \"{}\"".format(self.__from, self.__to, cmd))
except subprocess.CalledProcessError as ce:
logging.critical("Cannot copy from {} to {}".format(self.__from, self.__to))
raise ce
except Exception as err:
logging.critical("Exception {} says {}.".format(type(err), err))
raise err
pass
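# 1 MiB = 1048576 bytes: the first partition conventionally starts at sector
# 2048 (2048 * 512 bytes), so this offset maps the loop device onto it.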
LOOP1_OFFSET: int = 1048576
JSON_FILE_MAPPINGS_NAME: str = 'file_mappings'
JSON_DIRECTORY_NAME: str = 'image_directories'
def parse_config(args, mp: MountPoint):
with open(str(args.config[0]), "r") as conf:
conf_dict: dict = json.load(conf)
fail: int = 0
success: int = 0
file_mappings_list: dict = conf_dict[JSON_FILE_MAPPINGS_NAME]
for item in (MappingItem(mapping['from'], mapping['to'], str(args.directory[0]), mp.path)
for mapping in file_mappings_list):
try:
item.apply()
success += 1
except Exception as e:
fail += 1
raise e
logging.info("Parse config: {} failed, {} succeeded.".format(fail, success))
def sync_grub_configuration(args, mp: MountPoint):
if not os.path.exists(os.path.join(mp.path, "boot")):
os.mkdir(os.path.join(mp.path, "boot"))
logging.info("Create directory {}".format(os.path.join(mp.path, "boot")))
if not os.path.exists(os.path.join(mp.path, "boot/grub")):
os.mkdir(os.path.join(mp.path, "boot/grub"))
logging.info("Create directory {}".format(os.path.join(mp.path, "boot/grub")))
grub_mp_path: str = os.path.join(mp.path, "boot/grub/grub.cfg")
subprocess.run("sudo cp -f {} {}".format(str(args.grub[0]), grub_mp_path), check=True, shell=True)
logging.info("Sync grub configuration from {} to {}".format(str(args.grub[0]), grub_mp_path))
pass
def workaround_permission(args, mp: MountPoint):
file_path: str = str(args.file[0])
try:
cmd: str = "sudo chown {} {}".format(getpass.getuser(), file_path)
subprocess.run(cmd, check=True, shell=True)
logging.info("Change permissions :\n Run \"{}\"".format(cmd))
except subprocess.CalledProcessError as ce:
logging.critical("Cannot change permission: {}".format(str(ce)))
raise ce
except Exception as err:
logging.critical("Exception {} says {}.".format(type(err), err))
raise err
pass
def update_image(parser: argparse.ArgumentParser, args):
with LoopDevice(str(args.file[0]), 0) as loop0:
with LoopDevice(loop0.name, LOOP1_OFFSET) as loop1:
assert loop0.is_ready
assert loop1.is_ready
with MountPoint(loop1.name, str(args.mount[0])) as mp:
sync_grub_configuration(args, mp)
parse_config(args, mp)
workaround_permission(args, mp)
logging.info("Finished at {}".format(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
def resolve_args_dependency(parser: argparse.ArgumentParser, args):
file_path: str = str(args.file[0])
if args.action == 'update':
if any([args.config is None, args.grub is None, args.directory is None, args.mount is None]):
parser.error("arguments are not enough for update.")
if not os.path.exists(file_path):
            if args.template is None:
                parser.error("{} does not exist, so a template must be provided.".format(file_path))
            template_path: str = str(args.template[0])
            if not os.path.exists(template_path):
                parser.error("{} does not exist, and template {} does not exist either.".format(file_path, template_path))
try:
subprocess.run("sudo cp {} {}".format(template_path, file_path), check=True, shell=True)
logging.info("Create target {} from template {}".format(file_path, template_path))
except subprocess.CalledProcessError as cpe:
parser.error("Error creating target {} from template {} :\n {}".format(file_path, template_path, str(cpe)))
pass
def main():
    def validate_disk_file(parser: argparse.ArgumentParser, arg: str) -> str:
        path: pathlib.Path = pathlib.Path(arg)
        abs_file_path: str = str(path.absolute().as_posix())
        return abs_file_path

    def validate_disk_template_file(parser: argparse.ArgumentParser, arg: str) -> str:
        path: pathlib.Path = pathlib.Path(arg)
        if path.exists():
            return str(path.absolute().as_posix())
        # argparse.ArgumentError needs an argument object as its first
        # parameter; report through the parser instead.
        parser.error("FATAL: path {} does not exist".format(arg))

    def validate_config_file(parser: argparse.ArgumentParser, arg: str) -> str:
        path: pathlib.Path = pathlib.Path(arg)
        if path.exists():
            return str(path.absolute().as_posix())
        parser.error("FATAL: path {} does not exist".format(arg))

    def validate_build_directory(parser: argparse.ArgumentParser, arg: str) -> str:
        path: pathlib.Path = pathlib.Path(arg)
        if path.exists():
            return str(path.absolute().as_posix())
        parser.error("FATAL: path {} does not exist".format(arg))

    def validate_mount_point(parser: argparse.ArgumentParser, arg: str) -> str:
        path: pathlib.Path = pathlib.Path(arg)
        abs_path_str: str = str(path.absolute().as_posix())
        if not path.exists():
            os.mkdir(abs_path_str)
            logging.info("Creating {}, which does not exist.".format(abs_path_str))
        return abs_path_str
    parser: argparse.ArgumentParser = argparse.ArgumentParser(description="disk img manager for project-dionysus",
                                                              epilog="This script requires sudo")
parser.add_argument('action', choices=['update', 'convert'])
parser.add_argument('-f', '--file', type=lambda x: validate_disk_file(parser, x), nargs=1,
help="the disk image file",
required=True)
parser.add_argument('-t', '--template', type=lambda x: validate_disk_template_file(parser, x), nargs=1,
help="the disk image template if disk image doesn't exist")
parser.add_argument('-c', '--config', type=lambda x: validate_config_file(parser, x), nargs=1,
help="the configuration file")
parser.add_argument('-g', '--grub', type=lambda x: validate_config_file(parser, x), nargs=1,
help="the grub configuration file")
parser.add_argument('-d', '--directory', type=lambda x: validate_build_directory(parser, x), nargs=1,
default=os.path.join(os.getcwd(), "build"),
help="the build directory")
parser.add_argument('-m', '--mount', type=lambda x: validate_mount_point(parser, x), nargs=1,
default=os.path.join(os.path.join(os.getcwd(), "build"), "mountpoint"),
help="the mount point directory")
args = parser.parse_args()
resolve_args_dependency(parser, args)
if args.action == 'update':
update_image(parser, args)
else:
parser.error("Action {} is not yet implemented.".format(args.action[0]))
if __name__ == "__main__":
    # Logging to screen
    logging.basicConfig(format='%(message)s', level=logging.DEBUG)
main()
|
11541155
|
import argparse
import datetime
import json
import sys
import boto3
import dateutil.parser
from boto3.dynamodb.conditions import Attr
DB_KEY = "COMPUTE_FLEET"
DB_DATA = "Data"
COMPUTE_FLEET_STATUS_ATTRIBUTE = "status"
COMPUTE_FLEET_LAST_UPDATED_TIME_ATTRIBUTE = "lastStatusUpdatedTime"
def to_utc_datetime(time_in, default_timezone=datetime.timezone.utc) -> datetime.datetime:
"""
Convert a given string, datetime or int into utc datetime.
    :param time_in: Time in a format that may be parsed; integers are assumed
        to be timestamps in the UTC timezone.
    :param default_timezone: Timezone to assume in the event that the timezone is
        unspecified in the input parameter. This applies only to datetime and str inputs.
    :return: time as a datetime in the UTC timezone
"""
if isinstance(time_in, int):
if time_in > 1e12:
time_in /= 1000
time_ = datetime.datetime.utcfromtimestamp(time_in)
time_ = time_.replace(tzinfo=datetime.timezone.utc)
elif isinstance(time_in, str):
time_ = dateutil.parser.parse(time_in)
elif isinstance(time_in, datetime.date):
time_ = time_in
else:
raise TypeError("to_utc_datetime object must be 'str', 'int' or 'datetime'.")
if time_.tzinfo is None:
time_ = time_.replace(tzinfo=default_timezone)
return time_.astimezone(datetime.timezone.utc)
def to_iso_timestr(time_in: datetime.datetime) -> str:
"""
    Convert a given datetime to ISO 8601 format with milliseconds.
:param time_in: datetime to be converted
:return time in ISO 8601 UTC format with ms (e.g. 2021-07-15T01:22:02.655Z)
"""
if time_in.tzinfo is None:
time_ = time_in.replace(tzinfo=datetime.timezone.utc)
else:
time_ = time_in.astimezone(datetime.timezone.utc)
return to_utc_datetime(time_).isoformat(timespec="milliseconds")[:-6] + "Z"
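# Usage sketch combining the two helpers (timestamp value illustrative, chosen
# to match the docstring example above):
#   to_iso_timestr(to_utc_datetime(1626312122655))  # -> "2021-07-15T01:22:02.655Z"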
def update_item(table, status, current_status):
table.update_item(
Key={"Id": DB_KEY},
UpdateExpression="set #dt.#st=:s, #dt.#lut=:t",
ExpressionAttributeNames={
"#dt": DB_DATA,
"#st": COMPUTE_FLEET_STATUS_ATTRIBUTE,
"#lut": COMPUTE_FLEET_LAST_UPDATED_TIME_ATTRIBUTE,
},
ExpressionAttributeValues={
":s": str(status),
":t": str(datetime.datetime.now(tz=datetime.timezone.utc)),
},
ConditionExpression=Attr(f"{DB_DATA}.{COMPUTE_FLEET_STATUS_ATTRIBUTE}").eq(str(current_status)),
)
def update_status_with_last_updated_time(table_name, region, status):
"""Get compute fleet status and the last compute fleet status updated time."""
try:
table = boto3.resource("dynamodb", region_name=region).Table(table_name)
current_status = get_dynamo_db_data(table).get(COMPUTE_FLEET_STATUS_ATTRIBUTE)
if current_status == status:
return
elif current_status == "RUNNING":
update_item(table, status, current_status)
else:
raise Exception(f"Could not update compute fleet status from '{current_status}' to {status}.")
except Exception as e:
raise Exception(f"Failed when updating fleet status with error: {e}")
def get_dynamo_db_data(table):
try:
compute_fleet_item = table.get_item(ConsistentRead=True, Key={"Id": DB_KEY})
if not compute_fleet_item or "Item" not in compute_fleet_item:
raise Exception("COMPUTE_FLEET data not found in db table")
db_data = compute_fleet_item["Item"].get(DB_DATA)
return db_data
except Exception as e:
raise Exception(f"Failed when retrieving data from DynamoDB with error {e}.")
def get_status_with_last_updated_time(table_name, region):
"""Get compute fleet status and the last compute fleet status updated time."""
try:
table = boto3.resource("dynamodb", region_name=region).Table(table_name)
dynamo_db_data = get_dynamo_db_data(table)
print(
json.dumps(
{
COMPUTE_FLEET_STATUS_ATTRIBUTE: dynamo_db_data.get(COMPUTE_FLEET_STATUS_ATTRIBUTE),
COMPUTE_FLEET_LAST_UPDATED_TIME_ATTRIBUTE: to_iso_timestr(
dateutil.parser.parse(dynamo_db_data.get(COMPUTE_FLEET_LAST_UPDATED_TIME_ATTRIBUTE))
),
},
sort_keys=True,
indent=4,
)
)
except Exception as e:
raise Exception(f"Failed when retrieving fleet status from DynamoDB with error {e}.")
def main():
try:
parser = argparse.ArgumentParser(description="Get or update compute fleet status of scheduler plugin.")
parser.add_argument(
"--table-name",
type=str,
required=True,
help="DynamoDB table name",
)
parser.add_argument(
"--region",
type=str,
required=True,
help="Region of cluster",
)
parser.add_argument(
"--status",
type=str,
required=False,
help="Specify the compute fleet status to set, can be PROTECTED",
choices={"PROTECTED"},
)
parser.add_argument(
"--action",
type=str,
required=True,
help="Get or update compute-fleet-status",
choices={"update", "get"},
)
args = parser.parse_args()
if args.action == "update" and not args.status:
parser.error("ERROR: --status is required when 'action' is specified to 'update'.")
elif args.action == "get" and args.status:
parser.error("ERROR: --status can not be specified when 'action' is 'get'.")
if args.action == "update":
update_status_with_last_updated_time(args.table_name, args.region, args.status)
else:
get_status_with_last_updated_time(args.table_name, args.region)
except Exception as e:
print(f"ERROR: Failed to {args.action} compute fleet status, exception: {e}", file=sys.stderr)
sys.exit(1)
if __name__ == "__main__":
main()
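# Hedged CLI examples (table name and region are hypothetical; the script
# filename is assumed):
#
#   python update_compute_fleet_status.py --table-name parallelcluster-mycluster \
#       --region us-east-1 --action get
#   python update_compute_fleet_status.py --table-name parallelcluster-mycluster \
#       --region us-east-1 --action update --status PROTECTED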
|
11541192
|
import platform
from nspawn.wrapper.sudo import SUDO
from nspawn.tool import stamp
from nspawn.base.machine import *
build_stamp = stamp.build_stamp()
epoch = "3.10"
release = f"{epoch}.3"
hardware = platform.machine()
image_url = f"file://localhost/tmp/nspawn/repo/alpine/base/default-{release}-{hardware}.tar.gz"
booter_url = f"http://dl-cdn.alpinelinux.org/alpine/v{epoch}/releases/{hardware}/alpine-minirootfs-{release}-{hardware}.tar.gz"
def test_machine_result():
print()
machine_name = f"a-nspawn-tester-{build_stamp}"
machine_template = CONFIG['machine']['template']
machine_meta = MachineMeta(machine_name, machine_template)
machine_result = machine_result_from_url(booter_url, machine_meta)
print(machine_result)
machine_directory = machine_result.machine_directory()
resource_create_list = machine_result.resource_create_list()
resource_delete_list = machine_result.resource_delete_list()
cmd_report = f"ls -las {machine_directory}".split()
cmd_ensure = f"mkdir -p {machine_directory}".split()
cmd_desure = f"rm -rf {machine_directory}".split()
systemd_nspawn = system_command('systemd-nspawn')
    machine_command = [
        systemd_nspawn,
        '--quiet',
        f'--machine={machine_name}',
        f'--directory={machine_directory}',
        '/usr/bin/env',
    ]
try:
print(f"invoke: {cmd_ensure}")
SUDO.execute_unit_sert(cmd_ensure)
result = SUDO.execute_unit_sert(cmd_report)
print(result.stdout)
for command in resource_create_list:
print(f"invoke: {command}")
SUDO.execute_unit_sert(command)
print(f"invoke: {cmd_report}")
result = SUDO.execute_unit_sert(cmd_report)
print(result.stdout)
print(f"invoke: {machine_command}")
result = SUDO.execute_unit_sert(machine_command)
print(result.stdout)
finally:
for command in resource_delete_list:
print(f"invoke: {command}")
print(command)
SUDO.execute_unit_sert(command)
SUDO.execute_unit_sert(cmd_desure)
|
11541203
|
import dis
import opcode
class InvalidConstantError(Exception): pass
def const(**names):
"""Decorator to rewrite lookups to be constants
@const(x=1)
def func():
print(x) # prints 1
"""
def decorate(func):
# Constants are not allowed to be assigned to within the function.
# Thus they should never appear in the function's local names.
# TODO: Alter the message if the colliding name is an argument.
c = func.__code__
if set(names) & set(c.co_varnames):
raise InvalidConstantError("Cannot assign to local constant")
# Constants should not be declared nonlocal and then assigned to.
# (Unfortunately this doesn't actually check if they were assigned to.)
if set(names) & set(c.co_freevars):
raise InvalidConstantError("Cannot create local and nonlocal constant")
# Constants should therefore be global names.
if set(names) - set(c.co_names):
raise InvalidConstantError("Constant not referenced as global")
# Okay. So now we replace every LOAD_GLOBAL for one of these names
# with a LOAD_CONST.
newcode = []
newconsts = c.co_consts
for name, val in names.items():
try:
names[name] = newconsts.index(val)
except ValueError:
names[name] = len(newconsts)
newconsts += (val,)
LOAD_CONST = opcode.opmap["LOAD_CONST"]
for instr in dis.get_instructions(c):
if instr.opname == "LOAD_GLOBAL" and instr.argval in names:
newcode.append(LOAD_CONST)
newcode.append(names[instr.argval])
            else:
                newcode.append(instr.opcode)
                # wordcode emits (opcode, arg) byte pairs; this assumes every
                # arg fits in one byte (no EXTENDED_ARG handling)
                newcode.append(instr.arg or 0)
codeobj = type(c)(c.co_argcount, c.co_kwonlyargcount, c.co_nlocals, c.co_stacksize,
c.co_flags, bytes(newcode), newconsts, c.co_names, c.co_varnames, c.co_filename,
c.co_name, c.co_firstlineno, c.co_lnotab, c.co_freevars, c.co_cellvars)
func = type(func)(codeobj, func.__globals__, func.__name__, func.__defaults__, func.__closure__)
return func
return decorate
x = "global"
@const(x=1)
def func():
print("This should be one:", x)
func()
try:
@const(x=1)
def func(x):
print("Shouldn't happen")
func(2)
except InvalidConstantError as e:
print(e)
try:
@const(x=1)
def func():
x = 2
print("Shouldn't happen")
func()
except InvalidConstantError as e:
print(e)
try:
def f():
x = 2
@const(x=1)
def func():
nonlocal x
print("Shouldn't happen")
return func
f()
except InvalidConstantError as e:
print(e)
@const(len=len, str=str, int=int)
def digit_count(n):
return len(str(int(n)))
dis.dis(digit_count)
|
11541204
|
from scipy import interpolate
import collections
import numpy as np
import os
import re
import torch
import pylab as plt
import matplotlib.ticker as mtick
import locale
import math
import itertools
from tensorboard.backend.event_processing import event_accumulator
def get_run_names(logdir, patterns):
run_names = []
for pattern in patterns:
for root, subdirs, files in os.walk(logdir, followlinks=True):
if re.match(pattern, root):
run_names += [root]
# print(run_names)
run_names.sort()
return run_names
def get_run_names_events(logdir, patterns):
run_names = {}
for pattern in patterns:
for root, subdirs, files in os.walk(logdir, followlinks=True):
if re.match(pattern, root):
run_names[root] = []
for file in files:
                    if re.match(r'.*events\.out.*', file):
run_names[root].append(file)
run_names[root] = sorted(run_names[root])
# print(run_names)
return run_names
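# Illustrative call for get_run_names_events (directory layout hypothetical):
# each pattern is a regex matched against walked directories, and each
# matching run directory maps to its sorted list of tfevents files, e.g.
#
#   runs = get_run_names_events('runs', [r'.*mnist.*'])
#   # -> {'runs/mnist_lr0.1_0': ['events.out.tfevents.123.host'], ...}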
def get_data_pth(logdir, run_names, tag_names, batch_size=None):
data = []
for run_name in run_names:
d = {}
logdata = torch.load(run_name + '/log.pth.tar')
for tag_name in tag_names:
if tag_name not in logdata:
continue
js = logdata[tag_name]
d[tag_name] = np.array([[x[j] for x in js]
for j in range(1, 3)])
data += [d]
return data
def get_data_pth_events(logdir, run_names, tag_names, batch_size=None):
data = []
all_points = []
for run_name, events in run_names.items():
d = {}
points = {}
for event in events:
ea = event_accumulator.EventAccumulator(run_name+'/'+event,
size_guidance={ # see below regarding this argument
event_accumulator.COMPRESSED_HISTOGRAMS: 500,
event_accumulator.IMAGES: 4,
event_accumulator.AUDIO: 4,
event_accumulator.SCALARS: 0,
event_accumulator.HISTOGRAMS: 1,
})
ea.Reload()
for tag_name in tag_names:
if tag_name not in ea.Tags()['scalars']:
continue
scalar = ea.Scalars(tag_name)
if tag_name not in d:
d[tag_name] = np.array(
[[dp.step for dp in scalar], [dp.value for dp in scalar]])
points[tag_name] = [len(d[tag_name][0]) - 1]
else:
                    new_steps = np.array([dp.step for dp in scalar])
                    new_values = np.array([dp.value for dp in scalar])
                    indexes = new_steps > d[tag_name][0][-1]
                    res1 = np.concatenate((d[tag_name][0], new_steps[indexes]))
                    res2 = np.concatenate((d[tag_name][1], new_values[indexes]))
points[tag_name].append(len(res2) - 1)
d[tag_name] = (res1, res2)
data += [d]
all_points += [points]
return data, all_points
def plot_smooth(x, y, npts=100, order=3, points=None, vlines=None, *args, **kwargs):
    if points is not None:
        points = np.array(points, dtype=int)
    # plt.plot(x[points], y[points], 'o')
x_smooth = np.linspace(x.min(), x.max(), npts)
tck = interpolate.splrep(x, y, k=order)
y_smooth = interpolate.splev(x_smooth, tck, der=0)
plt.plot(x_smooth, y_smooth, *args, **kwargs)
plt.ticklabel_format(axis="x", style="sci", scilimits=None)
def plot_smooth_o1(x, y, points=None, vlines=None, *args, **kwargs):
plot_smooth(x, y, 100, 1, points, vlines, *args, **kwargs)
def get_legend(lg_tags, run_name, lg_replace=[]):
lg = ""
for lgt in lg_tags:
res = ".*?($|,)" if ',' not in lgt and '$' not in lgt else ''
mg = re.search(lgt + res, run_name)
if mg:
lg += mg.group(0)
lg = lg.replace('_,', ',')
lg = lg.strip(',')
for a, b in lg_replace:
lg = lg.replace(a, b)
return lg
class OOMFormatter(mtick.ScalarFormatter):
def __init__(self, useOffset=None, useMathText=None, useLocale=None, acc_bits=None):
super().__init__(useOffset=useOffset, useMathText=useMathText, useLocale=useLocale)
if acc_bits is not None:
self.acc_bits = acc_bits
else:
self.acc_bits = 3
def __call__(self, x, pos=None):
"""
Return the format for tick value *x* at position *pos*.
"""
if len(self.locs) == 0:
return ''
else:
xp = (x - self.offset) / (10. ** self.orderOfMagnitude)
if abs(xp) < 1e-8:
xp = 0
if self._useLocale:
s = locale.format_string(self.format, (xp,))
else:
s = self.format % xp
return self.fix_minus(s)
def _set_format(self):
bits = self.acc_bits
# set the format string to format all the ticklabels
if len(self.locs) < 2:
# Temporarily augment the locations with the axis end points.
_locs = [*self.locs, *self.axis.get_view_interval()]
else:
_locs = self.locs
locs = (np.asarray(_locs) - self.offset) / 10. ** self.orderOfMagnitude
loc_range = np.ptp(locs)
# Curvilinear coordinates can yield two identical points.
if loc_range == 0:
loc_range = np.max(np.abs(locs))
# Both points might be zero.
if loc_range == 0:
loc_range = 1
if len(self.locs) < 2:
# We needed the end points only for the loc_range calculation.
locs = locs[:-2]
loc_range_oom = int(math.floor(math.log10(loc_range)))
# first estimate:
sigfigs = max(0, bits - loc_range_oom)
# refined estimate:
thresh = 10 ** (-bits) * 10 ** (loc_range_oom)
while sigfigs >= 0:
if np.abs(locs - np.round(locs, decimals=sigfigs)).max() < thresh:
sigfigs -= 1
else:
break
        # pin the precision to `acc_bits` so every tick shares a fixed digit count
        sigfigs = bits
self.format = '%1.' + str(sigfigs) + 'f'
if self._usetex or self._useMathText:
self.format = r'$\mathdefault{%s}$' % self.format
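# Hedged usage sketch for OOMFormatter (illustrative only; nothing in this
# module calls it): pin y-tick labels to a fixed digit count while keeping
# the shared order-of-magnitude offset computed by ScalarFormatter.
def _demo_oom_formatter():
    fig, ax = plt.subplots()
    ax.plot([0, 1, 2], [1.2345e-3, 1.2351e-3, 1.2360e-3])
    # acc_bits=1 forces one digit after the decimal point on every tick
    ax.yaxis.set_major_formatter(OOMFormatter(useMathText=True, acc_bits=1))
    fig.canvas.draw()  # render so the offset and tick format are resolved
    return fig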
def plot_tag(data, plot_f, run_names, tag_name, lg_tags, ylim=None, color0=0,
ncolor=None, lg_replace=[], no_title=False, points=None, xlim=None, vlines=None, orders=None, acc_bits=None, markeroff=True):
xlabel = {}
ylabel = {'Tacc': 'Training Accuracy (%)', 'Terror': 'Training Error (%)',
'train/accuracy': 'Training Accuracy (%)',
'Vacc': 'Test Accuracy (%)', 'Verror': 'Test Error (%)',
'valid/accuracy': 'Test Accuracy (%)',
'loss': 'Loss',
'epoch': 'Epoch',
'Tloss': 'Loss', 'Vloss': 'Loss', 'lr': 'Learning rate',
'grad_bias': 'Gradient Diff norm',
'est_var': 'Average Variance',
'est_snr': 'Mean SNR',
'nb_error': 'NB Error',
'est_nvar': 'Mean Normalized Variance'}
titles = {'Tacc': 'Training Accuracy', 'Terror': 'Training Error',
'train/accuracy': 'Training Accuracy',
'Vacc': 'Test Accuracy', 'Verror': 'Test Error',
'loss': 'Loss',
'epoch': 'Epoch',
'Tloss': 'Loss on full training set', 'lr': 'Learning rate',
'Vloss': 'Loss on validation set',
'grad_bias': 'Optimization Step Bias',
'nb_error': 'Norm-based Variance Error',
'est_var': 'Optimization Step Variance',
'est_snr': 'Optimization Step SNR',
'est_nvar': 'Optimization Step Normalized Variance (w/o lr)',
}
    yscale_log = ['Tloss', 'est_var']
    yscale_log_offset = ['Vloss']
    yscale_scalar = ['Vloss']
yscale_base = []
# yscale_sci = ['est_bias', 'est_var']
plot_fs = {'Tacc': plot_f, 'Vacc': plot_f,
'Terror': plot_f, 'Verror': plot_f,
'Tloss': plot_f, 'Vloss': plot_f,
}
for k in list(ylabel.keys()):
if k not in xlabel:
xlabel[k] = 'Training Iteration'
        if k not in plot_fs:
            plot_fs[k] = plot_f
if not isinstance(data, list):
data = [data]
run_names = [run_names]
    # color = ['blue', 'orangered', 'darkred', 'darkkhaki', 'darkblue', 'grey']
    color = [[0.00784314, 0.24313725, 1.],
             [1., 0.48627451, 0.],
             [0.10196078, 0.78823529, 0.21960784],
             [0.90980392, 0., 0.04313725],
             [0.54509804, 0.16862745, 0.88627451]]
    color = color[:ncolor]
    # style = ['-', '--', ':', '-.']
    style = ['-']
    markers = ['o', 'X', 'p', '*', 'd', 'v']
plt.rcParams.update({'font.size': 16})
plt.grid(linewidth=1)
legends = []
# extract run index
indexes = [int(run_names[i].split('/')[-1].split('_')[1])
for i in range(len(run_names))]
s_indexes = np.argsort(indexes)
for i in range(len(data)):
if tag_name not in data[i]:
continue
legends += [get_legend(lg_tags, run_names[i], lg_replace)]
if orders:
color_index = orders.index(legends[-1])
else:
color_index = color0 + i
if not markeroff:
plot_fs[tag_name](
data[i][tag_name][0], data[i][tag_name][1], points[i][tag_name],
vlines=vlines,
linestyle=style[0], label=legends[-1],
color=color[(color_index) % len(color)], linewidth=2, marker=markers[(color_index) % len(markers)], markersize=10, markevery=10+ 2*(color_index%5))
else:
plot_fs[tag_name](
data[i][tag_name][0], data[i][tag_name][1], points[i][tag_name],
vlines=vlines,
linestyle=style[0], label=legends[-1],
color=color[(color_index) % len(color)], linewidth=2)
if not no_title:
plt.title(titles[tag_name])
if tag_name in yscale_log:
ax = plt.gca()
if tag_name in yscale_base:
ax.set_yscale('log', basey=np.e)
ax.yaxis.set_major_formatter(mtick.FuncFormatter(ticks))
else:
ax.set_yscale('log')
        if tag_name in yscale_log_offset:
            ax.yaxis.set_major_formatter(mtick.ScalarFormatter(useOffset=True))
            ax.ticklabel_format(axis='y', style='sci', scilimits=(0, 0))
else:
ax = plt.gca()
ax.ticklabel_format(axis='y', style='sci', scilimits=(-3, 3))
if tag_name in yscale_scalar:
ax.set_yscale('log')
ax.yaxis.set_minor_locator(mtick.LogLocator(base=10.0, subs=[2,4,6]))
ax.yaxis.set_minor_formatter(mtick.ScalarFormatter())
ax.yaxis.set_major_formatter(OOMFormatter(acc_bits=1))
#ax.ticklabel_format(axis='y', style='sci', scilimits=(0, 0))
ax.ticklabel_format(axis='x', style='sci', scilimits=(0, 0))
if ylim is not None:
plt.ylim(ylim)
handles, labels = plt.gca().get_legend_handles_labels()
if orders:
norders = []
for order in orders:
if order in labels:
norders.append(order)
order = []
for label in labels:
order.append(norders.index(label))
nlabels = np.arange(len(labels)).tolist()
nhandles = np.arange(len(handles)).tolist()
for idx, label, handle in zip(order, labels, handles):
nlabels[idx] = label
nhandles[idx] = handle
handles = nhandles
labels = nlabels
plt.legend(handles, labels,
loc="upper left", bbox_to_anchor=(1.01, 1.0), prop={'size': 12})
if vlines:
for vline in vlines:
plt.axvline(vline, linestyle='--', color='black')
if xlim:
plt.xlim(xlim)
plt.xlabel(xlabel[tag_name])
plt.ylabel(ylabel[tag_name])
def ticks(y, pos):
return r'$e^{{{:.0f}}}$'.format(np.log(y))
def ticks_10(y, pos):
return r'${0:g}$'.format(np.log10(y))
def plot_runs_and_tags(get_data_f, plot_f, logdir, patterns, tag_names,
fig_name, lg_tags, ylim, batch_size=None, sep_h=True,
ncolor=None, save_single=False, lg_replace=[],
xlim=None, acc_bits=None, markeroff=True,
no_title=False, vlines=None, color_order=None):
run_names = get_run_names_events(logdir, patterns)
data, points = get_data_f(logdir, run_names, tag_names, batch_size)
if len(data) == 0:
return data, run_names
num = len(tag_names)
height = (num + 1) // 2
width = 2 if num > 1 else 1
if not save_single:
fig = plt.figure(figsize=(7 * width, 4 * height))
fig.subplots(height, width)
else:
plt.figure(figsize=(7, 4))
plt.tight_layout(pad=1., w_pad=3., h_pad=3.0)
fi = 1
if save_single:
fig_dir = fig_name[:fig_name.rfind('.')]
        os.makedirs(fig_dir, exist_ok=True)
for i in range(len(tag_names)):
yl = ylim[i]
if not isinstance(yl, list) and yl is not None:
yl = ylim
if not save_single:
plt.subplot(height, width, fi)
plot_tag(data, plot_f, list(run_names), tag_names[i], lg_tags, yl,
ncolor=ncolor, lg_replace=lg_replace, no_title=no_title, points=points, vlines=vlines, xlim=xlim, orders=color_order,
acc_bits=acc_bits, markeroff=markeroff)
if save_single:
plt.savefig('%s/%s-lo.pdf' % (fig_dir, tag_names[i]),
dpi=100, bbox_inches='tight')
ax = plt.gca()
handles, labels = ax.get_legend_handles_labels()
if color_order:
norders = []
for order in color_order:
if order in labels:
norders.append(order)
order = []
for label in labels:
order.append(norders.index(label))
nlabels = np.arange(len(labels)).tolist()
nhandles = np.arange(len(handles)).tolist()
for idx, label, handle in zip(order, labels, handles):
nlabels[idx] = label
nhandles[idx] = handle
handles = nhandles
labels = nlabels
plt.legend(handles, labels, prop={'size': 12})
plt.savefig('%s/%s-li.pdf' % (fig_dir, tag_names[i]),
dpi=100, bbox_inches='tight')
ax.get_legend().remove()
plt.savefig('%s/%s.pdf' % (fig_dir, tag_names[i]),
dpi=100, bbox_inches='tight')
plt.figure(figsize=(7, 4))
fi += 1
plt.savefig(fig_name, dpi=100, bbox_inches='tight')
return data, run_names
def find_largest_common_iteration(iters):
intersect = set(iters[0])
for i in range(1, len(iters)):
intersect = intersect & set(iters[i])
return list(intersect)
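# Small illustrative example: the common iterations are the set intersection
# of the runs' recorded step axes; ordering is not guaranteed, which is why
# callers sort the result.
#
#   find_largest_common_iteration([[0, 10, 20], [10, 20, 30]])  # -> [10, 20]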
def get_accuracies(patterns, lg_replace, lg_tags, log_dir, latex=False, notitle=False, onlyTitle=False):
run_names = get_run_names_events(log_dir, patterns)
tags = ['Vacc', 'Tacc']
data = get_data_pth_events(log_dir, run_names, tags)[0]
run_names = list(run_names)
results = {}
for i in range(len(tags)):
results[tags[i]] = []
legends = []
iters = []
res_i = []
for j in range(len(data)):
if tags[i] not in data[j]:
continue
legends += [get_legend(lg_tags, run_names[j], lg_replace)]
iters.append(data[j][tags[i]][0])
if len(iters) == 0:
continue
max_iters = find_largest_common_iteration(iters)
max_iters = sorted(max_iters)
max_iters.reverse()
max_iters = max_iters[0:5]
for j in range(len(data)):
if tags[i] not in data[j]:
continue
local_result = []
for iter in max_iters:
index = data[j][tags[i]][0].tolist().index(iter)
res = data[j][tags[i]][1][index]
local_result.append(res)
res_i.append((np.sqrt(np.var(local_result)), np.mean(local_result)))
results[tags[i]].append([*zip(res_i, legends)])
    if latex:
for key, val in results.items():
print('=======', key, '========')
if len(val) == 0:
continue
val_s = sorted(val[0], key=lambda x: x[1])
for res in val_s:
acc = res[0]
if onlyTitle:
print(res[1])
continue
            if not notitle:
                print('%s & %.2f\\%% $\\pm$ %.2f' % (res[1], acc[1], acc[0]))
            if notitle:
                print('%.2f\\%% $\\pm$ %.2f' % (acc[1], acc[0]))
return results
def plot_levels(levels, filename):
key = list(levels)[0]
level_indexes = np.arange(0, len(levels[key]))
styles = ['-']
    orders = ['ALQ', 'AMQ', 'ALQ-N', 'AMQ-N', 'Qinf', 'TRN', 'NUQ,p=0.5', 'SignSGD', 'SignSGDInf']
    markers = ['o', 'X', 'p', '*', 'd', 'v']
colors = [[0.00784314, 0.24313725, 1.],
[1., 0.48627451, 0.],
[0.10196078, 0.78823529, 0.21960784],
[0.90980392, 0., 0.04313725],
[0.54509804, 0.16862745, 0.88627451]]
index = 0
levels = collections.OrderedDict(sorted(levels.items()))
print(levels)
for level, label in zip(levels.values(), list(levels)):
index = orders.index(label)
if len(level) == 3:
plt.plot(level, [0, 3, 7], markers[index % len(markers)], label=label, color=colors[index % len(colors)], markersize=15-index)
continue
plt.plot(level, level_indexes, markers[index % len(markers)], label=label, color=colors[index % len(colors)], markersize=15-index)
handles, labels = plt.gca().get_legend_handles_labels()
# plt.title(ytitle + ' vs ' + xtitle)
norders = []
for order in orders:
if order in labels:
norders.append(order)
order = []
for label in labels:
order.append(norders.index(label))
nlabels = np.arange(len(labels)).tolist()
nhandles = np.arange(len(handles)).tolist()
for idx, label, handle in zip(order, labels, handles):
nlabels[idx] = label
nhandles[idx] = handle
print(nlabels)
dirn = 'figs_levels/'
plt.savefig(dirn + filename +'.pdf', dpi=100, bbox_inches='tight')
plt.legend(nhandles, nlabels, bbox_to_anchor=(1.01, 1.0))
plt.savefig(dirn+'lo-'+filename + '.pdf', dpi=100, bbox_inches='tight')
plt.legend(nhandles, nlabels)
plt.savefig(dirn+'li-'+filename + '.pdf', dpi=100, bbox_inches='tight')
def get_levels(patterns, lg_replace, lg_tags, log_dir, num_of_levels=8):
run_names = get_run_names_events(log_dir, patterns)
tags = []
for i in range(num_of_levels):
tags.append('levels/'+str(i))
data = get_data_pth_events(log_dir, run_names, tags)[0]
run_names = list(run_names)
results = {}
for i in range(len(tags)):
results[tags[i]] = []
legends = []
iters = []
res_i = []
for j in range(len(data)):
if tags[i] not in data[j]:
continue
legends += [get_legend(lg_tags, run_names[j], lg_replace)]
iters.append(data[j][tags[i]][0])
if len(iters) == 0:
continue
max_iters = find_largest_common_iteration(iters)
max_iters = sorted(max_iters)
max_iters.reverse()
max_iters = max_iters[0:5]
for j in range(len(data)):
if tags[i] not in data[j]:
continue
local_result = []
for iter in max_iters:
index = data[j][tags[i]][0].tolist().index(iter)
res = data[j][tags[i]][1][index]
local_result.append(res)
res_i.append(np.mean(local_result))
results[tags[i]].append([*zip(res_i, legends)])
return results
|
11541222
|
import pytest
import shutil
from pathlib import Path
from imageatm.scripts import run_cloud
from imageatm.components.cloud import AWS
TEST_JOB_DIR = Path('./tests/data/test_train_job').resolve()
TEST_TF_DIR = 'test_tf_dir'
TEST_REGION = 'test_region'
TEST_INSTANCE_TYPE = 'test_instance_type'
TEST_VPC_ID = 'test_vpc_id'
TEST_S3_BUCKET = 'test_s3_bucket'
TEST_CLOUD_TAG = 'test_cloud_tag'
class TestRunEvaluation(object):
def test_run_cloud_1(self, mocker):
mp_init = mocker.patch('imageatm.components.cloud.AWS.init')
mp_apply = mocker.patch('imageatm.components.cloud.AWS.apply')
mp_destroy = mocker.patch('imageatm.components.cloud.AWS.destroy')
mocker.patch('imageatm.components.cloud.AWS.__init__')
AWS.__init__.return_value = None
run_cloud(
provider='aws',
tf_dir=TEST_TF_DIR,
region=TEST_REGION,
instance_type=TEST_INSTANCE_TYPE,
vpc_id=TEST_VPC_ID,
bucket=TEST_S3_BUCKET,
destroy=False,
job_dir=TEST_JOB_DIR,
cloud_tag=TEST_CLOUD_TAG,
image_dir='test',
)
mp_init.assert_called_once()
mp_apply.assert_called_once()
mp_destroy.assert_not_called()
AWS.__init__.assert_called_with(
tf_dir=TEST_TF_DIR,
region=TEST_REGION,
instance_type=TEST_INSTANCE_TYPE,
vpc_id=TEST_VPC_ID,
s3_bucket=TEST_S3_BUCKET,
job_dir=TEST_JOB_DIR,
cloud_tag=TEST_CLOUD_TAG,
)
def test_run_cloud_2(self, mocker):
mp_init = mocker.patch('imageatm.components.cloud.AWS.init')
mp_apply = mocker.patch('imageatm.components.cloud.AWS.apply')
mp_train = mocker.patch('imageatm.components.cloud.AWS.train')
mp_destroy = mocker.patch('imageatm.components.cloud.AWS.destroy')
mocker.patch('imageatm.components.cloud.AWS.__init__')
AWS.__init__.return_value = None
run_cloud(
provider='aws',
tf_dir=TEST_TF_DIR,
region=TEST_REGION,
instance_type=TEST_INSTANCE_TYPE,
vpc_id=TEST_VPC_ID,
bucket=TEST_S3_BUCKET,
destroy=True,
job_dir=TEST_JOB_DIR,
cloud_tag=TEST_CLOUD_TAG,
)
mp_init.assert_not_called()
mp_apply.assert_not_called()
mp_destroy.assert_called_once()
AWS.__init__.assert_called_with(
tf_dir=TEST_TF_DIR,
region=TEST_REGION,
instance_type=TEST_INSTANCE_TYPE,
vpc_id=TEST_VPC_ID,
s3_bucket=TEST_S3_BUCKET,
job_dir=TEST_JOB_DIR,
cloud_tag=TEST_CLOUD_TAG,
)
|
11541243
|
from pyglet_gui.sliders import Slider
class ScrollBar(Slider):
"""
An abstract scrollbar with a specific knob size to be set.
"""
def __init__(self, width, height):
Slider.__init__(self, width=width, height=height)
        # relative size of the knob; the knob position runs in
        # [_knob_size/2, 1 - _knob_size/2]
        self._knob_size = 0.0
        self._scrolled = 0  # cumulative scroll amount, to avoid a re-layout on every scroll event
def set_size(self, width, height):
self.width = width
self.height = height
    def re_layout(self):
        self.layout()
        # After laying ourselves out, also ask the parent to re-layout, since
        # the scrollbar defines the content region. Only do so once enough
        # scrolling has accumulated, to avoid a re-layout on every event.
        if self._scrolled > 2:
            self.parent.layout()
            self._scrolled = 0
def _get_bar_region(self):
"""
Returns the area of the space where the knob moves (x, y, width, height)
"""
return self.x, self.y, self.width, self.height
def _get_knob_region(self):
"""
Returns the area of the knob (x, y, width, height). To be subclassed.
"""
raise NotImplementedError
def get_knob_pos(self):
"""
Returns the position of the relative position of the knob
in the bar.
"""
raise NotImplementedError
def set_knob_pos(self, pos):
pos = max(min(pos, 1 - self._knob_size/2), self._knob_size/2)
self._value = self._min_value + (self._max_value - self._min_value) * pos
def layout(self):
self._knob.update(*self._get_knob_region())
self._bar.update(*self._get_bar_region())
def on_gain_focus(self):
if self._manager is not None:
self._manager.set_wheel_target(self)
def on_lose_focus(self):
self._scrolled = 0
if self._manager is not None:
self._manager.set_wheel_target(None)
class HScrollbar(ScrollBar):
PATH = 'hscrollbar'
def __init__(self, width):
ScrollBar.__init__(self, width=width, height=0)
def _get_knob_region(self):
return int(self.x + (self._knob_pos() - self._knob_size/2) * self.width), \
self.y, int(self._knob_size * self.width), self.height
def get_knob_pos(self):
return int((self._knob_pos() - self._knob_size/2) * self.width)
def load_graphics(self):
super(HScrollbar, self).load_graphics()
self.height = self._bar.height
def on_mouse_drag(self, x, y, dx, dy, buttons, modifiers):
bar_x, _, bar_width, _ = self._bar.get_content_region()
absolute_distance = float(x - bar_x)
relative_distance = absolute_distance/bar_width
self.set_knob_pos(relative_distance)
self._scrolled = 10
self.re_layout()
return True
def on_mouse_scroll(self, x, y, scroll_x, scroll_y):
self._scrolled += abs(scroll_x)
self.set_knob_pos(self._knob_pos() - float(scroll_x) / self.width)
self.re_layout()
return True
def set_knob_size(self, width, max_width):
self._knob_size = float(width)/max_width
# update the knob position given the new knob size.
self.set_knob_pos(self._knob_pos())
def compute_size(self):
return self.width, self._bar.height
class VScrollbar(ScrollBar):
PATH = 'vscrollbar'
def __init__(self, height):
ScrollBar.__init__(self, width=0, height=height)
def _get_knob_region(self):
top = self.y + self.height
return (self.x, int(top - (self._knob_pos() + self._knob_size/2) * self.height),
self.width, int(self._knob_size * self.height))
def get_knob_pos(self):
# we remove half the knob size to pick the center of the knob.
# height/_knob_size = max_height by "set_knob_size()".
return int((self._knob_pos() - self._knob_size/2) * self.height/self._knob_size)
def load_graphics(self):
super(VScrollbar, self).load_graphics()
self.width = self._bar.width
def on_mouse_drag(self, x, y, dx, dy, button, modifiers):
bar_x, bar_y, bar_width, bar_height = self._bar.get_content_region()
absolute_distance = float(y - bar_y)
relative_distance = absolute_distance/bar_height
self.set_knob_pos(1 - relative_distance)
self._scrolled = 10
self.re_layout()
return True
def on_mouse_scroll(self, x, y, scroll_x, scroll_y):
self._scrolled += abs(scroll_y)
self.set_knob_pos(self._knob_pos() + float(scroll_y) / self.height)
self.re_layout()
return True
def set_knob_size(self, height, max_height):
self._knob_size = float(height)/max_height
# update the knob position given the new knob size.
self.set_knob_pos(self._knob_pos())
def compute_size(self):
return self._bar.width, self.height
|
11541255
|
import numpy as np
from spanningtrees.heap import Heap
class Edge(object):
"""
An Edge is between an ordered pair of nodes (src and tgt) and
has an associated weight.
It may optionally also have a label (in the case of multigraphs)
"""
__slots__ = 'src', 'tgt', 'weight', 'label'
def __init__(self, src, tgt, weight, label=None):
self.src = src
self.tgt = tgt
self.weight = weight
self.label = label
def copy(self):
return Edge(self.src, self.tgt, self.weight, self.label)
def __lt__(self, other):
return self.weight < other.weight
def __repr__(self):
        return f'{self.src} → {self.tgt}'
class Node(object):
"""
Represents the state of a node in the MST algorithm
- a heap of incoming edge preferences.
"""
__slots__ = 'name', 'edges', 'id'
def __init__(self, name, edges, node_id):
self.name = name
self.edges = Heap(edges)
self.id = node_id
def __eq__(self, other):
return self is other or self.name == other
def __hash__(self):
return self.id
def __repr__(self):
return f'{self.name}'
class Graph(dict):
def __repr__(self):
return 'Graph({\n%s\n})' % (
'\n'.join(
f' {node}: {[(src, w[0] if isinstance(w, tuple) else w.weight) for (src, w) in self[node].items()]}'
for node in self
)
)
def target_nodes(self, src):
"""Get the set of nodes pointed to by `src`, this method expensive O(|V|)."""
return {tgt for tgt in self
if tgt != src
if src in self[tgt]}
def w(self, src, tgt):
"Lookup edge weight"
return self[tgt][src][0] if src in self[tgt] else 0.0
def weight(self, tree):
if isinstance(tree, np.ndarray):
return sum(self.w(src, tgt + 1)
for tgt, src in enumerate(tree[1:]))
else: # tree is a dict
return sum(self.w(e.src, e.tgt) for e in tree.values())
@classmethod
def build(cls, graph):
"""
Build a graph from a numpy array. We assume that 0 represents the dummy root
"""
G = {}
n,m = graph.shape
assert n == m
ninf = -np.inf
for tgt in range(1, n):
G[tgt] = {}
# dummy_root given as 0 in numpy array
for src in range(n):
if tgt == src or graph[src, tgt] == ninf:
continue
G[tgt][src] = graph[src, tgt], None
return cls(G)
def node_list(self):
"""
        Create a representation of the graph as a list of (node, incoming_edges).
        This is needed for the MST algorithm.
"""
return [(tgt, [Edge(src, tgt, self[tgt][src][0]) for src in self[tgt]])
for tgt in self]
@classmethod
def from_multigraph(cls, graph):
""""
Create a graph from a multigraph. We consider a multigraph to be represented as a dict
where graph[tgt][src] is a list of all edges from src to tgt. We only need to take the
best scoring (minimum weight) edge in order to compute the MST.
"""
G = {}
for tgt in graph:
G[tgt] = {}
for src in graph[tgt]:
w = graph[tgt][src]
                if isinstance(w, list):
                    # parallel edges: entries are either bare weights or
                    # (weight, label) tuples; keep the minimum-weight one
                    if isinstance(w[0], tuple):
                        w = min(w, key=lambda x: x[0])
                    else:
                        w = min(w)
if isinstance(w, tuple):
cost, label = w
else:
cost, label = w, None
G[tgt][src] = cost, label
return cls(G)
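# Illustrative sketch (not part of the library): build a small Graph from a
# dense score matrix where row 0 is the dummy root, then query edge weights
# and score a candidate tree given as a parent-index array.
def _demo_graph_build():
    scores = np.array([
        [0.0, 1.0, 4.0],   # edges out of the dummy root 0
        [0.0, 0.0, 2.0],   # edges out of node 1
        [0.0, 3.0, 0.0],   # edges out of node 2
    ])
    G = Graph.build(scores)
    assert G.w(0, 1) == 1.0 and G.w(1, 2) == 2.0
    # tree[i] is the parent of node i; entry 0 is a placeholder for the root
    assert G.weight(np.array([0, 0, 1])) == 3.0
    return G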
|
11541262
|
from RestrictedPython import compile_restricted_exec
from RestrictedPython._compat import IS_PY3
from RestrictedPython._compat import IS_PY35_OR_GREATER
import pytest
YIELD_EXAMPLE = """\
def test_generator():
yield 42
"""
def test_yield():
"""`yield` statement should be allowed."""
result = compile_restricted_exec(YIELD_EXAMPLE)
assert result.errors == ()
assert result.code is not None
local = {}
exec(result.code, {}, local)
test_generator = local['test_generator']
exec_result = list(test_generator())
assert exec_result == [42]
YIELD_FORM_EXAMPLE = """
def reader_wapper(input):
yield from input
"""
@pytest.mark.skipif(
not IS_PY3,
reason="`yield from` statement was first introduced in Python 3.3")
def test_yield_from():
"""`yield from` statement should be allowed."""
    result = compile_restricted_exec(YIELD_FROM_EXAMPLE)
assert result.errors == ()
assert result.code is not None
def my_external_generator():
my_list = [1, 2, 3, 4, 5]
for elem in my_list:
            yield elem
local = {}
exec(result.code, {}, local)
    reader_wrapper = local['reader_wrapper']
    exec_result = list(reader_wrapper(my_external_generator()))
assert exec_result == [1, 2, 3, 4, 5]
# Modified Example from http://stackabuse.com/python-async-await-tutorial/
ASYNCIO_YIELD_FROM_EXAMPLE = """
import asyncio
@asyncio.coroutine
def get_json(client, url):
file_content = yield from load_file('data.ini')
"""
@pytest.mark.skipif(
not IS_PY3,
reason="`yield from` statement was first introduced in Python 3.3")
def test_asyncio_yield_from():
"""`yield from` statement should be allowed."""
    result = compile_restricted_exec(ASYNCIO_YIELD_FROM_EXAMPLE)
assert result.errors == ()
assert result.code is not None
ASYNC_YIELD_FROM_EXAMPLE = """
import asyncio
async def get_json(client, url):
file_content = yield from load_file('data.ini')
"""
@pytest.mark.skipif(
not IS_PY35_OR_GREATER,
reason="`yield from` statement was first introduced in Python 3.3")
def test_async_yield_from():
"""`yield from` statement should be allowed."""
    result = compile_restricted_exec(ASYNC_YIELD_FROM_EXAMPLE)
assert result.errors == (
'Line 4: AsyncFunctionDef statements are not allowed.',
)
assert result.code is None
|
11541277
|
class Blog:
def __init__(self, id, owner_id, title):
self.id = id
self.owner_id = owner_id
self.title = title
|
11541287
|
import asyncio
import logging
import random
import socket
import struct
from ipaddress import IPv4Address
from . import settings
logger = logging.getLogger(__name__)
class TrackerUDPProtocol(asyncio.DatagramProtocol):
def __init__(self, cb, infohash):
self.state = "connect"
self.transport = None
self.last_transaction_id = None
self.cb = cb
self.infohash = infohash
def get_transaction_id(self):
self.last_transaction_id = random.randint(0, 2 ** 32 - 1)
return self.last_transaction_id
def connection_made(self, transport):
self.transport = transport
self.send_connect()
    def send_connect(self):
        # connect request per BEP 15 (UDP tracker protocol): 0x41727101980 is
        # the fixed protocol magic; action 0 = connect
        data = struct.pack("!qiI", 0x41727101980, 0, self.get_transaction_id())
        self.transport.sendto(data)
    def send_announce(self):
        # announce request per BEP 15; action 1 = announce
        data = struct.pack(
            "!qiI20s20sqqqiiiiH",
            self.connection_id,
            1,  # action: announce
            self.get_transaction_id(),
            self.infohash,
            settings.PEER_ID,
            0,  # downloaded
            0,  # left
            0,  # uploaded
            0,  # event: none
            0,  # IP address (0 = use the sender's address)
            0,  # Key, not sure
            100,  # num_want
            0,  # port
        )
self.transport.sendto(data)
def datagram_received(self, data, addr):
logger.debug(f"received datagram from {addr}")
asyncio.ensure_future(self._handle_response(data, addr))
async def _handle_response(self, data, addr):
if self.state == "connect":
fmt_header = "!iIq"
if len(data) != struct.calcsize(fmt_header):
logger.warning("Wrong stuff returned on connect")
return
action, transaction_id, connection_id = struct.unpack(fmt_header, data)
self.connection_id = connection_id
self.state = "announce"
self.send_announce()
elif self.state == "announce":
fmt_header = "!iIiii"
header_size = struct.calcsize(fmt_header)
if len(data) < header_size:
logger.warning("Wrong stuff returned on announce")
return
action, transaction_id, interval, leechers, seeders = struct.unpack(
fmt_header, data[:header_size]
)
peer_data = data[header_size:]
peers = []
fmt_peer = "!IH"
peer_size = struct.calcsize(fmt_peer)
while len(peer_data) >= peer_size:
peer_ip, peer_port = struct.unpack(fmt_peer, peer_data[:peer_size])
peers.append((IPv4Address(peer_ip), peer_port))
peer_data = peer_data[peer_size:]
if not self.cb.done():
self.cb.set_result(
{"seeders": seeders, "leechers": leechers, "peers": peers}
)
async def retrieve_peers_udp_tracker(task_registry, host, port, tracker, infohash):
loop = asyncio.get_running_loop()
cb = loop.create_future()
try:
transport, protocol = await loop.create_datagram_endpoint(
lambda: TrackerUDPProtocol(cb, infohash), remote_addr=(host, port)
)
except socket.gaierror:
return tracker, {"seeders": 0, "leechers": 0, "peers": []}
    try:
        task = asyncio.ensure_future(cb)
        task_registry.add(task)
        result = await asyncio.wait_for(task, timeout=12)
        task_registry.remove(task)
    except asyncio.TimeoutError:
        return tracker, {"seeders": 0, "leechers": 0, "peers": []}
    except asyncio.CancelledError:
        pass
    else:
        return tracker, result
    finally:
        # close the datagram endpoint on every exit path
        transport.close()
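# Hedged usage sketch (tracker host and infohash are illustrative; assumes
# settings.PEER_ID is configured and infohash is a 20-byte value):
#
#   registry = set()
#   tracker, stats = asyncio.run(
#       retrieve_peers_udp_tracker(
#           registry, "tracker.example.org", 6969,
#           "udp://tracker.example.org:6969/announce", infohash))
#   # stats -> {"seeders": ..., "leechers": ..., "peers": [(IPv4Address, port), ...]}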
|
11541333
|
import getopt
import logging
import os
import sys
import tensorflow as tf
from keras.preprocessing.sequence import pad_sequences
from tinydb import TinyDB, where
from model.lstm import buildModel
from preprocessing.InputHelper import InputHelper
from preprocessing.InputHelper import transformLabels
# set logging
LOGGING_LEVEL = logging.getLevelName(os.environ.get('LOGGING_LEVEL', "INFO"))
logging.basicConfig(format="%(asctime)s | %(levelname)s | %(message)s", level=LOGGING_LEVEL)
MAX_LEN = 50
# For quick testing set this to a small number like 3
DEFAULT_EPOCHS = 10
DEFAULT_MODEL_FOLDER = os.path.join("..", "saved_models")
DEFAULT_TF_MODELS_FOLDER = DEFAULT_MODEL_FOLDER
DEFAULT_MODEL_NAME = "default_model"
MC_MODEL_DESCRIPTION_SUFFIX = " (confidence scoring)"
MODELS_TINYDB_FILENAME = "models_tinydb.json"
# About the right size
# EPOCHS=30
def train_and_save(train_tsv_path, merged_vec_path, model_name, epochs=DEFAULT_EPOCHS,
model_folder=DEFAULT_MODEL_FOLDER, tf_models_folder=DEFAULT_TF_MODELS_FOLDER, model_description=""):
inpH = InputHelper()
logging.info("converting words to IDs...")
inpH.convertWordsToIds(merged_vec_path)
for num_classes in [0, 7]:
logging.info("loading input data...")
x_train, y_train, x_test, y_test = inpH.loadData(merged_vec_path, train_tsv_path, None)
y_train, y_test = transformLabels(y_train, y_test, num_classes)
x_train = pad_sequences(x_train, padding='post', maxlen=MAX_LEN)
if x_test is not None:
x_test = pad_sequences(x_test, padding='post', maxlen=MAX_LEN)
logging.info("building model (num_classes = {})...".format(num_classes))
logging.debug("DEBUG: During model saving - o/p dimension: {}".format(inpH.emb_dim))
logging.debug("DEBUG: During model saving - emb matrix shape: {}".format(inpH.embedding_matrix.shape))
model = buildModel(num_classes, inpH.vocab_size, inpH.emb_dim, MAX_LEN, inpH.embedding_matrix)
logging.info("training model with num_classes={}...".format(num_classes))
model = fit(model, x_train, y_train, num_classes=num_classes, epochs=epochs, maxlen=MAX_LEN)
logging.info(f"saving model {model_name} and multi-class version for confidence scoring...")
save_model(model, model_name, merged_vec_path, num_classes=num_classes, model_folder=model_folder,
tf_models_folder=tf_models_folder, model_description=model_description)
def fit(model, x_train, y_train, hidden_layer_dim=20, num_classes=0, epochs=DEFAULT_EPOCHS, maxlen=300):
"""
Trains two models, one with regression and the other with classification, for API.
"""
batch_size = int(len(x_train) / 20) # 5% of the training set size
history = model.fit(x_train, y_train,
epochs=epochs,
verbose=True,
validation_split=0.1,
batch_size=batch_size)
return model
def save_model(model, model_name, merged_vec_path, num_classes=0,
model_folder=DEFAULT_MODEL_FOLDER, tf_models_folder=DEFAULT_TF_MODELS_FOLDER, model_description=""):
# original saving code (mostly for local experiments? but might be useless now)
model_h5_path = save_model_h5(model, model_folder, num_classes)
# save models for TensorFlow Serving
model_suffix = "mc" if num_classes > 0 else ""
save_model_for_serving_api(model_name, model_suffix, model_h5_path, tf_models_folder)
# save model name and embedding file path in the TinyDB database
save_model_in_tinydb(model_name, model_suffix,
model_description + (MC_MODEL_DESCRIPTION_SUFFIX if num_classes > 0 else ""),
merged_vec_path, model_folder)
def save_model_h5(model, model_folder, num_classes):
model_header_name = os.path.join(model_folder, "model." + ("mc." if num_classes > 0 else "") + "json")
model_h5_name = os.path.join(model_folder, "model." + ("mc." if num_classes > 0 else "") + "h5")
model_json = model.to_json()
with open(model_header_name, "w") as json_file:
json_file.write(model_json)
# serialize weights to HDF5
model.save(model_h5_name)
return model_h5_name
def save_model_for_serving_api(model_name, model_name_suffix, original_model_path, model_folder):
# Export model for runtime API
model = tf.keras.models.load_model(original_model_path, compile=False)
export_path = os.path.join(model_folder, model_name + model_name_suffix, "1")
logging.info(f"Exporting trained model to {export_path}")
tf.saved_model.save(model, export_path)
logging.info("Done exporting!")
def save_model_in_tinydb(model_name, model_name_suffix, model_description, merged_vec_path, model_folder):
model_id = model_name + model_name_suffix
with TinyDB(os.path.join(model_folder, MODELS_TINYDB_FILENAME)) as db:
# first remove existing models with this name
existing_models = db.search(where("name") == model_id)
db.remove(doc_ids=[x.doc_id for x in existing_models])
# insert the model
db.insert({"name": model_id, "description": model_description, "emb_path": merged_vec_path})
def main(argv):
TRAIN_FILE = None
EMB_FILE = None
model_name = DEFAULT_MODEL_NAME
epochs = DEFAULT_EPOCHS
help_string = "train4api.py -i <trainfile> -n <merged.vecfile> -m <modelname> -e <epochs>"
try:
        opts, args = getopt.getopt(argv, "hi:n:m:e:", ["trainfile=", "nodevecs=", "modelname=", "epochs="])
for opt, arg in opts:
if opt == '-h':
print(help_string)
sys.exit()
elif opt in ("-i", "--trainfile"):
TRAIN_FILE = arg
elif opt in ("-n", "--nodevecs"):
EMB_FILE = arg
elif opt in ("-m", "--modelname"):
model_name = arg
elif opt in ("-e", "--epochs"):
epochs = int(arg)
except getopt.GetoptError:
print(help_string)
sys.exit()
if TRAIN_FILE is None or EMB_FILE is None:
print(help_string)
sys.exit()
print("Training file: %s" % TRAIN_FILE)
print("Emb file: %s" % EMB_FILE)
train_and_save(TRAIN_FILE, EMB_FILE, model_name, epochs)
if __name__ == "__main__":
main(sys.argv[1:])
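# Hedged CLI example (paths hypothetical; filename from the help string above):
#
#   python train4api.py -i data/train.tsv -n data/merged.vec -m my_model -e 10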
|
11541397
|
from typing import Dict, List
import itertools
from mypy.errors import Errors
from mypy.errorcodes import ErrorCode
from mypy.options import Options
from mypy.plugin import FunctionContext, Plugin, CheckerPluginInterface
from mypy.types import Instance, Type, CallableType, TypeVarType
from mypy.nodes import Expression, CallExpr, NameExpr, FuncDef, Decorator, MypyFile
from mypy.checker import TypeChecker
from mypy.subtypes import is_subtype
thinc_model_fullname = "thinc.model.Model"
chained_out_fullname = "thinc.types.XY_YZ_OutT"
intoin_outtoout_out_fullname = "thinc.types.XY_XY_OutT"
def plugin(version: str):
return ThincPlugin
class ThincPlugin(Plugin):
def __init__(self, options: Options) -> None:
super().__init__(options)
def get_function_hook(self, fullname: str):
return function_hook
def function_hook(ctx: FunctionContext) -> Type:
try:
return get_reducers_type(ctx)
except AssertionError:
# Add more function callbacks here
return ctx.default_return_type
def get_reducers_type(ctx: FunctionContext) -> Type:
"""
Determine a more specific model type for functions that combine models.
This function operates on function *calls*. It analyzes each function call
by looking at the function definition and the arguments passed as part of
the function call, then determines a more specific return type for the
function call.
This method accepts a `FunctionContext` as part of the Mypy plugin
interface. This function context provides easy access to:
* `args`: List of "actual arguments" filling each "formal argument" of the
called function. "Actual arguments" are those passed to the function
as part of the function call. "Formal arguments" are the parameters
defined by the function definition. The same actual argument may serve
to fill multiple formal arguments. In some cases the relationship may
even be ambiguous. For example, calling `range(*args)`, the actual
argument `*args` may fill the `start`, `stop` or `step` formal
arguments, depending on the length of the list.
The `args` list is of length `num_formals`, with each element
corresponding to a formal argument. Each value in the `args` list is a
list of actual arguments which may fill the formal argument. For
example, in the function call `range(*args, num)`, `num` may fill the
`start`, `end` or `step` formal arguments depending on the length of
`args`, so type-checking needs to consider all of these possibilities.
* `arg_types`: Type annotation (or inferred type) of each argument. Like
`args`, this value is a list of lists with an outer list entry for each
formal argument and an inner list entry for each possible actual
argument for the formal argument.
* `arg_kinds`: "Kind" of argument passed to the function call. Argument
kinds include positional, star (`*args`), named (`x=y`) and star2
(`**kwargs`) arguments (among others). Like `args`, this value is a list
of lists.
* `context`: AST node representing the function call with all available
type information. Notable attributes include:
* `args` and `arg_kinds`: Simple list of actual arguments, not mapped to
formal arguments.
* `callee`: AST node representing the function being called. Typically
this is a `NameExpr`. To resolve this node to the function definition
it references, accessing `callee.node` will usually return either a
`FuncDef` or `Decorator` node.
* etc.
This function infers a more specific type for model-combining functions by
making certain assumptions about how the function operates based on the
order of its formal arguments and its return type.
If the return type is `Model[InT, XY_YZ_OutT]`, the output of each
argument is expected to be used as the input to the next argument. It's
therefore necessary to check that the output type of each model is
compatible with the input type of the following model. The combined model
has the type `Model[InT, OutT]`, where `InT` is the input type of the
first model and `OutT` is the output type of the last model.
If the return type is `Model[InT, XY_XY_OutT]`, all model arguments
receive input of the same type and are expected to produce output of the
same type. It's therefore necessary to check that all models have the same
input types and the same output types. The combined model has the type
`Model[InT, OutT]`, where `InT` is the input type of all model arguments
and `OutT` is the output type of all model arguments.
Raises:
AssertionError: Raised if a more specific model type couldn't be
determined, indicating that the default general return type should
be used.
"""
# Verify that we have a type-checking API and a default return type (presumably a
# `thinc.model.Model` instance)
assert isinstance(ctx.api, TypeChecker)
assert isinstance(ctx.default_return_type, Instance)
# Verify that we're inspecting a function call to a callable defined or decorated function
assert isinstance(ctx.context, CallExpr)
callee = ctx.context.callee
assert isinstance(callee, NameExpr)
callee_node = callee.node
assert isinstance(callee_node, (FuncDef, Decorator))
callee_node_type = callee_node.type
assert isinstance(callee_node_type, CallableType)
# Verify that the callable returns a `thinc.model.Model`
# TODO: Use `map_instance_to_supertype` to map subtypes to `Model` instances.
# (figure out how to look up the `TypeInfo` for a class outside of the module being type-checked)
callee_return_type = callee_node_type.ret_type
assert isinstance(callee_return_type, Instance)
assert callee_return_type.type.fullname == thinc_model_fullname
assert callee_return_type.args
assert len(callee_return_type.args) == 2
# Obtain the output type parameter of the `thinc.model.Model` return type
# of the called API function
out_type = callee_return_type.args[1]
# Check if the `Model`'s output type parameter is one of the "special
# type variables" defined to represent model composition (chaining) and
    # homogeneous reduction
assert isinstance(out_type, TypeVarType)
assert out_type.fullname
if out_type.fullname not in {intoin_outtoout_out_fullname, chained_out_fullname}:
return ctx.default_return_type
# Extract type of each argument used to call the API function, making sure that they are also
# `thinc.model.Model` instances
args = list(itertools.chain(*ctx.args))
arg_types = []
for arg_type in itertools.chain(*ctx.arg_types):
# TODO: Use `map_instance_to_supertype` to map subtypes to `Model` instances.
assert isinstance(arg_type, Instance)
assert arg_type.type.fullname == thinc_model_fullname
assert len(arg_type.args) == 2
arg_types.append(arg_type)
# Collect neighboring pairs of arguments and their types
arg_pairs = list(zip(args[:-1], args[1:]))
arg_types_pairs = list(zip(arg_types[:-1], arg_types[1:]))
# Determine if passed models will be chained or if they all need to have
# the same input and output type
if out_type.fullname == chained_out_fullname:
# Models will be chained, meaning that the output of each model will
# be passed as the input to the next model
# Verify that model inputs and outputs are compatible
for (arg1, arg2), (type1, type2) in zip(arg_pairs, arg_types_pairs):
assert isinstance(type1, Instance)
assert isinstance(type2, Instance)
assert type1.type.fullname == thinc_model_fullname
assert type2.type.fullname == thinc_model_fullname
check_chained(
l1_arg=arg1, l1_type=type1, l2_arg=arg2, l2_type=type2, api=ctx.api
)
# Generated model takes the first model's input and returns the last model's output
return Instance(
ctx.default_return_type.type, [arg_types[0].args[0], arg_types[-1].args[1]]
)
elif out_type.fullname == intoin_outtoout_out_fullname:
# Models must have the same input and output types
# Verify that model inputs and outputs are compatible
for (arg1, arg2), (type1, type2) in zip(arg_pairs, arg_types_pairs):
assert isinstance(type1, Instance)
assert isinstance(type2, Instance)
assert type1.type.fullname == thinc_model_fullname
assert type2.type.fullname == thinc_model_fullname
check_intoin_outtoout(
l1_arg=arg1, l1_type=type1, l2_arg=arg2, l2_type=type2, api=ctx.api
)
# Generated model accepts and returns the same types as all passed models
return Instance(
ctx.default_return_type.type, [arg_types[0].args[0], arg_types[0].args[1]]
)
# Make sure the default return type is returned if no branch was selected
assert False, "Thinc mypy plugin error: it should return before this point"
def check_chained(
*,
l1_arg: Expression,
l1_type: Instance,
l2_arg: Expression,
l2_type: Instance,
api: CheckerPluginInterface,
):
if not is_subtype(l1_type.args[1], l2_type.args[0]):
api.fail(
f"Layer outputs type ({l1_type.args[1]}) but the next layer expects ({l2_type.args[0]}) as an input",
l1_arg,
code=error_layer_output,
)
api.fail(
f"Layer input type ({l2_type.args[0]}) is not compatible with output ({l1_type.args[1]}) from previous layer",
l2_arg,
code=error_layer_input,
)
def check_intoin_outtoout(
*,
l1_arg: Expression,
l1_type: Instance,
l2_arg: Expression,
l2_type: Instance,
api: CheckerPluginInterface,
):
if l1_type.args[0] != l2_type.args[0]:
api.fail(
f"Layer input ({l1_type.args[0]}) not compatible with next layer input ({l2_type.args[0]})",
l1_arg,
code=error_layer_input,
)
api.fail(
f"Layer input ({l2_type.args[0]}) not compatible with previous layer input ({l1_type.args[0]})",
l2_arg,
code=error_layer_input,
)
if l1_type.args[1] != l2_type.args[1]:
api.fail(
f"Layer output ({l1_type.args[1]}) not compatible with next layer output ({l2_type.args[1]})",
l1_arg,
code=error_layer_output,
)
api.fail(
f"Layer output ({l2_type.args[1]}) not compatible with previous layer output ({l1_type.args[1]})",
l2_arg,
code=error_layer_output,
)
error_layer_input = ErrorCode("layer-mismatch-input", "Invalid layer input", "Thinc")
error_layer_output = ErrorCode("layer-mismatch-output", "Invalid layer output", "Thinc")
class IntrospectChecker(TypeChecker):
def __init__(
self,
errors: Errors,
modules: Dict[str, MypyFile],
options: Options,
tree: MypyFile,
path: str,
plugin: Plugin,
):
self._error_messages: List[str] = []
super().__init__(errors, modules, options, tree, path, plugin)
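# Hedged usage note (module path assumed): mypy plugins such as this one are
# enabled from the project configuration, e.g. in mypy.ini or setup.cfg:
#
#   [mypy]
#   plugins = thinc.mypy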
|
11541456
|
from unittest import TestCase
def find_friend(friend_dict):
    """Count friend groups, i.e. connected components of the adjacency dict."""
not_yet_checked = list(friend_dict.keys())
to_check = []
current_group = []
all_groups = []
while not_yet_checked:
# get student to be checked
if len(to_check) > 0:
checking = to_check.pop()
not_yet_checked.remove(checking)
else:
checking = not_yet_checked.pop()
# add current student to current group
current_group.append(checking)
# add current student's friends to be checked for current group
for friend in friend_dict[checking]:
known_in_current_group = friend in [*current_group, *to_check]
if not known_in_current_group:
to_check.append(friend)
# prep for next iteration
if not to_check:
# start new group
all_groups.append(current_group)
current_group = []
# print(all_groups)
return len(all_groups)
class TestFindFriend(TestCase):
def test_zero(self):
data = {
0: [1, 2], 1: [0, 5], 2: [0],
3: [6], 4: [], 5: [1],
6: [3]
}
assert find_friend(data) == 3
def test_one(self):
data = {
0: [1, 2], 1: [0, 2], 2: [0, 1, 3],
3: [2, 4], 4: [3, 5], 5: [3, 4]
}
assert find_friend(data) == 1
def test_two(self):
data = {
0: [1], 1: [0], 2: [3], 3: [2],
4: [5], 5: [4], 6: [7], 7: [6]
}
assert find_friend(data) == 4
|
11541468
|
import pytest
import numpy as np
import scedar.cluster as cluster
import scedar.eda as eda
class TestCommunity(object):
    '''Tests for cluster.Community.'''
np.random.seed(123)
x_20x5 = np.random.uniform(size=(20, 5))
def test_simple_run(self):
cluster.Community(self.x_20x5).labs
def test_wrong_args(self):
with pytest.raises(ValueError):
cluster.Community(self.x_20x5, aff_scale=-0.1).labs
with pytest.raises(ValueError):
cluster.Community(self.x_20x5, metric='123').labs
with pytest.raises(ValueError):
cluster.Community(self.x_20x5, metric='correlation').labs
with pytest.raises(ValueError):
cluster.Community(
self.x_20x5, partition_method='NotImplementedMethod').labs
def test_different_partition_methods(self):
cluster.Community(
self.x_20x5,
partition_method="RBConfigurationVertexPartition").labs
cluster.Community(
self.x_20x5, partition_method="RBERVertexPartition").labs
cluster.Community(
self.x_20x5, partition_method="CPMVertexPartition").labs
cluster.Community(
self.x_20x5, partition_method="SignificanceVertexPartition").labs
cluster.Community(
self.x_20x5, partition_method="SurpriseVertexPartition").labs
def test_provide_graph(self):
sdm = eda.SampleDistanceMatrix(self.x_20x5)
knn_conn_mat = sdm.s_knn_connectivity_matrix(5)
knn_aff_graph = eda.SampleDistanceMatrix.knn_conn_mat_to_aff_graph(
knn_conn_mat, 2)
cluster.Community(
self.x_20x5, graph=knn_aff_graph,
partition_method="RBConfigurationVertexPartition").labs
|
11541472
|
from flask import request, redirect, Response
import urllib.parse
import json
class Origins():
endpoints = ["/api/origins"]
endpoint_name = "api_origins"
endpoint_methods = ["GET", "POST"]
endpoint_default_parameters = {
"method": "get"
}
def __init__(self, fhdhr):
self.fhdhr = fhdhr
def __call__(self, *args):
return self.get(*args)
def get(self, *args):
method = request.args.get('method', default=None, type=str)
redirect_url = request.args.get('redirect', default=None, type=str)
if method == "get":
origins_info = {}
for origin_item in self.fhdhr.origins.valid_origins:
origins_info[origin_item] = {}
origins_info[origin_item]["tuners_max"] = self.fhdhr.origins.origins_dict[origin_item].tuners
origins_info[origin_item]["channel_count"] = len(list(self.fhdhr.device.channels.list[origin_item].keys()))
origins_info[origin_item]["stream_method"] = self.fhdhr.origins.origins_dict[origin_item].stream_method
if hasattr(self.fhdhr.origins.origins_dict[origin_item], "close_stream"):
origins_info[origin_item]["close_stream_method"] = True
else:
origins_info[origin_item]["close_stream_method"] = False
origins_info_json = json.dumps(origins_info, indent=4)
return Response(status=200,
response=origins_info_json,
mimetype='application/json')
        else:
            # "get" is currently the only supported method, so any other
            # value returns here; the redirect handling below only applies
            # once more methods are added.
            return "Invalid Method"
if redirect_url:
if "?" in redirect_url:
return redirect("%s&retmessage=%s" % (redirect_url, urllib.parse.quote("%s Success" % method)))
else:
return redirect("%s?retmessage=%s" % (redirect_url, urllib.parse.quote("%s Success" % method)))
else:
if method == "scan":
return redirect('/lineup_status.json')
else:
return "%s Success" % method
|
11541474
|
from labelmodels.label_model import ClassConditionalLabelModel, LearningConfig, init_random
import numpy as np
from scipy import sparse
import torch
from torch import nn
class HMM(ClassConditionalLabelModel):
"""A generative label model that treats a sequence of true class labels as a
Markov chain, as in a hidden Markov model, and treats all labeling functions
as conditionally independent given the corresponding true class label, as
in a Naive Bayes model.
Proposed for crowdsourced sequence annotations in: <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>. Aggregating and Predicting
Sequence Labels from Crowd Annotations. In Annual Meeting of the Association
for Computational Linguistics, 2017.
"""
def __init__(self, num_classes, num_lfs, init_acc=.9, acc_prior=1,
balance_prior=1):
"""Constructor.
Initializes labeling function accuracies using optional argument and all
other model parameters uniformly.
:param num_classes: number of target classes, i.e., binary
classification = 2
:param num_lfs: number of labeling functions to model
:param init_acc: initial estimated labeling function accuracy, must
be a float in [0,1]
        :param acc_prior: strength of regularization of estimated labeling
                          function accuracies toward their initial values
        :param balance_prior: strength of regularization of the estimated
                              class balance and transition distributions
                              toward uniform distributions
        """
super().__init__(num_classes, num_lfs, init_acc, acc_prior)
self.start_balance = nn.Parameter(torch.zeros([num_classes]))
self.transitions = nn.Parameter(torch.zeros([num_classes, num_classes]))
self.balance_prior = balance_prior
def forward(self, votes, seq_starts):
"""
Computes log likelihood of sequence of labeling function outputs for
each (sequence) example in batch.
For efficiency, this function prefers that votes is an instance of
scipy.sparse.coo_matrix. You can avoid a conversion by passing in votes
with this class.
:param votes: m x n matrix in {0, ..., k}, where m is the sum of the
lengths of the sequences in the batch, n is the number of
labeling functions and k is the number of classes
:param seq_starts: vector of length l of row indices in votes indicating
the start of each sequence, where l is the number of
sequences in the batch. So, votes[seq_starts[i]] is
the row vector of labeling function outputs for the
first element in the ith sequence
        :return: vector of length l, where each element is the log-likelihood
                 of the corresponding sequence of outputs in votes
"""
jll = self._get_labeling_function_likelihoods(votes)
norm_start_balance = self._get_norm_start_balance()
norm_transitions = self._get_norm_transitions()
for i in range(0, votes.shape[0]):
if i in seq_starts:
jll[i] += norm_start_balance
else:
joint_class_pair = jll[i-1, :].clone().unsqueeze(1)
joint_class_pair = joint_class_pair.repeat(1, self.num_classes)
joint_class_pair += norm_transitions
jll[i] += joint_class_pair.logsumexp(0)
seq_ends = [x - 1 for x in seq_starts] + [votes.shape[0]-1]
seq_ends.remove(-1)
mll = torch.logsumexp(jll[seq_ends], dim=1)
return mll
def estimate_label_model(self, votes, seq_starts, config=None):
"""Estimates the parameters of the label model based on observed
labeling function outputs.
Note that a minibatch's size refers to the number of sequences in the
minibatch.
:param votes: m x n matrix in {0, ..., k}, where m is the sum of the
lengths of the sequences in the data, n is the number of
labeling functions and k is the number of classes
:param seq_starts: vector of length l of row indices in votes indicating
the start of each sequence, where l is the number of
sequences in the batch. So, votes[seq_starts[i]] is
the row vector of labeling function outputs for the
first element in the ith sequence
:param config: optional LearningConfig instance. If None, initialized
with default constructor
"""
if config is None:
config = LearningConfig()
# Initializes random seed
init_random(config.random_seed)
# Converts to CSR and integers to standardize input
        votes = sparse.csr_matrix(votes, dtype=int)
        seq_starts = np.array(seq_starts, dtype=int)
batches = self._create_minibatches(
votes, seq_starts, config.batch_size, shuffle_seqs=True)
self._do_estimate_label_model(batches, config)
def get_most_probable_labels(self, votes, seq_starts):
"""
Computes the most probable underlying sequence of labels given function
outputs
:param votes: m x n matrix in {0, ..., k}, where m is the sum of the
lengths of the sequences in the data, n is the number of
labeling functions and k is the number of classes
:param seq_starts: vector of length l of row indices in votes indicating
the start of each sequence, where l is the number of
sequences in the batch. So, votes[seq_starts[i]] is
the row vector of labeling function outputs for the
first element in the ith sequence
        :return: tuple of a vector of length m, where each element is the most
                 likely label, and the corresponding per-element probability
                 vectors
"""
# Converts to CSR and integers to standardize input
        votes = sparse.csr_matrix(votes, dtype=int)
        seq_starts = np.array(seq_starts, dtype=int)
        out = np.ndarray((votes.shape[0],), dtype=int)
out_prob = np.ndarray((votes.shape[0],), dtype=object)
offset = 0
for votes, seq_starts in self._create_minibatches(votes, seq_starts, 32):
jll = self._get_labeling_function_likelihoods(votes)
norm_start_balance = self._get_norm_start_balance()
norm_transitions = self._get_norm_transitions()
T = votes.shape[0]
bt = torch.zeros([T, self.num_classes])
bts = torch.zeros([T, self.num_classes, self.num_classes])
for i in range(0, T):
if i in seq_starts:
jll[i] += norm_start_balance
else:
p = jll[i-1].clone().unsqueeze(1).repeat(
1, self.num_classes) + norm_transitions
jll[i] += torch.max(p, dim=0)[0]
bt[i, :] = torch.argmax(p, dim=0)
bts[i, :, :] = p
jll = torch.exp(jll)
seq_ends = [x - 1 for x in seq_starts] + [votes.shape[0] - 1]
res = []
res_prob = []
j = T-1
while j >= 0:
if j in seq_ends:
res.append(torch.argmax(jll[j, :]).item())
res_prob.append(jll[j,:].detach().numpy())
if j in seq_starts:
j -= 1
continue
res.append(int(bt[j, res[-1]].item()))
res_prob.append(torch.exp(bts[j,:,res[-1]]).detach().numpy())
j -= 1
res = [x + 1 for x in res]
res.reverse()
res_prob.reverse()
for i in range(len(res)):
out[offset + i] = res[i]
out_prob[offset + i] = res_prob[i]
offset += len(res)
return out, out_prob
def get_label_distribution(self, votes, seq_starts):
"""Returns the unary and pairwise marginals over true labels estimated
by the model.
:param votes: m x n matrix in {0, ..., k}, where m is the sum of the
lengths of the sequences in the data, n is the number of
labeling functions and k is the number of classes
:param seq_starts: vector of length l of row indices in votes indicating
the start of each sequence, where l is the number of
sequences in the batch. So, votes[seq_starts[i]] is
the row vector of labeling function outputs for the
first element in the ith sequence
:return: p_unary, p_pairwise where p_unary is a m x k matrix representing
the marginal distributions over individual labels, and p_pairwise
is a m x k x k tensor representing pairwise marginals over the
ith and (i+1)th labels. For the last element in a sequence, the
k x k matrix will be all zeros.
"""
# Converts to CSR and integers to standardize input
        votes = sparse.csr_matrix(votes, dtype=int)
        seq_starts = np.array(seq_starts, dtype=int)
out_unary = np.zeros((votes.shape[0], self.num_classes))
out_pairwise = np.zeros((votes.shape[0], self.num_classes, self.num_classes))
offset = 0
for votes, seq_starts in self._create_minibatches(votes, seq_starts, 32):
# Computes observation likelihoods and initializes alpha and beta messages
cll = self._get_labeling_function_likelihoods(votes)
alpha = torch.zeros(cll.shape)
beta = torch.zeros(cll.shape)
# Computes alpha
next_seq = 0
for i in range(votes.shape[0]):
if next_seq == len(seq_starts) or i < seq_starts[next_seq]:
# i is not the start of a sequence
temp = alpha[i-1].unsqueeze(1).repeat(1, self.num_classes)
temp = temp + self._get_norm_transitions()
alpha[i] = cll[i] + temp.logsumexp(0)
else:
# i is the start of a sequence
alpha[i] = cll[i] + self._get_norm_start_balance()
next_seq += 1
# Computes beta
this_seq = seq_starts.shape[0] - 1
beta[-1, :] = 1
for i in range(votes.shape[0] - 2, -1, -1):
if i == seq_starts[this_seq] - 1:
# End of sequence
beta[i, :] = 1
this_seq -= 1
else:
temp = beta[i+1] + cll[i+1]
temp = temp.unsqueeze(1).repeat(1, self.num_classes)
temp = temp + self._get_norm_transitions()
beta[i, :] = temp.logsumexp(0)
# Computes p_unary
p_unary = alpha + beta
temp = p_unary.logsumexp(1).unsqueeze(1).repeat(1, self.num_classes)
p_unary = p_unary - temp
for i in range(p_unary.shape[0]):
p = torch.exp(p_unary[i, :] - torch.max(p_unary[i, :]))
out_unary[offset + i, :] = (p / p.sum()).detach()
# Computes p_pairwise
p_pairwise = torch.zeros(
(votes.shape[0], self.num_classes, self.num_classes))
for i in range(p_pairwise.shape[0] - 1):
p_pairwise[i, :, :] = self._get_norm_transitions()
p_pairwise[i] += alpha[i].unsqueeze(1).repeat(1, self.num_classes)
p_pairwise[i] += cll[i+1].unsqueeze(0).repeat(self.num_classes, 1)
p_pairwise[i] += beta[i+1].unsqueeze(0).repeat(self.num_classes, 1)
denom = p_pairwise[i].view(-1).logsumexp(0)
denom = denom.unsqueeze(0).unsqueeze(1)
denom = denom.repeat(self.num_classes, self.num_classes)
p_pairwise[i] -= denom
out_pairwise[offset + i, :, :] = torch.exp(p_pairwise[i]).detach()
offset += votes.shape[0]
return out_unary, out_pairwise
def get_start_balance(self):
"""Returns the model's estimated class balance for the start of a
sequence
:return: a NumPy array with one element in [0,1] for each target class,
representing the estimated prior probability that the first
element in an example sequence has that label
"""
return np.exp(self._get_norm_start_balance().detach().numpy())
def get_transition_matrix(self):
"""Returns the model's estimated transition distribution from class
label to class label in a sequence.
:return: a k x k Numpy array, in which each element i, j is the
probability p(c_{t+1} = j + 1 | c_{t} = i + 1)
"""
return np.exp(self._get_norm_transitions().detach().numpy())
def _create_minibatches(self, votes, seq_starts, batch_size, shuffle_seqs=False):
# Computes explicit seq ends so that we can shuffle the sequences
        seq_ends = np.ndarray((seq_starts.shape[0],), dtype=int)
for i in range(1, seq_starts.shape[0]):
seq_ends[i-1] = seq_starts[i] - 1
seq_ends[-1] = votes.shape[0] - 1
# Shuffles the sequences by shuffling the start and end index vectors
if shuffle_seqs:
index = np.arange(np.shape(seq_starts)[0])
np.random.shuffle(index)
seq_starts = seq_starts[index]
seq_ends = seq_ends[index]
# Splits seq_starts
seq_start_batches = [np.array(
seq_starts[i * batch_size: ((i + 1) * batch_size)],
copy=True)
for i in range(int(np.ceil(len(seq_starts) / batch_size)))
]
seq_start_batches[-1] = np.concatenate((seq_start_batches[-1], [votes.shape[0]]))
# Splits seq_ends
seq_end_batches = [
np.array(seq_ends[i * batch_size: ((i + 1) * batch_size + 1)], copy=True)
for i in range(int(np.ceil(len(seq_ends) / batch_size)))
]
seq_end_batches[-1] = np.concatenate((seq_end_batches[-1], [votes.shape[0]]))
# Builds vote_batches and relative seq_start_batches
vote_batches = []
rel_seq_start_batches = []
for seq_start_batch, seq_end_batch in zip(seq_start_batches, seq_end_batches):
vote_batch = []
            rel_seq_start_batch = np.zeros((len(seq_start_batch),), dtype=int)
total_len = 0
for i, (start, end) in enumerate(zip(seq_start_batch, seq_end_batch)):
vote_batch.append(votes[start:end+1])
rel_seq_start_batch[i] = total_len
total_len += end - start + 1
vote_batches.append(sparse.coo_matrix(sparse.vstack(vote_batch), copy=True))
rel_seq_start_batches.append(rel_seq_start_batch)
return list(zip(vote_batches, rel_seq_start_batches))
def _get_regularization_loss(self):
neg_entropy = 0.0
# Start balance
norm_start_balance = self._get_norm_start_balance()
exp_class_balance = torch.exp(norm_start_balance)
for k in range(self.num_classes):
neg_entropy += norm_start_balance[k] * exp_class_balance[k]
# Transitions
norm_transitions = self._get_norm_transitions()
for i in range(self.num_classes):
exp_transitions = torch.exp(norm_transitions[i])
for k in range(self.num_classes):
neg_entropy += norm_transitions[i, k] * exp_transitions[k]
entropy_prior = self.balance_prior * neg_entropy
return super()._get_regularization_loss() + entropy_prior
def _get_norm_start_balance(self):
return self.start_balance - self.start_balance.logsumexp(0)
def _get_norm_transitions(self):
denom = self.transitions.logsumexp(1).unsqueeze(1).repeat(1, self.num_classes)
return self.transitions - denom
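# Example usage (a minimal sketch; `votes` and `seq_starts` are hypothetical
# inputs shaped as described in the docstrings above):
#
#     model = HMM(num_classes=3, num_lfs=5)
#     model.estimate_label_model(votes, seq_starts)
#     labels, label_probs = model.get_most_probable_labels(votes, seq_starts)
#     p_unary, p_pairwise = model.get_label_distribution(votes, seq_starts)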
class NaiveBayes(ClassConditionalLabelModel):
"""A generative label model that assumes that all labeling functions are
conditionally independent given the true class label, i.e., the naive Bayes
assumption.
Proposed in: <NAME> and <NAME>. Maximum likelihood
estimation of observer error-rates using the EM algorithm.
Journal of the Royal Statistical Society C, 28(1):20–28, 1979.
Proposed for labeling functions in: <NAME>, <NAME>, <NAME>, <NAME>,
and <NAME>. Data programming: Creating large training sets, quickly. In
Neural Information Processing Systems, 2016.
"""
def __init__(self, num_classes, num_lfs, init_acc=.9, acc_prior=0.025,
balance_prior=0.025, learn_class_balance=True):
"""Constructor.
Initializes labeling function accuracies using optional argument and all
other model parameters uniformly.
:param num_classes: number of target classes, i.e., binary
classification = 2
:param num_lfs: number of labeling functions to model
:param init_acc: initial estimated labeling function accuracy, must
be a float in [0,1]
        :param acc_prior: strength of regularization of estimated labeling
                          function accuracies toward their initial values
        :param balance_prior: strength of the entropy regularization applied to
                              the estimated class balance (larger values push
                              it toward uniform)
:param learn_class_balance: whether to estimate the distribution over
target classes (True) or assume to be
uniform (False)
"""
super().__init__(num_classes, num_lfs, init_acc, acc_prior)
self.class_balance = nn.Parameter(
torch.zeros([num_classes]), requires_grad=learn_class_balance)
self.balance_prior = balance_prior
def forward(self, votes):
"""Computes log likelihood of labeling function outputs for each
example in the batch.
        For efficiency, this function prefers that votes is an instance of
        scipy.sparse.coo_matrix. You can avoid a conversion by passing votes
        in as an instance of this class.
:param votes: m x n matrix in {0, ..., k}, where m is the batch size,
n is the number of labeling functions and k is the number
of classes
:return: 1-d tensor of length m, where each element is the
log-likelihood of the corresponding row in labels
"""
class_ll = self._get_norm_class_balance()
conditional_ll = self._get_labeling_function_likelihoods(votes)
joint_ll = conditional_ll + class_ll
return torch.logsumexp(joint_ll, dim=1)
def estimate_label_model(self, votes, config=None):
"""Estimates the parameters of the label model based on observed
labeling function outputs.
:param votes: m x n matrix in {0, ..., k}, where m is the batch size,
n is the number of labeling functions and k is the number
of classes
:param config: optional LearningConfig instance. If None, initialized
with default constructor
"""
if config is None:
config = LearningConfig()
# Initializes random seed
init_random(config.random_seed)
# Converts to CSR to standardize input
        votes = sparse.csr_matrix(votes, dtype=int)
batches = self._create_minibatches(
votes, config.batch_size, shuffle_rows=True)
self._do_estimate_label_model(batches, config)
def get_label_distribution(self, votes):
"""Returns the posterior distribution over true labels given labeling
function outputs according to the model
:param votes: m x n matrix in {0, ..., k}, where m is the batch size,
n is the number of labeling functions and k is the number
of classes
:return: m x k matrix, where each row is the posterior distribution over
the true class label for the corresponding example
"""
# Converts to CSR to standardize input
        votes = sparse.csr_matrix(votes, dtype=int)
labels = np.ndarray((votes.shape[0], self.num_classes))
batches = self._create_minibatches(votes, 4096, shuffle_rows=False)
offset = 0
for votes, in batches:
class_balance = self._get_norm_class_balance()
lf_likelihood = self._get_labeling_function_likelihoods(votes)
jll = class_balance + lf_likelihood
for i in range(votes.shape[0]):
p = torch.exp(jll[i, :] - torch.max(jll[i, :]))
p = p / p.sum()
for j in range(self.num_classes):
labels[offset + i, j] = p[j]
offset += votes.shape[0]
return labels
def get_most_probable_labels(self, votes):
"""Returns the most probable true labels given observed function outputs.
:param votes: m x n matrix in {0, ..., k}, where m is the batch size,
n is the number of labeling functions and k is the number
of classes
:return: 1-d Numpy array of most probable labels
"""
return np.argmax(self.get_label_distribution(votes), axis=1) + 1
def get_class_balance(self):
"""Returns the model's estimated class balance
:return: a NumPy array with one element in [0,1] for each target class,
representing the estimated prior probability that an example
has that label
"""
return np.exp(self._get_norm_class_balance().detach().numpy())
def _create_minibatches(self, votes, batch_size, shuffle_rows=False):
if shuffle_rows:
index = np.arange(np.shape(votes)[0])
np.random.shuffle(index)
votes = votes[index, :]
# Creates minibatches
batches = [(sparse.coo_matrix(
votes[i * batch_size: (i + 1) * batch_size, :],
copy=True),)
for i in range(int(np.ceil(votes.shape[0] / batch_size)))
]
return batches
def _get_regularization_loss(self):
neg_entropy = 0.0
norm_class_balance = self._get_norm_class_balance()
exp_class_balance = torch.exp(norm_class_balance)
for k in range(self.num_classes):
neg_entropy += norm_class_balance[k] * exp_class_balance[k]
entropy_prior = self.balance_prior * neg_entropy
return super()._get_regularization_loss() + entropy_prior
def _get_norm_class_balance(self):
return self.class_balance - torch.logsumexp(self.class_balance, dim=0)
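# Example usage (a minimal sketch; `votes` is a hypothetical m x n matrix of
# labeling function outputs in {0, ..., k}):
#
#     model = NaiveBayes(num_classes=2, num_lfs=10)
#     model.estimate_label_model(votes)
#     posteriors = model.get_label_distribution(votes)  # m x k matrix
#     labels = model.get_most_probable_labels(votes)    # values in {1, ..., k}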
|
11541496
|
import pypugjs
from pypugjs.parser import Parser
from pypugjs.nodes import Tag
from typing import List
from operator import itemgetter
import ast
class XMLElement:
def __init__(self, tag: str, attrib: dict):
self._tag = tag
self._attrib = attrib
self._children: List = []
self._text = None
@property
def count_children(self):
return len(self._children)
@property
def children(self):
return self._children
@property
def tag(self):
return self._tag
@property
def text(self):
return self._text
@text.setter
def text(self, value):
self._text = value
def add_child(self, child):
if not self.has_child(child):
self._children.append(child)
def remove_child(self, child):
if self.has_child(child):
self._children.remove(child)
def has_child(self, child):
return child in self._children
@property
def attributes(self):
return self._attrib
    def get_attribute(self, key, default=None):
return self._attrib.get(key, default)
def set_attribute(self, key, value):
self._attrib[key] = value
    def remove_attribute(self, key):
        if self.has_attribute(key):
            del self._attrib[key]
def has_attribute(self, key):
return key in self._attrib
class Compiler(object):
def __init__(self, node):
self._node = node
self._buffer = None
def compile(self):
self.visit(self._node, root=self._buffer)
return self._buffer
def visit(self, node, **kwargs):
self.visitNode(node, **kwargs)
def visitNode(self, node, **kwargs):
name = node.__class__.__name__
visit_fn = getattr(self, f"visit{name}", None)
if visit_fn:
visit_fn(node, **kwargs)
else:
raise NotImplementedError(f"Node {name} not supported")
def visitBlock(self, block, **kwargs):
for node in block.nodes:
self.visit(node, **kwargs)
def visitTag(self, tag, **kwargs):
attrs = {}
for attr in tag._attrs:
name, value = itemgetter("name", "val")(attr)
attrs[name] = ast.literal_eval(value)
element = XMLElement(tag.name, attrs)
if kwargs.get("root"):
kwargs.get("root").add_child(element)
else:
self._buffer = element
self.visit(tag.block, root=element)
if tag.text:
self.visit(tag.text, root=element)
def visitText(self, text, **kwargs):
if kwargs.get("root").text:
kwargs.get("root").text += '\n' + ''.join(text.nodes).strip()
else:
kwargs.get("root").text = ''.join(text.nodes).strip()
def visitString(self, text, **kwargs):
self.visitText(text, **kwargs)
def parse_pug_to_obj(template: str):
try:
block = Parser(template).parse()
return Compiler(block).compile()
except Exception as e:
raise ValueError(str(e))
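if __name__ == "__main__":
    # Minimal demo of the parser above. The template string is illustrative;
    # the exact tree shape depends on the installed pypugjs version.
    root = parse_pug_to_obj('html\n  body\n    p some text')
    print(root.tag)             # expected: "html"
    print(root.count_children)  # expected: 1 (the body element)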
|
11541513
|
import torch
import numpy as np
from scipy.spatial.transform import Rotation as R, Slerp
from scipy.interpolate import interp1d
from slam.common.utils import assert_debug, check_tensor
from slam.common.rotation import torch_euler_to_mat, torch_mat_to_euler, torch_pose_matrix_jacobian_euler
class PosesInterpolator:
"""Object which performs interpolation of poses using timestamps
Poses and corresponding key timestamps are passed to the constructor.
    The PosesInterpolator returns a linear interpolation on these poses
    when called with new timestamps (SLERP for the rotation part, linear
    interpolation for the translation part).
"""
def __init__(self, poses: np.ndarray, timestamps: np.ndarray):
check_tensor(poses, [-1, 4, 4], np.ndarray)
check_tensor(timestamps, [-1], np.ndarray)
self.min_timestamp = timestamps.min()
self.max_timestamp = timestamps.max()
self.slerp = Slerp(timestamps, R.from_matrix(poses[:, :3, :3]))
self.interp_tr = interp1d(timestamps, poses[:, :3, 3], axis=0)
def __call__(self, timestamps: np.ndarray):
if timestamps.min() < self.min_timestamp or timestamps.max() > self.max_timestamp:
timestamps = np.clip(timestamps, self.min_timestamp, self.max_timestamp)
tr = self.interp_tr(timestamps)
rots = self.slerp(timestamps)
poses = np.eye(4, dtype=np.float64).reshape(1, 4, 4).repeat(timestamps.shape[0], axis=0)
poses[:, :3, :3] = rots.as_matrix()
poses[:, :3, 3] = tr
return poses
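# Example usage (a minimal sketch; `key_poses` is a hypothetical (N, 4, 4)
# array of poses and `key_times` the matching (N,) array of timestamps):
#
#     interpolator = PosesInterpolator(key_poses, key_times)
#     poses = interpolator(np.linspace(key_times[0], key_times[-1], 100))
#     # poses.shape == (100, 4, 4)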
def transform_pointcloud(pointcloud: np.ndarray, tr: np.ndarray):
"""
Applies the transform `tr` to the pointcloud
Parameters
----------
pointcloud : np.ndarray (N, 3)
tr : np.ndarray (4, 4)
"""
return np.einsum("ij,nj->ni", tr[:3, :3], pointcloud) + tr[:3, 3].reshape(1, 3)
class Pose(object):
"""
A Pose is a tool to interpret tensors of float as SE3 poses
Parameters
----------
    pose_type : str
        The pose representation type (currently only "euler" is supported)
"""
def __init__(self, pose_type: str):
self.pose_type = pose_type
assert_debug(self.pose_type in self.__supported_poses())
@staticmethod
def __supported_poses():
return ["euler"] # TODO , "quaternions"
def euler_convention(self):
"""
Returns the euler convention used for the parametrisation of the rotation
Fails if self.pose_type is not equal to "euler"
"""
assert_debug(self.pose_type == "euler")
return "xyz"
def num_rot_params(self) -> int:
"""
Returns
-------
int :
The number of parameters of rotation for this representation
"""
if self.pose_type == "quaternions":
return 4
else:
return 3
def num_params(self) -> int:
"""
Returns
-------
int :
The number of parameters (rotation + translation) for this representation
"""
return self.num_rot_params() + 3
def inverse_pose_matrix(self, params_tensor: torch.Tensor) -> torch.Tensor:
"""
Returns the inverse of the pose matrix
Parameters
----------
params_tensor : [B, 6/7] or [B, 4, 4]
"""
if len(params_tensor.shape) == 2:
params_tensor = self.build_pose_matrix(params_tensor)
check_tensor(params_tensor, [-1, 4, 4])
inverse = torch.zeros_like(params_tensor)
rt = params_tensor[:, :3, :3].permute(0, 2, 1)
inverse[:, :3, :3] = rt
inverse[:, :3, 3] = - torch.einsum("bij,bj->bi", rt, params_tensor[:, :3, 3])
inverse[:, 3, 3] = 1.0
return inverse
def build_pose_matrix(self, params_tensor: torch.Tensor) -> torch.Tensor:
"""
Returns a pose matrix tensor from a pose parameters tensor
Parameters
----------
params_tensor : torch.Tensor
The tensor of the 6 or 7 parameters of the pose
Returns
-------
torch.Tensor
The tensor of matrix
"""
check_tensor(params_tensor, [-1, self.num_rot_params() + 3])
b = params_tensor.size(0)
rotation_tensor = self.rot_matrix_from_params(params_tensor[:, 3:])
pose = torch.cat([rotation_tensor, torch.zeros(b, 1, 3,
device=params_tensor.device,
dtype=params_tensor.dtype)], dim=1) # [B, 4, 3]
trans = torch.cat([params_tensor[:, :3],
torch.ones(b, 1, device=params_tensor.device, dtype=params_tensor.dtype)], dim=1) \
.unsqueeze(2) # [B, 4, 1]
pose = torch.cat([pose, trans], dim=2) # [B, 4, 4]
return pose
def __to_pose_matrix(self, pose: torch.Tensor):
if len(pose.shape) == 3 and pose.size(1) == 4 and pose.size(2) == 4:
t_pose_matrix = pose
else:
check_tensor(pose, [-1, self.num_rot_params() + 3])
t_pose_matrix = self.build_pose_matrix(pose)
return t_pose_matrix
def apply_rotation(self, tensor: torch.Tensor, pose: torch.Tensor) -> torch.Tensor:
"""
Applies the rotation part of the pose on the point cloud or normal cloud
Parameters
----------
tensor : [B, N, 3]
A point or normal cloud tensor
pose : [B, 4, 4] or [B, P]
A pose matrix or pose params tensor
"""
t_pose_matrix = self.__to_pose_matrix(pose)
transformed = torch.einsum("bij,bnj->bni", t_pose_matrix[:, :3, :3], tensor)
return transformed
def apply_transformation(self, points_3d: torch.Tensor, pose: torch.Tensor) -> torch.Tensor:
"""
Applies a transformation to a point cloud
Parameters
----------
points_3d : [B, N, 3]
        An X, Y, Z point cloud tensor
pose : [B, 4, 4] or [B, P]
A pose matrix tensor or a pose params tensor
"""
t_pose_matrix = self.__to_pose_matrix(pose)
rot_matrix_t = t_pose_matrix[:, :3, :3].permute(0, 2, 1)
points_3d = torch.matmul(points_3d, rot_matrix_t)
tr = t_pose_matrix[:, :3, 3].unsqueeze(1)
points_3d = points_3d + tr
return points_3d
def from_pose_matrix(self, pose_matrix_tensor: torch.Tensor) -> torch.Tensor:
"""
Returns the tensor of the parameters of the pose
Parameters
----------
pose_matrix_tensor : torch.Tensor
The matrix tensor [B, 4, 4]
Returns
-------
torch.Tensor : [B, P]
The pose parameters tensor.
P is the degrees of freedom 6, (or 7 for 'quaternions')
"""
rotation_matrix = pose_matrix_tensor[:, :3, :3]
rot_params = self.rot_params_from_matrix(rotation_matrix)
trans_params = pose_matrix_tensor[:, :3, 3]
return torch.cat([trans_params, rot_params], dim=1)
def rot_matrix_from_params(self, rot_params: torch.Tensor) -> torch.Tensor:
"""
Builds a pose matrix tensor from its rotation parameters
Parameters
----------
rot_params : [B, ROT_P]
The rotation parameters
"""
if self.pose_type == "euler":
return torch_euler_to_mat(rot_params, convention=self.euler_convention())
# return TF3d.euler_angles_to_matrix(rot_params, convention=self.euler_convention())
elif self.pose_type in ["quaternions", "quaternions_vec"]:
quaternions = rot_params
if self.pose_type == "quaternions_vec":
# Transform the vector part of the quaternion (qx, qy, qz) into a unit quaternion
quaternions = torch.cat([quaternions[:, :1].detach() * 0 + 1, quaternions], dim=1)
# transform to unit quaternions
norm_quat = quaternions / quaternions.norm(p=2, dim=1, keepdim=True)
w, x, y, z = norm_quat[:, 0], norm_quat[:, 1], norm_quat[:, 2], norm_quat[:, 3]
B = norm_quat.size(0)
w2, x2, y2, z2 = w.pow(2), x.pow(2), y.pow(2), z.pow(2)
wx, wy, wz = w * x, w * y, w * z
xy, xz, yz = x * y, x * z, y * z
rotation_matrix = torch.stack([w2 + x2 - y2 - z2, 2 * xy - 2 * wz, 2 * wy + 2 * xz,
2 * wz + 2 * xy, w2 - x2 + y2 - z2, 2 * yz - 2 * wx,
2 * xz - 2 * wy, 2 * wx + 2 * yz, w2 - x2 - y2 + z2], dim=1).reshape(B, 3, 3)
return rotation_matrix
else:
raise ValueError("Unrecognised pose type")
def rot_params_from_matrix(self, rot_matrix: torch.Tensor) -> torch.Tensor:
"""
Returns
-------
torch.Tensor
A [B, P] tensor with the parameters of the representation of the rotation matrices
"""
if self.pose_type == "euler":
return torch_mat_to_euler(rot_matrix, convention=self.euler_convention())
# return TF3d.matrix_to_euler_angles(rot_matrix, convention=self.euler_convention())
elif self.pose_type in ["quaternions", "quaternions_vec"]:
# TODO quaternions = self.matrix_to_quaternion(rot_matrix)
raise NotImplementedError("")
# # Deal with the sign ambiguity of the quaternions : force the first parameter qw to 1
# quaternions = quaternions / quaternions[:, 0:1]
# if self.pose_type == "quaternions":
# unit_quaternions = quaternions / quaternions.norm(p=2, dim=1, keepdim=True)
# return unit_quaternions
# else:
# # returns unscaled rotation parameters (supposing that qw = 1)
# # Useful for pose prediction
# return quaternions[:, 1:4]
else:
raise ValueError(f"Unexpected pose_type {self.pose_type}")
def pose_matrix_jacobian(self, pose_params: torch.Tensor):
assert_debug(self.pose_type == "euler", 'Only euler angles are supported for now')
return torch_pose_matrix_jacobian_euler(pose_params)
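# Example usage (a minimal sketch; the parameter layout [tx, ty, tz, rx, ry,
# rz] per row follows build_pose_matrix / from_pose_matrix above):
#
#     pose = Pose("euler")
#     params = torch.zeros(1, pose.num_params())  # identity pose
#     matrix = pose.build_pose_matrix(params)     # [1, 4, 4]
#     inverse = pose.inverse_pose_matrix(matrix)  # [1, 4, 4]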
|
11541519
|
from __future__ import print_function
import json
import os
import pickle
import boto3
import tensorflow as tf
from correct_text import create_model, DefaultMovieDialogConfig, decode_sentence
from text_corrector_data_readers import MovieDialogReader
def safe_mkdir(path):
try:
os.mkdir(path)
except OSError:
pass
def download(client, filename, local_path=None, s3_path=None):
if s3_path is None:
s3_path = MODEL_PARAMS_DIR + "/" + filename
if local_path is None:
local_path = os.path.join(MODEL_PATH, filename)
print("Downloading " + filename)
client.download_file(BUCKET_NAME, s3_path, local_path)
# Define resources on S3.
BUCKET_NAME = "deeptextcorrecter"
ROOT_DATA_PATH = "/tmp/"
MODEL_PARAMS_DIR = "model_params"
MODEL_PATH = os.path.join(ROOT_DATA_PATH, MODEL_PARAMS_DIR)
# Create tmp dirs for storing data locally.
safe_mkdir(ROOT_DATA_PATH)
safe_mkdir(MODEL_PATH)
# Download files from S3 to local disk.
s3_client = boto3.client('s3')
model_ckpt = "41900"
tf_meta_filename = "translate.ckpt-{}.meta".format(model_ckpt)
download(s3_client, tf_meta_filename)
tf_params_filename = "translate.ckpt-{}".format(model_ckpt)
download(s3_client, tf_params_filename)
tf_ckpt_filename = "checkpoint"
download(s3_client, tf_ckpt_filename)
corrective_tokens_filename = "corrective_tokens.pickle"
corrective_tokens_path = os.path.join(ROOT_DATA_PATH,
corrective_tokens_filename)
download(s3_client, corrective_tokens_filename,
local_path=corrective_tokens_path)
token_to_id_filename = "token_to_id.pickle"
token_to_id_path = os.path.join(ROOT_DATA_PATH, token_to_id_filename)
download(s3_client, token_to_id_filename, local_path=token_to_id_path)
# Load model.
config = DefaultMovieDialogConfig()
sess = tf.Session()
print("Loading model")
model = create_model(sess, True, MODEL_PATH, config=config)
print("Loaded model")
with open(corrective_tokens_path, 'rb') as f:
    corrective_tokens = pickle.load(f)
with open(token_to_id_path, 'rb') as f:
    token_to_id = pickle.load(f)
data_reader = MovieDialogReader(config, token_to_id=token_to_id)
print("Done initializing.")
def process_event(event, context):
print("Received event: " + json.dumps(event, indent=2))
outputs = decode_sentence(sess, model, data_reader, event["text"],
corrective_tokens=corrective_tokens,
verbose=False)
return {"input": event["text"], "output": " ".join(outputs)}
|
11541522
|
import unittest
import pandas as pd
import nlu
import tests.nlu_hc_tests.secrets as sct
from sparknlp.annotator import BertSentenceEmbeddings
from tests.test_utils import *
class AssertionTests(unittest.TestCase):
def test_assertion_dl_model(self):
SPARK_NLP_LICENSE = sct.SPARK_NLP_LICENSE
AWS_ACCESS_KEY_ID = sct.AWS_ACCESS_KEY_ID
AWS_SECRET_ACCESS_KEY = sct.AWS_SECRET_ACCESS_KEY
JSL_SECRET = sct.JSL_SECRET
nlu.auth(SPARK_NLP_LICENSE,AWS_ACCESS_KEY_ID,AWS_SECRET_ACCESS_KEY,JSL_SECRET)
# b = BertSentenceEmbeddings.pretrained('sbiobert_base_cased_mli','en','clinical/models')
        # todo en.ner.ade Error not accessible in 2.7.6??
s1='The patient has COVID. He got very sick with it.'
s2='Peter got the Corona Virus!'
s3='COVID 21 has been diagnosed on the patient'
data = [s1,s2,s3]
# en.resolve_sentence.icd10cm
#TODO Not correct
resolver_ref = 'en.resolve_sentence.icd10cm.augmented_billable'
res = nlu.load(f'en.ner.diseases {resolver_ref}', verbose=True).predict(data, drop_irrelevant_cols=False, metadata=True)
# res = nlu.load('en.ner.anatomy', verbose=True).predict(['The patient has cancer and a tumor and high fever and will die next week. He has pain in his left food and right upper brain', ' She had a seizure.'], drop_irrelevant_cols=False, metadata=True)
print(res.columns)
for c in res :
print(c)
print(res[c])
print(res)
if __name__ == '__main__':
    unittest.main()
|
11541571
|
import praw
import time
import re
# ### BOT CONFIGURATION ### #
CONFIG_WIKIPAGE = "schedulebot-config"
# ### END BOT CONFIGURATION ### #
class ScheduledPost:
def __init__(self, sub, first, title="Scheduled Post", text=None, link=None, repeat="-1", times="-1", flair_text="", flair_css="", distinguish="False", sticky="False", contest_mode="False"):
self.sub = sub
self.first = first
self.title = title
self.text = text
self.link = link
self.repeat = repeat
self.times = int(times)
self.flair_text = flair_text
self.flair_css = flair_css
self.distinguish = distinguish.lower() == "true"
self.sticky = sticky.lower() == "true"
self.contest_mode = contest_mode.lower() == "true"
try:
self.first = time.mktime(time.strptime(self.first, "%d.%m.%Y %H:%M %z"))
except ValueError:
self.first = time.mktime(time.strptime(self.first, "%d.%m.%Y %H:%M"))
num = int(repeat.split(" ")[0])
unit = repeat.split(" ")[-1].lower()
if unit == "years":
num *= 365*24*60*60
elif unit == "months":
num *= 30*24*60*60
elif unit == "weeks":
num *= 7*24*60*60
elif unit == "days":
num *= 24*60*60
elif unit == "hours":
num *= 60*60
elif unit == "minutes":
num *= 60
elif unit == "seconds":
num *= 1
else:
num = -1
if num == 0:
num = 1
self.repeat = num
def get_time_until_next_post(self):
diff = time.time() - self.first
if diff < 0:
return -diff
if self.repeat < 0:
return float("inf")
if self.times > 0:
if diff // self.repeat >= self.times - 1:
return float("inf")
used = diff % self.repeat
return self.repeat - used
def get_next_post_number(self):
diff = time.time() - self.first
if diff < 0:
return 0
if self.repeat < 0:
return -1
return int(diff // self.repeat) + 1
def get_next_post_time(self):
return time.time() + self.get_time_until_next_post()
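# Example (a sketch; the keyword values mirror the wiki config fields parsed
# by read_config below):
#
#     post = ScheduledPost(sub, "01.01.2020 12:00", title="Weekly Thread",
#                          text="Discuss!", repeat="1 weeks")
#     post.get_time_until_next_post()  # seconds until the next posting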
def repl_indentation(matchobj):
return "\r" * matchobj.group(0).count(matchobj.group(1))
def read_config(sub):
scheduled_posts = []
config = sub.get_wiki_page(CONFIG_WIKIPAGE).content_md
config = config.replace("\r\n", "\n")
rules = list(filter(len, config.split("---\n")))
if len(rules) < 1:
return scheduled_posts
    match = re.match(r"^\s+", rules[0])
if not match:
print("Error: Could not define indentation")
return scheduled_posts
indentation = match.group(0)
for rule in rules:
lines = [re.sub("^({0})+".format(indentation), repl_indentation, line) for line in rule.split("\n")]
properties = {}
last_property = ""
for line in lines:
level = line.count("\r")
if level == 1:
last_property = line.replace("\r", "").split(": ")[0].strip().lower()
properties[last_property] = line.replace("\r", "")[len(last_property) + 2:].strip()
else:
properties[last_property] += "\n" + line.replace("\r", "").strip()
for key in properties:
if properties[key].startswith("|\n"):
properties[key] = properties[key][2:]
# print(key, ":", properties[key])
try:
scheduled_posts.append(ScheduledPost(sub, **properties))
except TypeError:
print("Rule for post with title", properties["title"], "is not correct!")
return scheduled_posts
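# The wiki page format read_config appears to expect (illustrative): rules are
# separated by "---" lines, properties are indented one level, and multi-line
# values start with "|" and use deeper indentation:
#
#     ---
#         first: 01.01.2020 12:00
#         title: Weekly Discussion Thread
#         repeat: 1 weeks
#         sticky: True
#         text: |
#             Post body goes here.
#     ---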
|
11541572
|
import pyromod.listen
from pyrogram import Client, __version__
from pyrogram.raw.all import layer
from DonLee_Robot_V2 import LOGGER, Config
class User(Client):
def __init__(self):
super().__init__(
Config.USER_SESSION,
api_hash=Config.API_HASH,
api_id=Config.API_ID,
workers=4
)
self.LOGGER = LOGGER
async def start(self):
await super().start()
usr_bot_me = await self.get_me()
return (self, usr_bot_me.id)
class DonLee_Robot(Client):
USER: User = None
USER_ID: int = None
def __init__(self):
super().__init__(
"bot",
api_hash=Config.API_HASH,
api_id=Config.API_ID,
plugins={
"root": "DonLee_Robot_V2"
},
workers=200,
bot_token=Config.BOT_TOKEN,
sleep_threshold=10
)
self.LOGGER = LOGGER
async def start(self):
await super().start()
bot_details = await self.get_me()
self.set_parse_mode("html")
self.LOGGER(__name__).info(
f"@{bot_details.username} started! "
)
self.USER, self.USER_ID = await User().start()
app = DonLee_Robot()
app.run()
|
11541574
|
import pathlib
from daskperiment.core.errors import TrialIDNotFoundError
import daskperiment.io.pickle as pickle
from daskperiment.util.log import get_logger
logger = get_logger(__name__)
def init_backend(experiment_id=None, backend=None):
"""
Initialize backend from Experiment ID and protocol.
    Parameters
    ----------
experiment_id: str
Experiment id
backend: str
Backend identifier
Returns
-------
Backend: backend
"""
if issubclass(type(backend), _BaseBackend):
return backend
if experiment_id is None:
msg = ('Experiment ID is not provided. This is only allowed '
'in package testing (otherwise, it is a package bug)')
logger.warning(msg)
experiment_id = 'daskperiment_package_test'
if backend == 'local':
# LocalBackend
dname = '{}'.format(experiment_id)
from daskperiment.config import _CACHE_DIR
backend = _CACHE_DIR / dname
if maybe_redis(backend):
from daskperiment.backend.redis import RedisBackend
return RedisBackend(experiment_id, backend)
elif maybe_mongo(backend):
from daskperiment.backend.mongo import MongoBackend
return MongoBackend(experiment_id, backend)
elif isinstance(backend, pathlib.Path):
from daskperiment.backend.local import LocalBackend
return LocalBackend(experiment_id, backend)
else:
raise NotImplementedError(backend)
def maybe_redis(uri):
"""
Check whether arg should be regarded as Redis
    Parameters
    ----------
uri: obj
Argument to be distinguished
Returns
-------
bool: maybe_redis
"""
try:
import redis
except ImportError:
return False
if isinstance(uri, redis.ConnectionPool):
return True
elif not isinstance(uri, str):
return False
protocols = ['redis://', 'rediss://', 'unix://']
return any(uri.startswith(p) for p in protocols)
def maybe_mongo(uri):
"""
Check whether arg should be regarded as MongoDB
    Parameters
    ----------
uri: obj
Argument to be distinguished
Returns
-------
bool: maybe_mongo
"""
try:
import pymongo
except ImportError:
return False
if isinstance(uri, (pymongo.mongo_client.MongoClient,
pymongo.collection.Collection)):
msg = ('To initialize MongoBackend, pymongo Database '
'instance must be provided, given: {}{}')
raise ValueError(msg.format(uri, type(uri)))
elif isinstance(uri, pymongo.database.Database):
return True
elif not isinstance(uri, str):
return False
return uri.startswith('mongodb://')
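# Example usage (a sketch; the URIs are illustrative):
#
#     init_backend('my_experiment', 'local')
#     # -> LocalBackend rooted under the package cache directory
#     init_backend('my_experiment', 'redis://localhost:6379/0')
#     # -> RedisBackend
#     init_backend('my_experiment', 'mongodb://localhost:27017/daskperiment')
#     # -> MongoBackend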
class _BaseBackend(object):
def __init__(self, experiment_id):
self.experiment_id = experiment_id
def save(self):
"""
Save myself to specified location.
        Database-like backends do nothing because their internal state is
        saved during other operations.
"""
# overridden in LocalBackend
# other backends should be stateless
return self
def load(self):
"""
Load myself from specified location.
        Database-like backends do nothing because their internal state is
        saved during other operations.
"""
return self
################################################
# Key & value management
################################################
def get_parameter_key(self, trial_id):
return self._get_parameter_key(trial_id)
def get_history_key(self, trial_id):
return self._get_history_key(trial_id)
def get_metric_key(self, metric_key, trial_id):
return self._get_metric_key(metric_key, trial_id)
def get_persist_key(self, step, trial_id):
"""
Get key to save persisted results
"""
return self._get_persist_key(step, trial_id)
def get_step_hash_key(self, key):
"""
        Get key to save step hashes
"""
return self._get_step_hash_key(key)
def get_code_key(self, trial_id):
"""
Get key to save code
"""
return self._get_code_key(trial_id)
def get_environment_key(self, env_key, trial_id, ext):
# ext is used in LocalBackend
return self._get_environment_key(env_key, trial_id, ext)
class _NoSQLBackend(_BaseBackend):
_SEP = ':'
def __repr__(self):
return "{}('{}')".format(self.__class__.__name__, self.uri)
def __eq__(self, other):
if not isinstance(other, type(self)):
return False
if self.experiment_id != other.experiment_id:
return False
if self.uri != other.uri:
return False
return True
def __getstate__(self):
state = {}
state['experiment_id'] = self.experiment_id
state['uri'] = self.uri
# do not pickle _client
return state
@property
def client(self):
raise NotImplementedError
################################################
# Managers
################################################
@property
def metrics(self):
if not hasattr(self, '_metrics'):
self._metrics = self.get_metric_manager()
return self._metrics
@property
def trials(self):
if not hasattr(self, '_trials'):
self._trials = self.get_trial_manager()
return self._trials
################################################
# Low level API
################################################
def set(self, key, value):
"""
This method must be overwritten by actual class
"""
raise NotImplementedError
def get(self, key):
"""
This method must be overwritten by actual class
"""
raise NotImplementedError
def append_list(self, key, value):
"""
This method must be overwritten by actual class
"""
raise NotImplementedError
def get_list(self, key):
"""
This method must be overwritten by actual class
"""
raise NotImplementedError
def increment(self, key):
"""
This method must be overwritten by actual class
"""
raise NotImplementedError
################################################
# High level API
################################################
def _validate_key(self, key):
# overwritten in MongoBackend to support MongoKey
assert isinstance(key, (str, bytes)), key
def save_text(self, key, text):
self._validate_key(key)
return self.set(key, text)
def load_text(self, key):
"""
Load code context from file
"""
self._validate_key(key)
res = self.get(key)
if res is None:
# TODO: define better exception
# key may not contain trial id
raise TrialIDNotFoundError(key)
else:
return self._finalize_text(res)
def _finalize_text(self, value):
# overwritten in RedisBackend to decode binary
return value
def dumps_object(self, value):
return pickle.dumps(value)
def loads_object(self, value):
return pickle.loads(value)
def save_object(self, key, obj):
"""
Save object to key
"""
self._validate_key(key)
return self.set(key, self.dumps_object(obj))
def load_object(self, key):
"""
Load object from key
"""
self._validate_key(key)
res = self.get(key)
if res is None:
raise TrialIDNotFoundError(key)
else:
return self.loads_object(res)
|
11541581
|
from .object import MWDBElement, MWDBObject
class MWDBShareReason(object):
"""
    Represents the reason why an object was shared with the specified group
"""
def __init__(self, api, share_data):
self.api = api
self._data = share_data
self._related_object = None
@property
def what(self):
"""
Returns what was shared
:rtype: :class:`mwdblib.MWDBObject` or None
"""
if self._related_object is None:
self._related_object = MWDBObject.create(self.api, {
"id": self._data["related_object_dhash"],
"type": self._data["related_object_type"]
})
return self._related_object
@property
def why(self):
"""
Returns why it was shared
:return: One of actions: 'queried', 'shared', 'added', 'migrated'
"""
return self._data["reason_type"]
@property
def who(self):
"""
        Returns who caused the action returned by the :py:attr:`why` property.
:return: User login
"""
return self._data["related_user_login"]
def __str__(self):
"""
Returns str with unparsed reason string
"""
return "{} {}:{} by {}".format(self._data["reason_type"],
self._data["related_object_type"],
self._data["related_object_dhash"],
self._data["related_user_login"])
class MWDBShare(MWDBElement):
"""
Represents share entry in MWDB object
"""
def __init__(self, api, data, parent):
super(MWDBShare, self).__init__(api, data)
self.parent = parent
@property
def timestamp(self):
"""
Returns timestamp of share
:return: datetime object with object share timestamp
:rtype: datetime.datetime
"""
import dateutil.parser
return dateutil.parser.parse(self.data["access_time"])
@property
def group(self):
"""
Returns a group name that object is shared with
:return: group name
:rtype: str
"""
return self.data["group_name"]
@property
def reason(self):
"""
Returns why object was shared
:rtype: :class:`MWDBShareReason`
"""
return MWDBShareReason(self.api, self.data)
# Backwards compatibility
MalwarecageShareReason = MWDBShareReason
MalwarecageShare = MWDBShare
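# Example usage (a sketch; assumes an authenticated mwdblib client and uses a
# hypothetical file hash):
#
#     from mwdblib import MWDB
#     mwdb = MWDB(api_key="...")
#     obj = mwdb.query_file("780e8fb254e0b8c299f834f61dc80809")
#     for share in obj.shares:
#         print(share.group, share.timestamp, str(share.reason))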
|
11541600
|
import os
import boto3
def answer_no(x):
    return str(x).lower() in ['0', 'no', 'false']
def answer_yes(x):
    return str(x).lower() in ['1', 'yes', 'true']
def send_notifications(message):
# TODO
return True
def is_bucket_not_public(bucket_name):
s3 = boto3.client('s3')
bucket_acl = s3.get_bucket_acl(Bucket=bucket_name)
# If there is a permission attached with any value for AllUsers,
# it means the bucket is public
# We don't need to check if the permission any of
# READ|WRITE|READ_ACP|WRITE_ACP|FULL_CONTROL
for grantee in bucket_acl['Grants']:
if grantee['Grantee']['Type'] == 'Group' \
and grantee['Grantee']['URI'] == 'http://acs.amazonaws.com/groups/global/AllUsers':
return False
return True
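# For reference, a grant that makes a bucket public looks roughly like this in
# the get_bucket_acl response (illustrative):
#
#     {'Grantee': {'Type': 'Group',
#                  'URI': 'http://acs.amazonaws.com/groups/global/AllUsers'},
#      'Permission': 'READ'}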
def lambda_handler(event, context):
    rc = 1
    message_body = 'Checking trails'
    print(message_body)
    cloudtrail = boto3.client('cloudtrail')
    trails = cloudtrail.describe_trails()
    for trail in trails['trailList']:
        notification = 'Checking ' + trail['Name']
        print(notification)
        message_body += notification + "\n"
        if trail['IsMultiRegionTrail'] \
                and ('KmsKeyId' in trail and trail['KmsKeyId'] != '') \
                and trail['IncludeGlobalServiceEvents'] \
                and trail['LogFileValidationEnabled']:
            notification = trail['Name'] + ' is OK'
            print(notification)
            message_body += notification + "\n"
            rc = 0
        else:
            notification = trail['Name'] + \
                ' does not meet the requirements'
            print(notification)
            message_body += notification + "\n"
        if not is_bucket_not_public(trail['S3BucketName']):
            rc = 1
            notification = trail['Name'] + \
                "'s bucket has public access."
            print(notification)
            message_body += notification + "\n"
    if rc == 1 and ('DRY_RUN' in os.environ and answer_no(os.environ['DRY_RUN'])):
        send_notifications(message_body)
    exit(rc)
# if __name__ == "__main__":
# event = 1
# context = 1
# lambda_handler(event, context)
|