"""Validate the full schema (property, source and referenced)."""
from ..... import exceptions
from ..... import types as oa_types
from .....helpers import foreign_key as foreign_key_helper
from .....helpers import peek
from .....helpers import relationship
from ....helpers import backref as backref_helper
from ....helpers import iterate
from ... import model
from ... import types
from ...helpers import properties as properties_helper
from ...helpers import value as value_helper
from .. import simple
def _check_pre_defined_property_schema(
    *,
    property_name: str,
    property_schema: oa_types.Schema,
    schema: oa_types.Schema,
    schemas: oa_types.Schemas,
    foreign_key: str,
):
    """
    Check for a pre-defined property on a schema.

    Assume property_schema has already been checked for validity.

    Args:
        property_name: The expected foreign key property name to check for.
        property_schema: The schema for the foreign key.
        schema: The schema to check for the property on.
        schemas: Used to resolve any $ref.
        foreign_key: The foreign key value.

    Returns:
        A result if something is wrong with the reason or None otherwise.
    """
    # Find the pre-defined property, if any; absence is not an error
    all_properties = iterate.properties_items(
        schema=schema, schemas=schemas, stay_within_tablename=True
    )
    existing = next(
        (item for item in all_properties if item[0] == property_name), None
    )
    if existing is None:
        return None

    # The pre-defined property itself must be a valid simple property
    _, existing_schema = existing
    schema_result = simple.check(schemas, existing_schema)
    if not schema_result.valid:
        return types.Result(
            False,
            f"{property_name} property :: {schema_result.reason}",
        )

    # Key information must agree with the expected foreign key schema
    key_checks = (
        (oa_types.OpenApiProperties.TYPE, peek.type_),
        (oa_types.OpenApiProperties.FORMAT, peek.format_),
        (oa_types.OpenApiProperties.MAX_LENGTH, peek.max_length),
        (oa_types.OpenApiProperties.DEFAULT, peek.default),
    )
    for key, func in key_checks:
        match_result = value_helper.check_matches(
            func=func,
            reference_schema=property_schema,
            check_schema=existing_schema,
            schemas=schemas,
        )
        if match_result is not None:
            return types.Result(
                valid=False, reason=f"{property_name} :: {key} :: {match_result}"
            )

    # The pre-defined property must declare the expected foreign key
    actual_foreign_key = peek.foreign_key(schema=existing_schema, schemas=schemas)
    if actual_foreign_key is None:
        return types.Result(
            False,
            f"{property_name} must define a foreign key",
        )
    if actual_foreign_key != foreign_key:
        return types.Result(
            False,
            f"the x-foreign-key of {property_name} is wrong, expected {foreign_key}, "
            f"the actual is {actual_foreign_key}",
        )
    return None
def _check_target_schema(
    *,
    target_schema: oa_types.Schema,
    schemas: oa_types.Schemas,
    column_name: str,
    modify_schema: oa_types.Schema,
    foreign_key_property_name: str,
) -> types.Result:
    """
    Check the schema that is targeted by a foreign key.

    Assume target_schema is valid.

    Args:
        target_schema: The schema targeted by a foreign key.
        schemas: The schemas used to resolve any $ref.
        column_name: The name of the foreign key column.
        modify_schema: The schema to add the foreign key property to.
        foreign_key_property_name: The name of the foreign key property to define.

    Returns:
        A Result: invalid with a reason when the targeted schema is missing
        the column, the column schema is invalid or a pre-defined foreign key
        property on modify_schema is inconsistent; valid (with no reason)
        otherwise.
    """
    # Look for foreign key property schema
    properties = iterate.properties_items(
        schema=target_schema,
        schemas=schemas,
        stay_within_tablename=True,
    )
    filtered_properties = filter(lambda arg: arg[0] == column_name, properties)
    foreign_key_target_property = next(filtered_properties, None)
    if foreign_key_target_property is None:
        return types.Result(
            False,
            f"foreign key targeted schema must have the {column_name} property",
        )
    # Validate the schema
    (
        foreign_key_target_property_name,
        foreign_key_target_property_schema,
    ) = foreign_key_target_property
    schema_result = simple.check(schemas, foreign_key_target_property_schema)
    if not schema_result.valid:
        return types.Result(
            False,
            f"{foreign_key_target_property_name} property :: {schema_result.reason}",
        )
    # Check for pre-defined foreign key property
    foreign_key = foreign_key_helper.calculate_foreign_key(
        column_name=column_name,
        target_schema=target_schema,
        schemas=schemas,
    )
    pre_defined_result = _check_pre_defined_property_schema(
        property_name=foreign_key_property_name,
        property_schema=foreign_key_target_property_schema,
        schema=modify_schema,
        schemas=schemas,
        foreign_key=foreign_key,
    )
    if pre_defined_result is not None:
        return pre_defined_result
    return types.Result(True, None)
def _check_many_to_many_schema(
    *, schema: oa_types.Schema, schemas: oa_types.Schemas
) -> types.OptResult:
    """
    Check one of the many to many schemas.

    Args:
        schema: The schema to check.
        schemas: Used to resolve any $ref.

    Returns:
        A result of the schema is not valid with a reason or None.
    """
    model_result = model.check(schema=schema, schemas=schemas)
    if not model_result.valid:
        return model_result

    # Lazily collect the properties flagged as primary keys
    candidates = iterate.properties_items(
        schema=schema, schemas=schemas, stay_within_tablename=True
    )
    primary_keys = (
        item
        for item in candidates
        if peek.primary_key(schema=item[1], schemas=schemas) is True
    )

    first_primary_key = next(primary_keys, None)
    if first_primary_key is None:
        return types.Result(False, "schema must have a primary key")
    # A second primary key means a composite key, which is unsupported here
    if next(primary_keys, None) is not None:
        return types.Result(
            False,
            "many-to-many relationships currently only support single primary key "
            "schemas",
        )

    # The single primary key property must itself be a valid simple property
    primary_key_property_name, primary_key_property_schema = first_primary_key
    schema_result = simple.check(schemas, primary_key_property_schema)
    if schema_result.valid is False:
        return types.Result(
            False, f"{primary_key_property_name} property :: {schema_result.reason}"
        )
    return None
def _check_many_to_many(
    *,
    parent_schema: oa_types.Schema,
    property_schema: oa_types.Schema,
    schemas: oa_types.Schemas,
) -> types.Result:
    """
    Check many-to-many relationships.

    Args:
        parent_schema: The schema that has the property that defines the relationship.
        property_schema: The schema of the items for the property that defines the
            relationship.
        schemas: Used to resolve any $ref.

    Returns:
        Whether the relationship is valid and the reason if it is not.
    """
    # The source side must be a valid many-to-many participant
    result = _check_many_to_many_schema(schema=parent_schema, schemas=schemas)
    if result is not None:
        return types.Result(result.valid, f"source schema :: {result.reason}")

    # Resolve the referenced side only after the source side passes
    _, referenced_schema = relationship.get_ref_schema_many_to_x(
        property_schema=property_schema, schemas=schemas
    )
    result = _check_many_to_many_schema(schema=referenced_schema, schemas=schemas)
    if result is not None:
        return types.Result(result.valid, f"referenced schema :: {result.reason}")

    return types.Result(True, None)
def _check_backref_property_properties_basics(
    property_name: str,
    backref_schema: oa_types.Schema,
    schemas: oa_types.Schemas,
) -> types.OptResult:
    """
    Check the basic requirements of the backref schema.

    Args:
        property_name: The name of the property the back reference is for.
        backref_schema: The schema of the back reference.
        schemas: All defined schemas used to resolve any $ref.

    Returns:
        A result with the reason if the back reference schema is not valid or
        None otherwise.
    """
    # Check for object type
    type_ = peek.type_(schema=backref_schema, schemas=schemas)
    if type_ != "object":
        return types.Result(False, "the back reference schema must be an object")
    # Check properties values
    properties_values_result = properties_helper.check_properties_values(
        schema=backref_schema, schemas=schemas
    )
    if properties_values_result is not None:
        return properties_values_result
    # The back reference must not contain the relationship property itself,
    # which would make the schema reference itself
    properties_items = iterate.properties_items(schema=backref_schema, schemas=schemas)
    property_name_matches = next(
        filter(lambda args: args[0] == property_name, properties_items), None
    )
    if property_name_matches is not None:
        # Fix: error message previously misspelled "relationship"
        return types.Result(
            False,
            "properties cannot contain the property name of the relationship to avoid "
            "circular references",
        )
    return None
def _check_backref_property_properties(
    parent_schema: oa_types.Schema,
    property_name: str,
    backref_schema: oa_types.Schema,
    schemas: oa_types.Schemas,
) -> types.OptResult:
    """
    Check the backref schema.

    Args:
        parent_schema: The schema that has the property embedded in it.
        property_name: The name of the property.
        backref_schema: The schema of the back reference.
        schemas: All defined schemas used to resolve any $ref.

    Returns:
        A result with the reason if the back reference schema is not valid or
        None otherwise.
    """
    basics_result = _check_backref_property_properties_basics(
        property_name=property_name, backref_schema=backref_schema, schemas=schemas
    )
    if basics_result is not None:
        return basics_result
    # Check for backreference properties not in the parent schema properties
    parent_properties = dict(
        iterate.properties_items(schema=parent_schema, schemas=schemas)
    )
    properties_items = iterate.properties_items(schema=backref_schema, schemas=schemas)
    property_name_not_in_parent = next(
        filter(lambda args: args[0] not in parent_properties, properties_items), None
    )
    if property_name_not_in_parent is not None:
        name, _ = property_name_not_in_parent
        return types.Result(False, f"could not find {name} in the model schema")
    # Check properties are dictionaries
    properties_items_result = properties_helper.check_properties_items(
        schema=backref_schema, schemas=schemas
    )
    if properties_items_result is not None:
        return properties_items_result
    # Check that key information on each back reference property matches the
    # corresponding parent schema property
    checks = (
        (oa_types.OpenApiProperties.TYPE, peek.type_),
        (oa_types.OpenApiProperties.FORMAT, peek.format_),
        (oa_types.OpenApiProperties.MAX_LENGTH, peek.max_length),
        (oa_types.OpenApiProperties.DEFAULT, peek.default),
    )
    for key, func in checks:
        # Explicit loop instead of map/filter over a lambda: avoids both the
        # cell-var-from-loop suppression the closure over func required and
        # the rebinding of the property_name parameter on failure
        for backref_property_name, backref_property_schema in iterate.properties_items(
            schema=backref_schema, schemas=schemas
        ):
            match_result = value_helper.check_matches(
                func=func,
                reference_schema=parent_properties[backref_property_name],
                check_schema=backref_property_schema,
                schemas=schemas,
            )
            if match_result is not None:
                return types.Result(
                    valid=False,
                    reason=f"{backref_property_name} :: {key} :: {match_result}",
                )
    return None
# Expected JSON type of the back reference property for each relationship
# type, as enforced by _check_backref_property below.
_BACKREF_EXPECTED_TYPE = {
    oa_types.RelationshipType.MANY_TO_ONE: "array",
    oa_types.RelationshipType.ONE_TO_ONE: "object",
    oa_types.RelationshipType.ONE_TO_MANY: "object",
    oa_types.RelationshipType.MANY_TO_MANY: "array",
}
def _check_backref_property(
    parent_schema: oa_types.Schema,
    property_name: str,
    relationship_type: oa_types.RelationshipType,
    backref_property_schema: oa_types.Schema,
    schemas: oa_types.Schemas,
) -> types.OptResult:
    """Check the back reference property."""
    # The back reference property must be readOnly
    if not peek.read_only(schema=backref_property_schema, schemas=schemas):
        return types.Result(False, "the property must be readOnly")

    # The type must agree with the relationship type
    actual_type = peek.type_(schema=backref_property_schema, schemas=schemas)
    expected_type = _BACKREF_EXPECTED_TYPE[relationship_type]
    if expected_type != actual_type:
        return types.Result(
            False,
            f"unexpected type, expected {expected_type} actual "
            f"{actual_type}",
        )

    # Array-valued back references carry their object schema on items
    if relationship_type in {
        oa_types.RelationshipType.MANY_TO_ONE,
        oa_types.RelationshipType.MANY_TO_MANY,
    }:
        items_schema = peek.items(schema=backref_property_schema, schemas=schemas)
        if items_schema is None:
            return types.Result(False, "items must be defined")
        items_result = _check_backref_property_properties(
            parent_schema=parent_schema,
            property_name=property_name,
            backref_schema=items_schema,
            schemas=schemas,
        )
        if items_result is not None:
            return types.Result(
                False, f"items :: properties :: {items_result.reason}"
            )
        return None

    # Object-valued back references are checked directly
    object_result = _check_backref_property_properties(
        parent_schema=parent_schema,
        property_name=property_name,
        backref_schema=backref_property_schema,
        schemas=schemas,
    )
    if object_result is not None:
        return types.Result(False, f"properties :: {object_result.reason}")
    return None
def _check_backref(
parent_schema: oa_types.Schema,
property_name: str,
relationship_type: oa_types.RelationshipType,
property_schema: oa_types.Schema,
schemas: oa_types.Schemas,
) -> types.OptResult:
"""
Check the back reference, if defined.
Assume the property schema and parent schema is valid.
Algorithm:
| |
"""Tests for accounts.views."""
# pylint: disable=no-value-for-parameter,maybe-no-member,invalid-name
from datetime import datetime
from django.contrib.auth.models import Group, Permission
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import PermissionDenied
from django.core.urlresolvers import reverse
from django.http import Http404
from django.test import Client, TestCase, RequestFactory
from django.test.utils import override_settings
from mock import patch
from model_mommy import mommy
from open_connect.accounts import views
from open_connect.accounts.models import Invite, User
from open_connect.connectmessages.tests import ConnectMessageTestCase
from open_connect.media.tests import (
get_in_memory_image_file, get_in_memory_image_instance
)
from open_connect.connect_core.utils.basetests import ConnectTestMixin
class UserDetailViewTest(ConnectTestMixin, TestCase):
    """Tests for the user detail view."""

    def setUp(self):
        """Handy things."""
        # A bare GET request reused by the view-level (non-client) tests
        self.request_factory = RequestFactory()
        self.request = self.request_factory.get('/')

    def test_context_object_name(self):
        """Test that the object name is account."""
        user_detail_view = views.UserDetailView.as_view()
        user = self.create_user()
        self.request.user = user
        response = user_detail_view(self.request, user_uuid=user.uuid)
        self.assertTrue('account' in response.context_data.keys())

    def test_user_property(self):
        """Test that the user property returns the user."""
        view = views.UserDetailView()
        user = self.create_user()
        view.kwargs = {'user_uuid': user.uuid}
        self.assertEqual(view.user, user)

    def test_non_existant_404(self):
        """Test that a UUID that does not exist causes a 404"""
        view = views.UserDetailView()
        view.kwargs = {'user_uuid': 'does-not-exist'}
        with self.assertRaises(Http404):
            # Attribute access alone triggers the lookup
            # pylint: disable=W0104
            view.user

    def test_direct_message_regular_user(self):
        """
        Test that a regular user cannot send a direct message to regular users
        """
        visitor = self.create_user()
        recipient = self.create_user()
        self.login(visitor)
        self.assertFalse(visitor.can_direct_message_user(recipient))
        response = self.client.get(
            reverse('user_details', kwargs={'user_uuid': recipient.uuid}))
        self.assertEqual(response.status_code, 200)
        # The "create direct message" link must not be rendered on the page
        self.assertNotContains(
            response,
            reverse(
                'create_direct_message',
                kwargs={
                    'user_uuid': recipient.uuid
                }
            )
        )

    def test_direct_message_staff(self):
        """
        Test that a regular user can direct message staff
        """
        visitor = self.create_user()
        recipient = self.create_user(is_staff=True)
        self.login(visitor)
        self.assertTrue(visitor.can_direct_message_user(recipient))
        response = self.client.get(
            reverse('user_details', kwargs={'user_uuid': recipient.uuid}))
        self.assertEqual(response.status_code, 200)
        self.assertContains(
            response,
            reverse(
                'create_direct_message',
                kwargs={
                    'user_uuid': recipient.uuid
                }
            )
        )

    def test_direct_message_regular_user_by_staff(self):
        """
        Test that a staff member can send a direct message to regular users
        """
        visitor = self.create_user(is_staff=True)
        recipient = self.create_user()
        self.login(visitor)
        self.assertTrue(visitor.can_direct_message_user(recipient))
        response = self.client.get(
            reverse('user_details', kwargs={'user_uuid': recipient.uuid}))
        self.assertEqual(response.status_code, 200)
        self.assertContains(
            response,
            reverse(
                'create_direct_message',
                kwargs={
                    'user_uuid': recipient.uuid
                }
            )
        )

    def test_direct_message_regular_user_by_superuser(self):
        """
        Test that a superuser can send a direct message to regular users
        """
        visitor = self.create_user(is_superuser=True)
        recipient = self.create_user()
        self.login(visitor)
        self.assertTrue(visitor.can_direct_message_user(recipient))
        response = self.client.get(
            reverse('user_details', kwargs={'user_uuid': recipient.uuid}))
        self.assertEqual(response.status_code, 200)
        self.assertContains(
            response,
            reverse(
                'create_direct_message',
                kwargs={
                    'user_uuid': recipient.uuid
                }
            )
        )

    def test_direct_message_regular_user_by_permission(self):
        """
        Test that someone with the correct permission can message a user
        """
        visitor = self.create_user()
        self.add_perm(
            visitor, 'can_initiate_direct_messages', 'accounts', 'user')
        recipient = self.create_user()
        self.login(visitor)
        self.assertTrue(visitor.can_direct_message_user(recipient))
        response = self.client.get(
            reverse('user_details', kwargs={'user_uuid': recipient.uuid}))
        self.assertEqual(response.status_code, 200)
        self.assertContains(
            response,
            reverse(
                'create_direct_message',
                kwargs={
                    'user_uuid': recipient.uuid
                }
            )
        )

    def test_show_banned_warning_user_is_banned(self):
        """Banned warning should be shown if the user is banned."""
        request_user = self.create_superuser()
        banned_user = self.create_user(is_banned=True)
        self.client.login(username=request_user.email, password='<PASSWORD>')
        response = self.client.get(
            reverse('user_details', kwargs={'user_uuid': banned_user.uuid}))
        self.assertTrue(response.context['show_banned_warning'])

    def test_show_banned_warning_user_is_not_banned(self):
        """Banned warning should not show if the user is not banned."""
        request_user = self.create_user()
        unbanned_user = self.create_user()
        self.client.login(username=request_user.email, password='<PASSWORD>')
        response = self.client.get(
            reverse('user_details', kwargs={'user_uuid': unbanned_user.uuid}))
        self.assertFalse(response.context['show_banned_warning'])

    def test_show_banned_warning_to_self_banned(self):
        """Banned warning should not show to the user that is banned."""
        banned_user = self.create_user(is_banned=True)
        self.client.login(username=banned_user.email, password='<PASSWORD>')
        response = self.client.get(
            reverse('user_details', kwargs={'user_uuid': banned_user.uuid}))
        self.assertFalse(response.context['show_banned_warning'])

    def test_show_banned_warning_to_self_not_banned(self):
        """Banned warning should not show to an unbanned user."""
        unbanned_user = self.create_user()
        self.client.login(username=unbanned_user.email, password='<PASSWORD>')
        response = self.client.get(
            reverse('user_details', kwargs={'user_uuid': unbanned_user.uuid}))
        self.assertFalse(response.context['show_banned_warning'])

    def test_get_context_data(self):
        """Context should have nav_active_item and show_banned_warning."""
        user = self.create_user()
        self.client.login(username=user.email, password='<PASSWORD>')
        response = self.client.get(
            reverse('user_details',
                    kwargs={'user_uuid': user.uuid})
        )
        context = response.context
        self.assertEqual(context['nav_active_item'], user)
        self.assertEqual(context['show_banned_warning'], False)
        self.assertQuerysetItemsEqual(
            context['groups_joined'], user.groups_joined)

    def test_get_object(self):
        """get_object should return the correct user."""
        view = views.UserDetailView()
        view.request = self.request_factory.get('/')
        user = self.create_user()
        view.request.user = user
        view.kwargs = {'user_uuid': user.uuid}
        self.assertEqual(view.get_object(), user)

    @patch('open_connect.accounts.views.messages')
    def test_get_object_user_is_banned(self, mock_messages):
        """should return the user and add a warning if user is banned."""
        user = mommy.make('accounts.User', is_banned=True)
        view = views.UserDetailView()
        view.request = self.request
        view.request.user = self.create_superuser()
        view.kwargs = {'user_uuid': user.uuid}
        self.assertEqual(view.get_object(), user)
        # Second positional argument of the first messages.warning call is
        # the warning text
        self.assertEqual(
            mock_messages.warning.call_args_list[0][0][1],
            'This is a banned account.'
        )

    def test_get_object_user_is_banned_no_permission_to_view_profile(self):
        """should raise Http404 if user is banned and you don't have perms."""
        user = mommy.make('accounts.User', is_banned=True)
        view = views.UserDetailView()
        view.request = self.request_factory.get('/')
        view.request.user = self.create_user(is_staff=True)
        view.kwargs = {'user_uuid': user.uuid}
        self.assertRaises(Http404, view.get_object)
class UserUpdateViewTest(ConnectTestMixin, TestCase):
    """Tests for the user update view."""

    def setUp(self):
        """Setup the UserUpdateViewTest TestCase"""
        # self.user owns no groups; tests that need a group owner create one
        self.user = self.create_user(password='<PASSWORD>')
        self.client.login(username=self.user.username, password='<PASSWORD>')

    def test_authenticated_user_own_profile(self):
        """Test that an authenticated user can access their own update view."""
        response = self.client.get(
            reverse('update_user', args=(self.user.uuid,)))
        self.assertEqual(response.context_data['object'], self.user)

    def test_admin_access_view(self):
        """
        Test that admins with the `accounts.change_user` permission can view
        """
        admin_user = self.create_user(password='<PASSWORD>')
        admin_client = Client()
        admin_client.login(username=admin_user.username, password='<PASSWORD>')
        # Without the permission the page must 404
        unprivlidged_result = admin_client.get(
            reverse('update_user', args=(self.user.uuid,)))
        self.assertEqual(unprivlidged_result.status_code, 404)
        change_user_permission = Permission.objects.get(
            content_type__app_label='accounts', codename='change_user')
        admin_user.user_permissions.add(change_user_permission)
        # With the permission the page renders and shows the target user
        privlidged_result = admin_client.get(
            reverse('update_user', args=(self.user.uuid,)))
        self.assertEqual(privlidged_result.status_code, 200)
        self.assertContains(privlidged_result, self.user)

    @override_settings(LOGIN_URL=reverse('login'))
    def test_update_anonymous_user(self):
        """Unauthenticated users should be redirected to the login page."""
        client = Client()
        update_url = reverse('update_user', args=(self.user.uuid,))
        response = client.get(update_url)
        self.assertRedirects(
            response,
            '%s?next=%s' % (reverse('login'), update_url)
        )

    def test_with_image(self):
        """Make sure the user's image gets set when it is provided."""
        data = {
            'image': get_in_memory_image_file(),
            'timezone': 'US/Central',
            'group_notification_period': 'none',
            'email': self.user.email
        }
        response = self.client.post(
            reverse('update_user', args=(self.user.uuid,)), data)
        self.assertRedirects(
            response,
            reverse('user_profile'),
            target_status_code=302
        )
        user = User.objects.get(pk=self.user.pk)
        # Rewind the uploaded file before comparing its contents
        data['image'].seek(0)
        self.assertEqual(user.image.image.read(), data['image'].read())

    def test_clear_image(self):
        """A user's image should be removed if clear is selected."""
        self.user.image = get_in_memory_image_instance(self.user)
        self.user.save()
        data = {
            'image-clear': True,
            'image': None,
            'timezone': 'US/Central',
            'group_notification_period': 'none',
            'email': self.user.email
        }
        response = self.client.post(
            reverse('update_user', args=(self.user.uuid,)), data)
        self.assertRedirects(
            response,
            reverse('user_profile'),
            target_status_code=302
        )
        user = User.objects.get(pk=self.user.pk)
        self.assertIsNone(user.image)

    def test_non_group_owner_does_not_have_receive_group_join_field(self):
        """A user who owns no groups should not see the field."""
        # Fix: this body and the one below previously had each other's
        # names/docstrings. self.user owns no groups, so the field is absent.
        response = self.client.get(
            reverse('update_user', args=(self.user.uuid,)))
        self.assertNotIn(
            'receive_group_join_notifications',
            response.context['user_form'].fields.keys()
        )

    def test_group_owner_has_receive_group_join_notifications_field(self):
        """A user who owns any groups should see the field."""
        user = self.create_user()
        group = mommy.make('groups.Group')
        group.owners.add(user)
        client = Client()
        client.login(username=user.email, password='<PASSWORD>')
        response = client.get(
            reverse('update_user', args=(user.uuid,)))
        self.assertIn(
            'receive_group_join_notifications',
            response.context['user_form'].fields.keys()
        )
class UpdateUserPermissionViewTest(ConnectTestMixin, TestCase):
"""Tests for UpdateUserPermissionView"""
def setUp(self):
"""Handy things."""
self.request_factory = RequestFactory()
# Add 2 permissions to the test, one valid and visible, one hidden
demo_content_type = ContentType.objects.create(
app_label='demo-app-label', model='DemoModel')
self.valid_permission = mommy.make(
Permission,
codename='viewable-permission',
name='Viewable Permission',
content_type=demo_content_type)
self.hidden_permission = mommy.make(
Permission,
codename='hidden-permission',
name='Hidden Permission',
content_type=demo_content_type)
# Create a view class that contains those permissions
self.view_class = views.UpdateUserPermissionView
self.view_class.editable_permissions = (
('demo-app-label', 'viewable-permission'),
)
def tearDown(self):
"""
Tear down the test
Cleanup the test by deleting the test permissions, then verify the
cleanup
"""
self.valid_permission.delete()
self.hidden_permission.delete()
self.assertNotIn(self.valid_permission, Permission.objects.all())
self.assertNotIn(self.hidden_permission, Permission.objects.all())
def test_no_impersonation(self):
"""Test that the view will reject those actively impersonating"""
# Create a user who is actively impersonating another user
user = self.create_user()
user.impersonating = True
# Create a request
request = self.request_factory.get('/')
request.user = user
# Instead of testing the dispatch() method directly or creating a
# django test client that is both logged in and impersonating, we can
# pass a pre-made request directly into the view.
with self.assertRaises(PermissionDenied):
self.view_class.as_view()(request)
def test_get_queryset(self):
"""
Test the view's get_queryset() method
Test that neither the requesting User nor a superuser User are in the
queryset of User objects returned by the view's get_queryset()
"""
requesting_user = self.create_user()
regular_user = self.create_user()
superuser = self.create_superuser()
view = self.view_class()
view.request = self.request_factory.get('/')
view.request.user = requesting_user
queryset = view.get_queryset()
# The regular user should be in the queryset
self.assertIn(regular_user, queryset)
# Superusers cannot be in the possible queryset
self.assertNotIn(superuser, queryset)
# The requesting user cannot be in the possible queryset
self.assertNotIn(requesting_user, queryset)
def test_get_editable_permissions(self):
"""
Test the `get_editable_permissions` method on the view.
"""
view = self.view_class()
editable_permissions_queryset = view.get_editable_permissions()
self.assertEqual(editable_permissions_queryset.count(), 1)
self.assertIn(self.valid_permission, editable_permissions_queryset)
self.assertNotIn(self.hidden_permission, editable_permissions_queryset)
def test_get_permissions_queryset(self):
"""
Test the get_permissions_queryset() method.
"""
view = self.view_class()
view.request = self.request_factory.get('/')
view.request.user = self.create_user()
# Create a new "target" user, who is the user the view will be set to
# edit during a regular request.
target_user = self.create_user()
view.object = target_user
# Get the existing queryset of changeable permissions. This should only
# include permissions set in the `view.editable_permissions` attribute.
permissions_queryset = view.get_permissions_queryset()
self.assertEqual(permissions_queryset.count(), 1)
self.assertIn(self.valid_permission, permissions_queryset)
self.assertNotIn(self.hidden_permission, permissions_queryset)
# Add the hidden permission to the user's list of permissions. This
# should cause the hidden permission to appear in the queryset
target_user.user_permissions.add(self.hidden_permission)
# Re-generate a queryset of editable views
extended_permissions_queryset = view.get_permissions_queryset()
self.assertEqual(extended_permissions_queryset.count(), 2)
self.assertIn(self.valid_permission, extended_permissions_queryset)
self.assertIn(self.hidden_permission, extended_permissions_queryset)
def test_get_form(self):
"""
Test the `get_form` method for users with and without extra permissions
"""
admin = self.create_superuser()
self.client.login(username=admin.email, password='<PASSWORD>')
# Ensure that by default 'Viewable Permission' is found in the form
# field and 'Hidden Permission' is not
user = self.create_user()
response = self.client.get(
reverse('update_user_permissions', args=[user.uuid]))
form | |
# Source repository: PRECISE/SMEDL
# Copyright (c) 2021 The Trustees of the University of Pennsylvania
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Structures and types for monitoring system architectures (.a4smedl files)
"""
import sys
import itertools
import types
from .expr import SmedlType
from smedl.parser.exceptions import (
NameCollision, NameNotDefined, AlreadyInSyncset, ParameterError,
DuplicateConnection, TypeMismatch, InternalError, ChannelMismatch,
LoopbackError)
class Parameter(object):
    """A parameter for a target specification. May be used for either monitor
    parameters, event parameters, or state var initializations. Specifies
    whether it comes from the source monitor or source event and an index in
    the source objects list of parameters."""

    def __init__(self, identity, index):
        """Initialize the Parameter.

        identity - Boolean. True if the parameter is a monitor parameter, also
          known as a monitor identity. False if it is an event parameter. Note:
          This is referring only to the source of the parameter! The
          destination depends only on where this parameter is used.
        index - The index in the monitor or event's parameter list that this
          parameter should come from.

        If this is to be a wildcard parameter for a monitor, index should be
        None.
        """
        self._identity = identity
        self._index = index
        # The Target that this parameter belongs to (set once via the setter)
        self._target = None

    @property
    def identity(self):
        """Return True if this is a monitor identity ("#x"), False if this is
        an event parameter ("$x")."""
        return self._identity

    @property
    def index(self):
        """Return None if this is a wildcard parameter ("*"). Otherwise, return
        the index of this parameter (the "x" in "$x" or "#x")."""
        return self._index

    @property
    def target(self):
        """Return the Target this parameter belongs to (None until set)."""
        return self._target

    @target.setter
    def target(self, value):
        """Set the target if not already set"""
        if self._target is None:
            self._target = value
        else:
            raise InternalError("Adding more than one Target to a Parameter")

    @property
    def source_type(self):
        """Get the type of whichever parameter this Parameter is a reference
        to"""
        if self._index is None:
            raise InternalError(
                "Trying to take 'source_type' of a wildcard Parameter")
        if self._identity:
            return self._target.connection.source_mon.params[self._index]
        # Fix: this lookup was wrapped in `except BaseException as e:
        # raise Exception() from e`, which discarded the original error type
        # (and even converted KeyboardInterrupt/SystemExit into a bare
        # Exception). Let errors propagate unchanged, consistent with the
        # identity branch above.
        return self._target.connection.source_event_params[self._index]

    def __repr__(self):
        if self._index is None:
            return '*'
        elif self._identity:
            return '#' + str(self._index)
        else:
            return '$' + str(self._index)
class Target(object):
    """The destination of a connection, e.g. an imported event or a monitor
    creation. (Distinct from the "target system," which is the system being
    monitored.)"""

    def __init__(self, type_, monitor, mon_params):
        """Initialize the Target object.

        type_ - String describing the kind of target, used by the Jinja
            templates
        monitor - Destination DeclaredMonitor, or None for a TargetExport
        mon_params - Iterable of Parameters for the monitor identities
        """
        self._target_type = type_
        self._monitor = monitor
        # Claim each identity Parameter for this Target.
        for parameter in mon_params:
            parameter.target = self
        self._mon_params = tuple(mon_params)
        self._connection = None

    @property
    def target_type(self):
        """String describing the kind of target, for use in Jinja."""
        return self._target_type

    @property
    def monitor(self):
        """Destination DeclaredMonitor (None for an export target)."""
        return self._monitor

    @property
    def mon_string(self):
        """Name of the destination monitor, or "pedl" for an export
        target."""
        return "pedl" if self._monitor is None else self._monitor.name

    @property
    def mon_params(self):
        """Tuple of Parameters for the monitor identities."""
        return self._mon_params

    @property
    def syncset(self):
        """The synchronous set this Target belongs to."""
        return self._monitor.syncset

    @property
    def mon_params_w_types(self):
        """Iterable of (Parameter, SmedlType) pairs for the monitor
        identities."""
        return zip(self._mon_params, self._monitor.params)

    @property
    def connection(self):
        """The Connection this Target is attached to (None until set)."""
        return self._connection

    @connection.setter
    def connection(self, conn):
        """Attach this Target to its Connection; allowed exactly once."""
        if self._connection is not None:
            raise InternalError("Target already added to a connection")
        self._connection = conn

    def __eq__(self, other):
        """Targets compare equal when they share a destination monitor
        (parameters are ignored)."""
        if isinstance(other, Target):
            return self._monitor == other._monitor
        return NotImplemented
class TargetEvent(Target):
    """A connection target that is an imported event of a destination
    monitor. (Distinct from the "target system," which is the system being
    monitored.)"""

    def __init__(self, dest_monitor, dest_event, mon_params, event_params):
        """Initialize this target event.

        dest_monitor - DeclaredMonitor for the destination monitor
        dest_event - Name of the destination monitor's imported event
        mon_params - List of Parameters for the monitor identities
        event_params - List of Parameters for the event
        """
        super().__init__('event', dest_monitor, mon_params)
        self._event = dest_event
        # Claim each event Parameter for this Target.
        for parameter in event_params:
            parameter.target = self
        self._event_params = tuple(event_params)

    @property
    def event(self):
        """Name of the destination event."""
        return self._event

    @property
    def event_params(self):
        """Tuple of Parameters for the destination event."""
        return self._event_params

    @property
    def event_params_w_types(self):
        """Iterable of (Parameter, SmedlType) pairs for the destination
        event."""
        param_types = self._monitor.spec.imported_events[self._event]
        return zip(self._event_params, param_types)

    def __eq__(self, other):
        """Equal when the other is a TargetEvent for the same monitor and
        event (parameters ignored)."""
        if not isinstance(other, Target):
            return NotImplemented
        return (isinstance(other, TargetEvent)
                and super().__eq__(other)
                and self._event == other._event)

    def __repr__(self):
        mon_param_str = ', '.join(str(p) for p in self._mon_params)
        ev_param_str = ', '.join(str(p) for p in self._event_params)
        return 'TargetEvent:{}[{}].{}({})'.format(
            self._monitor.name, mon_param_str, self._event, ev_param_str)
class TargetCreation(Target):
    """A monitor creation target. Note that this is not the same as the "target
    system," which is the system being monitored."""

    def __init__(self, dest_monitor, mon_params, state_vars):
        """Initialize this target creation event.

        dest_monitor - DeclaredMonitor for the monitor to be created
        mon_params - List of Parameters for the monitor identities. None may be
          wildcard parameters.
        state_vars - Dict containing any state variable initializations, where
          keys are state variable names and values are Parameters (which may
          not be wildcards).
        """
        super().__init__('creation', dest_monitor, mon_params)
        for param in state_vars.values():
            param.target = self
        self._state_vars = state_vars

    @property
    def state_vars(self):
        """Get a read-only mapping of state var names to Parameters"""
        # MappingProxyType keeps callers from mutating our dict.
        return types.MappingProxyType(self._state_vars)

    def __eq__(self, other):
        """Return True if the other is the same target (ignoring parameters)
        as ourselves"""
        if not isinstance(other, Target):
            return NotImplemented
        if not isinstance(other, TargetCreation):
            return False
        return super().__eq__(other)

    def __repr__(self):
        mon_param_str = ', '.join([str(p) for p in self._mon_params])
        # BUG FIX: must iterate items(), not values() — the loop unpacks
        # (key, value) pairs, so iterating values() raised at format time.
        state_var_str = ', '.join(
            [k + '=' + str(v) for k, v in self._state_vars.items()])
        return ('TargetCreation:' + self._monitor.name + '(' + mon_param_str +
                ', ' + state_var_str + ')')
# TODO Should be renamed to TargetPEDL
class TargetExport(Target):
"""An event export target, for events that are exported out of a
synchronous set back to the target system. Note that "export target" and
"target system" are two different senses of the word "target," the former
being a connection target and the latter being the target of monitoring."""
    def __init__(self, exported_event, event_params):
        """Initialize this export target with the given ExportedEvent and
        iterable of Parameters"""
        # No destination monitor: exported events go back out of the
        # synchronous set.
        super().__init__('export', None, [])
        self._exported_event = exported_event
        for param in event_params:
            param.target = self
        self._event_params = tuple(event_params)

    @property
    def exported_event(self):
        """Get the ExportedEvent this target forwards to."""
        return self._exported_event

    @property
    def event(self):
        """Get the name for the exported event"""
        return self._exported_event.name

    @property
    def syncset(self):
        """Get the syncset that this Target belongs to"""
        # Overrides Target.syncset, which reads self._monitor (None here).
        return self._exported_event.syncset

    @property
    def event_params(self):
        """Get a tuple of Parameters for the exported event"""
        return self._event_params

    @property
    def event_params_w_types(self):
        """Get a sequence of (Parameter, SmedlType) tuples for the exported
        event"""
        return zip(self._event_params,
                   self._exported_event.params)
def __eq__(self, other):
"""Return true if the other is the same target (ignoring parameters)
as ourselves"""
if not isinstance(other, Target):
return NotImplemented
return self._exported_event == | |
<reponame>sdnit-se/intersight-python
# coding: utf-8
"""
Cisco Intersight OpenAPI specification.
The Cisco Intersight OpenAPI specification.
OpenAPI spec version: 1.0.9-1461
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class StorageStorageArrayUtilization(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'object_type': 'str',
'available': 'int',
'free': 'int',
'total': 'int',
'used': 'int',
'data_reduction': 'float',
'parity': 'float',
'provisioned': 'int',
'shared': 'int',
'snapshot': 'int',
'system': 'int',
'thin_provisioned': 'float',
'total_reduction': 'float',
'volume': 'int'
}
attribute_map = {
'object_type': 'ObjectType',
'available': 'Available',
'free': 'Free',
'total': 'Total',
'used': 'Used',
'data_reduction': 'DataReduction',
'parity': 'Parity',
'provisioned': 'Provisioned',
'shared': 'Shared',
'snapshot': 'Snapshot',
'system': 'System',
'thin_provisioned': 'ThinProvisioned',
'total_reduction': 'TotalReduction',
'volume': 'Volume'
}
    def __init__(self, object_type=None, available=None, free=None, total=None, used=None, data_reduction=None, parity=None, provisioned=None, shared=None, snapshot=None, system=None, thin_provisioned=None, total_reduction=None, volume=None):
        """
        StorageStorageArrayUtilization - a model defined in Swagger

        All attributes default to None ("not set"); only arguments that are
        not None are assigned, each through its generated property setter.
        """
        # Private backing fields for the generated properties.
        self._object_type = None
        self._available = None
        self._free = None
        self._total = None
        self._used = None
        self._data_reduction = None
        self._parity = None
        self._provisioned = None
        self._shared = None
        self._snapshot = None
        self._system = None
        self._thin_provisioned = None
        self._total_reduction = None
        self._volume = None

        # Assign only the values that were provided.
        if object_type is not None:
            self.object_type = object_type
        if available is not None:
            self.available = available
        if free is not None:
            self.free = free
        if total is not None:
            self.total = total
        if used is not None:
            self.used = used
        if data_reduction is not None:
            self.data_reduction = data_reduction
        if parity is not None:
            self.parity = parity
        if provisioned is not None:
            self.provisioned = provisioned
        if shared is not None:
            self.shared = shared
        if snapshot is not None:
            self.snapshot = snapshot
        if system is not None:
            self.system = system
        if thin_provisioned is not None:
            self.thin_provisioned = thin_provisioned
        if total_reduction is not None:
            self.total_reduction = total_reduction
        if volume is not None:
            self.volume = volume
    @property
    def object_type(self) -> str:
        """
        Gets the object_type of this StorageStorageArrayUtilization.
        The concrete type of this complex type. The ObjectType property must be set explicitly by API clients when the type is ambiguous. In all other cases, the ObjectType is optional. The type is ambiguous when a managed object contains an array of nested documents, and the documents in the array are heterogeneous, i.e. the array can contain nested documents of different types.

        :return: The object_type of this StorageStorageArrayUtilization.
        :rtype: str
        """
        return self._object_type

    @object_type.setter
    def object_type(self, object_type: str) -> None:
        """
        Sets the object_type of this StorageStorageArrayUtilization.
        The concrete type of this complex type. The ObjectType property must be set explicitly by API clients when the type is ambiguous. In all other cases, the ObjectType is optional. The type is ambiguous when a managed object contains an array of nested documents, and the documents in the array are heterogeneous, i.e. the array can contain nested documents of different types.

        :param object_type: The object_type of this StorageStorageArrayUtilization.
        :type: str
        """
        self._object_type = object_type

    @property
    def available(self) -> int:
        """
        Gets the available of this StorageStorageArrayUtilization.
        Total consumable storage capacity represented in bytes. System may reserve some space for internal purpose which is excluded from total capacity.

        :return: The available of this StorageStorageArrayUtilization.
        :rtype: int
        """
        return self._available

    @available.setter
    def available(self, available: int) -> None:
        """
        Sets the available of this StorageStorageArrayUtilization.
        Total consumable storage capacity represented in bytes. System may reserve some space for internal purpose which is excluded from total capacity.

        :param available: The available of this StorageStorageArrayUtilization.
        :type: int
        """
        self._available = available

    @property
    def free(self) -> int:
        """
        Gets the free of this StorageStorageArrayUtilization.
        Unused space available for user to consume, represented in bytes.

        :return: The free of this StorageStorageArrayUtilization.
        :rtype: int
        """
        return self._free

    @free.setter
    def free(self, free: int) -> None:
        """
        Sets the free of this StorageStorageArrayUtilization.
        Unused space available for user to consume, represented in bytes.

        :param free: The free of this StorageStorageArrayUtilization.
        :type: int
        """
        self._free = free
    @property
    def total(self) -> int:
        """
        Gets the total of this StorageStorageArrayUtilization.
        Total storage capacity, represented in bytes. It is set by the component manufacture.

        :return: The total of this StorageStorageArrayUtilization.
        :rtype: int
        """
        return self._total

    @total.setter
    def total(self, total: int) -> None:
        """
        Sets the total of this StorageStorageArrayUtilization.
        Total storage capacity, represented in bytes. It is set by the component manufacture.

        :param total: The total of this StorageStorageArrayUtilization.
        :type: int
        """
        self._total = total

    @property
    def used(self) -> int:
        """
        Gets the used of this StorageStorageArrayUtilization.
        Used or consumed storage capacity, represented in bytes.

        :return: The used of this StorageStorageArrayUtilization.
        :rtype: int
        """
        return self._used

    @used.setter
    def used(self, used: int) -> None:
        """
        Sets the used of this StorageStorageArrayUtilization.
        Used or consumed storage capacity, represented in bytes.

        :param used: The used of this StorageStorageArrayUtilization.
        :type: int
        """
        self._used = used
    @property
    def data_reduction(self) -> float:
        """
        Gets the data_reduction of this StorageStorageArrayUtilization.
        Ratio of mapped sectors within a volume versus the amount of physical space the data occupies after data compression and deduplication. The data reduction ratio does not include thin provisioning savings. For example, a data reduction ratio of 5.0 means that for every 5 MB the host writes to the array, 1 MB is stored on the array's flash modules.

        :return: The data_reduction of this StorageStorageArrayUtilization.
        :rtype: float
        """
        return self._data_reduction

    @data_reduction.setter
    def data_reduction(self, data_reduction: float) -> None:
        """
        Sets the data_reduction of this StorageStorageArrayUtilization.
        Ratio of mapped sectors within a volume versus the amount of physical space the data occupies after data compression and deduplication. The data reduction ratio does not include thin provisioning savings. For example, a data reduction ratio of 5.0 means that for every 5 MB the host writes to the array, 1 MB is stored on the array's flash modules.

        :param data_reduction: The data_reduction of this StorageStorageArrayUtilization.
        :type: float
        """
        self._data_reduction = data_reduction

    @property
    def parity(self) -> float:
        """
        Gets the parity of this StorageStorageArrayUtilization.
        Percentage of data that is fully protected. The percentage value will drop below 100% if the data is not fully protected.

        :return: The parity of this StorageStorageArrayUtilization.
        :rtype: float
        """
        return self._parity

    @parity.setter
    def parity(self, parity: float) -> None:
        """
        Sets the parity of this StorageStorageArrayUtilization.
        Percentage of data that is fully protected. The percentage value will drop below 100% if the data is not fully protected.

        :param parity: The parity of this StorageStorageArrayUtilization.
        :type: float
        """
        self._parity = parity

    @property
    def provisioned(self) -> int:
        """
        Gets the provisioned of this StorageStorageArrayUtilization.
        Total provisioned storage capacity in Pure FlashArray, represented in bytes.

        :return: The provisioned of this StorageStorageArrayUtilization.
        :rtype: int
        """
        return self._provisioned

    @provisioned.setter
    def provisioned(self, provisioned: int) -> None:
        """
        Sets the provisioned of this StorageStorageArrayUtilization.
        Total provisioned storage capacity in Pure FlashArray, represented in bytes.

        :param provisioned: The provisioned of this StorageStorageArrayUtilization.
        :type: int
        """
        self._provisioned = provisioned
    @property
    def shared(self) -> int:
        """
        Gets the shared of this StorageStorageArrayUtilization.
        Physical space occupied by deduplicated data, represented in bytes. The space is shared with other volumes and snapshots as a result of data deduplication.

        :return: The shared of this StorageStorageArrayUtilization.
        :rtype: int
        """
        return self._shared

    @shared.setter
    def shared(self, shared: int) -> None:
        """
        Sets the shared of this StorageStorageArrayUtilization.
        Physical space occupied by deduplicated data, represented in bytes. The space is shared with other volumes and snapshots as a result of data deduplication.

        :param shared: The shared of this StorageStorageArrayUtilization.
        :type: int
        """
        self._shared = shared

    @property
    def snapshot(self) -> int:
        """
        Gets the snapshot of this StorageStorageArrayUtilization.
        Physical space occupied by the snapshots, represented in bytes.

        :return: The snapshot of this StorageStorageArrayUtilization.
        :rtype: int
        """
        return self._snapshot

    @snapshot.setter
    def snapshot(self, snapshot: int) -> None:
        """
        Sets the snapshot of this StorageStorageArrayUtilization.
        Physical space occupied by the snapshots, represented in bytes.

        :param snapshot: The snapshot of this StorageStorageArrayUtilization.
        :type: int
        """
        self._snapshot = snapshot

    @property
    def system(self) -> int:
        """
        Gets the system of this StorageStorageArrayUtilization.
        Physical space occupied by internal array metadata, represented in bytes.

        :return: The system of this StorageStorageArrayUtilization.
        :rtype: int
        """
        return self._system
@system.setter
| |
Concatenate(funcs, self.axis)
    def evalf(self, *arrays):
        # Concatenate the evaluated argument arrays along self.axis. All
        # axis indices are offset by +1 for the leading points axis that
        # evaluated arrays carry.
        # NOTE(review): builtins.max(..., key=len) picks the array with the
        # longest leading axis — presumably the broadcast length; confirm.
        shape = list(builtins.max(arrays, key=len).shape)
        shape[self.axis+1] = builtins.sum(array.shape[self.axis+1] for array in arrays)
        retval = numpy.empty(shape, dtype=self.dtype)
        n0 = 0
        for array in arrays:
            # Copy each chunk into its slice of the output.
            n1 = n0 + array.shape[self.axis+1]
            retval[(slice(None),)*(self.axis+1)+(slice(n0,n1),)] = array
            n0 = n1
        assert n0 == retval.shape[self.axis+1]
        return retval

    @property
    def blocks(self):
        # Shift each chunk's block indices along the concatenation axis by
        # the cumulative length of the preceding chunks.
        return _concatblocks(((ind[:self.axis], ind[self.axis+1:]), (ind[self.axis]+n, f))
            for n, func in zip(util.cumsum(func.shape[self.axis] for func in self.funcs), self.funcs)
            for ind, f in func.blocks)

    def _get(self, i, item):
        # Simplification hook for Get (single-index selection); implicitly
        # returns None when no simplification applies.
        if i != self.axis:
            # Selecting on a different axis: push Get into every chunk.
            axis = self.axis - (self.axis > i)
            return Concatenate([Get(f, i, item) for f in self.funcs], axis=axis)
        if item.isconstant:
            # Constant index on the concatenation axis: locate the chunk
            # that holds it.
            item, = item.eval()
            for f in self.funcs:
                if item < f.shape[i]:
                    return Get(f, i, item)
                item -= f.shape[i]
            raise Exception
    def _derivative(self, var, seen):
        # Differentiation and concatenation commute: differentiate chunk-wise.
        funcs = [derivative(func, var, seen) for func in self.funcs]
        return concatenate(funcs, axis=self.axis)

    def _multiply(self, other):
        # Multiply each chunk by the matching slice of `other`.
        funcs = [Multiply([func, Take(other, s, self.axis)]) for s, func in self._withslices]
        return Concatenate(funcs, self.axis)

    def _add(self, other):
        if isinstance(other, Concatenate) and self.axis == other.axis:
            if [f1.shape[self.axis] for f1 in self.funcs] == [f2.shape[self.axis] for f2 in other.funcs]:
                # Chunk layouts match: add chunk-wise.
                funcs = [add(f1, f2) for f1, f2 in zip(self.funcs, other.funcs)]
            else:
                if isarray(self.shape[self.axis]):
                    raise NotImplementedError
                # Chunk layouts differ: add every overlapping pair of
                # chunks, masking each operand down to the overlap region.
                funcs = []
                beg1 = 0
                for func1 in self.funcs:
                    end1 = beg1 + func1.shape[self.axis]
                    beg2 = 0
                    for func2 in other.funcs:
                        end2 = beg2 + func2.shape[self.axis]
                        if end1 > beg2 and end2 > beg1:
                            mask = numpy.zeros(self.shape[self.axis], dtype=bool)
                            mask[builtins.max(beg1, beg2):builtins.min(end1, end2)] = True
                            funcs.append(Add([Mask(func1, mask[beg1:end1], self.axis), Mask(func2, mask[beg2:end2], self.axis)]))
                        beg2 = end2
                    beg1 = end1
        else:
            # Fallback: slice `other` to match our chunks.
            funcs = [Add([func, Take(other, s, self.axis)]) for s, func in self._withslices]
        return Concatenate(funcs, self.axis)
    def _sum(self, axis):
        funcs = [Sum(func, axis) for func in self.funcs]
        if axis == self.axis:
            # Summing over the concatenation axis collapses the chunks:
            # pairwise-reduce the per-chunk sums with Add.
            while len(funcs) > 1:
                funcs[-2:] = Add(funcs[-2:]),
            return funcs[0]
        return Concatenate(funcs, self.axis - (axis<self.axis))

    def _transpose(self, axes):
        funcs = [Transpose(func, axes) for func in self.funcs]
        # The concatenation axis moves to wherever `axes` sends it.
        axis = axes.index(self.axis)
        return Concatenate(funcs, axis)

    def _insertaxis(self, axis, length):
        funcs = [InsertAxis(func, axis, length) for func in self.funcs]
        return Concatenate(funcs, self.axis+(axis<=self.axis))

    def _takediag(self, axis, rmaxis):
        if self.axis == axis:
            funcs = [TakeDiag(Take(func, s, rmaxis), axis, rmaxis) for s, func in self._withslices]
            return Concatenate(funcs, axis=axis)
        elif self.axis == rmaxis:
            # NOTE(review): this branch also returns axis=axis, identical to
            # the branch above — verify that is intended.
            funcs = [TakeDiag(Take(func, s, axis), axis, rmaxis) for s, func in self._withslices]
            return Concatenate(funcs, axis=axis)
        else:
            return Concatenate([TakeDiag(f, axis, rmaxis) for f in self.funcs], axis=self.axis-(self.axis>rmaxis))
    def _take(self, indices, axis):
        if axis != self.axis:
            return Concatenate([Take(func, indices, axis) for func in self.funcs], self.axis)
        if not indices.isconstant:
            # Non-constant indices: no simplification.
            return
        indices, = indices.eval()
        if not numpy.logical_and(numpy.greater_equal(indices, 0), numpy.less(indices, self.shape[axis])).all():
            # Out-of-range (e.g. negative) indices: leave to the generic path.
            return
        # Map every index to the chunk it falls in, then split the index
        # array into maximal runs that stay inside a single chunk.
        ifuncs = numpy.hstack([numpy.repeat(ifunc,func.shape[axis]) for ifunc, func in enumerate(self.funcs)])[indices]
        splits, = numpy.nonzero(numpy.diff(ifuncs) != 0)
        funcs = []
        for i, j in zip(numpy.hstack([0, splits+1]), numpy.hstack([splits+1, len(indices)])):
            ifunc = ifuncs[i]
            assert numpy.equal(ifuncs[i:j], ifunc).all()
            # Rebase the run's indices to be chunk-local.
            offset = builtins.sum(func.shape[axis] for func in self.funcs[:ifunc])
            funcs.append(Take(self.funcs[ifunc], indices[i:j] - offset, axis))
        if len(funcs) == 1:
            return funcs[0]
        return Concatenate(funcs, axis=axis)

    def _power(self, n):
        return Concatenate([Power(func, Take(n, s, self.axis)) for s, func in self._withslices], self.axis)

    def _diagonalize(self, axis, newaxis):
        if self.axis != axis:
            return Concatenate([Diagonalize(func, axis, newaxis) for func in self.funcs], self.axis+(newaxis<=self.axis))

    def _mask(self, maskvec, axis):
        if axis != self.axis:
            return Concatenate([Mask(func,maskvec,axis) for func in self.funcs], self.axis)
        if all(s.isconstant for s, func in self._withslices):
            # Split the mask vector per chunk using the constant slices.
            return Concatenate([Mask(func, maskvec[s.eval()[0]], axis) for s, func in self._withslices], axis)

    def _unravel(self, axis, shape):
        if axis != self.axis:
            return Concatenate([Unravel(func, axis, shape) for func in self.funcs], self.axis+(self.axis>axis))
class Interpolate(Array):
    """Interpolate uniformly spaced data; stepwise for now.

    Wraps numpy.interp: `x` is a scalar argument, `xp`/`fp` the sample
    points and values, `left`/`right` the out-of-range fill values.
    """
    __slots__ = 'xp', 'fp', 'left', 'right'

    @types.apply_annotations
    def __init__(self, x:asarray, xp:types.frozenarray, fp:types.frozenarray, left:types.strictfloat=None, right:types.strictfloat=None):
        assert xp.ndim == fp.ndim == 1
        if not numpy.greater(numpy.diff(xp), 0).all():
            warnings.warn('supplied x-values are non-increasing')
        assert x.ndim == 0
        self.xp = xp
        self.fp = fp
        self.left = left
        self.right = right
        # BUG FIX: was `super.__init__(...)`, which accesses __init__ on the
        # builtin `super` type itself and never initializes Array. The
        # zero-argument call super() is required.
        super().__init__(args=[x], shape=(), dtype=float)

    def evalf(self, x):
        return numpy.interp(x, self.xp, self.fp, self.left, self.right)
class Cross(Array):
    """Cross product of func1 and func2 along `axis` (which must have
    length 3)."""
    __slots__ = 'func1', 'func2', 'axis'
    __cache__ = 'simplified',

    @types.apply_annotations
    def __init__(self, func1:asarray, func2:asarray, axis:types.strictint):
        assert func1.shape == func2.shape
        assert 0 <= axis < func1.ndim and func2.shape[axis] == 3
        self.func1 = func1
        self.func2 = func2
        self.axis = axis
        super().__init__(args=(func1,func2), shape=func1.shape, dtype=_jointdtype(func1.dtype, func2.dtype))

    @property
    def simplified(self):
        # Expand explicitly: (a x b)_i = a_j*b_k - a_k*b_j with (i, j, k)
        # cyclic, realized via the index permutations below.
        i = types.frozenarray([1, 2, 0])
        j = types.frozenarray([2, 0, 1])
        return subtract(take(self.func1, i, self.axis) * take(self.func2, j, self.axis),
                        take(self.func2, i, self.axis) * take(self.func1, j, self.axis)).simplified

    def evalf(self, a, b):
        # The leading axis is the points axis added during evaluation.
        assert a.ndim == b.ndim == self.ndim+1
        return numpy.cross(a, b, axis=self.axis+1)

    def _derivative(self, var, seen):
        # Product rule: d(a x b) = a x db - b x da.
        ext = (...,)+(_,)*var.ndim
        return cross(self.func1[ext], derivative(self.func2, var, seen), axis=self.axis) \
             - cross(self.func2[ext], derivative(self.func1, var, seen), axis=self.axis)
class Determinant(Array):
    """Determinant over the last two (square) axes of `func`."""
    __slots__ = 'func',
    __cache__ = 'simplified',

    @types.apply_annotations
    def __init__(self, func:asarray):
        assert isarray(func) and func.ndim >= 2 and func.shape[-1] == func.shape[-2]
        self.func = func
        super().__init__(args=[func], shape=func.shape[:-2], dtype=func.dtype)

    @property
    def simplified(self):
        func = self.func.simplified
        # Give the operand a chance to supply a closed-form determinant.
        retval = func._determinant()
        if retval is not None:
            assert retval.shape == self.shape
            return retval.simplified
        return Determinant(func)

    def evalf(self, arr):
        # ndim+3 = self.ndim + points axis + the two matrix axes.
        assert arr.ndim == self.ndim+3
        return numpy.linalg.det(arr)

    def _derivative(self, var, seen):
        # Jacobi's formula: d(det F) = det F * tr(inv(F) * dF).
        Finv = swapaxes(inverse(self.func), -2, -1)
        G = derivative(self.func, var, seen)
        ext = (...,)+(_,)*var.ndim
        return self[ext] * sum(Finv[ext] * G, axis=[-2-var.ndim,-1-var.ndim])
class Multiply(Array):
    """Elementwise product of exactly two equally-shaped operands, stored
    as a frozen multiset (order-insensitive)."""
    __slots__ = 'funcs',
    __cache__ = 'simplified',

    @types.apply_annotations
    def __init__(self, funcs:types.frozenmultiset[asarray]):
        self.funcs = funcs
        func1, func2 = funcs
        assert func1.shape == func2.shape
        super().__init__(args=self.funcs, shape=func1.shape, dtype=_jointdtype(func1.dtype,func2.dtype))

    def edit(self, op):
        return Multiply([op(func) for func in self.funcs])

    @property
    def simplified(self):
        func1, func2 = [func.simplified for func in self.funcs]
        # a*a -> a**2
        if func1 == func2:
            return power(func1, 2).simplified
        # Let either operand absorb the other.
        retval = func1._multiply(func2)
        if retval is not None:
            assert retval.shape == self.shape
            return retval.simplified
        retval = func2._multiply(func1)
        if retval is not None:
            assert retval.shape == self.shape
            return retval.simplified
        return Multiply([func1, func2])

    def evalf(self, arr1, arr2):
        return arr1 * arr2

    def _sum(self, axis):
        # sum_axis(a*b) is a contraction -> Dot.
        func1, func2 = self.funcs
        return Dot([func1, func2], [axis])

    def _get(self, axis, item):
        func1, func2 = self.funcs
        return Multiply([Get(func1, axis, item), Get(func2, axis, item)])

    def _add(self, other):
        func1, func2 = self.funcs
        # a*b + a -> a*(b+1)
        if other == func1:
            return Multiply([func1, Add([func2, ones_like(func2)])])
        if other == func2:
            return Multiply([func2, Add([func1, ones_like(func1)])])
        # a*b + a*c -> a*(b+c): factor out a common operand.
        if isinstance(other, Multiply) and not self.funcs.isdisjoint(other.funcs):
            f = next(iter(self.funcs & other.funcs))
            return Multiply([f, Add(self.funcs + other.funcs - [f,f])])

    def _determinant(self):
        func1, func2 = self.funcs
        # Only for 1x1 matrices, where det is multiplicative over the
        # elementwise product.
        if self.shape[-2:] == (1,1):
            return Multiply([Determinant(func1), Determinant(func2)])

    def _product(self):
        func1, func2 = self.funcs
        return Multiply([Product(func1), Product(func2)])

    def _multiply(self, other):
        # Try to merge `other` into either operand.
        func1, func2 = self.funcs
        func1_other = func1._multiply(other)
        if func1_other is not None:
            return Multiply([func1_other, func2])
        func2_other = func2._multiply(other)
        if func2_other is not None:
            return Multiply([func1, func2_other])

    def _derivative(self, var, seen):
        # Product rule.
        func1, func2 = self.funcs
        ext = (...,)+(_,)*var.ndim
        return func1[ext] * derivative(func2, var, seen) \
             + func2[ext] * derivative(func1, var, seen)

    def _takediag(self, axis, rmaxis):
        func1, func2 = self.funcs
        return Multiply([TakeDiag(func1, axis, rmaxis), TakeDiag(func2, axis, rmaxis)])

    def _take(self, index, axis):
        func1, func2 = self.funcs
        return Multiply([Take(func1, index, axis), Take(func2, index, axis)])

    def _power(self, n):
        func1, func2 = self.funcs
        func1pow = func1._power(n)
        func2pow = func2._power(n)
        if func1pow is not None and func2pow is not None:
            return Multiply([func1pow, func2pow])
class Add(Array):
    __slots__ = 'funcs',
    __cache__ = 'simplified',

    @types.apply_annotations
    def __init__(self, funcs:types.frozenmultiset[asarray]):
        # Sum of exactly two equally-shaped operands, stored as a frozen
        # multiset (order-insensitive).
        self.funcs = funcs
        func1, func2 = funcs
        assert func1.shape == func2.shape
        super().__init__(args=self.funcs, shape=func1.shape, dtype=_jointdtype(func1.dtype,func2.dtype))

    def edit(self, op):
        return Add([op(func) for func in self.funcs])

    @property
    def simplified(self):
        func1, func2 = [func.simplified for func in self.funcs]
        # 0 + a -> a
        if iszero(func1):
            return func2
        if iszero(func2):
            return func1
        # a + a -> 2*a
        if func1 == func2:
            return multiply(func1, 2).simplified
        # Let either operand absorb the other.
        retval = func1._add(func2)
        if retval is not None:
            assert retval.shape == self.shape
            return retval.simplified
        retval = func2._add(func1)
        if retval is not None:
            assert retval.shape == self.shape
            return retval.simplified
        return Add([func1, func2])
    def evalf(self, arr1, arr2=None):
        # NOTE(review): the arr2=None default looks vestigial — arr1 + None
        # would raise; confirm both arguments are always supplied.
        return arr1 + arr2

    def _sum(self, axis):
        # Sum distributes over addition.
        return Add([Sum(func, axis) for func in self.funcs])

    def _derivative(self, var, seen):
        func1, func2 = self.funcs
        return derivative(func1, var, seen) + derivative(func2, var, seen)

    def _get(self, axis, item):
        func1, func2 = self.funcs
        return Add([Get(func1, axis, item), Get(func2, axis, item)])

    def _takediag(self, axis, rmaxis):
        func1, func2 = self.funcs
        return Add([TakeDiag(func1, axis, rmaxis), TakeDiag(func2, axis, rmaxis)])

    def _take(self, index, axis):
        func1, func2 = self.funcs
        return Add([Take(func1, index, axis), Take(func2, index, axis)])

    def _add(self, other):
        # Try to merge `other` into either operand.
        func1, func2 = self.funcs
        func1_other = func1._add(other)
        if func1_other is not None:
            return Add([func1_other, func2])
        func2_other = func2._add(other)
        if func2_other is not None:
            return Add([func1, func2_other])
def _mask(self, maskvec, axis):
func1, func2 = self.funcs
return Add([Mask(func1, maskvec, axis), | |
description = "No categories found."
embed = discord.Embed(
title=f"{ctx.guild.name} whitelisted channels",
description= description
)
await ctx.send(embed=embed)
else:
embed=discord.Embed(
description="Command not found. Try using `add` to add channels and `remove` to remove channels.",
color=0xd80000,
)
await ctx.send(embed=embed)
@bots.error
async def bots_error(ctx, error):
    """Error handler for `bots`: tell the user when the channel argument is
    bad; log anything else to the console."""
    if not isinstance(error, commands.BadArgument):
        print(error)
        return
    embed = discord.Embed(
        description="No channel was found. Try using #channel.",
        color=0xd80000,
    )
    await ctx.send(embed=embed)
@client.command()
async def checkchannel(ctx, channel: discord.TextChannel):
    """Set (or update) the invite-check channel for this guild.

    The mapping is persisted in checkinvites.txt as lines of
    "<guild id> : <channel id>".
    """
    file_dir = 'checkinvites.txt'
    # Create the storage file on first use. (Fixed: the handles were
    # previously left open; use context managers throughout.)
    if file_dir not in os.listdir():
        with open(file_dir, 'w'):
            pass
    with open(file_dir) as f:
        channels = f.read()
    # NOTE(review): this substring test could false-positive if the guild id
    # appears inside another line's channel id — confirm acceptable.
    if str(ctx.guild.id) in channels:
        # Guild already present: rewrite its line in place.
        channels_list = channels.split('\n')
        for i, line in enumerate(channels_list):
            if line.split(' : ')[0] == str(ctx.guild.id):
                channels_list[i] = f"{ctx.guild.id} : {channel.id}"
        string = '\n'.join(channels_list)
    else:
        # New guild: append a line.
        string = f"{channels}\n{ctx.guild.id} : {channel.id}"
    with open(file_dir, 'w') as f:
        f.write(string)
    embed = discord.Embed(
        description=f"Invite check channel changed to {channel.mention}.",
        color=0x14eb14
    )
    await ctx.send(embed=embed)
@client.command()
async def ids(ctx):
    """List every category in the guild with its ID, five per embed field."""
    categories = ctx.guild.categories
    chunks = [categories[i:i + 5] for i in range(0, len(categories), 5)]
    embed = discord.Embed(
        title=f"{ctx.guild.name}'s categories",
        colour=0xfffafa,
    )
    start = 1
    for chunk in chunks:
        value = ''.join(f"**{category.name}** - `{category.id}`\n"
                        for category in chunk)
        end = start + len(chunk) - 1
        # Single-entry chunks get a single-number heading.
        if end != start:
            embed.add_field(name=f"Categories {start} - {end}", value=value, inline=False)
        else:
            embed.add_field(name=f"Categories {start}", value=value, inline=False)
        start = end + 1
    await ctx.send(embed=embed)
@client.command()
async def server(ctx, cmd, guild = None):
    """Owner-only maintenance of the guild whitelist stored in servers.txt.

    cmd - "add", "remove"/"rm", or "list"
    guild - guild id for add/remove
    """
    owners = config()["owners"]
    if str(ctx.author.id) not in owners:
        return
    fn = "servers.txt"
    guild_id = str(guild) if guild is not None else None
    # Create the whitelist file on first use. (Fixed: file handles were
    # previously left open; use context managers throughout.)
    if fn not in os.listdir():
        with open(fn, 'w'):
            pass
    if cmd == "add" and guild is not None:
        with open(fn) as f:
            guild_ids = f.read().split('\n')
        if guild_id in guild_ids:
            embed = discord.Embed(
                description=f"{guild_id} is already in the whitelist.",
                color=0xd80000
            )
            await ctx.send(embed=embed)
        else:
            with open(fn, 'a') as f:
                f.write(guild_id + '\n')
            embed = discord.Embed(
                description=f"{guild_id} is added into the whitelist.",
                color=0x14eb14
            )
            await ctx.send(embed=embed)
    elif cmd in ['rm', 'remove'] and guild is not None:
        with open(fn) as f:
            guild_ids = f.read().split('\n')
        if guild_id not in guild_ids:
            embed = discord.Embed(
                description="No guild was found. Try using the exact guildid.",
                color=0xd80000
            )
            await ctx.send(embed=embed)
        else:
            guild_ids.remove(guild_id)
            with open(fn, 'w') as f:
                f.write('\n'.join(guild_ids) + '\n')
            embed = discord.Embed(
                description=f"{guild_id} was removed from the whitelist.",
                color=0x14eb14
            )
            await ctx.send(embed=embed)
    elif cmd == "list":
        with open(fn) as f:
            guild_ids = f.read().split('\n')
        # BUG FIX: count only real entries — splitting on '\n' leaves empty
        # strings (e.g. after the trailing newline) that inflated the count.
        valid_servers = len([g for g in guild_ids if g != ""])
        description = ""
        for gd_id in guild_ids:
            if gd_id != "":
                guilds = client.get_guild(int(gd_id))
                description += f"・{guilds} `{gd_id}`\n"
        if description == "":
            description = "No guilds found."
        if len(description) <= 1024:
            embed = discord.Embed(
                title=f"Whitelisted guilds ; {valid_servers} servers",
                description=description,
                colour=0xfffafa,
            )
            await ctx.send(embed=embed)
        else:
            # Too long for one embed: split the listing in half.
            lines = description.split('\n')
            half = len(lines) // 2
            embed1 = discord.Embed(
                title=f"Whitelisted guilds ; {valid_servers} servers",
                description='\n'.join(lines[:half]),
                colour=0xfffafa,
            )
            embed2 = discord.Embed(
                title=f"Whitelisted guilds ; {valid_servers} servers",
                description='\n'.join(lines[half:]),
                colour=0xfffafa,
            )
            await ctx.send(embed=embed1)
            await ctx.send(embed=embed2)
    else:
        embed = discord.Embed(
            description="Command not found. Try using `add` to add guilds and `remove` to remove guilds.",
            color=0xd80000,
        )
        await ctx.send(embed=embed)
@server.error
async def server_error(ctx, error):
    """Error handler for `server`: report bad guild arguments to the user;
    log anything else to the console."""
    if not isinstance(error, commands.BadArgument):
        print(error)
        return
    embed = discord.Embed(
        description="No guild was found. Try using the guild id.",
        color=0xd80000,
    )
    await ctx.send(embed=embed)
@client.command()
async def guilds(ctx):
    """Owner-only: list every guild the bot is currently connected to."""
    owners = config()["owners"]
    if str(ctx.author.id) not in owners:
        # Silently ignore non-owners.
        return
    async with ctx.typing():
        await asyncio.sleep(1)
        connected = list(client.guilds)
        # The leading space reproduces the original output format exactly.
        description = " " + "".join(
            f"・{g.name} `{g.id}`\n" for g in connected
        )
        embed = discord.Embed(
            title=f"Connected on {str(len(connected))} servers:",
            description=description,
            colour=0xfffafa
        )
        await ctx.send(embed=embed)
@client.command()
async def checkinvites(ctx):
    """Deprecated alias hint: point users at the ``check`` command."""
    current_prefix = config()["prefix"]
    embed = discord.Embed(
        description=f"Try running `{current_prefix}check` instead!",
        colour=0x2f3136,
    )
    await ctx.send(embed=embed)
@client.command()
@commands.guild_only()
async def prefix(ctx):
    """Show the bot's current command prefix (guild channels only)."""
    current_prefix = config()["prefix"]
    embed = discord.Embed(
        description=f"Current prefix is `{current_prefix}`",
        colour=0xfffafa,
    )
    await ctx.send(embed=embed)
@client.command()
async def invid(ctx, invite: discord.Invite):
    """Owner-only: resolve an invite and show its code, guild and guild id."""
    owners = config()["owners"]
    if str(ctx.author.id) not in owners:
        return
    resolved = await client.fetch_invite(invite)
    target_guild = resolved.guild
    embed = discord.Embed(
        title="Invite information",
        description=f"Code ⸝⸝ {resolved.code}\nServer ⸝⸝ {target_guild}\nServer ID ⸝⸝ {target_guild.id}",
        colour=0xfffafa,
    )
    embed.set_thumbnail(url=target_guild.icon_url)
    await ctx.send(embed=embed)
@invid.error
async def invid_error(ctx, error):
    """Error handler for ``invid``: translate argument errors to embeds."""
    if isinstance(error, commands.BadArgument):
        message = "That invite is invalid or has expired."
    elif isinstance(error, commands.MissingRequiredArgument):
        message = "That is not an invite."
    else:
        # Unknown errors go to the console for debugging.
        print(error)
        return
    embed = discord.Embed(
        title="Invalid Invite",
        description=message,
        color=0xe64e43,
    )
    await ctx.send(embed=embed)
@client.command()
async def help(ctx,cmd = None):
owners = config()["owners"]
prefix = config()["prefix"] #{prefix}
if str(ctx.author.id) in owners:
if cmd == "server":
embed=discord.Embed(
title=f"The {cmd} command",
colour=0xfffafa,
)
embed.add_field(name="Category",value="Owner",inline=False)
embed.add_field(name="Description",value=f"Modifies the servers allowed to use {client.user.name}",inline=False)
embed.add_field(name="Usage",value=f"`{prefix}server <add|remove|list> [guildId]`",inline=False)
embed.add_field(name="Aliases",value=f"`{prefix}server`",inline=False)
await ctx.send(embed=embed)
elif cmd == "leaveme":
embed=discord.Embed(
title=f"The {cmd} command",
colour=0xfffafa,
)
embed.add_field(name="Category",value="Owner",inline=False)
embed.add_field(name="Description",value="Leave the guild with the guild id ",inline=False)
embed.add_field(name="Usage",value=f"`{prefix}leaveme [guildId]`",inline=False)
embed.add_field(name="Aliases",value=f"`{prefix}leaveme`",inline=False)
await ctx.send(embed=embed)
elif cmd == "guilds":
embed=discord.Embed(
title=f"The {cmd} command",
colour=0xfffafa,
)
embed.add_field(name="Category",value="Owner",inline=False)
embed.add_field(name="Description",value=f"Guilds that {client.user.name} is in (total)",inline=False)
embed.add_field(name="Usage",value=f"`{prefix}guilds`",inline=False)
embed.add_field(name="Aliases",value=f"`{prefix}guilds`",inline=False)
await ctx.send(embed=embed)
elif cmd == None:
embed=discord.Embed(
title=f"{client.user.name}'s commands",
description=f"Here's the list of available commands. For more specific and detailed help for each commands, use `{prefix}help [command]` like `{prefix}help check`.\nBot prefix: `{prefix}`, <@{client.user.id}>\n\nAvailable Commands: 14",
colour=0xfffafa,
)
embed.add_field(name="Owner",value=f"・`server` - Modifies servers allowed to use {client.user.name}\n・`guilds` - Guilds that {client.user.name} is in (total)\n・`leaveme` - Leave the guild with the guild id\n・`invid` - Shows you the server id of the invite provided",inline=False)
embed.add_field(name="Admin",value="・`bots` - Modifies the bot channel list\n・`ignore` - Modifies the channel blacklist\n・`category` - Modifies the category whitelist\n・`ids` - Displays a list of all category IDs in a server\n・`checkchannel` - Modifies the invite check channel\n・`embed` - A guide on an Embed Creator",inline=False)
embed.add_field(name="Invites",value="・`check` - Checks invites from provided category",inline=False)
embed.add_field(name="Utility",value=f"・`prefix` - Shows you the bot's current prefix\n・`guide` - A guide to {client.user.name}\n・`help` - Displays all available commands\n・`ping` - Checks server latency\n・`stats` - Displays bot information",inline=False)
await ctx.send(embed=embed)
elif cmd == "ignore":
embed=discord.Embed(
title=f"The {cmd} command",
colour=0xfffafa,
)
embed.add_field(name="Category",value="Admin",inline=False)
embed.add_field(name="Description",value="Modifies the channel blacklist",inline=False)
embed.add_field(name="Usage",value=f"`{prefix}ignore <add|remove|list> [#channel]`",inline=False)
embed.add_field(name="Aliases",value=f"`{prefix}ignore`",inline=False)
await ctx.send(embed=embed)
elif cmd == "bots":
embed=discord.Embed(
title=f"The {cmd} command",
colour=0xfffafa,
)
embed.add_field(name="Category",value="Admin",inline=False)
embed.add_field(name="Description",value="Modifies the channel whitelist",inline=False)
embed.add_field(name="Usage",value=f"`{prefix}bots <add|remove|list> [#channel]`",inline=False)
embed.add_field(name="Aliases",value=f"`{prefix}bots`",inline=False)
await ctx.send(embed=embed)
elif cmd == "checkchannel":
embed=discord.Embed(
title=f"The {cmd} command",
colour=0xfffafa,
)
embed.add_field(name="Category",value="Admin",inline=False)
embed.add_field(name="Description",value="Modifies the invite check channel",inline=False)
embed.add_field(name="Usage",value=f"`{prefix}checkchannel #channel`",inline=False)
embed.add_field(name="Aliases",value=f"`{prefix}checkchannel`",inline=False)
await ctx.send(embed=embed)
elif cmd == "category":
embed=discord.Embed(
title=f"The {cmd} command",
colour=0xfffafa,
)
embed.add_field(name="Category",value="Admin",inline=False)
embed.add_field(name="Description",value="Modifies the category whitelist",inline=False)
embed.add_field(name="Usage",value=f"`{prefix}category <add|remove|list> [categoryChannel]`",inline=False)
embed.add_field(name="Aliases",value=f"`{prefix}category`",inline=False)
await ctx.send(embed=embed)
elif cmd == "ids":
embed=discord.Embed(
title=f"The {cmd} command",
colour=0xfffafa,
)
embed.add_field(name="Category",value="Admin",inline=False)
embed.add_field(name="Description",value="Displays a list of all category IDs in a server",inline=False)
embed.add_field(name="Usage",value=f"`{prefix}ids`",inline=False)
embed.add_field(name="Aliases",value=f"`{prefix}ids`",inline=False)
await ctx.send(embed=embed)
elif cmd == "check":
embed=discord.Embed(
title=f"The {cmd} command",
colour=0xfffafa,
)
embed.add_field(name="Category",value="General",inline=False)
embed.add_field(name="Description",value="Checks invites from provided categories. Would check for codeblock errors in the channel by default. ",inline=False)
embed.add_field(name="Usage",value=f"`{prefix}check`",inline=False)
embed.add_field(name="Aliases",value=f"`{prefix}check`",inline=False)
await ctx.send(embed=embed)
elif cmd == "guide":
embed=discord.Embed(
title=f"The {cmd} command",
colour=0xfffafa,
)
embed.add_field(name="Category",value="Utility",inline=False)
embed.add_field(name="Description",value=f"A guide to {client.user.name}",inline=False)
embed.add_field(name="Usage",value=f"`{prefix}guide`",inline=False)
embed.add_field(name="Aliases",value=f"`{prefix}guide`",inline=False)
await ctx.send(embed=embed)
elif cmd == "help":
embed=discord.Embed(
title=f"The {cmd} command",
colour=0xfffafa,
)
embed.add_field(name="Category",value="Utility",inline=False)
embed.add_field(name="Description",value="Displays all available commands, including information about a specific command. ",inline=False)
embed.add_field(name="Usage",value=f"`{prefix}help [check]`",inline=False)
embed.add_field(name="Aliases",value=f"`{prefix}help`",inline=False)
await ctx.send(embed=embed)
elif cmd == "ping":
embed=discord.Embed(
title=f"The {cmd} command",
colour=0xfffafa,
)
embed.add_field(name="Category",value="Utility",inline=False)
embed.add_field(name="Description",value="Checks server latency",inline=False)
embed.add_field(name="Usage",value=f"`{prefix}ping`",inline=False)
embed.add_field(name="Aliases",value=f"`{prefix}ping`",inline=False)
await ctx.send(embed=embed)
elif cmd == "stats":
embed=discord.Embed(
title=f"The {cmd} command",
colour=0xfffafa,
)
embed.add_field(name="Category",value="Utility",inline=False)
embed.add_field(name="Description",value="Displays bot information",inline=False)
embed.add_field(name="Usage",value=f"`{prefix}stats`",inline=False)
embed.add_field(name="Aliases",value=f"`{prefix}stats`",inline=False)
await ctx.send(embed=embed)
elif cmd == "embed":
embed=discord.Embed(
title=f"The {cmd} command",
url="https://robyul.chat/embed-creator",
description="Use [this website](https://robyul.chat/embed-creator)!",
colour=0xfffafa,
)
embed.add_field(name="Category",value="Admin",inline=False)
embed.add_field(name="Description",value="A guide on an Embed Creator",inline=False)
embed.add_field(name="Usage",value=f"Fill in wanted information in the destined boxes, and copy the code part behind ```_embed #channel```. Then {prefix}embed [code].",inline=False)
embed.add_field(name="Aliases",value=f"`{prefix}embed`",inline=False)
await ctx.send(embed=embed)
elif cmd == "invid":
embed = discord.Embed(
title=f"The {cmd} command",
colour=0xfffafa
)
embed.add_field(name="Category",value="Owner",inline=False)
embed.add_field(name="Description",value="Shows you the server id of the invite provided",inline=False)
embed.add_field(name="Usage",value=f"`{prefix}invid [invite]`",inline=False)
embed.add_field(name="Aliases",value=f"`{prefix}invid`",inline=False)
await ctx.send(embed=embed)
else:
embed=discord.Embed(
description=f"**Error**:Topic `{cmd}` not found or doesn't have a help module yet!",
colour=0xe74d3f,
)
await ctx.send(embed=embed)
else:
if cmd == None:
embed=discord.Embed(
title=f"{client.user.name}'s commands",
description=f"Here's the list of available commands. For more specific and detailed help for each commands, use `{prefix}help [command]` like `{prefix}help check`.\nBot prefix: `{prefix}`, <@{<EMAIL>}>\n\nAvailable Commands: 11",
colour=0xfffafa,
)
embed.add_field(name="Admin",value="・`bots` - Modifies the bot channel list\n・`ignore` - Modifies the channel blacklist\n・`category` - Modifies the category whitelist\n・`ids` - Displays a list of all category IDs in a server\n・`checkchannel` - Modifies the invite check channel\n・embed - A guide on an embed creator",inline=False)
embed.add_field(name="Invites",value="・`check` - Checks invites from provided category",inline=False)
embed.add_field(name="Utility",value=f"・`prefix` - Shows you the bot's current prefix\n・`guide` - A guide to {client.user.name}\n・`help` - Displays all available commands\n・`ping` - Checks server latency\n・`stats` - Displays bot information",inline=False)
await ctx.send(embed=embed)
elif cmd == "embed":
embed=discord.Embed(
title=f"The {cmd} command",
url="https://robyul.chat/embed-creator",
description="Use [this website](https://robyul.chat/embed-creator)!",
colour=0xfffafa,
)
embed.add_field(name="Category",value="Admin",inline=False)
embed.add_field(name="Description",value="A guide on an Embed Creator",inline=False)
embed.add_field(name="Usage",value=f"Fill in wanted information in the destined boxes, and copy the | |
* this.scaleOffsetY;
/** @type {number} */
var i = 0;
var l = this.canvases.length;
for (;i < l;i++) {
this.canvases[i].translate(x, y, z);
}
},
/**
 * Multiply the zoom of every stacked canvas by (x, y).
 * Each canvas is scaled with its own replot suppressed; the final
 * translate() call applies a pan correction and triggers one replot.
 * @param {number} x Horizontal scale factor.
 * @param {number} y Vertical scale factor.
 * @param {boolean} dataAndEvents Unused in this implementation.
 * @return {undefined}
 */
scale : function(x, y, dataAndEvents) {
// New cumulative scale factors.
var px = this.scaleOffsetX * x;
var py = this.scaleOffsetY * y;
// Pan correction keeping the accumulated translation consistent
// with the new scale factors.
var ll = this.translateOffsetX * (x - 1) / px;
var dy = this.translateOffsetY * (y - 1) / py;
this.scaleOffsetX = px;
this.scaleOffsetY = py;
/** @type {number} */
var i = 0;
var l = this.canvases.length;
for (;i < l;i++) {
// Third argument true suppresses each canvas's own replot.
this.canvases[i].scale(x, y, true);
}
// Third argument false makes this translate perform the replot.
this.translate(ll, dy, false);
},
/**
* @param {boolean} expectation
* @return {?}
*/
getPos : function(expectation) {
if (expectation || !this.pos) {
return this.pos = $.getPos(this.getElement());
}
return this.pos;
},
/**
* @param {number} arr
* @return {undefined}
*/
clear : function(arr) {
this.canvases[arr || 0].clear();
},
/**
* @param {?} id
* @param {?} callback
* @return {undefined}
*/
path : function(id, callback) {
var me = this.canvases[0].getCtx();
me.beginPath();
callback(me);
me[id]();
me.closePath();
},
/**
 * Build the DOM container that will hold labels.
 * "HTML"/"Native" labels share an absolutely positioned, zero-height
 * <div>; "SVG" labels get an <svg> root wrapping a <g> group that
 * carries the requested id.
 * NOTE(review): any other `type` value falls through and returns
 * undefined — confirm callers never pass other values.
 * @param {string} type Label type: "HTML", "Native" or "SVG".
 * @param {string} idLabel The id attribute for the container element.
 * @param {?} dim Canvas dimensions ({width, height}).
 * @return {?} The created container element.
 */
createLabelContainer : function(type, idLabel, dim) {
/** @type {string} SVG namespace URI */
var NS = "http://www.w3.org/2000/svg";
if (type == "HTML" || type == "Native") {
return $E("div", {
id : idLabel,
style : {
overflow : "visible",
position : "absolute",
top : 0,
left : 0,
width : dim.width + "px",
height : 0
}
});
} else {
if (type == "SVG") {
/** @type {Element} */
var svgContainer = document.createElementNS(NS, "svg:svg");
svgContainer.setAttribute("width", dim.width);
svgContainer.setAttribute("height", dim.height);
/** @type {(CSSStyleDeclaration|null)} */
var style = svgContainer.style;
/** @type {string} */
style.position = "absolute";
/** @type {string} */
style.left = style.top = "0px";
/** @type {Element} */
var labelContainer = document.createElementNS(NS, "svg:g");
labelContainer.setAttribute("width", dim.width);
labelContainer.setAttribute("height", dim.height);
labelContainer.setAttribute("x", 0);
labelContainer.setAttribute("y", 0);
labelContainer.setAttribute("id", idLabel);
svgContainer.appendChild(labelContainer);
return svgContainer;
}
}
}
});
Canvas.Base = {};
/**
 * 2D <canvas> backend for a visualization. Keeps cumulative translate
 * (translateOffsetX/Y) and scale (scaleOffsetX/Y) offsets so clear()
 * and resize() can account for transforms already applied to the
 * context.
 */
Canvas.Base["2D"] = new Class({
  translateOffsetX : 0,
  translateOffsetY : 0,
  scaleOffsetX : 1,
  scaleOffsetY : 1,
  /**
   * @param {?} viz Owning visualization; supplies config and receives
   *     plot()/resize() callbacks.
   * @return {undefined}
   */
  initialize : function(viz) {
    this.viz = viz;
    this.opt = viz.config;
    /** @type {boolean} false until getSize() caches the dimensions */
    this.size = false;
    this.createCanvas();
    this.translateToCenter();
  },
  /**
   * Create the backing <canvas> element, absolutely positioned at the
   * top-left of its container.
   * @return {undefined}
   */
  createCanvas : function() {
    var opt = this.opt;
    var width = opt.width;
    var h = opt.height;
    this.canvas = $E("canvas", {
      id : opt.injectInto + opt.idSuffix,
      width : width,
      height : h,
      style : {
        position : "absolute",
        top : 0,
        left : 0,
        width : width + "px",
        height : h + "px"
      }
    });
  },
  /**
   * Lazily create and cache the 2D rendering context.
   * @return {?}
   */
  getCtx : function() {
    if (!this.ctx) {
      return this.ctx = this.canvas.getContext("2d");
    }
    return this.ctx;
  },
  /**
   * Return the canvas dimensions, caching them in this.size.
   * @return {?} An object with width and height properties.
   */
  getSize : function() {
    if (this.size) {
      return this.size;
    }
    var canvas = this.canvas;
    return this.size = {
      width : canvas.width,
      height : canvas.height
    };
  },
  /**
   * Move the context origin to the canvas center. When a previous size
   * `ps` is passed (from resize()), the context scale is reset first
   * and the translation compensates for the old dimensions and the
   * accumulated offsets.
   * @param {?} ps Optional previous {width, height} to compensate for.
   * @return {undefined}
   */
  translateToCenter : function(ps) {
    var size = this.getSize();
    var width = ps ? size.width - ps.width - this.translateOffsetX * 2 : size.width;
    // FIX: `height` previously lacked `var` and leaked into the global
    // scope (a lost comma from `var width = ..., height = ...`).
    var height = ps ? size.height - ps.height - this.translateOffsetY * 2 : size.height;
    var ctx = this.getCtx();
    if (ps) {
      ctx.scale(1 / this.scaleOffsetX, 1 / this.scaleOffsetY);
    }
    ctx.translate(width / 2, height / 2);
  },
  /**
   * Resize the canvas element, reset all accumulated transforms and
   * ask the visualization to redraw.
   * @param {number} width
   * @param {number} height
   * @return {undefined}
   */
  resize : function(width, height) {
    var size = this.getSize();
    var canvas = this.canvas;
    var style = canvas.style;
    // Invalidate the cached size before it is read again.
    this.size = false;
    canvas.width = width;
    canvas.height = height;
    style.width = width + "px";
    style.height = height + "px";
    // NOTE(review): without native canvas support the recentering
    // compensates for the previous size — confirm this matches the
    // fallback renderer's behavior.
    if (!supportsCanvas) {
      this.translateToCenter(size);
    } else {
      this.translateToCenter();
    }
    this.translateOffsetX = this.translateOffsetY = 0;
    this.scaleOffsetX = this.scaleOffsetY = 1;
    this.clear();
    this.viz.resize(width, height, this);
  },
  /**
   * Pan the context by (x, y), tracking the scaled offset, and replot
   * unless `z` is set.
   * @param {number} x
   * @param {number} y
   * @param {boolean} z When true, skip the replot.
   * @return {undefined}
   */
  translate : function(x, y, z) {
    var sx = this.scaleOffsetX;
    var sy = this.scaleOffsetY;
    this.translateOffsetX += x * sx;
    this.translateOffsetY += y * sy;
    this.getCtx().translate(x, y);
    if (!z) {
      this.plot();
    }
  },
  /**
   * Zoom the context by (x, y), tracking the cumulative factors, and
   * replot unless `dataAndEvents` is set.
   * @param {number} x
   * @param {number} y
   * @param {boolean} dataAndEvents When true, skip the replot.
   * @return {undefined}
   */
  scale : function(x, y, dataAndEvents) {
    this.scaleOffsetX *= x;
    this.scaleOffsetY *= y;
    this.getCtx().scale(x, y);
    if (!dataAndEvents) {
      this.plot();
    }
  },
  /**
   * Clear the whole visible area, dividing out the accumulated
   * translation and scale so the cleared rectangle covers the full
   * canvas regardless of the current transform.
   * @return {undefined}
   */
  clear : function() {
    var size = this.getSize();
    var ox = this.translateOffsetX;
    var oy = this.translateOffsetY;
    var sx = this.scaleOffsetX;
    var sy = this.scaleOffsetY;
    this.getCtx().clearRect((-size.width / 2 - ox) * 1 / sx, (-size.height / 2 - oy) * 1 / sy, size.width * 1 / sx, size.height * 1 / sy);
  },
  /**
   * Clear and redraw via the owning visualization.
   * @return {undefined}
   */
  plot : function() {
    this.clear();
    this.viz.plot(this);
  }
});
Canvas.Background = {};
/**
 * Background renderer that strokes a set of concentric circles behind
 * a visualization, centered at the context origin.
 */
Canvas.Background.Circles = new Class({
  /**
   * Store the visualization and merge user options over the defaults.
   * @param {?} viz
   * @param {?} options
   * @return {undefined}
   */
  initialize : function(viz, options) {
    this.viz = viz;
    this.config = $.merge({
      idSuffix : "-bkcanvas",
      levelDistance : 100,
      numberOfCircles : 6,
      CanvasStyles : {},
      offset : 0
    }, options);
  },
  /**
   * Redraw after a resize; the new dimensions are not needed because
   * the circles are drawn around the origin.
   * @param {number} w
   * @param {number} height
   * @param {?} opt
   * @return {undefined}
   */
  resize : function(w, height, opt) {
    this.plot(opt);
  },
  /**
   * Apply the configured canvas styles, then stroke numberOfCircles
   * rings spaced levelDistance apart.
   * @param {?} base
   * @return {undefined}
   */
  plot : function(base) {
    var ctx = base.getCtx();
    var conf = this.config;
    var styles = conf.CanvasStyles;
    for (var prop in styles) {
      ctx[prop] = styles[prop];
    }
    var total = conf.numberOfCircles;
    var spacing = conf.levelDistance;
    for (var ring = 1; ring <= total; ring++) {
      ctx.beginPath();
      ctx.arc(0, 0, spacing * ring, 0, 2 * Math.PI, false);
      ctx.stroke();
      ctx.closePath();
    }
  }
});
})();
/**
 * A point in polar coordinates.
 * @param {number} theta Angle in radians; defaults to 0.
 * @param {number} rho Radius; defaults to 0.
 * @return {undefined}
 */
var Transform = function(theta, rho) {
  this.theta = theta || 0;
  this.rho = rho || 0;
};
/** Public alias: expose the polar-point constructor as $jit.Polar. @type {function (number, number): undefined} */
$jit.Polar = Transform;
Transform.prototype = {
/**
* @param {boolean} dataAndEvents
* @return {?}
*/
getc : function(dataAndEvents) {
return this.toComplex(dataAndEvents);
},
/**
* @return {?}
*/
getp : function() {
return this;
},
/**
* @param {?} item
* @return {undefined}
*/
set : function(item) {
item = item.getp();
this.theta = item.theta;
this.rho = item.rho;
},
/**
* @param {number} x
* @param {number} y
* @return {undefined}
*/
setc : function(x, y) {
/** @type {number} */
this.rho = Math.sqrt(x * x + y * y);
/** @type {number} */
this.theta = Math.atan2(y, x);
if (this.theta < 0) {
this.theta += Math.PI * 2;
}
},
/**
* @param {number} theta
* @param {?} dataAndEvents
* @return {undefined}
*/
setp : function(theta, dataAndEvents) {
/** @type {number} */
this.theta = theta;
this.rho = dataAndEvents;
},
/**
* @return {?}
*/
clone : function() {
return new Transform(this.theta, this.rho);
},
/**
* @param {boolean} dataAndEvents
* @return {?}
*/
toComplex : function(dataAndEvents) {
/** @type {number} */
var ex = Math.cos(this.theta) * this.rho;
/** @type {number} */
var py = Math.sin(this.theta) * this.rho;
if (dataAndEvents) {
return{
x : ex,
y : py
};
}
return new Vector(ex, py);
},
/**
* @param {?} v2
* @return {?}
*/
add : function(v2) {
return new Transform(this.theta + v2.theta, this.rho + v2.rho);
},
/**
* @param {number} x
* @return {?}
*/
scale : function(x) {
return new Transform(this.theta, this.rho * x);
},
/**
* @param {?} item
* @return {?}
*/
equals : function(item) {
return this.theta == item.theta && this.rho | |
the
former case.
Parameters
----------
item :
The item to search for links with.
category :
The category to search in.
Return
------
labels : tuple
The item labels in `category` that `item` can still link to.
"""
pos1, cat1 = self.item_to_pos(item)
cat2 = self.categories.index(category)
links = self.linked_set(pos1, cat2)
return tuple(self.labels[p] for p in links)
def find_missing(self, category):
"""
Retrieve a set of all the items in category that do not have
all of their 1-to-1 mappings set.
Parameters
----------
category :
The category to search in.
Return
------
missing : set
A set of item labels in `category` that still require work.
"""
cat = self.categories.index(category)
s = self.cat_slice(cat)
# 1. Take the N rows in the selected category: self.matrix[start:end]
# 2. Make it so you have (M * N, N) containing the links between each
# item and a given category. Each block of N rows is for the links
# between an item and a different category. The N columns are the
# items in the *other* category: ...reshape(-1, self.n)
# 3. Sum across the rows. This gives a count of items that each item
# links to in another category: ...sum(axis=1)
# 4. Reshape into an NxM matrix, where each row corresponds to a row
# in the original slice (the selected category) and columns are the
# number of links to each other category: ...reshape(self.n, self.m)
links = self.matrix[s].reshape(-1, self.n).sum(
axis=1).reshape(self.n, self.m)
# Select any rows that have any missing mappings.
mask = (links != 1).any(axis=1)
return set(self.labels[x] for x in np.flatnonzero(mask) + s.start)
def pos_to_item(self, pos, cat=None):
"""
Convert a matrix position to a (category, label) tuple.
Parameters
----------
pos : int
The index of the item in :py:attr:`matrix`.
cat : int
An optional index of the category in :py:attr:`categories`.
If omitted, it is computed as ``pos // n``.
Return
------
item : tuple
A two-element (category, label) tuple providing an
unambiguous high-level reference to the item.
"""
if cat is None:
cat = pos // self.n
return self.categories[cat], self.labels[pos]
def item_to_pos(self, item):
"""
Convert a two-element (category, value) item tuple into a matrix
position and category index.
Parameters
----------
item :
Either a two-element (category, label) tuple, or just an
item label. Item labels are only accepted if they are not
tuples and are unambiguous across the entire problem space.
Return
------
pos : int
The index of the item within the :py:attr:`matrix`.
cat : int
The index of the item's category within
:py:attr:`categories`.
"""
if isinstance(item, tuple):
cat = self.categories.index(item[0])
pos = self.labels.index(item[1], self.n * cat, self.n * (cat + 1))
else:
pos = self.map[item]
if pos is None:
raise ValueError(f'Ambiguous item label {item!r}')
cat = pos // self.n
return pos, cat
def unlink(self, pos1, pos2):
"""
Set two items to be definitely not associated.
All edges between items associated with either one are updated
as well.
This is the low-level equivalent of :py:meth:`unmatch`: the
inputs are matrix positions.
Unlinking already unlinked items is a no-op. Unlinking an item
from itself is an error. The updated relationships are pruned
recursively according to the description in the he
:ref:`elimination-logic` section.
Parameters
-----------
pos1 :
The matrix position of an item to unmatch.
pos2 :
The matrix position of the item to unmatch it from.
Return
------
count : int
The number of edges removed. Zero if the items are
already unlinked.
"""
return self._implications(pos1, pos2 // self.n, self.pos_mask(pos2))
def find_matches(self, pos):
"""
Find all items with single links across categories to `pos`.
A matching item is one that is the only one in its category that
shares an edge with the item at `pos`.
Parameters
----------
pos : int
A row or column in :py:attr:`matrix`.
Return
------
matches : numpy.ndarray
A numpy array containing the matching positions.
"""
# reshape so rows are categories
row = self.matrix[pos].reshape(self.m, self.n)
# find indices of the attributes that are linked
m = np.flatnonzero(row.sum(axis=1) == 1)
# indices of the items within each linked attribute
n = np.argmax(row[m, :], axis=1)
return m * self.n + n
def _implications(self, pos, cat_hint, mask):
"""
Remove links between an item and other items, and follow through
with the logical implications of the removal.
This method handles removals starting with a single node at
`pos`. The other endpoints of the edges are given in `mask`.
`mask` is an array of bits, where `True` elements correspond to
edges that should be removed. The mask must be `False` at `pos`,
since a node can not be unlinked from itself. It may contain any
number of links that have already been removed.
In addition to the implications of
:ref:`elimination-logic-explicit` rules, this method will check
each removed edge against the currently registered assertions.
See the :ref:`elimination-logic` section for more information.
Parameters
----------
pos : int
The matrix position of the starting item to unlink from.
cat_hint : int or None
If known, the category in which all items in `mask` belong
to. `None` if unknown. Any bits in `mask` outside this
category will be ignored.
mask : numpy.ndarray
An array of the same length as a row in :py:attr:`matrix`,
inidcating the links to remove from `pos`.
Return
------
count : int
The total number of links removed.
"""
self._indent()
def check(pos, unlink):
"""
Add the `unlink` array to the stack if it has any elements.
"""
if unlink.any():
self._log('Need to remove {0:M:X} edges from {1:P:I}\n',
unlink, pos, cont=True)
stack.append((pos, None, unlink))
else:
self._log('All links already shared: nothing to remove\n',
cont=True)
def update_assertions(pos, cat, mask):
"""
Call the :py:meth:`~Assertion.update` methods of any
matching :py:class:`Assertion`\ s found in
:py:attr:`assertions`.
Parameters
----------
pos : int
One of the potential endpoints of the assertion.
cat : int
The linking category of the assertion.
mask : int or numpy.ndarray
Either an integer containing a single value for the
second endpoint, or a mask containing flags marking
multiple endpoints.
Return
------
count :
The number of edges actually removed from the graph as a
result of this operation.
"""
key = (pos, cat)
mask = np.array(mask, ndmin=1)
if mask.dtype.type is np.bool_:
mask = np.flatnonzero(mask)
count = 0
if key in self.assertions:
assertions = self.assertions[key]
self._log('{0:P:P} assertions for {1:P:I} -> {2:N:C}',
assertions, pos, cat, cont=True)
self._indent()
for pos2 in mask:
self._log('Updating all for {0:P:I} x {1:P:I}', pos, pos2)
for assertion in assertions:
count += assertion.update(pos, pos2)
self._dedent()
else:
self._log('No assertions for {0:P:I} -> {1:N:C}',
pos, cat, cont=True)
return count
def update(pos, cat, mask):
"""
Update assertions and check implications of recently deleted
links.
This method gets called twice, once for each direction of
the link. This means that the assertion check only needs to
happen once per call.
Parameters
----------
pos : int
The source node
cat : int
The category to check against
mask : numpy.ndarray or int
The sink nodes in `cat` that were just removed. If an
integer, only one sink node. Otherwise, all bits
represent sink nodes.
Return
------
count : int
The number of nodes removed by assersions.
"""
self._indent()
self._log('Updating links based on {0:P:I} -> {1:N:C}', pos, cat)
count = update_assertions(pos, cat, mask)
# Select items in `cat` that link to `pos`
rows = self.matrix[self.cat_slice(cat), :]
selection = rows[:, pos]
rows = rows[selection]
other = np.logical_or.reduce(rows, axis=0)
# Find all shared links
row = self.matrix[pos]
matches = row & other
# Ensure that `pos` only links to those links
check(pos, matches ^ row)
if len(rows) == 1:
pos2 = np.argmax(selection) + cat * self.n
self._log('{0:P:I} to {1:P:I} is 1-to-1', pos, pos2)
check(pos2, matches ^ other)
self._dedent()
return count
stack = deque([(pos, cat_hint, mask)])
count = 0
self._log('Assessing implications', start=True)
while stack:
pos1, cat2, mask = stack.pop()
cat1 = pos1 // self.n
if mask[pos1]:
raise ValueError('Please do not attempt to unlink an item '
f'from itself: | |
points with stc.plot."""
sample_src = read_source_spaces(src_fname)
kwargs = dict(subjects_dir=subjects_dir, smoothing_steps=1)
vertices = [s['vertno'] for s in sample_src]
n_time = 5
n_verts = sum(len(v) for v in vertices)
stc_data = np.random.RandomState(0).rand((n_verts * n_time))
stc_data.shape = (n_verts, n_time)
stc = SourceEstimate(stc_data, vertices, 1, 1, 'sample')
# Test for simple use cases
stc.plot(**kwargs)
stc.plot(clim=dict(pos_lims=(10, 50, 90)), **kwargs)
stc.plot(colormap='hot', clim='auto', **kwargs)
stc.plot(colormap='mne', clim='auto', **kwargs)
stc.plot(clim=dict(kind='value', lims=(10, 50, 90)), figure=99, **kwargs)
pytest.raises(TypeError, stc.plot, clim='auto', figure=[0], **kwargs)
# Test for correct clim values
with pytest.raises(ValueError, match='monotonically'):
stc.plot(clim=dict(kind='value', pos_lims=[0, 1, 0]), **kwargs)
with pytest.raises(ValueError, match=r'.*must be \(3,\)'):
stc.plot(colormap='mne', clim=dict(pos_lims=(5, 10, 15, 20)), **kwargs)
with pytest.raises(ValueError, match="'value', 'values' and 'percent'"):
stc.plot(clim=dict(pos_lims=(5, 10, 15), kind='foo'), **kwargs)
with pytest.raises(ValueError, match='must be "auto" or dict'):
stc.plot(colormap='mne', clim='foo', **kwargs)
with pytest.raises(TypeError, match='must be an instance of'):
plot_source_estimates('foo', clim='auto', **kwargs)
with pytest.raises(ValueError, match='hemi'):
stc.plot(hemi='foo', clim='auto', **kwargs)
with pytest.raises(ValueError, match='Exactly one'):
stc.plot(clim=dict(lims=[0, 1, 2], pos_lims=[0, 1, 2], kind='value'),
**kwargs)
# Test handling of degenerate data: thresholded maps
stc._data.fill(0.)
with pytest.warns(RuntimeWarning, match='All data were zero'):
plot_source_estimates(stc, **kwargs)
def _assert_mapdata_equal(a, b):
    """Assert two clim/colormap/transparent map-data dicts are equivalent."""
    __tracebackhide__ = True
    assert set(a.keys()) == {'clim', 'colormap', 'transparent'}
    assert a.keys() == b.keys()
    assert a['transparent'] == b['transparent'], 'transparent'
    clim_a, clim_b = a['clim'], b['clim']
    assert clim_a.keys() == clim_b.keys(), 'clim keys'
    assert clim_a['kind'] == clim_b['kind'] == 'value'
    lims_key = 'pos_lims' if 'pos_lims' in clim_a else 'lims'
    assert_array_equal(clim_a[lims_key], clim_b[lims_key], err_msg=lims_key)
    for mapdata in (a, b):
        assert isinstance(mapdata['colormap'], Colormap), 'Colormap'
    assert a['colormap'].name == b['colormap'].name
def test_process_clim_round_trip():
    """Test basic input-output support."""
    # Case 1: data with a negative extreme -> diverging map with pos_lims.
    result = _process_clim('auto', 'auto', True, -1.)
    expected = dict(
        colormap=mne_analyze_colormap([0, 0.5, 1], 'matplotlib'),
        clim=dict(kind='value', pos_lims=[1, 1, 1]),
        transparent=True,)
    _assert_mapdata_equal(result, expected)
    round_tripped = _process_clim(**result)
    _assert_mapdata_equal(result, round_tripped)
    _linearize_map(result)  # smoke test
    assert_allclose(_get_map_ticks(result), [-1, 0, 1])
    # Case 2: strictly positive data -> sequential 'hot' map with lims.
    result = _process_clim('auto', 'auto', True, 1.)
    expected = dict(
        colormap=plt.get_cmap('hot'),
        clim=dict(kind='value', lims=[1, 1, 1]),
        transparent=True,)
    _assert_mapdata_equal(result, expected)
    round_tripped = _process_clim(**result)
    _assert_mapdata_equal(result, round_tripped)
    _linearize_map(result)
    assert_allclose(_get_map_ticks(result), [1])
    # Case 3: explicit pos_lims starting at zero.
    clim = dict(kind='value', pos_lims=[0, 0.5, 1])
    result = _process_clim(clim, 'auto', True)
    expected = dict(
        colormap=mne_analyze_colormap([0, 0.5, 1], 'matplotlib'),
        clim=clim, transparent=True)
    _assert_mapdata_equal(result, expected)
    _linearize_map(result)
    assert_allclose(_get_map_ticks(result), [-1, -0.5, 0, 0.5, 1])
    # Case 4: explicit pos_lims with a nonzero lower bound adds the
    # mirrored bound to the tick set.
    clim = dict(kind='value', pos_lims=[0.25, 0.5, 1])
    result = _process_clim(clim, 'auto', True)
    expected = dict(
        colormap=mne_analyze_colormap([0, 0.5, 1], 'matplotlib'),
        clim=clim, transparent=True)
    _assert_mapdata_equal(result, expected)
    _linearize_map(result)
    assert_allclose(_get_map_ticks(result), [-1, -0.5, -0.25, 0, 0.25, 0.5, 1])
@testing.requires_testing_data
@requires_nibabel()
def test_stc_mpl():
    """Test plotting source estimates with matplotlib."""
    sample_src = read_source_spaces(src_fname)
    vertices = [s['vertno'] for s in sample_src]
    n_time = 5
    n_verts = sum(len(v) for v in vertices)
    # Constant all-ones data: the values are irrelevant here, only that
    # the plotting code paths run.
    stc_data = np.ones((n_verts * n_time))
    stc_data.shape = (n_verts, n_time)
    stc = SourceEstimate(stc_data, vertices, 1, 1, 'sample')
    # A RuntimeWarning about vertices 'not included' is expected for
    # this plot call.
    with pytest.warns(RuntimeWarning, match='not included'):
        stc.plot(subjects_dir=subjects_dir, time_unit='s', views='ven',
                 hemi='rh', smoothing_steps=2, subject='sample',
                 backend='matplotlib', spacing='oct1', initial_time=0.001,
                 colormap='Reds')
    fig = stc.plot(subjects_dir=subjects_dir, time_unit='ms', views='dor',
                   hemi='lh', smoothing_steps=2, subject='sample',
                   backend='matplotlib', spacing='ico2', time_viewer=True,
                   colormap='mne')
    # Exercise the interactive time viewer: click to change the time,
    # then drive it with keyboard events.
    time_viewer = fig.time_viewer
    _fake_click(time_viewer, time_viewer.axes[0], (0.5, 0.5))  # change t
    time_viewer.canvas.key_press_event('ctrl+right')
    time_viewer.canvas.key_press_event('left')
    # Invalid arguments must raise: hemi='both' and a bad time unit.
    pytest.raises(ValueError, stc.plot, subjects_dir=subjects_dir,
                  hemi='both', subject='sample', backend='matplotlib')
    pytest.raises(ValueError, stc.plot, subjects_dir=subjects_dir,
                  time_unit='ss', subject='sample', backend='matplotlib')
    plt.close('all')
@pytest.mark.timeout(60)  # can sometimes take > 60 sec
@testing.requires_testing_data
@requires_nibabel()
@pytest.mark.parametrize('coord_frame, idx, show_all, title',
                         [('head', 'gof', True, 'Test'),
                          ('mri', 'amplitude', False, None)])
def test_plot_dipole_mri_orthoview(coord_frame, idx, show_all, title):
    """Test mpl dipole plotting."""
    dipoles = read_dipole(dip_fname)
    trans = read_trans(trans_fname)
    fig = dipoles.plot_locations(trans=trans, subject='sample',
                                 subjects_dir=subjects_dir,
                                 coord_frame=coord_frame, idx=idx,
                                 show_all=show_all, title=title,
                                 mode='orthoview')
    # smoke-test the interactive handlers: scroll and key events must not err
    fig.canvas.scroll_event(0.5, 0.5, 1)  # scroll up
    fig.canvas.scroll_event(0.5, 0.5, -1)  # scroll down
    fig.canvas.key_press_event('up')
    fig.canvas.key_press_event('down')
    fig.canvas.key_press_event('a')  # some other key
    # passing this pre-existing 2D axes raises TypeError
    ax = plt.subplot(111)
    pytest.raises(TypeError, dipoles.plot_locations, trans, 'sample',
                  subjects_dir, ax=ax)
    plt.close('all')
@testing.requires_testing_data
def test_plot_dipole_orientations(renderer):
    """Test dipole plotting in 3d."""
    # Exercise both coordinate frames, each with a different glyph mode.
    dips = read_dipole(dip_fname)
    head_mri_t = read_trans(trans_fname)
    for frame, glyph in [('head', 'arrow'), ('mri', 'sphere')]:
        dips.plot_locations(trans=head_mri_t, subject='sample',
                            subjects_dir=subjects_dir,
                            mode=glyph, coord_frame=frame)
    renderer.backend._close_all()
@testing.requires_testing_data
@traits_test
def test_snapshot_brain_montage(renderer):
    """Test snapshot brain montage."""
    info = read_info(evoked_fname)
    fig = plot_alignment(
        info, trans=None, subject='sample', subjects_dir=subjects_dir)
    positions = np.vstack([ch['loc'][:3] for ch in info['chs']])
    names = [ch['ch_name'] for ch in info['chs']]
    pos_dict = dict(zip(names, positions))
    # Give the first channel an invalid 2-element position
    pos_dict[info['chs'][0]['ch_name']] = [1, 2]
    # An array instead of a dict of positions is a type error
    with pytest.raises(TypeError):
        snapshot_brain_montage(fig, positions)
    # Every channel must have exactly 3 position values
    with pytest.raises(ValueError):
        snapshot_brain_montage(fig, pos_dict)
    # A figure without a scene is rejected
    with pytest.raises(ValueError):
        snapshot_brain_montage(None, info)
@pytest.mark.slowtest  # can be slow on OSX
@testing.requires_testing_data
@requires_dipy()
@requires_nibabel()
@requires_version('nilearn', '0.4')
@pytest.mark.parametrize('mode, stype, init_t, want_t, init_p, want_p', [
    ('glass_brain', 's', None, 2, None, (-30.9, 18.4, 56.7)),
    ('stat_map', 'vec', 1, 1, None, (15.7, 16.0, -6.3)),
    ('glass_brain', 'vec', None, 1, (10, -10, 20), (6.6, -9.0, 19.9)),
    ('stat_map', 's', 1, 1, (-10, 5, 10), (-12.3, 2.0, 7.7))])
def test_plot_volume_source_estimates(mode, stype, init_t, want_t,
                                      init_p, want_p):
    """Test interactive plotting of volume source estimates."""
    forward = read_forward_solution(fwd_fname)
    sample_src = forward['src']
    if init_p is not None:
        init_p = np.array(init_p) / 1000.  # scale position (presumably mm -> m)
    vertices = [s['vertno'] for s in sample_src]
    n_verts = sum(len(v) for v in vertices)
    n_time = 2
    data = np.random.RandomState(0).rand(n_verts, n_time)
    # build either a vector or a scalar volume source estimate
    if stype == 'vec':
        stc = VolVectorSourceEstimate(
            np.tile(data[:, np.newaxis], (1, 3, 1)), vertices, 1, 1)
    else:
        assert stype == 's'
        stc = VolSourceEstimate(data, vertices, 1, 1)
    with pytest.warns(None):  # sometimes get scalars/index warning
        with catch_logging() as log:
            fig = stc.plot(
                sample_src, subject='sample', subjects_dir=subjects_dir,
                mode=mode, initial_time=init_t, initial_pos=init_p,
                verbose=True)
    log = log.getvalue()
    # the resolved initial time and position are reported in verbose output
    want_str = 't = %0.3f s' % want_t
    assert want_str in log, (want_str, init_t)
    want_str = '(%0.1f, %0.1f, %0.1f) mm' % want_p
    assert want_str in log, (want_str, init_p)
    # smoke-test clicking each image axes and keyboard navigation
    for ax_idx in [0, 2, 3, 4]:
        _fake_click(fig, fig.axes[ax_idx], (0.3, 0.5))
    fig.canvas.key_press_event('left')
    fig.canvas.key_press_event('shift+right')
@pytest.mark.slowtest  # can be slow on OSX
@testing.requires_testing_data
@requires_dipy()
@requires_nibabel()
@requires_version('nilearn', '0.4')
def test_plot_volume_source_estimates_morph():
    """Test interactive plotting of volume source estimates with morph."""
    forward = read_forward_solution(fwd_fname)
    sample_src = forward['src']
    vertices = [s['vertno'] for s in sample_src]
    n_verts = sum(len(v) for v in vertices)
    n_time = 2
    data = np.random.RandomState(0).rand(n_verts, n_time)
    stc = VolSourceEstimate(data, vertices, 1, 1)
    sample_src[0]['subject_his_id'] = 'sample'  # old src
    morph = compute_source_morph(sample_src, 'sample', 'fsaverage', zooms=5,
                                 subjects_dir=subjects_dir)
    initial_pos = (-0.05, -0.01, -0.006)
    # plotting through a morph (instead of a source space) should also work
    with pytest.warns(None):  # sometimes get scalars/index warning
        with catch_logging() as log:
            stc.plot(morph, subjects_dir=subjects_dir, mode='glass_brain',
                     initial_pos=initial_pos, verbose=True)
    log = log.getvalue()
    assert 't = 1.000 s' in log
    assert '(-52.0, -8.0, -7.0) mm' in log
    # error paths: bad mode, wrong STC type, negative colormap limits
    with pytest.raises(ValueError, match='Allowed values are'):
        stc.plot(sample_src, 'sample', subjects_dir, mode='abcd')
    vertices.append([])
    surface_stc = SourceEstimate(data, vertices, 1, 1)
    with pytest.raises(TypeError, match='an instance of VolSourceEstimate'):
        plot_volume_source_estimates(surface_stc, sample_src, 'sample',
                                     subjects_dir)
    with pytest.raises(ValueError, match='Negative colormap limits'):
        stc.plot(sample_src, 'sample', subjects_dir,
                 clim=dict(lims=[-1, 2, 3], kind='value'))
# Marker for 3D tests that are known to crash test workers on Azure
# Windows CI when running Python 3.8.
bad_azure_3d = pytest.mark.skipif(
    os.getenv('AZURE_CI_WINDOWS', 'false') == 'true' and
    sys.version_info[:2] == (3, 8),
    reason='Crashes workers on Azure')
@pytest.mark.slowtest  # can be slow on OSX
@testing.requires_testing_data
@requires_pysurfer
@traits_test
@bad_azure_3d
def test_plot_vector_source_estimates(renderer_interactive):
    """Test plotting of vector source estimates."""
    sample_src = read_source_spaces(src_fname)
    vertices = [s['vertno'] for s in sample_src]
    n_verts = sum(len(v) for v in vertices)
    n_time = 5
    data = np.random.RandomState(0).rand(n_verts, 3, n_time)
    stc = VectorSourceEstimate(data, vertices, 1, 1)
    brain = stc.plot('sample', subjects_dir=subjects_dir, hemi='both',
                     smoothing_steps=1, verbose='error')
    brain.close()
    del brain
    # an invalid clim specification raises (error text mentions "pos_lims")
    with pytest.raises(ValueError, match='use "pos_lims"'):
        stc.plot('sample', subjects_dir=subjects_dir,
                 clim=dict(pos_lims=[1, 2, 3]))
@testing.requires_testing_data
def test_plot_sensors_connectivity(renderer):
    """Test plotting of sensors connectivity."""
    from mne import io, pick_types
    raw_fname = op.join(data_dir, 'MEG', 'sample',
                        'sample_audvis_trunc_raw.fif')
    raw = io.read_raw_fif(raw_fname)
    picks = pick_types(raw.info, meg='grad', eeg=False, stim=False,
                       eog=True, exclude='bads')
    # random connectivity matrix sized to the picked channels
    rng = np.random.RandomState(42)
    connectivity = rng.randn(len(picks), len(picks))
    info = raw.info
    # info must be a proper Info instance, not a string
    with pytest.raises(TypeError):
        plot_sensors_connectivity(info='foo', con=connectivity,
                                  picks=picks)
    # matrix shape must match the number of picked channels
    with pytest.raises(ValueError):
        plot_sensors_connectivity(info=info, con=connectivity[::2, ::2],
                                  picks=picks)
    plot_sensors_connectivity(info=info, con=connectivity, picks=picks)
@pytest.mark.parametrize('orientation', ('horizontal', 'vertical'))
@pytest.mark.parametrize('diverging', (True, False))
@pytest.mark.parametrize('lims', ([0.5, 1, 10], [0, 1, 10]))
def test_brain_colorbar(orientation, diverging, lims):
    """Test brain colorbar plotting."""
    _, ax = plt.subplots()
    clim = dict(kind='value')
    clim['pos_lims' if diverging else 'lims'] = lims
    plot_brain_colorbar(ax, clim, orientation=orientation)
    if orientation == 'vertical':
        have, empty = ax.get_yticklabels, ax.get_xticklabels
    else:
        have, empty = ax.get_xticklabels, ax.get_yticklabels
    # expected tick values: mirror pos_lims for diverging maps
    if not diverging:
        ticks = lims
    elif lims[0] == 0:
        # zero is shared between the negative and positive halves
        ticks = list(-np.array(lims[1:][::-1])) + lims
    else:
        ticks = list(-np.array(lims[::-1])) + [0] + lims
    plt.draw()
    # old mpl always spans 0->1 for the actual ticks, so we need to
    # look at the labels
    labels = [float(h.get_text().replace('−', '-')) for h in have()]
    assert_array_equal(labels, ticks)
    assert_array_equal(empty(), [])
    plt.close('all')
@pytest.mark.slowtest # slow-ish on Travis OSX
@requires_pysurfer
@testing.requires_testing_data
@traits_test
@bad_azure_3d
def test_mixed_sources_plot_surface(renderer_interactive):
"""Test plot_surface() for mixed source space."""
src = read_source_spaces(fwd_fname2)
N = np.sum([s['nuse'] for s in src]) # number of sources
T = 2 # number of time points
S = 3 # number of source spaces
rng = np.random.RandomState(0)
data = rng.randn(N, T)
vertno = S * [np.arange(N // S)]
stc = MixedSourceEstimate(data, vertno, 0, 1)
stc.surface().plot(views='lat', hemi='split',
subject='fsaverage', | |
# vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from sparktk.frame.ops.classification_metrics_value import ClassificationMetricsValue
from sparktk.models.logistic_regression_summary_table import LogisticRegressionSummaryTable
from sparktk.loggers import log_load
from sparktk.propobj import PropertiesObject
from sparktk import TkContext
from sparktk.arguments import affirm_type
log_load(__name__)  # log that this module was loaded
del log_load  # keep the module namespace clean
# public API of this module
__all__ = ["train", "LogisticRegressionModel"]
def train(frame,
          observation_columns,
          label_column,
          frequency_column=None,
          num_classes=2,
          optimizer="LBFGS",
          compute_covariance=True,
          intercept=True,
          feature_scaling=False,
          threshold=0.5,
          reg_type="L2",
          reg_param=0.0,
          num_iterations=100,
          convergence_tolerance=0.0001,
          num_corrections=10,
          mini_batch_fraction=1.0,
          step_size=1.0):
    """
    Build a logistic regression model.

    Trains a LogisticRegressionModel from the observation column(s) and the
    label column of the given frame.

    Parameters
    ----------

    :param frame: (Frame) A frame to train the model on.
    :param observation_columns: (List[str]) Column(s) containing the observations.
    :param label_column: (str) Column name containing the label for each observation.
    :param frequency_column: (Option[str]) Optional column containing the frequency of observations.
    :param num_classes: (int) Number of classes
    :param optimizer: (str) Set type of optimizer.
                           LBFGS - Limited-memory BFGS.
                           LBFGS supports multinomial logistic regression.
                           SGD - Stochastic Gradient Descent.
                           SGD only supports binary logistic regression.
    :param compute_covariance: (bool) Compute covariance matrix for the model.
    :param intercept: (bool) Add intercept column to training data.
    :param feature_scaling: (bool) Perform feature scaling before training model.
    :param threshold: (double) Threshold for separating positive predictions from negative predictions.
    :param reg_type: (str) Set type of regularization
                          L1 - L1 regularization with sum of absolute values of coefficients
                          L2 - L2 regularization with sum of squares of coefficients
    :param reg_param: (double) Regularization parameter
    :param num_iterations: (int) Maximum number of iterations
    :param convergence_tolerance: (double) Convergence tolerance of iterations for L-BFGS. Smaller value will lead to higher accuracy with the cost of more iterations.
    :param num_corrections: (int) Number of corrections used in LBFGS update.
                                 Default is 10.
                                 Values of less than 3 are not recommended;
                                 large values will result in excessive computing time.
    :param mini_batch_fraction: (double) Fraction of data to be used for each SGD iteration
    :param step_size: (double) Initial step size for SGD. In subsequent steps, the step size decreases by stepSize/sqrt(t)

    :return: (LogisticRegressionModel) A LogisticRegressionModel with a summary of the trained model.
             The summary includes the number of features and classes, and (optionally) the
             covariance matrix of the trained model — the inverse of the Hessian matrix
             (second-order partial derivatives of the model's log-likelihood function).
    """
    if frame is None:
        raise ValueError("Frame cannot be None")
    tc = frame._tc
    scala_obj = get_scala_obj(tc)
    # accept a single column name as shorthand for a one-element list
    if isinstance(observation_columns, basestring):
        observation_columns = [observation_columns]
    scala_observation_columns = tc.jutils.convert.to_scala_list_string(observation_columns)
    scala_frequency_column = tc.jutils.convert.to_scala_option(frequency_column)
    # validate the boolean flags before crossing into the JVM
    for flag_name, flag_value in (("compute_covariance", compute_covariance),
                                  ("intercept", intercept),
                                  ("feature_scaling", feature_scaling)):
        if not isinstance(flag_value, bool):
            raise ValueError("%s must be a bool, received %s" % (flag_name, type(flag_value)))
    scala_model = scala_obj.train(frame._scala,
                                  scala_observation_columns,
                                  label_column,
                                  scala_frequency_column,
                                  num_classes,
                                  optimizer,
                                  compute_covariance,
                                  intercept,
                                  feature_scaling,
                                  threshold,
                                  reg_type,
                                  reg_param,
                                  num_iterations,
                                  convergence_tolerance,
                                  num_corrections,
                                  mini_batch_fraction,
                                  float(step_size))
    return LogisticRegressionModel(tc, scala_model)
def get_scala_obj(tc):
    """Gets reference to the scala object"""
    jvm = tc.sc._jvm
    return jvm.org.trustedanalytics.sparktk.models.classification.logistic_regression.LogisticRegressionModel
def load(path, tc=TkContext.implicit):
    """Load a LogisticRegressionModel from the given path.

    :param path: location the model was previously saved to
    :param tc: (TkContext) the active TkContext (validated before use)
    :return: (LogisticRegressionModel) the restored model
    """
    TkContext.validate(tc)
    return tc.load(path, LogisticRegressionModel)
class LogisticRegressionModel(PropertiesObject):
"""
A trained logistic regression model
Example
--------
>>> rows = [[4.9,1.4,0], [4.7,1.3,0], [4.6,1.5,0], [6.3,4.9,1],[6.1,4.7,1], [6.4,4.3,1], [6.6,4.4,1],[7.2,6.0,2], [7.2,5.8,2], [7.4,6.1,2], [7.9,6.4,2]]
>>> schema = [('Sepal_Length', float),('Petal_Length', float), ('Class', int)]
>>> frame = tc.frame.create(rows, schema)
<progress>
Consider the following frame containing three columns.
>>> frame.inspect()
[#] Sepal_Length Petal_Length Class
======================================
[0] 4.9 1.4 0
[1] 4.7 1.3 0
[2] 4.6 1.5 0
[3] 6.3 4.9 1
[4] 6.1 4.7 1
[5] 6.4 4.3 1
[6] 6.6 4.4 1
[7] 7.2 6.0 2
[8] 7.2 5.8 2
[9] 7.4 6.1 2
>>> model = tc.models.classification.logistic_regression.train(frame, ['Sepal_Length', 'Petal_Length'], 'Class', num_classes=3, optimizer='LBFGS', compute_covariance=True)
<progress>
<skip>
>>> model.training_summary
coefficients degrees_freedom standard_errors \
intercept_0 -0.780153 1 NaN
Sepal_Length_1 -120.442165 1 28497036.888425
Sepal_Length_0 -63.683819 1 28504715.870243
intercept_1 -90.484405 1 NaN
Petal_Length_0 117.979824 1 36178481.415888
Petal_Length_1 206.339649 1 36172481.900910
wald_statistic p_value
intercept_0 NaN NaN
Sepal_Length_1 -0.000004 1.000000
Sepal_Length_0 -0.000002 1.000000
intercept_1 NaN NaN
Petal_Length_0 0.000003 0.998559
Petal_Length_1 0.000006 0.998094
>>> model.training_summary.covariance_matrix.inspect()
[#] Sepal_Length_0 Petal_Length_0 intercept_0
===============================================================
[0] 8.12518826843e+14 -1050552809704907 5.66008788624e+14
[1] -1.05055305606e+15 1.30888251756e+15 -3.5175956714e+14
[2] 5.66010683868e+14 -3.51761845892e+14 -2.52746479908e+15
[3] 8.12299962335e+14 -1.05039425964e+15 5.66614798332e+14
[4] -1.05027789037e+15 1308665462990595 -352436215869081
[5] 566011198950063 -3.51665950639e+14 -2527929411221601
[#] Sepal_Length_1 Petal_Length_1 intercept_1
===============================================================
[0] 812299962806401 -1.05027764456e+15 5.66009303434e+14
[1] -1.05039450654e+15 1.30866546361e+15 -3.51663671537e+14
[2] 566616693386615 -3.5243849435e+14 -2.5279294114e+15
[3] 8.1208111142e+14 -1050119118230513 5.66615352448e+14
[4] -1.05011936458e+15 1.30844844687e+15 -3.5234036349e+14
[5] 566617247774244 -3.52342642321e+14 -2528394057347494
</skip>
>>> predict_frame = model.predict(frame, ['Sepal_Length', 'Petal_Length'])
<progress>
>>> predict_frame.inspect()
[#] Sepal_Length Petal_Length Class predicted_label
=======================================================
[0] 4.9 1.4 0 0
[1] 4.7 1.3 0 0
[2] 4.6 1.5 0 0
[3] 6.3 4.9 1 1
[4] 6.1 4.7 1 1
[5] 6.4 4.3 1 1
[6] 6.6 4.4 1 1
[7] 7.2 6.0 2 2
[8] 7.2 5.8 2 2
[9] 7.4 6.1 2 2
>>> test_metrics = model.test(frame, ['Sepal_Length', 'Petal_Length'], 'Class')
<progress>
>>> test_metrics
accuracy = 1.0
confusion_matrix = Predicted_0.0 Predicted_1.0 Predicted_2.0
Actual_0.0 3 0 0
Actual_1.0 0 4 0
Actual_2.0 0 0 4
f_measure = 1.0
precision = 1.0
recall = 1.0
>>> model.save("sandbox/logistic_regression")
>>> restored = tc.load("sandbox/logistic_regression")
>>> restored.training_summary.num_features == model.training_summary.num_features
True
The trained model can also be exported to a .mar file, to be used with the scoring engine:
>>> canonical_path = model.export_to_mar("sandbox/logisticRegressionModel.mar")
<hide>
>>> import os
>>> assert(os.path.isfile(canonical_path))
</hide>
"""
    def __init__(self, tc, scala_model):
        """Wrap an existing scala LogisticRegressionModel proxy object."""
        self._tc = tc
        # fail fast if the JVM object is not the expected scala model class
        tc.jutils.validate_is_jvm_instance_of(scala_model, get_scala_obj(tc))
        self._scala = scala_model
    @staticmethod
    def _from_scala(tc, scala_model):
        """Loads a Logistic Regression Model from a scala model (used by tc.load)."""
        return LogisticRegressionModel(tc, scala_model)
    # -- read-only training parameters and results, delegated to the scala model --
    @property
    def observation_columns(self):
        """Column(s) containing the observations."""
        return self._tc.jutils.convert.from_scala_seq(self._scala.observationColumns())
    @property
    def label_column(self):
        """Column name containing the label for each observation."""
        return self._scala.labelColumn()
    @property
    def frequency_column(self):
        """Optional column containing the frequency of observations."""
        return self._scala.frequencyColumn()
    @property
    def num_classes(self):
        """Number of classes"""
        return self._scala.numClasses()
    @property
    def optimizer(self):
        """Type of optimizer used for training.
            LBFGS - Limited-memory BFGS.
            LBFGS supports multinomial logistic regression.
            SGD - Stochastic Gradient Descent.
            SGD only supports binary logistic regression."""
        return self._scala.optimizer()
    @property
    def compute_covariance(self):
        """Whether the covariance matrix was computed for the model."""
        return self._scala.computeCovariance()
    @property
    def intercept(self):
        """Whether an intercept column was added to the training data."""
        return self._scala.intercept()
    @property
    def feature_scaling(self):
        """Whether feature scaling was performed before training the model."""
        return self._scala.featureScaling()
    @property
    def threshold(self):
        """Threshold for separating positive predictions from negative predictions."""
        return self._scala.threshold()
    @property
    def reg_type(self):
        """Type of regularization used for training
            L1 - L1 regularization with sum of absolute values of coefficients
            L2 - L2 regularization with sum of squares of coefficients"""
        return self._scala.regType()
    @property
    def reg_param(self):
        """Regularization parameter"""
        return self._scala.regParam()
    @property
    def num_iterations(self):
        """Maximum number of iterations"""
        return self._scala.numIterations()
    @property
    def convergence_tolerance(self):
        """Convergence tolerance of iterations for L-BFGS. Smaller value will lead to higher accuracy with the cost of more iterations."""
        return self._scala.convergenceTolerance()
    @property
    def num_corrections(self):
        """Number of corrections used in LBFGS update.
            Default is 10.
            Values of less than 3 are not recommended;
            large values will result in excessive computing time."""
        return self._scala.numCorrections()
    @property
    def mini_batch_fraction(self):
        """Fraction of data to be used for each SGD iteration"""
        return self._scala.miniBatchFraction()
    @property
    def step_size(self):
        """Initial step size for SGD. In subsequent steps, the step size decreases by stepSize/sqrt(t)"""
        return self._scala.stepSize()
    @property
    def training_summary(self):
        """Logistic regression summary table"""
        return LogisticRegressionSummaryTable(self._tc, self._scala.trainingSummary())
    def predict(self, frame, observation_columns=None):
        """
        Predict labels for data points using trained logistic regression model.

        Predict the labels for a test frame using trained logistic regression model, and create a new frame revision with
        existing columns and a new predicted label's column.

        Parameters
        ----------

        :param frame: (Frame) A frame whose labels are to be predicted. By default, predict is run on the same columns
                      over which the model is trained.
        :param observation_columns: (None or list[str]) Column(s) containing the observations whose labels are
                                    to be predicted. Default is the labels the model was trained on.
        :return: (Frame) Frame containing the original frame's columns and a column with the predicted label.
        """
        # __get_observation_columns is a name-mangled private helper (defined
        # later in this class); presumably it falls back to the training
        # columns when None is given — TODO confirm against the full source
        columns_option = self._tc.jutils.convert.to_scala_option_list_string(self.__get_observation_columns(observation_columns))
        # local import to avoid a circular import at module load time
        from sparktk.frame.frame import Frame
        return Frame(self._tc, self._scala.predict(frame._scala, columns_option))
def test(self, frame, observation_columns=None, label_column=None):
"""
Get the predictions for | |
<gh_stars>0
# -*- coding: utf-8 -*-
import numpy as np
import pylab as pb
import os
import math
class MCMetropolis(object):
"""A class to calculate the SHO velocity autocorrelation function using MC"""
# creates an instance of a Metropolis MC sampler class
def __init__(self, name, config):
np.random.seed() # give arg to remove randomness
self.name = name
self.config = config
if 'L' not in config.keys(): config['L'] = 1.0
if 'omegasq' not in config.keys(): config['omegasq'] = 1.0
# temperature is specified as T / Tmax, where Tmax = 2 * 1/4 omegasq (L/2)^2
# (m = kB = 1)
if 'iniTemp' not in config.keys(): config['iniTemp'] = 0.10
if 'npoints' not in config.keys(): config['npoints'] = 10000
# stepX is put in relative to L/2, stepP relative to omega * L/2
# this will multiplied by reference values automatically
if 'stepP' not in config.keys(): config['stepP'] = 2.0
if 'stepX' not in config.keys(): config['stepX'] = 2.0
if 'adjustStep' not in config.keys(): config['adjustStep'] = 'False'
if 'nadjst' not in config.keys(): config['nadjst'] = 20
if 'batchmode' not in config.keys(): config['batchmode'] = 'False'
if 'writeout' not in config.keys(): config['writeout'] = 'False'
if 'outdir' not in config.keys():
outdirname = self.name
os.makedirs(outdirname)
outdir = os.path.join( os.getcwd(), outdirname )
config['outdir'] = outdir
if (config['debug'] == 'True'):
print "SHO Monte-Carlo may have modified the configuration."
print "The current configuration is:"
print( config )
print "\n"
# PHYSICS
self.L = float(config['L']) # size of the box
self.omegasq = float(config['omegasq']) # to calculate period T
self.omega = math.sqrt( self.omegasq )
self.tempMax = 2 * 0.25 * self.omegasq * (self.L / 2)**2
self.iniTemp = float(config['iniTemp']) # in units of T / Tmax
self.kbT = self.iniTemp * self.tempMax
self.T = 2 * math.pi / self.omega
self.pRef = self.omega * (self.L / 2) # reference momentum
# SIMULATION
self.npoints = int(config['npoints']) # sample how many?
self.stepP = float(config['stepP']) * self.pRef
self.stepX = float(config['stepX']) * (self.L / 2)
self.figures = 0 # number of figures plotted
self.naccpt = 0 # for step adjustment
self.nadjst = float(config['nadjst']) # for step adjustment
# DATA
self.point = (0., 0.) # current (x, p)
self.points = np.array([]) # array of sampled points
self.freq = np.array([]) # ith point sampled # times
self.trials = 0 # steps attempted
self.newAcc = 0 # unique points accepted
self.totAcc = 0 # total points accepted
self.ratios = np.array([]) # ratios accepts/trials
self.stepsX = np.array([]) # for testing purposes
self.stepsP = np.array([]) # for testing purposes
self.iniEnergy = 0. # initial energy
self.enArray = np.array([]) # monitoring total energy
self.samplesteps= np.array([]) # steps w sampled ratios
self.outdir = config['outdir'] # output directory
# uses an RNG to sample the first pair (p, x)
# allowed by the input temperature PE_max / kT, omegasq, and size of box L
def sampleFirst(self):
# random position
corr = math.sqrt( self.iniTemp )
x = (np.random.random() - 0.5) * self.L * corr
# assign momentum compatible with temperature and position
totEn = 0.5 * self.omegasq * (self.L / 2)**2 * self.iniTemp
potEn = 0.5 * self.omegasq * x**2
p = (1 - 2 * (np.random.random() < 0.5)) * math.sqrt( 2*(totEn - potEn) )
# update points
self.point = (x, p)
self.iniEnergy = self.energy()
self.points = np.append( self.points, self.point )
self.freq = np.append( self.freq, 1) # x0, p0 occures 1 times
self.newAcc += 1 # +1 unique accepts
self.totAcc += 1 # +1 total accepts
self.naccpt += 1
self.trials += 1
# clears all sampled points and counters
    def reset(self):
        """Drop all sampled history, keeping only the current point.

        Used to find an optimal step size: statistics gathered during
        calibration are discarded before the production run.
        """
        # use to find an optimal step size
        # only current point is left over
        self.npoints = 1
        self.trials  = 1
        self.newAcc  = 1
        self.totAcc  = 1
        self.naccpt  = 1
        self.freq    = np.array( [1] )    # 1 points remains sampled
        self.ratios  = np.array([])       # ignore ratios before calibration
        self.stepsX  = np.array([])
        self.stepsP  = np.array([])
        self.samplesteps = np.array([])
        # only (x, p) for the last sampled point remain in the array
        self.points  = np.array([ self.points[-2], self.points[-1] ])
# calculates total energy
def energy(self):
(x, p) = self.point
return 0.5 * ( p * p ) + 0.5 * self.omegasq * ( x * x )
# calculates mean energy from enArray
def meanEnergy(self):
return self.enArray.mean()
# calculates current ratio of unique accepts to number of attempts
def calculateRatio(self):
return float(self.newAcc) / float(self.trials)
# adjust the ratio and does the bookkeeping
def adjustRatio(self):
ratio = float(self.naccpt) / float(self.nadjst)
if ratio < 0.5:
# acceptance too low, decrease the step
correction = 0.95
self.stepX = self.stepX * correction
self.stepP = self.stepP * correction
elif ratio > 0.5:
# acceptance too high, inscrease the step
correction = 1.05
self.stepX = self.stepX * correction
self.stepP = self.stepP * correction
# zero the counter
self.naccpt = 0
# adds ratio and steps to the lists for plotting during the test
def addRatio(self):
ratio = self.calculateRatio()
self.samplesteps = np.append( self.samplesteps, self.trials )
self.ratios = np.append(self.ratios, ratio)
# returns an array of npoints
    def sampledPoints(self):
        """Return the flat array of sampled (x, p) pairs."""
        return self.points
# makes a random step in phase space
# updates respective counters
def makeStep(self):
self.trials += 1
# generate a vector in (x, p) space
(xOld, pOld) = self.point
# make a step in phase space
xNew = xOld + (2 * np.random.random() - 1) * self.stepX
pNew = pOld + (2 * np.random.random() - 1) * self.stepP
return (xNew, pNew)
# compute weight exp( - delta V)
def computePba(self, xNew, pNew):
(xOld, pOld) = self.point
oldPE = 0.5 * self.omegasq * (xOld * xOld)
oldKE = 0.5 * (pOld * pOld)
newPE = 0.5 * self.omegasq * (xNew * xNew)
newKE = 0.5 * (pNew * pNew)
delPE = newPE - oldPE
delKE = newKE - oldKE
weight = delPE + delKE
if weight <= 0:
pba = 1
else:
pba = math.exp( - weight / self.kbT )
return pba
    # accepts a new trial point and updates corresponding counters / lists
    def accept(self, xTry, pTry):
        """Accept the trial point: make it current and record it as sampled."""
        self.newAcc += 1        # for tracking unique accepts (not zeroed)
        self.totAcc += 1        # for tracking total accepts (not zeroed)
        self.naccpt += 1        # for adjusting steps (zeroed periodically)
        self.point = (xTry, pTry)
        self.points = np.append( self.points, self.point )
        self.freq = np.append( self.freq, 1 )
    # rejects a new trial point and updates corresponding counters / lists
    def reject(self):
        """Re-count the current point instead of appending a new one."""
        # the previous point is sampled again: one more total sample, and
        # its multiplicity in freq grows by one (no new entry in points)
        self.totAcc += 1
        self.freq[-1] += 1
# makes nsteps new Monte-Carlo steps, saves sampled points in points array
    def evolve(self, nsteps):
        """Run nsteps Metropolis trial moves, sampling points and energies."""
        # make nsteps
        for istep in range(nsteps):
            (xTry, pTry) = self.makeStep()
            # ACCEPT or REJECT via the standard Metropolis criterion
            pba = self.computePba(xTry, pTry)
            if pba == 1:
                self.accept(xTry, pTry)
            elif pba < 1:
                if np.random.random() <= pba:
                    self.accept(xTry, pTry)
                else:
                    self.reject()
            else:
                print "Error, probability > 1"
            # adjust step if necessary (every nadjst-th trial)
            if (self.trials % self.nadjst == 0):
                if (self.config['adjustStep'] == 'True'):
                    self.adjustRatio()
                self.addRatio()
                self.stepsX = np.append(self.stepsX, self.stepX)
                self.stepsP = np.append(self.stepsP, self.stepP)
            # monitor total energy at every step
            self.enArray = np.append( self.enArray, self.energy() )
# calculates the normalization factor based on the current sampled points
def normalization(self):
# convert to np.arrays
self.points = np.array( self.points ) # (x, p)
self.freq = np.array( self.freq )
# extract velocities
v0 = self.points[1::2] # only odd indices
# integrate
C = (self.freq * v0 ** 2).sum() / self.totAcc
return C
# plots the initial (x0, p0) distributions
def plotPhaseSpace(self):
self.figures += 1
pb.figure( self.figures )
pb.title("(x0, p0) distribution")
pb.xlabel("x0 / (L/2)" )
pb.ylabel("p0 / w (L/2)")
pb.grid(True)
#pb.axis('equal')
x = self.points[::2]
p = self.points[1::2]
corr = math.sqrt( self.iniTemp )
margin = 0.25
xmax = abs( x.max() ) / (self.L / 2)
xmin = abs( x.min() ) / (self.L / 2)
xbound = max( xmax, xmin, 1.0 * corr)
pmax = abs( p.max() ) / (self.pRef)
pmin = abs( p.min() ) / (self.pRef)
pbound = max( pmax, pmin, 1.0 * corr )
pb.xlim(-xbound - margin, xbound + margin)
pb.ylim(-pbound - margin, pbound + margin)
if self.config['debug'] == 'True':
print "pbound | |
3.82;
# to work around this issue a smart way to proceed is just putting the ? wildcard instead of spaces:
list_of_files.append(fullpath.replace(' ', '?'))
list_of_files = sorted(list_of_files)
text = rpm_file + ": \\\n\t" + " \\\n\t".join(list_of_files) + "\n"
# According to the GNU make User’s Manual section "Rules without Recipes or Prerequisites":
# If a rule has no prerequisites or recipe, and the target of the rule is a nonexistent file,
# then `make’ imagines this target to have been updated whenever its rule is run.
# This implies that all targets depending on this one will always have their recipes run.
if generate_empty_recipes:
text += "\n\n# Empty recipes for dependency files (to avoid GNU make failures on dependency file removal):\n"
for dep_filename in list_of_files:
text += dep_filename + ":\n\n"
try:
with open(outfile, "w") as f:
f.write(text)
except:
print("Failed writing to output file '{}'. Aborting".format(outfile))
sys.exit(2)
def generate_missed_file_list(self, outfile, rpm_file, packaged_files_notfound):
"""Write a text file with the list of packaged files that could not be found inside search folders.
The text file is written in a simple CSV format
"""
if len(packaged_files_notfound)>0:
try:
with open(outfile, "w") as f:
f.write("File,SHA256SUM_or_MD5SUM\n")
for fname,fname_checksum in packaged_files_notfound:
f.write(("{},{}\n".format(fname,fname_checksum)))
except (OSError, IOError) as e:
print("Failed writing to output file '{}': {}. Aborting".format(outfile, e))
sys.exit(2)
else:
try:
# remove the file (in case it's there since a previous run)
os.remove(outfile)
except:
# ignore errors in delete
pass
print("Written list of packaged files not found in file '{}'".format(outfile))
    def run(self, config):
        """Chains all together previous utility functions:
        - extracts from an RPM the MD5/SHA256 sums of contained files
        - matches those checksums with the search directories
        - generates the GNU make dependency list file

        Exits the process with status 3 in strict mode when some packaged
        file cannot be matched.  Reads the module-level 'verbose' flag.
        """
        # STEP 1
        rpm_file_checksums = self.get_checksum_tuples_from_rpm(config['abs_input_rpm'])
        # STEP 2
        dict_matching_files = self.match_checksum_tuples_with_fileystem(config['search_dirs'], rpm_file_checksums, config['strict'], config['nameonly_check_for_exec_files'])
        nfound = 0
        packaged_files_notfound = []
        # A packaged file counts as found when its basename has at least one
        # filesystem match; otherwise it is queued for the "not found" report.
        for rpm_file,rpm_checksum,rpm_permission,rpm_is_exec in rpm_file_checksums:
            rpm_fname = os.path.basename(rpm_file)
            if rpm_fname not in dict_matching_files or len(dict_matching_files[rpm_fname])==0:
                packaged_files_notfound.append( (rpm_fname,rpm_checksum) )
            else:
                nfound = nfound+1
        # report all files not found all together at the end:
        if len(config['missed_list_outfile'])>0:
            # generate or remove the list of missed file:
            self.generate_missed_file_list(config['missed_list_outfile'], config['abs_input_rpm'], packaged_files_notfound)
        if len(packaged_files_notfound)>0:
            if verbose or config['strict']:
                dirs = ",".join(config['search_dirs'])
                print("Unable to find {} packaged files inside provided search folders {}. Files packaged and not found (with their SHA256 sum) are:".format(len(packaged_files_notfound), dirs))
                for fname,fname_checksum in packaged_files_notfound:
                    print("  {} {}".format(fname,fname_checksum))
            if config['strict']:
                print("Aborting output generation (strict mode)")
                sys.exit(3)
        if verbose:
            print("Found a total of {} packaged files across all search folders".format(nfound))
        input_rpm = config['input_rpm']
        if config['strip_dirname']:
            input_rpm = os.path.basename(input_rpm)
        # add explicit dependencies provided via command line:
        if len(config['explicit_dependencies'])>0:
            for filepath in config['explicit_dependencies'].split(','):
                filename_only = os.path.basename(filepath)
                if filename_only:
                    if verbose:
                        print("Adding as explicit dependency: {}".format(filepath))
                    if filename_only in dict_matching_files:
                        dict_matching_files[filename_only].add(filepath)
                    else:
                        dict_matching_files[filename_only]=set([filepath])
        # STEP 3: finally generate the dependency listing:
        self.generate_dependency_list(config['output_dep'], input_rpm, dict_matching_files, config['generate_empty_recipes'])
        print("Successfully generated dependency list for '{}' in file '{}' listing {} dependencies ({} packaged files are missing)".format(
            input_rpm, config['output_dep'], len(dict_matching_files.items()), len(packaged_files_notfound)))
##
## MAIN
##
def usage():
    """Provides commandline usage

    Prints the program version, synopsis and option reference to stdout,
    then terminates the process with exit code 0 (also when reached from
    an option-parsing error path).
    """
    version = pkg_resources.require("rpm_make_rules_dependency_lister")[0].version
    print('rpm_make_rules_dependency_lister version {}'.format(version))
    print('Typical usage:')
    print('  %s --input=somefile.rpm [--output=somefile.d] [--search=somefolder1,somefolder2,...]' % sys.argv[0])
    print('Required parameters:')
    print('  -i, --input=<file.rpm>     The RPM file to analyze.')
    print('Main options:')
    print('  -h, --help                 (this help)')
    print('  -v, --verbose              Be verbose.')
    print('      --version              Print version and exit.')
    print('  -o, --output=<file.d>      The output file where the list of RPM dependencies will be written;')
    print('                             if not provided the dependency file is written in the same folder of ')
    print('                             input RPM with .d extension in place of .rpm extension.')
    print('  -s, --strict               Refuse to generate the output dependency file specified by --output if ')
    print('                             some packaged file cannot be found inside the search directories.')
    print('                             See also the --dump-missed-files option as alternative to --strict.')
    print('  -m, --dump-missed-files=<file.csv>')
    print('                             Writes in the provided <file.csv> the list of files packaged in the RPM')
    print('                             that could not be found in the search directories.')
    print('  -d, --search=<dir list>    The directories where RPM packaged files will be searched in (recursively);')
    print('                             this option accepts a comma-separated list of directories;')
    print('                             if not provided the files will be searched in the same folder of input RPM.')
    print('  -e, --explicit-dependencies=<file1,file2,...>')
    print('                             Put the given list of filepaths in the output dependency file as explicit')
    print('                             dependencies of the RPM.')
    print('Advanced options:')
    print('  -x, --match-executable-by-name-only')
    print('                             By default the matching between RPM packaged files and file system files is')
    print('                             based on filename and MD5/SHA256 sums. This flag will loosen the match criteria')
    print('                             to the filename only, but only for files packages as executable. This is useful')
    print('                             in particular for ELF files that may be transformed by RPM macros during packaging.')
    print('  -t, --strip-dirname        In the output dependency file strip the dirname of the provided RPM;')
    print('                             produces a change in output only if an absolute/relative path is provided')
    print('                             to --output option (e.g., if --output=a/b/c/myrpm.rpm is given).')
    print('  -n, --no-empty-recipes')
    print('                             Disable generation of empty recipes for all dependency files.')
    print('                             Note that empty recipes are useful to avoid GNU make errors when a dependency')
    print('                             file is removed from the filesystem.')
    sys.exit(0)
def parse_command_line():
    """Parses the command line

    Returns:
        dict with the configuration gathered from command-line options.

    Exits the process on --help, --version, unknown options, or when the
    required --input option is missing.  Sets the module-level 'verbose'
    flag when -v/--verbose is given.
    """
    try:
        # BUGFIX: short options taking a value need a trailing ':' in the
        # option string ("i:", "o:", "m:", "d:", "e:"); the previous
        # "ihvosmdextn" made getopt treat e.g. "-i file.rpm" as a bare flag
        # and leave "file.rpm" among the remaining arguments.
        opts, remaining_args = getopt.getopt(sys.argv[1:], "i:hvo:sm:d:e:xtn",
                                             ["input=", "help", "verbose", "version", "output=", "strict",
                                              "dump-missed-files=", "search=", "explicit-dependencies=",
                                              "match-executable-by-name-only", "strip-dirname", "no-empty-recipes"])
    except getopt.GetoptError as err:
        # print help information and exit:
        print(str(err))  # will print something like "option -a not recognized"
        usage()  # will exit program
    global verbose
    version = False
    input_rpm = ""
    output_dep = ""
    search_dirs = ""
    missed_list_outfile = ""
    explicit_deps = ""
    strict = False
    strip_dirname = False
    generate_empty_recipes = True
    match_exec_by_filename_only = False
    for o, a in opts:
        if o in ("-i", "--input"):
            input_rpm = a
        elif o in ("-h", "--help"):
            usage()
        elif o in ("-v", "--verbose"):
            verbose = True
        elif o in ("--version",):
            # BUGFIX: the tuple comma matters; a bare ("--version") is a plain
            # string and "in" would then do substring matching.
            version = True
        elif o in ("-s", "--strict"):
            strict = True
        elif o in ("-o", "--output"):
            output_dep = a
        elif o in ("-t", "--strip-dirname"):
            strip_dirname = True
        elif o in ("-d", "--search"):
            search_dirs = a
        elif o in ("-m", "--dump-missed-files"):
            missed_list_outfile = a
        elif o in ("-e", "--explicit-dependencies"):
            explicit_deps = a
        elif o in ("-n", "--no-empty-recipes"):
            generate_empty_recipes = False
        elif o in ("-x", "--match-executable-by-name-only"):
            match_exec_by_filename_only = True
        else:
            assert False, "unhandled option " + o + a
    if version:
        version = pkg_resources.require("rpm_make_rules_dependency_lister")[0].version
        print("{}".format(version))
        sys.exit(0)
    if input_rpm == "":
        print("Please provide --input option (it is a required option)")
        sys.exit(os.EX_USAGE)
    # Normalize the RPM path to an absolute one for checksum extraction.
    abs_input_rpm = input_rpm
    if not os.path.isabs(input_rpm):
        abs_input_rpm = os.path.join(os.getcwd(), input_rpm)
    return {'spec_files': remaining_args,
            'input_rpm' : input_rpm,
            'abs_input_rpm' : abs_input_rpm,
            'output_dep' : output_dep,
            'search_dirs' : search_dirs,
            'strict': strict,
            'strip_dirname': strip_dirname,
            'missed_list_outfile': missed_list_outfile,
            "explicit_dependencies":explicit_deps,
            "generate_empty_recipes":generate_empty_recipes,
            "nameonly_check_for_exec_files":match_exec_by_filename_only }
def main():
if sys.version_info[0] < 3:
# this is useful because on some systems with old versions of the "pip" utility you can download
# this package using pip/Python2 even if this package is tagget with python_requires>=3: only
# recent pip versions respect that tag! In case an old "pip" version was used tell the user:
print('You need to run this with Python 3.')
print('If you installed this package with "pip install" please uninstall it using "pip uninstall"')
print('and reinstall it using "pip3 install" instead.')
sys.exit(1)
config = parse_command_line()
# adjust list of search directories
if len(config['search_dirs'])==0:
# if not provided the search directory is the directory of input file
config['search_dirs'] = [ os.path.dirname(config['abs_input_rpm']) ]
if verbose:
print("No search directory provided, using current directory '{}'".format(os.path.dirname(config['abs_input_rpm'])))
else:
# convert command-separated string to list:
config['search_dirs'] = config['search_dirs'].split(',')
# adjust output file name:
if len(config['output_dep'])==0:
# if not provided the output file lives in the same directory of input RPM
# and is named like that RPM file just with .d extension
input_rpm_dir = os.path.dirname(config['input_rpm'])
input_rpm_filename = os.path.basename(config['input_rpm'])
output_filename = os.path.splitext(input_rpm_filename)[0] + ".d"
config['output_dep'] = | |
-> None:
primitive_step = self._get_primitive_step(primitive_step_id)
primitive_step.hyperparams = hyperparams
primitive_step.pipeline_hyperparams = set(pipeline_hyperparams.keys())
def set_primitive_step_random_seed(self, primitive_step_id: int, random_seed: int) -> None:
primitive_step = self._get_primitive_step(primitive_step_id)
primitive_step.random_seed = random_seed
    def add_subpipeline_step(self, subpipeline_run: 'PipelineRun') -> int:
        """
        Append a step describing an already executed sub-pipeline run.

        Copies every step of ``subpipeline_run`` into a new
        ``PipelineRunSubpipelineStep`` and mirrors each sub-step's final
        status; a failed sub-step also propagates its message into this
        run's overall status.

        Returns the index of the newly appended step.

        Raises:
            UnexpectedValueError: If a sub-step reports an unknown status state.
        """
        # The step's start timestamp was recorded by step_started() under the
        # index this step is about to occupy (len(self.steps)).
        pipeline_run_subpipeline_step = PipelineRunSubpipelineStep(
            self._step_start_timestamps[len(self.steps)], subpipeline_run.random_seed
        )
        for step_id, step in enumerate(subpipeline_run.steps):
            step_json = step.to_json_structure()
            pipeline_run_subpipeline_step.add_step(step_json)
            state = step_json['status']['state']
            message = step_json['status'].get('message', None)
            if state == metadata_base.PipelineRunStatusState.SUCCESS.name:
                pipeline_run_subpipeline_step.set_successful(message)
            elif state == metadata_base.PipelineRunStatusState.FAILURE.name:
                message = 'Failed on subpipeline step {}:\n{}'.format(step_id, message)
                pipeline_run_subpipeline_step.set_failed(message)
                # Surface the failure message on the parent run as well.
                if message is not None and message:
                    self.status['message'] = message
            else:
                raise exceptions.UnexpectedValueError('unknown subpipeline status state: {}'.format(state))
        self.steps.append(pipeline_run_subpipeline_step)
        return len(self.steps) - 1
def add_method_call_to_primitive_step(
self, primitive_step_id: int, method_name: str, *,
runtime_arguments: typing.Dict = None, environment: typing.Dict[str, typing.Any] = None
) -> typing.Tuple[int, int]:
if runtime_arguments is None:
runtime_arguments = {}
# TODO allow runtime arguments not specified in pipeline?
primitive_step = self._get_primitive_step(primitive_step_id)
method_call_id = primitive_step.add_method_call(
method_name, runtime_arguments=runtime_arguments, environment=environment
)
return (primitive_step_id, method_call_id)
def get_method_call_logging_callback(
self, step_and_method_call_id: typing.Tuple[int, int]
) -> typing.Callable:
step_id, method_call_id = step_and_method_call_id
primitive_step = self._get_primitive_step(step_id)
return primitive_step.get_method_call_logging_callback(method_call_id)
def run_started(self) -> None:
self.start = utils.datetime_for_json(utils.now())
def _set_end_timestamp(self) -> None:
self.end = utils.datetime_for_json(utils.now())
def step_started(self, step_id: int) -> None:
self._step_start_timestamps[step_id] = utils.datetime_for_json(utils.now())
def method_call_started(self, step_and_method_call_id: typing.Tuple[int, int]) -> None:
step_id, method_call_id = step_and_method_call_id
primitive_step = self._get_primitive_step(step_id)
primitive_step.set_method_call_start_timestamp(method_call_id)
def set_method_call_result_metadata(
self, step_and_method_call_id: typing.Tuple[int, int],
result: typing.Union[base.CallResult, base.MultiCallResult]
) -> None:
step_id, method_call_id = step_and_method_call_id
primitive_step = self._get_primitive_step(step_id)
primitive_step.set_method_call_result_metadata(method_call_id, result)
def run_successful(self, message: str = None) -> None:
self._set_end_timestamp()
self.status['state'] = metadata_base.PipelineRunStatusState.SUCCESS.name
if message is not None and message:
self.status['message'] = message
def step_successful(self, step_id: int, message: str = None) -> None:
if step_id >= len(self.steps):
raise exceptions.InvalidArgumentValueError('There does not exist a step with id {}'.format(step_id))
self.steps[step_id].set_end_timestamp()
self.steps[step_id].set_successful(message)
def method_call_successful(self, step_and_method_call_id: typing.Tuple[int, int], message: str = None) -> None:
step_id, method_call_id = step_and_method_call_id
primitive_step = self._get_primitive_step(step_id)
primitive_step.set_method_call_end_timestamp(method_call_id)
primitive_step.set_method_call_successful(method_call_id, message)
def run_failed(self, message: str = None) -> None:
self._set_end_timestamp()
self.status['state'] = metadata_base.PipelineRunStatusState.FAILURE.name
if message is not None and message:
self.status['message'] = message
def step_failed(self, step_id: int, message: str = None) -> None:
if step_id >= len(self.steps):
return
self.steps[step_id].set_end_timestamp()
self.steps[step_id].set_failed(message)
def method_call_failed(self, step_and_method_call_id: typing.Tuple[int, int], message: str = None) -> None:
step_id, method_call_id = step_and_method_call_id
if step_id >= len(self.steps):
return
primitive_step = self._get_primitive_step(step_id)
primitive_step.set_method_call_end_timestamp(method_call_id)
primitive_step.set_method_call_failed(method_call_id, message)
def is_failed(self) -> bool:
return self.status['state'] == metadata_base.PipelineRunStatusState.FAILURE.name
def _set_problem(self, problem_description: problem.Problem) -> None:
self.problem = {
'id': problem_description['id'],
'digest': problem_description.get_digest(),
}
def set_fold_group(self, fold_group_id: uuid.UUID, fold: int) -> None:
self.run['fold_group'] = {
'id': str(fold_group_id),
'fold': fold,
}
    def set_data_preparation_pipeline_run(
        self, data_preparation_pipeline_run: 'PipelineRun'
    ) -> None:
        """
        Embed a finished data preparation pipeline run into this run.

        Copies the preparation run's pipeline, steps, status, timestamps and
        random seed under ``run.data_preparation``, takes over its dataset
        references, and propagates a failure of the preparation pipeline to
        this run's overall status.

        Raises:
            InvalidArgumentValueError: If the preparation run has no start or
                end timestamp recorded.
        """
        if data_preparation_pipeline_run.start is None:
            raise exceptions.InvalidArgumentValueError("Data preparation pipeline start timestamp argument not provided.")
        if data_preparation_pipeline_run.end is None:
            raise exceptions.InvalidArgumentValueError("Data preparation pipeline end timestamp argument not provided.")
        self.run['data_preparation'] = {
            'pipeline': data_preparation_pipeline_run.pipeline,
            'steps': [step.to_json_structure() for step in data_preparation_pipeline_run.steps],
            'status': data_preparation_pipeline_run.status,
            'start': data_preparation_pipeline_run.start,
            'end': data_preparation_pipeline_run.end,
            'random_seed': data_preparation_pipeline_run.random_seed,
        }
        # This run now describes the preparation pipeline's datasets.
        self.datasets = data_preparation_pipeline_run.datasets
        if data_preparation_pipeline_run.is_failed():
            message = 'Data preparation pipeline failed:\n{}'.format(
                data_preparation_pipeline_run.status['message']
            )
            self.status['state'] = metadata_base.PipelineRunStatusState.FAILURE.name
            if message is not None and message:
                self.status['message'] = message
    def set_scoring_pipeline_run(
        self, scoring_pipeline_run: 'PipelineRun', scoring_datasets: typing.Sequence[typing.Any] = None,
    ) -> None:
        """
        Embed a finished scoring pipeline run into this run.

        Copies the scoring run's pipeline, steps, status, timestamps and
        random seed under ``run.scoring``, records id/digest of any scoring
        datasets, and propagates a failure of the scoring pipeline to this
        run's overall status.

        Raises:
            InvalidArgumentValueError: If the scoring run has no start or end
                timestamp recorded.
        """
        if scoring_pipeline_run.start is None:
            raise exceptions.InvalidArgumentValueError("Scoring pipeline start timestamp argument not provided.")
        if scoring_pipeline_run.end is None:
            raise exceptions.InvalidArgumentValueError("Scoring pipeline end timestamp argument not provided.")
        self.run['scoring'] = {
            'pipeline': scoring_pipeline_run.pipeline,
            'steps': [step.to_json_structure() for step in scoring_pipeline_run.steps],
            'status': scoring_pipeline_run.status,
            'start': scoring_pipeline_run.start,
            'end': scoring_pipeline_run.end,
            'random_seed': scoring_pipeline_run.random_seed,
        }
        if scoring_datasets:
            # Only the dataset identity (id + digest) is stored, not the data.
            self.run['scoring']['datasets'] = []
            for dataset in scoring_datasets:
                metadata = dataset.metadata.query(())
                self.run['scoring']['datasets'].append({
                    'id': metadata['id'],
                    'digest': metadata['digest'],
                })
        if scoring_pipeline_run.is_failed():
            message = 'Scoring pipeline failed:\n{}'.format(
                scoring_pipeline_run.status['message']
            )
            self.status['state'] = metadata_base.PipelineRunStatusState.FAILURE.name
            if message is not None and message:
                self.status['message'] = message
    def set_scores(
        self, scores: container.DataFrame, metrics: typing.Sequence[typing.Dict],
    ) -> None:
        """
        Record computed scores under ``run.results.scores``.

        Each row of ``scores`` contributes one JSON entry containing the
        metric description (looked up in ``metrics``), its value, and — when
        the DataFrame has a "normalized" column — the normalized value.

        Raises:
            InvalidStateError: If this is not a standard pipeline, or scores
                were already set.
        """
        if not self.is_standard_pipeline:
            raise exceptions.InvalidStateError("Setting scores for non-standard pipelines is not allowed.")
        json_scores = []
        # The "normalized" column is optional; include it only when present.
        if 'normalized' in scores.columns:
            columns = ['metric', 'value', 'normalized']
        else:
            columns = ['metric', 'value']
        for row in scores.loc[:, columns].itertuples(index=False, name=None):
            metric, value = row[:2]
            json_scores.append(
                {
                    # TODO: Why is "deepcopy" needed here?
                    'metric': copy.deepcopy(self._get_metric_description(metric, metrics)),
                    'value': float(value),
                },
            )
            # A 3-tuple row means the optional normalized score is present.
            if len(row) == 3:
                json_scores[-1]['normalized'] = float(row[2])
        if not json_scores:
            return
        if 'results' not in self.run:
            self.run['results'] = {}
        if 'scores' not in self.run['results']:
            self.run['results']['scores'] = json_scores
        else:
            raise exceptions.InvalidStateError("Scores already set for pipeline run.")
def _get_metric_description(self, metric: str, performance_metrics: typing.Sequence[typing.Dict]) -> typing.Dict:
"""
Returns a metric description from a list of them, given metric.
Parameters
----------
metric:
A metric name.
performance_metrics:
A list of performance metric descriptions requested for scoring.
Returns
-------
A metric description.
"""
for performance_metric in performance_metrics:
if performance_metric['metric'] == metric:
metric_description = {
'metric': performance_metric['metric'].name,
}
if performance_metric.get('params', {}):
metric_description['params'] = performance_metric['params']
return metric_description
return {
'metric': metric,
}
    def set_predictions(self, predictions: container.DataFrame) -> None:
        """
        Record pipeline predictions under ``run.results.predictions``.

        Predictions are stored column-wise as a ``header`` list of column
        names and a ``values`` list of per-column JSON-compatible value lists.
        Conversion problems are logged and silently skipped rather than
        failing the run.

        Raises:
            InvalidStateError: If this is not a standard pipeline, or
                predictions were already set.
        """
        if not self.is_standard_pipeline:
            raise exceptions.InvalidStateError("Setting predictions for non-standard pipelines is not allowed.")
        if not isinstance(predictions, container.DataFrame):
            logger.warning("Unable to set predictions for pipeline run because predictions are not a DataFrame.")
            return
        try:
            json_predictions: typing.Dict[str, typing.List] = {
                'header': [],
                'values': [],
            }
            column_names = []
            for column_index in range(len(predictions.columns)):
                # We use column name from the DataFrame if metadata does not have it. This allows a bit more compatibility.
                column_names.append(predictions.metadata.query_column(column_index).get('name', predictions.columns[column_index]))
                # "tolist" converts values to Python values and does not keep them as numpy.float64 or other special types.
                json_predictions['values'].append(utils.to_json_structure(predictions.iloc[:, column_index].tolist()))
            json_predictions['header'] += column_names
        except Exception as error:
            logger.warning("Unable to convert predictions to JSON structure for pipeline run.", exc_info=error)
            return
        if 'results' not in self.run:
            self.run['results'] = {}
        if 'predictions' not in self.run['results']:
            self.run['results']['predictions'] = json_predictions
        else:
            raise exceptions.InvalidStateError("Predictions already set for pipeline run.")
def get_id(self) -> str:
return self._to_json_structure()['id']
@classmethod
def json_structure_equals(cls, pipeline_run1: typing.Dict, pipeline_run2: typing.Dict) -> bool:
"""
Checks whether two pipeline runs in a JSON structure are equal.
This ignores the pipeline run id and all timestamps.
"""
if not isinstance(pipeline_run1, collections.Mapping) or not isinstance(pipeline_run2, collections.Mapping):
raise exceptions.InvalidArgumentTypeError("Pipeline run arguments must be dicts.")
return utils.json_structure_equals(pipeline_run1, pipeline_run2, {'id', 'start', 'end', 'environment', 'logging'})
class RuntimeEnvironment(dict):
def __init__(
self, *,
worker_id: str = None,
cpu_resources: typing.Dict[str, typing.Any] = None,
memory_resources: typing.Dict[str, typing.Any] = None,
gpu_resources: typing.Dict[str, typing.Any] = None,
reference_benchmarks: typing.Sequence[str] = None,
reference_engine_version: str = None,
engine_version: str = None,
base_docker_image: typing.Dict[str, str] = None,
docker_image: typing.Dict[str, str] = None,
) -> None:
"""
Create an instance of the runtime environment description in which a pipeline is run.
All values stored in an instance should be JSON compatible.
Parameters
----------
worker_id:
A globally unique identifier for the machine on which the runtime is running.
The idea is that multiple runs on the same system can be grouped together.
If not provided, `uuid.getnode()` is used to obtain an identifier.
cpu_resources:
A description of the CPU resources available in this environment.
memory_resources:
A description of the memory resources available in this environment.
gpu_resources:
A description of the GPU resources available in this environment.
reference_benchmarks:
A list of ids of standard and optional additional benchmarks which were run in the same or
equivalent RuntimeEnvironment. The timing characteristics of these benchmarks can be
expected to be the same as anything timed in this RuntimeEnvironment.
reference_engine_version:
A git commit hash or version number for the reference engine used. If subclassing the
reference engine, list it here.
engine_version:
A git commit hash or version number for the engine used. This is primarily useful for the
author. If using the reference engine directly, list its git commit hash or version number
here as well as in the reference_engine_version.
base_docker_image:
If the engine was run in a public or known docker container, specify the base docker image
description here.
docker_image:
If the engine was run in a public or known docker container, specify the actual docker
image description here. This is primarily useful for the author.
"""
super().__init__()
if worker_id is None:
worker_id = self._get_worker_id(environment_variables.D3M_WORKER_ID)
self['worker_id'] = worker_id
resources = {}
if cpu_resources is None:
cpu_resources = self._get_cpu_resources()
if cpu_resources is not None:
resources['cpu'] = cpu_resources
if memory_resources is None:
memory_resources = self._get_memory_resources()
if memory_resources is not None:
resources['memory'] = memory_resources
if gpu_resources is None:
gpu_resources | |
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
# pylint: disable=protected-access
import argparse
from collections import defaultdict
from knack.util import CLIError
class AddFusionAlertRule(argparse.Action):
    """Build a Fusion alert rule dict from KEY=VALUE pairs on the command line."""

    # Recognized (lower-cased) keys; every one takes a single value.
    _SCALAR_KEYS = {
        'alert-rule-template-name': 'alert_rule_template_name',
        'enabled': 'enabled',
        'etag': 'etag',
    }

    def __call__(self, parser, namespace, values, option_string=None):
        namespace.fusion_alert_rule = self.get_action(values, option_string)

    def get_action(self, values, option_string):  # pylint: disable=no-self-use
        try:
            grouped = defaultdict(list)
            for pair in values:
                key, value = pair.split('=', 1)
                grouped[key].append(value)
            grouped = dict(grouped)
        except ValueError:
            raise CLIError('usage error: {} [KEY=VALUE ...]'.format(option_string))
        rule = {}
        # Unrecognized keys are silently ignored, matching the generated code.
        for key, collected in grouped.items():
            dest = self._SCALAR_KEYS.get(key.lower())
            if dest is not None:
                rule[dest] = collected[0]
        rule['kind'] = 'Fusion'
        return rule
class AddMicrosoftSecurityIncidentCreationAlertRule(argparse.Action):
    """Build a MicrosoftSecurityIncidentCreation alert rule dict from KEY=VALUE pairs."""

    # Recognized keys that take a single value, mapped to their destination field.
    _SCALAR_KEYS = {
        'product-filter': 'product_filter',
        'alert-rule-template-name': 'alert_rule_template_name',
        'description': 'description',
        'display-name': 'display_name',
        'enabled': 'enabled',
        'etag': 'etag',
    }
    # Recognized keys that may repeat; all their values are kept as a list.
    _LIST_KEYS = {
        'display-names-filter': 'display_names_filter',
        'display-names-exclude-filter': 'display_names_exclude_filter',
        'severities-filter': 'severities_filter',
    }

    def __call__(self, parser, namespace, values, option_string=None):
        namespace.microsoft_security_incident_creation_alert_rule = self.get_action(values, option_string)

    def get_action(self, values, option_string):  # pylint: disable=no-self-use
        try:
            grouped = defaultdict(list)
            for pair in values:
                key, value = pair.split('=', 1)
                grouped[key].append(value)
            grouped = dict(grouped)
        except ValueError:
            raise CLIError('usage error: {} [KEY=VALUE ...]'.format(option_string))
        rule = {}
        for key, collected in grouped.items():
            lowered = key.lower()
            if lowered in self._SCALAR_KEYS:
                rule[self._SCALAR_KEYS[lowered]] = collected[0]
            elif lowered in self._LIST_KEYS:
                rule[self._LIST_KEYS[lowered]] = collected
        rule['kind'] = 'MicrosoftSecurityIncidentCreation'
        return rule
class AddScheduledAlertRule(argparse.Action):
    """Build a Scheduled alert rule dict from KEY=VALUE pairs on the command line."""

    # Recognized keys that take a single value, mapped to their destination field.
    _SCALAR_KEYS = {
        'query': 'query',
        'query-frequency': 'query_frequency',
        'query-period': 'query_period',
        'severity': 'severity',
        'trigger-operator': 'trigger_operator',
        'trigger-threshold': 'trigger_threshold',
        'alert-rule-template-name': 'alert_rule_template_name',
        'description': 'description',
        'display-name': 'display_name',
        'enabled': 'enabled',
        'suppression-duration': 'suppression_duration',
        'suppression-enabled': 'suppression_enabled',
        'etag': 'etag',
    }
    # Recognized keys that may repeat; all their values are kept as a list.
    _LIST_KEYS = {
        'tactics': 'tactics',
    }

    def __call__(self, parser, namespace, values, option_string=None):
        namespace.scheduled_alert_rule = self.get_action(values, option_string)

    def get_action(self, values, option_string):  # pylint: disable=no-self-use
        try:
            grouped = defaultdict(list)
            for pair in values:
                key, value = pair.split('=', 1)
                grouped[key].append(value)
            grouped = dict(grouped)
        except ValueError:
            raise CLIError('usage error: {} [KEY=VALUE ...]'.format(option_string))
        rule = {}
        for key, collected in grouped.items():
            lowered = key.lower()
            if lowered in self._SCALAR_KEYS:
                rule[self._SCALAR_KEYS[lowered]] = collected[0]
            elif lowered in self._LIST_KEYS:
                rule[self._LIST_KEYS[lowered]] = collected
        rule['kind'] = 'Scheduled'
        return rule
class AddIncidentInfo(argparse.Action):
    """Build an incident info dict from KEY=VALUE pairs on the command line."""

    # Recognized (lower-cased) keys; every one takes a single value.
    # NOTE: unlike the rule/connector actions, no 'kind' entry is added.
    _SCALAR_KEYS = {
        'incident-id': 'incident_id',
        'severity': 'severity',
        'title': 'title',
        'relation-name': 'relation_name',
    }

    def __call__(self, parser, namespace, values, option_string=None):
        namespace.incident_info = self.get_action(values, option_string)

    def get_action(self, values, option_string):  # pylint: disable=no-self-use
        try:
            grouped = defaultdict(list)
            for pair in values:
                key, value = pair.split('=', 1)
                grouped[key].append(value)
            grouped = dict(grouped)
        except ValueError:
            raise CLIError('usage error: {} [KEY=VALUE ...]'.format(option_string))
        info = {}
        for key, collected in grouped.items():
            dest = self._SCALAR_KEYS.get(key.lower())
            if dest is not None:
                info[dest] = collected[0]
        return info
class AddAadDataConnector(argparse.Action):
    """Build an AzureActiveDirectory data connector dict from KEY=VALUE pairs."""

    # Recognized (lower-cased) keys; every one takes a single value.
    _SCALAR_KEYS = {
        'tenant-id': 'tenant_id',
        'state': 'state',
        'etag': 'etag',
    }

    def __call__(self, parser, namespace, values, option_string=None):
        namespace.aad_data_connector = self.get_action(values, option_string)

    def get_action(self, values, option_string):  # pylint: disable=no-self-use
        try:
            grouped = defaultdict(list)
            for pair in values:
                key, value = pair.split('=', 1)
                grouped[key].append(value)
            grouped = dict(grouped)
        except ValueError:
            raise CLIError('usage error: {} [KEY=VALUE ...]'.format(option_string))
        connector = {}
        for key, collected in grouped.items():
            dest = self._SCALAR_KEYS.get(key.lower())
            if dest is not None:
                connector[dest] = collected[0]
        connector['kind'] = 'AzureActiveDirectory'
        return connector
class AddAatpDataConnector(argparse.Action):
    """Build an AzureAdvancedThreatProtection data connector dict from KEY=VALUE pairs."""

    # Recognized (lower-cased) keys; every one takes a single value.
    _SCALAR_KEYS = {
        'tenant-id': 'tenant_id',
        'state': 'state',
        'etag': 'etag',
    }

    def __call__(self, parser, namespace, values, option_string=None):
        namespace.aatp_data_connector = self.get_action(values, option_string)

    def get_action(self, values, option_string):  # pylint: disable=no-self-use
        try:
            grouped = defaultdict(list)
            for pair in values:
                key, value = pair.split('=', 1)
                grouped[key].append(value)
            grouped = dict(grouped)
        except ValueError:
            raise CLIError('usage error: {} [KEY=VALUE ...]'.format(option_string))
        connector = {}
        for key, collected in grouped.items():
            dest = self._SCALAR_KEYS.get(key.lower())
            if dest is not None:
                connector[dest] = collected[0]
        connector['kind'] = 'AzureAdvancedThreatProtection'
        return connector
class AddAscDataConnector(argparse.Action):
    """Build an AzureSecurityCenter data connector dict from KEY=VALUE pairs."""

    # Recognized (lower-cased) keys; every one takes a single value.
    _SCALAR_KEYS = {
        'subscription-id': 'subscription_id',
        'state': 'state',
        'etag': 'etag',
    }

    def __call__(self, parser, namespace, values, option_string=None):
        namespace.asc_data_connector = self.get_action(values, option_string)

    def get_action(self, values, option_string):  # pylint: disable=no-self-use
        try:
            grouped = defaultdict(list)
            for pair in values:
                key, value = pair.split('=', 1)
                grouped[key].append(value)
            grouped = dict(grouped)
        except ValueError:
            raise CLIError('usage error: {} [KEY=VALUE ...]'.format(option_string))
        connector = {}
        for key, collected in grouped.items():
            dest = self._SCALAR_KEYS.get(key.lower())
            if dest is not None:
                connector[dest] = collected[0]
        connector['kind'] = 'AzureSecurityCenter'
        return connector
class AddAwsCloudTrailDataConnector(argparse.Action):
    """Build an AmazonWebServicesCloudTrail data connector dict from KEY=VALUE pairs."""

    # Recognized (lower-cased) keys; every one takes a single value.
    _SCALAR_KEYS = {
        'aws-role-arn': 'aws_role_arn',
        'state': 'state',
        'etag': 'etag',
    }

    def __call__(self, parser, namespace, values, option_string=None):
        namespace.aws_cloud_trail_data_connector = self.get_action(values, option_string)

    def get_action(self, values, option_string):  # pylint: disable=no-self-use
        try:
            grouped = defaultdict(list)
            for pair in values:
                key, value = pair.split('=', 1)
                grouped[key].append(value)
            grouped = dict(grouped)
        except ValueError:
            raise CLIError('usage error: {} [KEY=VALUE ...]'.format(option_string))
        connector = {}
        for key, collected in grouped.items():
            dest = self._SCALAR_KEYS.get(key.lower())
            if dest is not None:
                connector[dest] = collected[0]
        connector['kind'] = 'AmazonWebServicesCloudTrail'
        return connector
class AddMcasDataConnector(argparse.Action):
    """Build a MicrosoftCloudAppSecurity data connector dict from KEY=VALUE pairs."""

    # Recognized (lower-cased) keys; every one takes a single value.
    _SCALAR_KEYS = {
        'tenant-id': 'tenant_id',
        'state-data-types-alerts-state': 'state_data_types_alerts_state',
        'state-data-types-discovery-logs-state': 'state_data_types_discovery_logs_state',
        'etag': 'etag',
    }

    def __call__(self, parser, namespace, values, option_string=None):
        namespace.mcas_data_connector = self.get_action(values, option_string)

    def get_action(self, values, option_string):  # pylint: disable=no-self-use
        try:
            grouped = defaultdict(list)
            for pair in values:
                key, value = pair.split('=', 1)
                grouped[key].append(value)
            grouped = dict(grouped)
        except ValueError:
            raise CLIError('usage error: {} [KEY=VALUE ...]'.format(option_string))
        connector = {}
        for key, collected in grouped.items():
            dest = self._SCALAR_KEYS.get(key.lower())
            if dest is not None:
                connector[dest] = collected[0]
        connector['kind'] = 'MicrosoftCloudAppSecurity'
        return connector
class AddMdatpDataConnector(argparse.Action):
    """Build a MicrosoftDefenderAdvancedThreatProtection data connector dict from KEY=VALUE pairs."""

    # Recognized (lower-cased) keys; every one takes a single value.
    _SCALAR_KEYS = {
        'tenant-id': 'tenant_id',
        'state': 'state',
        'etag': 'etag',
    }

    def __call__(self, parser, namespace, values, option_string=None):
        namespace.mdatp_data_connector = self.get_action(values, option_string)

    def get_action(self, values, option_string):  # pylint: disable=no-self-use
        try:
            grouped = defaultdict(list)
            for pair in values:
                key, value = pair.split('=', 1)
                grouped[key].append(value)
            grouped = dict(grouped)
        except ValueError:
            raise CLIError('usage error: {} [KEY=VALUE ...]'.format(option_string))
        connector = {}
        for key, collected in grouped.items():
            dest = self._SCALAR_KEYS.get(key.lower())
            if dest is not None:
                connector[dest] = collected[0]
        connector['kind'] = 'MicrosoftDefenderAdvancedThreatProtection'
        return connector
class AddOfficeDataConnector(argparse.Action):
    """Build an Office365 data connector dict from KEY=VALUE pairs on the command line."""

    def __call__(self, parser, namespace, values, option_string=None):
        action = self.get_action(values, option_string)
        namespace.office_data_connector = action

    def get_action(self, values, option_string):  # pylint: disable=no-self-use
        """Parse KEY=VALUE pairs into an Office365 connector dict.

        SharePoint and Exchange data types start out 'Disabled' and are turned
        'Enabled' by the mere presence of the corresponding *-enabled key (the
        key's value is ignored).

        Raises:
            CLIError: When an item is not in KEY=VALUE form.
        """
        try:
            properties = defaultdict(list)
            for (k, v) in (x.split('=', 1) for x in values):
                properties[k].append(v)
            properties = dict(properties)
        except ValueError:
            raise CLIError('usage error: {} [KEY=VALUE ...]'.format(option_string))
        d = {
            'dataTypes': {
                'sharePoint': {'state': 'Disabled'},
                'exchange': {'state': 'Disabled'}
            }
        }
        for k in properties:
            kl = k.lower()
            v = properties[k]
            if kl == 'tenant-id':
                d['tenantId'] = v[0]
            elif kl == 'sharepoint-enabled':
                d['dataTypes']['sharePoint']['state'] = 'Enabled'
            elif kl == 'exchange-enabled':
                d['dataTypes']['exchange']['state'] = 'Enabled'
            elif kl == 'etag':
                d['etag'] = v[0]
        d['kind'] = 'Office365'
        # BUGFIX: removed stray debug print(d) that leaked the parsed payload to stdout.
        return d
class AddTiDataConnector(argparse.Action):
    """Assemble the Threat Intelligence data-connector payload from repeated
    ``KEY=VALUE`` arguments and attach it to the namespace."""

    def __call__(self, parser, namespace, values, option_string=None):
        namespace.ti_data_connector = self.get_action(values, option_string)

    def get_action(self, values, option_string):  # pylint: disable=no-self-use
        """Turn ``KEY=VALUE`` strings into the request body; unknown keys are
        ignored, malformed pairs raise CLIError."""
        try:
            accumulated = defaultdict(list)
            for entry in values:
                k, v = entry.split('=', 1)
                accumulated[k].append(v)
            accumulated = dict(accumulated)
        except ValueError:
            raise CLIError('usage error: {} [KEY=VALUE ...]'.format(option_string))
        connector = {}
        for raw, vals in accumulated.items():
            key = raw.lower()
            if key == 'tenant-id':
                connector['tenant_id'] = vals[0]
            elif key == 'state':
                connector['state'] = vals[0]
            elif key == 'etag':
                connector['etag'] = vals[0]
        connector['kind'] = 'ThreatIntelligence'
        return connector
class AddLabels(argparse._AppendAction):
def __call__(self, parser, namespace, values, option_string=None):
action = self.get_action(values, option_string)
super(AddLabels, self).__call__(parser, namespace, action, option_string)
def get_action(self, values, option_string): # pylint: disable=no-self-use
try:
properties = defaultdict(list)
for (k, v) in (x.split('=', 1) for x in values):
properties[k].append(v)
properties = dict(properties)
except ValueError:
raise CLIError('usage | |
# torchuq/transform/conformal.py
import pandas as pd
import numpy as np
import itertools, math
from matplotlib import pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim import lr_scheduler
import numpy as np
from functools import partial
import matplotlib.pyplot as plt
import os, sys, shutil, copy, time, random
from .basic import Calibrator, ConcatDistribution, DistributionBase
from .utils import BisectionInverse
from ..models.flow import NafFlow
from .. import _implicit_quantiles, _get_prediction_device, _move_prediction_device, _parse_name, _get_prediction_batch_shape
from ..evaluate.distribution import compute_std, compute_mean_std
class DistributionConformal:
    """Abstract base class for a distribution that arises from conformal calibration.

    This class behaves like torch.distributions.Distribution, and supports the
    cdf, icdf and rsample functions.

    Args:
        val_predictions: a set of validation predictions, the type must be compatible with score_func
        val_labels: array [validation_batch_shape], a batch of labels
        test_predictions: a set of test predictions, the type must be compatible with score_func
        score_func: non-conformity score function. A function that takes as input a batched prediction q,
            and an array v of values with shape [n_evaluations, batch_shape], and returns an array s of
            shape [n_evaluations, batch_shape] where s_{ij} is the score of v_{ij} under prediction q_j
        iscore_func: inverse non-conformity score function: a function that takes as input a batched
            prediction q and an array s of scores with shape [n_evaluations, batch_shape], and returns
            an array v of shape [n_evaluations, batch_shape] which is the inverse of score_func
            (i.e. iscore_func(q, score_func(q, v)) = v)
    """

    def __init__(self, val_predictions, val_labels, test_predictions, score_func, iscore_func):
        self.score = score_func
        self.iscore = iscore_func
        self.test_predictions = test_predictions
        self.device = _get_prediction_device(test_predictions)
        with torch.no_grad():
            # A hack to find out the number of distributions: score a dummy value and read off the batch dim.
            self.batch_shape = self.score(test_predictions, torch.zeros(1, 1, device=self.device)).shape[1:2]
        # Compute the scores for all the predictions in the validation set and sort them from small to large
        val_scores = self.score(val_predictions, val_labels.view(1, -1)).flatten()
        # TODO: to avoid numerical instability neighboring values should not be too similar;
        # maybe add a tiny bit of noise.
        val_scores = val_scores.sort()[0]
        # Prepend the 0 quantile and append the 1 quantile for convenient handling of boundary conditions
        self.val_scores = torch.cat([val_scores[:1] - (val_scores[1:] - val_scores[:-1]).mean(dim=0, keepdims=True),
                                     val_scores,
                                     val_scores[-1:] + (val_scores[1:] - val_scores[:-1]).mean(dim=0, keepdims=True)])
        # self.test_std = compute_std(self) + 1e-10  # This is the last thing that can be called
        if iscore_func == _conformal_iscore_ensemble:
            # The ensemble inverse score needs an explicit bisection search range;
            # widen the observed label range by 100% on each side.
            min_label = val_labels.min()
            max_label = val_labels.max()
            min_search = min_label - (max_label - min_label)
            max_search = max_label + (max_label - min_label)
            self.iscore = partial(iscore_func, min_search=min_search, max_search=max_search)

    def to(self, device):
        """Move this class and all the tensors it owns to a specified device.

        Args:
            device (torch.device): the device to move this class to.
        """
        if self.device != device:
            self.device = device
            self.val_scores = self.val_scores.to(device)
            self.test_predictions = _move_prediction_device(self.test_predictions, device)

    def rsample(self, sample_shape=torch.Size([])):
        """Generate a sample_shape shaped (batch of) sample.

        Args:
            sample_shape (torch.Size): the shape of the samples.

        Returns:
            tensor: the drawn samples.
        """
        rand_vals = torch.rand(list(sample_shape) + [self.batch_shape[0]])
        return self.icdf(rand_vals.view(-1, self.batch_shape[0])).view(rand_vals.shape)

    def sample(self, sample_shape=torch.Size([])):
        """Generate a sample_shape shaped (batch of) sample (alias of rsample).

        Args:
            sample_shape (torch.Size): the shape of the samples.

        Returns:
            tensor: the drawn samples.
        """
        return self.rsample(sample_shape)

    def sample_n(self, n):
        """Generate n batches of samples.

        Args:
            n (int): the number of batches of samples.

        Returns:
            tensor: the drawn samples.
        """
        return self.rsample(torch.Size([n]))

    def log_prob(self, value):
        """Return the log of the probability density evaluated at value.

        The density is approximated by a finite difference of the CDF; this is
        numerically unstable and will be improved in the future.

        Args:
            value (tensor): the values at which to evaluate the density.

        Returns:
            tensor: the evaluated log_prob.
        """
        eps = 1e-4
        return torch.log(self.cdf(value + eps) - self.cdf(value) + 1e-10) - math.log(eps)

    def shape_inference(self, value):
        """Change the shape of the input into the canonical shape [n_evaluations, batch_shape].

        Returns:
            tuple: the reshaped value and the shape the final result should be viewed as.
        """
        # Enumerate all the valid input shapes for value
        if isinstance(value, (int, float)):
            # Fix: plain Python scalars have no .view; promote to a tensor first
            # (the original called value.view directly, which raised AttributeError).
            scalar = torch.tensor(float(value), device=self.device)
            return scalar.view(1, 1).repeat(1, self.batch_shape[0]), self.batch_shape[0]
        elif len(value.shape) == 1 and value.shape[0] == 1:  # 1-D values must have length 1 or batch_shape[0]
            return value.view(1, 1).repeat(1, self.batch_shape[0]), self.batch_shape[0]
        elif len(value.shape) == 1 and value.shape[0] == self.batch_shape[0]:
            return value.view(1, -1), self.batch_shape[0]
        elif len(value.shape) == 2 and value.shape[1] == 1:
            return value.repeat(1, self.batch_shape[0]), [len(value), self.batch_shape[0]]
        elif len(value.shape) == 2 and value.shape[1] == self.batch_shape[0]:
            return value, [len(value), self.batch_shape[0]]
        else:
            assert False, "Shape [%s] invalid" % ', '.join([str(shape) for shape in value.shape])
class DistributionConformalLinear(DistributionConformal):
    """Conformal-calibrated distribution that uses linear interpolation
    between the sorted validation scores, giving a piecewise-linear CDF."""

    def __init__(self, val_predictions, val_labels, test_predictions, score_func, iscore_func, verbose=False):
        # All heavy lifting (score computation, sorting) happens in the base class.
        super(DistributionConformalLinear, self).__init__(val_predictions, val_labels, test_predictions, score_func, iscore_func)

    def cdf(self, value):
        """The CDF at value. This function is differentiable.

        Args:
            value (tensor): an array of shape [n_evaluations, batch_shape] or shape [batch_size].

        Returns:
            tensor: the value of CDF at the queried values.
        """
        # First perform automatic shape induction and convert value into an array of shape [n_evaluations, batch_shape]
        value, out_shape = self.shape_inference(value)
        # self.to(value.device) # Move all assets in this class to the same device as value to avoid device mismatch error
        # Move value to the device of test_predictions to avoid device mismatch error
        out_device = value.device
        value = value.to(self.device)
        # Non-conformity score
        scores = self.score(self.test_predictions, value)
        # Compare the non-conformity score to the validation set non-conformity scores.
        # Each clamped term contributes the fraction of one inter-quantile segment that
        # `scores` has crossed; summing them counts how far along the sorted scores we are.
        quantiles = self.val_scores.view(1, 1, -1)
        comparison = (scores.unsqueeze(-1) - quantiles[:, :, :-1]) / (quantiles[:, :, 1:] - quantiles[:, :, :-1] + 1e-20)
        cdf = comparison.clamp(min=0, max=1).sum(dim=-1) / (len(self.val_scores) - 1)
        return cdf.view(out_shape).to(out_device)

    def icdf(self, value):
        """Get the inverse CDF. This function is differentiable.

        Args:
            value (tensor): an array of shape [n_evaluations, batch_shape] or shape [batch_shape], each entry should take values in [0, 1].
                Supports automatic shape induction, e.g. if cdf has shape [n_evaluations, 1] it will automatically be converted to shape [n_evaluations, batch_shape].

        Returns:
            tensor: the value of inverse CDF function at the queried cdf values.
        """
        cdf, out_shape = self.shape_inference(value)  # Convert cdf to have shape [n_evaluations, batch_shape]
        # self.to(cdf.device) # Move all assets in this class to the same device as value to avoid device mismatch error
        # Move cdf to the device of test_predictions to avoid device mismatch error
        out_device = cdf.device
        cdf = cdf.to(self.device)
        # out_device = cdf.device
        # cdf = cdf.to(self.test_predictions)
        # Map the cdf value onto the (fractional) index of the sorted validation scores,
        # then linearly interpolate between the two neighbouring scores.
        quantiles = cdf * (len(self.val_scores) - 1)
        ratio = torch.ceil(quantiles) - quantiles
        target_score = self.val_scores[torch.floor(quantiles).type(torch.long)] * ratio + \
            self.val_scores[torch.ceil(quantiles).type(torch.long)] * (1 - ratio)
        # Invert the non-conformity score to get back a label-space value.
        value = self.iscore(self.test_predictions, target_score)
        return value.view(out_shape).to(out_device)  # Output on the original device
class DistributionConformalRandom(DistributionConformal):
""" Use randomization for conformal calibration.
This distribution does not have a differentiable CDF (i.e. it does not have a density), so the behavior of functions such as log_prob and plot_density are undefined.
"""
def __init__(self, val_predictions, val_labels, test_predictions, score_func, iscore_func, verbose=False):
super(DistributionConformalRandom, self).__init__(val_predictions, val_labels, test_predictions, score_func, iscore_func)
self.rand_cdf = torch.rand(1, _get_prediction_batch_shape(test_predictions), device=self.device)
# Random interpolation is special, the ICDF could be infinite
self.val_scores[0] = -float('inf')
self.val_scores[-1] = float('inf')
def cdf(self, value):
""" The CDF at value. This function is NOT differentiable
Args:
value (tensor): an array of shape [n_evaluations, batch_shape] or shape [batch_size].
Returns:
tensor: the value of CDF at the queried values.
"""
# First perform automatic shape induction and convert value into an array of shape [n_evaluations, batch_shape]
value, out_shape = self.shape_inference(value)
# Move value to the device of test_predictions to avoid device mismatch error
out_device = value.device
value = value.to(self.device)
# Non-conformity score
scores = self.score(self.test_predictions, value)
| |
# -*- coding: utf-8 -*-
# parser.py
"""Parses command-line options"""
from __future__ import print_function
import argparse
import os
import re
import sys
from copy import deepcopy
from .print_tools import print_date
from ..notes import RESOURCE_LIST
__author__ = '<NAME>, PhD'
__email__ = '<EMAIL>, <EMAIL>'
__date__ = '2016-06-10'
__updated__ = '2018-02-14'
# Accepted verbosity levels (0-3) for -v/--verbose style options.
verbosity_range = range(4)
# Formats that may be supplied as multiple files forming one segmentation.
multi_file_formats = ['stl', 'map', 'mrc', 'rec']
# Formats accepted by the 'prep binmap' utility (CCP4-style volume maps).
prepable_file_formats = ['mrc', 'map', 'rec']
# Formats accepted by the 'prep transform' (rescaling) utility.
rescalable_file_formats = ['stl']
def add_args(parser, the_arg):
    """Attach a pre-defined argument specification to ``parser``.

    ``the_arg`` is a dict with keys ``args`` (the positional arguments for
    ``add_argument``) and ``kwargs`` (its keyword arguments). Structuring
    arguments this way lets a single definition be shared by several
    subparsers instead of being redefined in each one.

    :param parser: a parser
    :type parser: ``argparse.Parser``
    :param dict the_arg: the argument specified as a dict with keys 'args' and 'kwargs'
    :return: the created action (the return value of ``add_argument``)
    """
    positional = the_arg['args']
    keyword = the_arg['kwargs']
    return parser.add_argument(*positional, **keyword)
# Top-level CLI parser; every tool is registered as a subcommand below.
Parser = argparse.ArgumentParser(
    prog='sff', description="The EMDB-SFF Toolkit (sfftk)")
Parser.add_argument(
    '-V', '--version',
    action='store_true',
    default=False,
    help='show the sfftk version string and the supported EMDB-SFF version string',
)
# Container for the tool subcommands (prep, convert, config, ...).
subparsers = Parser.add_subparsers(
    title='Tools',
    dest='subcommand',
    description='The EMDB-SFF Toolkit (sfftk) provides the following tools:',
    metavar="EMDB-SFF tools"
)
# =========================================================================
# common arguments
# =========================================================================
# Reusable argument specifications, consumed via add_args() or by unpacking
# the 'args'/'kwargs' entries directly into parser.add_argument().
sff_file = {
    'args': ['sff_file'],
    'kwargs': {
        'help': 'path (rel/abs) to an EMDB-SFF file',
    }
}
complexes = {
    'args': ['-C', '--complexes'],
    'kwargs': {
        'help': "PDBe accession for complexes separated by commas without spaces \
between e.g. 'comp1,comp2,...,compN'",
    }
}
complex_id = {
    'args': ['-c', '--complex-id'],
    'kwargs': {
        'type': int,
        'help': "the complex ID as shown with the 'list' command",
    }
}
description = {
    'args': ['-d', '--description'],
    'kwargs': {
        'help': 'the description'
    }
}
details = {
    'args': ['-D', '--details'],
    'kwargs': {
        'help': "populates <details>...</details> in the XML file"
    }
}
external_ref_id = {
    'args': ['-e', '--external-ref-id'],
    'kwargs': {
        'type': int,
        'help': "the external reference ID as shown with the 'list' command",
    }
}
external_ref = {
    'args': ['-E', '--external-ref'],
    'kwargs': {
        'nargs': 3,
        'help': """An external reference consists of three components: the
name of the external reference, a URL to the particular external reference
and the accession. If you use the sff notes search utility these will
correspond to the ontology_name, IRI and short_form. The following is a list
of valid external references: {}. You can also specify multiple external
reference arguments e.g. sff notes add -i <int> -E r11 r12 r13 -E r21 r22 r23
file.json""".format(', '.join(RESOURCE_LIST.keys())),
    }
}
# file_path = {
#     'args': ['-F', '--file-path'],
#     'kwargs': {
#         'default': None,
#         'help': "file path [default: '.']"
#     }
# }
FORMAT_LIST = [
    ('sff', 'XML'),
    ('hff', 'HDF5'),
    ('json', 'JSON'),
]
format_ = {
    'args': ['-f', '--format'],
    'kwargs': {
        # 'default': 'sff',
        # NOTE(review): help text advertises [default: sff] but no default is
        # set here — presumably applied downstream; confirm.
        'help': "output file format; valid options are: {} [default: sff]".format(
            ", ".join(map(lambda x: "{} ({})".format(x[0], x[1]), FORMAT_LIST))
        ),
    }
}
header = {
    'args': ['-H', '--header'],
    'kwargs': {
        'default': False,
        'action': 'store_true',
        'help': 'show EMDB-SFF header (global) attributes [default: False]'
    }
}
segment_id = {
    'args': ['-i', '--segment-id'],
    'kwargs': {
        'help': 'refer to a segment by its ID'
    }
}
macromolecules = {
    'args': ['-M', '--macromolecules'],
    'kwargs': {
        'help': "PDBe accession for macromolecules separated by commas without \
spaces between e.g. 'macr1,macr2,...,macrN'",
    }
}
macromolecule_id = {
    'args': ['-m', '--macromolecule-id'],
    'kwargs': {
        'type': int,
        'help': "the macromolecule ID as shown with the 'list' command",
    }
}
name = {
    'args': ['-N', '--name'],
    'kwargs': {
        'help': "the segmentation name"
    }
}
number_of_instances = {
    'args': ['-n', '--number-of-instances'],
    'kwargs': {
        'type': int,
        'help': 'the number of instances',
    }
}
output = {
    'args': ['-o', '--output'],
    'kwargs': {
        'default': None,
        'help': "file to convert to; the extension (.sff, .hff, .json) determines the output format [default: None]"
    }
}
software_proc_details = {
    'args': ['-P', '--software-processing-details'],
    'kwargs': {
        'help': "details of how the segmentation was processed"
    }
}
config_path = {
    'args': ['-p', '--config-path'],
    'kwargs': {
        'help': "path to configs file"
    }
}
primary_descriptor = {
    'args': ['-R', '--primary-descriptor'],
    'kwargs': {
        'help': "populates the <primaryDescriptor>...</primaryDescriptor> to this value [valid values: threeDVolume, meshList, shapePrimitiveList]"
    }
}
software_name = {
    'args': ['-S', '--software-name'],
    'kwargs': {
        'help': "the name of the software used to create the segmentation"
    }
}
segment_name = {
    'args': ['-s', '--segment-name'],
    'kwargs': {
        'help': "the name of the segment"
    }
}
shipped_configs = {
    'args': ['-b', '--shipped-configs'],
    'kwargs': {
        'default': False,
        'action': 'store_true',
        'help': 'use shipped configs only if config path and user configs fail [default: False]'
    }
}
# NOTE(review): '-V' is also used by the top-level Parser for --version; this
# spec must only be attached to subparsers where that clash cannot occur.
software_version = {
    'args': ['-V', '--software-version'],
    'kwargs': {
        'help': "the version of software used to create the segmentation"
    }
}
verbose = {
    'args': ['-v', '--verbose'],
    'kwargs': {
        'action': 'store_true',
        'default': False,
        'help': "verbose output"
    },
}
# =========================================================================
# prep subparser
# =========================================================================
prep_parser = subparsers.add_parser(
    'prep',
    description="Prepare a segmentation for conversion to EMDB-SFF",
    help="prepares a segmentation"
)
prep_subparsers = prep_parser.add_subparsers(
    title='Segmentation preparation utility',
    dest='prep_subcommand',
    description="The following commands provide a number of pre-processing steps for various segmentation file formats. "
    "Most only apply to one file type. See the help for each command by typing 'sff prep <command>'",
    metavar='Preparation steps:'
)
# =========================================================================
# prep: binmap
# =========================================================================
# Reduce CCP4-style volume maps to a binary mask at a chosen contour level.
binmap_prep_parser = prep_subparsers.add_parser(
    'binmap',
    description='Bin the CCP4 file to reduce file size',
    help='bin a CCP4 map',
)
binmap_prep_parser.add_argument(
    'from_file', help='the name of the segmentation file'
)
add_args(binmap_prep_parser, config_path)
add_args(binmap_prep_parser, shipped_configs)
binmap_prep_parser.add_argument(
    '-m', '--mask-value',
    default=1, type=int,
    help='value to set to; all other voxels set to zero [default: 1]'
)
binmap_prep_parser.add_argument(
    '-o', '--output',
    default=None,
    help='output file name [default: <infile>_binned.<ext>]'
)
binmap_prep_parser.add_argument(
    '--overwrite',
    default=False,
    action='store_true',
    help='overwrite output file [default: False]'
)
binmap_prep_parser.add_argument(
    '-c', '--contour-level',
    default=0,
    type=float,
    help='value (exclusive) about which to threshold [default: 0.0]'
)
binmap_prep_parser.add_argument(
    '--negate',
    default=False,
    action='store_true',
    help='use values below the contour level [default: False]'
)
binmap_prep_parser.add_argument(
    '-B', '--bytes-per-voxel',
    default=1,
    type=int,
    choices=[1, 2, 4, 8, 16],
    help='number of bytes per voxel [default: 1]'
)
binmap_prep_parser.add_argument(
    '--infix',
    default='prep',
    help="infix to be added to filenames e.g. file.map -> file_<infix>.map [default: 'prep']",
)
add_args(binmap_prep_parser, verbose)
# =========================================================================
# prep: transform
# =========================================================================
# Rescale/translate STL mesh vertices from image space into physical space.
transform_prep_parser = prep_subparsers.add_parser(
    'transform',
    description='Transform the STL mesh vertices by the given values',
    help='transform an STL mesh',
)
# todo: add a new option for the voxel coordinates e.g. --voxel-size <v_x> <v_y> <v_z> which is mutually exclusive with --lengths and --indices
transform_prep_parser.add_argument(
    'from_file', help="the name of the segmentation file"
)
add_args(transform_prep_parser, config_path)
add_args(transform_prep_parser, shipped_configs)
transform_prep_parser.add_argument(
    '-L', '--lengths',
    nargs=3, type=float,
    required=True,
    help="the X, Y and Z physical lengths (in angstrom) of the space; three (3) space-separated values [required]"
)
transform_prep_parser.add_argument(
    '-I', '--indices',
    nargs=3, type=int,
    required=True,
    help="the I, J, and K image dimensions of the space, corresponding to X, Y and Z, respectively; three (3) "
    "space-separated integers [required]"
)
transform_prep_parser.add_argument(
    '-O', '--origin',
    nargs=3, type=float,
    default=[0.0, 0.0, 0.0],
    help="the origin position (in angstrom); literally, the distance between the first voxel (lowest indices) and the "
    "physical origin; three (3) space-separated values [default: 0.0 0.0 0.0]"
)
transform_prep_parser.add_argument(
    '-o', '--output',
    default=None,
    help='output file name [default: <infile>_transformed.<ext>]'
)
transform_prep_parser.add_argument(
    '--infix',
    default='transformed',
    help="infix to be added to filenames e.g. file.stl -> file_<infix>.stl [default: 'transformed']",
)
add_args(transform_prep_parser, verbose)
# =========================================================================
# convert subparser
# =========================================================================
convert_parser = subparsers.add_parser(
    'convert', description="Perform conversions to EMDB-SFF", help="converts from/to EMDB-SFF")
convert_parser.add_argument('from_file', nargs='*', help="file to convert from")
add_args(convert_parser, config_path)
add_args(convert_parser, shipped_configs)
convert_parser.add_argument('-t', '--top-level-only', default=False,
                            action='store_true', help="convert only the top-level segments [default: False]")
# convert_parser.add_argument('-M', '--contours-to-mesh', default=False, action='store_true', help="convert an 'contourList' EMDB-SFF to a 'meshList' EMDB-SFF")
convert_parser.add_argument(*details['args'], **details['kwargs'])
convert_parser.add_argument(
    *primary_descriptor['args'], **primary_descriptor['kwargs'])
convert_parser.add_argument(*verbose['args'], **verbose['kwargs'])
# convert_parser.add_argument('-s', '--sub-tomogram-average', nargs=2,
#                             help="convert a subtomogram average into an EMDB-SFF file; two arguments are required: the "
#                             "table file and volume file (in that order)")
convert_parser.add_argument(
    '-m', '--multi-file',
    action='store_true',
    default=False,
    help="enables convert to treat multiple files as individual segments of a single segmentation; only works for the "
    "following filetypes: {} [default: False]".format(
        ', '.join(multi_file_formats),
    )
)
# -o/--output and -f/--format are mutually exclusive ways to pick the target format.
group = convert_parser.add_mutually_exclusive_group()
group.add_argument(*output['args'], **output['kwargs'])
group.add_argument(*format_['args'], **format_['kwargs'])
# =========================================================================
# config subparser
# =========================================================================
config_parser = subparsers.add_parser(
    'config',
    description="Configuration utility",
    help="manage sfftk configs"
)
config_subparsers = config_parser.add_subparsers(
    title='sfftk configurations',
    dest='config_subcommand',
    description='Persistent configurations utility',
    metavar='Commands:'
)
# =============================================================================
# config: get
# =============================================================================
get_config_parser = config_subparsers.add_parser(
    'get',
    description='Get the value of a single configuration parameter',
    help='get single sfftk config'
)
get_config_parser.add_argument(
    'name',
    nargs="?",
    default=None,
    help="the name of the argument to retrieve",
)
add_args(get_config_parser, config_path)
add_args(get_config_parser, shipped_configs)
get_config_parser.add_argument(
    '-a', '--all',
    action='store_true',
    default=False,
    help='get all configs'
)
add_args(get_config_parser, verbose)
# =============================================================================
# config: set
# =============================================================================
set_config_parser = config_subparsers.add_parser(
    'set',
    description='Set the value of a single configuration parameter',
    help='set single sfftk config'
)
set_config_parser.add_argument(
    'name', help="the name of the argument to set",
)
set_config_parser.add_argument(
    'value', help="the value of the argument to set",
)
add_args(set_config_parser, config_path)
add_args(set_config_parser, shipped_configs)
add_args(set_config_parser, verbose)
set_config_parser.add_argument(
    '-f', '--force',
    action='store_true',
    default=False,
    help='force overwriting of an existing config; do not ask to confirm [default: False]'
)
# =============================================================================
# config: del
# =============================================================================
del_config_parser = config_subparsers.add_parser(
    'del',
    description='Delete the named configuration parameter',
    help='delete single sfftk config'
)
del_config_parser.add_argument(
'name',
nargs='?',
default=None,
help="the name | |
import logging
import math
import time
from datetime import date, datetime, timedelta
import numpy as np
import pandas as pd
from analysis.models import StockHistoryDaily, StockStrategyTestLog, StockIndexHistory
from analysis.stock_hist import download_hist_data
from analysis.utils import (generate_task, get_analysis_task, get_event_status,
get_trade_cal_diff, init_eventlog, ready2proceed,
set_event_completed, set_task_completed)
from investors.models import StockFollowing, TradeStrategy
from scipy import stats
from stockmarket.models import StockNameCodeMap
from .utils import calculate_slope, mark_mov_avg
# Module-level logger (note: most routines below report via print instead).
logger = logging.getLogger(__name__)
# Default marking-algorithm version tag for this module.
version = 'v2'
# def trade_calendar(exchange, start_date, end_date):
#     # 获取20200101~20200401之间所有有交易的日期
#     pro = ts.pro_api()
#     df = pro.trade_cal(exchange=exchange, is_open='1',
#                        start_date=start_date,
#                        end_date=end_date,
#                        fields='cal_date')
#     return df
#     # print(df.head())
def pre_handle_jx(ts_code, freq='D', ma_freq='25', version='v1', slope_offset=2):
    """Entry point for moving-average (均线) critical-point marking.

    With ``ts_code=None`` the whole market is processed under event-log
    bookkeeping (skipped entirely unless the event is ready to proceed);
    with an explicit code list the marking runs directly.
    """
    strategy = 'junxian' + ma_freq + '_bs'
    run_date = date.today()
    if ts_code is not None:
        # Explicit codes: run directly, no event-log gating.
        process_junxian_cp(ts_code, freq, ma_freq, version, slope_offset)
        return
    if not ready2proceed(strategy, freq):
        return
    init_eventlog('MARK_CP', run_date, strategy, freq=freq)
    process_junxian_cp(ts_code, freq, ma_freq, version, slope_offset)
    set_event_completed('MARK_CP', run_date, strategy, freq=freq)
def process_junxian_cp(ts_codes, freq='D', ma_freq='25', version='v1', slope_offset=2):
    """Run the pending MARK_CP tasks for every (or only the given) security.

    ts_codes: comma-separated codes, or None for the whole market.
    NOTE(review): end_date/today/btest_event_list/strategy_list are computed
    but only consumed by the commented-out generate_task call below.
    """
    start_date = None
    end_date = None
    today = date.today()
    btest_event_list = ['EXP_PCT_TEST', 'PERIOD_TEST']
    strategy_list = ['ma'+ma_freq+'_zhicheng', 'ma'+ma_freq +
                     '_tupo', 'ma'+ma_freq+'_diepo', 'ma'+ma_freq+'_yali']
    try:
        if ts_codes is None:
            listed_companies = StockNameCodeMap.objects.filter().order_by('-ts_code')
        else:
            ts_code_list = ts_codes.split(',')
            if ts_code_list is not None and len(ts_code_list) >= 1:
                listed_companies = StockNameCodeMap.objects.filter(
                    ts_code__in=ts_code_list).order_by('-ts_code')
        for listed_company in listed_companies:
            # Stocks ('E') and indices keep their history in different tables.
            if listed_company.asset == 'E':
                hist = StockHistoryDaily.objects.filter(
                    ts_code=listed_company.ts_code)
            else:
                hist = StockIndexHistory.objects.filter(
                    ts_code=listed_company.ts_code)
            # Not enough history to compute an MA of this period — skip.
            if hist is not None and len(hist) < int(ma_freq):
                print('stock hist to mark is less than required vol, will not proceed')
                continue
            tasks = get_analysis_task(
                listed_company.ts_code, 'MARK_CP', 'junxian'+ma_freq+'_bs', freq)
            if tasks is not None and len(tasks) > 0:
                for task in tasks:
                    atype = '1'  # mark updated stock history records
                    # (original note: how to fetch earlier history for the shortfall?)
                    if task.start_date == listed_company.list_date:
                        print('第一次处理,从上市日开始。。。')
                        atype = '0'  # first run: mark from the listing date
                        start_date = task.start_date
                    else:
                        # For update runs, move the start date back by one MA
                        # period plus twice the slope offset, counted in
                        # trading days, so the MA/slope can be recomputed.
                        print('更新处理,从上一次更新时间-25,60,200d - 开盘日 开始...')
                        if len(hist) - 1 < int(ma_freq) + int(slope_offset) * 2:
                            print(
                                'stock hist to mark is less than required vol, will pick list date')
                            start_date = listed_company.list_date
                            # continue
                        else:
                            start_date = task.start_date - \
                                timedelta(days=get_trade_cal_diff(
                                    listed_company.ts_code, task.start_date, listed_company.asset, period=int(ma_freq)+int(slope_offset) * 2))
                    mark_junxian_cp(listed_company.ts_code, listed_company.asset, start_date,
                                    task.end_date, ma_freq=ma_freq, atype=atype, slope_offset=int(slope_offset))
                    # print(task.start_date)
                    # # print(task.end_date)
                    set_task_completed(listed_company.ts_code, 'MARK_CP',
                                       freq, 'junxian'+ma_freq+'_bs', task.start_date, task.end_date)
                    # generate_task(listed_company.ts_code,
                    #               freq, task.start_date, task.end_date, event_list=btest_event_list, strategy_list=strategy_list)
            else:
                print('no junxian mark cp task')
    except Exception as e:
        print(e)
def mark_junxian_cp(ts_code, asset, start_date, end_date, atype='1', freq='D', ma_freq='25', price_chg_pct=0.03, slope_deg=0.05241, slope_offset=2, version='v2', done_by='system'):
    '''
    Goal: parameterized analysis of the moving average. Parameters that may
    affect the result:
    - price change (price_chg_pct)
    - slope
    - the number of consecutive days used when computing the slope
      (days before / after)
    '''
    print('marked junxian on start code - ' + ts_code +
          ',' + datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
    try:
        df = None
        hist_list = []
        # Load the history window; stocks ('E') and indices use different tables.
        if asset == 'E':
            df = pd.DataFrame.from_records(StockHistoryDaily.objects.filter(ts_code=ts_code,
                                                                            trade_date__gte=start_date, trade_date__lte=end_date).order_by('trade_date').values('id', 'ma'+ma_freq, 'trade_date', 'open', 'close', 'low', 'high', 'slope'))
            # print(df.head())
        else:
            df = pd.DataFrame.from_records(StockIndexHistory.objects.filter(ts_code=ts_code,
                                                                            trade_date__gte=start_date, trade_date__lte=end_date).order_by('trade_date').values('id', 'ma'+ma_freq, 'trade_date', 'open', 'close', 'low', 'high', 'slope'))
        if df is not None and len(df) > 0:
            # Mark the moving average column on df in place.
            mark_mov_avg(ts_code, df, ma_freq)
            # Persist results below; start_index selects the rows to write back.
            start_index = 0
            if atype == '1':  # update run: skip the warm-up rows pulled in only for MA context
                if len(df) <= int(ma_freq) + slope_offset:
                    start_index = int(ma_freq)
                else:
                    start_index = int(ma_freq) + slope_offset  # - day_offset
            # Compute the slope; needs one extra offset record looking backwards.
            calculate_slope(df, ts_code, ma_freq=ma_freq)
            # print(start_index)
            # Slice only affects update runs (start_index stays 0 otherwise).
            df = df[start_index:]
            # Mark the moving-average critical points (support/breakout/...).
            mark_ma_cp(price_chg_pct, df, ma_freq, version)
            # print(df.head(10))
            # print(df['trade_date'].iloc[start_index])
            # print(len(df))
            # Build unsaved model instances keyed by pk for a bulk update.
            for index, row in df.iterrows():
                hist = object
                if asset == 'E':
                    hist = StockHistoryDaily(pk=row['id'])
                else:
                    hist = StockIndexHistory(pk=row['id'])
                # print(str(row['trade_date']) + ' ' + str(row['ma'+ma_freq+'_slope']))
                # NaN (unmarked) cells are stored as NULL.
                setattr(hist, 'ma'+ma_freq, row['ma'+ma_freq] if not math.isnan(
                    row['ma'+ma_freq]) else None)
                setattr(hist, 'ma'+ma_freq+'_slope', round(row['ma'+ma_freq+'_slope'], 3) if not math.isnan(
                    row['ma'+ma_freq+'_slope']) else None)
                setattr(hist, 'ma'+ma_freq+'_zhicheng', round(row['ma'+ma_freq+'_zhicheng'], 3) if not math.isnan(
                    row['ma'+ma_freq+'_zhicheng']) else None)
                setattr(hist, 'ma'+ma_freq+'_tupo', round(row['ma'+ma_freq+'_tupo'], 3) if not math.isnan(
                    row['ma'+ma_freq+'_tupo']) else None)
                setattr(hist, 'ma'+ma_freq+'_diepo', round(row['ma'+ma_freq+'_diepo'], 3) if not math.isnan(
                    row['ma'+ma_freq+'_diepo']) else None)
                setattr(hist, 'ma'+ma_freq+'_yali', round(row['ma'+ma_freq+'_yali'], 3) if not math.isnan(
                    row['ma'+ma_freq+'_yali']) else None)
                hist_list.append(hist)
            # One bulk UPDATE per table instead of a save() per row.
            if asset == 'E':
                StockHistoryDaily.objects.bulk_update(hist_list, ['ma'+ma_freq,
                                                                  'ma'+ma_freq+'_slope',
                                                                  'ma'+ma_freq+'_zhicheng',
                                                                  'ma'+ma_freq+'_tupo',
                                                                  'ma'+ma_freq+'_diepo',
                                                                  'ma'+ma_freq+'_yali'])
            else:
                StockIndexHistory.objects.bulk_update(hist_list, ['ma'+ma_freq,
                                                                  'ma'+ma_freq+'_slope',
                                                                  'ma'+ma_freq+'_zhicheng',
                                                                  'ma'+ma_freq+'_tupo',
                                                                  'ma'+ma_freq+'_diepo',
                                                                  'ma'+ma_freq+'_yali'])
            # listed_company.is_marked_junxian_bs = True
            # listed_company.save()
            print(' marked junxian bs on end code - ' + ts_code +
                  ',' + datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
            # hist_list.clear()  # clear the list of saved records
    except Exception as e:
        print(e)
def mark_ma_cp(price_chg_pct, df_slc, ma_freq, version):
    """
    Run all four moving-average key-point markers over the slice in place.

    Applies, in order: support (zhicheng), breakout (tupo), breakdown (diepo)
    and pressure (yali) marking; each adds its own column to ``df_slc``.
    ``version`` is forwarded unchanged to every marker.
    """
    print('mark cp start')
    for marker in (mark_zhicheng, mark_tupo, mark_diepo, mark_yali):
        marker(price_chg_pct, df_slc, ma_freq, version)
    print('mark cp end')
def mark_zhicheng(price_chg_pct, df_slc, ma_freq, version):
    """
    Mark MA "support" (zhicheng) bars in place.

    A bar is flagged (column ``ma<freq>_zhicheng`` set to 1, NaN otherwise)
    when:
      1. both open and close are above the MA, and
      2. the low touched the MA: |low - MA| / MA <= price_chg_pct.

    ``version`` is accepted for interface compatibility and is unused.
    Returns the mutated ``df_slc``.
    """
    ma_col = 'ma' + ma_freq
    ma = df_slc[ma_col]
    above_ma = (df_slc['close'] > ma) & (df_slc['open'] > ma)
    touched_ma = abs(df_slc['low'] - ma) / ma <= price_chg_pct
    df_slc[ma_col + '_zhicheng'] = np.where(above_ma & touched_ma, 1, np.nan)
    return df_slc
def mark_tupo(price_chg_pct, df_slc, ma_freq, version):
    """
    Mark MA "breakout" (tupo) bars in place.

    A bar is flagged (column ``ma<freq>_tupo`` set to 1, NaN otherwise) when
    it opens below the MA, closes above it, and the close clears the MA by at
    least ``price_chg_pct`` relative: (close - MA) / MA >= price_chg_pct.

    ``version`` is accepted for interface compatibility and is unused.
    Returns the mutated ``df_slc``.
    """
    ma_col = 'ma' + ma_freq
    ma = df_slc[ma_col]
    crossed_up = (df_slc['close'] > ma) & (df_slc['open'] < ma)
    cleared = (df_slc['close'] - ma) / ma >= price_chg_pct
    df_slc[ma_col + '_tupo'] = np.where(crossed_up & cleared, 1, np.nan)
    return df_slc
def mark_diepo(price_chg_pct, df_slc, ma_freq, version):
    """
    Mark MA "breakdown" (diepo) bars in place.

    A bar is flagged (column ``ma<freq>_diepo`` set to 1, NaN otherwise) when
    it opens above the MA, closes below it, and the close undercuts the MA by
    at least ``price_chg_pct`` relative: (MA - close) / MA >= price_chg_pct.

    ``version`` is accepted for interface compatibility and is unused.
    Returns the mutated ``df_slc``.
    """
    ma_col = 'ma' + ma_freq
    ma = df_slc[ma_col]
    crossed_down = (df_slc['close'] < ma) & (df_slc['open'] > ma)
    undercut = (ma - df_slc['close']) / ma >= price_chg_pct
    df_slc[ma_col + '_diepo'] = np.where(crossed_down & undercut, 1, np.nan)
    return df_slc
def mark_yali(price_chg_pct, df_slc, ma_freq, version):
    """
    Mark MA "pressure/resistance" (yali) bars in place.

    A bar is flagged (column ``ma<freq>_yali`` set to 1, NaN otherwise) when
    both open and close sit below the MA and either the high or the close
    comes within ``price_chg_pct`` of the MA (signed distance, matching the
    original: a high above the MA also satisfies the proximity test).

    ``version`` is accepted for interface compatibility and is unused.
    Returns the mutated ``df_slc``.
    """
    ma_col = 'ma' + ma_freq
    ma = df_slc[ma_col]
    below_ma = (df_slc['close'] < ma) & (df_slc['open'] < ma)
    near_ma = ((ma - df_slc['high']) / ma <= price_chg_pct) | \
              ((ma - df_slc['close']) / ma <= price_chg_pct)
    df_slc[ma_col + '_yali'] = np.where(below_ma & near_ma, 1, np.nan)
    return df_slc
def post_mark(ts_code, df, price_chg_pct):
    '''
    Post-process a stock's history to mark MA25-based buy/sell (b&s) signals.

    Flags written onto rows of ``df`` (value 1 where the signal fires):
      - ``ma25_zhicheng_b``: support-bounce buy signal
      - ``ma25_tupo_b``: breakout buy signal
      - ``ma25_diepo_s``: breakdown sell signal
      - ``ma25_yali_s``: resistance ("pressure") sell signal

    :param ts_code: stock code, used only for log messages.
    :param df: price history; expected columns include ``open``, ``close``,
        ``low``, ``high``, ``ma25``, ``slope``, ``di_min``, ``ding_max`` and
        ``trade_date`` (assumed from the accesses below — TODO confirm with caller).
    :param price_chg_pct: relative distance threshold (delta) for the signals.

    Exceptions are caught and printed, never propagated.
    '''
    print('post mark junxian b&s started on code - ' + ts_code + ',' +
          datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
    try:
        # MA zhicheng (support)
        # df['open_ma25_pct'] = (df['open'] - df['ma25']).div(df['ma25'])
        # df['low_ma25_pct'] = (df['low'] - df['ma25']).div(df['ma25'])
        # df['close_ma25_pct'] = (df['close'] - df['ma25']).div(df['ma25'])
        # Support candidates: indexes of local price bottoms.
        min_idx_list = df.loc[df['di_min'] == 1].index
        # Breakout candidates: indexes where the price trend (slope) is up.
        # NOTE: only used by the commented-out variant below.
        up_idx_list = df.loc[df['slope'] > 0].index
        # Breakdown candidates: indexes where the price trend (slope) is down.
        # NOTE: only used by the commented-out variant below.
        down_idx_list = df.loc[df['slope'] < 0].index
        # Pressure candidates: indexes of local price tops.
        max_idx_list = df.loc[df['ding_max'] == 1].index
        # --- Support (zhicheng): bottom bars whose low touched MA25 while closing above it.
        print('zhicheng')
        for min_idx in min_idx_list:
            low_pct = (df.loc[min_idx].ma25 -
                       df.loc[min_idx].low) / df.loc[min_idx].low
            if abs(low_pct) <= price_chg_pct and df.loc[min_idx].close > df.loc[min_idx].ma25:
                df.loc[min_idx, 'ma25_zhicheng_b'] = 1
                print(df.loc[min_idx].trade_date)
                print('ma:'+str(df.loc[min_idx].ma25)+',low:' +
                      str(df.loc[min_idx].low)+',close:'+str(df.loc[min_idx].close))
        # --- Breakout (tupo): scan the span between consecutive bottoms for an
        # up-trend bar that opened below MA25; mark the later bottom index.
        print('tupo')
        idx_prev = -1
        for max_idx in min_idx_list:
            if idx_prev != -1:  # slope > 0 means an up-trend
                for idx_bwt in range(idx_prev, max_idx):
                    # NOTE(review): close_pct is computed at idx_prev rather than
                    # idx_bwt, so it is loop-invariant — looks like a copy/paste
                    # slip; confirm the intended index before relying on it.
                    close_pct = (df.loc[idx_prev].close -
                                 df.loc[idx_prev].ma25) / df.loc[idx_prev].ma25
                    if df.loc[idx_bwt].slope > 0 and close_pct >= price_chg_pct and df.loc[idx_bwt].open < df.loc[idx_bwt].ma25:
                        # pass
                        df.loc[max_idx, 'ma25_tupo_b'] = 1
                        print(df.loc[idx_bwt].trade_date)
                        print('ma:'+str(df.loc[idx_bwt].ma25)+',low:'+str(
                            df.loc[idx_bwt].low)+',close:'+str(df.loc[idx_bwt].close))
                        break
            idx_prev = max_idx
        # for up_idx in up_idx_list:
        #     close_pct = (df.loc[up_idx].close -
        #                  df.loc[up_idx].ma25) / df.loc[up_idx].ma25
        #     if abs(close_pct) >= price_chg_pct and df.loc[up_idx].low < df.loc[up_idx].ma25:
        #         df.loc[id, 'ma25_tupo_b'] = 1
        #         print(df.loc[up_idx].trade_date)
        # --- Breakdown (diepo): scan the span between consecutive tops for a
        # down-trend bar that crossed below MA25; mark the later top index.
        print('diepo')
        idx_prev = -1
        for max_idx in max_idx_list:
            if idx_prev != -1:  # down-trend expected here (slope < 0 checked below)
                for idx_bwt in range(idx_prev, max_idx):
                    close_pct = (df.loc[idx_bwt].close -
                                 df.loc[idx_bwt].ma25) / df.loc[idx_bwt].ma25
                    if df.loc[idx_bwt].slope < 0 and df.loc[idx_bwt].close < df.loc[idx_bwt].ma25 and df.loc[idx_bwt].open > df.loc[idx_bwt].ma25 and close_pct <= -price_chg_pct:
                        # pass
                        df.loc[max_idx, 'ma25_diepo_s'] = 1
                        print(df.loc[idx_bwt].trade_date)
                        print('ma:'+str(df.loc[idx_bwt].ma25)+',low:'+str(
                            df.loc[idx_bwt].open)+',close:'+str(df.loc[idx_bwt].close))
                        break
            idx_prev = max_idx
        # for down_idx in down_idx_list:
        #     close_pct = (df.loc[down_idx].close -
        #                  df.loc[down_idx].ma25) / df.loc[down_idx].ma25
        #     if df.loc[down_idx].close < df.loc[down_idx].ma25 and df.loc[down_idx].open > df.loc[down_idx].ma25 and abs(close_pct) >= price_chg_pct:
        #         df.loc[id, 'ma25_diepo_s'] = 1
        #         print(df.loc[down_idx].trade_date)
        # --- MA pressure (yali): tops where the high approached MA25 from below.
        print('MA yali')
        idx_prev = -1
        for max_idx in max_idx_list:
            # option 1
            # high_pct = (df.loc[max_idx].ma25 -
            #             df.loc[max_idx].high) / df.loc[max_idx].high
            # if df.loc[max_idx].close < df.loc[max_idx].ma25 and abs(high_pct) <= price_chg_pct:
            #     # print('ma25_yali_s')
            #     df.loc[max_idx, 'ma25_yali_s'] = 1
            #     print(df.loc[max_idx].trade_date)
            #     print('ma:'+str(df.loc[max_idx].ma25)+',low:'+str(df.loc[max_idx].close)+',close:'+str(df.loc[max_idx].high))
            # option 1 (optimized): scan between consecutive tops instead.
            if idx_prev != -1:
                for idx_bwt in range(idx_prev, max_idx-1):
                    high_pct = (df.loc[idx_bwt].ma25 -
                                df.loc[idx_bwt].high) / df.loc[idx_bwt].high
                    if df.loc[idx_bwt].close < df.loc[idx_bwt].ma25 and df.loc[idx_bwt].slope < 0 and abs(high_pct) <= price_chg_pct:
                        # pass
                        # print(df.loc[idx_bwt].trade_date)
                        # print(df.loc[idx_bwt].close)
                        df.loc[idx_bwt, 'ma25_yali_s'] = 1
                        print('ma:'+str(df.loc[idx_bwt].ma25)+',low:'+str(
                            df.loc[idx_bwt].close)+',close:'+str(df.loc[idx_bwt].high))
                        break
            idx_prev = max_idx
        # print(len(slope_list))
        # print(len(dingdi_list))
        # print(len(dingdi_count_list))
        # print(len(end_dingdi_list))
        # df['w_di'] = w_di_list
        # df['m_tou'] = m_tou_list
        print('post mark ma b&s end on code - ' + ts_code +
              ',' + datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
    except Exception as e:
        # time.sleep(1)
        print(e)
else:
| |
"""
a list of static routes. A `cidr` and a `gateway` must
be provided. The `gateway` must be reachable via the bridge interface.
"""
return pulumi.get(self, "routes")
    @routes.setter
    def routes(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['NetworkRouteArgs']]]]):
        # Overwrite the configured static routes on the underlying args bag.
        pulumi.set(self, "routes", value)
    @property
    @pulumi.getter
    def xml(self) -> Optional[pulumi.Input['NetworkXmlArgs']]:
        """XML customization arguments for the network (see ``NetworkXmlArgs``)."""
        return pulumi.get(self, "xml")

    @xml.setter
    def xml(self, value: Optional[pulumi.Input['NetworkXmlArgs']]):
        # Overwrite the XML customization arguments on the underlying args bag.
        pulumi.set(self, "xml", value)
class Network(pulumi.CustomResource):
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 addresses: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 autostart: Optional[pulumi.Input[bool]] = None,
                 bridge: Optional[pulumi.Input[str]] = None,
                 dhcp: Optional[pulumi.Input[pulumi.InputType['NetworkDhcpArgs']]] = None,
                 dns: Optional[pulumi.Input[pulumi.InputType['NetworkDnsArgs']]] = None,
                 dnsmasq_options: Optional[pulumi.Input[pulumi.InputType['NetworkDnsmasqOptionsArgs']]] = None,
                 domain: Optional[pulumi.Input[str]] = None,
                 mode: Optional[pulumi.Input[str]] = None,
                 mtu: Optional[pulumi.Input[int]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 routes: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['NetworkRouteArgs']]]]] = None,
                 xml: Optional[pulumi.Input[pulumi.InputType['NetworkXmlArgs']]] = None,
                 __props__=None):
        """
        Manages a VM network resource within libvirt. For more information see
        [the official documentation](https://libvirt.org/formatnetwork.html).

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] addresses: A list of (0 or 1) IPv4 and (0 or 1) IPv6 subnets in
               CIDR notation. This defines the subnets associated to that network.
               This argument is also used to define the address on the real host.
               If `dhcp { enabled = true }` addresses is also used to define the address range served by
               the DHCP server.
               No DHCP server will be started if `addresses` is omitted.
        :param pulumi.Input[bool] autostart: Set to `true` to start the network on host boot up.
               If not specified `false` is assumed.
        :param pulumi.Input[str] bridge: The bridge device defines the name of a bridge
               device which will be used to construct the virtual network (when not provided,
               it will be automatically obtained by libvirt in `none`, `nat`, `route` and `open` modes).
        :param pulumi.Input[pulumi.InputType['NetworkDhcpArgs']] dhcp: DHCP configuration.
               You need to use it in conjunction with the addresses variable.
        :param pulumi.Input[pulumi.InputType['NetworkDnsArgs']] dns: configuration of DNS specific settings for the network
        :param pulumi.Input[pulumi.InputType['NetworkDnsmasqOptionsArgs']] dnsmasq_options: configuration of Dnsmasq options for the network
               You need to provide a list of option name and value pairs.
        :param pulumi.Input[str] domain: The domain used by the DNS server.
        :param pulumi.Input[str] mode: One of:
               - `none`: the guests can talk to each other and the host OS, but cannot reach
               any other machines on the LAN.
               - `nat`: it is the default network mode. This is a configuration that
               allows guest OS to get outbound connectivity regardless of whether the host
               uses ethernet, wireless, dialup, or VPN networking without requiring any
               specific admin configuration. In the absence of host networking, it at
               least allows guests to talk directly to each other.
               - `route`: this is a variant on the default network which routes traffic from
               the virtual network to the LAN **without applying any NAT**. It requires that
               the IP address range be pre-configured in the routing tables of the router
               on the host network.
               - `open`: similar to `route`, but no firewall rules are added.
               - `bridge`: use a pre-existing host bridge. The guests will effectively be
               directly connected to the physical network (i.e. their IP addresses will
               all be on the subnet of the physical network, and there will be no
               restrictions on inbound or outbound connections). The `bridge` network
               attribute is mandatory in this case.
        :param pulumi.Input[int] mtu: The MTU to set for the underlying network interfaces. When
               not supplied, libvirt will use the default for the interface, usually 1500.
               Libvirt version 5.1 and greater will advertise this value to nodes via DHCP.
        :param pulumi.Input[str] name: A unique name for the resource, required by libvirt.
               Changing this forces a new resource to be created.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['NetworkRouteArgs']]]] routes: a list of static routes. A `cidr` and a `gateway` must
               be provided. The `gateway` must be reachable via the bridge interface.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: Optional[NetworkArgs] = None,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Manages a VM network resource within libvirt. For more information see
        [the official documentation](https://libvirt.org/formatnetwork.html).

        This overload accepts a single ``NetworkArgs`` bundle instead of
        individual keyword arguments.

        :param str resource_name: The name of the resource.
        :param NetworkArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Resolve which of the two overloads the caller used and normalize the
        # arguments before delegating to _internal_init.
        resource_args, opts = _utilities.get_resource_args_opts(NetworkArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            # Args-object form: expand the bundle into keyword arguments.
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            # Keyword-argument form: pass everything through unchanged.
            __self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 addresses: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 autostart: Optional[pulumi.Input[bool]] = None,
                 bridge: Optional[pulumi.Input[str]] = None,
                 dhcp: Optional[pulumi.Input[pulumi.InputType['NetworkDhcpArgs']]] = None,
                 dns: Optional[pulumi.Input[pulumi.InputType['NetworkDnsArgs']]] = None,
                 dnsmasq_options: Optional[pulumi.Input[pulumi.InputType['NetworkDnsmasqOptionsArgs']]] = None,
                 domain: Optional[pulumi.Input[str]] = None,
                 mode: Optional[pulumi.Input[str]] = None,
                 mtu: Optional[pulumi.Input[int]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 routes: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['NetworkRouteArgs']]]]] = None,
                 xml: Optional[pulumi.Input[pulumi.InputType['NetworkXmlArgs']]] = None,
                 __props__=None):
        """
        Shared initializer backing both ``__init__`` overloads.

        Validates the resource options, fills in the provider version default,
        packs the inputs into a ``NetworkArgs`` property bag and registers the
        resource with the Pulumi engine.
        """
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # Creating a new resource: build the property bag from the inputs.
            # __props__ may only be supplied when looking up an existing resource.
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = NetworkArgs.__new__(NetworkArgs)

            __props__.__dict__["addresses"] = addresses
            __props__.__dict__["autostart"] = autostart
            __props__.__dict__["bridge"] = bridge
            __props__.__dict__["dhcp"] = dhcp
            __props__.__dict__["dns"] = dns
            __props__.__dict__["dnsmasq_options"] = dnsmasq_options
            __props__.__dict__["domain"] = domain
            __props__.__dict__["mode"] = mode
            __props__.__dict__["mtu"] = mtu
            __props__.__dict__["name"] = name
            __props__.__dict__["routes"] = routes
            __props__.__dict__["xml"] = xml
        super(Network, __self__).__init__(
            'libvirt:index/network:Network',
            resource_name,
            __props__,
            opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
addresses: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
autostart: Optional[pulumi.Input[bool]] = None,
bridge: Optional[pulumi.Input[str]] = None,
dhcp: Optional[pulumi.Input[pulumi.InputType['NetworkDhcpArgs']]] = None,
dns: Optional[pulumi.Input[pulumi.InputType['NetworkDnsArgs']]] = None,
dnsmasq_options: Optional[pulumi.Input[pulumi.InputType['NetworkDnsmasqOptionsArgs']]] = None,
domain: Optional[pulumi.Input[str]] = None,
mode: Optional[pulumi.Input[str]] = None,
mtu: Optional[pulumi.Input[int]] = None,
name: Optional[pulumi.Input[str]] = None,
routes: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['NetworkRouteArgs']]]]] = None,
xml: Optional[pulumi.Input[pulumi.InputType['NetworkXmlArgs']]] = None) -> 'Network':
"""
Get an existing Network resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Sequence[pulumi.Input[str]]] addresses: A list of (0 or 1) IPv4 and (0 or 1) IPv6 subnets in
CIDR notation. This defines the subnets associated to that network.
This argument is also used to define the address on the real host.
If `dhcp { enabled = true }` addresses is also used to define the address range served by
the DHCP server.
No DHCP server will be started if `addresses` is omitted.
:param pulumi.Input[bool] autostart: Set to `true` to start the network on host boot up.
If not specified `false` is assumed.
:param pulumi.Input[str] bridge: The bridge device defines the name of a bridge
device which will be used to construct the virtual network (when not provided,
it will be automatically obtained by libvirt in `none`, `nat`, `route` and `open` modes).
:param pulumi.Input[pulumi.InputType['NetworkDhcpArgs']] dhcp: DHCP configuration.
You need to use it in conjuction with the adresses variable.
:param pulumi.Input[pulumi.InputType['NetworkDnsArgs']] dns: configuration of DNS specific settings for the network
:param pulumi.Input[pulumi.InputType['NetworkDnsmasqOptionsArgs']] dnsmasq_options: configuration of Dnsmasq options for the network
You need to provide a list of option name and value pairs.
:param pulumi.Input[str] domain: The domain used by the DNS server.
:param pulumi.Input[str] mode: One of:
- `none`: the guests can talk to each other and the host OS, but cannot reach
any other machines on the LAN.
- `nat`: it is the default network mode. This is a configuration that
allows guest OS to get outbound connectivity regardless of whether the host
uses ethernet, wireless, dialup, or VPN networking without requiring any
specific admin configuration. In the absence of host networking, it at
least allows guests to talk directly to each other.
- `route`: this is a variant on the default network which routes traffic from
the virtual network to the LAN **without applying any NAT**. It requires that
the IP address range be pre-configured in the routing tables of the router
on the host network.
- `open`: similar to `route`, but no firewall rules are | |
ib_contract.exchange = symbol_fields[2]
elif symbol_fields[1] == 'CASH':
ib_contract.symbol = symbol_fields[0][0:3] # EUR
ib_contract.secType = symbol_fields[1] # CASH
ib_contract.currency = symbol_fields[0][3:] # GBP
ib_contract.exchange = symbol_fields[2] # IDEALPRO
elif symbol_fields[1] == 'FUT':
ib_contract.localSymbol = symbol_fields[0].replace('_', ' ') # ESM9, in case YM___SEP_20
ib_contract.secType = symbol_fields[1] # FUT
ib_contract.exchange = symbol_fields[2] # GLOBEX
ib_contract.currency = 'USD'
elif symbol_fields[1] == 'OPT': # AAPL OPT 20201016 128.75 C SMART
ib_contract.symbol = symbol_fields[0] # AAPL
ib_contract.secType = symbol_fields[1] # OPT
ib_contract.lastTradeDateOrContractMonth = symbol_fields[2] # 20201016
ib_contract.strike = float(symbol_fields[3]) if '.' in symbol_fields[3] else int(symbol_fields[3]) # 128.75
ib_contract.right = symbol_fields[4] # C
ib_contract.exchange = symbol_fields[5] # SMART
ib_contract.currency = 'USD'
ib_contract.multiplier = '100'
elif symbol_fields[1] == 'FOP': # ES FOP 20200911 3450 C 50 GLOBEX
ib_contract.symbol = symbol_fields[0] # ES
ib_contract.secType = symbol_fields[1] # FOP
ib_contract.lastTradeDateOrContractMonth = symbol_fields[2] # 20200911
ib_contract.strike = float(symbol_fields[3]) if '.' in symbol_fields[3] else int(symbol_fields[3]) # 128.75
ib_contract.right = symbol_fields[4] # C
ib_contract.multiplier = symbol_fields[5] # 50
ib_contract.exchange = symbol_fields[6] # GLOBEX
ib_contract.currency = 'USD'
elif symbol_fields[1] == 'CMDTY': # XAUUSD CMDTY SMART
ib_contract.symbol = symbol_fields[0] # XAUUSD
ib_contract.secType = symbol_fields[1] # COMDTY
ib_contract.currency = 'USD'
ib_contract.exchange = symbol_fields[2] # SMART
elif symbol_fields[1] == 'BAG':
ib_contract.symbol = symbol_fields[0] # CL.BZ
ib_contract.secType = symbol_fields[1] # BAG
leg1 = ComboLeg()
leg1.conId = int(symbol_fields[2]) # 174230608
leg1.ratio = int(symbol_fields[3]) # 1
leg1.action = "BUY"
leg1.exchange = symbol_fields[4] # NYMEX
leg2 = ComboLeg()
leg2.conId = int(symbol_fields[5]) # 162929662
leg2.ratio = int(symbol_fields[6]) # 1
leg2.action = "SELL"
leg2.exchange = symbol_fields[7] # NYMEX
ib_contract.comboLegs = []
ib_contract.comboLegs.append(leg1)
ib_contract.comboLegs.append(leg2)
ib_contract.exchange = symbol_fields[8] # NYMEX
ib_contract.currency = 'USD'
else:
_logger.error(f'invalid contract {symbol}')
return ib_contract
@staticmethod
def contract_to_symbol(ib_contract):
"""
Convert IB contract to full symbol
:param ib_contract: IB contract
:return: full symbol
"""
full_symbol = ''
if ib_contract.secType == 'STK':
full_symbol = ' '.join([ib_contract.localSymbol, 'STK', 'SMART']) # or ib_contract.primaryExchange?
elif ib_contract.secType == 'CASH':
full_symbol = ' '.join([ib_contract.symbol+ib_contract.currency, 'CASH', ib_contract.exchange])
elif ib_contract.secType == 'FUT':
full_symbol = ' '.join([ib_contract.localSymbol.replace(' ', '_'), 'FUT',
ib_contract.primaryExchange if ib_contract.primaryExchange != ''
else ib_contract.exchange])
elif ib_contract.secType == 'OPT':
full_symbol = ' '.join([
ib_contract.symbol, 'OPT', ib_contract.lastTradeDateOrContractMonth,
str(ib_contract.strike), ib_contract.right, 'SMART'
])
elif ib_contract.secType == 'FOP':
full_symbol = ' '.join([
ib_contract.symbol, 'FOP', ib_contract.lastTradeDateOrContractMonth,
str(ib_contract.strike), ib_contract.right, ib_contract.multiplier, ib_contract.exchange
])
elif ib_contract.secType == 'COMDTY':
full_symbol = ' '.join([ib_contract.symbol, 'COMDTY', 'SMART'])
elif ib_contract.secType == 'BAG':
full_symbol = ' '.join([ib_contract.symbol, 'COMDTY', 'SMART'])
return full_symbol
@staticmethod
def order_to_ib_order(order_event):
"""
Convert order event to IB order
:param order_event: internal representation of order
:return: IB representation of order
"""
ib_order = Order()
ib_order.action = 'BUY' if order_event.order_size > 0 else 'SELL'
ib_order.totalQuantity = abs(order_event.order_size)
if order_event.order_type == OrderType.MARKET:
ib_order.orderType = 'MKT'
elif order_event.order_type == OrderType.LIMIT:
ib_order.orderType = 'LMT'
ib_order.lmtPrice = order_event.limit_price
elif order_event.order_type == OrderType.STOP:
ib_order.orderType = 'STP'
ib_order.auxPrice = order_event.stop_price
elif order_event.order_type == OrderType.STOP_LIMIT:
ib_order.orderType = 'STP LMT'
ib_order.lmtPrice = order_event.limit_price
ib_order.auxPrice = order_event.stop_price
else:
return None
return ib_order
@staticmethod
def ib_order_to_order(ib_order):
"""
Convert IB order to order event
:param ib_order: IB representation of order
:return: internal representation of order
"""
order_event = OrderEvent()
# order_event.order_id = orderId
# order_event.order_status = orderState.status
direction = 1 if ib_order.action == 'BUY' else -1
order_event.order_size = ib_order.totalQuantity * direction
if ib_order.orderType == 'MKT':
order_event.order_type = OrderType.MARKET
elif ib_order.orderType == 'LMT':
order_event.order_type = OrderType.LIMIT
order_event.limit_price = ib_order.lmtPrice
elif ib_order.orderType == 'STP':
order_event.order_type = OrderType.STOP
order_event.stop_price = ib_order.auxPrice
elif ib_order.orderType == 'STP LMT':
order_event.order_type = OrderType.STOP_LIMIT
order_event.limit_price = ib_order.lmtPrice
order_event.stop_price = ib_order.auxPrice
else:
order_event.order_type = OrderType.UNKNOWN
order_event.limit_price = ib_order.lmtPrice
order_event.stop_price = ib_order.auxPrice
return order_event
class IBApi(EWrapper, EClient):
    def __init__(self, broker):
        """
        IB EClient/EWrapper pair bound to a broker facade.

        :param broker: owning broker object; receives callback data and logs.
        """
        EWrapper.__init__(self)
        EClient.__init__(self, wrapper=self)
        self.broker = broker
        # Background thread that will run the EClient message loop.
        self.thread = Thread(target=self.run)
        self.nKeybInt = 0            # number of keyboard interrupts observed
        self.connected = False       # set True in connectAck
        self.globalCancelOnly = False
        self.simplePlaceOid = None
# ------------------------------------------ EClient functions --------------------------------------- #
def keyboardInterrupt(self):
self.nKeybInt += 1
if self.nKeybInt == 1:
self.stop()
else:
_logger.info("Finishing test")
    def stop(self):
        # Shutdown hook: currently only logs; no cancels are actually issued.
        _logger.info("Executing cancels")
        _logger.info("Executing cancels ... finished")
# ------------------------------------------------------------------ End EClient functions -------------------------------------------------------- #
#---------------------------------------------------------------------- EWrapper functions -------------------------------------------------------- #
    def connectAck(self):
        """Callback fired when TWS/Gateway acknowledges the connection."""
        # self.asynchronous is presumably provided by the EClient base — confirm.
        if self.asynchronous:
            self.startApi()
        self.connected = True
        _logger.info('IB connected')
    def nextValidId(self, orderId: int):
        """Callback delivering the next valid order id; also gates startup."""
        super().nextValidId(orderId)
        msg = f"nextValidOrderId: {orderId}"
        _logger.info(msg)
        self.broker.log(msg)
        self.broker.orderid = orderId
        # we can start now: the session is usable once an order id arrives
        self.broker.subscribe_market_datas()
    def error(self, reqId: TickerId, errorCode: int, errorString: str):
        """Callback for API errors and warnings reported by TWS."""
        super().error(reqId, errorCode, errorString)
        msg = f'Error. id: {reqId}, Code: {errorCode}, Msg: {errorString}'
        _logger.error(msg)
        self.broker.log(msg)
    def winError(self, text: str, lastError: int):
        """Callback for client-library (socket/DLL) level errors."""
        super().winError(text, lastError)
        msg = f"Error Id: {lastError}, Msg: {text}"
        _logger.error(msg)
        self.broker.log(msg)
def openOrder(self, orderId: OrderId, contract: Contract, order: Order, orderState: OrderState):
"""
Currently IB sends out two openOrder and two OrderStatus; so there are four filled order_status sent out
"""
super().openOrder(orderId, contract, order, orderState)
msg = f"OpenOrder. PermId: {order.permId}, ClientId: {order.clientId}, OrderId: {orderId}, " \
f"Account: {order.account}, Symbol: {contract.symbol}, SecType: {contract.secType}, " \
f"Exchange: {contract.exchange}, Action: {order.action}, OrderType: {order.orderType}, " \
f"TotalQty: {order.totalQuantity}, CashQty: {order.cashQty}, LmtPrice: {order.lmtPrice}, " \
f"AuxPrice: {order.auxPrice}, Status: {orderState.status}"
_logger.info(msg)
self.broker.log(msg)
if orderId in self.broker.order_dict.keys():
order_event = self.broker.order_dict[orderId]
else: # not placed by algo
order_event = InteractiveBrokers.ib_order_to_order(order)
order_event.order_id = orderId
order_event.full_symbol = InteractiveBrokers.contract_to_symbol(contract)
order_event.account = self.broker.account
order_event.order_time = datetime.now().strftime('%H:%M:%S.%f')
order_event.source = -1 # unrecognized source
self.broker.order_dict[orderId] = order_event
if orderState.status == 'Submitted':
order_event.order_status = OrderStatus.SUBMITTED
elif orderState.status == 'Filled':
order_event.order_status = OrderStatus.FILLED
elif orderState.status == 'PreSubmitted':
order_event.order_status = OrderStatus.PENDING_SUBMIT
elif orderState.status == 'Cancelled':
order_event.order_status = OrderStatus.CANCELED
elif orderState.status == 'Inactive': # e.g. exchange closed
order_event.order_status = OrderStatus.ERROR
else:
order_event.order_status = OrderStatus.UNKNOWN
self.broker.event_engine.put(copy(order_event))
def openOrderEnd(self):
super().openOrderEnd()
_logger.info("OpenOrderEnd")
_logger.info(f"Received openOrders {len(list(self.broker.order_dict.keys()))}")
def orderStatus(self, orderId: OrderId, status: str, filled: float,
remaining: float, avgFillPrice: float, permId: int,
parentId: int, lastFillPrice: float, clientId: int,
whyHeld: str, mktCapPrice: float):
super().orderStatus(orderId, status, filled, remaining,
avgFillPrice, permId, parentId, lastFillPrice, clientId, whyHeld, mktCapPrice)
msg = f"OrderStatus. Id: {orderId}, Status: {status}, Filled: {filled}, " \
f"Remaining: {remaining}, AvgFillPrice: {avgFillPrice}, PermId: {permId}, ParentId: {parentId}, " \
f"LastFillPrice: {lastFillPrice}, ClientId: {clientId}, WhyHeld: {whyHeld}, MktCapPrice: {mktCapPrice}"
_logger.info(msg)
self.broker.log(msg)
order_event = self.broker.order_dict.get(orderId, None)
if order_event is None:
msg = f'OrderStatus: Unable to find order {orderId}'
_logger.error(msg)
self.broker.log(msg)
order_event = OrderEvent()
order_event.order_id = orderId
order_event.account = self.broker.account
order_event.order_size = filled + remaining
order_event.fill_size = filled
self.order_type = OrderType.UNKNOWN
self.order_status = OrderStatus.UNKNOWN
order_event.order_time = datetime.now().strftime('%H:%M:%S.%f')
order_event.source = -1 # unrecognized source
self.broker.order_dict[orderId] = order_event
if status == 'Submitted':
order_event.order_status = OrderStatus.SUBMITTED
elif status == 'Filled':
order_event.order_status = OrderStatus.FILLED
elif status == 'PreSubmitted':
order_event.order_status = OrderStatus.PENDING_SUBMIT
elif status == 'Cancelled' or status == 'ApiCancelled':
order_event.order_status = OrderStatus.CANCELED
order_event.fill_size = filled # remaining = order_size - fill_size
order_event.cancel_time = datetime.now().strftime("%H:%M:%S.%f")
elif status == 'Inactive': # e.g. exchange closed
order_event.order_status = OrderStatus.ERROR
else:
order_event.order_status = OrderStatus.UNKNOWN
order_event.fill_size = filled
self.broker.event_engine.put(copy(order_event))
    def managedAccounts(self, accountsList: str):
        """Callback delivering the comma-separated list of managed accounts."""
        super().managedAccounts(accountsList)
        msg = f'Account list:, {accountsList}'
        _logger.info(msg)
        self.broker.log(msg)
        # Adopt the first managed account and subscribe to its account updates.
        self.broker.account = accountsList.split(",")[0]
        self.reqAccountUpdates(True, self.broker.account)
    def accountSummary(self, reqId: int, account: str, tag: str, value: str,
                       currency: str):
        """Callback delivering one account-summary tag/value pair (log only)."""
        super().accountSummary(reqId, account, tag, value, currency)
        msg = f"AccountSummary. ReqId: {reqId}, Account: {account}, Tag: {tag}, Value: {value}, Currency: {currency}"
        _logger.info(msg)
        self.broker.log(msg)
    def accountSummaryEnd(self, reqId: int):
        """Callback marking the end of an account-summary download."""
        super().accountSummaryEnd(reqId)
        _logger.info(f"AccountSummaryEnd. ReqId: {reqId}")
def updateAccountValue(self, key: str, val: str, currency: str,
accountName: str):
"""
Just as with the TWS' Account Window, unless there is a position change this information is updated at a fixed interval of three minutes.
"""
super().updateAccountValue(key, val, currency, accountName)
msg = f'UpdateAccountValue. Key: {key}, Value: {val}, Currency: {currency}, AccountName: {accountName}'
_logger.info(msg)
self.broker .account_summary.timestamp = datetime.now().strftime("%H:%M:%S.%f")
if key == 'NetLiquidationByCurrency' and currency == 'USD':
self.broker.account_summary.balance = float(val)
elif key == 'NetLiquidation' and currency == 'USD':
self.broker.account_summary.balance = float(val)
self.broker.account_summary.account_id = accountName
elif key == 'AvailableFunds' and currency == 'USD':
self.broker.account_summary.available = float(val)
elif key == 'MaintMarginReq' and currency == 'USD':
self.broker.account_summary.margin = float(val)
elif key == 'RealizedPnL' and currency == 'USD':
self.broker.account_summary.closed_pnl = float(val)
elif key == 'UnrealizedPnL' and currency == 'USD':
self.broker.account_summary.open_pnl = float(val)
self.broker.event_engine.put(self.broker.account_summary) # assume alphabatic order
def updatePortfolio(self, contract: Contract, position: float,
marketPrice: float, marketValue: float,
averageCost: float, unrealizedPNL: float,
realizedPNL: float, accountName: str):
"""
Just as with the TWS' Account Window, unless there is a position change this information is updated at a fixed interval of three minutes.
"""
super().updatePortfolio(contract, position, marketPrice, marketValue,
averageCost, unrealizedPNL, realizedPNL, accountName)
msg | |
# Source: CodeChecker project — tools/report-converter/codechecker_report_converter/report/output/html/html.py
# -------------------------------------------------------------------------
#
# Part of the CodeChecker project, under the Apache License v2.0 with
# LLVM Exceptions. See LICENSE for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
# -------------------------------------------------------------------------
import io
import json
import logging
import os
import shutil
import sys
from collections import defaultdict
from string import Template
from typing import Callable, Dict, List, Optional, Set, Tuple
from codechecker_report_converter.report import BugPathEvent, \
InvalidFileContentMsg, File, MacroExpansion, Report, report_file, \
reports as reports_helper
from codechecker_report_converter.report.statistics import Statistics
from codechecker_report_converter.report.checker_labels import CheckerLabels
if sys.version_info >= (3, 8):
from typing import TypedDict # pylint: disable=no-name-in-module
else:
from mypy_extensions import TypedDict
# Module-level logger shared by the report-converter tooling.
LOG = logging.getLogger('report-converter')

# Callable deciding whether a report should be skipped; returns a flag plus a
# list. NOTE(review): the meaning of the positional arguments is inferred from
# this alias only — confirm at the call site.
SkipReportHandler = Callable[
    [str, str, int, str, dict, Dict[int, str]],
    Tuple[bool, list]
]
class HTMLBugPathEvent(TypedDict):
    """One step of a bug path as embedded in the generated HTML report."""
    # Human-readable message for this bug-path step.
    message: str
    # Identifier of the source file the event points into.
    fileId: str
    # Position of the event within the file.
    line: int
    column: int


HTMLBugPathEvents = List[HTMLBugPathEvent]
class HTMLMacroExpansion(HTMLBugPathEvent):
    """Bug-path event extended with the name of the expanded macro."""
    name: str


HTMLMacroExpansions = List[HTMLMacroExpansion]
class Checker(TypedDict):
    """Checker identity as embedded in the HTML report."""
    name: str
    # Documentation URL for the checker, when one is known.
    url: Optional[str]
class HTMLReport(TypedDict):
    """One report in the JSON payload embedded into a generated HTML page."""
    fileId: str
    reportHash: Optional[str]
    checker: Checker
    analyzerName: Optional[str]
    line: int
    column: int
    message: str
    events: HTMLBugPathEvents
    macros: HTMLMacroExpansions
    notes: HTMLBugPathEvents
    reviewStatus: Optional[str]
    severity: Optional[str]
HTMLReports = List[HTMLReport]
class FileSource(TypedDict):
    """A source file embedded into the HTML: its path and full content."""
    filePath: str
    content: str
# Keyed by file id (File.id); shared by all reports rendered on a page.
FileSources = Dict[str, FileSource]
class HtmlReportLink(TypedDict):
    """Pairs a rendered report with the html file it was written into."""
    report: HTMLReport
    link: str
def get_file_content(file_path: str) -> str:
    """Return the text content of *file_path*.

    Reads as UTF-8; undecodable bytes are replaced rather than raising.
    """
    with open(file_path, 'r', encoding='utf-8', errors='replace') as source:
        file_content = source.read()
    return file_content
class HtmlBuilder:
    """
    Helper class to create html files from report data.

    Loads the HTML layout templates and their static css/js dependencies
    once at construction, then renders per-result report pages (`create`),
    an index page (`create_index_html`) and a statistics page
    (`create_statistics_html`).
    """

    def __init__(
        self,
        layout_dir: str,
        checker_labels: Optional[CheckerLabels] = None
    ):
        """
        Args:
            layout_dir: directory holding layout.html, index.html,
                statistics.html and the css/js/vendor assets.
            checker_labels: optional label provider used to look up checker
                severities and documentation urls.
        """
        self._checker_labels = checker_labels
        self.layout_dir = layout_dir

        # Output html file path -> reports rendered into that file.
        self.generated_html_reports: Dict[str, HTMLReports] = {}

        # File id -> {filePath, content}; shared between all pages.
        self.files: FileSources = {}

        css_dir = os.path.join(self.layout_dir, 'css')
        js_dir = os.path.join(self.layout_dir, 'js')
        codemirror_dir = os.path.join(
            self.layout_dir, 'vendor', 'codemirror')

        # Mapping layout tags to files.
        self._layout_tag_files = {
            'style_css': os.path.join(css_dir, 'style.css'),
            'buglist_css': os.path.join(css_dir, 'buglist.css'),
            'bugview_css': os.path.join(css_dir, 'bugview.css'),
            'statistics_css': os.path.join(css_dir, 'statistics.css'),
            'icon_css': os.path.join(css_dir, 'icon.css'),
            'table_css': os.path.join(css_dir, 'table.css'),
            'codemirror_license': os.path.join(codemirror_dir,
                                               'codemirror.LICENSE'),
            'codemirror_css': os.path.join(codemirror_dir,
                                           'codemirror.min.css'),
            'codemirror_js': os.path.join(codemirror_dir, 'codemirror.min.js'),
            'clike_js': os.path.join(codemirror_dir, 'clike.min.js'),
            'bug_viewer': os.path.join(js_dir, 'bugviewer.js'),
            'bug_list': os.path.join(js_dir, 'buglist.js'),
            'browser_support': os.path.join(js_dir, 'browsersupport.js')
        }

        # Get the HTML layout file content.
        self._layout = Template(get_file_content(
            os.path.join(self.layout_dir, 'layout.html')))

        self._index = Template(get_file_content(
            os.path.join(self.layout_dir, 'index.html')))

        self._statistics = Template(get_file_content(
            os.path.join(self.layout_dir, 'statistics.html')))

        # Get the content of the HTML layout dependencies.
        self._tag_contents = {
            tag: get_file_content(file_path)
            for tag, file_path in self._layout_tag_files.items()}

    def get_severity(self, checker_name: str) -> str:
        """ Returns severity level for the given checker name. """
        return self._checker_labels.severity(checker_name) \
            if self._checker_labels else 'UNSPECIFIED'

    def _add_source_file(self, file: File) -> FileSource:
        """
        Updates file source data by file id if the given file hasn't been
        processed yet; falls back to a placeholder message when the file's
        content can no longer be read.
        """
        if file.id in self.files:
            return self.files[file.id]

        try:
            file_content = file.content
        except Exception:
            # Source may have been removed or moved since the analysis ran.
            file_content = InvalidFileContentMsg

        self.files[file.id] = {
            'filePath': file.path, 'content': file_content}

        return self.files[file.id]

    def _get_doc_url(self, report: Report) -> Optional[str]:
        """ Get documentation url for the given report if exists. """
        if self._checker_labels:
            doc_urls = self._checker_labels.label_of_checker(
                report.checker_name, 'doc_url', report.analyzer_name)
            return doc_urls[0] if doc_urls else None

        return None

    def _get_html_reports(
        self,
        reports: List[Report]
    ) -> Tuple[HTMLReports, FileSources]:
        """ Get HTML reports from the given reports.

        Returns a list of html reports and references to file sources.
        """
        html_reports: HTMLReports = []
        files: FileSources = {}

        def to_bug_path_events(
            events: List[BugPathEvent]
        ) -> HTMLBugPathEvents:
            """ Converts the given events to html compatible format. """
            html_events: HTMLBugPathEvents = []
            for event in events:
                files[event.file.id] = self._add_source_file(event.file)

                html_events.append({
                    'message': event.message,
                    'fileId': event.file.id,
                    'line': event.line,
                    'column': event.column,
                })
            return html_events

        def to_macro_expansions(
            macro_expansions: List[MacroExpansion]
        ) -> HTMLMacroExpansions:
            """ Converts the given macro expansions to html compatible
            format. """
            html_macro_expansions: HTMLMacroExpansions = []
            for macro_expansion in macro_expansions:
                files[macro_expansion.file.id] = self._add_source_file(
                    macro_expansion.file)

                html_macro_expansions.append({
                    'message': macro_expansion.message,
                    'name': macro_expansion.name,
                    'fileId': macro_expansion.file.id,
                    'line': macro_expansion.line,
                    'column': macro_expansion.column,
                })
            return html_macro_expansions

        for report in reports:
            files[report.file.id] = self._add_source_file(report.file)

            html_reports.append({
                'fileId': report.file.id,
                'reportHash': report.report_hash,
                'checker': {
                    'name': report.checker_name,
                    'url': self._get_doc_url(report)
                },
                'analyzerName': report.analyzer_name,
                'line': report.line,
                'column': report.column,
                'message': report.message,
                'events': to_bug_path_events(report.bug_path_events),
                'macros': to_macro_expansions(report.macro_expansions),
                'notes': to_bug_path_events(report.notes),
                'reviewStatus': report.review_status,
                'severity': self.get_severity(report.checker_name)
            })

        return html_reports, files

    def create(
        self,
        output_file_path: str,
        reports: List[Report]
    ) -> Tuple[Optional[HTMLReports], Set[str]]:
        """
        Create html file from the given analyzer result file to the output
        path.

        Returns (html_reports, changed_files). html_reports is None when
        generation was skipped because source files changed since analysis.
        """
        changed_files = reports_helper.get_changed_files(reports)
        if changed_files:
            return None, changed_files

        html_reports, files = self._get_html_reports(reports)

        self.generated_html_reports[output_file_path] = html_reports

        # FIX: substitute on a copy. Previously this rebound and then
        # mutated the shared self._tag_contents dict, so page-specific keys
        # ('report_data', 'table_reports', ...) leaked between calls.
        substitute_data = dict(self._tag_contents)
        substitute_data.update({
            'report_data': json.dumps({
                'files': files,
                'reports': html_reports
            })
        })

        content = self._layout.substitute(substitute_data)

        with open(output_file_path, 'w+',
                  encoding='utf-8', errors='replace') as f:
            f.write(content)

        return html_reports, changed_files

    def create_index_html(self, output_dir: str):
        """
        Creates an index.html file which lists all available bugs which were
        found in the processed plist files. This also creates a link for each
        bug to the created html file where the bug can be found.
        """
        # Sort reports based on file path levels.
        html_report_links: List[HtmlReportLink] = []
        for html_file, reports in self.generated_html_reports.items():
            for report in reports:
                html_report_links.append({'link': html_file, 'report': report})

        html_report_links.sort(
            key=lambda data: self.files[data['report']['fileId']]['filePath'])

        with io.StringIO() as table_reports:
            # Create table header.
            table_reports.write('''
      <tr>
        <th id="report-id">&nbsp;</th>
        <th id="file-path">File</th>
        <th id="severity">Severity</th>
        <th id="checker-name">Checker name</th>
        <th id="message">Message</th>
        <th id="bug-path-length">Bug path length</th>
        <th id="review-status">Review status</th>
      </tr>''')

            # Create table lines.
            for i, data in enumerate(html_report_links):
                html_file = os.path.basename(data['link'])
                report = data['report']

                severity = report['severity'].lower() \
                    if 'severity' in report \
                    and report['severity'] is not None \
                    else ''

                review_status = report['reviewStatus'] \
                    if 'reviewStatus' in report and \
                    report['reviewStatus'] is not None \
                    else ''

                # Prefer the last bug path event for the line/message shown
                # in the index; fall back to the report's own location.
                events = report['events']
                if events:
                    line = events[-1]['line']
                    message = events[-1]['message']
                    bug_path_length = len(events)
                else:
                    line = report['line']
                    message = report['message']
                    bug_path_length = 1

                rs = review_status.lower().replace(' ', '-')
                file_path = self.files[report['fileId']]['filePath']

                checker = report['checker']
                doc_url = checker.get('url')
                if doc_url:
                    checker_name_col_content = f'<a href="{doc_url}" '\
                        f'target="_blank">{checker["name"]}</a>'
                else:
                    checker_name_col_content = checker["name"]

                table_reports.write(f'''
      <tr>
        <td>{i + 1}</td>
        <td file="{file_path}" line="{line}">
          <a href="{html_file}#reportHash={report['reportHash']}">
            {file_path} @ Line&nbsp;{line}
          </a>
        </td>
        <td class="severity" severity="{severity}">
          <i class="severity-{severity}"
             title="{severity}"></i>
        </td>
        <td>{checker_name_col_content}</td>
        <td>{message}</td>
        <td class="bug-path-length">{bug_path_length}</td>
        <td class="review-status review-status-{rs}">
          {review_status}
        </td>
      </tr>''')

            # FIX: substitute on a copy instead of mutating the shared
            # self._tag_contents dict.
            substitute_data = dict(self._tag_contents)
            substitute_data.update(
                {'table_reports': table_reports.getvalue()})

        content = self._index.substitute(substitute_data)

        output_path = os.path.join(output_dir, 'index.html')
        with open(output_path, 'w+', encoding='utf-8',
                  errors='replace') as html_output:
            html_output.write(content)

    def create_statistics_html(self, output_dir: str):
        """
        Creates a statistics.html file which contains statistics information
        from the HTML generation process.
        """
        def severity_order(severity: str) -> int:
            """
            This function determines in which order severities should be
            printed to the output. This function can be given via "key"
            attribute to sort() function.
            """
            severities = ['CRITICAL', 'HIGH', 'MEDIUM', 'LOW', 'STYLE',
                          'UNSPECIFIED']
            # FIX: unknown severity values sort last instead of raising
            # ValueError from list.index().
            if severity not in severities:
                return len(severities)
            return severities.index(severity)

        num_of_analyzer_result_files = len(self.generated_html_reports)

        num_of_reports = 0
        for reports in self.generated_html_reports.values():
            num_of_reports += len(reports)

        checker_statistics: Dict[str, int] = defaultdict(int)
        for reports in self.generated_html_reports.values():
            for report in reports:
                checker = report['checker']['name']
                checker_statistics[checker] += 1

        checker_rows: List[List[str]] = []
        severity_statistics: Dict[str, int] = defaultdict(int)

        with io.StringIO() as string:
            for checker_name in sorted(checker_statistics):
                severity = self.get_severity(checker_name)
                string.write('''
      <tr>
        <td>{0}</td>
        <td class="severity" severity="{1}">
          <i class="severity-{1}" title="{1}"></i>
        </td>
        <td>{2}</td>
      </tr>
'''.format(checker_name, severity.lower(),
           checker_statistics[checker_name]))
                checker_rows.append([checker_name, severity,
                                     str(checker_statistics[checker_name])])

                severity_statistics[severity] += \
                    checker_statistics[checker_name]

            checker_statistics_content = string.getvalue()

        severity_rows: List[List[str]] = []

        with io.StringIO() as string:
            for severity in sorted(severity_statistics, key=severity_order):
                num = severity_statistics[severity]
                string.write('''
      <tr>
        <td class="severity" severity="{0}">
          <i class="severity-{0}" title="{0}"></i>
        </td>
        <td>{1}</td>
      </tr>
'''.format(severity.lower(), num))
                severity_rows.append([severity, str(num)])

            severity_statistics_content = string.getvalue()

        # FIX: substitute on a copy instead of mutating the shared
        # self._tag_contents dict.
        substitute_data = dict(self._tag_contents)
        substitute_data.update({
            'num_of_analyzer_result_files': str(num_of_analyzer_result_files),
            'number_of_reports': str(num_of_reports),
            'checker_statistics': checker_statistics_content,
            'severity_statistics': severity_statistics_content})

        content = self._statistics.substitute(substitute_data)

        output_path = os.path.join(output_dir, 'statistics.html')
        # FIX: use errors='replace' like every other writer in this class
        # (was 'ignore', silently dropping undecodable characters).
        with open(output_path, 'w+', encoding='utf-8',
                  errors='replace') as html_output:
            html_output.write(content)

    def finish(self, output_dir_path: str, statistics: Statistics):
        """ Creates common html files and print summary messages. """
        self.create_index_html(output_dir_path)
        self.create_statistics_html(output_dir_path)

        statistics.write()

        print(f"\nTo view statistics in a browser run:\n> firefox "
              f"{os.path.join(output_dir_path, 'statistics.html')}")

        print(f"\nTo view the results in a browser run:\n> firefox "
              f"{os.path.join(output_dir_path, 'index.html')}")
def convert(
    file_path: str,
    reports: List[Report],
    output_dir_path: str,
    html_builder: HtmlBuilder
) -> Set[str]:
    """
    Prints the results in the given file to HTML file.

    Returns the skipped analyzer result files because of source
    file content change (empty set when the HTML was generated).
    """
    if not reports:
        # FIX: lazy %-style logging args (consistent with the rest of the
        # module) instead of eagerly-formatted f-strings.
        LOG.info('No report data in %s file.', file_path)
        return set()

    html_filename = f"{os.path.basename(file_path)}.html"
    html_output_path = os.path.join(output_dir_path, html_filename)

    _, changed_files = html_builder.create(html_output_path, reports)
    if changed_files:
        return changed_files

    LOG.info("Html file was generated: %s", html_output_path)
    return changed_files
def parse(
input_path: str,
output_path: str,
layout_dir: str,
html_builder: Optional[HtmlBuilder] = None
) -> Set[str]:
"""
Parses analyzer result files from the given input directory to the output
directory.
Return a set of changed files.
"""
files = []
input_path = os.path.abspath(input_path)
output_dir = os.path.abspath(output_path)
if os.path.exists(output_path):
LOG.info("Previous analysis results in '%s' have been removed, "
"overwriting with current results.", output_dir)
shutil.rmtree(output_path)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
if os.path.isfile(input_path):
files.append(input_path)
elif os.path.isdir(input_path):
_, _, | |
= [f]
else:
for subc in corpus:
to_iterate_over[(subc.name, subc.path)] = subc.files
elif corpus[0].level == 'f':
for f in corpus:
to_iterate_over[(f.name, f.path)] = [f]
elif corpus.singlefile:
to_iterate_over = {(corpus.name, corpus.path): [corpus]}
elif not hasattr(corpus, 'subcorpora') or not corpus.subcorpora:
# just files in a directory
if files_as_subcorpora:
to_iterate_over = {}
for f in corpus.files:
to_iterate_over[(f.name, f.path)] = [f]
else:
to_iterate_over = {(corpus.name, corpus.path): corpus.files}
else:
to_iterate_over = {}
if files_as_subcorpora:
# don't know if possible: has subcorpora but also .files
if hasattr(corpus, 'files') and corpus.files is not None:
for f in corpus.files:
to_iterate_over[(f.name, f.path)] = [f]
# has subcorpora with files in those
elif hasattr(corpus, 'files') and corpus.files is None:
for subc in corpus.subcorpora:
for f in subc.files:
to_iterate_over[(f.name, f.path)] = [f]
else:
if corpus[0].level == 's':
for subcorpus in corpus:
to_iterate_over[(subcorpus.name, subcorpus.path)] = subcorpus.files
elif corpus[0].level == 'f':
for f in corpus:
to_iterate_over[(f.name, f.path)] = [f]
else:
for subcorpus in corpus.subcorpora:
to_iterate_over[(subcorpus.name, subcorpus.path)] = subcorpus.files
return to_iterate_over
def welcome_printer(return_it=False):
    """Print (or return) the welcome banner for this interrogation.

    Reads `no_conc`/`only_conc`/`kwargs`/`cname`/`optiontext`/`search`
    from the enclosing scope.
    """
    if only_conc:
        message = 'Concordancing'
    elif no_conc:
        message = 'Interrogating'
    else:
        message = 'Interrogating and concordancing'
    # Silent mode: nothing to show, nothing to return.
    if not kwargs.get('printstatus', True):
        return
    from corpkit.process import dictformat
    thetime = strftime("%H:%M:%S", localtime())
    sformat = dictformat(search)
    welcome = ('\n%s: %s %s ...\n %s\n ' \
               'Query: %s\n %s corpus ... \n' % \
               (thetime, message, cname, optiontext, sformat, message))
    if return_it:
        return welcome
    print(welcome)
def goodbye_printer(return_it=False, only_conc=False):
    """Build the end-of-run summary line; print it unless `return_it`.

    Reads `kwargs`/`conc_df`/`countmode`/`tot`/`numentries`/`total_total`
    from the enclosing scope.
    """
    # Silent mode: no summary at all.
    if not kwargs.get('printstatus', True):
        return
    thetime = strftime("%H:%M:%S", localtime())
    if only_conc:
        n_results = format(len(conc_df), ',')
        finalstring = '\n\n%s: Concordancing finished! %s results.' % (thetime, n_results)
    else:
        finalstring = '\n\n%s: Interrogation finished!' % thetime
        if countmode:
            finalstring += ' %s matches.' % format(tot, ',')
        else:
            finalstring += ' %s unique results, %s total occurrences.' % (
                format(numentries, ','), format(total_total, ','))
    if return_it:
        return finalstring
    print(finalstring)
def get_conc_colnames(corpus,
                      fsi_index=False,
                      simple_tregex_mode=False):
    """Build the list of concordance column names.

    Base columns are 'c f s l m r' (minus 'f' for simple tregex mode,
    plus a leading 'i' for fsi indexing), followed by any metadata fields
    requested via the enclosing scope's `show_conc_metadata`.
    """
    base = 'c f s l m r'
    if simple_tregex_mode:
        base = base.replace('f ', '')
    if fsi_index and not simple_tregex_mode:
        base = 'i ' + base

    if PYTHON_VERSION == 2:
        colnames = base.encode('utf-8').split()
    else:
        colnames = base.split()

    if show_conc_metadata:
        from corpkit.build import get_all_metadata_fields
        meta = get_all_metadata_fields(corpus.path)

        if isinstance(show_conc_metadata, list):
            meta = [field for field in meta if field in show_conc_metadata]
        #elif show_conc_metadata is True:
        #    pass

        for field in sorted(meta):
            # These are handled elsewhere, never as concordance columns.
            if field in ['speaker', 'sent_id', 'parse']:
                continue
            if PYTHON_VERSION == 2:
                colnames.append(field.encode('utf-8'))
            else:
                colnames.append(field)
    return colnames
def make_conc_obj_from_conclines(conc_results, fsi_index=False):
    """
    Turn conclines into DataFrame

    Builds one pandas Series per concordance line (padding short lines
    with 'none'), concatenates them into a DataFrame, drops the empty
    's' column if unused, and wraps the result in a Concordance object
    (truncated to `maxconc` rows when set).  Returns None when there are
    no lines to concatenate.  Relies on `only_unique`, `conc_col_names`,
    `locs`, `corpus` and `maxconc` from the enclosing scope.
    """
    from corpkit.interrogation import Concordance
    #fsi_place = 2 if fsi_index else 0
    all_conc_lines = []
    for sc_name, resu in sorted(conc_results.items()):
        if only_unique:
            unique_results = uniquify(resu)
        else:
            unique_results = resu
        #make into series
        for lin in unique_results:
            #spkr = str(spkr, errors = 'ignore')
            #if not subcorpora:
            #    lin[fsi_place] = lin[fsi_place]
            #lin.insert(fsi_place, sc_name)
            # Pad short lines so every Series matches the column index.
            if len(lin) < len(conc_col_names):
                diff = len(conc_col_names) - len(lin)
                lin.extend(['none'] * diff)
            all_conc_lines.append(Series(lin, index=conc_col_names))
    try:
        conc_df = pd.concat(all_conc_lines, axis=1).T
    except ValueError:
        # No concordance lines at all: pd.concat raises on empty input.
        return
    # Drop the 's' (speaker) column when it carries no real data.
    if all(x == '' for x in list(conc_df['s'].values)) or \
       all(x == 'none' for x in list(conc_df['s'].values)):
        conc_df.drop('s', axis=1, inplace=True)
    locs['corpus'] = corpus.name
    if maxconc:
        conc_df = Concordance(conc_df[:maxconc])
    else:
        conc_df = Concordance(conc_df)
    try:
        # Attach the interrogation parameters for later reproducibility.
        conc_df.query = locs
    except AttributeError:
        pass
    return conc_df
def lowercase_result(res):
    """
    Take any result and do spelling/lowercasing if need be
    todo: remove lowercase and change name
    """
    if statsmode or not res:
        return res
    # this is likely broken, but spelling in interrogate is deprecated anyway
    if spelling:
        return [correct_spelling(item) for item in res]
    return res
def postprocess_concline(line, fsi_index=False, conc=False):
    """Lowercase and/or spell-correct the text columns of a concordance line.

    Only the left/match/right text window is touched; the window shifts by
    two columns when fsi indexing is on.  No-op unless `conc` is set.
    Reads `preserve_case` and `spelling` from the enclosing scope.
    """
    # todo: are these right?
    if not conc:
        return line
    # FIX: dropped the unused `subc` local the original assigned alongside
    # the window bounds.
    start, end = (4, 7) if fsi_index else (2, 5)
    if not preserve_case:
        line[start:end] = [str(x).lower() for x in line[start:end]]
    if spelling:
        line[start:end] = [correct_spelling(str(b)) for b in line[start:end]]
    return line
def make_progress_bar():
    """generate a progress bar

    Computes the total number of files to process, assembles the keyword
    arguments for `animator`, initialises the bar and advances it once.
    Returns (progress_bar, outname_prefix, total_files, animator_kwargs).
    Relies on many names from the enclosing scope (`simple_tregex_mode`,
    `to_iterate_over`, `kwargs`, `root`, `note`, `quiet`, `in_notebook`,
    `current_iter`, ...).
    """
    # In simple tregex mode each subcorpus counts once; otherwise count
    # every file in every subcorpus.
    if simple_tregex_mode:
        total_files = len(list(to_iterate_over.keys()))
    else:
        total_files = sum(len(x) for x in list(to_iterate_over.values()))
    par_args = {'printstatus': kwargs.get('printstatus', True),
                'root': root,
                'note': note,
                'quiet': quiet,
                'length': total_files,
                'startnum': kwargs.get('startnum'),
                'denom': kwargs.get('denominator', 1)}
    term = None
    # When running as one of several parallel jobs, draw on a fixed
    # terminal line so the bars don't interleave.
    if kwargs.get('paralleling', None) is not None:
        from blessings import Terminal
        term = Terminal()
        par_args['terminal'] = term
        par_args['linenum'] = kwargs.get('paralleling')
    if in_notebook:
        par_args['welcome_message'] = welcome_message
    outn = kwargs.get('outname', '')
    if outn:
        outn = getattr(outn, 'name', outn)
        outn = outn + ': '
    tstr = '%s%d/%d' % (outn, current_iter, total_files)
    p = animator(None, None, init=True, tot_string=tstr, **par_args)
    tstr = '%s%d/%d' % (outn, current_iter + 1, total_files)
    animator(p, current_iter, tstr, **par_args)
    return p, outn, total_files, par_args
# find out if using gui
root = kwargs.get('root')
note = kwargs.get('note')
language_model = kwargs.get('language_model')
# set up pause method
original_sigint = signal.getsignal(signal.SIGINT)
if kwargs.get('paralleling', None) is None:
if not root:
original_sigint = signal.getsignal(signal.SIGINT)
signal.signal(signal.SIGINT, signal_handler)
# find out about concordancing
only_conc = False
no_conc = False
if conc is False:
no_conc = True
if isinstance(conc, str) and conc.lower() == 'only':
only_conc = True
no_conc = False
numconc = 0
# wipe non essential class attributes to not bloat query attrib
if isinstance(corpus, Corpus):
import copy
corpus = copy.copy(corpus)
for k, v in corpus.__dict__.items():
if isinstance(v, (Interrogation, Interrodict)):
corpus.__dict__.pop(k, None)
# convert path to corpus object
if not isinstance(corpus, (Corpus, Corpora, Subcorpus, File, Datalist)):
if not multiprocess and not kwargs.get('outname'):
corpus = Corpus(corpus, print_info=False)
# figure out how the user has entered the query and show, and normalise
from corpkit.process import searchfixer
search = searchfixer(search, query)
show = fix_show(show, gramsize)
locs['show'] = show
# instantiate lemmatiser if need be
lem_instance = False
if any(i.endswith('l') for i in show) and isinstance(search, dict) and search.get('t'):
from nltk.stem.wordnet import WordNetLemmatizer
lem_instance = WordNetLemmatizer()
# do multiprocessing if need be
im, corpus, search, query, = is_multiquery(corpus, search, query,
kwargs.get('outname', False))
# figure out if we can multiprocess the corpus
if hasattr(corpus, '__iter__') and im:
corpus = Corpus(corpus, print_info=False)
if hasattr(corpus, '__iter__') and not im:
im = 'datalist'
if isinstance(corpus, Corpora):
im = 'multiplecorpora'
# split corpus if the user wants multiprocessing but no other iterable
if not im and multiprocess:
im = 'datalist'
if getattr(corpus, 'subcorpora', False):
corpus = corpus[:]
else:
corpus = corpus.files
search = fix_search(search, case_sensitive=case_sensitive, root=root)
exclude = fix_search(exclude, case_sensitive=case_sensitive, root=root)
# if it's already been through pmultiquery, don't do it again
locs['search'] = search
locs['exclude'] = exclude
locs['query'] = query
locs['corpus'] = corpus
locs['multiprocess'] = multiprocess
locs['print_info'] = kwargs.get('printstatus', True)
locs['multiple'] = im
locs['subcorpora'] = subcorpora
locs['nosubmode'] = nosubmode
# send to multiprocess function
if im:
signal.signal(signal.SIGINT, original_sigint)
from corpkit.multiprocess import pmultiquery
return pmultiquery(**locs)
# get corpus metadata
cname = corpus.name
if isinstance(save, STRINGTYPE):
savename = corpus.name + '-' + save
if save is True:
raise ValueError('save must be str, not bool.')
datatype = getattr(corpus, 'datatype', 'conll')
singlefile = getattr(corpus, 'singlefile', False)
level = getattr(corpus, 'level', 'c')
# store all results in here
from collections import defaultdict
results = defaultdict(Counter)
count_results = defaultdict(list)
conc_results = defaultdict(list)
# check if just counting, turn off conc if so
countmode = 'c' in show or 'mc' in show
if countmode:
no_conc = True
only_conc = False
# where we are at in interrogation
current_iter = 0
# multiprocessing progress bar
denom = kwargs.get('denominator', 1)
startnum = kwargs.get('startnum', 0)
# Determine the search function to be used #
optiontext, simple_tregex_mode, statsmode, tree_to_text, search_trees = determine_search_func(show)
# no conc for statsmode
if statsmode:
no_conc = True
only_conc = False
conc = False
# Set some Tregex-related values
translated_option = False
if search.get('t'):
query, translated_option = get_tregex_values(show)
if query == 'Bad query' and translated_option is None:
if root:
return 'Bad query'
else:
return
# more tregex options
if tree_to_text:
treg_q = r'ROOT << __'
op = ['-o', '-t', '-w', '-f']
elif simple_tregex_mode:
treg_q = search['t']
op = ['-%s' % i for i in translated_option] + ['-o', '-f']
# make iterable object for corpus interrogation
to_iterate_over = make_search_iterable(corpus)
try:
nam = get_ipython().__class__.__name__
if nam == 'ZMQInteractiveShell':
in_notebook = True
else:
in_notebook | |
#!/usr/bin/env python3
# predmet: PDS 2018/2019
# projekt: Hybridní chatovací P2P síť
# autor: <NAME> (xsuhaj02)
import socket
import sys
import threading
import os
import json
import time
import errno
import signal
import arg_parser
from my_bencode import *
# Maximum UDP datagram size accepted by recvfrom.
BUFFSIZE = 65536
node_socket = None
# The node's own address.
node_ip = None
node_port = None
# Transaction id counter.
txid = 0
toacked = None
# Shutdown flag - set to True on SIGINT.
end = False
# The node's database.
database = {}
# Dictionaries of neighbouring nodes and peers.
neighbour_nodes = {}
neighbour_peers = {}
# Counter for peers in the peer record.
peercounter = 0
# Keep the global txid within unsigned 16-bit range.
def check_txid():
    """Wrap the global transaction id back to 0 once it leaves uint16 range.

    FIX: uses `>=` instead of `==` so any overshoot past 65535 is also
    reset, not just the exact value 65536.
    """
    global txid
    if txid >= 65536:
        txid = 0
# Serialize and transmit a message in a single datagram.
def my_send(sock, message, ip, port):
    """Bencode *message* and send it as one UDP datagram to (ip, port).

    A send failure is reported on stderr but not raised.
    """
    encoded = bencode(message)
    try:
        sock.sendto(encoded.encode(), (ip, port))
    except socket.error:
        sys.stderr.write("Error at sending the data on socket.\n")
# Receive one datagram from the socket.
def my_recv(sock):
    """Receive one UDP datagram and bencode-decode it.

    Returns (decoded_message, sender_address), or (None, None) when the
    non-blocking socket has nothing to read or the receive failed.

    FIX: on a non-EAGAIN socket error the original fell through and called
    .decode() on its `str` placeholder "" (AttributeError on Python 3);
    now it reports the error and returns (None, None).
    """
    try:
        data, address = sock.recvfrom(BUFFSIZE)
    except socket.error as e:
        err = e.args[0]
        if err == errno.EAGAIN or err == errno.EWOULDBLOCK:
            # Non-blocking socket would block: nothing to read right now.
            return None, None
        sys.stderr.write("Error at receiving the data on socket.\n")
        return None, None
    return b_decode(data.decode()), address  # bencode to dict
# Create the node's UDP socket.
# ipv4: address string, port: number.
def create_socket(ipv4, port):
    """Create an IPv4 UDP socket bound to (ipv4, port) and return it.

    Exits the process with status 1 when binding fails.
    """
    udp_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)  # UDP
    try:
        udp_sock.bind((ipv4, port))
    except socket.error:
        sys.stderr.write("Error at creating UDP socket.\n")
        exit(1)
    return udp_sock
# signal handler - ctrl+C
def sigint_handler(signum, frame):
    """Handle SIGINT: flag the main loop (and worker threads) to stop."""
    global end
    end = True
# Thread body: periodic update broadcasts.
def send_update_thread():
    """Broadcast the database to every neighbour node every 2 seconds.

    Runs until the global `end` flag is set; each message consumes one
    transaction id.
    """
    global txid
    while not end:
        # One update per neighbour.
        for neighbour in neighbour_nodes.values():
            update_msg = {"type": "update", "txid": txid, "db": database}
            my_send(node_socket, update_msg, neighbour['ip'], neighbour['port'])
            txid += 1
            check_txid()
        # 2 second period so updates comfortably fit the 4s window.
        time.sleep(2)
# Action taken when a neighbour node's update timer expires.
def timeout_update(ip, port):
    """Drop a silent neighbour node: cancel its timer and forget its state."""
    node_key = ip + "," + str(port)
    neighbour_nodes[node_key]['timer'].cancel()
    del database[node_key]
    del neighbour_nodes[node_key]
# Action taken when a peer's hello timer expires.
def timeout_hello(ip, port):
    """Unregister a silent peer and remove it from this node's own db record."""
    peers_key = ip + "," + str(port)
    db_key = node_ip + "," + str(node_port)
    neighbour_peers[peers_key]['timer'].cancel()
    gone_username = neighbour_peers[peers_key]["username"]
    # Remove the matching record from our own database entry.
    for record_id, peer in database[db_key].items():
        if peer["username"] == gone_username:
            del database[db_key][record_id]
            break
    del neighbour_peers[peers_key]
# Restart a neighbour node's update timer.
def reset_update_timer(ip, port):
    """Replace a neighbour node's 12s update timer with a fresh running one."""
    entry = neighbour_nodes[ip + "," + str(port)]
    entry['timer'].cancel()
    fresh_timer = threading.Timer(12.0, timeout_update, [ip, port])
    fresh_timer.start()
    entry['timer'] = fresh_timer
# Restart a peer's hello timer.
def reset_hello_timer(ip, port):
    """Replace a peer's 30s hello timer with a fresh running one."""
    entry = neighbour_peers[ip + "," + str(port)]
    entry['timer'].cancel()
    fresh_timer = threading.Timer(30.0, timeout_hello, [ip, port])
    fresh_timer.start()
    entry['timer'] = fresh_timer
# Initialise a neighbour node's update timer.
def set_update_timer(ip, port):
    """Start the initial 12s update timer for a neighbour node."""
    update_timer = threading.Timer(12.0, timeout_update, [ip, port])
    update_timer.start()
    neighbour_nodes[ip + "," + str(port)]['timer'] = update_timer
# Initialise a peer's hello timer.
def set_hello_timer(ip, port):
    """Start the initial 30s hello timer for a peer."""
    hello_timer = threading.Timer(30.0, timeout_hello, [ip, port])
    hello_timer.start()
    neighbour_peers[ip + "," + str(port)]['timer'] = hello_timer
# main
def main():
global node_socket
global end
global node_ip
global node_port
global txid
global toacked
global database
global peercounter
global neighbour_nodes
global neighbour_peers
# kontrola argumentov a nacitanie
args = arg_parser.parse_arguments("node")
arg_parser.check_arguments_format("node", args)
# naplnime udaje o nodu
node_ip = args.reg_ipv4
node_port = int(args.reg_port)
# vytvorenie socketu
node_socket = create_socket(node_ip, node_port)
# vypnutie blokovania
node_socket.setblocking(0)
path_command = str(args.id) + "_node_command"
# subor pre rpc ze bezime
open(str(args.id) + "_node_running", 'a').close()
# vlakno pre posielanie updatov
t = threading.Thread(target=send_update_thread)
# detach
t.daemon = True
t.start()
# incializacia databaze
key = node_ip + "," + str(node_port)
database[key] = {}
# cyklus pre prijimanie sprav a prikazov
while not end:
if os.path.exists('./' + path_command): # ak existuje pipe
try:
with open(path_command, 'r') as pipe:
command_input = json.loads(pipe.read())
os.remove(path_command)
received = None
if command_input['command'] == 'database':
#iba vypisat DB
print(database)
if command_input['command'] == 'neighbors':
# vypisat neighbour_nodes
for k, val in neighbour_nodes.items():
print("ip: " + val['ip'] + "," + "port: " + str(val['port']))
if command_input['command'] == 'connect':
# poslat update pridat ho do neigbour_nodes
msg = {"type": "update", "txid": txid, "db": database}
txid += 1
check_txid()
my_send(node_socket, msg, command_input['ip'], int(command_input['port']))
if command_input['command'] == 'disconnect':
#posleme kazdemu
for k, val in neighbour_nodes.items():
# posle disconnect
msg = {"type": "disconnect", "txid": txid}
my_send(node_socket, msg, val['ip'], int(val['port']))
toacked = txid
txid += 1
check_txid()
# vymazeme vzdy nezavisle na ack a error
key = val['ip'] + "," + str(val['port'])
#del neighbour_nodes[key]
del database[key]
#zrusime timer
val['timer'].cancel()
# recv ack
ack = False
starttime = time.time()
while time.time() - starttime <= 2:
received, _ = my_recv(node_socket)
if received is None:
continue
# ak bol error
if received['type'] == "error":
break
# ak nebol ack tak chyba
if received['type'] == "ack":
ack = True
break
if received is not None:
if received['type'] == "error":
sys.stderr.write(received['verbose'] + "\n")
continue
if ack is False:
sys.stderr.write("Error, ACK lost!\n")
continue
else:
if received['txid'] != toacked:
sys.stderr.write("Error, bad ACK number!\n")
continue
# vynulujeme susedov
neighbour_nodes = {}
if command_input['command'] == 'sync':
# poslat pre kazdy sused
for key, val in neighbour_nodes.items():
message = {"type": "update", "txid": txid, "db": database}
txid += 1
check_txid()
my_send(node_socket, message, val['ip'], val['port'])
except EnvironmentError:
sys.stderr.write("Error at receiving the command.\n")
exit(1)
#ak nebol zadany prikaz
else:
received, address = my_recv(node_socket)
# ak neprislo nic
if received is None:
continue
else: # ak sa prijali data
if received["type"] == "getlist":
# kontrola ci ho pozname
key = address[0] + "," + str(address[1])
if key not in neighbour_peers:
msg = {"type": "error", "txid": received["txid"], "verbose": "I dont know you, you are not registered to me!"}
my_send(node_socket, msg, address[0], address[1])
txid += 1
check_txid()
continue
# send ack
msg = {"type": "ack", "txid": received["txid"]}
my_send(node_socket, msg, address[0], address[1])
txid += 1
check_txid()
#send list
#iteracia cez uzly v db
idx = 0
peer_record = {}
for key, val in database.items():
for k, peer in val.items():
peer_record[str(idx)] = peer
idx += 1
msg = {"type": "list", "txid": txid, "peers": peer_record}
my_send(node_socket, msg, address[0], address[1])
toacked = txid
txid += 1
check_txid()
# recv ack
ack = False
starttime = time.time()
while time.time() - starttime <= 2:
received, _ = my_recv(node_socket)
if received is None:
continue
# ak bol error
if received['type'] == "error":
break
# ak nebol ack tak chyba
if received['type'] == "ack":
ack = True
break
if received is not None:
if received['type'] == "error":
sys.stderr.write(received['verbose'] + "\n")
continue
if ack is False:
sys.stderr.write("Error, ACK lost!\n")
continue
else:
if received['txid'] != toacked:
sys.stderr.write("Error, bad ACK number!\n")
continue
if received["type"] == "update":
key = address[0] + "," + str(address[1])
#aktualizacia db iba autoritetivne
database[key] = received["db"][key]
# aktualizacia neighbour nodes
# pridame novych k susedom
for node in received["db"]:
stripped = [x.strip() for x in node.split(',')]
ip = stripped[0]
port = int(stripped[1])
# nepridame sama seba
if node != node_ip + "," + str(node_port):
if node not in neighbour_nodes:
# posleme mu update ze sa chceme pripojit
msg = {"type": "update", "txid": txid, "db": database}
my_send(node_socket, msg, ip, port)
txid += 1
check_txid()
# ulozime ho k susedom, nastavime novy timer
neighbour_nodes[node] = {}
neighbour_nodes[node]['timer'] = None
neighbour_nodes[node]["ip"] = ip
neighbour_nodes[node]["port"] = port
set_update_timer(ip, port)
else:
# vynulovat timer iba od toho od koho sme dostali
if address[0] + "," + str(address[1]) == node:
reset_update_timer(ip, port)
if received["type"] == "hello":
# ak nulove tak ho vyhodit aj z neighbours aj z db
if received["ipv4"] == "0.0.0.0" and received["port"] == 0:
# error ak ho nepozname tj neni pripojeny
peers_key = address[0] + "," + str(address[1])
if peers_key not in neighbour_peers:
msg = {"type": "error", "txid": received["txid"],
"verbose": "I dont know you, you are not connected to me!"}
my_send(node_socket, msg, received["ipv4"], received["port"])
txid += 1
check_txid()
continue
peers_key = address[0] + "," + str(address[1])
db_key = node_ip + "," + str(node_port)
neighbour_peers[peers_key]['timer'].cancel()
for k, peer in database[db_key].items():
if peer["username"] == neighbour_peers[peers_key]["username"]:
del database[db_key][k]
break
del neighbour_peers[peers_key]
continue
key = node_ip + "," + str(node_port)
found = False
# kontrola iba ak uz je ulozeny
if key in database:
for k, peer in database[key].items():
if peer["username"] == received["username"]:
#aktualizacia
peer["ipv4"] = received["ipv4"]
peer["port"] = received["port"]
found = True
# | |
already_processed.add('_subtype')
if value in ('true', '1'):
self._subtype = True
elif value in ('false', '0'):
self._subtype = False
else:
raise_parse_error(node, 'Bad boolean attribute')
value = find_attr_value_('DataFormat', node)
if value is not None and 'DataFormat' not in already_processed:
already_processed.add('DataFormat')
self.DataFormat = value
value = find_attr_value_('_archetype', node)
if value is not None and '_archetype' not in already_processed:
already_processed.add('_archetype')
self._archetype = value
value = find_attr_value_('Units', node)
if value is not None and 'Units' not in already_processed:
already_processed.add('Units')
self.Units = value
value = find_attr_value_('_id', node)
if value is not None and '_id' not in already_processed:
already_processed.add('_id')
self._id = value
value = find_attr_value_('ArrayValue', node)
if value is not None and 'ArrayValue' not in already_processed:
already_processed.add('ArrayValue')
self.ArrayValue = value
value = find_attr_value_('MetricName', node)
if value is not None and 'MetricName' not in already_processed:
already_processed.add('MetricName')
self.MetricName = value
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        # MetricType declares no child elements in the schema, so there is
        # nothing to build from child nodes; this is intentionally a no-op.
        pass
# end class MetricType
class MetricsType(GeneratedsSuper):
    """Generated binding for the Metrics complex type.

    Holds two lists of child elements (ComplexMetric, Metric) plus the
    bookkeeping XML attributes _derived, _real_archetype, _archetype,
    _subtype, _instances, _desynched_atts and _id.
    """
    # When a subclass registers itself here, factory() instantiates it
    # instead of MetricsType.
    subclass = None
    superclass = None
    def __init__(self, _derived=None, _real_archetype=None, _archetype=None, _subtype=None, _instances=None, _desynched_atts=None, _id=None, ComplexMetric=None, Metric=None):
        self.original_tagname_ = None
        self._derived = _cast(None, _derived)
        self._real_archetype = _cast(bool, _real_archetype)
        self._archetype = _cast(None, _archetype)
        self._subtype = _cast(bool, _subtype)
        self._instances = _cast(None, _instances)
        self._desynched_atts = _cast(None, _desynched_atts)
        self._id = _cast(None, _id)
        # fresh lists per instance; avoids the shared-mutable-default pitfall
        if ComplexMetric is None:
            self.ComplexMetric = []
        else:
            self.ComplexMetric = ComplexMetric
        if Metric is None:
            self.Metric = []
        else:
            self.Metric = Metric
    def factory(*args_, **kwargs_):
        # Dispatch to a registered subclass when one is set.
        if MetricsType.subclass:
            return MetricsType.subclass(*args_, **kwargs_)
        else:
            return MetricsType(*args_, **kwargs_)
    factory = staticmethod(factory)
    # --- generated get_/set_/add_/insert_ accessors ---
    # NOTE(review): insert_* replaces the item at `index` rather than
    # inserting before it (list item assignment, not list.insert).
    def get_ComplexMetric(self): return self.ComplexMetric
    def set_ComplexMetric(self, ComplexMetric): self.ComplexMetric = ComplexMetric
    def add_ComplexMetric(self, value): self.ComplexMetric.append(value)
    def insert_ComplexMetric(self, index, value): self.ComplexMetric[index] = value
    def get_Metric(self): return self.Metric
    def set_Metric(self, Metric): self.Metric = Metric
    def add_Metric(self, value): self.Metric.append(value)
    def insert_Metric(self, index, value): self.Metric[index] = value
    def get__derived(self): return self._derived
    def set__derived(self, _derived): self._derived = _derived
    def get__real_archetype(self): return self._real_archetype
    def set__real_archetype(self, _real_archetype): self._real_archetype = _real_archetype
    def get__archetype(self): return self._archetype
    def set__archetype(self, _archetype): self._archetype = _archetype
    def get__subtype(self): return self._subtype
    def set__subtype(self, _subtype): self._subtype = _subtype
    def get__instances(self): return self._instances
    def set__instances(self, _instances): self._instances = _instances
    def get__desynched_atts(self): return self._desynched_atts
    def set__desynched_atts(self, _desynched_atts): self._desynched_atts = _desynched_atts
    def get__id(self): return self._id
    def set__id(self, _id): self._id = _id
    def hasContent_(self):
        # True when either child-element list is non-empty; drives the
        # choice between <tag>...</tag> and a self-closing <tag/> in export().
        if (
            self.ComplexMetric or
            self.Metric
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespace_='', name_='MetricsType', namespacedef_='', pretty_print=True):
        """Serialise this element as XML to outfile at the given indent level."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        # preserve the tag name the element had in the parsed document
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='MetricsType')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespace_='', name_='MetricsType', pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
        else:
            # no children: emit a self-closing tag
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='MetricsType'):
        """Write each non-None XML attribute once, tracking emitted names in already_processed."""
        if self._derived is not None and '_derived' not in already_processed:
            already_processed.add('_derived')
            outfile.write(' _derived=%s' % (self.gds_format_string(quote_attrib(self._derived).encode(ExternalEncoding), input_name='_derived'), ))
        if self._real_archetype is not None and '_real_archetype' not in already_processed:
            already_processed.add('_real_archetype')
            outfile.write(' _real_archetype="%s"' % self.gds_format_boolean(self._real_archetype, input_name='_real_archetype'))
        if self._archetype is not None and '_archetype' not in already_processed:
            already_processed.add('_archetype')
            outfile.write(' _archetype=%s' % (self.gds_format_string(quote_attrib(self._archetype).encode(ExternalEncoding), input_name='_archetype'), ))
        if self._subtype is not None and '_subtype' not in already_processed:
            already_processed.add('_subtype')
            outfile.write(' _subtype="%s"' % self.gds_format_boolean(self._subtype, input_name='_subtype'))
        if self._instances is not None and '_instances' not in already_processed:
            already_processed.add('_instances')
            outfile.write(' _instances=%s' % (self.gds_format_string(quote_attrib(self._instances).encode(ExternalEncoding), input_name='_instances'), ))
        if self._desynched_atts is not None and '_desynched_atts' not in already_processed:
            already_processed.add('_desynched_atts')
            outfile.write(' _desynched_atts=%s' % (self.gds_format_string(quote_attrib(self._desynched_atts).encode(ExternalEncoding), input_name='_desynched_atts'), ))
        if self._id is not None and '_id' not in already_processed:
            already_processed.add('_id')
            outfile.write(' _id=%s' % (self.gds_format_string(quote_attrib(self._id).encode(ExternalEncoding), input_name='_id'), ))
    def exportChildren(self, outfile, level, namespace_='', name_='MetricsType', fromsubclass_=False, pretty_print=True):
        """Serialise every child element, preserving list order."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        for ComplexMetric_ in self.ComplexMetric:
            ComplexMetric_.export(outfile, level, namespace_, name_='ComplexMetric', pretty_print=pretty_print)
        for Metric_ in self.Metric:
            Metric_.export(outfile, level, namespace_, name_='Metric', pretty_print=pretty_print)
    def exportLiteral(self, outfile, level, name_='MetricsType'):
        """Write a Python-literal (constructor-style) representation of this object."""
        level += 1
        already_processed = set()
        self.exportLiteralAttributes(outfile, level, already_processed, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        # String-valued attributes are quoted; booleans are written bare.
        if self._derived is not None and '_derived' not in already_processed:
            already_processed.add('_derived')
            showIndent(outfile, level)
            outfile.write('_derived="%s",\n' % (self._derived,))
        if self._real_archetype is not None and '_real_archetype' not in already_processed:
            already_processed.add('_real_archetype')
            showIndent(outfile, level)
            outfile.write('_real_archetype=%s,\n' % (self._real_archetype,))
        if self._archetype is not None and '_archetype' not in already_processed:
            already_processed.add('_archetype')
            showIndent(outfile, level)
            outfile.write('_archetype="%s",\n' % (self._archetype,))
        if self._subtype is not None and '_subtype' not in already_processed:
            already_processed.add('_subtype')
            showIndent(outfile, level)
            outfile.write('_subtype=%s,\n' % (self._subtype,))
        if self._instances is not None and '_instances' not in already_processed:
            already_processed.add('_instances')
            showIndent(outfile, level)
            outfile.write('_instances="%s",\n' % (self._instances,))
        if self._desynched_atts is not None and '_desynched_atts' not in already_processed:
            already_processed.add('_desynched_atts')
            showIndent(outfile, level)
            outfile.write('_desynched_atts="%s",\n' % (self._desynched_atts,))
        if self._id is not None and '_id' not in already_processed:
            already_processed.add('_id')
            showIndent(outfile, level)
            outfile.write('_id="%s",\n' % (self._id,))
    def exportLiteralChildren(self, outfile, level, name_):
        showIndent(outfile, level)
        outfile.write('ComplexMetric=[\n')
        level += 1
        for ComplexMetric_ in self.ComplexMetric:
            showIndent(outfile, level)
            outfile.write('model_.ComplexMetricType(\n')
            ComplexMetric_.exportLiteral(outfile, level, name_='ComplexMetricType')
            showIndent(outfile, level)
            outfile.write('),\n')
        level -= 1
        showIndent(outfile, level)
        outfile.write('],\n')
        showIndent(outfile, level)
        outfile.write('Metric=[\n')
        level += 1
        for Metric_ in self.Metric:
            showIndent(outfile, level)
            outfile.write('model_.MetricType(\n')
            Metric_.exportLiteral(outfile, level, name_='MetricType')
            showIndent(outfile, level)
            outfile.write('),\n')
        level -= 1
        showIndent(outfile, level)
        outfile.write('],\n')
    def build(self, node):
        """Populate this object from an element-tree node; returns self."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            # strip any namespace prefix from the tag to get the local name
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        """Read XML attributes from node; boolean attributes accept true/1/false/0."""
        value = find_attr_value_('_derived', node)
        if value is not None and '_derived' not in already_processed:
            already_processed.add('_derived')
            self._derived = value
        value = find_attr_value_('_real_archetype', node)
        if value is not None and '_real_archetype' not in already_processed:
            already_processed.add('_real_archetype')
            if value in ('true', '1'):
                self._real_archetype = True
            elif value in ('false', '0'):
                self._real_archetype = False
            else:
                raise_parse_error(node, 'Bad boolean attribute')
        value = find_attr_value_('_archetype', node)
        if value is not None and '_archetype' not in already_processed:
            already_processed.add('_archetype')
            self._archetype = value
        value = find_attr_value_('_subtype', node)
        if value is not None and '_subtype' not in already_processed:
            already_processed.add('_subtype')
            if value in ('true', '1'):
                self._subtype = True
            elif value in ('false', '0'):
                self._subtype = False
            else:
                raise_parse_error(node, 'Bad boolean attribute')
        value = find_attr_value_('_instances', node)
        if value is not None and '_instances' not in already_processed:
            already_processed.add('_instances')
            self._instances = value
        value = find_attr_value_('_desynched_atts', node)
        if value is not None and '_desynched_atts' not in already_processed:
            already_processed.add('_desynched_atts')
            self._desynched_atts = value
        value = find_attr_value_('_id', node)
        if value is not None and '_id' not in already_processed:
            already_processed.add('_id')
            self._id = value
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        """Dispatch a child node to the matching element list, recording its original tag."""
        if nodeName_ == 'ComplexMetric':
            obj_ = ComplexMetricType.factory()
            obj_.build(child_)
            self.ComplexMetric.append(obj_)
            obj_.original_tagname_ = 'ComplexMetric'
        elif nodeName_ == 'Metric':
            obj_ = MetricType.factory()
            obj_.build(child_)
            self.Metric.append(obj_)
            obj_.original_tagname_ = 'Metric'
# end class MetricsType
class MaterialType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, Bearing=None, _derived=None, _real_archetype=None, _desynched_atts=None, _subtype=None, _instances=None, _archetype=None, Units=None, _id=None, Mises=None, Shear=None):
self.original_tagname_ = None
self.Bearing = _cast(float, Bearing)
self._derived = _cast(None, _derived)
self._real_archetype = _cast(bool, _real_archetype)
self._desynched_atts = _cast(None, _desynched_atts)
self._subtype = _cast(bool, _subtype)
self._instances = _cast(None, _instances)
self._archetype = _cast(None, _archetype)
self.Units = _cast(None, Units)
self._id = _cast(None, _id)
self.Mises = _cast(float, Mises)
self.Shear = _cast(float, Shear)
def factory(*args_, **kwargs_):
if MaterialType.subclass:
return MaterialType.subclass(*args_, **kwargs_)
else:
return MaterialType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_Bearing(self): return self.Bearing
def set_Bearing(self, Bearing): self.Bearing = Bearing
def get__derived(self): return self._derived
def set__derived(self, _derived): self._derived = _derived
def get__real_archetype(self): return self._real_archetype
def set__real_archetype(self, _real_archetype): self._real_archetype = _real_archetype
def get__desynched_atts(self): return self._desynched_atts
def set__desynched_atts(self, _desynched_atts): self._desynched_atts = _desynched_atts
def get__subtype(self): return self._subtype
def set__subtype(self, _subtype): self._subtype = _subtype
def get__instances(self): return self._instances
def set__instances(self, _instances): self._instances = _instances
def get__archetype(self): return self._archetype
def set__archetype(self, _archetype): self._archetype = _archetype
def get_Units(self): return self.Units
def set_Units(self, Units): self.Units = Units
def get__id(self): return self._id
def set__id(self, _id): self._id = _id
def get_Mises(self): return self.Mises
def set_Mises(self, Mises): self.Mises = Mises
def get_Shear(self): return self.Shear
def set_Shear(self, Shear): self.Shear = Shear
def hasContent_(self):
if (
):
return True
else:
return False
    def export(self, outfile, level, namespace_='', name_='MaterialType', namespacedef_='', pretty_print=True):
        """Serialise this element as XML to outfile at the given indent level."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        # preserve the tag name the element had in the parsed document
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='MaterialType')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespace_='', name_='MaterialType', pretty_print=pretty_print)
            # NOTE(review): no showIndent() before the closing tag here,
            # unlike sibling generated classes; presumably unreachable since
            # hasContent_() is always falsy for this type — confirm if child
            # elements are ever added to the schema.
            outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
        else:
            # no children: emit a self-closing tag
            outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='MaterialType'):
| |
dtype = int)
grid.cols_for_split_pillars_cl = np.array((1, 3, 4), dtype = int)
grid.write_hdf5()
grid.create_xml(title = 'G13 lower layer half thickness; throw 0.75 except for -0.25 on one wing')
grid_fcs, grid_fa = rqtr.fault_connection_set(grid)
assert grid_fcs is not None
assert grid_fa is not None
# show_fa(grid, grid_fcs, grid_fa)
assert grid_fcs.count == 6
assert np.all(np.isclose(grid_fa,
np.array([[0.25, 0.25],
[1.0, 0.5],
[5.0/8, 5.0/8], # 0.625, 0.625
[1.0/6, 1.0/3],
[1.0/3, 1.0/6],
[1.0/3, 1.0/3]]), atol = 0.01))
# J face split
# deeper layer half thickness of top layer
throw = 0.75
grid = grr.RegularGrid(model, (2, 2, 2), dxyz = (10.0, 10.0, 1.0))
grid.grid_representation = 'IjkGrid'
pu_pillar_count = (grid.nj + 1) * (grid.ni + 1)
pu = grid.points_ref(masked = False).reshape(grid.nk + 1, pu_pillar_count, 3)
p = np.zeros((grid.nk + 1, pu_pillar_count + grid.ni + 1, 3))
p[:, :pu_pillar_count, :] = pu
p[:, pu_pillar_count:, :] = pu[:, grid.ni + 1 : 2 * (grid.ni + 1), :]
p[:, 2 * (grid.ni + 1):, 2] += throw
p[0, 2 * (grid.ni + 1), 2] += 0.5
p[0, 3 * (grid.ni + 1), 2] += 0.5
p[1:, 2 * (grid.ni + 1), 2] -= 0.5
p[1:, 3 * (grid.ni + 1), 2] -= 0.5
p[-1, :, 2] -= 0.5
grid.points_cached = p
grid.has_split_coordinate_lines = True
grid.split_pillars_count = grid.ni + 1
grid.split_pillar_indices_cached = np.array([i for i in range(grid.ni + 1, 2 * (grid.ni + 1))], dtype = int)
grid.cols_for_split_pillars = np.array((2, 2, 3, 3), dtype = int)
grid.cols_for_split_pillars_cl = np.array((1, 3, 4), dtype = int)
grid.write_hdf5()
grid.create_xml(title = 'G14 lower layer half thickness; throw 0.75 except for top layer pinching on one wing')
grid_fcs, grid_fa = rqtr.fault_connection_set(grid)
assert grid_fcs is not None
assert grid_fa is not None
# show_fa(grid, grid_fcs, grid_fa)
assert grid_fcs.count == 5
assert np.all(np.isclose(grid_fa,
np.array([[1.0/16, 1.0/8], # 0.0625, 0.125
[0.75, 0.75],
[0.125, 0.125],
[0.25, 0.25],
[1.0, 0.5]]), atol = 0.01))
# J face split
# deeper layer half thickness of top layer
throw = -0.25
grid = grr.RegularGrid(model, (2, 2, 2), dxyz = (10.0, 10.0, 1.0))
grid.grid_representation = 'IjkGrid'
pu_pillar_count = (grid.nj + 1) * (grid.ni + 1)
pu = grid.points_ref(masked = False).reshape(grid.nk + 1, pu_pillar_count, 3)
p = np.zeros((grid.nk + 1, pu_pillar_count + grid.ni + 1, 3))
p[:, :pu_pillar_count, :] = pu
p[:, pu_pillar_count:, :] = pu[:, grid.ni + 1 : 2 * (grid.ni + 1), :]
p[:, 2 * (grid.ni + 1):, 2] += throw
p[0, 2 * (grid.ni + 1), 2] += 0.5
p[0, 3 * (grid.ni + 1), 2] += 0.5
p[-1, :, 2] -= 0.5
grid.points_cached = p
grid.has_split_coordinate_lines = True
grid.split_pillars_count = grid.ni + 1
grid.split_pillar_indices_cached = np.array([i for i in range(grid.ni + 1, 2 * (grid.ni + 1))], dtype = int)
grid.cols_for_split_pillars = np.array((2, 2, 3, 3), dtype = int)
grid.cols_for_split_pillars_cl = np.array((1, 3, 4), dtype = int)
grid.write_hdf5()
grid.create_xml(title = 'G15 lower layer half thickness; throw -0.25 except for top layer pinching on one wing')
grid_fcs, grid_fa = rqtr.fault_connection_set(grid)
assert grid_fcs is not None
assert grid_fa is not None
# show_fa(grid, grid_fcs, grid_fa)
assert grid_fcs.count == 6
assert np.all(np.isclose(grid_fa,
np.array([[11.0/16, 11.0/12],
[0.25, 0.5],
[0.5, 0.5],
[0.75, 0.75],
[0.25, 0.5],
[0.5, 0.5]]), atol = 0.01))
# J face split
# deeper layer half thickness of top layer
throw = -0.75
grid = grr.RegularGrid(model, (2, 2, 2), dxyz = (10.0, 10.0, 1.0))
grid.grid_representation = 'IjkGrid'
pu_pillar_count = (grid.nj + 1) * (grid.ni + 1)
pu = grid.points_ref(masked = False).reshape(grid.nk + 1, pu_pillar_count, 3)
p = np.zeros((grid.nk + 1, pu_pillar_count + grid.ni + 1, 3))
p[:, :pu_pillar_count, :] = pu
p[:, pu_pillar_count:, :] = pu[:, grid.ni + 1 : 2 * (grid.ni + 1), :]
p[:, 2 * (grid.ni + 1):, 2] += throw
p[:, 2 * (grid.ni + 1), 2] -= 1.0
p[:, 3 * (grid.ni + 1), 2] -= 1.0
p[-1, :, 2] -= 0.5
grid.points_cached = p
grid.has_split_coordinate_lines = True
grid.split_pillars_count = grid.ni + 1
grid.split_pillar_indices_cached = np.array([i for i in range(grid.ni + 1, 2 * (grid.ni + 1))], dtype = int)
grid.cols_for_split_pillars = np.array((2, 2, 3, 3), dtype = int)
grid.cols_for_split_pillars_cl = np.array((1, 3, 4), dtype = int)
grid.write_hdf5()
grid.create_xml(title = 'G16 lower layer half thickness; throw -0.75 except for -1.75 on one wing')
grid_fcs, grid_fa = rqtr.fault_connection_set(grid)
assert grid_fcs is not None
assert grid_fa is not None
# show_fa(grid, grid_fcs, grid_fa)
assert grid_fcs.count == 4
assert np.all(np.isclose(grid_fa,
np.array([[1.0/32, 1.0/32],
[0.25, 0.5],
[0.25, 0.25],
[0.5, 1.0]]), atol = 0.01))
# J face split
# deeper layer half thickness of top layer
throw = +0.75
grid = grr.RegularGrid(model, (2, 2, 2), dxyz = (10.0, 10.0, 1.0))
grid.grid_representation = 'IjkGrid'
pu_pillar_count = (grid.nj + 1) * (grid.ni + 1)
pu = grid.points_ref(masked = False).reshape(grid.nk + 1, pu_pillar_count, 3)
p = np.zeros((grid.nk + 1, pu_pillar_count + grid.ni + 1, 3))
p[:, :pu_pillar_count, :] = pu
p[:, pu_pillar_count:, :] = pu[:, grid.ni + 1 : 2 * (grid.ni + 1), :]
p[:, 2 * (grid.ni + 1):, 2] += throw
p[:, 2 * (grid.ni + 1), 2] += 1.0
p[:, 3 * (grid.ni + 1), 2] += 1.0
p[-1, :, 2] -= 0.5
grid.points_cached = p
grid.has_split_coordinate_lines = True
grid.split_pillars_count = grid.ni + 1
grid.split_pillar_indices_cached = np.array([i for i in range(grid.ni + 1, 2 * (grid.ni + 1))], dtype = int)
grid.cols_for_split_pillars = np.array((2, 2, 3, 3), dtype = int)
grid.cols_for_split_pillars_cl = np.array((1, 3, 4), dtype = int)
grid.write_hdf5()
grid.create_xml(title = 'G17 lower layer half thickness; throw +0.75 except for +1.75 on one wing')
grid_fcs, grid_fa = rqtr.fault_connection_set(grid)
assert grid_fcs is not None
assert grid_fa is not None
# show_fa(grid, grid_fcs, grid_fa)
assert grid_fcs.count == 4
assert np.all(np.isclose(grid_fa,
np.array([[1.0/32, 1.0/32],
[0.5, 0.25],
[0.25, 0.25],
[1.0, 0.5]]), atol = 0.01))
# J face split
# deeper layer half thickness of top layer
throw = +0.75
grid = grr.RegularGrid(model, (2, 2, 2), dxyz = (10.0, 10.0, 1.0))
grid.grid_representation = 'IjkGrid'
pu_pillar_count = (grid.nj + 1) * (grid.ni + 1)
pu = grid.points_ref(masked = False).reshape(grid.nk + 1, pu_pillar_count, 3)
p = np.zeros((grid.nk + 1, pu_pillar_count + grid.ni + 1, 3))
p[:, :pu_pillar_count, :] = pu
p[:, pu_pillar_count:, :] = pu[:, grid.ni + 1 : 2 * (grid.ni + 1), :]
p[:, 2 * (grid.ni + 1):, 2] += throw
p[:, 3 * (grid.ni + 1) - 1, 2] += 1.0
p[:, 4 * (grid.ni + 1) - 1, 2] += 1.0
p[-1, :, 2] -= 0.5
grid.points_cached = p
grid.has_split_coordinate_lines = True
grid.split_pillars_count = grid.ni + 1
grid.split_pillar_indices_cached = np.array([i for i in range(grid.ni + 1, 2 * (grid.ni + 1))], dtype = int)
grid.cols_for_split_pillars = np.array((2, 2, 3, 3), dtype = int)
grid.cols_for_split_pillars_cl = np.array((1, 3, 4), dtype = int)
grid.write_hdf5()
grid.create_xml(title = 'G18 lower layer half thickness; throw +0.75 except for +1.75 on one wing')
grid_fcs, grid_fa = rqtr.fault_connection_set(grid)
assert grid_fcs is not None
assert grid_fa is not None
# show_fa(grid, grid_fcs, grid_fa)
assert grid_fcs.count == 4
assert np.all(np.isclose(grid_fa,
np.array([[0.25, 0.25],
[1.0, 0.5],
[1.0/32, 1.0/32],
[0.5, 0.25]]), atol = 0.01))
# J face split
# deeper layer half thickness of top layer
throw = +0.25
grid = grr.RegularGrid(model, (2, 2, 2), dxyz = (10.0, 10.0, 1.0))
grid.grid_representation = 'IjkGrid'
pu_pillar_count = (grid.nj + 1) * (grid.ni + 1)
pu = grid.points_ref(masked = False).reshape(grid.nk + 1, pu_pillar_count, 3)
p = np.zeros((grid.nk + 1, pu_pillar_count + grid.ni + 1, 3))
p[:, :pu_pillar_count, :] = pu
p[:, pu_pillar_count:, :] = pu[:, grid.ni + 1 : 2 * (grid.ni + 1), :]
p[:, 2 * (grid.ni + 1):, 2] += throw
p[0, 2 * (grid.ni + 1), 2] -= 0.5
p[0, 3 * (grid.ni + 1), 2] -= 0.5
p[-1, :, 2] -= 0.5
grid.points_cached = p
grid.has_split_coordinate_lines = True
grid.split_pillars_count = grid.ni + 1
grid.split_pillar_indices_cached = np.array([i for i in range(grid.ni + 1, 2 * (grid.ni + 1))], dtype = int)
grid.cols_for_split_pillars = np.array((2, 2, 3, 3), dtype = | |
> first_destroy_idx:
# this device is preexisting
start = first_destroy_idx + 1
stop_action = creates[-1]
if start is None:
continue
# remove all actions on this from after the first destroy up
# to the last create
prune_actions = self.findActions(devid=a.device.id)
for rem in prune_actions:
if rem == stop_action:
break
end = self._actions.index(stop_action)
if start <= self._actions.index(rem) < end:
log.debug(" removing action '%s' (%s)" % (rem, id(rem)))
self._actions.remove(rem)
# device resize actions
actions = self.findActions(type="resize", object="device")
for a in actions:
if a not in self._actions:
# we may have removed some of the actions in a previous
# iteration of this loop
continue
log.debug("action '%s' (%s)" % (a, id(a)))
loops = self.findActions(devid=a.device.id,
type="resize",
object="device")
if len(loops) == 1:
continue
# remove all but the last resize action on this device
for rem in loops[:-1]:
log.debug(" removing action '%s' (%s)" % (rem, id(rem)))
self._actions.remove(rem)
# format destroy
# XXX I don't think there's a way for these loops to happen
actions = self.findActions(type="destroy", object="format")
for a in actions:
if a not in self._actions:
# we may have removed some of the actions in a previous
# iteration of this loop
continue
log.debug("action '%s' (%s)" % (a, id(a)))
destroys = self.findActions(devid=a.device.id,
type="destroy",
object="format")
creates = self.findActions(devid=a.device.id,
type="create",
object="format")
# If the format is not preexisting, we remove all actions up
# to and including the last destroy action.
# If the format is preexisting, we remove all actions from
# after the first destroy action up to and including the last
# destroy action.
loops = []
first_destroy_idx = None
first_create_idx = None
stop_action = None
start = None
if len(destroys) > 1:
# there are multiple destroy actions for this format
loops = destroys
first_destroy_idx = self._actions.index(loops[0])
start = self._actions.index(a) + 1
stop_action = destroys[-1]
if creates:
first_create_idx = self._actions.index(creates[0])
if not loops or first_destroy_idx > first_create_idx:
# this format is not preexisting
start = first_create_idx
stop_action = destroys[-1]
if start is None:
continue
# now we remove all actions on this device's format between
# the start index (into self._actions) and stop_action.
prune_actions = self.findActions(devid=a.device.id,
object="format")
for rem in prune_actions:
end = self._actions.index(stop_action)
if start <= self._actions.index(rem) <= end:
log.debug(" removing action '%s' (%s)" % (rem, id(rem)))
self._actions.remove(rem)
if rem == stop_action:
break
# format create
# XXX I don't think there's a way for these loops to happen
actions = self.findActions(type="create", object="format")
for a in actions:
if a not in self._actions:
# we may have removed some of the actions in a previous
# iteration of this loop
continue
log.debug("action '%s' (%s)" % (a, id(a)))
creates = self.findActions(devid=a.device.id,
type="create",
object="format")
destroys = self.findActions(devid=a.device.id,
type="destroy",
object="format")
# If the format is preexisting, we remove everything between
# the first destroy and the last create.
# If the format is not preexisting, we remove everything up to
# the last create.
loops = []
first_destroy_idx = None
first_create_idx = None
stop_action = None
start = None
if len(creates) > 1:
# there are multiple create actions for this format
loops = creates
first_create_idx = self._actions.index(loops[0])
start = 0
stop_action = creates[-1]
if destroys:
first_destroy_idx = self._actions.index(destroys[0])
if not loops or first_create_idx > first_destroy_idx:
# this format is preexisting
start = first_destroy_idx + 1
stop_action = creates[-1]
if start is None:
continue
# remove all actions on this from after the first destroy up
# to the last create
dev_actions = self.findActions(devid=a.device.id,
object="format")
for rem in dev_actions:
if rem == stop_action:
break
end = self._actions.index(stop_action)
if start <= self._actions.index(rem) < end:
log.debug(" removing action '%s' (%s)" % (rem, id(rem)))
self._actions.remove(rem)
# format resize
actions = self.findActions(type="resize", object="format")
for a in actions:
if a not in self._actions:
# we may have removed some of the actions in a previous
# iteration of this loop
continue
log.debug("action '%s' (%s)" % (a, id(a)))
loops = self.findActions(devid=a.device.id,
type="resize",
object="format")
if len(loops) == 1:
continue
# remove all but the last resize action on this format
for rem in loops[:-1]:
log.debug(" removing action '%s' (%s)" % (rem, id(rem)))
self._actions.remove(rem)
# format migrate
        # XXX I don't think there's a way for these loops to occur
actions = self.findActions(type="migrate", object="format")
for a in actions:
if a not in self._actions:
# we may have removed some of the actions in a previous
# iteration of this loop
continue
log.debug("action '%s' (%s)" % (a, id(a)))
loops = self.findActions(devid=a.device.id,
type="migrate",
object="format")
if len(loops) == 1:
continue
# remove all but the last migrate action on this format
for rem in loops[:-1]:
log.debug(" removing action '%s' (%s)" % (rem, id(rem)))
self._actions.remove(rem)
def processActions(self, dryRun=None):
""" Execute all registered actions. """
# Execute an action
def executeAction(action):
try:
# STACKI
log.info("STACKI:standard action: %s" % action)
# STACKI
action.execute(intf=self.intf)
except DiskLabelCommitError:
# it's likely that a previous format destroy action
# triggered setup of an lvm or md device.
self.teardownAll()
action.execute(intf=self.intf)
udev_settle()
for device in self._devices:
#make sure we catch any renumbering parted does
if device.exists and isinstance(device, PartitionDevice):
device.updateName()
device.format.device = device.path
# in most cases the actions will already be sorted because of the
# rules for registration, but let's not rely on that
def cmpActions(a1, a2):
ret = 0
if a1.isDestroy() and a2.isDestroy():
if a1.device.path == a2.device.path:
# if it's the same device, destroy the format first
if a1.isFormat() and a2.isFormat():
ret = 0
elif a1.isFormat() and not a2.isFormat():
ret = -1
elif not a1.isFormat() and a2.isFormat():
ret = 1
elif a1.device.dependsOn(a2.device):
ret = -1
elif a2.device.dependsOn(a1.device):
ret = 1
# generally destroy partitions after lvs, vgs, &c
elif isinstance(a1.device, PartitionDevice) and \
isinstance(a2.device, PartitionDevice):
if a1.device.disk == a2.device.disk:
ret = cmp(a2.device.partedPartition.number,
a1.device.partedPartition.number)
else:
ret = cmp(a2.device.name, a1.device.name)
elif isinstance(a1.device, PartitionDevice) and \
a2.device.partitioned:
ret = -1
elif isinstance(a2.device, PartitionDevice) and \
a1.device.partitioned:
ret = 1
# remove partitions before unpartitioned non-partition
# devices
elif isinstance(a1.device, PartitionDevice) and \
not isinstance(a2.device, PartitionDevice):
ret = 1
elif isinstance(a2.device, PartitionDevice) and \
not isinstance(a1.device, PartitionDevice):
ret = -1
else:
ret = 0
elif a1.isDestroy():
ret = -1
elif a2.isDestroy():
ret = 1
elif a1.isResize() and a2.isResize():
if a1.device.path == a2.device.path:
if a1.obj == a2.obj:
ret = 0
elif a1.isFormat() and not a2.isFormat():
# same path, one device, one format
if a1.isGrow():
ret = 1
else:
ret = -1
elif not a1.isFormat() and a2.isFormat():
# same path, one device, one format
if a1.isGrow():
ret = -1
else:
ret = 1
else:
ret = cmp(a1.device.name, a2.device.name)
elif a1.device.dependsOn(a2.device):
if a1.isGrow():
ret = 1
else:
ret = -1
elif a2.device.dependsOn(a1.device):
if a1.isGrow():
ret = -1
else:
ret = 1
elif isinstance(a1.device, PartitionDevice) and \
isinstance(a2.device, PartitionDevice):
ret = cmp(a1.device.name, a2.device.name)
else:
ret = 0
elif a1.isResize():
ret = -1
elif a2.isResize():
ret = 1
elif a1.isCreate() and a2.isCreate():
if a1.device.path == a2.device.path:
if a1.obj == a2.obj:
ret = 0
if a1.isFormat():
ret = 1
elif a2.isFormat():
ret = -1
else:
ret = 0
elif a1.device.dependsOn(a2.device):
ret = 1
elif a2.device.dependsOn(a1.device):
ret = -1
# generally create partitions before other device types
elif isinstance(a1.device, PartitionDevice) and \
isinstance(a2.device, PartitionDevice):
if a1.device.disk == a2.device.disk:
ret = cmp(a1.device.partedPartition.number,
a2.device.partedPartition.number)
else:
ret = cmp(a1.device.name, a2.device.name)
elif isinstance(a1.device, LVMLogicalVolumeDevice) and \
isinstance(a2.device, LVMLogicalVolumeDevice) and \
a1.device.vg == a2.device.vg:
if a1.device.singlePV and not a2.device.singlePV:
ret = -1
elif not a1.device.singlePV and a2.device.singlePV:
ret = 1
elif isinstance(a1.device, PartitionDevice) and \
a2.device.partitioned:
ret = 1
elif isinstance(a2.device, PartitionDevice) and \
a1.device.partitioned:
ret = -1
elif isinstance(a1.device, PartitionDevice) and \
not isinstance(a2.device, PartitionDevice):
ret = -1
elif isinstance(a2.device, PartitionDevice) and \
not isinstance(a1.device, PartitionDevice):
ret = 1
else:
ret = 0
elif a1.isCreate():
ret = -1
elif a2.isCreate():
ret = 1
elif a1.isMigrate() and a2.isMigrate():
if a1.device.path == a2.device.path:
ret = 0
elif a1.device.dependsOn(a2.device):
ret = 1
elif a2.device.dependsOn(a1.device):
ret = -1
elif isinstance(a1.device, PartitionDevice) and \
isinstance(a2.device, PartitionDevice):
if a1.device.disk == a2.device.disk:
ret = cmp(a1.device.partedPartition.number,
a2.device.partedPartition.number)
| |
\dot{z} = \frac{d}{dt} z = A z +
\begin{pmatrix} 0 \\ \frac{1}{\tau_w} \delta_L \end{pmatrix}
where A takes different forms depending on the selected dynamics.
The four possible A are returned:
* :math:`\tilde{A}_I = \begin{pmatrix} \frac{-1}{\tau_v} (I - \hat{J}) & \frac{1}{\tau_v} \bar{Q} \\
\frac{-1}{\tau_u} J_L^o & \frac{-\alpha}{\tau_u} I \end{pmatrix}`
* :math:`A_I = \begin{pmatrix} \frac{-1}{\tau_v} (I - \hat{J}) & \frac{1}{\tau_v} (I - \hat{J}) \bar{Q} \\
\frac{-1}{\tau_u} \bar{J} & \frac{-\alpha}{\tau_u} I \end{pmatrix}`
* :math:`A_I^{inst} = \frac{-1}{\tau_u} (\bar{J}\bar{Q} + \alpha I)`
* :math:`A_{PI} = \begin{pmatrix} \frac{-1}{\tau_v} (I - \hat{J}) & \frac{1}{\tau_v} (I - \hat{J}) \bar{Q} \\
\frac{-1}{\tau_u} (-\bar{J} + \frac{K_p}{\tau_v} J_L^o) & -\frac{1}{\tau_u}(\alpha I+\frac{K_p}{\tau_v} J_L Q_L)
\end{pmatrix}`
:param linear_activations: linear activations :math:`v_i` that are to be used. It can either be
the feedforward activations, or the steady state activations (from dynamical inversion).
Must be given as a list, one element per layer.
:param forward: flag which signals whether we are analysing forward dynamics (if True) or feedback dynamics
(if False). Sets the values alpha, k_p and time_constant_ratio to their corresponding forward or
feedback values.
:param linear: whether the network is linear. Passed on to compute the Jacobian.
:return A: A dictionary which contains the matrices :math:`\tilde{A}_I`, :math:`A_I`, :math:`A_I^{inst}`
and :math:`A_{PI}` as described above.
:return max_eig: A dictionary with identical keys, containing the maximum eigenvalue (real part) of each
matrix in A, for each batch sample (in a 1D np array).
:return keys: list of keys under which the A matrices and the respective max eigenvalues are stored
"""
if forward:
alpha = self.alpha_di
k_p = self.k_p
tcr = self.time_constant_ratio
else:
alpha = self.alpha_fb
k_p = self.k_p_fb
tcr = self.time_constant_ratio_fb
L = self.depth
layer_jacobians = [None for i in range(L)]
vectorized_nonlinearity_derivative = [self.layers[i].compute_vectorized_jacobian(linear_activations[i]) for i in range(L)]
output_activation = self.layers[-1].activations
batch_size = output_activation.shape[0]
output_size = output_activation.shape[1]
layer_jacobians[-1] = \
torch.eye(output_size).repeat(batch_size, 1, 1).reshape(batch_size, output_size, output_size)
if linear:
layer_jacobians[-1] = layer_jacobians[-1] \
* vectorized_nonlinearity_derivative[-1].view(batch_size, output_size, 1)
for i in range(L - 1 - 1, 0 - 1, -1):
if linear:
layer_jacobians[i] = layer_jacobians[i + 1].matmul(self.layers[i + 1].weights) \
* vectorized_nonlinearity_derivative[i].unsqueeze(1)
else:
layer_jacobians[i] = (layer_jacobians[i + 1] * vectorized_nonlinearity_derivative[i + 1].unsqueeze(1)) \
.matmul(self.layers[i + 1].weights)
J_bar = torch.cat(layer_jacobians, dim=2)
B = batch_size
sum_i = J_bar.shape[2]
J_L0 = torch.zeros(J_bar.shape)
J_L0[:, :, -layer_jacobians[-1].shape[2]:] = layer_jacobians[-1]
J_hat = torch.zeros((B, sum_i, sum_i))
io = torch.tensor([[l.weights.shape[0], l.weights.shape[1]] for l in self.layers])
limits = [torch.sum(io[:i, 0]) for i in range(L + 1)]
for i in range(1, L):
J_hat[:, limits[i]:limits[i+1], limits[i-1]:limits[i]] = \
self.layers[i].weights.unsqueeze(0) * vectorized_nonlinearity_derivative[i-1].unsqueeze(1)
a_11 = (1/tcr) * (torch.eye(sum_i) - J_hat)
A = dict()
A["A_tildeI"] = torch.cat((
torch.cat((-1*a_11, (1/tcr)*self.full_Q.expand(B, self.full_Q.shape[0], self.full_Q.shape[1])), dim=2), # first row
torch.cat((-1*J_L0, -alpha * torch.eye(output_size).expand(B, output_size, output_size)), dim=2) # second row
), dim=1)
A["A_I"] = torch.cat((
torch.cat((-1*a_11, a_11 @ self.full_Q), dim=2),
torch.cat((-1*J_bar, -alpha * torch.eye(output_size).expand(B, output_size, output_size)), dim=2) # second row
), dim=1)
A["A_instI"] = -1*(torch.matmul(J_bar, self.full_Q) + alpha*torch.eye(output_size))
A["A_PI"] = torch.cat((
torch.cat( (-1*a_11, a_11 @ self.full_Q), dim=2),
torch.cat( (-1*J_bar + k_p/tcr * J_L0,
-1*(alpha * torch.eye(output_size).expand(B, output_size, output_size)
+(k_p/tcr)*layer_jacobians[-1]@self.layers[-1].feedbackweights)),dim=2)
), dim=1)
max_eig = dict()
keys = [k for k in A.keys()]
for k in keys:
max_eig[k] = np.zeros((B,))
for b in range(B):
for k in keys:
max_eig[k][b] = torch.max(torch.eig(A[k][b]).eigenvalues[:, 0]).detach().cpu().numpy() # the first value is the real part
return A, max_eig, keys
def save_eigenvalues_to_tensorboard(self, writer, step):
"""
Save the maximum eigenvalues of the different variations of A
to Tensorboard. Generates a plot of the average and a histogram across samples.
:param: writer: Tensorboard object to which the data will be written
:param: step: x-axis index used for tensorboard (normally train_var.batch_idx)
"""
categ = "max_eig"
for k in self.max_eig.keys():
writer.add_scalar(
tag=categ + "/" + k,
scalar_value=np.mean(self.max_eig[k]),
global_step=step)
writer.add_histogram(
tag=categ+"/"+k,
values=self.max_eig[k],
global_step=step)
def save_eigenvalues_bcn_to_tensorboard(self, writer, step):
"""
Save the maximum eigenvalues of the different variations of A
(before corrections for non-convergence / divergence take place)
to Tensorboard. Saves both mean and std across the batch.
:param: writer: Tensorboard object to which the data will be written
:param: step: x-axis index used for tensorboard (normally train_var.batch_idx)
"""
categ = "max_eig_bcn"
for k in self.max_eig_bcn.keys():
writer.add_scalar(
tag=categ + "/" + k + "_mean",
scalar_value=np.mean(self.max_eig_bcn[k]),
global_step=step)
writer.add_histogram(
tag=categ + "/" + k,
values=self.max_eig_bcn[k],
global_step=step)
def save_norm_r_to_tensorboard(self, writer, step):
"""
Save the modulus of r (post nonlinearity activations) of each layer
(as relative deviations from the mean of all layers)
to Tensorboard. Saves both mean and std across the batch.
:param: writer: Tensorboard object to which the data will be written
:param: step: x-axis index used for tensorboard (normally train_var.batch_idx)
"""
"""
iterate over the keys in norm_r and save to Tensorboard
note that deviations can have positive or negative value
values distributed around the mean will generate 0 mean
but a large std.
"""
categ = 'norm_r'
for k in self.norm_r.keys():
writer.add_scalar(
tag=categ + "/" + k + "_mean",
scalar_value=np.mean(self.norm_r[k]),
global_step=step)
writer.add_histogram(
tag=categ + "/" + k,
values=self.norm_r[k],
global_step=step)
writer.add_scalar(
tag=categ + "/" + "dev_r_mean",
scalar_value=np.mean(self.dev_r),
global_step=step)
writer.add_histogram(
tag=categ + "/" + "dev_r",
values=self.dev_r,
global_step=step)
def split_full_jacobian_tilde(self, full_jacobian_tilde):
"""
Split :math:`\tilde{J}` into the block matrices
:math:`\tilde{J} = [\tilde{J}_1 \tilde{J}_2]` with :math:`\tilde{J}_1`
a square matrix. Do this for each batch sample.
Args:
full_jacobian_tilde (torch.Tensor): the linearly transformed
full jacobian computed by :func:`compute_full_jacobian_tilde`
Returns (tuple): A tuple (J_tilde_1, J_tilde_2), with J_tilde_1 and
J_tilde_2 containing the above specified matrices for each batch
sample.
"""
n_L = full_jacobian_tilde.shape[1]
return (full_jacobian_tilde[:,:,:n_L], full_jacobian_tilde[:,:,n_L:])
    def save_ndi_angles(self, writer, step,
                        save_dataframe=True, save_tensorboard=True):
        """
        Compute the angle between the actual weight updates of the model
        (e.g. resulting from TPDI) on the one hand, and the weight updates
        resulting from ideal inversion (analytical solution, NDI) on the
        other hand. The NDI updates have been stored during training in
        ``self.layers[i].ndi_updates_weights`` / ``ndi_updates_bias``.

        Per-layer angles and the angle between the concatenated update
        vectors are saved to the tensorboardX writer
        (if ``save_tensorboard=True``) and/or stored in the
        corresponding dataframes (if ``save_dataframe=True``).

        Args:
            writer: Tensorboard writer.
            step (int): x-axis index used for tensorboard.
            save_dataframe (bool): Flag indicating whether a dataframe of the
                angles should be saved in the network object
                (``self.ndi_angles`` / ``self.ndi_angles_network``).
            save_tensorboard (bool): Flag indicating whether the angles should
                be saved in Tensorboard.
        """
        ndi_param_updates = []
        net_params = self.get_forward_parameter_list()
        # NOTE(review): the network-level comparison reads p.grad, while the
        # per-layer comparison uses get_forward_gradients() -- presumably
        # these hold the same update values; confirm against the trainer.
        net_param_updates = [p.grad for p in net_params]
        for i in range(self.depth):
            parameter_update = self.layers[i].get_forward_gradients()
            # Angle between stored NDI weight update and the actual update.
            weights_angle = utils.compute_angle(self.layers[i].ndi_updates_weights,
                                                parameter_update[0])
            ndi_param_updates.append(self.layers[i].ndi_updates_weights)
            if self.use_bias:
                bias_angle = utils.compute_angle(self.layers[i].ndi_updates_bias,
                                                 parameter_update[1])
                ndi_param_updates.append(self.layers[i].ndi_updates_bias)
            if save_tensorboard:
                name = 'layer {}'.format(i + 1)
                writer.add_scalar(
                    tag='{}/weight_ndi_angle'.format(name),
                    scalar_value=weights_angle,
                    global_step=step)
                if self.use_bias:
                    writer.add_scalar(
                        tag='{}/bias_ndi_angle'.format(name),
                        scalar_value=bias_angle,
                        global_step=step
                    )
            if save_dataframe:
                # Only the weight angle is stored in the dataframe; the bias
                # angle is logged to tensorboard only.
                self.ndi_angles.at[step, i] = weights_angle.item()
        # Network-wide alignment between the concatenated NDI updates and
        # the concatenated actual updates.
        total_angle = utils.compute_angle(utils.vectorize_tensor_list(ndi_param_updates),
                                          utils.vectorize_tensor_list(net_param_updates))
        if save_tensorboard:
            name = 'total_alignment/ndi_angle'
            writer.add_scalar(
                tag=name,
                scalar_value=total_angle,
                global_step=step
            )
        if save_dataframe:
            self.ndi_angles_network.at[step, 0] = total_angle.item()
def compute_condition_two(self, retain_graph=False):
"""
..math::
\frac{\|\tilde{J}_2\|_F}{\|\tilde{J}\|_F}
to keep track whether condition 2 is (approximately) satisfied.
If the minibatch size is bigger than 1, the mean over the minibatch
is returned.
Returns:
"""
jacobians = self.compute_full_jacobian(linear=True,
retain_graph=retain_graph)
Q = self.full_Q
projected_Q_fro = []
for b in range(jacobians.shape[0]):
jac = jacobians[b,:,:]
projection_matrix = torch.matmul(jac.T,
torch.matmul(torch.inverse(torch.matmul(jac, jac.T)), jac))
projected_Q_fro.append(torch.norm(torch.matmul(projection_matrix, Q), p='fro'))
projected_Q_fro = torch.stack(projected_Q_fro)
Q_fro = torch.norm(Q, p='fro')
condition_two_ratio = projected_Q_fro/Q_fro
return torch.mean(condition_two_ratio)
def get_feedback_parameter_list(self):
"""
Returns (list): a list with all the feedback parameters (weights and
biases) of the network. Note that the first hidden layer does not
need feedback parameters, so they are not put in the list.
"""
parameterlist = []
for layer in self.layers:
parameterlist.append(layer.feedbackweights)
return parameterlist
def save_feedback_batch_logs(self, args, writer, step, init=False,
retain_graph=False, save_tensorboard=True,
save_dataframe=True, save_statistics=False,
damping=0):
"""
Save the logs for the current minibatch on tensorboardX.
Args:
args (argsparse.Namespace): cmd line arguments
writer (SummaryWriter): summary writer from tensorboardX
step (int): the global step used for the x-axis of the plots
init (bool): flag indicating that the training is in the
initialization phase (only training the feedback weights).
retain_graph (bool): Flag indicating whether the Pytorch Autograd
computational graph should be retained or not.
save_statistics: Flag indicating whether the statistics of the
feedback weights should be saved (e.g. gradient norms)
damping (float): damping constant used for computing the damped
pseudoinverse of the jacobian.
| |
# Copyright (c) 2020 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Library to manipulate Containers."""
from collections import OrderedDict, Counter
from io import open
from re import search
from string import Template
from time import sleep
from robot.libraries.BuiltIn import BuiltIn
from resources.libraries.python.Constants import Constants
from resources.libraries.python.CpuUtils import CpuUtils
from resources.libraries.python.ssh import SSH
from resources.libraries.python.topology import Topology, SocketType
from resources.libraries.python.VppConfigGenerator import VppConfigGenerator
# Public API of this module.
__all__ = [
    u"ContainerManager", u"ContainerEngine", u"LXC", u"Docker", u"Container"
]

# Path to the supervisord configuration file inside the container.
SUPERVISOR_CONF = u"/etc/supervisor/supervisord.conf"
class ContainerManager:
"""Container lifecycle management class."""
def __init__(self, engine):
"""Initialize Container Manager class.
:param engine: Container technology used (LXC/Docker/...).
:type engine: str
:raises NotImplementedError: If container technology is not implemented.
"""
try:
self.engine = globals()[engine]()
except KeyError:
raise NotImplementedError(f"{engine} is not implemented.")
self.containers = OrderedDict()
def get_container_by_name(self, name):
"""Get container instance.
:param name: Container name.
:type name: str
:returns: Container instance.
:rtype: Container
:raises RuntimeError: If failed to get container with name.
"""
try:
return self.containers[name]
except KeyError:
raise RuntimeError(f"Failed to get container with name: {name}")
def construct_container(self, **kwargs):
"""Construct container object on node with specified parameters.
:param kwargs: Key-value pairs used to construct container.
:param kwargs: dict
"""
# Create base class
self.engine.initialize()
# Set parameters
for key in kwargs:
setattr(self.engine.container, key, kwargs[key])
# Set additional environmental variables
setattr(
self.engine.container, u"env",
f"MICROSERVICE_LABEL={kwargs[u'name']}"
)
# Store container instance
self.containers[kwargs[u"name"]] = self.engine.container
def construct_containers(self, **kwargs):
"""Construct 1..N container(s) on node with specified name.
Ordinal number is automatically added to the name of container as
suffix.
:param kwargs: Named parameters.
:param kwargs: dict
"""
name = kwargs[u"name"]
for i in range(kwargs[u"count"]):
# Name will contain ordinal suffix
kwargs[u"name"] = u"".join([name, str(i+1)])
# Create container
self.construct_container(i=i, **kwargs)
def acquire_all_containers(self):
"""Acquire all containers."""
for container in self.containers:
self.engine.container = self.containers[container]
self.engine.acquire()
def build_all_containers(self):
"""Build all containers."""
for container in self.containers:
self.engine.container = self.containers[container]
self.engine.build()
def create_all_containers(self):
"""Create all containers."""
for container in self.containers:
self.engine.container = self.containers[container]
self.engine.create()
def execute_on_container(self, name, command):
"""Execute command on container with name.
:param name: Container name.
:param command: Command to execute.
:type name: str
:type command: str
"""
self.engine.container = self.get_container_by_name(name)
self.engine.execute(command)
def execute_on_all_containers(self, command):
"""Execute command on all containers.
:param command: Command to execute.
:type command: str
"""
for container in self.containers:
self.engine.container = self.containers[container]
self.engine.execute(command)
def start_vpp_in_all_containers(self):
"""Start VPP in all containers."""
for container in self.containers:
self.engine.container = self.containers[container]
self.engine.start_vpp()
def restart_vpp_in_all_containers(self):
"""Restart VPP in all containers."""
for container in self.containers:
self.engine.container = self.containers[container]
self.engine.restart_vpp()
def verify_vpp_in_all_containers(self):
"""Verify that VPP is installed and running in all containers."""
for container in self.containers:
self.engine.container = self.containers[container]
self.engine.verify_vpp()
    def configure_vpp_in_all_containers(self, chain_topology, **kwargs):
        """Configure VPP in all containers.

        :param chain_topology: Topology used for chaining containers can be
            chain or cross_horiz. Chain topology is using 1 memif pair per
            container. Cross_horiz topology is using 1 memif and 1 physical
            interface in container (only single container can be configured).
        :param kwargs: Named parameters.
        :type chain_topology: str
        :type kwargs: dict
        :raises RuntimeError: If the requested chain topology is not
            implemented.
        """
        # Count number of DUTs based on node's host information
        dut_cnt = len(
            Counter(
                [
                    self.containers[container].node[u"host"]
                    for container in self.containers
                ]
            )
        )
        # Number of containers per DUT; memif/socket ids restart per DUT.
        mod = len(self.containers) // dut_cnt
        for i, container in enumerate(self.containers):
            # mid* are memif ids (1-based), sid* socket ids (two per container).
            mid1 = i % mod + 1
            mid2 = i % mod + 1
            sid1 = i % mod * 2 + 1
            sid2 = i % mod * 2 + 2
            self.engine.container = self.containers[container]
            # Guest-side path of the first mounted volume ("host:guest").
            guest_dir = self.engine.container.mnt[0].split(u":")[1]
            if chain_topology == u"chain":
                self._configure_vpp_chain_l2xc(
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    guest_dir=guest_dir, **kwargs
                )
            elif chain_topology == u"cross_horiz":
                self._configure_vpp_cross_horiz(
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    guest_dir=guest_dir, **kwargs
                )
            elif chain_topology == u"chain_functional":
                self._configure_vpp_chain_functional(
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    guest_dir=guest_dir, **kwargs
                )
            elif chain_topology == u"chain_ip4":
                self._configure_vpp_chain_ip4(
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    guest_dir=guest_dir, **kwargs
                )
            elif chain_topology == u"pipeline_ip4":
                self._configure_vpp_pipeline_ip4(
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    guest_dir=guest_dir, **kwargs
                )
            elif chain_topology == u"chain_vswitch":
                self._configure_vpp_chain_vswitch(
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    guest_dir=guest_dir, **kwargs)
            elif chain_topology == u"chain_ipsec":
                # The NF instance index is taken from the trailing digits of
                # the container name.
                # NOTE(review): if the name has no trailing digits, ``idx``
                # stays unbound and the call below raises UnboundLocalError
                # -- presumably names always end in an index; confirm.
                idx_match = search(r"\d+$", self.engine.container.name)
                if idx_match:
                    idx = int(idx_match.group())
                self._configure_vpp_chain_ipsec(
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    guest_dir=guest_dir, nf_instance=idx, **kwargs)
            else:
                raise RuntimeError(
                    f"Container topology {chain_topology} not implemented"
                )
def _configure_vpp_chain_l2xc(self, **kwargs):
"""Configure VPP in chain topology with l2xc.
:param kwargs: Named parameters.
:type kwargs: dict
"""
self.engine.create_vpp_startup_config()
self.engine.create_vpp_exec_config(
u"memif_create_chain_l2xc.exec",
mid1=kwargs[u"mid1"], mid2=kwargs[u"mid2"],
sid1=kwargs[u"sid1"], sid2=kwargs[u"sid2"],
socket1=f"{kwargs[u'guest_dir']}/memif-"
f"{self.engine.container.name}-{kwargs[u'sid1']}",
socket2=f"{kwargs[u'guest_dir']}/memif-"
f"{self.engine.container.name}-{kwargs[u'sid2']}"
)
    def _configure_vpp_cross_horiz(self, **kwargs):
        """Configure VPP in cross horizontal topology (single memif).

        Uses ``dut1_if`` or ``dut2_if`` from kwargs depending on whether the
        container name contains "DUT1" or "DUT2".

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        # NOTE(review): if the container name contains neither "DUT1" nor
        # "DUT2", ``if_pci``/``if_name`` stay unbound and the calls below
        # raise UnboundLocalError -- presumably names are always
        # DUT-prefixed; confirm against the callers.
        if u"DUT1" in self.engine.container.name:
            if_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs[u"dut1_if"])
            if_name = Topology.get_interface_name(
                self.engine.container.node, kwargs[u"dut1_if"])
        if u"DUT2" in self.engine.container.name:
            if_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs[u"dut2_if"])
            if_name = Topology.get_interface_name(
                self.engine.container.node, kwargs[u"dut2_if"])
        self.engine.create_vpp_startup_config_dpdk_dev(if_pci)
        self.engine.create_vpp_exec_config(
            u"memif_create_cross_horizon.exec",
            mid1=kwargs[u"mid1"], sid1=kwargs[u"sid1"], if_name=if_name,
            socket1=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid1']}"
        )
def _configure_vpp_chain_functional(self, **kwargs):
"""Configure VPP in chain topology with l2xc (functional).
:param kwargs: Named parameters.
:type kwargs: dict
"""
self.engine.create_vpp_startup_config()
self.engine.create_vpp_exec_config(
u"memif_create_chain_functional.exec",
mid1=kwargs[u"mid1"], mid2=kwargs[u"mid2"],
sid1=kwargs[u"sid1"], sid2=kwargs[u"sid2"],
socket1=f"{kwargs[u'guest_dir']}/memif-"
f"{self.engine.container.name}-{kwargs[u'sid1']}",
socket2=f"{kwargs[u'guest_dir']}/memif-"
f"{self.engine.container.name}-{kwargs[u'sid2']}",
rx_mode=u"interrupt"
)
def _configure_vpp_chain_ip4(self, **kwargs):
"""Configure VPP in chain topology with ip4.
:param kwargs: Named parameters.
:type kwargs: dict
"""
self.engine.create_vpp_startup_config()
vif1_mac = kwargs[u"tg_pf1_mac"] \
if (kwargs[u"mid1"] - 1) % kwargs[u"nodes"] + 1 == 1 \
else f"52:54:00:00:{(kwargs[u'mid1'] - 1):02X}:02"
vif2_mac = kwargs[u"tg_pf2_mac"] \
if (kwargs[u"mid2"] - 1) % kwargs[u"nodes"] + 1 == kwargs[u"nodes"]\
else f"52:54:00:00:{(kwargs['mid2'] + 1):02X}:01"
self.engine.create_vpp_exec_config(
u"memif_create_chain_ip4.exec",
mid1=kwargs[u"mid1"], mid2=kwargs[u"mid2"],
sid1=kwargs[u"sid1"], sid2=kwargs[u"sid2"],
socket1=f"{kwargs[u'guest_dir']}/memif-"
f"{self.engine.container.name}-{kwargs[u'sid1']}",
socket2=f"{kwargs[u'guest_dir']}/memif-"
f"{self.engine.container.name}-{kwargs[u'sid2']}",
mac1=f"52:54:00:00:{kwargs[u'mid1']:02X}:01",
mac2=f"52:54:00:00:{kwargs[u'mid2']:02X}:02",
vif1_mac=vif1_mac, vif2_mac=vif2_mac
)
    def _configure_vpp_chain_vswitch(self, **kwargs):
        """Configure VPP as vswitch in container.

        Picks the red/black interfaces and TG-facing address/MAC based on
        whether this container runs on DUT1 or DUT2 (taken from the
        container name prefix), then generates the vswitch startup and exec
        configurations with one memif pair per NF instance.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        # DUT identifier is the part of the container name before "_".
        dut = self.engine.container.name.split(u"_")[0]
        if dut == u"DUT1":
            if1_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs[u"dut1_if2"])
            if2_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs[u"dut1_if1"])
            if_red_name = Topology.get_interface_name(
                self.engine.container.node, kwargs[u"dut1_if2"])
            if_black_name = Topology.get_interface_name(
                self.engine.container.node, kwargs[u"dut1_if1"])
            tg_pf_ip4 = kwargs[u"tg_pf2_ip4"]
            tg_pf_mac = kwargs[u"tg_pf2_mac"]
        else:
            # Any non-DUT1 container is treated as DUT2 here.
            tg_pf_ip4 = kwargs[u"tg_pf1_ip4"]
            tg_pf_mac = kwargs[u"tg_pf1_mac"]
            if1_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs[u"dut2_if1"])
            if2_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs[u"dut2_if2"])
            if_red_name = Topology.get_interface_name(
                self.engine.container.node, kwargs[u"dut2_if1"])
            if_black_name = Topology.get_interface_name(
                self.engine.container.node, kwargs[u"dut2_if2"])
        n_instances = int(kwargs[u"n_instances"])
        # Default to a single rx queue unless overridden.
        rxq = 1
        if u"rxq" in kwargs:
            rxq = int(kwargs[u"rxq"])
        nodes = kwargs[u"nodes"]
        cpuset_cpus = CpuUtils.get_affinity_nf(
            nodes, dut, nf_chains=1, nf_nodes=1, nf_chain=1,
            nf_node=1, vs_dtc=0, nf_dtc=8, nf_mtcr=1, nf_dtcr=1
        )
        self.engine.create_vpp_startup_config_vswitch(
            cpuset_cpus, rxq, if1_pci, if2_pci
        )
        # One master memif per socket per NF instance, bridged into two L2
        # bridge domains, with a static neighbor entry towards the TG.
        instances = []
        for i in range(1, n_instances + 1):
            instances.append(
                f"create interface memif id {i} socket-id 1 master\n"
                f"set interface state memif1/{i} up\n"
                f"set interface l2 bridge memif1/{i} 1\n"
                f"create interface memif id {i} socket-id 2 master\n"
                f"set interface state memif2/{i} up\n"
                f"set interface l2 bridge memif2/{i} 2\n"
                f"set ip neighbor memif2/{i} {tg_pf_ip4} {tg_pf_mac} "
                f"static\n\n"
            )
        self.engine.create_vpp_exec_config(
            u"memif_create_chain_vswitch_ipsec.exec",
            socket1=f"{kwargs[u'guest_dir']}/{dut}_memif-vswitch-1",
            socket2=f"{kwargs[u'guest_dir']}/{dut}_memif-vswitch-2",
            if_red_name=if_red_name,
            if_black_name=if_black_name,
            instances=u"\n\n".join(instances))
    def _configure_vpp_chain_ipsec(self, **kwargs):
        """Configure VPP in container with memifs.

        Sets up one NF instance of an IPsec chain: CPU affinity, startup
        configuration and an exec script with tunnel endpoints and MACs
        derived from the NF instance number and the DUT side.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        nf_nodes = int(kwargs[u"nf_nodes"])
        nf_instance = int(kwargs[u"nf_instance"])
        nodes = kwargs[u"nodes"]
        # DUT identifier is the part of the container name before "_".
        dut = self.engine.container.name.split(u"_")[0]
        cpuset_cpus = CpuUtils.get_affinity_nf(
            nodes, dut, nf_chains=1, nf_nodes=nf_nodes, nf_chain=1,
            nf_node=nf_instance, vs_dtc=10, nf_dtc=1, nf_mtcr=1, nf_dtcr=1)
        self.engine.create_vpp_startup_config_ipsec(cpuset_cpus)
        # Tunnel endpoints live in the subnet of dut2_if1; the DUT1 side
        # uses the instance number offset by 100.
        local_ip_base = kwargs[u"dut2_if1_ip4"].rsplit(u".", 1)[0]
        if dut == u"DUT1":
            tnl_local_ip = f"{local_ip_base}.{nf_instance + 100}"
            tnl_remote_ip = f"{local_ip_base}.{nf_instance}"
            remote_ip_base = kwargs[u"dut1_if1_ip4"].rsplit(u".", 1)[0]
            tg_pf_ip4 = kwargs[u"tg_pf1_ip4"]
            tg_pf_mac = kwargs[u"tg_pf1_mac"]
            raddr_ip4 = kwargs[u"laddr_ip4"]
            # MAC octets distinguishing local/remote sides of the chain.
            l_mac1 = 17
            l_mac2 = 18
            r_mac = 1
        else:
            tnl_local_ip = f"{local_ip_base}.{nf_instance}"
            tnl_remote_ip = f"{local_ip_base}.{nf_instance + 100}"
            remote_ip_base = kwargs[u"dut2_if2_ip4"].rsplit(u".", 1)[0]
            tg_pf_ip4 = kwargs[u"tg_pf2_ip4"]
            tg_pf_mac = kwargs[u"tg_pf2_mac"]
            raddr_ip4 = kwargs[u"raddr_ip4"]
            l_mac1 = 1
            l_mac2 = 2
            r_mac = 17
        self.engine.create_vpp_exec_config(
            u"memif_create_chain_ipsec.exec",
            socket1=f"{kwargs['guest_dir']}/{dut}_memif-vswitch-1",
            socket2=f"{kwargs['guest_dir']}/{dut}_memif-vswitch-2",
            mid1=nf_instance,
            mid2=nf_instance,
            sid1=u"1",
            sid2=u"2",
            mac1=f"02:02:00:00:{l_mac1:02X}:{(nf_instance - 1):02X}",
            mac2=f"02:02:00:00:{l_mac2:02X}:{(nf_instance - 1):02X}",
            tg_pf2_ip4=tg_pf_ip4,
            tg_pf2_mac=tg_pf_mac,
            raddr_ip4=raddr_ip4,
            tnl_local_ip=tnl_local_ip,
            tnl_remote_ip=tnl_remote_ip,
            tnl_remote_mac=f"02:02:00:00:{r_mac:02X}:{(nf_instance - 1):02X}",
            remote_ip=f"{remote_ip_base}.{nf_instance}"
        )
        # Append the pre-generated per-instance IPsec tunnel commands to the
        # exec script that will be run inside the container.
        self.engine.execute(
            f"cat {kwargs['guest_dir']}/ipsec_create_tunnel_cnf_"
            f"{dut}_{nf_instance}.config >> /tmp/running.exec"
        )
def _configure_vpp_pipeline_ip4(self, **kwargs):
"""Configure VPP in pipeline topology with ip4.
:param kwargs: Named parameters.
:type kwargs: dict
"""
self.engine.create_vpp_startup_config()
node = (kwargs[u"mid1"] - 1) % kwargs[u"nodes"] + 1
mid1 = kwargs[u"mid1"]
mid2 = kwargs[u"mid2"]
role1 = u"master"
role2 = u"master" if node == kwargs[u"nodes"] else u"slave"
kwargs[u"mid2"] = kwargs[u"mid2"] \
if node == kwargs[u"nodes"] else kwargs[u"mid2"] + 1
vif1_mac = kwargs[u"tg_pf1_mac"] \
if (kwargs[u"mid1"] - 1) % kwargs[u"nodes"] + | |
<gh_stars>0
# Copyright (C) 2021 ycmd contributors
#
# This file is part of ycmd.
#
# ycmd is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ycmd is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ycmd. If not, see <http://www.gnu.org/licenses/>.
from hamcrest import ( assert_that,
contains_exactly,
contains_inanyorder,
equal_to,
has_entries,
matches_regexp )
from unittest import TestCase
import requests
import pprint
from ycmd.tests.javascript import setUpModule, tearDownModule # noqa
from ycmd.tests.javascript import IsolatedYcmd, PathToTestFile, SharedYcmd
from ycmd.tests.test_utils import ( BuildRequest,
ChunkMatcher,
CombineRequest,
ErrorMatcher,
LocationMatcher,
MessageMatcher,
WaitUntilCompleterServerReady )
from ycmd.utils import ReadFile
def RunTest( app, test ):
  """Run one subcommand test scenario.

  Sends BufferVisit and FileReadyToParse events for the file named in
  test[ 'request' ][ 'filepath' ], then posts the /run_completer_command
  request described by test[ 'request' ] and checks the response status
  code and body against test[ 'expect' ]."""
  contents = ReadFile( test[ 'request' ][ 'filepath' ] )

  def PostEvent( event_name ):
    app.post_json(
      '/event_notification',
      CombineRequest( test[ 'request' ], {
        'contents': contents,
        'filetype': 'javascript',
        'event_name': event_name
      } )
    )

  PostEvent( 'BufferVisit' )
  PostEvent( 'FileReadyToParse' )

  # Errors are not raised by post_json here; the expected status code is
  # asserted below instead, which allows testing of requests that are
  # intended to fail.
  command_arguments = ( [ test[ 'request' ][ 'command' ] ]
                        + test[ 'request' ].get( 'arguments', [] ) )
  response = app.post_json(
    '/run_completer_command',
    CombineRequest( test[ 'request' ], {
      'contents': contents,
      'filetype': 'javascript',
      'command_arguments': command_arguments
    } ),
    expect_errors = True
  )

  print( f'completer response: { pprint.pformat( response.json ) }' )

  assert_that( response.status_code,
               equal_to( test[ 'expect' ][ 'response' ] ) )
  assert_that( response.json, test[ 'expect' ][ 'data' ] )
def Subcommands_GoTo( app, goto_command ):
  """Check that goto_command jumps from the usage at line 31, column 13 of
  test.js to the definition at line 27, column 3 in the same file."""
  filepath = PathToTestFile( 'test.js' )
  RunTest( app, {
    'description': goto_command + ' works within file',
    'request': {
      'command': goto_command,
      'line_num': 31,
      'column_num': 13,
      'filepath': filepath,
    },
    'expect': {
      'response': requests.codes.ok,
      'data': LocationMatcher( filepath, 27, 3 )
    }
  } )
class SubcommandsTest( TestCase ):
@IsolatedYcmd()
def test_Subcommands_DefinedSubcommands( self, app ):
subcommands_data = BuildRequest( completer_target = 'javascript' )
assert_that(
app.post_json( '/defined_subcommands', subcommands_data ).json,
contains_inanyorder(
'Format',
'GoTo',
'GoToCallees',
'GoToCallers',
'GoToDeclaration',
'GoToDefinition',
'GoToImplementation',
'GoToType',
'GetDoc',
'GetType',
'GoToReferences',
'GoToSymbol',
'FixIt',
'OrganizeImports',
'RefactorRename',
'RestartServer'
)
)
  @SharedYcmd
  def test_Subcommands_Format_WholeFile_Spaces( self, app ):
    # Whole-file Format with insert_spaces=True: every ChunkMatcher below
    # replaces one run of leading whitespace (or inserts padding) at the
    # given (line, column) locations of test.js.
    filepath = PathToTestFile( 'test.js' )
    RunTest( app, {
      'description': 'Formatting is applied on the whole file '
                     'with tabs composed of 4 spaces',
      'request': {
        'command': 'Format',
        'filepath': filepath,
        'options': {
          'tab_size': 4,
          'insert_spaces': True
        }
      },
      'expect': {
        'response': requests.codes.ok,
        'data': has_entries( {
          'fixits': contains_exactly( has_entries( {
            'chunks': contains_exactly(
              ChunkMatcher( ' ',
                            LocationMatcher( filepath, 2, 1 ),
                            LocationMatcher( filepath, 2, 3 ) ),
              ChunkMatcher( ' ',
                            LocationMatcher( filepath, 3, 1 ),
                            LocationMatcher( filepath, 3, 3 ) ),
              ChunkMatcher( ' ',
                            LocationMatcher( filepath, 3, 14 ),
                            LocationMatcher( filepath, 3, 14 ) ),
              ChunkMatcher( ' ',
                            LocationMatcher( filepath, 4, 1 ),
                            LocationMatcher( filepath, 4, 3 ) ),
              ChunkMatcher( ' ',
                            LocationMatcher( filepath, 4, 14 ),
                            LocationMatcher( filepath, 4, 14 ) ),
              ChunkMatcher( ' ',
                            LocationMatcher( filepath, 5, 1 ),
                            LocationMatcher( filepath, 5, 3 ) ),
              ChunkMatcher( ' ',
                            LocationMatcher( filepath, 6, 1 ),
                            LocationMatcher( filepath, 6, 5 ) ),
              ChunkMatcher( ' ',
                            LocationMatcher( filepath, 7, 1 ),
                            LocationMatcher( filepath, 7, 5 ) ),
              ChunkMatcher( ' ',
                            LocationMatcher( filepath, 8, 1 ),
                            LocationMatcher( filepath, 8, 3 ) ),
              ChunkMatcher( ' ',
                            LocationMatcher( filepath, 8, 6 ),
                            LocationMatcher( filepath, 8, 6 ) ),
              ChunkMatcher( ' ',
                            LocationMatcher( filepath, 18, 1 ),
                            LocationMatcher( filepath, 18, 2 ) ),
              ChunkMatcher( ' ',
                            LocationMatcher( filepath, 19, 1 ),
                            LocationMatcher( filepath, 19, 2 ) ),
              ChunkMatcher( ' ',
                            LocationMatcher( filepath, 20, 1 ),
                            LocationMatcher( filepath, 20, 2 ) ),
              ChunkMatcher( ' ',
                            LocationMatcher( filepath, 21, 1 ),
                            LocationMatcher( filepath, 21, 2 ) ),
              ChunkMatcher( ' ',
                            LocationMatcher( filepath, 24, 1 ),
                            LocationMatcher( filepath, 24, 3 ) ),
              ChunkMatcher( ' ',
                            LocationMatcher( filepath, 25, 1 ),
                            LocationMatcher( filepath, 25, 4 ) ),
              ChunkMatcher( ' ',
                            LocationMatcher( filepath, 26, 1 ),
                            LocationMatcher( filepath, 26, 4 ) ),
              ChunkMatcher( ' ',
                            LocationMatcher( filepath, 27, 1 ),
                            LocationMatcher( filepath, 27, 3 ) ),
              ChunkMatcher( ' ',
                            LocationMatcher( filepath, 27, 17 ),
                            LocationMatcher( filepath, 27, 17 ) ),
            )
          } ) )
        } )
      }
    } )
  @SharedYcmd
  def test_Subcommands_Format_WholeFile_Tabs( self, app ):
    # Whole-file Format with insert_spaces=False: indentation runs are
    # replaced with tab characters; alignment padding stays as spaces.
    filepath = PathToTestFile( 'test.js' )
    RunTest( app, {
      'description': 'Formatting is applied on the whole file '
                     'with tabs composed of 2 spaces',
      'request': {
        'command': 'Format',
        'filepath': filepath,
        'options': {
          'tab_size': 4,
          'insert_spaces': False
        }
      },
      'expect': {
        'response': requests.codes.ok,
        'data': has_entries( {
          'fixits': contains_exactly( has_entries( {
            'chunks': contains_exactly(
              ChunkMatcher( '\t',
                            LocationMatcher( filepath, 2, 1 ),
                            LocationMatcher( filepath, 2, 3 ) ),
              ChunkMatcher( '\t',
                            LocationMatcher( filepath, 3, 1 ),
                            LocationMatcher( filepath, 3, 3 ) ),
              ChunkMatcher( ' ',
                            LocationMatcher( filepath, 3, 14 ),
                            LocationMatcher( filepath, 3, 14 ) ),
              ChunkMatcher( '\t',
                            LocationMatcher( filepath, 4, 1 ),
                            LocationMatcher( filepath, 4, 3 ) ),
              ChunkMatcher( ' ',
                            LocationMatcher( filepath, 4, 14 ),
                            LocationMatcher( filepath, 4, 14 ) ),
              ChunkMatcher( '\t',
                            LocationMatcher( filepath, 5, 1 ),
                            LocationMatcher( filepath, 5, 3 ) ),
              ChunkMatcher( '\t\t',
                            LocationMatcher( filepath, 6, 1 ),
                            LocationMatcher( filepath, 6, 5 ) ),
              ChunkMatcher( '\t\t',
                            LocationMatcher( filepath, 7, 1 ),
                            LocationMatcher( filepath, 7, 5 ) ),
              ChunkMatcher( '\t',
                            LocationMatcher( filepath, 8, 1 ),
                            LocationMatcher( filepath, 8, 3 ) ),
              ChunkMatcher( ' ',
                            LocationMatcher( filepath, 8, 6 ),
                            LocationMatcher( filepath, 8, 6 ) ),
              ChunkMatcher( ' ',
                            LocationMatcher( filepath, 18, 1 ),
                            LocationMatcher( filepath, 18, 2 ) ),
              ChunkMatcher( ' ',
                            LocationMatcher( filepath, 19, 1 ),
                            LocationMatcher( filepath, 19, 2 ) ),
              ChunkMatcher( ' ',
                            LocationMatcher( filepath, 20, 1 ),
                            LocationMatcher( filepath, 20, 2 ) ),
              ChunkMatcher( ' ',
                            LocationMatcher( filepath, 21, 1 ),
                            LocationMatcher( filepath, 21, 2 ) ),
              ChunkMatcher( '\t',
                            LocationMatcher( filepath, 24, 1 ),
                            LocationMatcher( filepath, 24, 3 ) ),
              ChunkMatcher( '\t ',
                            LocationMatcher( filepath, 25, 1 ),
                            LocationMatcher( filepath, 25, 4 ) ),
              ChunkMatcher( '\t ',
                            LocationMatcher( filepath, 26, 1 ),
                            LocationMatcher( filepath, 26, 4 ) ),
              ChunkMatcher( '\t',
                            LocationMatcher( filepath, 27, 1 ),
                            LocationMatcher( filepath, 27, 3 ) ),
              ChunkMatcher( ' ',
                            LocationMatcher( filepath, 27, 17 ),
                            LocationMatcher( filepath, 27, 17 ) ),
            )
          } ) )
        } )
      }
    } )
  @SharedYcmd
  def test_Subcommands_Format_Range_Spaces( self, app ):
    """Format only lines 5-8 of test.js with insert_spaces=True and
    tab_size=4, and check that each re-indentation chunk replaces the old
    leading whitespace with the expected run of spaces."""
    filepath = PathToTestFile( 'test.js' )
    RunTest( app, {
      'description': 'Formatting is applied on some part of the file '
                     'with tabs composed of 4 spaces by default',
      'request': {
        'command': 'Format',
        'filepath': filepath,
        'range': {
          'start': {
            'line_num': 5,
            'column_num': 3,
          },
          'end': {
            'line_num': 8,
            'column_num': 6
          }
        },
        'options': {
          'tab_size': 4,
          'insert_spaces': True
        }
      },
      'expect': {
        'response': requests.codes.ok,
        'data': has_entries( {
          'fixits': contains_exactly( has_entries( {
            'chunks': contains_exactly(
              # One indentation-fix chunk per reformatted line (5 through 8);
              # lines 6 and 7 are one nesting level deeper (8 spaces).
              ChunkMatcher( '    ',
                            LocationMatcher( filepath, 5, 1 ),
                            LocationMatcher( filepath, 5, 3 ) ),
              ChunkMatcher( '        ',
                            LocationMatcher( filepath, 6, 1 ),
                            LocationMatcher( filepath, 6, 5 ) ),
              ChunkMatcher( '        ',
                            LocationMatcher( filepath, 7, 1 ),
                            LocationMatcher( filepath, 7, 5 ) ),
              ChunkMatcher( '    ',
                            LocationMatcher( filepath, 8, 1 ),
                            LocationMatcher( filepath, 8, 3 ) ),
            )
          } ) )
        } )
      }
    } )
@IsolatedYcmd()
def test_Subcommands_Format_Range_Tabs( self, app ):
filepath = PathToTestFile( 'test.js' )
RunTest( app, {
'description': 'Formatting is applied on some part of the file '
'with tabs instead of spaces',
'request': {
'command': 'Format',
'filepath': filepath,
'range': {
'start': {
'line_num': 5,
'column_num': 3,
},
'end': {
'line_num': 8,
'column_num': 6
}
},
'options': {
'tab_size': 4,
'insert_spaces': False
}
},
'expect': {
'response': requests.codes.ok,
'data': has_entries( {
'fixits': contains_exactly( has_entries( {
'chunks': contains_exactly(
ChunkMatcher( '\t',
LocationMatcher( filepath, 5, 1 ),
LocationMatcher( filepath, 5, 3 ) ),
ChunkMatcher( '\t\t',
LocationMatcher( filepath, 6, 1 ),
LocationMatcher( filepath, 6, 5 ) ),
ChunkMatcher( '\t\t',
LocationMatcher( filepath, 7, 1 ),
LocationMatcher( filepath, 7, 5 ) ),
ChunkMatcher( '\t',
LocationMatcher( filepath, 8, 1 ),
LocationMatcher( filepath, 8, 3 ) ),
| |
#coding:utf-8
'''
图像几何变换
Copyright (C) 2018 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from __future__ import division
import numpy as np
import cv2
import random
from data_generator.object_detection_2d_image_boxes_validation_utils import BoxFilter, ImageValidator
class Resize:
    '''
    Resize an image to a fixed (height, width) and rescale box labels to match.
    '''

    def __init__(self,
                 height,
                 width,
                 interpolation_mode=cv2.INTER_LINEAR,
                 box_filter=None,
                 labels_format=None):
        '''
        Arguments:
            height (int): output image height in pixels.
            width (int): output image width in pixels.
            interpolation_mode: OpenCV interpolation flag passed to `cv2.resize`.
            box_filter (BoxFilter, optional): if given, applied to the rescaled
                labels to drop boxes that became invalid.
            labels_format (dict, optional): maps label field names to column
                indices; defaults to
                `{'class_id': 0, 'xmin': 1, 'ymin': 2, 'xmax': 3, 'ymax': 4}`.
        '''
        if not (isinstance(box_filter, BoxFilter) or box_filter is None):
            raise ValueError("`box_filter` must be either `None` or a `BoxFilter` object.")
        self.out_height = height
        self.out_width = width
        self.interpolation_mode = interpolation_mode
        self.box_filter = box_filter
        # Build a fresh dict per instance to avoid the shared
        # mutable-default-argument pitfall of the original signature.
        self.labels_format = ({'class_id': 0, 'xmin': 1, 'ymin': 2, 'xmax': 3, 'ymax': 4}
                              if labels_format is None else labels_format)

    def __call__(self, image, labels=None, return_inverter=False):
        '''
        Resize `image`; if `labels` are given, scale their box coordinates to
        the new size. Optionally also return an `inverter` mapping labels back
        to the original image's coordinate system.
        '''
        img_height, img_width = image.shape[:2]

        xmin = self.labels_format['xmin']
        ymin = self.labels_format['ymin']
        xmax = self.labels_format['xmax']
        ymax = self.labels_format['ymax']

        image = cv2.resize(image,
                           dsize=(self.out_width, self.out_height),
                           interpolation=self.interpolation_mode)

        if return_inverter:
            def inverter(labels):
                # NOTE(review): the `+1` offsets assume the labels handed to
                # the inverter carry one extra leading column (e.g. a
                # confidence score in decoded predictions) — confirm callers.
                labels = np.copy(labels)
                labels[:, [ymin+1, ymax+1]] = np.round(labels[:, [ymin+1, ymax+1]] * (img_height / self.out_height), decimals=0)
                labels[:, [xmin+1, xmax+1]] = np.round(labels[:, [xmin+1, xmax+1]] * (img_width / self.out_width), decimals=0)
                return labels

        if labels is None:
            if return_inverter:
                return image, inverter
            else:
                return image
        else:
            labels = np.copy(labels)
            # Scale box coordinates by the same factors as the image.
            labels[:, [ymin, ymax]] = np.round(labels[:, [ymin, ymax]] * (self.out_height / img_height), decimals=0)
            labels[:, [xmin, xmax]] = np.round(labels[:, [xmin, xmax]] * (self.out_width / img_width), decimals=0)
            if not (self.box_filter is None):
                self.box_filter.labels_format = self.labels_format
                labels = self.box_filter(labels=labels,
                                         image_height=self.out_height,
                                         image_width=self.out_width)
            if return_inverter:
                return image, labels, inverter
            else:
                return image, labels
class ResizeRandomInterp:
    '''
    Same as `Resize` above, except the interpolation mode is drawn at random
    from a pool of modes on every call.
    '''

    def __init__(self,
                 height,
                 width,
                 interpolation_modes=(cv2.INTER_NEAREST,
                                      cv2.INTER_LINEAR,
                                      cv2.INTER_CUBIC,
                                      cv2.INTER_AREA,
                                      cv2.INTER_LANCZOS4),
                 box_filter=None,
                 labels_format=None):
        '''
        Arguments:
            height (int): output image height in pixels.
            width (int): output image width in pixels.
            interpolation_modes (list/tuple): pool of OpenCV interpolation
                flags to sample from. (Default is a tuple rather than a list
                to avoid a shared mutable default argument.)
            box_filter (BoxFilter, optional): forwarded to `Resize`.
            labels_format (dict, optional): label column layout; defaults to
                `{'class_id': 0, 'xmin': 1, 'ymin': 2, 'xmax': 3, 'ymax': 4}`.
        '''
        if not (isinstance(interpolation_modes, (list, tuple))):
            raise ValueError("`interpolation_mode` must be a list or tuple.")
        self.height = height
        self.width = width
        self.interpolation_modes = interpolation_modes
        self.box_filter = box_filter
        self.labels_format = ({'class_id': 0, 'xmin': 1, 'ymin': 2, 'xmax': 3, 'ymax': 4}
                              if labels_format is None else labels_format)
        self.resize = Resize(height=self.height,
                             width=self.width,
                             box_filter=self.box_filter,
                             labels_format=self.labels_format)

    def __call__(self, image, labels=None, return_inverter=False):
        # Pick a fresh interpolation mode per call, keep the delegate's label
        # layout in sync, then delegate the actual work to `Resize`.
        self.resize.interpolation_mode = np.random.choice(self.interpolation_modes)
        self.resize.labels_format = self.labels_format
        return self.resize(image, labels, return_inverter)
class Flip:
    '''
    Flip an image (and optionally its box labels) horizontally or vertically.
    '''

    def __init__(self,
                 dim='horizontal',
                 labels_format=None):
        '''
        Arguments:
            dim (str): flip axis, 'horizontal' or 'vertical'.
            labels_format (dict, optional): maps label field names to column
                indices; defaults to
                `{'class_id': 0, 'xmin': 1, 'ymin': 2, 'xmax': 3, 'ymax': 4}`.
        '''
        if not (dim in {'horizontal', 'vertical'}): raise ValueError("`dim` can be one of 'horizontal' and 'vertical'.")
        self.dim = dim
        # Fresh dict per instance: avoids the mutable-default-argument pitfall.
        self.labels_format = ({'class_id': 0, 'xmin': 1, 'ymin': 2, 'xmax': 3, 'ymax': 4}
                              if labels_format is None else labels_format)

    def __call__(self, image, labels=None, return_inverter=False):
        '''
        Flip `image`; if `labels` are given, mirror the box coordinates too.
        `return_inverter` is accepted for interface compatibility but unused.
        '''
        img_height, img_width = image.shape[:2]

        xmin = self.labels_format['xmin']
        ymin = self.labels_format['ymin']
        xmax = self.labels_format['xmax']
        ymax = self.labels_format['ymax']

        if self.dim == 'horizontal':
            image = image[:,::-1]
            if labels is None:
                return image
            else:
                labels = np.copy(labels)
                # Mirror x-coordinates; min and max swap roles after the flip.
                labels[:, [xmin, xmax]] = img_width - labels[:, [xmax, xmin]]
                return image, labels
        else:
            image = image[::-1]
            if labels is None:
                return image
            else:
                labels = np.copy(labels)
                # Mirror y-coordinates; min and max swap roles after the flip.
                labels[:, [ymin, ymax]] = img_height - labels[:, [ymax, ymin]]
                return image, labels
class RandomFlip:
    '''
    Flip an image (and labels) with probability `prob`, otherwise pass it
    through unchanged.
    '''

    def __init__(self,
                 dim='horizontal',
                 prob=0.5,
                 labels_format=None):
        '''
        Arguments:
            dim (str): flip axis, forwarded to `Flip`.
            prob (float): probability of flipping, in [0, 1].
            labels_format (dict, optional): label column layout; defaults to
                `{'class_id': 0, 'xmin': 1, 'ymin': 2, 'xmax': 3, 'ymax': 4}`.
        '''
        self.dim = dim
        self.prob = prob
        # Fresh dict per instance: avoids the mutable-default-argument pitfall.
        self.labels_format = ({'class_id': 0, 'xmin': 1, 'ymin': 2, 'xmax': 3, 'ymax': 4}
                              if labels_format is None else labels_format)
        self.flip = Flip(dim=self.dim, labels_format=self.labels_format)

    def __call__(self, image, labels=None):
        # Draw in [0, 1); flip iff the draw lands in the top `prob` fraction.
        p = np.random.uniform(0,1)
        if p >= (1.0-self.prob):
            # Keep the delegate's label layout in sync before flipping.
            self.flip.labels_format = self.labels_format
            return self.flip(image, labels)
        elif labels is None:
            return image
        else:
            return image, labels
class Translate:
    '''
    Shift an image horizontally and/or vertically by a relative offset,
    filling the uncovered border with a constant background color.
    '''

    def __init__(self,
                 dy,
                 dx,
                 clip_boxes=True,
                 box_filter=None,
                 background=(0,0,0),
                 labels_format=None):
        '''
        Arguments:
            dy (float): vertical shift as a fraction of the image height;
                positive moves the content down, negative up.
            dx (float): horizontal shift as a fraction of the image width;
                positive moves the content right, negative left.
            clip_boxes (bool): whether to clip shifted boxes to image bounds.
            box_filter (BoxFilter, optional): drops boxes invalidated by the shift.
            background (tuple): constant border fill value for `cv2.warpAffine`.
            labels_format (dict, optional): label column layout; defaults to
                `{'class_id': 0, 'xmin': 1, 'ymin': 2, 'xmax': 3, 'ymax': 4}`.
        '''
        if not (isinstance(box_filter, BoxFilter) or box_filter is None):
            raise ValueError("`box_filter` must be either `None` or a `BoxFilter` object.")
        self.dy_rel = dy
        self.dx_rel = dx
        self.clip_boxes = clip_boxes
        self.box_filter = box_filter
        self.background = background
        # Fresh dict per instance: avoids the mutable-default-argument pitfall.
        self.labels_format = ({'class_id': 0, 'xmin': 1, 'ymin': 2, 'xmax': 3, 'ymax': 4}
                              if labels_format is None else labels_format)

    def __call__(self, image, labels=None):
        img_height, img_width = image.shape[:2]

        # Affine matrix translating the image by (dx_abs, dy_abs) pixels.
        dy_abs = int(round(img_height * self.dy_rel))
        dx_abs = int(round(img_width * self.dx_rel))
        M = np.float32([[1, 0, dx_abs],
                        [0, 1, dy_abs]])

        # Translate, then crop back to the original (img_width, img_height).
        image = cv2.warpAffine(image,
                               M=M,
                               dsize=(img_width, img_height),
                               borderMode=cv2.BORDER_CONSTANT,
                               borderValue=self.background)

        if labels is None:
            return image
        else:
            xmin = self.labels_format['xmin']
            ymin = self.labels_format['ymin']
            xmax = self.labels_format['xmax']
            ymax = self.labels_format['ymax']

            labels = np.copy(labels)
            # Shift the box coordinates by the same pixel offsets.
            labels[:,[xmin,xmax]] += dx_abs
            labels[:,[ymin,ymax]] += dy_abs

            # Keep only boxes that remain valid inside this patch.
            if not (self.box_filter is None):
                self.box_filter.labels_format = self.labels_format
                labels = self.box_filter(labels=labels,
                                         image_height=img_height,
                                         image_width=img_width)

            if self.clip_boxes:
                labels[:,[ymin,ymax]] = np.clip(labels[:,[ymin,ymax]], a_min=0, a_max=img_height-1)
                labels[:,[xmin,xmax]] = np.clip(labels[:,[xmin,xmax]], a_min=0, a_max=img_width-1)

            return image, labels
class RandomTranslate:
    '''
    Randomly translates images horizontally and/or vertically.
    '''
    def __init__(self,
                 dy_minmax=(0.03,0.3),
                 dx_minmax=(0.03,0.3),
                 prob=0.5,
                 clip_boxes=True,
                 box_filter=None,
                 image_validator=None,
                 n_trials_max=3,
                 background=(0,0,0),
                 labels_format={'class_id': 0, 'xmin': 1, 'ymin': 2, 'xmax': 3, 'ymax': 4}):
        '''
        Arguments:
            dy_minmax (tuple): min/max magnitude of the relative vertical shift.
            dx_minmax (tuple): min/max magnitude of the relative horizontal shift.
            prob (float): probability of applying a translation at all.
            n_trials_max (int, optional): only takes effect when box labels are
                given. Maximum number of trials to generate a valid image; if no
                valid image can be produced within `n_trials_max` trials, the
                unaltered input image is returned.
        '''
        if dy_minmax[0] > dy_minmax[1]:
            raise ValueError("It must be `dy_minmax[0] <= dy_minmax[1]`.")
        if dx_minmax[0] > dx_minmax[1]:
            raise ValueError("It must be `dx_minmax[0] <= dx_minmax[1]`.")
        if dy_minmax[0] < 0 or dx_minmax[0] < 0:
            raise ValueError("It must be `dy_minmax[0] >= 0` and `dx_minmax[0] >= 0`.")
        if not (isinstance(image_validator, ImageValidator) or image_validator is None):
            raise ValueError("`image_validator` must be either `None` or an `ImageValidator` object.")
        self.dy_minmax = dy_minmax
        self.dx_minmax = dx_minmax
        self.prob = prob
        self.clip_boxes = clip_boxes
        self.box_filter = box_filter
        self.image_validator = image_validator
        self.n_trials_max = n_trials_max
        self.background = background
        self.labels_format = labels_format
        # Reusable worker; dy/dx are overwritten per trial in __call__.
        self.translate = Translate(dy=0,
                                   dx=0,
                                   clip_boxes=self.clip_boxes,
                                   box_filter=self.box_filter,
                                   background=self.background,
                                   labels_format=self.labels_format)

    def __call__(self, image, labels=None):
        # Apply a translation only with probability `prob`.
        p = np.random.uniform(0,1)
        if p >= (1.0-self.prob):
            img_height, img_width = image.shape[:2]
            xmin = self.labels_format['xmin']
            ymin = self.labels_format['ymin']
            xmax = self.labels_format['xmax']
            ymax = self.labels_format['ymax']
            # Override the preset labels format.
            if not self.image_validator is None:
                self.image_validator.labels_format = self.labels_format
            self.translate.labels_format = self.labels_format
            for _ in range(max(1, self.n_trials_max)):
                # magnitude of the shift
                dy_abs = np.random.uniform(self.dy_minmax[0], self.dy_minmax[1])
                dx_abs = np.random.uniform(self.dx_minmax[0], self.dx_minmax[1])
                # direction of the shift
                dy = np.random.choice([-dy_abs, dy_abs])
                dx = np.random.choice([-dx_abs, dx_abs])
                self.translate.dy_rel = dy
                self.translate.dx_rel = dx
                if (labels is None) or (self.image_validator is None):
                    # We either don't have any boxes or if we do, we will accept any outcome as valid.
                    return self.translate(image, labels)
                else:
                    # Translate the box coordinates to the translated image's coordinate system.
                    new_labels = np.copy(labels)
                    new_labels[:, [ymin, ymax]] += int(round(img_height * dy))
                    new_labels[:, [xmin, xmax]] += int(round(img_width * dx))
                    # Check if the patch is valid.
                    if self.image_validator(labels=new_labels,
                                            image_height=img_height,
                                            image_width=img_width):
                        return self.translate(image, labels)
            # If all attempts failed, return the unaltered input image.
            if labels is None:
                return image
            else:
                return image, labels
        elif labels is None:
            return image
        else:
            return image, labels
class Scale:
    '''
    Scales images, i.e. zooms in or out, about the image center, keeping the
    output canvas the same size and filling uncovered areas with `background`.
    '''

    def __init__(self,
                 factor,
                 clip_boxes=True,
                 box_filter=None,
                 background=(0,0,0),
                 labels_format=None):
        '''
        Arguments:
            factor (float): scale factor, must be > 0 (>1 zooms in, <1 zooms out).
            clip_boxes (bool): whether to clip scaled boxes to image bounds.
            box_filter (BoxFilter, optional): drops boxes invalidated by scaling.
            background (tuple): constant border fill value for `cv2.warpAffine`.
            labels_format (dict, optional): label column layout; defaults to
                `{'class_id': 0, 'xmin': 1, 'ymin': 2, 'xmax': 3, 'ymax': 4}`.
        '''
        if factor <= 0:
            raise ValueError("It must be `factor > 0`.")
        if not (isinstance(box_filter, BoxFilter) or box_filter is None):
            raise ValueError("`box_filter` must be either `None` or a `BoxFilter` object.")
        self.factor = factor
        self.clip_boxes = clip_boxes
        self.box_filter = box_filter
        self.background = background
        # BUG FIX: the original read an undefined name `labels_format` here
        # (it was missing from the parameter list), raising NameError on
        # construction; `RandomScale` passes `labels_format=` explicitly.
        self.labels_format = ({'class_id': 0, 'xmin': 1, 'ymin': 2, 'xmax': 3, 'ymax': 4}
                              if labels_format is None else labels_format)

    def __call__(self, image, labels=None):
        img_height, img_width = image.shape[:2]

        # Rotation matrix with angle 0, so it only scales about the center.
        # Args: rotation center, rotation angle, scale factor.
        M = cv2.getRotationMatrix2D(center=(img_width / 2, img_height / 2),
                                    angle=0,
                                    scale=self.factor)

        # Scale the image (output canvas keeps the original size).
        image = cv2.warpAffine(image,
                               M=M,
                               dsize=(img_width, img_height),
                               borderMode=cv2.BORDER_CONSTANT,
                               borderValue=self.background)

        if labels is None:
            return image
        else:
            xmin = self.labels_format['xmin']
            ymin = self.labels_format['ymin']
            xmax = self.labels_format['xmax']
            ymax = self.labels_format['ymax']

            labels = np.copy(labels)
            # Map the top-left and bottom-right box corners through M
            # (homogeneous coordinates).
            toplefts = np.array([labels[:,xmin], labels[:,ymin], np.ones(labels.shape[0])])
            bottomrights = np.array([labels[:,xmax], labels[:,ymax], np.ones(labels.shape[0])])
            new_toplefts = (np.dot(M, toplefts)).T
            new_bottomrights = (np.dot(M, bottomrights)).T
            # `np.int` was removed in NumPy 1.24; the builtin `int` is the
            # equivalent dtype here.
            labels[:,[xmin,ymin]] = np.round(new_toplefts, decimals=0).astype(int)
            labels[:,[xmax,ymax]] = np.round(new_bottomrights, decimals=0).astype(int)

            # Compute all valid boxes for this patch.
            if not (self.box_filter is None):
                self.box_filter.labels_format = self.labels_format
                labels = self.box_filter(labels=labels,
                                         image_height=img_height,
                                         image_width=img_width)

            if self.clip_boxes:
                labels[:,[ymin,ymax]] = np.clip(labels[:,[ymin,ymax]], a_min=0, a_max=img_height-1)
                labels[:,[xmin,xmax]] = np.clip(labels[:,[xmin,xmax]], a_min=0, a_max=img_width-1)

            return image, labels
class RandomScale:
'''
Randomly scales images.
'''
def __init__(self,
min_factor=0.5,
max_factor=1.5,
prob=0.5,
clip_boxes=True,
box_filter=None,
image_validator=None,
n_trials_max=3,
background=(0,0,0),
labels_format={'class_id': 0, 'xmin': 1, 'ymin': 2, 'xmax': 3, 'ymax': 4}):
if not (0 < min_factor <= max_factor):
raise ValueError("It must be `0 < min_factor <= max_factor`.")
if not (isinstance(image_validator, ImageValidator) or image_validator is None):
raise ValueError("`image_validator` must be either `None` or an `ImageValidator` object.")
self.min_factor = min_factor
self.max_factor = max_factor
self.prob = prob
self.clip_boxes = clip_boxes
self.box_filter = box_filter
self.image_validator = image_validator
self.n_trials_max = n_trials_max
self.background = background
self.labels_format = labels_format
self.scale = Scale(factor=1.0,
clip_boxes=self.clip_boxes,
box_filter=self.box_filter,
background=self.background,
labels_format=self.labels_format)
def __call__(self, image, labels=None):
p = np.random.uniform(0,1)
if p >= (1.0-self.prob):
img_height, img_width = image.shape[:2]
xmin = self.labels_format['xmin']
ymin = self.labels_format['ymin']
xmax = self.labels_format['xmax']
ymax = self.labels_format['ymax']
# Override the preset | |
+= 1
self._global_build_failure_count += 1
logger.warning('BUILD FAILURE: %s' % uid)
if not keep_variant:
try:
logger.info('removing %s...' % dest_dir)
shutil.rmtree(dest_dir)
except Exception as e:
logger.warning('%s' % e)
return DD.UNRESOLVED
# test application
logger.info('testing %s...' % uid)
result = self.do_test(dest_dir)
logger.info('%s (size=%d) -> %s' % (uid, len(c), result))
if result == DD.FAIL:
self.update_progress(result, (self._global_patch_count,
self.count_components(self._vp, c),
self._stage))
elif result == DD.PASS:
self.update_progress(result, (self._global_patch_count,
self.count_components(self._vp, c),
self._stage))
test_result_dir = self.get_test_result_dir()
if not os.path.exists(test_result_dir):
os.makedirs(test_result_dir)
test_result_path = os.path.join(test_result_dir, '%s.json' % uid)
try:
with open(test_result_path, 'w') as f:
d = {'config':c,'result':result}
json.dump(d, f)
ft_path = os.path.join(dest_dir, 'failing_tests')
if os.path.exists(ft_path):
shutil.copyfile(ft_path,
os.path.join(test_result_dir, uid+'.failing_tests'))
if not keep_variant:
logger.info('removing %s...' % dest_dir)
shutil.rmtree(dest_dir)
except Exception as e:
logger.warning('%s' % e)
return result
    def do_ddmin(self, c, stage=1, prefix=''):
        """Run the ddmin algorithm on component set `c`, report the 1-minimal
        failure-inducing set, re-test it, and return a DDResult (algo A_DDMIN).

        Args:
            c: list of (possibly grouped) component ids.
            stage: 1-based stage number, used in status messages and variant uids.
            prefix: prefix for the generated variant uid.
        """
        c_min = c
        # A single component is trivially 1-minimal; only run ddmin on >1.
        if len(c) > 1:
            c_min = self.ddmin(c)
        c_min.sort(key=getnum)
        c_min_len_ = len(self.ungroup(c_min))
        self.set_status('STAGE{}: The 1-minimal failure-inducing changes ({}({}) components)'.format(stage,
                                                                                                     len(c_min),
                                                                                                     c_min_len_))
        print(c_min)
        self.show_hunks(c_min)
        suffix = str(stage)
        # Re-apply and re-test the minimal configuration, keeping the variant
        # directory on disk for later inspection.
        min_res = self._test(c_min,
                             uid=add_vp_suffix(prefix+'minimal'+suffix, self._vp),
                             keep_variant=True)
        r = DDResult(algo=A_DDMIN)
        r.inp = c
        r.minimal_result = min_res
        r.cids_minimal = c_min
        return r
    def do_dd(self, c, stage=1, prefix=''):
        """Run the dd algorithm on component set `c`, producing a 1-minimal
        failure-inducing difference plus maximal passing and minimal failing
        configurations, re-test all three, and return a DDResult (algo A_DD).

        Args:
            c: list of (possibly grouped) component ids.
            stage: 1-based stage number, used in status messages and variant uids.
            prefix: prefix for the generated variant uids.
        """
        (c_min, c_pass, c_fail) = self.dd(c)
        c_min.sort(key=getnum)
        c_pass.sort(key=getnum)
        c_fail.sort(key=getnum)
        c_min_len_ = len(self.ungroup(c_min))
        self.set_status('STAGE{}: The 1-minimal failure-inducing difference ({}({}) components)'.format(stage,
                                                                                                        len(c_min),
                                                                                                        c_min_len_))
        print(c_min)
        self.show_hunks(c_min)
        print('[%d] passes (%d)' % (stage, len(c_pass)))
        print(c_pass)
        print('[%d] fails (%d)' % (stage, len(c_fail)))
        print(c_fail)
        min_res = DD.UNRESOLVED
        pass_res = DD.UNRESOLVED
        fail_res = DD.UNRESOLVED
        suffix = str(stage)
        c_min_ = None
        if c_min:
            min_uid = add_vp_suffix(prefix+'minimal'+suffix, self._vp)
            # The minimal set alone may not build; close it over component
            # dependencies, and fall back to group dependencies if it still
            # does not reproduce the failure.
            c_min_ = self.add_dependency(self.ungroup(c_min))
            min_res = self._test(c_min_, uid=min_uid, keep_variant=True)
            if min_res != DD.FAIL:
                print('trying to add group dependencies...')
                c_min_ = self.add_dependency_g(c_min)
                min_res = self._test(c_min_, uid=min_uid, keep_variant=True)
        if c_pass:
            pass_res = self._test(c_pass,
                                  uid=add_vp_suffix(prefix+'pass'+suffix, self._vp),
                                  keep_variant=True)
        if c_fail:
            fail_res = self._test(c_fail,
                                  uid=add_vp_suffix(prefix+'fail'+suffix, self._vp),
                                  keep_variant=True)
        r = DDResult(algo=A_DD)
        r.inp = c
        r.minimal_result = min_res
        r.cids_minimal = c_min_
        r.pass_result = pass_res
        r.cids_pass = c_pass
        r.fail_result = fail_res
        r.cids_fail = c_fail
        return r
    def staged_dd(self, cids, staging, prefix=''):
        """Drive the staged delta-debugging loop: repeatedly ask `staging`
        for the next component grouping, run ddmin or dd on it, and stop when
        the staging state machine yields no further work.

        Returns the last stage's DDResult.
        """
        r = DDResult(cids)
        self._stage = 0
        while True:
            cids = staging.mkcids(r)
            if cids == None:
                break
            # Reset the per-stage patch/build counters.
            self._patch_count = 0
            self._patch_failure_count = 0
            self._build_count = 0
            self._build_failure_count = 0
            self._stage += 1
            algo = staging.get_algo()
            self.set_status('STAGE{}: {} in progress...'.format(self._stage, algo))
            print('cids=%s' % cids)
            self.reset_dd()
            if cids:
                if algo == A_DDMIN:
                    r = self.do_ddmin(cids, stage=self._stage, prefix=prefix)
                elif algo == A_DD:
                    r = self.do_dd(cids, stage=self._stage, prefix=prefix)
                # Report per-stage patch/build success statistics.
                if self._patch_count > 0:
                    sc = self._patch_count - self._patch_failure_count
                    sr = (float(sc) / float(self._patch_count)) * 100
                    print('PATCH SUCCESS RATE: %.4f%% (%d/%d)' % (sr, sc, self._patch_count))
                if self._build_count > 0:
                    sc = self._build_count - self._build_failure_count
                    sr = (float(sc) / float(self._build_count)) * 100
                    print('BUILD SUCCESS RATE: %.4f%% (%d/%d)' % (sr, sc, self._build_count))
            else:
                break
        # Report the run-wide statistics accumulated across all stages.
        if self._global_patch_count > 0:
            sc = self._global_patch_count - self._global_patch_failure_count
            sr = (float(sc) / float(self._global_patch_count)) * 100
            print('%s: GLOBAL PATCH SUCCESS RATE: %.4f%% (%d/%d)' % (self._vp_str, sr, sc,
                                                                     self._global_patch_count))
        if self._global_build_count > 0:
            sc = self._global_build_count - self._global_build_failure_count
            sr = (float(sc) / float(self._global_build_count)) * 100
            print('%s: GLOBAL BUILD SUCCESS RATE: %.4f%% (%d/%d)' % (self._vp_str, sr, sc,
                                                                     self._global_build_count))
        return r
class Staging(object):
    def __init__(self, algo, jdd, staged=True):
        """Initialize the staging state machine.

        Args:
            algo: delta-debugging algorithm to run per stage (A_DDMIN or A_DD).
            jdd: driver object providing the regrouping operations.
            staged: if True, start at the file-level grouping state ('Fd');
                otherwise start directly at the ungrouped state ('0').
        """
        self._algo = algo
        self._jdd = jdd
        self._state = 'I'  # 'I' is the initial state of the machine
        self._staged = staged
        self._stmt_level = 0
        self._max_stmt_level = jdd.get_max_stmt_level()
    def get_algo(self):
        """Return the delta-debugging algorithm this staging runs."""
        return self._algo
def is_grouped(self):
return (self._state != '0' and self._state != '0m')
    def mkcids(self, ddres):
        """State machine step: given the previous stage's DDResult, produce
        the component-id list for the next stage, or None to stop.

        State progression: 'I' (initial) -> 'Fd'/'F' (file level) ->
        'Md'/'M' (method level) -> 'Sd'/'S' (statement levels, one per
        `_stmt_level`) -> '0'/'0m' (fully ungrouped). The states suffixed
        with 'd' regroup with dependency closure first.
        """
        jdd = self._jdd
        if self._state == 'I':
            # First call: pick the starting state and hand back the initial set.
            if self._staged:
                self._state = 'Fd'
            else:
                self._state = '0'
            return ddres.cids_ini
        if self._state == 'Fd': # next: F or Md
            if ddres.algo == A_DDMIN:
                self._state = 'F'
                return self.mkcids_F(ddres)
            elif ddres.algo == A_DD:
                if ddres.minimal_result == DD.FAIL:
                    self._state = 'F'
                    return self.mkcids_F(ddres)
                else:
                    ddres.check_fail()
                    self._state = 'Md'
                    return self.mkcids_Md(ddres)
        elif self._state == 'F': # next: Md
            ddres.check_fail()
            self._state = 'Md'
            return self.mkcids_Md(ddres)
        #
        elif self._state == 'Md': # next: M or Sd
            if ddres.algo == A_DDMIN:
                self._state = 'M'
                return self.mkcids_M(ddres)
            elif ddres.algo == A_DD:
                if ddres.minimal_result == DD.FAIL:
                    self._state = 'M'
                    return self.mkcids_M(ddres)
                else:
                    ddres.check_fail()
                    self._state = 'Sd'
                    return self.mkcids_Sd(ddres)
        elif self._state == 'M': # next: Sd
            ddres.check_fail()
            self._state = 'Sd'
            return self.mkcids_Sd(ddres)
        #
        elif self._state == 'Sd': # next: S or Sd or 0
            if ddres.algo == A_DDMIN:
                self._state = 'S'
                return self.mkcids_S(ddres)
            elif ddres.algo == A_DD:
                if ddres.minimal_result == DD.FAIL:
                    self._state = 'S'
                    return self.mkcids_S(ddres)
                else:
                    ddres.check_fail()
                    # Descend one statement level if any remain; otherwise
                    # drop to the ungrouped state.
                    if self._stmt_level >= self._max_stmt_level or not jdd.has_stmt_group(self._stmt_level+1):
                        self._state = '0'
                        return self.mkcids_0(ddres)
                    else:
                        self._state = 'Sd'
                        self._stmt_level += 1
                        return self.mkcids_Sd(ddres)
        elif self._state == 'S': # next: Sd or 0
            ddres.check_fail()
            if self._stmt_level >= self._max_stmt_level or not jdd.has_stmt_group(self._stmt_level+1):
                self._state = '0'
                return self.mkcids_0(ddres)
            else:
                self._state = 'Sd'
                self._stmt_level += 1
                return self.mkcids_Sd(ddres)
        #
        elif self._state == '0' or self._state == '0m':
            if ddres.algo == A_DD:
                pass_res = ddres.pass_result
                fail_res = ddres.fail_result
                min_res = ddres.minimal_result
                # Inconsistent outcome (pass set passes, fail set fails, but
                # the minimal set passes): pin the minimal set as a base and
                # continue minimizing the passing set.
                if pass_res == DD.PASS and fail_res == DD.FAIL and min_res == DD.PASS:
                    self._state = '0m'
                    #self._algo = A_DDMIN
                    jdd.add_base_cids(ddres.cids_minimal)
                    cids = ddres.cids_pass
                    return cids
        return None
def mkcids_F(self, ddres):
jdd = self._jdd
cids = None
if ddres.algo == A_DDMIN:
c_min = ddres.cids_minimal
min_res = ddres.minimal_result
logger.info('c_min=%s, min_res=%s' % (c_min, min_res))
if min_res == DD.FAIL:
cids = jdd.regroup_by_file(c_min, by_dep=False)
elif ddres.algo == A_DD:
c_min_ = ddres.cids_minimal
min_res = ddres.minimal_result
logger.info('c_min_=%s, min_res=%s' % (c_min_, min_res))
if c_min_:
if min_res == DD.FAIL:
cids = jdd.regroup_by_file(c_min_, by_dep=False)
if cids == None:
c_fail = ddres.cids_fail
fail_res = ddres.fail_result
logger.info('c_fail=%s, fail_res=%s' % (c_fail, fail_res))
if c_fail:
if fail_res == DD.FAIL:
cids = jdd.regroup_by_file(c_fail, by_dep=False)
return cids
    def mkcids_Md(self, ddres):
        """Regroup the failure-inducing components by method with dependency
        closure, preferring the minimal set and falling back to the failing
        subset for dd results."""
        jdd = self._jdd
        cids = None
        if ddres.algo == A_DDMIN:
            c_min = ddres.cids_minimal
            min_res = ddres.minimal_result
            logger.info('c_min=%s, min_res=%s' % (c_min, min_res))
            if min_res == DD.FAIL:
                cids = jdd.regroup_by_meth(c_min, by_dep=True)
        elif ddres.algo == A_DD:
            c_min_ = ddres.cids_minimal
            min_res = ddres.minimal_result
            logger.info('c_min_=%s, min_res=%s' % (c_min_, min_res))
            if c_min_:
                if min_res == DD.FAIL:
                    cids = jdd.regroup_by_meth(c_min_, by_dep=True)
            # Fall back to the failing subset when the minimal set did not fail.
            if cids == None:
                c_fail = ddres.cids_fail
                fail_res = ddres.fail_result
                logger.info('c_fail=%s, fail_res=%s' % (c_fail, fail_res))
                if c_fail:
                    if fail_res == DD.FAIL:
                        cids = jdd.regroup_by_meth(c_fail, by_dep=True)
        return cids
    def mkcids_M(self, ddres):
        """Regroup the failure-inducing components by method without
        dependency closure, preferring the minimal set and falling back to
        the failing subset for dd results."""
        jdd = self._jdd
        cids = None
        if ddres.algo == A_DDMIN:
            c_min = ddres.cids_minimal
            min_res = ddres.minimal_result
            logger.info('c_min=%s, min_res=%s' % (c_min, min_res))
            if min_res == DD.FAIL:
                cids = jdd.regroup_by_meth(c_min, by_dep=False)
        elif ddres.algo == A_DD:
            c_min_ = ddres.cids_minimal
            min_res = ddres.minimal_result
            logger.info('c_min_=%s, min_res=%s' % (c_min_, min_res))
            if c_min_:
                if min_res == DD.FAIL:
                    cids = jdd.regroup_by_meth(c_min_, by_dep=False)
            # Fall back to the failing subset when the minimal set did not fail.
            if cids == None:
                c_fail = ddres.cids_fail
                fail_res = ddres.fail_result
                logger.info('c_fail=%s, fail_res=%s' % (c_fail, fail_res))
                if c_fail:
                    if fail_res == DD.FAIL:
                        cids = jdd.regroup_by_meth(c_fail, by_dep=False)
        return cids
    def mkcids_Sd(self, ddres):
        """Regroup the failure-inducing components by statement (at the
        current `_stmt_level`) with dependency closure, preferring the
        minimal set and falling back to the failing subset for dd results."""
        logger.info('stmt_level=%d' % self._stmt_level)
        jdd = self._jdd
        cids = None
        if ddres.algo == A_DDMIN:
            c_min = ddres.cids_minimal
            min_res = ddres.minimal_result
            logger.info('c_min=%s, min_res=%s' % (c_min, min_res))
            if min_res == DD.FAIL:
                jdd.set_stmt_level(self._stmt_level)
                cids = jdd.regroup_by_stmt(c_min, by_dep=True)
        elif ddres.algo == A_DD:
            c_min_ = ddres.cids_minimal
            min_res = ddres.minimal_result
            logger.info('c_min_=%s, min_res=%s' % (c_min_, min_res))
            if c_min_:
                if min_res == DD.FAIL:
                    jdd.set_stmt_level(self._stmt_level)
                    cids = jdd.regroup_by_stmt(c_min_, by_dep=True)
            # Fall back to the failing subset when the minimal set did not fail.
            if cids == None:
                c_fail = ddres.cids_fail
                fail_res = ddres.fail_result
                logger.info('c_fail=%s, fail_res=%s' % (c_fail, fail_res))
                if c_fail:
                    if fail_res == DD.FAIL:
                        jdd.set_stmt_level(self._stmt_level)
                        cids = jdd.regroup_by_stmt(c_fail, by_dep=True)
        return cids
    def mkcids_S(self, ddres):
        """Regroup the failure-inducing components by statement (at the
        current `_stmt_level`) without dependency closure, preferring the
        minimal set and falling back to the failing subset for dd results."""
        logger.info('stmt_level=%d' % self._stmt_level)
        jdd = self._jdd
        cids = None
        if ddres.algo == A_DDMIN:
            c_min = ddres.cids_minimal
            min_res = ddres.minimal_result
            logger.info('c_min=%s, min_res=%s' % (c_min, min_res))
            if min_res == DD.FAIL:
                jdd.set_stmt_level(self._stmt_level)
                cids = jdd.regroup_by_stmt(c_min, by_dep=False)
        elif ddres.algo == A_DD:
            c_min_ = ddres.cids_minimal
            min_res = ddres.minimal_result
            logger.info('c_min_=%s, min_res=%s' % (c_min_, min_res))
            if c_min_:
                if min_res == DD.FAIL:
                    jdd.set_stmt_level(self._stmt_level)
                    cids = jdd.regroup_by_stmt(c_min_, by_dep=False)
            # Fall back to the failing subset when the minimal set did not fail.
            if cids == None:
                c_fail = ddres.cids_fail
                fail_res = ddres.fail_result
                logger.info('c_fail=%s, fail_res=%s' % (c_fail, fail_res))
                if c_fail:
                    if fail_res == DD.FAIL:
                        jdd.set_stmt_level(self._stmt_level)
                        cids = jdd.regroup_by_stmt(c_fail, by_dep=False)
        return cids
def mkcids_0(self, ddres):
jdd = self._jdd
cids = None
if ddres.algo == A_DDMIN:
c_min = ddres.cids_minimal
min_res = ddres.minimal_result
logger.info('c_min=%s, min_res=%s' % (c_min, min_res))
if min_res == DD.FAIL and jdd.has_grp(c_min):
cids = jdd.ungroup(c_min)
elif ddres.algo == A_DD:
c_min_ = ddres.cids_minimal
min_res = ddres.minimal_result
logger.info('c_min_=%s, min_res=%s' % (c_min_, min_res))
if c_min_:
if min_res == DD.FAIL:
if jdd.has_grp(c_min_):
cids = jdd.ungroup(c_min_)
if cids == None:
c_fail = ddres.cids_fail
fail_res = ddres.fail_result
logger.info('c_fail=%s, fail_res=%s' % (c_fail, fail_res))
if c_fail:
if fail_res | |
plt.savefig(gdat.pathimag + 'diffionrdiffenph.pdf')
plt.close()
liststrg = ['ionrbolt.csv', 'ionrbeck.csv', 'ionrfauc.csv']
listlabl = ['Bolton & Haehnelt (2007)', 'Becker et al. (2007)', 'Faucher-Giguere (2008)']
listmrkr = ['o', 'D', 'x']
figr, axisrows = plt.subplots(1, gdat.numbmpol, sharey='all', figsize=(gdat.numbmpol * gdat.plotsize, gdat.plotsize))
for i, axis in enumerate(axisrows):
plot = axis.loglog(gdat.meanreds, gdat.ionrdmat[:,i])
axis.set_xlabel('$z$')
axis.set_ylabel(r'$\Gamma$ [1/s/H]')
for k, strg in enumerate(liststrg):
path = gdat.pathdata + strg
data = loadtxt(path)
ndata = data.shape[0] / 3
yerr = zeros((2, ndata))
xdat = data[:ndata, 0]
ydat = data[:ndata, 1] * 1e-12
yerr[0, :] = data[ndata:2*ndata, 1]
yerr[1, :] = data[2*ndata:3*ndata, 1]
yerr = abs(yerr - ydat)
axis.errorbar(xdat, ydat, yerr=yerr, ls='', marker=listmrkr[k])
figr.tight_layout()
plt.savefig(gdat.pathimag + 'ionr.pdf')
plt.close()
def retr_fluxphotexpr(gdat):
    '''Load the measured X-ray background photon flux from disk, convert its
    units, and interpolate it onto the model energy grid `gdat.meanenph`.
    Results are stored on `gdat` (meanenphexpr, fluxphotexpr, indxenphexpr,
    fluxphotexprvari); nothing is returned.'''
    path = gdat.pathdata + 'xray_background.dat'
    tabl = loadtxt(path)
    gdat.meanenphexpr = tabl[:,0]
    gdat.meanenphexpr *= 1e-6 # [MeV]
    gdat.fluxphotexpr = tabl[:, 1]
    gdat.fluxphotexpr *= 1e-3 / gdat.meanenphexpr**2 # [1/cm^2/s/sr/MeV]
    # Restrict to the model energies covered by the measurement, then
    # interpolate the measured flux onto those energies.
    gdat.indxenphexpr = where((amin(gdat.meanenphexpr) < gdat.meanenph) & (gdat.meanenph < amax(gdat.meanenphexpr)))[0]
    gdat.fluxphotexpr = interp1d(gdat.meanenphexpr, gdat.fluxphotexpr)(gdat.meanenph[gdat.indxenphexpr])
    # Variance taken as (100% of the flux)^2, i.e. unit fractional uncertainty.
    gdat.fluxphotexprvari = (gdat.fluxphotexpr * 1.)**2
def retr_datapara(gdat):
    '''Build the sampler's parameter metadata (index map, prior ranges,
    scales, labels, units, proposal variances) for the three model
    parameters: csecvelo (s-wave cross section), csecfrac (p-wave to s-wave
    ratio) and masspart (dark matter particle mass). Also sets
    `gdat.numbpara`.'''
    gdat.numbpara = 3

    datapara = tdpy.util.gdatstrt()
    datapara.indx = dict()
    datapara.minm = zeros(gdat.numbpara)
    datapara.maxm = zeros(gdat.numbpara)
    datapara.name = empty(gdat.numbpara, dtype=object)
    datapara.scal = empty(gdat.numbpara, dtype=object)
    datapara.labl = empty(gdat.numbpara, dtype=object)
    datapara.unit = empty(gdat.numbpara, dtype=object)
    datapara.true = empty(gdat.numbpara, dtype=object)
    datapara.vari = zeros(gdat.numbpara)

    datapara.indx['csecvelo'] = 0
    datapara.name[0] = 'csecvelo'
    datapara.minm[0] = 3e-26
    datapara.maxm[0] = 3e-20
    datapara.scal[0] = 'logt'
    datapara.labl[0] = '$a$'
    datapara.unit[0] = '[cm$^3$/s]'
    datapara.vari[0] = 3e-1
    datapara.true[0] = None

    datapara.indx['csecfrac'] = 1
    datapara.name[1] = 'csecfrac'
    datapara.minm[1] = 1e-2
    datapara.maxm[1] = 1e10
    datapara.scal[1] = 'logt'
    datapara.labl[1] = '$b/a$'
    datapara.unit[1] = ''
    datapara.vari[1] = 3e-1
    datapara.true[1] = None

    datapara.indx['masspart'] = 2
    datapara.name[2] = 'masspart'
    datapara.minm[2] = 1e4
    datapara.maxm[2] = 1e6
    datapara.scal[2] = 'logt'
    datapara.labl[2] = '$M$'
    datapara.unit[2] = '[MeV]'
    datapara.vari[2] = 3e-1
    datapara.true[2] = None

    if False:
        # NOTE(review): dead code for an optional 4th parameter (dmatslop).
        # If ever enabled as-is, index 3 would be out of bounds for the
        # size-3 arrays above — `gdat.numbpara` would also need to become 4.
        datapara.indx['dmatslop'] = 3
        datapara.name[3] = 'dmatslop'
        datapara.minm[3] = 0.8
        datapara.maxm[3] = 1.5
        datapara.scal[3] = 'self'
        datapara.labl[3] = r'$\gamma$'
        datapara.unit[3] = ''
        datapara.vari[3] = 3e-1

    datapara.strg = datapara.labl + ' ' + datapara.unit

    return datapara
def retr_fluxphotdmatintp(gdat):
    '''Interpolate the dark-matter photon flux linearly in the two
    cross-section parameters, scaling the s-wave (index 0) and p-wave
    (index 1) template fluxes by their values relative to the pivots.'''
    fluxphotdmatintp = gdat.fluxphotdmat[:, :, 0] * gdat.csecvelo / gdat.csecvelopivt + gdat.fluxphotdmat[:, :, 1] * gdat.csecfrac / gdat.csecfracpivt
    # Disabled debug dump (Python 2 print statements).
    if False:
        print 'retr_fluxphotdmatintp'
        print 'csecvelo'
        print gdat.csecvelo
        print 'csecvelopivt'
        print gdat.csecvelopivt
        print 'csecfrac'
        print gdat.csecfrac
        print 'csecfracpivt'
        print gdat.csecfracpivt
        print 'fluxphotdmat[:, :, 0]'
        print gdat.fluxphotdmat[:, :, 0]
        print 'fluxphotdmat[:, :, 1]'
        print gdat.fluxphotdmat[:, :, 1]
        print 'fluxphotdmatintp[:, 0]'
        print fluxphotdmatintp[:, 0]
    return fluxphotdmatintp
def plot_sfrd(gdat):
    '''Plot the star formation rate density against redshift on a log y-axis
    and save the figure to the image directory.'''
    fig, ax = plt.subplots(figsize=(gdat.plotsize, gdat.plotsize))
    ax.plot(gdat.meanredssfrd, gdat.sfrd)
    ax.set_yscale('log')
    ax.set_xlabel('$z$')
    ax.set_ylabel(r'SFRD [erg/Mpc$^3$/yr]')
    fig.tight_layout()
    plt.savefig(gdat.pathimag + 'sfrd.pdf')
    plt.close()
def plot_hm12(gdat, fluxphotdmat=None, listfluxphotdmat=None):
    '''
    Plot the cosmic X-ray background: the ROSAT measurement with its 1-sigma
    band, the Moretti/Worsley data sets, the Haardt & Madau (2012) background
    model and, optionally, the dark-matter-induced photon flux.

    Arguments:
        gdat: global data object carrying the energy grid, data paths and
            precomputed fluxes.
        fluxphotdmat (array, optional): DM photon flux to overplot, indexed
            by (energy, redshift).
        listfluxphotdmat (array, optional): sample of DM photon fluxes used
            to draw credible bands, indexed by (sample, energy, redshift).
    '''
    figr, axis = plt.subplots(figsize=(2 * gdat.plotsize, gdat.plotsize))

    # Measured ROSAT flux with its 1-sigma band.
    xdat = gdat.meanenph[gdat.indxenphexpr] * 1e6
    ydatlowr = gdat.meanenph[gdat.indxenphexpr] * (gdat.fluxphotexpr - sqrt(gdat.fluxphotexprvari))
    ydatuppr = gdat.meanenph[gdat.indxenphexpr] * (gdat.fluxphotexpr + sqrt(gdat.fluxphotexprvari))
    axis.fill_between(xdat, ydatuppr, ydatlowr, color='lightblue')
    axis.loglog(gdat.meanenph[gdat.indxenphexpr] * 1e6, gdat.meanenph[gdat.indxenphexpr] * gdat.fluxphotexpr, label='ROSAT')

    # Literature XRB measurements loaded from CSV files.
    liststrg = ['moretotl', 'moreothr', 'more']
    listlabl = ['Total XRB, Moretti et al. (2009)', 'Unresolved XRB, Worsley et al. (2006)', 'Unresolved XRB, Moretti et al. (2012)']
    listcolr = ['black', 'yellow', 'green']
    for k, strg in enumerate(liststrg):
        path = gdat.pathdata + strg + '.csv'
        datamore = loadtxt(path)
        gdat.meanenphmore = datamore[:, 0] * 1e-3 # [MeV]
        fluxphotmore = gdat.ergs2mgev * (180. / pi)**2 * datamore[:, 1] / gdat.meanenphmore**2 # [1/cm^2/s/sr/MeV]
        axis.loglog(gdat.meanenphmore * 1e6, gdat.meanenphmore * fluxphotmore, label=listlabl[k], color=listcolr[k])

    listcolr = ['b', 'g', 'r']
    for c in range(gdat.numbredsplotlate):
        axis.loglog(gdat.meanenph * 1e6, gdat.meanenph * gdat.fluxphothm12[:, gdat.indxredsplotlate[c]], label='Haardt & Madau (2012), ' + gdat.strgredsplotlate[c])
        if fluxphotdmat is not None:
            # BUG FIX: the original plotted `gdat.fluxphotdmat` here, silently
            # ignoring the `fluxphotdmat` argument it was given.
            axis.loglog(gdat.meanenph * 1e6, gdat.meanenph * fluxphotdmat[:, gdat.indxredsplotlate[c]], label='DM, ' + gdat.strgredsplotlate[c])
        if listfluxphotdmat is not None:
            tdpy.util.plot_braz(axis, gdat.meanenph * 1e6, gdat.meanenph[None, :] * listfluxphotdmat[:, :, gdat.indxredsplotlate[c]], \
                                lcol=listcolr[c], alpha=0.5, dcol=listcolr[c], mcol='black')

    axis.set_xlabel(r'$E_\gamma$ [eV]')
    axis.set_ylabel(r'$EdN/dE$ [1/cm$^2$/s/sr]')
    axis.legend()

    figr.tight_layout()
    plt.savefig(gdat.pathimag + 'fluxphothm12.pdf')
    plt.close()
def init( \
datatype='inpt', \
datalabl='XMM-Newton', \
numbswep=100000, \
verbtype=1, \
saveflux=True, \
makeplot=True, \
propmodltype='effi', \
concmodltype='duff', \
subsmodltype='smth', \
igmamodltype='clum'
):
# global object
gdat = tdpy.util.gdatstrt()
gdat.saveflux = saveflux
gdat.makeplot = makeplot
gdat.propmodltype = propmodltype
gdat.concmodltype = concmodltype
gdat.subsmodltype = subsmodltype
gdat.igmamodltype = igmamodltype
datapara = retr_datapara(gdat)
gdat.numbpara = len(datapara.name)
gdat.indxpara = arange(gdat.numbpara)
# plotting
gdat.plotsize = 6
gdat.strgmpol = ['s-wave', 'p-wave']
gdat.anch = 'b'
gdat.demcsigm = 6.
# constants
## cosmological constants
gravlght = 1.19e-34 # [cm^3/MeV/s^2]
gdat.omegbmat = 0.049 # baryonic matter abundance today
gdat.omegdmat = 0.26 # dark matter abundance today
gdat.omegradi = 4.8e-5 # radiation abundance today
gdat.omegdene = 0.69 # dark energy abundance today
gdat.stdvodennorm = 0.83 # rms density fluctuation in spheres of radius 8/h Mpc
gdat.psecindx = 0.96 # spectral index of the primordial power spectrum
gdat.hubbcons = 0.704 # reduced Hubble constant
gdat.tempcmbrnunc = 2.725 # CMB temperature today [K]
gdat.shtrwgth = 0.707 # Sheth-Tormen
gdat.shtrnorm = 0.3222 # Sheth-Tormen
gdat.shtrindx = 0.3 # Sheth-Tormen
gdat.omegmatt = gdat.omegbmat + gdat.omegdmat
gdat.masselec = 0.511 # electron gdat.meanmass [MeV]
gdat.velolght = 2.998e10 # speed of light [cm/s]
gdat.strtcons = 7.297e-3 # fine structure constant
gdat.odencoll = 1.686 # linear overdensity at collapse
gdat.odenviri = 18. * pi**2 # overdensity at virialization
gdat.radisolr = 8.5 # radial distance from the GC to the Sun [kpc]
gdat.edencritnunc = 5.3e-3 # critical density of the Universe [MeV/cm^3]
gdat.csecthom = 6.65e-25 # [cm^2] Thompson cross section
gdat.csecionzrydb = 6.3e-18 # [cm^2] neutral Hydrogen photon-ionization cross section at 13.6 eV
gdat.enerrydb = 13.5984e-6 # Rydberg energy [MeV]
gdat.plnkcons = 4.136e-21 # Planck constant [MeV s]
gdat.boltcons = 8.6173e-11 # Boltzmann constant [MeV/K]
gdat.massmilk = 1e12 # gdat.meanmass of the Milky Way [Msun]
gdat.ergs2mgev = 6.241509e5 # conversion factor from erg to MeV
gdat.myrs2secd = 3.154e13 # million year per second
gdat.solm2mgev = 1.115e60 # Solar gdat.meanmass in MeVc^2
gdat.kprc2cmet = 3.086e21 # conversion factor from kpc to cm
gdat.magffac = 4.966835e-8 # [(MeV/cm^3)/(muG^2/mu0)]
gdat.demccons = 35. / 18. # Dehnen McLaughlin profile index
gdat.sigm = 6. # ratio of mean squared relative velocity to 1-particle velocity variance
gdat.gravconsredu = 1.19e-34 # [cm^3/MeV/s^2]
gdat.cmet2angs = 1e8
# axis
# temp
gdat.numbradi = 50
gdat.numbrsph = 50
gdat.numbzaxi = 50
gdat.numbenph = 50
gdat.numbenel = 50
gdat.numbenpi = 50
gdat.numbcden = 50
gdat.numbreds = 50
gdat.numbmass = 50
gdat.numbwnum = 500
gdat.indxradi = arange(gdat.numbradi)
gdat.indxrsph = arange(gdat.numbrsph)
gdat.indxzaxi = arange(gdat.numbzaxi)
gdat.indxenph = arange(gdat.numbenph)
gdat.indxenel = arange(gdat.numbenel)
gdat.indxenpi = arange(gdat.numbenpi)
gdat.indxcden = arange(gdat.numbcden)
gdat.indxreds = arange(gdat.numbreds)
gdat.indxmass = arange(gdat.numbmass)
gdat.indxwnum = arange(gdat.numbwnum)
gdat.minmenph = 1e-6 # [MeV]
gdat.maxmenph = 1e-1 # [MeV]
gdat.meanenph = logspace(log10(gdat.minmenph), log10(gdat.maxmenph), gdat.numbenph)
gdat.indxenphplot = array([0, gdat.numbenph / 2, gdat.numbenph - 1])
gdat.minmenel = 5e1 # [MeV]
gdat.maxmenel = 1e5 # [MeV]
gdat.meanenel = logspace(log10(gdat.minmenel), log10(gdat.maxmenel), gdat.numbenel)
gdat.diffenel = gdat.meanenel[1:] - gdat.meanenel[:-1]
gdat.indxenelplot = array([0, gdat.numbenel / 4, gdat.numbenel / 2])
gdat.numbenelplot = gdat.indxenelplot.size
gdat.minmreds = 1e-3
gdat.maxmreds = 1e2
gdat.meanreds = logspace(log10(gdat.minmreds), log10(gdat.maxmreds), gdat.numbreds)
gdat.redsplotprox = array([0., 1e3, 1e6])
gdat.numbredsplot = gdat.redsplotprox.size
gdat.indxredsplot = empty(gdat.numbredsplot, dtype=int)
for n, reds in enumerate(gdat.redsplotprox):
gdat.indxredsplot[n] = argmin(fabs(gdat.meanreds - reds))
gdat.meanredsplotlateprox = array([0.1, 2., 6.])
gdat.numbredsplotlate = len(gdat.meanredsplotlateprox)
gdat.indxredsplotlate = []
gdat.strgredsplotlate = []
for k in range(gdat.numbredsplotlate):
gdat.indxredsplotlate.append(argmin(abs(gdat.meanreds - gdat.meanredsplotlateprox[k])))
gdat.strgredsplotlate.append('$z = %.3g$' % gdat.meanredsplotlateprox[k])
minmmass = 1e8 # [Solar Mass]
maxmmass = 1e17 # [Solar Mass]
gdat.meanmassprim = logspace(log10(minmmass), log10(maxmmass), gdat.numbmass + 1)
gdat.meanmass = gdat.meanmassprim[:-1]
gdat.meanmassprox = [1e10, 1e12, 1e15]
gdat.numbmassplot = len(gdat.meanmassprox)
gdat.indxmassplot = []
gdat.strgmass = []
for d in range(gdat.numbmassplot):
gdat.indxmassplot.append(argmin(fabs(gdat.meanmass - gdat.meanmassprox[d])))
gdat.strgmass.append('$M$ = ' + tdpy.util.mexp(gdat.meanmassprox[d]) + r' $M_\odot$')
gdat.minmcden = 10**11. # [1/cm^2]
gdat.maxmcden = 10**22. # [1/cm^2]
gdat.meancden = logspace(log10(gdat.minmcden), log10(gdat.maxmcden), gdat.numbcden)
gdat.minmenpi = 1e-12 # [MeV]
gdat.maxmenpi = 1e-5 # [MeV]
gdat.meanenpi = logspace(log10(gdat.minmenpi), log10(gdat.maxmenpi), gdat.numbenpi)
# wavenumber axis.s
gdat.minmwnum = 1e-4
gdat.maxmwnum = 1e4
gdat.meanwnum = logspace(log10(gdat.minmwnum), log10(gdat.maxmwnum), gdat.numbwnum)
gdat.edenbmat = gdat.omegbmat * gdat.edencritnunc * (1. + gdat.meanreds)**3
gdat.edendmat = gdat.omegdmat * gdat.edencritnunc * (1. + gdat.meanreds)**3
gdat.edenmatt = | |
tf.float32)
varm_init = 0.8*tf.ones([self.n_tasks,self.n_latent], dtype = tf.float32)
loc_init = tf.zeros(self.n_tasks)
varc_init = 1.0
else:
beta_init, varm_init, loc_init, varc_init = initial_state
beta_cur = tf.Variable(beta_init, name = 'beta_cur', trainable = False)
varm_cur = tf.Variable(varm_init, name = 'varm_cur', trainable = False)
loc_cur = tf.Variable(loc_init, name = 'loc_cur', trainable = False)
varc_cur = tf.Variable(varc_init, name = 'varc_cur', trainable = False)
unconstraining_bijectors = [tfb.Softplus(), tfb.Softplus(),tfb.Identity(), tfb.Softplus()]
unnormalized_posterior_log_prob = lambda *args: self.joint_log_prob(tf.nn.softplus(unc_noise), Wmix,*args)
current_state = [beta_cur, varm_cur,loc_cur, varc_cur]
# Initializing a sampler for warmup:
sampler = TransformedTransitionKernel(
inner_kernel= HamiltonianMonteCarlo(
target_log_prob_fn=unnormalized_posterior_log_prob,
step_size= step_size,
num_leapfrog_steps=num_leapfrog_steps),
bijector=unconstraining_bijectors)
# One step of the sampler
[
beta_next,
varm_next,
loc_next,
varc_next
], kernel_results = sampler.one_step(current_state = current_state,
previous_kernel_results=sampler.bootstrap_results(current_state))
# updating the step size
step_size_update = step_size_simple_update(step_size, kernel_results,
target_rate = target_accept_rate,
decrement_multiplier = 0.1,
increment_multiplier = 0.1)
# Updating the state of the hyperparameters
beta_update1 = beta_cur.assign(beta_next)
varm_update1 = varm_cur.assign(varm_next)
loc_update1 = loc_cur.assign(loc_next)
varc_update1 = varc_cur.assign(varc_next)
warmup_update = tf.group([beta_update1, varm_update1,loc_update1, varc_update1, step_size_update])
step_size_update2 = step_size.assign(0.95*step_size)
simple_update = tf.group([beta_update1, varm_update1,loc_update1, varc_update1])
# Set up E-step with MCMC
[
beta_probs,
varm_probs,
loc_probs,
varc_probs
], em_kernel_results = sample_chain(num_results= 10, num_burnin_steps= 0,
current_state=current_state,
kernel= TransformedTransitionKernel(
inner_kernel= HamiltonianMonteCarlo(
target_log_prob_fn=unnormalized_posterior_log_prob,
step_size= 0.95*step_size,
num_leapfrog_steps=num_leapfrog_steps),
bijector=unconstraining_bijectors))
# Updating the state of the hyperparameters
beta_update2 = beta_cur.assign(tf.reduce_mean(beta_probs, axis = 0))
varm_update2 = varm_cur.assign(tf.reduce_mean(varm_probs, axis = 0))
loc_update2 = loc_cur.assign(tf.reduce_mean(loc_probs, axis = 0))
varc_update2 = varc_cur.assign(tf.reduce_mean(varc_probs, axis = 0))
expectation_update = tf.group([beta_update2, varm_update2,loc_update2, varc_update2])
#-- Set up M-step (updating noise variance)
with tf.control_dependencies([expectation_update]):
loss = -self.joint_log_prob(tf.nn.softplus(unc_noise), Wmix, beta_cur, varm_cur, loc_cur, varc_cur) -self.rv_noise.log_prob(tf.nn.softplus(unc_noise)) \
-self.rv_Wmix.log_prob(Wmix)
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
minimization_update = optimizer.minimize(loss)
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
# Initial warm-up stage
print('First warm-up phase.')
num_accepted = 0
for t in range(num_warmup_iters):
_, is_accepted_val = sess.run([warmup_update, kernel_results.inner_results.is_accepted])
num_accepted += is_accepted_val
if (t % display_rate == 0) or ( t == num_warmup_iters -1):
print("Warm-Up Iteration: {:>3} Acceptance Rate: {:.3f}".format(t, num_accepted / (t + 1)))
loss_history = np.zeros(em_iters)
noise_history = np.zeros((em_iters, self.n_tasks))
print('Estimating the noise variance: ')
for t in range(em_iters):
[
_,
_,
unc_noise_,
Wmix_,
loss_
] = sess.run([
expectation_update,
minimization_update,
unc_noise,
Wmix,
loss
])
loss_history[t] = loss_
noise_history[t,:] = np.log(np.exp(unc_noise_) + 1)
if (t % display_rate == 0) or ( t == em_iters -1):
print("Iteration: {:>4} Loss: {:.3f}".format(t, loss_))
# Second warmup phase
print('Second warm-up phase.')
num_accepted = 0
for t in range(num_warmup_iters):
_, is_accepted_val = sess.run([warmup_update, kernel_results.inner_results.is_accepted])
num_accepted += is_accepted_val
if (t % display_rate == 0) or ( t == num_warmup_iters -1):
print("Warm-Up Iteration: {:>3} Acceptance Rate: {:.3f}".format(t, num_accepted / (t + 1)))
step_size_ = sess.run(step_size)
if step_size_ < 1e-4:
warnings.warn("Estimated step size is low. (less than 1e-4)")
print('Collecting samples for the GP hyperparameters.')
sess.run(step_size_update2)
loc_samples = np.zeros((mcmc_samples, self.n_tasks))
varm_samples = np.zeros((mcmc_samples, self.n_tasks, self.n_latent))
beta_samples = np.zeros((mcmc_samples, self.n_latent,self.dim_input))
varc_samples = np.zeros(mcmc_samples)
num_accepted = 0
total_runs = 4 * mcmc_samples
for t in range(total_runs):
[
_,
is_accepted_val,
loc_next_,
varm_next_,
beta_next_,
varc_next_
] = sess.run(
[
simple_update,
kernel_results.inner_results.is_accepted,
loc_next,
varm_next,
beta_next,
varc_next
])
if (t % 4 == 0) :
idx = t//4
loc_samples[idx,:] = loc_next_
varm_samples[idx,:,:] = varm_next_
beta_samples[idx,:,:] = beta_next_
varc_samples[idx] = varc_next_
num_accepted += is_accepted_val
if (t % display_rate == 0) or ( t == total_runs -1):
acceptance_rate = num_accepted / (t + 1)
print("Sampling Iteration: {:>3} Acceptance Rate: {:.3f}".format(t,acceptance_rate))
self.noise = np.log(np.exp(unc_noise_) + 1)
self.noise = tf.convert_to_tensor(self.noise, tf.float32)
self.Wmix = tf.convert_to_tensor(Wmix_, tf.float32)
hyperpar_samples = [loc_samples, varm_samples, beta_samples, varc_samples]
if acceptance_rate < 0.1:
warnings.warn("Acceptance rate was low (less than 0.1)")
sess.close()
return hyperpar_samples, loss_history, noise_history
    def posteriormeanVariance(self, Xtest, hyperpars, fullCov = False):
        """Posterior mean and (co)variance of the multi-task GP at new inputs.

        Args:
            Xtest: N x D tensor of test inputs (static shape required; the
                first dimension is read via Xtest.shape[0].value, TF1-style).
            hyperpars: list [beta, varm, loc, varc] of hyperparameter tensors.
            fullCov: if True build the full test covariance matrix; otherwise
                only the marginal prior variances are propagated.

        Returns:
            mean_pos: (n_tasks * N) x 1 tensor of posterior means.
            var_pos: posterior (co)variance as produced by posterior_Gaussian
                (full matrix when fullCov, marginals otherwise).
        """
        beta, varm, loc, varc = hyperpars
        noise = self.noise
        Wmix = self.Wmix
        # ------- generate covariance matrix for training data and computing the corresponding cholesky factor
        Kxx = self.kernel(self.Xtrain, self.Xtrain, beta) # kernels for the latent Gaussian processes
        # Kxx has shape R x N x N
        Cov_train = mixing_Covariance(Kxx, Wmix, varm, varc) # with shape M x N x M x N
        size = self.n_tasks*self.n_train
        # Per-task noise variances tiled over training points, then placed on
        # the diagonal of the flattened (M*N) x (M*N) training covariance.
        noise_matrix = tf.tile(self.noise[:,tf.newaxis],[1, self.n_train])
        noise_matrix = tf.linalg.diag(tf.reshape(noise_matrix,[-1]))
        # jitter_level keeps the matrix numerically positive definite.
        Cov_train = tf.reshape(Cov_train, [size, size]) + noise_matrix + (self.jitter_level)*tf.eye(size)
        #-------- Computing the cholesky factor ------------
        L = tf.linalg.cholesky(Cov_train)
        #-------- generate covariance matrix for test data
        n_test = Xtest.shape[0].value
        size_test = self.n_tasks*n_test
        if fullCov:
            Kx2 = self.kernel(Xtest, Xtest, beta)
            Cov_test = mixing_Covariance(Kx2, Wmix, varm, varc)
            Cov_test = tf.reshape(Cov_test, [size_test, size_test]) + (noise + self.jitter_level)*tf.eye(size_test)
        else:
            # Marginal prior variance per task: varc * sum_q Wmix[m, q]^2 + sum_r varm[m, r],
            # tiled across test points and flattened to length size_test.
            Cov_test = varc*tf.reduce_sum(tf.square(Wmix), axis =1) + tf.reduce_sum(varm, axis = 1)
            Cov_test = tf.tile(Cov_test[:, tf.newaxis], [1, n_test])
            Cov_test = tf.reshape(Cov_test,[size_test])
        #------- covariance between test data and training data
        Kx3 = self.kernel(self.Xtrain, Xtest, beta)
        Cov_mixed = mixing_Covariance(Kx3, Wmix, varm, varc) # with shape M x N x M x N_test
        Cov_mixed = tf.reshape(Cov_mixed, [size, size_test])
        # Broadcast the per-task constant mean `loc` over training and test points.
        mean_training = tf.tile(loc[:, tf.newaxis], [1, self.n_train])
        mean_training = tf.reshape(mean_training, [size,1])
        mean_test = tf.tile(loc[:, tf.newaxis], [1, n_test])
        mean_test = tf.reshape(mean_test, [size_test,1])
        # Center the (transposed, flattened) training targets before solving.
        Y = tf.transpose(self.Ytrain)
        Y = tf.reshape(Y, [size,1]) - mean_training
        mean_pos, var_pos = posterior_Gaussian(L, Cov_mixed, Cov_test, Y, fullCov)
        # Add the prior mean back to obtain the posterior mean.
        mean_pos = mean_pos + mean_test
        return mean_pos, var_pos
    def samples(self, Xtest, hyperpar_samples, num_samples = 20, with_point_samples = False):
        """Posterior predictions marginalized over MCMC hyperparameter samples.

        Args:
            Xtest: 2-dimensional numpy array of test inputs (a 1-d array is
                promoted to a single column).
            hyperpar_samples: list [loc_samples, varm_samples, beta_samples,
                varc_samples] of numpy arrays containing MCMC samples for the
                hyperparameters.
            num_samples: point samples drawn per hyperparameter sample (only
                used when with_point_samples is True).
            with_point_samples: if True, additionally return draws from the
                posterior predictive distribution.

        Returns:
            mean_pos, var_pos as (n_test, n_tasks) numpy arrays, plus
            samples with shape (n_total, n_test, n_tasks) when
            with_point_samples is True.
        """
        n_test = len(Xtest)
        if len(Xtest.shape) == 1:
            Xtest = Xtest[:,None]
        Xtest = tf.convert_to_tensor(Xtest, tf.float32)
        size_test = n_test*self.n_tasks
        loc_samples, varm_samples, beta_samples, varc_samples = hyperpar_samples
        loc_samples = loc_samples.astype(np.float32)
        varm_samples = varm_samples.astype(np.float32)
        beta_samples = beta_samples.astype(np.float32)
        varc_samples = varc_samples.astype(np.float32)
        n_samples = len(loc_samples)
        # Accumulators for tf.while_loop; the initial zero column (and zero
        # sample slab) is a placeholder stripped after the loop via [:,1:]/[1:].
        i0 = tf.constant(0)
        collect_mean0 = tf.zeros([size_test,1])
        collect_variance0 = tf.zeros([size_test,1])
        if with_point_samples:
            collect_samples0 = tf.zeros([1,size_test,1])
            def condition(i, collect_mean, collect_variance, collect_samples):
                # Iterate once per hyperparameter sample.
                return i < n_samples
            def body(i,collect_mean, collect_variance, collect_samples):
                # Posterior for the i-th hyperparameter sample, plus point draws.
                beta = tf.gather(beta_samples,i, axis = 0)
                varm = tf.gather(varm_samples,i, axis = 0)
                loc = tf.gather(loc_samples, i, axis = 0)
                varc = tf.gather(varc_samples, i, axis = 0)
                hyperpars = [beta,varm, loc, varc]
                mean_pos, var_pos = self.posteriormeanVariance(Xtest, hyperpars, fullCov = False)
                rv_norm  = tfd.Normal(loc = mean_pos, scale = tf.sqrt(var_pos))
                samples = rv_norm.sample(num_samples)
                out = [i+1, tf.concat([collect_mean, mean_pos], axis = 1),
                        tf.concat([collect_variance, var_pos], axis = 1), tf.concat([collect_samples,samples], axis =0)]
                return out
            # shape_invariants allow the accumulators to grow along the
            # concatenation axis across iterations.
            results = tf.while_loop(condition, body, loop_vars = [i0, collect_mean0, collect_variance0, collect_samples0],
                            parallel_iterations = 20,
                            shape_invariants = [i0.get_shape(), tf.TensorShape([size_test, None]), tf.TensorShape([size_test, None]),tf.TensorShape([None,size_test,1])])
            with tf.Session() as sess:
                _, mean_pos_, var_pos_, samples_ = sess.run(results)
            # Combine over hyperparameter samples: mean of means; variance by
            # the law of total variance (mean of variances + variance of means).
            mean_pos =np.mean(mean_pos_[:,1:],axis =1)
            var_pos = np.mean(var_pos_[:,1:], axis =1) + np.var(mean_pos_[:,1:],axis =1)
            # Reshaping
            mean_pos = np.reshape(mean_pos,(self.n_tasks,n_test))
            mean_pos = np.transpose(mean_pos)
            var_pos = np.reshape(var_pos,(self.n_tasks,n_test))
            var_pos = np.transpose(var_pos)
            samples = samples_[1:,:,0]
            n_total = len(samples)
            samples = np.reshape(samples,(n_total,self.n_tasks,n_test))
            samples = np.transpose(samples, (0,2,1))
            return mean_pos, var_pos, samples
        else:
            def condition(i, collect_mean, collect_variance):
                # Iterate once per hyperparameter sample.
                return i < n_samples
            def body(i,collect_mean, collect_variance):
                # Posterior for the i-th hyperparameter sample (no point draws).
                beta = tf.gather(beta_samples,i, axis = 0)
                varm = tf.gather(varm_samples,i, axis = 0)
                loc = tf.gather(loc_samples, i, axis = 0)
                varc = tf.gather(varc_samples, i, axis = 0)
                hyperpars = [beta,varm, loc, varc]
                mean_pos, var_pos = self.posteriormeanVariance(Xtest, hyperpars, fullCov = False)
                out = [i+1, tf.concat([collect_mean, mean_pos], axis = 1),
                        tf.concat([collect_variance, var_pos], axis = 1)]
                return out
            results = tf.while_loop(condition, body, loop_vars = [i0, collect_mean0, collect_variance0],
                            parallel_iterations = 20,
                            shape_invariants = [i0.get_shape(), tf.TensorShape([size_test, None]), tf.TensorShape([size_test, None])])
            with tf.Session() as sess:
                _, mean_pos_, var_pos_ = sess.run(results)
            # Combine over hyperparameter samples (law of total variance).
            mean_pos =np.mean(mean_pos_[:,1:],axis =1)
            var_pos = np.mean(var_pos_[:,1:], axis =1) + np.var(mean_pos_[:,1:],axis =1)
            # Reshaping
            mean_pos = np.reshape(mean_pos,(self.n_tasks,n_test))
            mean_pos = np.transpose(mean_pos)
            var_pos = np.reshape(var_pos,(self.n_tasks,n_test))
            var_pos = np.transpose(var_pos)
            return mean_pos, var_pos
#-------------------------------------------------------------------------------------------
#---- The next functions are used for sensitivity analysis
def full_PosteriormeanVariance(self, X, L, hyperpars):
n_new = X.shape[0].value
size_new = self.n_tasks*n_new
beta, varm, loc, varc = hyperpars
Cov_test = varc*tf.reduce_sum(tf.square(self.Wmix), axis =1) + tf.reduce_sum(varm, axis = 1)
Cov_test = tf.tile(Cov_test[:, tf.newaxis], [1, n_new])
Cov_test = tf.reshape(Cov_test,[size_new])
Kx3 = self.kernel(self.Xtrain, X, beta)
size = self.n_tasks*self.n_train
| |
('u2u64', b), 32)), 'options->lower_pack_64_2x32_split'),
(('pack_32_2x16_split', a, b), ('ior', ('u2u32', a), ('ishl', ('u2u32', b), 16)), 'options->lower_pack_32_2x16_split'),
(('unpack_64_2x32_split_x', a), ('u2u32', a), 'options->lower_unpack_64_2x32_split'),
(('unpack_64_2x32_split_y', a), ('u2u32', ('ushr', a, 32)), 'options->lower_unpack_64_2x32_split'),
(('unpack_32_2x16_split_x', a), ('u2u16', a), 'options->lower_unpack_32_2x16_split'),
(('unpack_32_2x16_split_y', a), ('u2u16', ('ushr', a, 16)), 'options->lower_unpack_32_2x16_split'),
# Useless masking before unpacking
(('unpack_half_2x16_split_x', ('iand', a, 0xffff)), ('unpack_half_2x16_split_x', a)),
(('unpack_32_2x16_split_x', ('iand', a, 0xffff)), ('unpack_32_2x16_split_x', a)),
(('unpack_64_2x32_split_x', ('iand', a, 0xffffffff)), ('unpack_64_2x32_split_x', a)),
(('unpack_half_2x16_split_y', ('iand', a, 0xffff0000)), ('unpack_half_2x16_split_y', a)),
(('unpack_32_2x16_split_y', ('iand', a, 0xffff0000)), ('unpack_32_2x16_split_y', a)),
(('unpack_64_2x32_split_y', ('iand', a, 0xffffffff00000000)), ('unpack_64_2x32_split_y', a)),
(('unpack_half_2x16_split_x', ('extract_u16', a, 0)), ('unpack_half_2x16_split_x', a)),
(('unpack_half_2x16_split_x', ('extract_u16', a, 1)), ('unpack_half_2x16_split_y', a)),
(('unpack_half_2x16_split_x', ('ushr', a, 16)), ('unpack_half_2x16_split_y', a)),
(('unpack_32_2x16_split_x', ('extract_u16', a, 0)), ('unpack_32_2x16_split_x', a)),
(('unpack_32_2x16_split_x', ('extract_u16', a, 1)), ('unpack_32_2x16_split_y', a)),
# Optimize half packing
(('ishl', ('pack_half_2x16', ('vec2', a, 0)), 16), ('pack_half_2x16', ('vec2', 0, a))),
(('ushr', ('pack_half_2x16', ('vec2', 0, a)), 16), ('pack_half_2x16', ('vec2', a, 0))),
(('iadd', ('pack_half_2x16', ('vec2', a, 0)), ('pack_half_2x16', ('vec2', 0, b))),
('pack_half_2x16', ('vec2', a, b))),
(('ior', ('pack_half_2x16', ('vec2', a, 0)), ('pack_half_2x16', ('vec2', 0, b))),
('pack_half_2x16', ('vec2', a, b))),
(('ishl', ('pack_half_2x16_split', a, 0), 16), ('pack_half_2x16_split', 0, a)),
(('ushr', ('pack_half_2x16_split', 0, a), 16), ('pack_half_2x16_split', a, 0)),
(('extract_u16', ('pack_half_2x16_split', 0, a), 1), ('pack_half_2x16_split', a, 0)),
(('iadd', ('pack_half_2x16_split', a, 0), ('pack_half_2x16_split', 0, b)), ('pack_half_2x16_split', a, b)),
(('ior', ('pack_half_2x16_split', a, 0), ('pack_half_2x16_split', 0, b)), ('pack_half_2x16_split', a, b)),
])
# After the ('extract_u8', a, 0) pattern, above, triggers, there will be
# patterns like those below: extracting byte 0 of a value shifted right by a
# whole number of bytes equals extracting the corresponding higher byte of
# the unshifted value.
for shift_op in ('ushr', 'ishr'):
    optimizations.append((('extract_u8', (shift_op, 'a@16', 8), 0), ('extract_u8', a, 1)))
    for byte in range(1, 4):
        optimizations.append((('extract_u8', (shift_op, 'a@32', 8 * byte), 0), ('extract_u8', a, byte)))
    for byte in range(1, 8):
        optimizations.append((('extract_u8', (shift_op, 'a@64', 8 * byte), 0), ('extract_u8', a, byte)))
# Byte 0 of the upper half-word is byte 2 of the full value.
optimizations.append((('extract_u8', ('extract_u16', a, 1), 0), ('extract_u8', a, 2)))
# After the ('extract_[iu]8', a, 3) patterns, above, trigger, there will be
# patterns like those below: extracting the top byte of a left-shifted value
# equals extracting a lower byte of the unshifted value.
for extract_op in ('extract_u8', 'extract_i8'):
    optimizations.append(((extract_op, ('ishl', 'a@16', 8), 1), (extract_op, a, 0)))
    for byte in range(2, -1, -1):
        optimizations.append(((extract_op, ('ishl', 'a@32', 24 - 8 * byte), 3), (extract_op, a, byte)))
    for byte in range(6, -1, -1):
        optimizations.append(((extract_op, ('ishl', 'a@64', 56 - 8 * byte), 7), (extract_op, a, byte)))
optimizations.extend([
# Subtracts
(('ussub_4x8', a, 0), a),
(('ussub_4x8', a, ~0), 0),
# Lower all Subtractions first - they can get recombined later
(('fsub', a, b), ('fadd', a, ('fneg', b))),
(('isub', a, b), ('iadd', a, ('ineg', b))),
(('uabs_usub', a, b), ('bcsel', ('ult', a, b), ('ineg', ('isub', a, b)), ('isub', a, b))),
# This is correct. We don't need isub_sat because the result type is unsigned, so it cannot overflow.
(('uabs_isub', a, b), ('bcsel', ('ilt', a, b), ('ineg', ('isub', a, b)), ('isub', a, b))),
# Propagate negation up multiplication chains
(('fmul(is_used_by_non_fsat)', ('fneg', a), b), ('fneg', ('fmul', a, b))),
(('imul', ('ineg', a), b), ('ineg', ('imul', a, b))),
# Propagate constants up multiplication chains
(('~fmul(is_used_once)', ('fmul(is_used_once)', 'a(is_not_const)', 'b(is_not_const)'), '#c'), ('fmul', ('fmul', a, c), b)),
(('imul(is_used_once)', ('imul(is_used_once)', 'a(is_not_const)', 'b(is_not_const)'), '#c'), ('imul', ('imul', a, c), b)),
# Prefer moving out a multiplication for more MAD/FMA-friendly code
(('~fadd(is_used_once)', ('fadd(is_used_once)', 'a(is_not_const)', 'b(is_fmul)'), '#c'), ('fadd', ('fadd', a, c), b)),
(('~fadd(is_used_once)', ('fadd(is_used_once)', 'a(is_not_const)', 'b(is_not_const)'), '#c'), ('fadd', ('fadd', a, c), b)),
(('iadd(is_used_once)', ('iadd(is_used_once)', 'a(is_not_const)', 'b(is_not_const)'), '#c'), ('iadd', ('iadd', a, c), b)),
# Reassociate constants in add/mul chains so they can be folded together.
# For now, we mostly only handle cases where the constants are separated by
# a single non-constant. We could do better eventually.
(('~fmul', '#a', ('fmul', 'b(is_not_const)', '#c')), ('fmul', ('fmul', a, c), b)),
(('imul', '#a', ('imul', 'b(is_not_const)', '#c')), ('imul', ('imul', a, c), b)),
(('~fadd', '#a', ('fadd', 'b(is_not_const)', '#c')), ('fadd', ('fadd', a, c), b)),
(('~fadd', '#a', ('fneg', ('fadd', 'b(is_not_const)', '#c'))), ('fadd', ('fadd', a, ('fneg', c)), ('fneg', b))),
(('iadd', '#a', ('iadd', 'b(is_not_const)', '#c')), ('iadd', ('iadd', a, c), b)),
(('iand', '#a', ('iand', 'b(is_not_const)', '#c')), ('iand', ('iand', a, c), b)),
(('ior', '#a', ('ior', 'b(is_not_const)', '#c')), ('ior', ('ior', a, c), b)),
(('ixor', '#a', ('ixor', 'b(is_not_const)', '#c')), ('ixor', ('ixor', a, c), b)),
# Drop mul-div by the same value when there's no wrapping.
(('idiv', ('imul(no_signed_wrap)', a, b), b), a),
# By definition...
(('bcsel', ('ige', ('find_lsb', a), 0), ('find_lsb', a), -1), ('find_lsb', a)),
(('bcsel', ('ige', ('ifind_msb', a), 0), ('ifind_msb', a), -1), ('ifind_msb', a)),
(('bcsel', ('ige', ('ufind_msb', a), 0), ('ufind_msb', a), -1), ('ufind_msb', a)),
(('bcsel', ('ine', a, 0), ('find_lsb', a), -1), ('find_lsb', a)),
(('bcsel', ('ine', a, 0), ('ifind_msb', a), -1), ('ifind_msb', a)),
(('bcsel', ('ine', a, 0), ('ufind_msb', a), -1), ('ufind_msb', a)),
(('bcsel', ('ine', a, -1), ('ifind_msb', a), -1), ('ifind_msb', a)),
(('~fmul', ('bcsel(is_used_once)', c, -1.0, 1.0), b), ('bcsel', c, ('fneg', b), b)),
(('~fmul', ('bcsel(is_used_once)', c, 1.0, -1.0), b), ('bcsel', c, b, ('fneg', b))),
(('~bcsel', ('flt', a, 0.0), ('fneg', a), a), ('fabs', a)),
(('bcsel', a, ('bcsel', b, c, d), d), ('bcsel', ('iand', a, b), c, d)),
(('bcsel', a, b, ('bcsel', c, b, d)), ('bcsel', ('ior', a, c), b, d)),
# Misc. lowering
(('fmod', a, b), ('fsub', a, ('fmul', b, ('ffloor', ('fdiv', a, b)))), 'options->lower_fmod'),
(('frem', a, b), ('fsub', a, ('fmul', b, ('ftrunc', ('fdiv', a, b)))), 'options->lower_fmod'),
(('uadd_carry', a, b), ('b2i', ('ult', ('iadd', a, b), a)), 'options->lower_uadd_carry'),
(('usub_borrow@32', a, b), ('b2i', ('ult', a, b)), 'options->lower_usub_borrow'),
(('bitfield_insert', 'base', 'insert', 'offset', 'bits'),
('bcsel', ('ult', 31, 'bits'), 'insert',
('bfi', ('bfm', 'bits', 'offset'), 'insert', 'base')),
'options->lower_bitfield_insert'),
(('ihadd', a, b), ('iadd', ('iand', a, b), ('ishr', ('ixor', a, b), 1)), 'options->lower_hadd'),
(('uhadd', a, b), ('iadd', ('iand', a, b), ('ushr', ('ixor', a, b), 1)), 'options->lower_hadd'),
(('irhadd', a, b), ('isub', ('ior', a, b), ('ishr', ('ixor', a, b), 1)), 'options->lower_hadd'),
(('urhadd', a, b), ('isub', ('ior', a, b), ('ushr', ('ixor', a, b), 1)), 'options->lower_hadd'),
(('ihadd@64', a, b), ('iadd', ('iand', a, b), ('ishr', ('ixor', a, b), 1)), 'options->lower_hadd64 || (options->lower_int64_options & nir_lower_iadd64) != 0'),
(('uhadd@64', a, b), ('iadd', ('iand', a, b), ('ushr', ('ixor', a, b), 1)), 'options->lower_hadd64 || (options->lower_int64_options & nir_lower_iadd64) != 0'),
(('irhadd@64', a, b), ('isub', ('ior', a, b), ('ishr', ('ixor', a, b), 1)), 'options->lower_hadd64 || (options->lower_int64_options & nir_lower_iadd64) != 0'),
(('urhadd@64', a, b), ('isub', ('ior', a, b), ('ushr', ('ixor', a, b), 1)), 'options->lower_hadd64 || (options->lower_int64_options & nir_lower_iadd64) != 0'),
(('uadd_sat@64', a, b), ('bcsel', ('ult', ('iadd', a, b), a), -1, ('iadd', a, b)), 'options->lower_add_sat || (options->lower_int64_options & nir_lower_iadd64) != 0'),
(('uadd_sat', a, b), ('bcsel', ('ult', ('iadd', a, b), a), -1, ('iadd', a, b)), 'options->lower_add_sat'),
(('usub_sat', a, b), ('bcsel', ('ult', a, b), 0, ('isub', a, b)), 'options->lower_add_sat'),
(('usub_sat@64', a, b), ('bcsel', ('ult', a, b), 0, ('isub', a, b)), 'options->lower_usub_sat64 || (options->lower_int64_options & nir_lower_iadd64) != 0'),
# int64_t sum = a + b;
#
# if (a < 0 && b < 0 && a < sum)
# sum = INT64_MIN;
# } else if (a >= 0 && b >= 0 && sum < a)
# sum = INT64_MAX;
# }
#
# A couple optimizations are applied.
#
# 1. a < sum => sum >= 0. This replacement works because it is known that
# a < 0 and b < 0, so sum should also be < 0 unless there was
# underflow.
#
# 2. sum < a => sum < 0. This replacement works because it is known that
# a >= 0 and b >= 0, so sum should also be >= 0 unless there was
# overflow.
#
# 3. Invert the second if-condition and swap the order of parameters for
# the bcsel. !(a >= 0 && b >= 0 && sum < 0) becomes !(a >= 0) || !(b >=
# 0) || !(sum < 0), and that becomes (a < 0) || (b < 0) || (sum >= 0)
#
# On Intel Gen11, this saves ~11 instructions.
(('iadd_sat@64', a, b), ('bcsel',
('iand', ('iand', ('ilt', a, 0), ('ilt', b, 0)), ('ige', ('iadd', a, b), 0)),
0x8000000000000000,
('bcsel',
('ior', ('ior', ('ilt', a, 0), ('ilt', b, 0)), ('ige', ('iadd', a, b), 0)),
('iadd', a, b),
0x7fffffffffffffff)),
'(options->lower_int64_options & nir_lower_iadd64) != 0'),
# int64_t sum = a - b;
#
# if (a < 0 && b >= 0 && a < sum)
# sum = INT64_MIN;
# } else if (a >= | |
# This file is autogenerated by `applaudgen` from `app_store_connect_api.json`.
# Do not modify this file -- YOUR CHANGES WILL BE ERASED!
from enum import Enum
class StringEnum(str, Enum):
    """Base class for string-valued enums whose str() is the raw value.

    Rationale for mixing in ``str``:
    https://www.cosmicpython.com/blog/2020-10-27-i-hate-enums.html
    """

    def __str__(self) -> str:
        # Bypass Enum.__str__ ("ClassName.MEMBER") and yield the plain string.
        return str.__str__(self)
class FinanceReportType(StringEnum):
    """App Store Connect `FinanceReportType` schema values."""
    FINANCIAL = "FINANCIAL"
    FINANCE_DETAIL = "FINANCE_DETAIL"
class SalesReportType(StringEnum):
    """App Store Connect `SalesReportType` schema values."""
    SALES = "SALES"
    PRE_ORDER = "PRE_ORDER"
    NEWSSTAND = "NEWSSTAND"
    SUBSCRIPTION = "SUBSCRIPTION"
    SUBSCRIPTION_EVENT = "SUBSCRIPTION_EVENT"
    SUBSCRIBER = "SUBSCRIBER"
class SalesReportSubType(StringEnum):
    """App Store Connect `SalesReportSubType` schema values."""
    SUMMARY = "SUMMARY"
    DETAILED = "DETAILED"
class AppClipAction(StringEnum):
    """App Store Connect `AppClipAction` schema values."""
    OPEN = 'OPEN'
    VIEW = 'VIEW'
    PLAY = 'PLAY'
class AppClipAdvancedExperienceLanguage(StringEnum):
    """App Store Connect `AppClipAdvancedExperienceLanguage` schema values
    (uppercase two-letter language codes)."""
    AR = 'AR'
    CA = 'CA'
    CS = 'CS'
    DA = 'DA'
    DE = 'DE'
    EL = 'EL'
    EN = 'EN'
    ES = 'ES'
    FI = 'FI'
    FR = 'FR'
    HE = 'HE'
    HI = 'HI'
    HR = 'HR'
    HU = 'HU'
    ID = 'ID'
    IT = 'IT'
    JA = 'JA'
    KO = 'KO'
    MS = 'MS'
    NL = 'NL'
    NO = 'NO'
    PL = 'PL'
    PT = 'PT'
    RO = 'RO'
    RU = 'RU'
    SK = 'SK'
    SV = 'SV'
    TH = 'TH'
    TR = 'TR'
    UK = 'UK'
    VI = 'VI'
    ZH = 'ZH'
class AppEncryptionDeclarationState(StringEnum):
    """App Store Connect `AppEncryptionDeclarationState` schema values."""
    IN_REVIEW = 'IN_REVIEW'
    APPROVED = 'APPROVED'
    REJECTED = 'REJECTED'
    INVALID = 'INVALID'
    EXPIRED = 'EXPIRED'
class AppStoreAgeRating(StringEnum):
    """App Store Connect `AppStoreAgeRating` schema values."""
    FOUR_PLUS = 'FOUR_PLUS'
    NINE_PLUS = 'NINE_PLUS'
    TWELVE_PLUS = 'TWELVE_PLUS'
    SEVENTEEN_PLUS = 'SEVENTEEN_PLUS'
class AppStoreVersionState(StringEnum):
    """App Store Connect `AppStoreVersionState` schema values."""
    DEVELOPER_REMOVED_FROM_SALE = 'DEVELOPER_REMOVED_FROM_SALE'
    DEVELOPER_REJECTED = 'DEVELOPER_REJECTED'
    IN_REVIEW = 'IN_REVIEW'
    INVALID_BINARY = 'INVALID_BINARY'
    METADATA_REJECTED = 'METADATA_REJECTED'
    PENDING_APPLE_RELEASE = 'PENDING_APPLE_RELEASE'
    PENDING_CONTRACT = 'PENDING_CONTRACT'
    PENDING_DEVELOPER_RELEASE = 'PENDING_DEVELOPER_RELEASE'
    PREPARE_FOR_SUBMISSION = 'PREPARE_FOR_SUBMISSION'
    PREORDER_READY_FOR_SALE = 'PREORDER_READY_FOR_SALE'
    PROCESSING_FOR_APP_STORE = 'PROCESSING_FOR_APP_STORE'
    READY_FOR_SALE = 'READY_FOR_SALE'
    REJECTED = 'REJECTED'
    REMOVED_FROM_SALE = 'REMOVED_FROM_SALE'
    WAITING_FOR_EXPORT_COMPLIANCE = 'WAITING_FOR_EXPORT_COMPLIANCE'
    WAITING_FOR_REVIEW = 'WAITING_FOR_REVIEW'
    REPLACED_WITH_NEW_VERSION = 'REPLACED_WITH_NEW_VERSION'
class BetaInviteType(StringEnum):
    """App Store Connect `BetaInviteType` schema values."""
    EMAIL = 'EMAIL'
    PUBLIC_LINK = 'PUBLIC_LINK'
class BetaReviewState(StringEnum):
    """App Store Connect `BetaReviewState` schema values."""
    WAITING_FOR_REVIEW = 'WAITING_FOR_REVIEW'
    IN_REVIEW = 'IN_REVIEW'
    REJECTED = 'REJECTED'
    APPROVED = 'APPROVED'
class BrazilAgeRating(StringEnum):
    """App Store Connect `BrazilAgeRating` schema values."""
    L = 'L'
    TEN = 'TEN'
    TWELVE = 'TWELVE'
    FOURTEEN = 'FOURTEEN'
    SIXTEEN = 'SIXTEEN'
    EIGHTEEN = 'EIGHTEEN'
class BuildAudienceType(StringEnum):
    """App Store Connect `BuildAudienceType` schema values."""
    INTERNAL_ONLY = 'INTERNAL_ONLY'
    APP_STORE_ELIGIBLE = 'APP_STORE_ELIGIBLE'
class BundleIdPlatform(StringEnum):
    """App Store Connect `BundleIdPlatform` schema values."""
    IOS = 'IOS'
    MAC_OS = 'MAC_OS'
class CapabilityType(StringEnum):
    """App Store Connect `CapabilityType` schema values."""
    ICLOUD = 'ICLOUD'
    IN_APP_PURCHASE = 'IN_APP_PURCHASE'
    GAME_CENTER = 'GAME_CENTER'
    PUSH_NOTIFICATIONS = 'PUSH_NOTIFICATIONS'
    WALLET = 'WALLET'
    INTER_APP_AUDIO = 'INTER_APP_AUDIO'
    MAPS = 'MAPS'
    ASSOCIATED_DOMAINS = 'ASSOCIATED_DOMAINS'
    PERSONAL_VPN = 'PERSONAL_VPN'
    APP_GROUPS = 'APP_GROUPS'
    HEALTHKIT = 'HEALTHKIT'
    HOMEKIT = 'HOMEKIT'
    WIRELESS_ACCESSORY_CONFIGURATION = 'WIRELESS_ACCESSORY_CONFIGURATION'
    APPLE_PAY = 'APPLE_PAY'
    DATA_PROTECTION = 'DATA_PROTECTION'
    SIRIKIT = 'SIRIKIT'
    NETWORK_EXTENSIONS = 'NETWORK_EXTENSIONS'
    MULTIPATH = 'MULTIPATH'
    HOT_SPOT = 'HOT_SPOT'
    NFC_TAG_READING = 'NFC_TAG_READING'
    CLASSKIT = 'CLASSKIT'
    AUTOFILL_CREDENTIAL_PROVIDER = 'AUTOFILL_CREDENTIAL_PROVIDER'
    ACCESS_WIFI_INFORMATION = 'ACCESS_WIFI_INFORMATION'
    NETWORK_CUSTOM_PROTOCOL = 'NETWORK_CUSTOM_PROTOCOL'
    COREMEDIA_HLS_LOW_LATENCY = 'COREMEDIA_HLS_LOW_LATENCY'
    SYSTEM_EXTENSION_INSTALL = 'SYSTEM_EXTENSION_INSTALL'
    USER_MANAGEMENT = 'USER_MANAGEMENT'
    APPLE_ID_AUTH = 'APPLE_ID_AUTH'
class CertificateType(StringEnum):
    """App Store Connect `CertificateType` schema values."""
    IOS_DEVELOPMENT = 'IOS_DEVELOPMENT'
    IOS_DISTRIBUTION = 'IOS_DISTRIBUTION'
    MAC_APP_DISTRIBUTION = 'MAC_APP_DISTRIBUTION'
    MAC_INSTALLER_DISTRIBUTION = 'MAC_INSTALLER_DISTRIBUTION'
    MAC_APP_DEVELOPMENT = 'MAC_APP_DEVELOPMENT'
    DEVELOPER_ID_KEXT = 'DEVELOPER_ID_KEXT'
    DEVELOPER_ID_APPLICATION = 'DEVELOPER_ID_APPLICATION'
    DEVELOPMENT = 'DEVELOPMENT'
    DISTRIBUTION = 'DISTRIBUTION'
    PASS_TYPE_ID = 'PASS_TYPE_ID'
    PASS_TYPE_ID_WITH_NFC = 'PASS_TYPE_ID_WITH_NFC'
class CiActionType(StringEnum):
BUILD = 'BUILD'
ANALYZE = 'ANALYZE'
TEST = 'TEST'
ARCHIVE = 'ARCHIVE'
class CiCompletionStatus(StringEnum):
SUCCEEDED = 'SUCCEEDED'
FAILED = 'FAILED'
ERRORED = 'ERRORED'
CANCELED = 'CANCELED'
SKIPPED = 'SKIPPED'
class CiExecutionProgress(StringEnum):
PENDING = 'PENDING'
RUNNING = 'RUNNING'
COMPLETE = 'COMPLETE'
class CiGitRefKind(StringEnum):
BRANCH = 'BRANCH'
TAG = 'TAG'
class CiTestDestinationKind(StringEnum):
SIMULATOR = 'SIMULATOR'
MAC = 'MAC'
class CiTestStatus(StringEnum):
SUCCESS = 'SUCCESS'
FAILURE = 'FAILURE'
MIXED = 'MIXED'
SKIPPED = 'SKIPPED'
EXPECTED_FAILURE = 'EXPECTED_FAILURE'
class ExternalBetaState(StringEnum):
PROCESSING = 'PROCESSING'
PROCESSING_EXCEPTION = 'PROCESSING_EXCEPTION'
MISSING_EXPORT_COMPLIANCE = 'MISSING_EXPORT_COMPLIANCE'
READY_FOR_BETA_TESTING = 'READY_FOR_BETA_TESTING'
IN_BETA_TESTING = 'IN_BETA_TESTING'
EXPIRED = 'EXPIRED'
READY_FOR_BETA_SUBMISSION = 'READY_FOR_BETA_SUBMISSION'
IN_EXPORT_COMPLIANCE_REVIEW = 'IN_EXPORT_COMPLIANCE_REVIEW'
WAITING_FOR_BETA_REVIEW = 'WAITING_FOR_BETA_REVIEW'
IN_BETA_REVIEW = 'IN_BETA_REVIEW'
BETA_REJECTED = 'BETA_REJECTED'
BETA_APPROVED = 'BETA_APPROVED'
class IconAssetType(StringEnum):
    """Contexts an icon asset is used in."""
    APP_STORE = 'APP_STORE'
    MESSAGES_APP_STORE = 'MESSAGES_APP_STORE'
    WATCH_APP_STORE = 'WATCH_APP_STORE'
    TV_OS_HOME_SCREEN = 'TV_OS_HOME_SCREEN'
    TV_OS_TOP_SHELF = 'TV_OS_TOP_SHELF'
class InternalBetaState(StringEnum):
    """Lifecycle states of a build for internal beta testing."""
    PROCESSING = 'PROCESSING'
    PROCESSING_EXCEPTION = 'PROCESSING_EXCEPTION'
    MISSING_EXPORT_COMPLIANCE = 'MISSING_EXPORT_COMPLIANCE'
    READY_FOR_BETA_TESTING = 'READY_FOR_BETA_TESTING'
    IN_BETA_TESTING = 'IN_BETA_TESTING'
    EXPIRED = 'EXPIRED'
    IN_EXPORT_COMPLIANCE_REVIEW = 'IN_EXPORT_COMPLIANCE_REVIEW'
class KidsAgeBand(StringEnum):
    """Age bands for kids-category apps."""
    FIVE_AND_UNDER = 'FIVE_AND_UNDER'
    SIX_TO_EIGHT = 'SIX_TO_EIGHT'
    NINE_TO_ELEVEN = 'NINE_TO_ELEVEN'
class PhasedReleaseState(StringEnum):
    """States of a phased app-version release."""
    INACTIVE = 'INACTIVE'
    ACTIVE = 'ACTIVE'
    PAUSED = 'PAUSED'
    COMPLETE = 'COMPLETE'
class Platform(StringEnum):
    """Apple platforms."""
    IOS = 'IOS'
    MAC_OS = 'MAC_OS'
    TV_OS = 'TV_OS'
class PreviewType(StringEnum):
    """Device display classes for app previews."""
    IPHONE_65 = 'IPHONE_65'
    IPHONE_58 = 'IPHONE_58'
    IPHONE_55 = 'IPHONE_55'
    IPHONE_47 = 'IPHONE_47'
    IPHONE_40 = 'IPHONE_40'
    IPHONE_35 = 'IPHONE_35'
    IPAD_PRO_3GEN_129 = 'IPAD_PRO_3GEN_129'
    IPAD_PRO_3GEN_11 = 'IPAD_PRO_3GEN_11'
    IPAD_PRO_129 = 'IPAD_PRO_129'
    IPAD_105 = 'IPAD_105'
    IPAD_97 = 'IPAD_97'
    DESKTOP = 'DESKTOP'
    WATCH_SERIES_4 = 'WATCH_SERIES_4'
    WATCH_SERIES_3 = 'WATCH_SERIES_3'
    APPLE_TV = 'APPLE_TV'
class ScreenshotDisplayType(StringEnum):
    """Device display classes for app and iMessage screenshots."""
    APP_IPHONE_65 = 'APP_IPHONE_65'
    APP_IPHONE_58 = 'APP_IPHONE_58'
    APP_IPHONE_55 = 'APP_IPHONE_55'
    APP_IPHONE_47 = 'APP_IPHONE_47'
    APP_IPHONE_40 = 'APP_IPHONE_40'
    APP_IPHONE_35 = 'APP_IPHONE_35'
    APP_IPAD_PRO_3GEN_129 = 'APP_IPAD_PRO_3GEN_129'
    APP_IPAD_PRO_3GEN_11 = 'APP_IPAD_PRO_3GEN_11'
    APP_IPAD_PRO_129 = 'APP_IPAD_PRO_129'
    APP_IPAD_105 = 'APP_IPAD_105'
    APP_IPAD_97 = 'APP_IPAD_97'
    APP_DESKTOP = 'APP_DESKTOP'
    APP_WATCH_SERIES_7 = 'APP_WATCH_SERIES_7'
    APP_WATCH_SERIES_4 = 'APP_WATCH_SERIES_4'
    APP_WATCH_SERIES_3 = 'APP_WATCH_SERIES_3'
    APP_APPLE_TV = 'APP_APPLE_TV'
    IMESSAGE_APP_IPHONE_65 = 'IMESSAGE_APP_IPHONE_65'
    IMESSAGE_APP_IPHONE_58 = 'IMESSAGE_APP_IPHONE_58'
    IMESSAGE_APP_IPHONE_55 = 'IMESSAGE_APP_IPHONE_55'
    IMESSAGE_APP_IPHONE_47 = 'IMESSAGE_APP_IPHONE_47'
    IMESSAGE_APP_IPHONE_40 = 'IMESSAGE_APP_IPHONE_40'
    IMESSAGE_APP_IPAD_PRO_3GEN_129 = 'IMESSAGE_APP_IPAD_PRO_3GEN_129'
    IMESSAGE_APP_IPAD_PRO_3GEN_11 = 'IMESSAGE_APP_IPAD_PRO_3GEN_11'
    IMESSAGE_APP_IPAD_PRO_129 = 'IMESSAGE_APP_IPAD_PRO_129'
    IMESSAGE_APP_IPAD_105 = 'IMESSAGE_APP_IPAD_105'
    IMESSAGE_APP_IPAD_97 = 'IMESSAGE_APP_IPAD_97'
class UserRole(StringEnum):
    """Roles/permissions assignable to a team user."""
    ADMIN = 'ADMIN'
    FINANCE = 'FINANCE'
    ACCOUNT_HOLDER = 'ACCOUNT_HOLDER'
    SALES = 'SALES'
    MARKETING = 'MARKETING'
    APP_MANAGER = 'APP_MANAGER'
    DEVELOPER = 'DEVELOPER'
    ACCESS_TO_REPORTS = 'ACCESS_TO_REPORTS'
    CUSTOMER_SUPPORT = 'CUSTOMER_SUPPORT'
    IMAGE_MANAGER = 'IMAGE_MANAGER'
    CREATE_APPS = 'CREATE_APPS'
    CLOUD_MANAGED_DEVELOPER_ID = 'CLOUD_MANAGED_DEVELOPER_ID'
    CLOUD_MANAGED_APP_DISTRIBUTION = 'CLOUD_MANAGED_APP_DISTRIBUTION'
class DeviceClass(StringEnum):
    """Classes of registered devices."""
    APPLE_WATCH = 'APPLE_WATCH'
    IPAD = 'IPAD'
    IPHONE = 'IPHONE'
    IPOD = 'IPOD'
    APPLE_TV = 'APPLE_TV'
    MAC = 'MAC'
class DeviceStatus(StringEnum):
    """Whether a registered device is enabled."""
    ENABLED = 'ENABLED'
    DISABLED = 'DISABLED'
class AgeRatingDeclarationLevel(StringEnum):
    """Intensity levels used in age-rating declarations."""
    NONE = 'NONE'
    INFREQUENT_OR_MILD = 'INFREQUENT_OR_MILD'
    FREQUENT_OR_INTENSE = 'FREQUENT_OR_INTENSE'
class AppClipAdvancedExperienceStatus(StringEnum):
    """Statuses of an advanced App Clip experience."""
    RECEIVED = 'RECEIVED'
    DEACTIVATED = 'DEACTIVATED'
    APP_TRANSFER_IN_PROGRESS = 'APP_TRANSFER_IN_PROGRESS'
class AppClipAdvancedExperiencePlaceSource(StringEnum):
    """How an App Clip place's location was determined."""
    CALCULATED = 'CALCULATED'
    MANUALLY_PLACED = 'MANUALLY_PLACED'
class AppClipAdvancedExperiencePlaceMapAction(StringEnum):
    """Map actions available for an App Clip place."""
    BUY_TICKETS = 'BUY_TICKETS'
    VIEW_AVAILABILITY = 'VIEW_AVAILABILITY'
    VIEW_PRICING = 'VIEW_PRICING'
    HOTEL_BOOK_ROOM = 'HOTEL_BOOK_ROOM'
    PARKING_RESERVE_PARKING = 'PARKING_RESERVE_PARKING'
    RESTAURANT_JOIN_WAITLIST = 'RESTAURANT_JOIN_WAITLIST'
    RESTAURANT_ORDER_DELIVERY = 'RESTAURANT_ORDER_DELIVERY'
    RESTAURANT_ORDER_FOOD = 'RESTAURANT_ORDER_FOOD'
    RESTAURANT_ORDER_TAKEOUT = 'RESTAURANT_ORDER_TAKEOUT'
    RESTAURANT_RESERVATION = 'RESTAURANT_RESERVATION'
    SCHEDULE_APPOINTMENT = 'SCHEDULE_APPOINTMENT'
    RESTAURANT_VIEW_MENU = 'RESTAURANT_VIEW_MENU'
    THEATER_NOW_PLAYING = 'THEATER_NOW_PLAYING'
class AppClipAdvancedExperiencePlaceRelationship(StringEnum):
    """Relationship of the submitter to an App Clip place."""
    OWNER = 'OWNER'
    AUTHORIZED = 'AUTHORIZED'
    OTHER = 'OTHER'
class AppClipAdvancedExperiencePlacePhoneNumberType(StringEnum):
    """Phone-number kinds for an App Clip place."""
    FAX = 'FAX'
    LANDLINE = 'LANDLINE'
    MOBILE = 'MOBILE'
    TOLLFREE = 'TOLLFREE'
class AppClipAdvancedExperiencePlaceStatus(StringEnum):
    """Match statuses for an App Clip place."""
    PENDING = 'PENDING'
    MATCHED = 'MATCHED'
    NO_MATCH = 'NO_MATCH'
class AppClipAdvancedExperienceBusinessCategory(StringEnum):
    """Business categories for an advanced App Clip experience."""
    AUTOMOTIVE = 'AUTOMOTIVE'
    BEAUTY = 'BEAUTY'
    BIKES = 'BIKES'
    BOOKS = 'BOOKS'
    CASINO = 'CASINO'
    EDUCATION = 'EDUCATION'
    EDUCATION_JAPAN = 'EDUCATION_JAPAN'
    ENTERTAINMENT = 'ENTERTAINMENT'
    EV_CHARGER = 'EV_CHARGER'
    FINANCIAL_USD = 'FINANCIAL_USD'
    FINANCIAL_CNY = 'FINANCIAL_CNY'
    FINANCIAL_GBP = 'FINANCIAL_GBP'
    FINANCIAL_JPY = 'FINANCIAL_JPY'
    FINANCIAL_EUR = 'FINANCIAL_EUR'
    FITNESS = 'FITNESS'
    FOOD_AND_DRINK = 'FOOD_AND_DRINK'
    GAS = 'GAS'
    GROCERY = 'GROCERY'
    HEALTH_AND_MEDICAL = 'HEALTH_AND_MEDICAL'
    HOTEL_AND_TRAVEL = 'HOTEL_AND_TRAVEL'
    MUSIC = 'MUSIC'
    PARKING = 'PARKING'
    PET_SERVICES = 'PET_SERVICES'
    PROFESSIONAL_SERVICES = 'PROFESSIONAL_SERVICES'
    SHOPPING = 'SHOPPING'
    TICKETING = 'TICKETING'
    TRANSIT = 'TRANSIT'
class AppClipDomainErrorCode(StringEnum):
    """Error codes from App Clip domain (associated-domains) validation."""
    BAD_HTTP_RESPONSE = 'BAD_HTTP_RESPONSE'
    BAD_JSON_CONTENT = 'BAD_JSON_CONTENT'
    BAD_PKCS7_SIGNATURE = 'BAD_PKCS7_SIGNATURE'
    CANNOT_REACH_AASA_FILE = 'CANNOT_REACH_AASA_FILE'
    DNS_ERROR = 'DNS_ERROR'
    INSECURE_REDIRECTS_FORBIDDEN = 'INSECURE_REDIRECTS_FORBIDDEN'
    INVALID_ENTITLEMENT_MISSING_SECTION = 'INVALID_ENTITLEMENT_MISSING_SECTION'
    INVALID_ENTITLEMENT_SYNTAX_ERROR = 'INVALID_ENTITLEMENT_SYNTAX_ERROR'
    INVALID_ENTITLEMENT_UNHANDLED_SECTION = 'INVALID_ENTITLEMENT_UNHANDLED_SECTION'
    INVALID_ENTITLEMENT_UNKNOWN_ID = 'INVALID_ENTITLEMENT_UNKNOWN_ID'
    NETWORK_ERROR = 'NETWORK_ERROR'
    NETWORK_ERROR_TEMPORARY = 'NETWORK_ERROR_TEMPORARY'
    OTHER_ERROR = 'OTHER_ERROR'
    TIMEOUT = 'TIMEOUT'
    TLS_ERROR = 'TLS_ERROR'
    UNEXPECTED_ERROR = 'UNEXPECTED_ERROR'
class AppStoreVersionReleaseType(StringEnum):
    """How an approved version is released."""
    MANUAL = 'MANUAL'
    AFTER_APPROVAL = 'AFTER_APPROVAL'
    SCHEDULED = 'SCHEDULED'
class AppContentRightsDeclaration(StringEnum):
    """Declaration of third-party content usage."""
    DOES_NOT_USE_THIRD_PARTY_CONTENT = 'DOES_NOT_USE_THIRD_PARTY_CONTENT'
    USES_THIRD_PARTY_CONTENT = 'USES_THIRD_PARTY_CONTENT'
class BuildBundleType(StringEnum):
    """Bundle kinds contained in a build."""
    APP = 'APP'
    APP_CLIP = 'APP_CLIP'
class BuildProcessingState(StringEnum):
    """Processing states of an uploaded build."""
    PROCESSING = 'PROCESSING'
    FAILED = 'FAILED'
    INVALID = 'INVALID'
    VALID = 'VALID'
class CiArtifactFileType(StringEnum):
    """Artifact file kinds produced by a CI build."""
    ARCHIVE = 'ARCHIVE'
    ARCHIVE_EXPORT = 'ARCHIVE_EXPORT'
    LOG_BUNDLE = 'LOG_BUNDLE'
    RESULT_BUNDLE = 'RESULT_BUNDLE'
    TEST_PRODUCTS = 'TEST_PRODUCTS'
    XCODEBUILD_PRODUCTS = 'XCODEBUILD_PRODUCTS'
class CiBuildRunStartReason(StringEnum):
    """What triggered a CI build run."""
    GIT_REF_CHANGE = 'GIT_REF_CHANGE'
    MANUAL = 'MANUAL'
    MANUAL_REBUILD = 'MANUAL_REBUILD'
    PULL_REQUEST_OPEN = 'PULL_REQUEST_OPEN'
    PULL_REQUEST_UPDATE = 'PULL_REQUEST_UPDATE'
    SCHEDULE = 'SCHEDULE'
class CiBuildRunCancelReason(StringEnum):
    """Why a CI build run was canceled."""
    AUTOMATICALLY_BY_NEWER_BUILD = 'AUTOMATICALLY_BY_NEWER_BUILD'
    MANUALLY_BY_USER = 'MANUALLY_BY_USER'
class CiIssueType(StringEnum):
    """Issue kinds reported by a CI build."""
    ANALYZER_WARNING = 'ANALYZER_WARNING'
    ERROR = 'ERROR'
    TEST_FAILURE = 'TEST_FAILURE'
    WARNING = 'WARNING'
class CiProductType(StringEnum):
    """Product kinds a CI workflow can build."""
    APP = 'APP'
    FRAMEWORK = 'FRAMEWORK'
class DiagnosticType(StringEnum):
    """Diagnostic signature kinds."""
    DISK_WRITES = 'DISK_WRITES'
class InAppPurchaseType(StringEnum):
    """In-app purchase product kinds."""
    AUTOMATICALLY_RENEWABLE_SUBSCRIPTION = 'AUTOMATICALLY_RENEWABLE_SUBSCRIPTION'
    NON_CONSUMABLE = 'NON_CONSUMABLE'
    CONSUMABLE = 'CONSUMABLE'
    NON_RENEWING_SUBSCRIPTION = 'NON_RENEWING_SUBSCRIPTION'
    FREE_SUBSCRIPTION = 'FREE_SUBSCRIPTION'
class InAppPurchaseState(StringEnum):
    """Lifecycle states of an in-app purchase product."""
    CREATED = 'CREATED'
    DEVELOPER_SIGNED_OFF = 'DEVELOPER_SIGNED_OFF'
    DEVELOPER_ACTION_NEEDED = 'DEVELOPER_ACTION_NEEDED'
    DELETION_IN_PROGRESS = 'DELETION_IN_PROGRESS'
    APPROVED = 'APPROVED'
    DELETED = 'DELETED'
    REMOVED_FROM_SALE = 'REMOVED_FROM_SALE'
    DEVELOPER_REMOVED_FROM_SALE = 'DEVELOPER_REMOVED_FROM_SALE'
    WAITING_FOR_UPLOAD = 'WAITING_FOR_UPLOAD'
    PROCESSING_CONTENT = 'PROCESSING_CONTENT'
    REPLACED = 'REPLACED'
    REJECTED = 'REJECTED'
    WAITING_FOR_SCREENSHOT = 'WAITING_FOR_SCREENSHOT'
    PREPARE_FOR_SUBMISSION = 'PREPARE_FOR_SUBMISSION'
    MISSING_METADATA = 'MISSING_METADATA'
    READY_TO_SUBMIT = 'READY_TO_SUBMIT'
    WAITING_FOR_REVIEW = 'WAITING_FOR_REVIEW'
    IN_REVIEW = 'IN_REVIEW'
    PENDING_DEVELOPER_RELEASE = 'PENDING_DEVELOPER_RELEASE'
class PerfPowerMetricPlatform(StringEnum):
    """Platforms with performance/power metrics available."""
    IOS = 'IOS'
class PerfPowerMetricType(StringEnum):
DISK = 'DISK'
HANG = 'HANG'
BATTERY = 'BATTERY'
LAUNCH = | |
wait["Bot"] == True:
gid = cl.getGroup(to)
cl.sendMessage(to, "[ID Group : ]\n" + gid.id)
elif text.lower() == 'grouppicture':
if msg._from in admin:
if wait["Bot"] == True:
group = cl.getGroup(to)
path = "http://dl.profile.line-cdn.net/" + group.pictureStatus
cl.sendImageWithURL(to, path)
elif text.lower() == 'groupname':
if msg._from in admin:
if wait["Bot"] == True:
gid = cl.getGroup(to)
cl.sendMessage(to, "[Nama Group : ]\n" + gid.name)
elif text.lower() == 'groupticket':
if msg._from in admin:
if wait["Bot"] == True:
if msg.toType == 2:
group = cl.getGroup(to)
if group.preventedJoinByTicket == False:
ticket = cl.reissueGroupTicket(to)
cl.sendMessage(to, "[ Group Ticket ]\nhttps://line.me/R/ti/g/{}".format(str(ticket)))
else:
cl.sendMessage(to, "Grup qr tidak terbuka silahkan buka terlebih dahulu dengan perintah {}openqr".format(str(settings["keyCommand"])))
elif text.lower() == 'groupticket on':
if msg._from in admin:
if wait["Bot"] == True:
if msg.toType == 2:
group = cl.getGroup(to)
if group.preventedJoinByTicket == False:
cl.sendMessage(to, "Grup qr sudah terbuka")
else:
group.preventedJoinByTicket = False
cl.updateGroup(group)
cl.sendMessage(to, "[ Group Ticket ]\nhttps://line.me/R/ti/g/{}".format(str(ticket)))
cl.sendMessage(to, "membuka grup qr")
elif text.lower() == 'groupticket off':
if msg._from in admin:
if wait["Bot"] == True:
if msg.toType == 2:
group = cl.getGroup(to)
if group.preventedJoinByTicket == True:
cl.sendMessage(to, "Grup qr sudah tertutup")
else:
group.preventedJoinByTicket = True
cl.updateGroup(group)
cl.sendMessage(to, "[ Group Ticket ]\nhttps://line.me/R/ti/g/{}".format(str(ticket)))
cl.sendMessage(to, "menutup grup qr")
elif text.lower() == 'ginfo':
if msg._from in admin:
if wait["Bot"] == True:
group = cl.getGroup(to)
try:
gCreator = group.creator.displayName
except:
gCreator = "Tidak ditemukan"
if group.invitee is None:
gPending = "0"
else:
gPending = str(len(group.invitee))
if group.preventedJoinByTicket == True:
gQr = "Tertutup"
gTicket = "Tidak ada"
else:
gQr = "Terbuka"
gTicket = "https://line.me/R/ti/g/{}".format(str(cl.reissueGroupTicket(group.id)))
path = "http://dl.profile.line-cdn.net/" + group.pictureStatus
ret_ = "╔══[ Group Info ]"
ret_ += "\n╠ Nama Group : {}".format(str(group.name))
ret_ += "\n╠ ID Group : {}".format(group.id)
ret_ += "\n╠ Pembuat : {}".format(str(gCreator))
ret_ += "\n╠ Jumlah Member : {}".format(str(len(group.members)))
ret_ += "\n╠ Jumlah Pending : {}".format(gPending)
ret_ += "\n╠ Group Qr : {}".format(gQr)
ret_ += "\n╠ Group Ticket : {}".format(gTicket)
ret_ += "\n╚══[ Group Info ]"
cl.sendMessage(to, str(ret_))
cl.sendImageWithURL(to, path)
elif text.lower() == 'memlist':
if msg._from in admin:
if wait["Bot"] == True:
if msg.toType == 2:
group = cl.getGroup(to)
ret_ = "╔══[ Member List ]"
no = 0 + 1
for mem in group.members:
ret_ += "\n╠ {}. {}".format(str(no), str(mem.displayName))
no += 1
ret_ += "\n╚══[ Total {} ]".format(str(len(group.members)))
cl.sendMessage(to, str(ret_))
elif text.lower() == 'glist':
if msg._from in admin:
if wait["Bot"] == True:
groups = cl.groups
ret_ = "╔══[ Group List ]"
no = 0 + 1
for gid in groups:
group = cl.getGroup(gid)
ret_ += "\n╠ {}. {} | {}".format(str(no), str(group.name), str(len(group.members)))
no += 1
ret_ += "\n╚══[ Total {} Groups ]".format(str(len(groups)))
cl.sendMessage(to, str(ret_))
elif text.lower() == 'sambutan on':
if msg._from in admin:
if wait["Bot"] == True:
if settings["notifikasi"] == True:
if settings["lang"] == "JP":
cl.sendMessage(to,"notif mode on")
else:
settings["notifikasi"] = True
if settings["lang"] == "JP":
cl.sendMessage(to,"notif mode on")
elif text.lower() == 'sambutan off':
if msg._from in admin:
if wait["Bot"] == True:
if settings["notifikasi"] == False:
if settings["lang"] == "JP":
cl.sendMessage(to,"notif mode off")
else:
settings["notifikasi"] = False
if settings["lang"] == "JP":
cl.sendMessage(to,"notif mode off")
elif text.lower() == 'tag':
if msg._from in admin:
if wait["Bot"] == True:
if msg.toType == 0:
sendMention(to, to, "", "")
elif msg.toType == 2:
group = cl.getGroup(to)
midMembers = [contact.mid for contact in group.members]
midSelect = len(midMembers)//20
for mentionMembers in range(midSelect+1):
no = 0
ret_ = "╔══[ Mention Members ]"
dataMid = []
for dataMention in group.members[mentionMembers*20 : (mentionMembers+1)*20]:
dataMid.append(dataMention.mid)
no += 1
ret_ += "\n╠ {}. @!".format(str(no))
ret_ += "\n╚══[ Total {} Members]".format(str(len(dataMid)))
cl.sendMention(to, ret_, dataMid)
elif text.lower() == 'changepictureprofile':
if msg._from in admin:
if wait["Bot"] == True:
settings["changePicture"] = True
cl.sendMessage(to, "Silahkan kirim gambarnya")
elif text.lower() == 'changegrouppicture':
if msg._from in admin:
if wait["Bot"] == True:
if msg.toType == 2:
if to not in settings["changeGroupPicture"]:
settings["changeGroupPicture"].append(to)
cl.sendMessage(to, "Silahkan kirim gambarnya")
elif text.lower() == 'lurking on':
if msg._from in admin:
if wait["Bot"] == True:
tz = pytz.timezone("Asia/Jakarta")
timeNow = datetime.now(tz=tz)
day = ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday","Friday", "Saturday"]
hari = ["Minggu", "Senin", "Selasa", "Rabu", "Kamis", "Jumat", "Sabtu"]
bulan = ["Januari", "Februari", "Maret", "April", "Mei", "Juni", "Juli", "Agustus", "September", "Oktober", "November", "Desember"]
hr = timeNow.strftime("%A")
bln = timeNow.strftime("%m")
for i in range(len(day)):
if hr == day[i]: hasil = hari[i]
for k in range(0, len(bulan)):
if bln == str(k): bln = bulan[k-1]
readTime = hasil + ", " + timeNow.strftime('%d') + " - " + bln + " - " + timeNow.strftime('%Y') + "\nJam : [ " + timeNow.strftime('%H:%M:%S') + " ]"
if msg.to in read['readPoint']:
try:
del read['readPoint'][msg.to]
del read['readMember'][msg.to]
del read['readTime'][msg.to]
except:
pass
read['readPoint'][msg.to] = msg.id
read['readMember'][msg.to] = ""
read['readTime'][msg.to] = datetime.now().strftime('%H:%M:%S')
read['ROM'][msg.to] = {}
with open('read.json', 'w') as fp:
json.dump(read, fp, sort_keys=True, indent=4)
cl.sendMessage(to,"Lurking already on")
else:
try:
del read['readPoint'][msg.to]
del read['readMember'][msg.to]
del read['readTime'][msg.to]
except:
pass
read['readPoint'][msg.to] = msg.id
read['readMember'][msg.to] = ""
read['readTime'][msg.to] = datetime.now().strftime('%H:%M:%S')
read['ROM'][msg.to] = {}
with open('read.json', 'w') as fp:
json.dump(read, fp, sort_keys=True, indent=4)
cl.sendMessage(to, "Set reading point:\n" + readTime)
elif text.lower() == 'lurking off':
if msg._from in admin:
if wait["Bot"] == True:
tz = pytz.timezone("Asia/Jakarta")
timeNow = datetime.now(tz=tz)
day = ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday","Friday", "Saturday"]
hari = ["Minggu", "Senin", "Selasa", "Rabu", "Kamis", "Jumat", "Sabtu"]
bulan = ["Januari", "Februari", "Maret", "April", "Mei", "Juni", "Juli", "Agustus", "September", "Oktober", "November", "Desember"]
hr = timeNow.strftime("%A")
bln = timeNow.strftime("%m")
for i in range(len(day)):
if hr == day[i]: hasil = hari[i]
for k in range(0, len(bulan)):
if bln == str(k): bln = bulan[k-1]
readTime = hasil + ", " + timeNow.strftime('%d') + " - " + bln + " - " + timeNow.strftime('%Y') + "\nJam : [ " + timeNow.strftime('%H:%M:%S') + " ]"
if msg.to not in read['readPoint']:
cl.sendMessage(to,"Lurking already off")
else:
try:
del read['readPoint'][msg.to]
del read['readMember'][msg.to]
del read['readTime'][msg.to]
except:
pass
cl.sendMessage(to, "Delete reading point:\n" + readTime)
elif text.lower() == 'lurking reset':
if msg._from in admin:
if wait["Bot"] == True:
tz = pytz.timezone("Asia/Jakarta")
timeNow = datetime.now(tz=tz)
day = ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday","Friday", "Saturday"]
hari = ["Minggu", "Senin", "Selasa", "Rabu", "Kamis", "Jumat", "Sabtu"]
bulan = ["Januari", "Februari", "Maret", "April", "Mei", "Juni", "Juli", "Agustus", "September", "Oktober", "November", "Desember"]
hr = timeNow.strftime("%A")
bln = timeNow.strftime("%m")
for i in range(len(day)):
if hr == day[i]: hasil = hari[i]
for k in range(0, len(bulan)):
if bln == str(k): bln = bulan[k-1]
readTime = hasil + ", " + timeNow.strftime('%d') + " - " + bln + " - " + timeNow.strftime('%Y') + "\nJam : [ " + timeNow.strftime('%H:%M:%S') + " ]"
if msg.to in read["readPoint"]:
try:
del read["readPoint"][msg.to]
del read["readMember"][msg.to]
del read["readTime"][msg.to]
except:
pass
cl.sendMessage(to, "Reset reading point:\n" + readTime)
else:
cl.sendMessage(to, "Lurking belum diaktifkan ngapain di reset?")
elif text.lower() == 'lurking':
if msg._from in admin:
if wait["Bot"] == True:
tz = pytz.timezone("Asia/Jakarta")
timeNow = datetime.now(tz=tz)
day = ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday","Friday", "Saturday"]
hari = ["Minggu", "Senin", "Selasa", "Rabu", "Kamis", "Jumat", "Sabtu"]
bulan = ["Januari", "Februari", "Maret", "April", "Mei", "Juni", "Juli", "Agustus", "September", "Oktober", "November", "Desember"]
hr = timeNow.strftime("%A")
bln = timeNow.strftime("%m")
for i in range(len(day)):
if hr == day[i]: hasil = hari[i]
for k in range(0, len(bulan)):
if bln == str(k): bln = bulan[k-1]
readTime = hasil + ", " + timeNow.strftime('%d') + " - " + bln + " - " + timeNow.strftime('%Y') + "\nJam : [ " + timeNow.strftime('%H:%M:%S') + " ]"
if receiver in read['readPoint']:
if read["ROM"][receiver].items() == []:
cl.sendMessage(receiver,"[ Reader ]:\nNone")
else:
chiya = []
for rom in read["ROM"][receiver].items():
chiya.append(rom[1])
cmem = cl.getContacts(chiya)
zx = ""
zxc = ""
zx2 = []
xpesan = '[ Reader ]:\n'
for x in range(len(cmem)):
xname = str(cmem[x].displayName)
pesan = ''
pesan2 = pesan+"@c\n"
xlen = str(len(zxc)+len(xpesan))
xlen2 = str(len(zxc)+len(pesan2)+len(xpesan)-1)
zx = {'S':xlen, 'E':xlen2, 'M':cmem[x].mid}
zx2.append(zx)
zxc += pesan2
text = xpesan+ zxc + "\n[ Lurking time ]: \n" + readTime
try:
cl.sendMessage(receiver, text, contentMetadata={'MENTION':str('{"MENTIONEES":'+json.dumps(zx2).replace(' ','')+'}')}, contentType=0)
except Exception as error:
print (error)
pass
else:
cl.sendMessage(receiver,"Lurking has not been set.")
elif text.lower() == 'sider on':
if msg._from in admin:
if wait["Bot"] == True:
try:
del cctv['point'][msg.to]
| |
<filename>pycode/tinyflow/VGG16_test_leo.py
import os
GPU = 0
os.environ['CUDA_VISIBLE_DEVICES'] = f'{GPU}'
import sys
sys.path.append('../../')
from pycode.tinyflow import autodiff as ad, autodiff
from pycode.tinyflow.get_result import get_result
from util import *
from line_profiler import LineProfiler
class VGG16():
    def __init__(self, num_step, batch_size, log_path, job_id):
        """Store configuration for a VGG16 job; no graph is built here.

        Args:
            num_step: number of training steps to run.
            batch_size: mini-batch size.
            log_path: path passed to the Executor for logging.
            job_id: identifier of this job.
        """
        self.job_id = job_id
        self.dropout_rate = 0.5  # rate passed to the fc6/fc7 dropout ops
        self.image_channel = 3  # RGB input
        self.image_size = 224  # square input resolution (224x224)
        self.num_step = num_step
        self.batch_size = batch_size
        self.log_path = log_path
        self.executor_ctx = None  # set later by run()
        self.n_class = None  # set later by run()
        self.ad = ad  # autodiff module handle used to build the graph
        self.top_control_queue = None  # set later by run()
        self.top_message_queue = None  # set later by run()
def get_predict_results(self, n_class, **kwargs):
X = self.ad.Placeholder("X")
y_ = self.ad.Placeholder("y_")
W1_1 = self.ad.Variable("W1_1")
W1_2 = self.ad.Variable("W1_2")
W2_1 = self.ad.Variable("W2_1")
W2_2 = self.ad.Variable("W2_2")
W3_1 = self.ad.Variable("W3_1")
W3_2 = self.ad.Variable("W3_2")
W3_3 = self.ad.Variable("W3_3")
W4_1 = self.ad.Variable("W4_1")
W4_2 = self.ad.Variable("W4_2")
W4_3 = self.ad.Variable("W4_3")
W5_1 = self.ad.Variable("W5_1")
W5_2 = self.ad.Variable("W5_2")
W5_3 = self.ad.Variable("W5_3")
W6 = self.ad.Variable("W6")
W7 = self.ad.Variable("W7")
W8 = self.ad.Variable("W8")
b6 = self.ad.Variable("b6")
b7 = self.ad.Variable("b7")
b8 = self.ad.Variable("b8")
# conv 1
conv1_1 = self.ad.convolution_2d_forward_op(X, W1_1, "NCHW", "SAME", 1, 1)
act1_1 = self.ad.activation_forward_op(conv1_1, "NCHW", "relu")
conv1_2 = self.ad.convolution_2d_forward_op(act1_1, W1_2, "NCHW", "SAME", 1, 1)
act1_2 = self.ad.activation_forward_op(conv1_2, "NCHW", "relu")
pool1 = self.ad.pooling_2d_forward_op(act1_2, "NCHW", "max", 0, 0, 2, 2, 2, 2)
# conv 2
conv2_1 = self.ad.convolution_2d_forward_op(pool1, W2_1, "NCHW", "SAME", 1, 1)
act2_1 = self.ad.activation_forward_op(conv2_1, "NCHW", "relu")
conv2_2 = self.ad.convolution_2d_forward_op(act2_1, W2_2, "NCHW", "SAME", 1, 1)
act2_2 = self.ad.activation_forward_op(conv2_2, "NCHW", "relu")
pool2 = self.ad.pooling_2d_forward_op(act2_2, "NCHW", "max", 0, 0, 2, 2, 2, 2)
# conv 3
conv3_1 = self.ad.convolution_2d_forward_op(pool2, W3_1, "NCHW", "SAME", 1, 1)
act3_1 = self.ad.activation_forward_op(conv3_1, "NCHW", "relu")
conv3_2 = self.ad.convolution_2d_forward_op(act3_1, W3_2, "NCHW", "SAME", 1, 1)
act3_2 = self.ad.activation_forward_op(conv3_2, "NCHW", "relu")
conv3_3 = self.ad.convolution_2d_forward_op(act3_2, W3_3, "NCHW", "SAME", 1, 1)
act3_3 = self.ad.activation_forward_op(conv3_3, "NCHW", "relu")
pool3 = self.ad.pooling_2d_forward_op(act3_3, "NCHW", "max", 0, 0, 2, 2, 2, 2)
# conv 4
conv4_1 = self.ad.convolution_2d_forward_op(pool3, W4_1, "NCHW", "SAME", 1, 1)
act4_1 = self.ad.activation_forward_op(conv4_1, "NCHW", "relu")
conv4_2 = self.ad.convolution_2d_forward_op(act4_1, W4_2, "NCHW", "SAME", 1, 1)
act4_2 = self.ad.activation_forward_op(conv4_2, "NCHW", "relu")
conv4_3 = self.ad.convolution_2d_forward_op(act4_2, W4_3, "NCHW", "SAME", 1, 1)
act4_3 = self.ad.activation_forward_op(conv4_3, "NCHW", "relu")
pool4 = self.ad.pooling_2d_forward_op(act4_3, "NCHW", "max", 0, 0, 2, 2, 2, 2)
# conv 5
conv5_1 = self.ad.convolution_2d_forward_op(pool4, W5_1, "NCHW", "SAME", 1, 1)
act5_1 = self.ad.activation_forward_op(conv5_1, "NCHW", "relu")
conv5_2 = self.ad.convolution_2d_forward_op(act5_1, W5_2, "NCHW", "SAME", 1, 1)
act5_2 = self.ad.activation_forward_op(conv5_2, "NCHW", "relu")
conv5_3 = self.ad.convolution_2d_forward_op(act5_2, W5_3, "NCHW", "SAME", 1, 1)
act5_3 = self.ad.activation_forward_op(conv5_3, "NCHW", "relu")
pool5 = self.ad.pooling_2d_forward_op(act5_3, "NCHW", "max", 0, 0, 2, 2, 2, 2)
# fc6
pool5_flat = self.ad.flatten_op(pool5)
fc6 = self.ad.dense(pool5_flat, W6, b6)
act6 = self.ad.fullyactivation_forward_op(fc6, "NCHW", "relu")
drop6 = self.ad.fullydropout_forward_op(act6, "NCHW", self.dropout_rate)
# fc7
fc7 = self.ad.dense(drop6, W7, b7)
act7 = self.ad.fullyactivation_forward_op(fc7, "NCHW", "relu")
drop7 = self.ad.fullydropout_forward_op(act7, "NCHW", self.dropout_rate)
# fc8
fc8 = self.ad.dense(drop7, W8, b8)
bn8 = self.ad.fullybn_forward_op(fc8, "NCHW")
y = self.ad.fullyactivation_forward_op(bn8, "NCHW", "softmax")
loss = self.ad.crossEntropy_loss(y, y_)
W1_1_val = (64, self.image_channel, 3, 3)
W1_2_val = (64, 64, 3, 3)
W2_1_val = (128, 64, 3, 3)
W2_2_val = (128, 128, 3, 3)
W3_1_val = (256, 128, 3, 3)
W3_2_val = (256, 256, 3, 3)
W3_3_val = (256, 256, 3, 3)
W4_1_val = (512, 256, 3, 3)
W4_2_val = (512, 512, 3, 3)
W4_3_val = (512, 512, 3, 3)
W5_1_val = (512, 512, 3, 3)
W5_2_val = (512, 512, 3, 3)
W5_3_val = (512, 512, 3, 3)
W6_val = (512 * int(self.image_size / 32) * int(self.image_size / 32), 4096)
W7_val = (4096, 4096)
W8_val = (4096, n_class)
b6_val = (4096,)
b7_val = (4096,)
b8_val = (n_class,)
# 只声明,不操作
executor = self.ad.Executor(loss, y, 0.001, top_control_queue=None, top_message_queue=None, log_path=self.log_path, **kwargs)
feed_dict = {
W1_1: W1_1_val,
W1_2: W1_2_val,
W2_1: W2_1_val,
W2_2: W2_2_val,
W3_1: W3_1_val,
W3_2: W3_2_val,
W3_3: W3_3_val,
W4_1: W4_1_val,
W4_2: W4_2_val,
W4_3: W4_3_val,
W5_1: W5_1_val,
W5_2: W5_2_val,
W5_3: W5_3_val,
W6: W6_val,
W7: W7_val,
W8: W8_val,
b6: b6_val,
b7: b7_val,
b8: b8_val
}
feed_dict_mv = {}
for key, value in feed_dict.items():
m_key = executor.Variable_node_to_mv[key][0]
m_val = value
v_key = executor.Variable_node_to_mv[key][1]
v_val = value
feed_dict_mv.update({m_key: m_val, v_key: v_val})
X_val = (self.batch_size, self.image_channel, self.image_size, self.image_size) # number = batch_size channel = 3 image_size = 224*224
y_val = (self.batch_size, 1000) # n_class = 1000
feed_dict.update(feed_dict_mv)
feed_dict[X] = X_val
feed_dict[y_] = y_val
executor.init_operator_latency(feed_dict_sample=feed_dict, **kwargs)
return executor.predict_results
def run(self, executor_ctx, top_control_queue, top_message_queue, n_class, X_val, y_val, **kwargs):
self.n_class = n_class
self.top_control_queue = top_control_queue
self.top_message_queue = top_message_queue
self.executor_ctx = executor_ctx
X = self.ad.Placeholder("X")
y_ = self.ad.Placeholder("y_")
W1_1 = self.ad.Variable("W1_1")
W1_2 = self.ad.Variable("W1_2")
W2_1 = self.ad.Variable("W2_1")
W2_2 = self.ad.Variable("W2_2")
W3_1 = self.ad.Variable("W3_1")
W3_2 = self.ad.Variable("W3_2")
W3_3 = self.ad.Variable("W3_3")
W4_1 = self.ad.Variable("W4_1")
W4_2 = self.ad.Variable("W4_2")
W4_3 = self.ad.Variable("W4_3")
W5_1 = self.ad.Variable("W5_1")
W5_2 = self.ad.Variable("W5_2")
W5_3 = self.ad.Variable("W5_3")
W6 = self.ad.Variable("W6")
W7 = self.ad.Variable("W7")
W8 = self.ad.Variable("W8")
b6 = self.ad.Variable("b6")
b7 = self.ad.Variable("b7")
b8 = self.ad.Variable("b8")
# conv 1
conv1_1 = self.ad.convolution_2d_forward_op(X, W1_1, "NCHW", "SAME", 1, 1)
act1_1 = self.ad.activation_forward_op(conv1_1, "NCHW", "relu")
conv1_2 = self.ad.convolution_2d_forward_op(act1_1, W1_2, "NCHW", "SAME", 1, 1)
act1_2 = self.ad.activation_forward_op(conv1_2, "NCHW", "relu")
pool1 = self.ad.pooling_2d_forward_op(act1_2, "NCHW", "max", 0, 0, 2, 2, 2, 2)
# conv 2
conv2_1 = self.ad.convolution_2d_forward_op(pool1, W2_1, "NCHW", "SAME", 1, 1)
act2_1 = self.ad.activation_forward_op(conv2_1, "NCHW", "relu")
conv2_2 = self.ad.convolution_2d_forward_op(act2_1, W2_2, "NCHW", "SAME", 1, 1)
act2_2 = self.ad.activation_forward_op(conv2_2, "NCHW", "relu")
pool2 = self.ad.pooling_2d_forward_op(act2_2, "NCHW", "max", 0, 0, 2, 2, 2, 2)
# conv 3
conv3_1 = self.ad.convolution_2d_forward_op(pool2, W3_1, "NCHW", "SAME", 1, 1)
act3_1 = self.ad.activation_forward_op(conv3_1, "NCHW", "relu")
conv3_2 = self.ad.convolution_2d_forward_op(act3_1, W3_2, "NCHW", "SAME", 1, 1)
act3_2 = self.ad.activation_forward_op(conv3_2, "NCHW", "relu")
conv3_3 = self.ad.convolution_2d_forward_op(act3_2, W3_3, "NCHW", "SAME", 1, 1)
act3_3 = self.ad.activation_forward_op(conv3_3, "NCHW", "relu")
pool3 = self.ad.pooling_2d_forward_op(act3_3, "NCHW", "max", 0, 0, 2, 2, 2, 2)
# conv 4
conv4_1 = self.ad.convolution_2d_forward_op(pool3, W4_1, "NCHW", "SAME", 1, 1)
act4_1 = self.ad.activation_forward_op(conv4_1, "NCHW", "relu")
conv4_2 = self.ad.convolution_2d_forward_op(act4_1, W4_2, "NCHW", "SAME", 1, 1)
act4_2 = self.ad.activation_forward_op(conv4_2, "NCHW", "relu")
conv4_3 = self.ad.convolution_2d_forward_op(act4_2, W4_3, "NCHW", "SAME", 1, 1)
act4_3 = self.ad.activation_forward_op(conv4_3, "NCHW", "relu")
pool4 = self.ad.pooling_2d_forward_op(act4_3, "NCHW", "max", 0, 0, 2, 2, 2, 2)
# conv 5
conv5_1 = self.ad.convolution_2d_forward_op(pool4, W5_1, "NCHW", "SAME", 1, 1)
act5_1 = self.ad.activation_forward_op(conv5_1, "NCHW", "relu")
conv5_2 = self.ad.convolution_2d_forward_op(act5_1, W5_2, "NCHW", "SAME", 1, 1)
act5_2 = self.ad.activation_forward_op(conv5_2, "NCHW", "relu")
conv5_3 = self.ad.convolution_2d_forward_op(act5_2, W5_3, "NCHW", "SAME", 1, 1)
act5_3 = self.ad.activation_forward_op(conv5_3, "NCHW", "relu")
pool5 = self.ad.pooling_2d_forward_op(act5_3, "NCHW", "max", 0, 0, 2, 2, 2, 2)
# fc6
pool5_flat = self.ad.flatten_op(pool5)
fc6 = self.ad.dense(pool5_flat, W6, b6)
act6 = self.ad.fullyactivation_forward_op(fc6, "NCHW", "relu")
drop6 = self.ad.fullydropout_forward_op(act6, "NCHW", self.dropout_rate)
# fc7
fc7 = self.ad.dense(drop6, W7, b7)
act7 = self.ad.fullyactivation_forward_op(fc7, "NCHW", "relu")
drop7 = self.ad.fullydropout_forward_op(act7, "NCHW", self.dropout_rate)
# fc8
fc8 = self.ad.dense(drop7, W8, b8)
bn8 = self.ad.fullybn_forward_op(fc8, "NCHW")
y = self.ad.fullyactivation_forward_op(bn8, "NCHW", "softmax")
loss = self.ad.crossEntropy_loss(y, y_)
W1_1_val = ndarray.array(np.random.normal(0.0, 0.1, (64, self.image_channel, 3, 3)), executor_ctx)
W1_2_val = ndarray.array(np.random.normal(0.0, 0.1, (64, 64, 3, 3)), executor_ctx)
W2_1_val = ndarray.array(np.random.normal(0.0, 0.1, (128, 64, 3, 3)), executor_ctx)
W2_2_val = ndarray.array(np.random.normal(0.0, 0.1, (128, 128, 3, 3)), executor_ctx)
W3_1_val = ndarray.array(np.random.normal(0.0, 0.1, (256, 128, 3, 3)), executor_ctx)
W3_2_val = ndarray.array(np.random.normal(0.0, 0.1, (256, 256, 3, 3)), executor_ctx)
W3_3_val = ndarray.array(np.random.normal(0.0, 0.1, (256, 256, 3, 3)), executor_ctx)
W4_1_val = ndarray.array(np.random.normal(0.0, 0.1, (512, 256, 3, 3)), executor_ctx)
W4_2_val = ndarray.array(np.random.normal(0.0, 0.1, (512, 512, 3, 3)), executor_ctx)
W4_3_val = ndarray.array(np.random.normal(0.0, 0.1, (512, 512, 3, 3)), executor_ctx)
W5_1_val = ndarray.array(np.random.normal(0.0, 0.1, (512, 512, 3, 3)), executor_ctx)
W5_2_val = ndarray.array(np.random.normal(0.0, 0.1, (512, 512, 3, 3)), executor_ctx)
W5_3_val = ndarray.array(np.random.normal(0.0, 0.1, (512, 512, 3, 3)), executor_ctx)
W6_val = ndarray.array(np.random.normal(0.0, 0.1, (512 * int(self.image_size / 32) * int(self.image_size / 32), 4096)), executor_ctx)
W7_val = ndarray.array(np.random.normal(0.0, 0.1, (4096, 4096)) * 0.001, executor_ctx)
W8_val = ndarray.array(np.random.normal(0.0, 0.1, (4096, n_class)) * 0.001, executor_ctx)
b6_val = ndarray.array(np.ones(4096) * 0.1, executor_ctx)
b7_val = ndarray.array(np.ones(4096) * 0.1, executor_ctx)
b8_val = ndarray.array(np.ones(n_class) * 0.1, executor_ctx)
# 只声明,不操作
executor = self.ad.Executor(loss, y, 0.001, top_control_queue=top_control_queue, top_message_queue=top_message_queue, log_path=self.log_path)
feed_dict = {
W1_1: W1_1_val,
W1_2: W1_2_val,
W2_1: W2_1_val,
W2_2: W2_2_val,
W3_1: W3_1_val,
W3_2: W3_2_val,
W3_3: W3_3_val,
W4_1: W4_1_val,
W4_2: W4_2_val,
W4_3: W4_3_val,
W5_1: W5_1_val,
W5_2: W5_2_val,
W5_3: W5_3_val,
W6: W6_val,
W7: W7_val,
W8: W8_val,
b6: b6_val,
b7: b7_val,
b8: b8_val
}
feed_dict_mv = {}
for key, value in feed_dict.items():
m_key = executor.Variable_node_to_mv[key][0]
m_val = ndarray.array(np.zeros(shape=value.shape), executor_ctx)
v_key = executor.Variable_node_to_mv[key][1]
v_val = ndarray.array(np.zeros(shape=value.shape), executor_ctx)
feed_dict_mv.update({m_key: m_val, v_key: v_val})
feed_dict.update(feed_dict_mv)
if 'predict_results' in kwargs.keys():
executor.predict_results = | |
<gh_stars>1-10
"""
A ctags wrapper, parser and sorter.
"""
import codecs
import re
import os
import sys
import subprocess
import bisect
import mmap
if sys.version_info < (2, 7):
from helpers.check_output import check_output
else:
from subprocess import check_output
#
# Constants
#
# Matches one line of a ctags file: symbol, filename, the EX command used to
# locate the symbol (a /regex/, ?regex? or line number), the tag type, and an
# optional tab-separated "fields" blob of key:value pairs.
TAGS_RE = re.compile(
    r'(?P<symbol>[^\t]+)\t'
    r'(?P<filename>[^\t]+)\t'
    r'(?P<ex_command>(/.+/|\?.+\?|\d+));"\t'
    r'(?P<type>[^\t\r\n]+)'
    r'(?:\t(?P<fields>.*))?'
)
# column indexes into a tab-split tag line
SYMBOL = 0
FILENAME = 1
MATCHES_STARTWITH = 'starts_with'
# field keys promoted to the front of a tag path, in precedence order
PATH_ORDER = [
    'function', 'class', 'struct',
]
# field keys excluded when building a tag path
PATH_IGNORE_FIELDS = (
    'file', 'access', 'signature', 'language', 'line', 'inherits')
# separators a tag path may be split on
TAG_PATH_SPLITTERS = ('/', '.', '::', ':')
#
# Functions
#
# Helper functions
def splits(string, *splitters):
    """
    Recursively split a string on a sequence of splitters.

    :param string: string to split
    :param splitters: substrings to split ``string`` on, applied in order

    :returns: generator yielding the non-empty fragments of ``string``
        after splitting on every splitter in turn
    """
    if not splitters:
        # base case: no splitters left; emit the fragment if non-empty
        if string:
            yield string
        return
    first, rest = splitters[0], splitters[1:]
    for piece in string.split(first):
        # apply the remaining splitters to each fragment
        for fragment in splits(piece, *rest):
            yield fragment
# Tag processing functions
def parse_tag_lines(lines, order_by='symbol', tag_class=None, filters=None):
    """
    Parse and sort a list of ctags lines.

    Each line is matched against ``TAGS_RE``, post-processed into a tag
    dictionary, optionally wrapped in ``tag_class`` and filtered, then
    grouped by the ``order_by`` element.

    :param lines: list of tag lines (raw text or ``Tag`` objects)
    :param order_by: tag element whose value keys the result dict
    :param tag_class: optional class to wrap each resulting dictionary in
    :param filters: optional list of ``{element: regex}`` dicts; a tag
        matching any of them is dropped

    :returns: dict mapping each ``order_by`` value to its list of tags
    """
    lookup = {}
    for entry in lines:
        if isinstance(entry, Tag):  # accept Tag objects as well as raw text
            entry = entry.line
        match = TAGS_RE.search(entry.rstrip('\r\n'))
        if not match:
            continue
        # convert the regex result to a dict and expand its elements
        tag = post_process_tag(match.groupdict())
        if tag_class is not None:  # if 'casting' to a class
            tag = tag_class(tag)
        if filters and any(
                re.match(pattern, tag[key])
                for filt in filters
                for key, pattern in filt.items()):
            continue  # a filter matched; drop this tag
        lookup.setdefault(tag[order_by], []).append(tag)
    return lookup
def post_process_tag(tag):
    """
    Expand the raw regex-captured elements of a tag.

    Unescapes or passes through the 'ex_command' element, expands the
    'fields' string into individual key-value entries (plus a 'field_keys'
    list), and derives a 'tag_path' tuple of the form
    ``(filename, [class], symbol)``.

    Example: a Java tag for ``getSum`` in ``DemoClass`` gains entries such
    as ``class='DemoClass'``, ``field_keys=['class', 'file']`` and
    ``tag_path=('DemoClass.java', 'DemoClass', 'getSum')``.

    :param tag: dict containing the unprocessed tag
    :returns: the same dict, mutated with the processed entries
    """
    tag['ex_command'] = process_ex_cmd(tag)
    # expand the fields blob first: create_tag_path relies on 'field_keys'
    tag.update(process_fields(tag))
    tag.update(create_tag_path(tag))
    return tag
def process_ex_cmd(tag):
    """
    Process the 'ex_command' element of a tag dictionary.

    The EX command is either a line number or a search pattern used to find
    the symbol declaration. Line numbers pass through unchanged; patterns
    have their first and last two characters (the pattern delimiters)
    stripped and any escaped ``$ / ^ \\`` characters unescaped.

    :param tag: dict containing a tag
    :returns: the processed 'ex_command' value
    """
    ex_cmd = tag.get('ex_command')
    if ex_cmd.isdigit():
        return ex_cmd  # plain line number; nothing to unescape
    return re.sub(r"\\(\$|/|\^|\\)", r'\1', ex_cmd[2:-2])
def process_fields(tag):
    """
    Process the 'fields' element of a tag dictionary.

    The fields string is a tab-separated list of ``key:value`` pairs. Each
    pair becomes an entry of the returned dict; a sorted list of the keys
    is added under 'field_keys'.

    :param tag: dict containing a tag
    :returns: dict of parsed key-value pairs plus the 'field_keys' list,
        or an empty dict when the tag has no fields
    """
    fields = tag.get('fields')
    if not fields:
        return {}
    parsed = {}
    for pair in fields.split('\t'):
        key, value = pair.split(':', 1)
        parsed[key] = value
    parsed['field_keys'] = sorted(parsed)
    return parsed
def create_tag_path(tag):
    """
    Create a 'tag_path' entry for a tag dictionary.

    Builds a dotted path of the form::

        [function] [class] [struct] [other fields] symbol

    from the tag's field values (keys in ``PATH_IGNORE_FIELDS`` are
    skipped), splits it on ``TAG_PATH_SPLITTERS`` and prepends the tag's
    filename.

    :param tag: dict containing a tag
    :returns: dict with the single 'tag_path' tuple entry
    """
    remaining = tag.get('field_keys', [])[:]
    ordered = []
    # promote the PATH_ORDER keys to the front, in their defined precedence
    for key in PATH_ORDER:
        if key in remaining:
            ordered.append(key)
            remaining.remove(key)
    ordered.extend(remaining)
    # dot-join the relevant field values, ending with the symbol itself
    parts = [tag.get(key) for key in ordered if key not in PATH_IGNORE_FIELDS]
    parts.append(tag.get('symbol'))
    joined = '.'.join(parts)
    # split on all path separators and prepend the tag's filename
    pieces = [tag.get('filename')] + list(splits(joined, *TAG_PATH_SPLITTERS))
    return {'tag_path': tuple(pieces)}
# Tag building/sorting functions
def build_ctags(path, cmd=None, tag_file=None, recursive=False, opts=None):
    """
    Execute the ``ctags`` command and re-sort the resulting tag file.

    :param path: path to file or directory (with all files) to generate
        ctags for.
    :param cmd: path to the ctags executable; defaults to ``ctags`` on the
        system path
    :param tag_file: filename to use for the tag file. Defaults to ``tags``
    :param recursive: specify if search should be recursive in directory
        given by path. This overrides filename specified by ``path``
    :param opts: list of additional options to pass to the ctags executable

    :raises IOError: if ``path`` does not exist or is inaccessible

    :returns: path to the generated ``tag_file``
    """
    # build the CTags command
    if cmd:
        cmd = [cmd]
    else:
        cmd = ['ctags']
    if not os.path.exists(path):
        raise IOError('\'path\' is not at valid directory or file path, or '
                      'is not accessible')
    # run ctags from the file's directory (or the directory itself)
    if os.path.isfile(path):
        cwd = os.path.dirname(path)
    else:
        cwd = path
    if tag_file:
        cmd.append('-f {0}'.format(tag_file))
    if opts:
        if type(opts) == list:
            cmd.extend(opts)
        else:  # *should* be a list, but better safe than sorry
            cmd.append(opts)
    if recursive:  # ignore any file specified in path if recursive set
        cmd.append('-R')
    elif os.path.isfile(path):
        filename = os.path.basename(path)
        cmd.append(filename)
    else:  # search all files in current directory
        cmd.append(os.path.join(path, '*'))
    # workaround for the issue described here:
    # http://bugs.python.org/issue6689
    if os.name == 'posix':
        cmd = ' '.join(cmd)
    # execute the command
    # NOTE(review): shell=True with a joined command string means special
    # characters in ``path``/``opts`` reach the shell unescaped -- fine for
    # trusted editor-local paths, but confirm no untrusted input gets here
    check_output(cmd, cwd=cwd, shell=True, stdin=subprocess.PIPE,
                 stderr=subprocess.STDOUT)
    if not tag_file:  # Exuberant ctags defaults to ``tags`` filename.
        tag_file = os.path.join(cwd, 'tags')
    else:
        if os.path.dirname(tag_file) != cwd:
            tag_file = os.path.join(cwd, tag_file)
    # re-sort ctag file in filename order to improve search performance
    resort_ctags(tag_file)
    return tag_file
def resort_ctags(tag_file):
"""
Rearrange ctags file for speed.
Resorts (re-sort) a CTag file in order of file. This improves searching
performance when searching tags by file as a binary search can be used.
The algorithm works as so:
For each line in the tag file
Read the file name (``file_name``) the tag belongs to
If not exists, create an empty array and store in the
dictionary with the file name as key
Save the line to this list
Create a new ``[tagfile]_sorted_by_file`` file
For each key in the sorted dictionary
For each line in the list indicated by the key
Split the line on tab character
Remove the prepending ``.\`` from the ``file_name`` part of
the tag
Join the line again and write the ``sorted_by_file`` file
:param tag_file: The location of the tagfile to be sorted
:returns: None
"""
keys = {}
with codecs.open(tag_file, encoding='utf-8', errors='replace') as file_:
for line in file_:
keys.setdefault(line.split('\t')[FILENAME], []).append(line)
with codecs.open(tag_file+'_sorted_by_file', 'w', encoding='utf-8',
errors='replace') as file_:
for k in sorted(keys):
for line in keys[k]:
split = line.split('\t')
| |
from __future__ import print_function
import functools
import tensorflow as tf
import numpy as np
import math
import numbers
def stddev(init_scale, feature_size):
    """Return sqrt(init_scale / feature_size), used as a weight-init stddev."""
    ratio = init_scale / feature_size
    return np.sqrt(ratio)
def weight_bias(W_shape, b_shape, bias_init=0.1):
    """Create a weight/bias variable pair.

    The weight is drawn from a truncated normal (stddev 0.1); the bias is a
    constant filled with ``bias_init``.
    """
    weight = tf.Variable(
        tf.random.truncated_normal(W_shape, stddev=0.1), name='weight')
    bias = tf.Variable(
        tf.constant(bias_init, shape=b_shape), name='bias')
    return weight, bias
def residual(x, activation):
    """
    Residual fully connected layer: ``x + dense(x)``.

    The dense branch keeps the same width as ``x`` so the skip connection
    can be added element-wise.
    """
    width = int(x.get_shape()[-1])
    branch = tf.compat.v1.layers.dense(
        inputs=x,
        units=width,
        kernel_initializer=tf.compat.v1.truncated_normal_initializer(
            stddev=stddev(1.0, width)),
        bias_initializer=tf.compat.v1.constant_initializer(0.1),
        activation=activation,
    )
    return branch + x
def highway(x, activation=None, carry_bias=-1.0):
    """Highway Network layer (cf. http://arxiv.org/abs/1505.00387).

    t = sigmoid(W_T*x + b_T)
    y = t * g(Wx + b) + (1 - t) * x

    where g is the nonlinearity, t is the transform gate, and (1 - t) is the
    carry gate. The weights (W_T, W) in a highway layer must have the same
    size, but fully-connected layers can be used to change dimensionality.
    With a larger negative carry_bias, more of the input (x) is kept in the
    final output of the highway layer.
    """
    with tf.compat.v1.variable_scope("highway"):
        size = int(x.get_shape()[1])
        # plain transform weights: truncated-normal W, constant 0.1 bias
        W, b = weight_bias([size, size], [size])
        with tf.compat.v1.name_scope('transform_gate'):
            # gate weights; bias starts at carry_bias so the gate initially
            # favours carrying the input through
            W_T, b_T = weight_bias([size, size], [size], bias_init=carry_bias)
        H = tf.matmul(x, W) + b
        if activation is not None:
            H = activation(H, name='activation')
        T = tf.sigmoid(tf.matmul(x, W_T) + b_T, name='transform_gate')
        C = tf.subtract(1.0, T, name="carry_gate")
        # y = (H * T) + (x * C)
        y = tf.add(tf.multiply(H, T), tf.multiply(x, C), name='y')
        return y
def dense_block(input, width):
    """DenseNet-style block: a linear dense layer whose output is
    concatenated onto its input along the last axis."""
    in_width = int(input.get_shape()[-1])
    layer = tf.compat.v1.layers.dense(
        inputs=input,
        units=width,
        kernel_initializer=tf.compat.v1.truncated_normal_initializer(
            stddev=stddev(1.0, in_width)),
        bias_initializer=tf.compat.v1.constant_initializer(0.1),
    )
    return tf.concat([input, layer], -1)
def primes(n):
    """Return the prime factorisation of ``n`` as an ascending list,
    with repeated factors included (e.g. 12 -> [2, 2, 3])."""
    factors = []
    divisor = 2
    while divisor * divisor <= n:
        while n % divisor == 0:
            factors.append(divisor)
            n //= divisor
        divisor += 1
    if n > 1:
        factors.append(n)  # whatever remains is itself prime
    return factors
def numLayers(d1, d2=None):
    """Number of ceil-halvings needed to reduce ``d1`` to 1; when ``d2``
    is given, the max over both dimensions."""
    count = 0
    size = d1
    while size > 1:
        size = math.ceil(size / 2.0)
        count += 1
    other = numLayers(d2) if d2 is not None else 0
    return max(count, other)
def factorize(feature_size):
    """Split ``feature_size`` into a (height, width) pair for 2D reshaping.

    Height is the product of all prime factors except the largest; width is
    the largest prime factor. The pair is returned largest-first.

    :param feature_size: integer to factorize
    :raises ValueError: if ``feature_size`` has fewer than two prime
        factors (i.e. it is prime or < 4) and cannot form a 2D shape
    :returns: tuple of two ints, descending
    """
    factors = sorted(primes(feature_size))
    if len(factors) < 2:
        raise ValueError('feature size is not factorizable.')
    # BUG FIX: bare ``reduce`` is not a builtin on Python 3; use
    # functools.reduce (functools is already imported at module top).
    rest = functools.reduce(lambda x, y: x * y, factors[:-1])
    return tuple(sorted([rest, factors[-1]], reverse=True))
def lazy_property(function):
    """Decorator: evaluate ``function`` once on first attribute access,
    cache the result on the instance as ``_<name>``, and return the cached
    value on every subsequent access."""
    cache_name = '_' + function.__name__

    @property
    @functools.wraps(function)
    def wrapper(self):
        # compute and memoize on first access only
        if not hasattr(self, cache_name):
            setattr(self, cache_name, function(self))
        return getattr(self, cache_name)

    return wrapper
class SecurityGradePredictor:
    def __init__(self, data, target, wsize, training, is3d=False, num_hidden=200, num_layers=2, learning_rate=1e-4):
        """Store the graph inputs and eagerly build the model.

        :param data: input tensor; the CNN builders index it as
            [batch, step, feature] -- confirm with callers
        :param target: label tensor, one-hot over the class axis (argmax is
            taken over axis 1 in error/accuracy)
        :param wsize: convolution window size along the step axis
        :param training: flag/tensor toggling batch-norm and dropout
        :param is3d: when True, use the 3D CNN front end (multi_cnn3d)
        :param num_hidden: hidden width for the recurrent/dense layers
        :param num_layers: number of recurrent layers
        :param learning_rate: Adam learning rate
        """
        self.data = data
        self.target = target
        self.training = training
        self._num_hidden = num_hidden
        self._num_layers = num_layers
        self._learning_rate = learning_rate
        self._is3d = is3d
        self._wsize = wsize
        # Touch the lazy properties so the full graph (forward pass,
        # metrics and optimizer) is constructed at init time.
        self.prediction
        self.error
        self.accuracy
        self.optimize
    @lazy_property
    def multi_cnn(self):
        """Multi-branch separable-CNN front end over the 2D input.

        Builds a ``krange x drange`` grid of parallel conv/pool branches
        (varying kernel size and dilation), runs ``numLayers(feat)`` rounds
        so pooling shrinks the feature axis to 1, concatenates all branches
        on the channel axis, squeezes the collapsed feature axis and applies
        batch normalization.
        """
        # Add Channel Dimension to Input Layer: [batch, step, feat, 1]
        input = tf.expand_dims(self.data, [3])
        step = int(self.data.get_shape()[1])
        feat = int(self.data.get_shape()[2])
        # rounds needed for the halving pools to reduce `feat` to 1
        nlayer = numLayers(feat)
        wsize = self._wsize
        print("window size:{} step:{} feat:{} #conv layers: {}".format(
            wsize, step, feat, nlayer))
        filters = max(2, 2 ** (math.ceil(math.log(feat, 2)+1)))
        # branch grid: krange kernel sizes x drange dilation rates
        krange = 3
        drange = 3
        convlayers = np.array([[input for _ in range(drange)]
                               for _ in range(krange)])
        for i in range(nlayer):
            filters *= 2
            # split the round's filter budget evenly across the branches
            uf = math.ceil(filters/(krange*drange))
            for k in range(krange):
                for d in range(drange):
                    conv = tf.compat.v1.layers.separable_conv2d(
                        inputs=convlayers[k][d],
                        filters=uf,
                        depth_multiplier=3,
                        dilation_rate=d+1,
                        kernel_size=[k+wsize, k+2],
                        padding="same",
                        activation=tf.nn.elu)
                    # stride [1, 2]: halve the feature axis, keep the step axis
                    pool = tf.compat.v1.layers.max_pooling2d(
                        inputs=conv, pool_size=k+2, strides=[1, 2],
                        padding="same")
                    convlayers[k][d] = pool
                    print("#{} conv:{}\tpool: {}\twide: {}\tdilation: {}".format(
                        i+1, conv.get_shape(), pool.get_shape(), k+2, d+1))
        # Flatten convlayers and merge all branches on the channel axis
        convlayers = convlayers.flatten()
        convlayer = tf.concat([c for c in convlayers], 3)
        print("concat: {}".format(convlayer.get_shape()))
        # the feature axis has been pooled down to size 1; drop it
        output_layer = tf.squeeze(convlayer, [2])
        output_layer = tf.compat.v1.layers.batch_normalization(
            output_layer, training=self.training)
        print("cnn output layer: {}".format(output_layer.get_shape()))
        # self.data = output_layer
        return output_layer
    @lazy_property
    def multi_cnn3d(self):
        """Model function for 3D CNN.

        Reshapes each step's feature vector into an (h, w) grid (via
        ``factorize``) so the sequence becomes a volume
        [batch, step, h, w, 1], applies conv3d/pool rounds until the spatial
        axes collapse to 1x1, concatenates the branches on the channel axis
        and batch-normalizes.
        """
        step = int(self.data.get_shape()[1])
        feat = int(self.data.get_shape()[2])
        # Get 2D dimension length (height, width)
        h, w = factorize(feat)
        # Transforms input to 3D [batch, depth, height, width, channel]
        input = tf.reshape(self.data, [-1, step, h, w, 1])
        print("input transformed to 3D shape: {}".format(input.get_shape()))
        nlayer = numLayers(h, w)
        wsize = self._wsize
        print("window size:{} step:{} #conv layers: {}".format(
            wsize, step, nlayer))
        filters = max(2, 2 ** (math.ceil(math.log(feat, 2)+1)))
        # krange = min(h, w) // 2
        # drange = krange
        # branch grid collapsed to a single kernel/dilation combination
        krange = 1
        drange = 1
        convlayers = np.array([[input for _ in range(drange)]
                               for _ in range(krange)])
        for i in range(nlayer):
            filters *= 4
            uf = math.ceil(filters/(krange*drange))
            for k in range(krange):
                for d in range(drange):
                    conv = tf.compat.v1.layers.conv3d(
                        inputs=convlayers[k][d],
                        filters=uf,
                        kernel_size=(k+wsize, k+2, k+2),
                        kernel_initializer=tf.compat.v1.truncated_normal_initializer(
                            stddev=0.01),
                        bias_initializer=tf.compat.v1.constant_initializer(0.1),
                        dilation_rate=(max(2, d+1), d+1, d+1),
                        padding="same",
                        activation=tf.nn.elu)
                    # only halve a spatial axis while it can still shrink
                    h_stride = 2 if int(conv.get_shape()[2]) >= 2 else 1
                    w_stride = 2 if int(conv.get_shape()[3]) >= 2 else 1
                    pool = tf.compat.v1.layers.max_pooling3d(
                        inputs=conv, pool_size=k+2, strides=[1, h_stride, w_stride],
                        padding="same")
                    convlayers[k][d] = pool
                    print("#{} conv:{} pool: {} wide: {} dilation: {}".format(
                        i+1, conv.get_shape(), pool.get_shape(), k+2, d+1))
        # Flatten convlayers and merge branches on the channel axis
        convlayers = convlayers.flatten()
        convlayer = tf.concat([c for c in convlayers], 4)
        print("concat: {}".format(convlayer.get_shape()))
        # h and w have been pooled down to 1; drop both axes
        output_layer = tf.squeeze(convlayer, [2, 3])
        output_layer = tf.compat.v1.layers.batch_normalization(
            output_layer, training=self.training)
        print("cnn output layer: {}".format(output_layer.get_shape()))
        # self.data = output_layer
        return output_layer
@lazy_property
def length(self):
used = tf.sign(tf.reduce_max(input_tensor=tf.abs(self.data), axis=2))
length = tf.reduce_sum(input_tensor=used, axis=1)
length = tf.cast(length, tf.int32)
return length
@lazy_property
def prediction(self):
return self.crnn2d
# Recurrent network.
# cells = []
# for _ in range(self._num_layers):
# cell = tf.nn.rnn_cell.GRUCell(
# self._num_hidden) # Or LSTMCell(num_units), or use ConvLSTMCell?
# # cell = tf.nn.rnn_cell.DropoutWrapper(
# # cell, output_keep_prob=1.0 - self.dropout)
# cells.append(cell)
# cell = tf.nn.rnn_cell.MultiRNNCell(cells)
# output, _ = tf.nn.dynamic_rnn(
# cell,
# self.multi_cnn3d if self._is3d else self.multi_cnn,
# dtype=tf.float32,
# sequence_length=self.length,
# )
# last = self._last_relevant(output, self.length)
# weight, bias = self._weight_and_bias(
# self._num_hidden, int(self.target.get_shape()[1]))
# prediction = tf.matmul(last, weight) + bias
# norm_last = tf.layers.batch_normalization(
# last, training=self.training)
# dense = tf.layers.dense(
# inputs=last, units=self._num_hidden * 3, activation=tf.nn.elu)
# dropout = tf.layers.dropout(
# inputs=dense, rate=math.e/10, training=self.training)
# Logits Layer
# prediction = tf.layers.dense(
# inputs=dense, units=int(self.target.get_shape()[1]), activation=tf.nn.relu6)
# prediction = self._cnn_layer(self.data,
# self._ksize,
# int(self.target.get_shape()[1]),
# self.training)
# return prediction
    @lazy_property
    def crnn2d(self):
        """CNN + RNN head: per-step 2D CNN features are concatenated with
        the raw input, passed through the RNN, a dense+dropout stack, and a
        final relu6 logits layer sized to the target's class axis."""
        # map [batch, step, feature] to [batch][step, feature] and pass each
        # [step, feature] to cnn2d
        cnn = tf.map_fn(lambda input: self.cnn2d(
            input, self.training), self.data)
        # can't use batch_norm due to its bug causing "InvalidArgumentError: Retval[0] does not have value"
        # norm_cnn = tf.layers.batch_normalization(cnn, training=self.training)
        mix = tf.concat([self.data, cnn], 2)
        # NOTE(review): self.rnn is not defined in this part of the file and
        # is called as self.rnn(self, mix); the explicit self argument
        # suggests it is a plain function stored on the class -- confirm its
        # signature.
        rnn = self.rnn(self, mix)
        dense = tf.compat.v1.layers.dense(
            inputs=rnn,
            units=self._num_hidden * 3,
            # kernel_initializer=tf.truncated_normal_initializer,
            # bias_initializer=tf.constant_initializer(0.1),
            activation=tf.nn.elu)
        dropout = tf.compat.v1.layers.dropout(
            inputs=dense, rate=math.e/10, training=self.training)
        # Logits Layer
        output = tf.compat.v1.layers.dense(
            inputs=dropout,
            units=int(self.target.get_shape()[1]),
            # kernel_initializer=tf.truncated_normal_initializer,
            # bias_initializer=tf.constant_initializer(0.1),
            activation=tf.nn.relu6)
        return output
@lazy_property
def cost(self):
cross_entropy = tf.reduce_mean(input_tensor=tf.nn.softmax_cross_entropy_with_logits(
labels=self.target, logits=self.prediction))
# cross_entropy = -tf.reduce_sum(self.target * tf.log(self.prediction))
return cross_entropy
    @lazy_property
    def optimize(self):
        """Adam training op.

        UPDATE_OPS (e.g. batch-norm moving statistics) are forced to run
        before the minimize step via the control dependency.
        """
        update_ops = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.UPDATE_OPS)
        optimizer = None
        with tf.control_dependencies(update_ops):
            optimizer = tf.compat.v1.train.AdamOptimizer(self._learning_rate).minimize(
                self.cost, global_step=tf.compat.v1.train.get_global_step())
        return optimizer
    # optimizer = tf.train.AdamOptimizer(self._learning_rate)
    # return optimizer.minimize(self.cost, global_step=tf.train.get_global_step())
@lazy_property
def error(self):
mistakes = tf.not_equal(
tf.argmax(input=self.target, axis=1), tf.argmax(input=self.prediction, axis=1))
return tf.reduce_mean(input_tensor=tf.cast(mistakes, tf.float32))
@lazy_property
def accuracy(self):
accuracy = tf.equal(
tf.argmax(input=self.target, axis=1), tf.argmax(input=self.prediction, axis=1))
return tf.reduce_mean(input_tensor=tf.cast(accuracy, tf.float32))
# return tf.metrics.accuracy(labels=self.target, predictions=self.prediction)
@staticmethod
def cnn2d(input, training):
"""Model function for CNN."""
print("shape of cnn input: {}".format(input.get_shape()))
step = int(input.get_shape()[0])
feat = int(input.get_shape()[1])
# Get 2D dimension length (height, width)
h, w = factorize(feat)
# Transforms into 2D compatible format [batch(step), height, width, channel]
input2d = tf.reshape(input, [-1, h, w, 1])
print("input transformed to 2D shape: {}".format(input2d.get_shape()))
nlayer = numLayers(h, w)
print("step:{} feat:{} #conv layers: {}".format(step, feat, nlayer))
filters = max(2, 2 ** (math.ceil(math.log(feat, 2)+1)))
# krange = 3
# drange = 3
krange = 1
drange = 1
convlayers = np.array([[input2d for _ in range(drange)]
for _ in range(krange)])
for i in range(nlayer):
filters *= 2
uf = math.ceil(filters/(krange*drange))
for k in range(krange):
for d in range(drange):
conv = tf.compat.v1.layers.conv2d(
inputs=convlayers[k][d],
filters=uf,
kernel_size=k+2,
kernel_initializer=tf.compat.v1.truncated_normal_initializer(
stddev=0.01),
bias_initializer=tf.compat.v1.constant_initializer(0.1),
dilation_rate=d+1,
padding="same",
activation=tf.nn.elu)
h_stride = 2 if int(conv.get_shape()[1]) >= 2 else 1
w_stride = 2 if int(conv.get_shape()[2]) >= 2 else 1
pool = tf.compat.v1.layers.max_pooling2d(
inputs=conv, pool_size=k+2, strides=[h_stride, w_stride],
padding="same")
# can't use for now due to map_fn and batch_norm cooperation bugs
# norm_pool = tf.layers.batch_normalization(
# pool, training=training)
convlayers[k][d] = pool
print("#{} conv:{} pool: {} ksize: {} dilation: {}".format(
i+1, conv.get_shape(), pool.get_shape(), k+2, d+1))
# Flatten convlayers
convlayers = convlayers.flatten()
convlayer = tf.concat([c for c in convlayers], 3)
print("concat: {}".format(convlayer.get_shape()))
convlayer = tf.squeeze(convlayer, [1, 2])
print("squeeze: {}".format(convlayer.get_shape()))
units = 2 ** math.ceil(math.log(feat*3, 2))-feat
dense = tf.compat.v1.layers.dense(
inputs=convlayer,
units=units,
kernel_initializer=tf.compat.v1.truncated_normal_initializer(stddev=0.01),
bias_initializer=tf.compat.v1.constant_initializer(0.1),
activation=tf.nn.elu)
# can't use for now due to map_fn and batch_norm | |
# -*- coding: utf-8 -*-
"""
This module
"""
import attr
import typing
from ..core.model import (
Property, Resource, Tag, GetAtt, TypeHint, TypeCheck,
)
from ..core.constant import AttrMeta
#--- Property declaration ---
@attr.s
class PropIntegrationScheduledTriggerProperties(Property):
    """
    AWS Object Type = "AWS::CustomerProfiles::Integration.ScheduledTriggerProperties"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-integration-scheduledtriggerproperties.html
    Property Document:
    - ``rp_ScheduleExpression``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-integration-scheduledtriggerproperties.html#cfn-customerprofiles-integration-scheduledtriggerproperties-scheduleexpression
    - ``p_DataPullMode``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-integration-scheduledtriggerproperties.html#cfn-customerprofiles-integration-scheduledtriggerproperties-datapullmode
    - ``p_FirstExecutionFrom``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-integration-scheduledtriggerproperties.html#cfn-customerprofiles-integration-scheduledtriggerproperties-firstexecutionfrom
    - ``p_ScheduleEndTime``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-integration-scheduledtriggerproperties.html#cfn-customerprofiles-integration-scheduledtriggerproperties-scheduleendtime
    - ``p_ScheduleOffset``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-integration-scheduledtriggerproperties.html#cfn-customerprofiles-integration-scheduledtriggerproperties-scheduleoffset
    - ``p_ScheduleStartTime``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-integration-scheduledtriggerproperties.html#cfn-customerprofiles-integration-scheduledtriggerproperties-schedulestarttime
    - ``p_Timezone``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-integration-scheduledtriggerproperties.html#cfn-customerprofiles-integration-scheduledtriggerproperties-timezone
    """
    AWS_OBJECT_TYPE = "AWS::CustomerProfiles::Integration.ScheduledTriggerProperties"
    # Naming convention: ``rp_`` = required property (plain ``instance_of``
    # validator); ``p_`` = optional property (validator wrapped in ``optional``).
    rp_ScheduleExpression: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "ScheduleExpression"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-integration-scheduledtriggerproperties.html#cfn-customerprofiles-integration-scheduledtriggerproperties-scheduleexpression"""
    p_DataPullMode: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "DataPullMode"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-integration-scheduledtriggerproperties.html#cfn-customerprofiles-integration-scheduledtriggerproperties-datapullmode"""
    p_FirstExecutionFrom: float = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(float)),
        metadata={AttrMeta.PROPERTY_NAME: "FirstExecutionFrom"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-integration-scheduledtriggerproperties.html#cfn-customerprofiles-integration-scheduledtriggerproperties-firstexecutionfrom"""
    p_ScheduleEndTime: float = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(float)),
        metadata={AttrMeta.PROPERTY_NAME: "ScheduleEndTime"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-integration-scheduledtriggerproperties.html#cfn-customerprofiles-integration-scheduledtriggerproperties-scheduleendtime"""
    p_ScheduleOffset: int = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(int)),
        metadata={AttrMeta.PROPERTY_NAME: "ScheduleOffset"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-integration-scheduledtriggerproperties.html#cfn-customerprofiles-integration-scheduledtriggerproperties-scheduleoffset"""
    p_ScheduleStartTime: float = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(float)),
        metadata={AttrMeta.PROPERTY_NAME: "ScheduleStartTime"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-integration-scheduledtriggerproperties.html#cfn-customerprofiles-integration-scheduledtriggerproperties-schedulestarttime"""
    p_Timezone: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "Timezone"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-integration-scheduledtriggerproperties.html#cfn-customerprofiles-integration-scheduledtriggerproperties-timezone"""
@attr.s
class PropIntegrationS3SourceProperties(Property):
    """
    AWS Object Type = "AWS::CustomerProfiles::Integration.S3SourceProperties"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-integration-s3sourceproperties.html
    Property Document:
    - ``rp_BucketName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-integration-s3sourceproperties.html#cfn-customerprofiles-integration-s3sourceproperties-bucketname
    - ``p_BucketPrefix``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-integration-s3sourceproperties.html#cfn-customerprofiles-integration-s3sourceproperties-bucketprefix
    """
    AWS_OBJECT_TYPE = "AWS::CustomerProfiles::Integration.S3SourceProperties"
    # ``rp_`` = required (plain validator); ``p_`` = optional (optional()).
    rp_BucketName: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "BucketName"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-integration-s3sourceproperties.html#cfn-customerprofiles-integration-s3sourceproperties-bucketname"""
    p_BucketPrefix: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "BucketPrefix"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-integration-s3sourceproperties.html#cfn-customerprofiles-integration-s3sourceproperties-bucketprefix"""
@attr.s
class PropObjectTypeObjectTypeField(Property):
    """
    AWS Object Type = "AWS::CustomerProfiles::ObjectType.ObjectTypeField"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-objecttype-objecttypefield.html
    Property Document:
    - ``p_ContentType``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-objecttype-objecttypefield.html#cfn-customerprofiles-objecttype-objecttypefield-contenttype
    - ``p_Source``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-objecttype-objecttypefield.html#cfn-customerprofiles-objecttype-objecttypefield-source
    - ``p_Target``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-objecttype-objecttypefield.html#cfn-customerprofiles-objecttype-objecttypefield-target
    """
    AWS_OBJECT_TYPE = "AWS::CustomerProfiles::ObjectType.ObjectTypeField"
    # all fields optional here (``p_`` prefix, optional() validators)
    p_ContentType: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "ContentType"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-objecttype-objecttypefield.html#cfn-customerprofiles-objecttype-objecttypefield-contenttype"""
    p_Source: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "Source"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-objecttype-objecttypefield.html#cfn-customerprofiles-objecttype-objecttypefield-source"""
    p_Target: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "Target"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-objecttype-objecttypefield.html#cfn-customerprofiles-objecttype-objecttypefield-target"""
@attr.s
class PropIntegrationIncrementalPullConfig(Property):
    """
    AWS Object Type = "AWS::CustomerProfiles::Integration.IncrementalPullConfig"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-integration-incrementalpullconfig.html
    Property Document:
    - ``p_DatetimeTypeFieldName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-integration-incrementalpullconfig.html#cfn-customerprofiles-integration-incrementalpullconfig-datetimetypefieldname
    """
    AWS_OBJECT_TYPE = "AWS::CustomerProfiles::Integration.IncrementalPullConfig"
    # single optional property (``p_`` prefix, optional() validator)
    p_DatetimeTypeFieldName: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "DatetimeTypeFieldName"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-integration-incrementalpullconfig.html#cfn-customerprofiles-integration-incrementalpullconfig-datetimetypefieldname"""
@attr.s
class PropIntegrationMarketoSourceProperties(Property):
    """
    AWS Object Type = "AWS::CustomerProfiles::Integration.MarketoSourceProperties"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-integration-marketosourceproperties.html
    Property Document:
    - ``rp_Object``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-integration-marketosourceproperties.html#cfn-customerprofiles-integration-marketosourceproperties-object
    """
    AWS_OBJECT_TYPE = "AWS::CustomerProfiles::Integration.MarketoSourceProperties"
    # single required property (``rp_`` prefix, plain instance_of validator)
    rp_Object: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "Object"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-integration-marketosourceproperties.html#cfn-customerprofiles-integration-marketosourceproperties-object"""
@attr.s
class PropIntegrationTaskPropertiesMap(Property):
    """
    AWS Object Type = "AWS::CustomerProfiles::Integration.TaskPropertiesMap"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-integration-taskpropertiesmap.html
    Property Document:
    
    - ``rp_OperatorPropertyKey``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-integration-taskpropertiesmap.html#cfn-customerprofiles-integration-taskpropertiesmap-operatorpropertykey
    - ``rp_Property``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-integration-taskpropertiesmap.html#cfn-customerprofiles-integration-taskpropertiesmap-property
    """
    # CloudFormation type identifier this property class serializes as.
    AWS_OBJECT_TYPE = "AWS::CustomerProfiles::Integration.TaskPropertiesMap"
    # Required ("rp_"): non-optional validator rejects the None placeholder default.
    rp_OperatorPropertyKey: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "OperatorPropertyKey"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-integration-taskpropertiesmap.html#cfn-customerprofiles-integration-taskpropertiesmap-operatorpropertykey"""
    # Required ("rp_"): non-optional validator rejects the None placeholder default.
    rp_Property: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "Property"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-integration-taskpropertiesmap.html#cfn-customerprofiles-integration-taskpropertiesmap-property"""
@attr.s
class PropIntegrationConnectorOperator(Property):
    """
    AWS Object Type = "AWS::CustomerProfiles::Integration.ConnectorOperator"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-integration-connectoroperator.html
    Property Document:
    
    - ``p_Marketo``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-integration-connectoroperator.html#cfn-customerprofiles-integration-connectoroperator-marketo
    - ``p_S3``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-integration-connectoroperator.html#cfn-customerprofiles-integration-connectoroperator-s3
    - ``p_Salesforce``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-integration-connectoroperator.html#cfn-customerprofiles-integration-connectoroperator-salesforce
    - ``p_ServiceNow``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-integration-connectoroperator.html#cfn-customerprofiles-integration-connectoroperator-servicenow
    - ``p_Zendesk``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-integration-connectoroperator.html#cfn-customerprofiles-integration-connectoroperator-zendesk
    """
    # CloudFormation type identifier this property class serializes as.
    AWS_OBJECT_TYPE = "AWS::CustomerProfiles::Integration.ConnectorOperator"
    # All five connector operators below are optional ("p_" prefix): each
    # validator is wrapped in optional(), so None is accepted.
    p_Marketo: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "Marketo"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-integration-connectoroperator.html#cfn-customerprofiles-integration-connectoroperator-marketo"""
    p_S3: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "S3"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-integration-connectoroperator.html#cfn-customerprofiles-integration-connectoroperator-s3"""
    p_Salesforce: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "Salesforce"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-integration-connectoroperator.html#cfn-customerprofiles-integration-connectoroperator-salesforce"""
    p_ServiceNow: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "ServiceNow"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-integration-connectoroperator.html#cfn-customerprofiles-integration-connectoroperator-servicenow"""
    p_Zendesk: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "Zendesk"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-integration-connectoroperator.html#cfn-customerprofiles-integration-connectoroperator-zendesk"""
@attr.s
class PropIntegrationZendeskSourceProperties(Property):
    """
    AWS Object Type = "AWS::CustomerProfiles::Integration.ZendeskSourceProperties"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-integration-zendesksourceproperties.html
    Property Document:
    
    - ``rp_Object``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-integration-zendesksourceproperties.html#cfn-customerprofiles-integration-zendesksourceproperties-object
    """
    # CloudFormation type identifier this property class serializes as.
    AWS_OBJECT_TYPE = "AWS::CustomerProfiles::Integration.ZendeskSourceProperties"
    # Required ("rp_"): non-optional validator rejects the None placeholder default.
    rp_Object: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "Object"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-integration-zendesksourceproperties.html#cfn-customerprofiles-integration-zendesksourceproperties-object"""
@attr.s
class PropIntegrationServiceNowSourceProperties(Property):
    """
    AWS Object Type = "AWS::CustomerProfiles::Integration.ServiceNowSourceProperties"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-integration-servicenowsourceproperties.html
    Property Document:
    
    - ``rp_Object``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-integration-servicenowsourceproperties.html#cfn-customerprofiles-integration-servicenowsourceproperties-object
    """
    # CloudFormation type identifier this property class serializes as.
    AWS_OBJECT_TYPE = "AWS::CustomerProfiles::Integration.ServiceNowSourceProperties"
    # Required ("rp_"): non-optional validator rejects the None placeholder default.
    rp_Object: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "Object"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-integration-servicenowsourceproperties.html#cfn-customerprofiles-integration-servicenowsourceproperties-object"""
@attr.s
class PropIntegrationSalesforceSourceProperties(Property):
    """
    AWS Object Type = "AWS::CustomerProfiles::Integration.SalesforceSourceProperties"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-integration-salesforcesourceproperties.html
    Property Document:
    
    - ``rp_Object``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-integration-salesforcesourceproperties.html#cfn-customerprofiles-integration-salesforcesourceproperties-object
    - ``p_EnableDynamicFieldUpdate``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-integration-salesforcesourceproperties.html#cfn-customerprofiles-integration-salesforcesourceproperties-enabledynamicfieldupdate
    - ``p_IncludeDeletedRecords``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-integration-salesforcesourceproperties.html#cfn-customerprofiles-integration-salesforcesourceproperties-includedeletedrecords
    """
    # CloudFormation type identifier this property class serializes as.
    AWS_OBJECT_TYPE = "AWS::CustomerProfiles::Integration.SalesforceSourceProperties"
    # Required ("rp_"): non-optional validator rejects the None placeholder default.
    rp_Object: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "Object"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-integration-salesforcesourceproperties.html#cfn-customerprofiles-integration-salesforcesourceproperties-object"""
    # Optional ("p_"): plain bool (no intrinsic-function wrapper), None allowed.
    p_EnableDynamicFieldUpdate: bool = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(bool)),
        metadata={AttrMeta.PROPERTY_NAME: "EnableDynamicFieldUpdate"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-integration-salesforcesourceproperties.html#cfn-customerprofiles-integration-salesforcesourceproperties-enabledynamicfieldupdate"""
    # Optional ("p_"): plain bool (no intrinsic-function wrapper), None allowed.
    p_IncludeDeletedRecords: bool = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(bool)),
        metadata={AttrMeta.PROPERTY_NAME: "IncludeDeletedRecords"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-integration-salesforcesourceproperties.html#cfn-customerprofiles-integration-salesforcesourceproperties-includedeletedrecords"""
@attr.s
class PropObjectTypeObjectTypeKey(Property):
    """
    AWS Object Type = "AWS::CustomerProfiles::ObjectType.ObjectTypeKey"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-objecttype-objecttypekey.html
    Property Document:
    
    - ``p_FieldNames``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-objecttype-objecttypekey.html#cfn-customerprofiles-objecttype-objecttypekey-fieldnames
    - ``p_StandardIdentifiers``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-objecttype-objecttypekey.html#cfn-customerprofiles-objecttype-objecttypekey-standardidentifiers
    """
    # CloudFormation type identifier this property class serializes as.
    AWS_OBJECT_TYPE = "AWS::CustomerProfiles::ObjectType.ObjectTypeKey"
    # Optional ("p_"): must be a list whose members are all strings, or None.
    p_FieldNames: typing.List[TypeHint.intrinsic_str] = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), iterable_validator=attr.validators.instance_of(list))),
        metadata={AttrMeta.PROPERTY_NAME: "FieldNames"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-objecttype-objecttypekey.html#cfn-customerprofiles-objecttype-objecttypekey-fieldnames"""
    # Optional ("p_"): must be a list whose members are all strings, or None.
    p_StandardIdentifiers: typing.List[TypeHint.intrinsic_str] = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), iterable_validator=attr.validators.instance_of(list))),
        metadata={AttrMeta.PROPERTY_NAME: "StandardIdentifiers"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-objecttype-objecttypekey.html#cfn-customerprofiles-objecttype-objecttypekey-standardidentifiers"""
@attr.s
class PropIntegrationTask(Property):
    """
    AWS Object Type = "AWS::CustomerProfiles::Integration.Task"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-integration-task.html
    Property Document:
    
    - ``rp_SourceFields``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-integration-task.html#cfn-customerprofiles-integration-task-sourcefields
    - ``rp_TaskType``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-integration-task.html#cfn-customerprofiles-integration-task-tasktype
    - ``p_ConnectorOperator``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-integration-task.html#cfn-customerprofiles-integration-task-connectoroperator
    - ``p_DestinationField``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-integration-task.html#cfn-customerprofiles-integration-task-destinationfield
    - ``p_TaskProperties``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-integration-task.html#cfn-customerprofiles-integration-task-taskproperties
    """
    # CloudFormation type identifier this property class serializes as.
    AWS_OBJECT_TYPE = "AWS::CustomerProfiles::Integration.Task"
    # Required ("rp_"): list of strings; non-optional validator rejects None.
    rp_SourceFields: typing.List[TypeHint.intrinsic_str] = attr.ib(
        default=None,
        validator=attr.validators.deep_iterable(member_validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), iterable_validator=attr.validators.instance_of(list)),
        metadata={AttrMeta.PROPERTY_NAME: "SourceFields"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-integration-task.html#cfn-customerprofiles-integration-task-sourcefields"""
    # Required ("rp_"): non-optional validator rejects the None placeholder default.
    rp_TaskType: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "TaskType"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-integration-task.html#cfn-customerprofiles-integration-task-tasktype"""
    # Optional nested property; a dict argument is converted via from_dict.
    p_ConnectorOperator: typing.Union['PropIntegrationConnectorOperator', dict] = attr.ib(
        default=None,
        converter=PropIntegrationConnectorOperator.from_dict,
        validator=attr.validators.optional(attr.validators.instance_of(PropIntegrationConnectorOperator)),
        metadata={AttrMeta.PROPERTY_NAME: "ConnectorOperator"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-integration-task.html#cfn-customerprofiles-integration-task-connectoroperator"""
    # Optional ("p_"): validator accepts None.
    p_DestinationField: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "DestinationField"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-integration-task.html#cfn-customerprofiles-integration-task-destinationfield"""
    # Optional list of nested properties; dict items are converted via from_list.
    p_TaskProperties: typing.List[typing.Union['PropIntegrationTaskPropertiesMap', dict]] = attr.ib(
        default=None,
        converter=PropIntegrationTaskPropertiesMap.from_list,
        validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(PropIntegrationTaskPropertiesMap), iterable_validator=attr.validators.instance_of(list))),
        metadata={AttrMeta.PROPERTY_NAME: "TaskProperties"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-integration-task.html#cfn-customerprofiles-integration-task-taskproperties"""
@attr.s
class PropObjectTypeKeyMap(Property):
    """
    AWS Object Type = "AWS::CustomerProfiles::ObjectType.KeyMap"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-objecttype-keymap.html
    Property Document:
    
    - ``p_Name``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-objecttype-keymap.html#cfn-customerprofiles-objecttype-keymap-name
    - ``p_ObjectTypeKeyList``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-objecttype-keymap.html#cfn-customerprofiles-objecttype-keymap-objecttypekeylist
    """
    # CloudFormation type identifier this property class serializes as.
    AWS_OBJECT_TYPE = "AWS::CustomerProfiles::ObjectType.KeyMap"
    # Optional ("p_"): validator accepts None.
    p_Name: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "Name"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-objecttype-keymap.html#cfn-customerprofiles-objecttype-keymap-name"""
    # Optional list of nested properties; dict items are converted via from_list.
    p_ObjectTypeKeyList: typing.List[typing.Union['PropObjectTypeObjectTypeKey', dict]] = attr.ib(
        default=None,
        converter=PropObjectTypeObjectTypeKey.from_list,
        validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(PropObjectTypeObjectTypeKey), iterable_validator=attr.validators.instance_of(list))),
        metadata={AttrMeta.PROPERTY_NAME: "ObjectTypeKeyList"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-objecttype-keymap.html#cfn-customerprofiles-objecttype-keymap-objecttypekeylist"""
@attr.s
class PropIntegrationTriggerProperties(Property):
    """
    AWS Object Type = "AWS::CustomerProfiles::Integration.TriggerProperties"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-integration-triggerproperties.html
    Property Document:
    
    - ``p_Scheduled``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-integration-triggerproperties.html#cfn-customerprofiles-integration-triggerproperties-scheduled
    """
    # CloudFormation type identifier this property class serializes as.
    AWS_OBJECT_TYPE = "AWS::CustomerProfiles::Integration.TriggerProperties"
    # Optional nested property; a dict argument is converted via from_dict.
    # NOTE(review): PropIntegrationScheduledTriggerProperties is defined elsewhere
    # in this file (not visible in this chunk).
    p_Scheduled: typing.Union['PropIntegrationScheduledTriggerProperties', dict] = attr.ib(
        default=None,
        converter=PropIntegrationScheduledTriggerProperties.from_dict,
        validator=attr.validators.optional(attr.validators.instance_of(PropIntegrationScheduledTriggerProperties)),
        metadata={AttrMeta.PROPERTY_NAME: "Scheduled"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-integration-triggerproperties.html#cfn-customerprofiles-integration-triggerproperties-scheduled"""
@attr.s
class PropObjectTypeFieldMap(Property):
    """
    AWS Object Type = "AWS::CustomerProfiles::ObjectType.FieldMap"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-objecttype-fieldmap.html
    Property Document:
    
    - ``p_Name``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-objecttype-fieldmap.html#cfn-customerprofiles-objecttype-fieldmap-name
    - ``p_ObjectTypeField``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-objecttype-fieldmap.html#cfn-customerprofiles-objecttype-fieldmap-objecttypefield
    """
    # CloudFormation type identifier this property class serializes as.
    AWS_OBJECT_TYPE = "AWS::CustomerProfiles::ObjectType.FieldMap"
    # Optional ("p_"): validator accepts None.
    p_Name: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "Name"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-objecttype-fieldmap.html#cfn-customerprofiles-objecttype-fieldmap-name"""
    # Optional nested property; a dict argument is converted via from_dict.
    p_ObjectTypeField: typing.Union['PropObjectTypeObjectTypeField', dict] = attr.ib(
        default=None,
        converter=PropObjectTypeObjectTypeField.from_dict,
        validator=attr.validators.optional(attr.validators.instance_of(PropObjectTypeObjectTypeField)),
        metadata={AttrMeta.PROPERTY_NAME: "ObjectTypeField"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-objecttype-fieldmap.html#cfn-customerprofiles-objecttype-fieldmap-objecttypefield"""
@attr.s
class PropIntegrationSourceConnectorProperties(Property):
    """
    AWS Object Type = "AWS::CustomerProfiles::Integration.SourceConnectorProperties"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-integration-sourceconnectorproperties.html
    Property Document:
    
    - ``p_Marketo``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-integration-sourceconnectorproperties.html#cfn-customerprofiles-integration-sourceconnectorproperties-marketo
    - ``p_S3``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-integration-sourceconnectorproperties.html#cfn-customerprofiles-integration-sourceconnectorproperties-s3
    - ``p_Salesforce``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-integration-sourceconnectorproperties.html#cfn-customerprofiles-integration-sourceconnectorproperties-salesforce
    - ``p_ServiceNow``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-integration-sourceconnectorproperties.html#cfn-customerprofiles-integration-sourceconnectorproperties-servicenow
    - ``p_Zendesk``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-integration-sourceconnectorproperties.html#cfn-customerprofiles-integration-sourceconnectorproperties-zendesk
    """
    # CloudFormation type identifier this property class serializes as.
    AWS_OBJECT_TYPE = "AWS::CustomerProfiles::Integration.SourceConnectorProperties"
    # Each connector below is an optional nested property; dict arguments are
    # converted to the typed property via the corresponding from_dict.
    p_Marketo: typing.Union['PropIntegrationMarketoSourceProperties', dict] = attr.ib(
        default=None,
        converter=PropIntegrationMarketoSourceProperties.from_dict,
        validator=attr.validators.optional(attr.validators.instance_of(PropIntegrationMarketoSourceProperties)),
        metadata={AttrMeta.PROPERTY_NAME: "Marketo"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-integration-sourceconnectorproperties.html#cfn-customerprofiles-integration-sourceconnectorproperties-marketo"""
    # NOTE(review): PropIntegrationS3SourceProperties is defined elsewhere in
    # this file (not visible in this chunk).
    p_S3: typing.Union['PropIntegrationS3SourceProperties', dict] = attr.ib(
        default=None,
        converter=PropIntegrationS3SourceProperties.from_dict,
        validator=attr.validators.optional(attr.validators.instance_of(PropIntegrationS3SourceProperties)),
        metadata={AttrMeta.PROPERTY_NAME: "S3"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-integration-sourceconnectorproperties.html#cfn-customerprofiles-integration-sourceconnectorproperties-s3"""
    p_Salesforce: typing.Union['PropIntegrationSalesforceSourceProperties', dict] = attr.ib(
        default=None,
        converter=PropIntegrationSalesforceSourceProperties.from_dict,
        validator=attr.validators.optional(attr.validators.instance_of(PropIntegrationSalesforceSourceProperties)),
        metadata={AttrMeta.PROPERTY_NAME: "Salesforce"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-integration-sourceconnectorproperties.html#cfn-customerprofiles-integration-sourceconnectorproperties-salesforce"""
    p_ServiceNow: typing.Union['PropIntegrationServiceNowSourceProperties', dict] = attr.ib(
        default=None,
        converter=PropIntegrationServiceNowSourceProperties.from_dict,
        validator=attr.validators.optional(attr.validators.instance_of(PropIntegrationServiceNowSourceProperties)),
        metadata={AttrMeta.PROPERTY_NAME: "ServiceNow"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-integration-sourceconnectorproperties.html#cfn-customerprofiles-integration-sourceconnectorproperties-servicenow"""
    p_Zendesk: typing.Union['PropIntegrationZendeskSourceProperties', dict] = attr.ib(
        default=None,
        converter=PropIntegrationZendeskSourceProperties.from_dict,
        validator=attr.validators.optional(attr.validators.instance_of(PropIntegrationZendeskSourceProperties)),
        metadata={AttrMeta.PROPERTY_NAME: "Zendesk"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-integration-sourceconnectorproperties.html#cfn-customerprofiles-integration-sourceconnectorproperties-zendesk"""
@attr.s
class PropIntegrationTriggerConfig(Property):
    """
    AWS Object Type = "AWS::CustomerProfiles::Integration.TriggerConfig"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-integration-triggerconfig.html
    Property Document:
    
    - ``rp_TriggerType``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-integration-triggerconfig.html#cfn-customerprofiles-integration-triggerconfig-triggertype
    - ``p_TriggerProperties``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-integration-triggerconfig.html#cfn-customerprofiles-integration-triggerconfig-triggerproperties
    """
    # CloudFormation type identifier this property class serializes as.
    AWS_OBJECT_TYPE = "AWS::CustomerProfiles::Integration.TriggerConfig"
    # Required ("rp_"): non-optional validator rejects the None placeholder default.
    rp_TriggerType: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "TriggerType"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-integration-triggerconfig.html#cfn-customerprofiles-integration-triggerconfig-triggertype"""
    # Optional nested property; a dict argument is converted via from_dict.
    p_TriggerProperties: typing.Union['PropIntegrationTriggerProperties', dict] = attr.ib(
        default=None,
        converter=PropIntegrationTriggerProperties.from_dict,
        validator=attr.validators.optional(attr.validators.instance_of(PropIntegrationTriggerProperties)),
        metadata={AttrMeta.PROPERTY_NAME: "TriggerProperties"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-integration-triggerconfig.html#cfn-customerprofiles-integration-triggerconfig-triggerproperties"""
@attr.s
class PropIntegrationSourceFlowConfig(Property):
    """
    AWS Object Type = "AWS::CustomerProfiles::Integration.SourceFlowConfig"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-integration-sourceflowconfig.html
    Property Document:
    
    - ``rp_ConnectorType``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-integration-sourceflowconfig.html#cfn-customerprofiles-integration-sourceflowconfig-connectortype
    - ``rp_SourceConnectorProperties``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-integration-sourceflowconfig.html#cfn-customerprofiles-integration-sourceflowconfig-sourceconnectorproperties
    - ``p_ConnectorProfileName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-integration-sourceflowconfig.html#cfn-customerprofiles-integration-sourceflowconfig-connectorprofilename
    - ``p_IncrementalPullConfig``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-integration-sourceflowconfig.html#cfn-customerprofiles-integration-sourceflowconfig-incrementalpullconfig
    """
    # CloudFormation type identifier this property class serializes as.
    AWS_OBJECT_TYPE = "AWS::CustomerProfiles::Integration.SourceFlowConfig"
    # Required ("rp_"): non-optional validator rejects the None placeholder default.
    rp_ConnectorType: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "ConnectorType"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-integration-sourceflowconfig.html#cfn-customerprofiles-integration-sourceflowconfig-connectortype"""
    # Required nested property; a dict argument is converted via from_dict and the
    # non-optional validator rejects None.
    rp_SourceConnectorProperties: typing.Union['PropIntegrationSourceConnectorProperties', dict] = attr.ib(
        default=None,
        converter=PropIntegrationSourceConnectorProperties.from_dict,
        validator=attr.validators.instance_of(PropIntegrationSourceConnectorProperties),
        metadata={AttrMeta.PROPERTY_NAME: "SourceConnectorProperties"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-integration-sourceflowconfig.html#cfn-customerprofiles-integration-sourceflowconfig-sourceconnectorproperties"""
    # Optional ("p_"): validator accepts None.
    p_ConnectorProfileName: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "ConnectorProfileName"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-integration-sourceflowconfig.html#cfn-customerprofiles-integration-sourceflowconfig-connectorprofilename"""
    # Optional nested property; a dict argument is converted via from_dict.
    p_IncrementalPullConfig: typing.Union['PropIntegrationIncrementalPullConfig', dict] = attr.ib(
        default=None,
        converter=PropIntegrationIncrementalPullConfig.from_dict,
        validator=attr.validators.optional(attr.validators.instance_of(PropIntegrationIncrementalPullConfig)),
        metadata={AttrMeta.PROPERTY_NAME: "IncrementalPullConfig"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-integration-sourceflowconfig.html#cfn-customerprofiles-integration-sourceflowconfig-incrementalpullconfig"""
@attr.s
class PropIntegrationFlowDefinition(Property):
    """
    AWS Object Type = "AWS::CustomerProfiles::Integration.FlowDefinition"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-integration-flowdefinition.html
    Property Document:
    
    - ``rp_FlowName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-integration-flowdefinition.html#cfn-customerprofiles-integration-flowdefinition-flowname
    - ``rp_KmsArn``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-integration-flowdefinition.html#cfn-customerprofiles-integration-flowdefinition-kmsarn
    - ``rp_SourceFlowConfig``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-integration-flowdefinition.html#cfn-customerprofiles-integration-flowdefinition-sourceflowconfig
    - ``rp_Tasks``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-integration-flowdefinition.html#cfn-customerprofiles-integration-flowdefinition-tasks
    - ``rp_TriggerConfig``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-integration-flowdefinition.html#cfn-customerprofiles-integration-flowdefinition-triggerconfig
    - ``p_Description``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-integration-flowdefinition.html#cfn-customerprofiles-integration-flowdefinition-description
    """
    # CloudFormation type identifier this property class serializes as.
    AWS_OBJECT_TYPE = "AWS::CustomerProfiles::Integration.FlowDefinition"
    # Required ("rp_"): non-optional validator rejects the None placeholder default.
    rp_FlowName: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "FlowName"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-integration-flowdefinition.html#cfn-customerprofiles-integration-flowdefinition-flowname"""
    # Required ("rp_"): non-optional validator rejects the None placeholder default.
    rp_KmsArn: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "KmsArn"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-integration-flowdefinition.html#cfn-customerprofiles-integration-flowdefinition-kmsarn"""
    # Required nested property; a dict argument is converted via from_dict.
    rp_SourceFlowConfig: typing.Union['PropIntegrationSourceFlowConfig', dict] = attr.ib(
        default=None,
        converter=PropIntegrationSourceFlowConfig.from_dict,
        validator=attr.validators.instance_of(PropIntegrationSourceFlowConfig),
        metadata={AttrMeta.PROPERTY_NAME: "SourceFlowConfig"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-integration-flowdefinition.html#cfn-customerprofiles-integration-flowdefinition-sourceflowconfig"""
    # Required list of nested properties; dict items are converted via from_list.
    rp_Tasks: typing.List[typing.Union['PropIntegrationTask', dict]] = attr.ib(
        default=None,
        converter=PropIntegrationTask.from_list,
        validator=attr.validators.deep_iterable(member_validator=attr.validators.instance_of(PropIntegrationTask), iterable_validator=attr.validators.instance_of(list)),
        metadata={AttrMeta.PROPERTY_NAME: "Tasks"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-integration-flowdefinition.html#cfn-customerprofiles-integration-flowdefinition-tasks"""
    # Required nested property; a dict argument is converted via from_dict.
    rp_TriggerConfig: typing.Union['PropIntegrationTriggerConfig', dict] = attr.ib(
        default=None,
        converter=PropIntegrationTriggerConfig.from_dict,
        validator=attr.validators.instance_of(PropIntegrationTriggerConfig),
        metadata={AttrMeta.PROPERTY_NAME: "TriggerConfig"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-integration-flowdefinition.html#cfn-customerprofiles-integration-flowdefinition-triggerconfig"""
    # Optional ("p_"): validator accepts None.
    p_Description: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "Description"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-customerprofiles-integration-flowdefinition.html#cfn-customerprofiles-integration-flowdefinition-description"""
# --- Resource declaration ---
@attr.s
class Integration(Resource):
"""
AWS Object Type = "AWS::CustomerProfiles::Integration"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-customerprofiles-integration.html
Property Document:
- ``rp_DomainName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-customerprofiles-integration.html#cfn-customerprofiles-integration-domainname
- ``rp_ObjectTypeName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-customerprofiles-integration.html#cfn-customerprofiles-integration-objecttypename
- ``p_FlowDefinition``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-customerprofiles-integration.html#cfn-customerprofiles-integration-flowdefinition
- ``p_Uri``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-customerprofiles-integration.html#cfn-customerprofiles-integration-uri
- ``p_Tags``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-customerprofiles-integration.html#cfn-customerprofiles-integration-tags
"""
AWS_OBJECT_TYPE = "AWS::CustomerProfiles::Integration"
rp_DomainName: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "DomainName"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-customerprofiles-integration.html#cfn-customerprofiles-integration-domainname"""
rp_ObjectTypeName: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "ObjectTypeName"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-customerprofiles-integration.html#cfn-customerprofiles-integration-objecttypename"""
p_FlowDefinition: typing.Union['PropIntegrationFlowDefinition', dict] = attr.ib(
default=None,
converter=PropIntegrationFlowDefinition.from_dict,
validator=attr.validators.optional(attr.validators.instance_of(PropIntegrationFlowDefinition)),
metadata={AttrMeta.PROPERTY_NAME: "FlowDefinition"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-customerprofiles-integration.html#cfn-customerprofiles-integration-flowdefinition"""
p_Uri: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "Uri"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-customerprofiles-integration.html#cfn-customerprofiles-integration-uri"""
p_Tags: typing.List[typing.Union[Tag, dict]] = attr.ib(
default=None,
converter=Tag.from_list,
validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(Tag), iterable_validator=attr.validators.instance_of(list))),
metadata={AttrMeta.PROPERTY_NAME: "Tags"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-customerprofiles-integration.html#cfn-customerprofiles-integration-tags"""
@property
def rv_CreatedAt(self) -> GetAtt:
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-customerprofiles-integration.html#aws-resource-customerprofiles-integration-return-values"""
return GetAtt(resource=self, attr_name="CreatedAt")
| |
from django.shortcuts import render,redirect
from django.http import HttpResponse
from model.community import *
from model.oss import *
from view.common import *
from django.core.paginator import Paginator,EmptyPage,PageNotAnInteger
from django.db.models import Sum, Count
from operator import itemgetter
from django.http import JsonResponse
from influxdb_metrics.utils import query
from influxdb import InfluxDBClient
import time
import datetime
import statsmodels.api as sm
import pandas as pd
import math
import numpy as np
# Module-level InfluxDB connection shared by every view in this module.
# NOTE(review): host and credentials are hard-coded here — consider moving
# them to Django settings / environment configuration.
client = InfluxDBClient('172.16.58.3', 8086, 'moose', 'moose', 'moose')
def activity(request):
    """Render the activity dashboard for a community.

    Collects per-month commit / issue / pull-request / developer counts and
    the commit hour-of-day distribution for the community identified by the
    ``cid`` query parameter, encodes each series as a comma-separated string
    (the format the front-end charts consume) and renders ``activity.html``.

    Args:
        request: Django request; reads ``user_id`` from the session and
            ``cid`` from the query string.

    Returns:
        HttpResponse for the rendered ``activity.html`` template.
    """
    extra_info = dict()
    uid = request.session['user_id']
    cid = request.GET.get('cid')
    extra_info.update(get_nav_list(uid, cid))
    # Each "line_*" series deliberately keeps its trailing comma — that is
    # the historical format the template's charts expect.  Iterating a
    # queryset more than once reuses Django's result cache, so each table
    # below is fetched exactly once (the previous ``.count() > 0`` guards
    # issued a redundant extra COUNT query per table).
    commit_yearmonth = MOOSEStatisticCommitYearmonth.objects.filter(community_id=int(cid))
    line_commit_data = ''.join(str(row.commits_count) + ',' for row in commit_yearmonth)
    line_commit_arr = ''.join(row.yearmonth + ',' for row in commit_yearmonth)
    issue_yearmonth = MOOSEStatisticIssueYearmonth.objects.filter(community_id=int(cid))
    line_issue_data = ''.join(str(row.issue_count) + ',' for row in issue_yearmonth)
    line_issue_arr = ''.join(row.yearmonth + ',' for row in issue_yearmonth)
    line_issue_close_data = ''.join(str(row.close_issue_count) + ',' for row in issue_yearmonth)
    pull_yearmonth = MOOSEStatisticPullYearmonth.objects.filter(community_id=int(cid))
    line_pull_data = ''.join(str(row.pull_count) + ',' for row in pull_yearmonth)
    line_pull_arr = ''.join(row.yearmonth + ',' for row in pull_yearmonth)
    line_pull_merged_data = ''.join(str(row.merged_pull_count) + ',' for row in pull_yearmonth)
    developer_yearmonth = MOOSEStatisticAuthorYearmonth.objects.filter(community_id=int(cid))
    line_developer_data = ''.join(str(row.developer_count) + ',' for row in developer_yearmonth)
    line_developer_arr = ''.join(row.yearmonth + ',' for row in developer_yearmonth)
    # Hour-of-day heat map entries as "day-hour-count".  Unlike the series
    # above, this one carries no trailing comma: ``','.join`` reproduces
    # the old ``[:-1]`` trimming exactly (including the empty case).
    commit_hourday = MOOSEStatisticCommitHourday.objects.filter(community_id=int(cid))
    commit_hourday_arr = ','.join(
        str(row.day) + '-' + str(row.hour) + '-' + str(row.commit_count)
        for row in commit_hourday
    )
    extra_info.update({
        'line_commit_arr': line_commit_arr,
        'line_commit_data': line_commit_data,
        'line_issue_arr': line_issue_arr,
        'line_issue_data': line_issue_data,
        'line_issue_close_data': line_issue_close_data,
        'line_pull_arr': line_pull_arr,
        'line_pull_data': line_pull_data,
        'line_developer_arr': line_developer_arr,
        'line_developer_data': line_developer_data,
        'line_pull_merged_data': line_pull_merged_data,
        'commit_hourday': commit_hourday_arr,
        'path': 2,
    })
    return render(request, 'activity.html', extra_info)
def issue(request):
    """Render the issue list page for a community.

    Fetches up to 100 of the newest issue points for the community from
    InfluxDB, resolves each repository's full name, paginates the rows 20
    per page and renders ``issue.html`` together with the community's
    open/closed issue totals.

    Args:
        request: Django request; reads ``user_id`` from the session and
            ``cid`` / ``page`` from the query string.

    Returns:
        HttpResponse for the rendered ``issue.html`` template.
    """
    extra_info = dict()
    uid = request.session['user_id']
    cid = request.GET.get('cid')
    extra_info.update(get_nav_list(uid, cid))
    # ``QueryDict.get`` never raises; a missing/invalid page is handled by
    # the paginator below.
    page = request.GET.get('page')
    # ``cid`` comes straight from the query string: bind it as a query
    # parameter instead of concatenating it into the InfluxQL statement,
    # which was an injection vector.
    result = client.query(
        "select * from moose_issue where community_id=$cid order by time desc;",
        bind_params={'cid': cid},
    ).get_points()
    issue_all = []
    for point in result:
        # Cap the page source at the 100 most recent usable points.
        if len(issue_all) >= 100:
            break
        if point['issue_id'] is None:
            continue
        # Resolve the repository display name only for rows we keep (the
        # old code looked it up before the issue_id check and wasted a
        # query on skipped rows).
        oss_name = MOOSEMeta.objects.values('oss_fullname').filter(
            oss_id=point['oss_id'])[0]['oss_fullname']
        issue_all.append({
            'issue_state': point['issue_state'],
            'issue_title': point['title'],
            'issue_body': point['body'],
            'id': point['issue_id'],
            'issue_create_time': point['time'],
            'issue_comment_count': point['issue_comment_count'],
            'oss_fullname': oss_name,
        })
    paginator = Paginator(issue_all, 20)
    try:
        customer = paginator.page(page)
    except PageNotAnInteger:
        # Missing or non-numeric page -> first page.
        customer = paginator.page(1)
    except EmptyPage:
        # Out-of-range page -> clamp to the last page instead of a 500.
        customer = paginator.page(paginator.num_pages)
    issue_statistic = MOOSEStatistic.objects.filter(community_id=int(cid))[0:1]
    issue_count = issue_statistic[0].issue_count
    issue_close_count = issue_statistic[0].issue_close_count
    extra_info.update({
        'issue': customer,
        'issue_count': issue_count,
        'issue_close_count': issue_close_count,
        'issue_open_count': int(issue_count) - int(issue_close_count),
        'path': 3,
    })
    return render(request, 'issue.html', extra_info)
def pull(request):
    """Render the pull-request list page for a community.

    Fetches up to 100 of the newest pull-request points for the community
    from InfluxDB, resolves each repository's full name, paginates the rows
    20 per page and renders ``pull.html`` together with the community's
    merged/unmerged totals.

    Args:
        request: Django request; reads ``user_id`` from the session and
            ``cid`` / ``page`` from the query string.

    Returns:
        HttpResponse for the rendered ``pull.html`` template.
    """
    extra_info = dict()
    uid = request.session['user_id']
    cid = request.GET.get('cid')
    extra_info.update(get_nav_list(uid, cid))
    # ``QueryDict.get`` never raises; invalid pages are handled below.
    page = request.GET.get('page')
    # Bind ``cid`` as a query parameter instead of concatenating it into
    # the InfluxQL statement, which was an injection vector.
    result = client.query(
        "select * from moose_pull where community_id=$cid order by time desc;",
        bind_params={'cid': cid},
    ).get_points()
    pulls_all = []
    for point in result:
        # Cap the page source at the 100 most recent usable points.
        if len(pulls_all) >= 100:
            break
        # Pull points reuse the issue_id field as their identifier.
        if point['issue_id'] is None:
            continue
        # Resolve the repository display name only for rows we keep.
        oss_name = MOOSEMeta.objects.values('oss_fullname').filter(
            oss_id=point['oss_id'])[0]['oss_fullname']
        pulls_all.append({
            'pull_state': point['pull_state'],
            'pull_is_merged': point['pull_merged'],
            'pull_title': point['title'],
            'pull_body': point['body'],
            'id': point['issue_id'],
            'pull_create_time': point['time'],
            'pull_comments': point['pull_comment_count'],
            'oss_fullname': oss_name,
        })
    paginator = Paginator(pulls_all, 20)
    try:
        customer = paginator.page(page)
    except PageNotAnInteger:
        # Missing or non-numeric page -> first page.
        customer = paginator.page(1)
    except EmptyPage:
        # Out-of-range page -> clamp to the last page instead of a 500.
        customer = paginator.page(paginator.num_pages)
    pull_statistic = MOOSEStatistic.objects.filter(community_id=int(cid))[0:1]
    pull_count = pull_statistic[0].pull_count
    pull_merged_count = pull_statistic[0].pull_merged_count
    extra_info.update({
        'pull': customer,
        'pull_count': pull_count,
        'pull_merged_count': pull_merged_count,
        # Key name (with its historical typo) kept — the template uses it.
        'pull_umerged_count': int(pull_count) - int(pull_merged_count),
        'path': 4,
    })
    return render(request, 'pull.html', extra_info)
def author(request):
    """Render the contributors page for a community.

    Shows the most recently active authors, a per-repository developer
    ranking, and the top commit domains for the community identified by
    ``cid``.

    Args:
        request: Django request; reads ``user_id`` from the session and
            ``cid`` from the query string.

    Returns:
        HttpResponse for the rendered ``author.html`` template.
    """
    extra_info = dict()
    uid = request.session['user_id']
    cid = request.GET.get('cid')
    extra_info.update(get_nav_list(uid, cid))
    # Recently active authors (top 12).
    author = MOOSEStatisticAuthor.objects.filter(community_id=int(cid))[0:12]
    extra_info.update({'author': author})
    # Developer ranking: top 12 authors for every repository belonging to
    # this community, keyed by repository name.
    oss_id = []
    MOOSE_auth = dict()
    oss_list_id = MOOSECommunityList.objects.values("oss_id", 'oss_name').filter(community_id=int(cid))
    for per_oss_id in oss_list_id:
        MOOSE_auth[per_oss_id['oss_name']] = MOOSEAuthorList.objects.filter(oss_id=per_oss_id['oss_id'])[0:12]
        oss_id.append(int(per_oss_id['oss_id']))
    extra_info.update({'MOOSE_auth': MOOSE_auth})
    # Top 10 domains by total commits, aggregated across the community's
    # repositories.  The chart strings keep their trailing comma (the
    # format the template expects).
    oss_domain = MOOSEDomain.objects.values("domain").filter(oss_id__in=oss_id).annotate(commits=Sum('commits'))[0:10]
    oss_domain_name = ''.join(row['domain'] + ',' for row in oss_domain)
    oss_domain_commit = ''.join(str(row['commits']) + ',' for row in oss_domain)
    extra_info.update({'oss_domain_name': oss_domain_name})
    extra_info.update({'oss_domain_commit': oss_domain_commit})
    extra_info.update({'path': 5})
    return render(request, 'author.html', extra_info)
def monitor(request):
    """Render the metric-monitoring page for a community.

    Loads the configured metric definitions and an initial sample series
    from InfluxDB, then renders ``index_monitor.html``.
    """
    extra_info = dict()
    uid = request.session['user_id']
    cid = request.GET.get('cid')
    extra_info.update(get_nav_list(uid, cid))
    # NOTE(review): the initial series is hard-coded to oss_id 23418517
    # and IssueCommentEvent rather than derived from ``cid`` — confirm
    # this is intentional.
    points = client.query("select * from moose_index where oss_id='23418517' and index_type='IssueCommentEvent';").get_points()
    dates = []
    counts = []
    for point in points:
        dates.append(point['time'][0:10])
        counts.append(str(point['index_count']))
    # Comma-separated series (trailing comma kept, as the template expects).
    extra_info.update({
        'index_date': ''.join(d + ',' for d in dates),
        'index_data': ''.join(c + ',' for c in counts),
        'index_name': MOOSEIndex.objects.all(),
        'index_name_cal': MOOSEIndex.objects.filter(cal_need=1),
        'cid': cid,
        'path': 6,
    })
    return render(request, 'index_monitor.html', extra_info)
def getIndex(request):
    """Return the sample IssueCommentEvent series as JSON (AJAX endpoint)."""
    points = client.query("select * from moose_index where oss_id='23418517' and index_type='IssueCommentEvent';").get_points()
    dates = []
    counts = []
    for point in points:
        dates.append(point['time'][0:10])
        counts.append(str(point['index_count']))
    # Same trailing-comma series format as the monitor page.
    payload = {
        'index_date': ''.join(d + ',' for d in dates),
        'index_data': ''.join(c + ',' for c in counts),
    }
    return JsonResponse(payload, safe=False)
def getMonitor(request):
ids_str = request.POST.get('ids')
cid = request.POST.get('cid')
date_start = request.POST.get('date_start')
date_end = request.POST.get('date_end')
date_now = datetime.datetime.now()
#获取今天的时间戳和预测7天后的时间戳
date_now_str = (date_now + datetime.timedelta(days=1)).strftime("%Y-%m-%d")
date_pre_str = (date_now + datetime.timedelta(days=7)).strftime("%Y-%m-%d")
#如果不指定起止时间,默认获取20天的数据
if date_start == '' or date_start == None:
date_start = (date_now + datetime.timedelta(days=-20)).strftime("%Y-%m-%d")
if date_end == '' or date_end == None:
date_end = (date_now + datetime.timedelta(days=1)).strftime("%Y-%m-%d")
#获取时间的iso标准时间,便于查询时序数据库
date_start_array = time.strptime(date_start, "%Y-%m-%d")
date_start_stamp = int(time.mktime(date_start_array))
date_start_ISO = datetime.datetime.fromtimestamp(date_start_stamp).isoformat()+"Z"
date_end_array = time.strptime(date_end, "%Y-%m-%d")
date_end_stamp = int(time.mktime(date_end_array))
date_end_ISO = datetime.datetime.fromtimestamp(date_end_stamp).isoformat()+"Z"
date_now_array = time.strptime(date_end, "%Y-%m-%d")
date_now_stamp = int(time.mktime(date_now_array))
date_now_ISO = datetime.datetime.fromtimestamp(date_now_stamp).isoformat() + "Z"
ids_arr = ids_str.split(',')
index_name_display = []
index = []
index_info = MOOSEIndex.objects.filter(id__in=ids_arr)
extra_info = dict()
##计算自定义公式
formula_customize = MOOSEIndex.objects.filter(community_id=cid, cal_need=2)#'(#W#*0.8)+(#F#*0.5+(#PR#*0.4))'
for per_formula_customize in formula_customize:
formula_customize_custormize = MOOSEIndexFormula.objects.values('cal_formula').filter(index_id=per_formula_customize.id)
str_formula = formula_customize_custormize[0]['cal_formula']
str_formula_new = str_formula.replace("+", "|").replace("-", "|").replace("*", "|").replace("(", "|").replace(")", "|");
str_formula_arr = str_formula_new.split('|')
result_cal = dict()
cal_index_name = []
for i in range(len(str_formula_arr)):
if str_formula_arr[i] == '':
continue
if str_formula_arr[i].find('#') >= 0:
#获取event名称
event_name = MOOSEIndex.objects.values('index_name').filter(cal_name=str_formula_arr[i])
result = client.query("select sum(index_count) from moose_index where community_id='" + str(cid) + "' and index_type = '" + event_name[0]['index_name'] + "' and (time >='" + date_start_ISO + "' and time <= '" + date_end_ISO + "') group by time(24h) fill(0);").get_points()
cal_index_name.append(str_formula_arr[i])
index_cal_date = ''
index_cal_data = ''
index_cal = []
for bb in result:
index_cal_date += bb['time'][0:10] + ','
index_cal_data += str(bb['sum']) + ','
index_cal_date = index_cal_date[:-1]
index_cal_data = index_cal_data[:-1]
index_cal.append(index_cal_date.split(','))
index_cal.append(index_cal_data.split(','))
result_cal.update({str_formula_arr[i]: index_cal})
date1 = datetime.datetime(date_start_array[0], date_start_array[1], date_start_array[2])
date2 = datetime.datetime(date_end_array[0], date_end_array[1], date_end_array[2])
diff_days = (date2 - date1).days
for j in range(diff_days-2):
str_formula_temp = str_formula
for k in range(len(cal_index_name)):
str_formula_temp = str_formula_temp.replace(cal_index_name[k], result_cal[cal_index_name[k]][1][j])
index_cal_count = eval(str_formula_temp)
body = [
{
"measurement": "moose_index_customize",
"time": result_cal[cal_index_name[k]][0][j],
"tags": {
"oss_id": 0,
"community_id": 15,
"index_type": per_formula_customize.index_name
},
"fields": {
"index_count": index_cal_count
},
}
]
res = client.write_points(body)
######
for per_index in index_info:
moose_index_display = MOOSEIndexDisplay.objects.filter(index_id=per_index.id, community_id=cid)
if moose_index_display.count()<=0:
moose_index_display_new = MOOSEIndexDisplay()
moose_index_display_new.index_id = per_index.id
moose_index_display_new.community_id = cid
moose_index_display_new.save()
index_dict = dict()
index_date = ''
index_data = ''
index_date_pre = ''
index_data_pre = ''
index_name_display.append(per_index.index_display)
if per_index.cal_need == 2:
result = client.query("select sum(index_count) from moose_index_customize where community_id='" + str(cid) + "' and index_type = '" + per_index.index_name + "' and (time >='" + date_start_ISO + "' and time <= '" + date_end_ISO + "') group by time(24h) fill(0);").get_points()
else:
result = client.query("select sum(index_count) from moose_index where community_id='" + str(cid) + "' and index_type = '" + per_index.index_name + "' and (time >='" + date_start_ISO + "' and time <= '" + date_end_ISO + "') group by time(24h) fill(0);").get_points()
for aa in result:
index_date += aa['time'][0:10] + ','
index_data += str(aa['sum']) + ','
#如果没有数据,默认全0
if len(index_data) <= 0:
date1 = datetime.datetime(date_start_array[0], date_start_array[1], date_start_array[2])
date2 = datetime.datetime(date_end_array[0], date_end_array[1], date_end_array[2])
diff_days = (date2 - date1).days
for j in range(diff_days):
index_date += (date_now + datetime.timedelta(days=(-diff_days + j + 1))).strftime("%Y-%m-%d")+','
index_data += '0' + ','
index_date = index_date[:-1]
index_data = index_data[:-1]
index_dict.update({'index_date': index_date})
index_dict.update({'index_data': index_data})
index_dict.update({'index_id': per_index.id})
#获取全部数据用来预测,计算平均值及阈值
index_date_pre_arr = []
index_data_pre_arr = []
average_count = 0
if per_index.cal_need == 2:
result_pre = client.query("select sum(index_count) from moose_index_customize where community_id='" + str(cid) + "' and index_type = '" + per_index.index_name + "' and (time <= '" + date_now_ISO + "') group by time(24h) fill(0);").get_points()
else:
result_pre = client.query("select sum(index_count) from moose_index where community_id='" + str(
cid) + "' and index_type = '" + per_index.index_name + "' and (time <= '" + date_now_ISO + "') group by time(24h) fill(0);").get_points()
for per_result_pre in result_pre:
index_date_pre_arr.append(per_result_pre['time'][0:10])
index_data_pre_arr.append(per_result_pre['sum'])
| |
the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.delete_authorization_rule.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'namespaceName': self._serialize.url("namespace_name", namespace_name, 'str'),
'notificationHubName': self._serialize.url("notification_hub_name", notification_hub_name, 'str'),
'authorizationRuleName': self._serialize.url("authorization_rule_name", authorization_rule_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200, 204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
delete_authorization_rule.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NotificationHubs/namespaces/{namespaceName}/notificationHubs/{notificationHubName}/AuthorizationRules/{authorizationRuleName}'}
def get_authorization_rule(
self, resource_group_name, namespace_name, notification_hub_name, authorization_rule_name, custom_headers=None, raw=False, **operation_config):
"""Gets an authorization rule for a NotificationHub by name.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param namespace_name: The namespace name
:type namespace_name: str
:param notification_hub_name: The notification hub name.
:type notification_hub_name: str
:param authorization_rule_name: authorization rule name.
:type authorization_rule_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: SharedAccessAuthorizationRuleResource or ClientRawResponse if
raw=true
:rtype:
~azure.mgmt.notificationhubs.models.SharedAccessAuthorizationRuleResource
or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.get_authorization_rule.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'namespaceName': self._serialize.url("namespace_name", namespace_name, 'str'),
'notificationHubName': self._serialize.url("notification_hub_name", notification_hub_name, 'str'),
'authorizationRuleName': self._serialize.url("authorization_rule_name", authorization_rule_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('SharedAccessAuthorizationRuleResource', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get_authorization_rule.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NotificationHubs/namespaces/{namespaceName}/notificationHubs/{notificationHubName}/AuthorizationRules/{authorizationRuleName}'}
    def list(
            self, resource_group_name, namespace_name, custom_headers=None, raw=False, **operation_config):
        """Lists the notification hubs associated with a namespace.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param namespace_name: The namespace name.
        :type namespace_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of NotificationHubResource
        :rtype:
         ~azure.mgmt.notificationhubs.models.NotificationHubResourcePaged[~azure.mgmt.notificationhubs.models.NotificationHubResource]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Page fetcher invoked lazily by the Paged iterator: builds the
        # first-page URL from the operation template; for later pages the
        # service-provided next_link already carries its own query string.
        def internal_paging(next_link=None, raw=False):

            if not next_link:
                # Construct URL
                url = self.list.metadata['url']
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'namespaceName': self._serialize.url("namespace_name", namespace_name, 'str'),
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)

                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

            else:
                url = next_link
                query_parameters = {}

            # Construct headers
            header_parameters = {}
            header_parameters['Content-Type'] = 'application/json; charset=utf-8'
            if self.config.generate_client_request_id:
                # Correlation id for service-side request tracing.
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

            # Construct and send request
            request = self._client.get(url, query_parameters)
            response = self._client.send(
                request, header_parameters, stream=False, **operation_config)

            # Any non-200 status is surfaced as a CloudError with the
            # service's request id attached for support diagnostics.
            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp

            return response

        # Deserialize response
        deserialized = models.NotificationHubResourcePaged(internal_paging, self._deserialize.dependencies)

        if raw:
            header_dict = {}
            client_raw_response = models.NotificationHubResourcePaged(internal_paging, self._deserialize.dependencies, header_dict)
            return client_raw_response

        return deserialized
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NotificationHubs/namespaces/{namespaceName}/notificationHubs'}
    def list_authorization_rules(
            self, resource_group_name, namespace_name, notification_hub_name, custom_headers=None, raw=False, **operation_config):
        """Gets the authorization rules for a NotificationHub.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param namespace_name: The namespace name
        :type namespace_name: str
        :param notification_hub_name: The notification hub name.
        :type notification_hub_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of
         SharedAccessAuthorizationRuleResource
        :rtype:
         ~azure.mgmt.notificationhubs.models.SharedAccessAuthorizationRuleResourcePaged[~azure.mgmt.notificationhubs.models.SharedAccessAuthorizationRuleResource]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Page fetcher invoked lazily by the Paged iterator: builds the
        # first-page URL from the operation template; for later pages the
        # service-provided next_link already carries its own query string.
        def internal_paging(next_link=None, raw=False):

            if not next_link:
                # Construct URL
                url = self.list_authorization_rules.metadata['url']
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'namespaceName': self._serialize.url("namespace_name", namespace_name, 'str'),
                    'notificationHubName': self._serialize.url("notification_hub_name", notification_hub_name, 'str'),
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)

                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

            else:
                url = next_link
                query_parameters = {}

            # Construct headers
            header_parameters = {}
            header_parameters['Content-Type'] = 'application/json; charset=utf-8'
            if self.config.generate_client_request_id:
                # Correlation id for service-side request tracing.
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

            # Construct and send request
            request = self._client.get(url, query_parameters)
            response = self._client.send(
                request, header_parameters, stream=False, **operation_config)

            # Any non-200 status is surfaced as a CloudError with the
            # service's request id attached for support diagnostics.
            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp

            return response

        # Deserialize response
        deserialized = models.SharedAccessAuthorizationRuleResourcePaged(internal_paging, self._deserialize.dependencies)

        if raw:
            header_dict = {}
            client_raw_response = models.SharedAccessAuthorizationRuleResourcePaged(internal_paging, self._deserialize.dependencies, header_dict)
            return client_raw_response

        return deserialized
    list_authorization_rules.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NotificationHubs/namespaces/{namespaceName}/notificationHubs/{notificationHubName}/AuthorizationRules'}
def list_keys(
self, resource_group_name, namespace_name, notification_hub_name, authorization_rule_name, custom_headers=None, raw=False, **operation_config):
"""Gets the Primary and Secondary ConnectionStrings to the NotificationHub
.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param namespace_name: The namespace name.
:type namespace_name: str
:param notification_hub_name: The notification hub name.
:type notification_hub_name: str
:param authorization_rule_name: The connection string of the
NotificationHub for the specified authorizationRule.
:type authorization_rule_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: ResourceListKeys or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.notificationhubs.models.ResourceListKeys or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.list_keys.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'namespaceName': self._serialize.url("namespace_name", namespace_name, 'str'),
'notificationHubName': self._serialize.url("notification_hub_name", notification_hub_name, 'str'),
'authorizationRuleName': self._serialize.url("authorization_rule_name", authorization_rule_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ResourceListKeys', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
list_keys.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NotificationHubs/namespaces/{namespaceName}/notificationHubs/{notificationHubName}/AuthorizationRules/{authorizationRuleName}/listKeys'}
def regenerate_keys(
self, resource_group_name, namespace_name, notification_hub_name, authorization_rule_name, policy_key=None, custom_headers=None, raw=False, **operation_config):
"""Regenerates the Primary/Secondary Keys to the NotificationHub
Authorization Rule.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param namespace_name: The namespace name.
:type namespace_name: str
:param notification_hub_name: The notification hub name.
:type notification_hub_name: str
:param authorization_rule_name: The connection string of the
NotificationHub for the specified authorizationRule.
:type authorization_rule_name: str
:param policy_key: Name of the key that has to be regenerated for the
Namespace/Notification Hub Authorization Rule. The value can be
Primary Key/Secondary Key.
:type policy_key: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: ResourceListKeys or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.notificationhubs.models.ResourceListKeys or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
parameters = models.PolicykeyResource(policy_key=policy_key)
# Construct URL
url = self.regenerate_keys.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'namespaceName': self._serialize.url("namespace_name", namespace_name, 'str'),
'notificationHubName': self._serialize.url("notification_hub_name", notification_hub_name, 'str'),
'authorizationRuleName': self._serialize.url("authorization_rule_name", authorization_rule_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'PolicykeyResource')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ResourceListKeys', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
regenerate_keys.metadata = | |
g_conv_temp = g_conv_temp * g_feii_rel_int
# Sum templates along rows
g_template = np.sum(g_conv_temp, axis=1)
g_template[(lam_gal <4472) & (lam_gal >5147)] = 0
# Z template
# Perform the convolution
z_conv_temp = convolve_gauss_hermite(z_feii_fft, npad, float(velscale),\
[opt_feii_voff, opt_feii_fwhm/2.3548], lam_gal.shape[0],
velscale_ratio=1, sigma_diff=0, vsyst=vsyst)
# Normalize amplitudes to 1
z_norm = np.array([np.max(z_conv_temp[:,i]) for i in range(np.shape(z_conv_temp)[1])])
z_norm[z_norm<1.e-6] = 1.0
z_conv_temp = z_conv_temp/z_norm
# Multiply by relative intensities
z_conv_temp = z_conv_temp * z_feii_rel_int
# Sum templates along rows
z_template = np.sum(z_conv_temp, axis=1)
# Multiply by FeII amplitude
z_template = z_template * z_feii_amp
z_template[(lam_gal <4418) & (lam_gal >5428)] = 0
return f_template,s_template,g_template,z_template
def calculate_k10_rel_int(transition,center,gf,e2,I2,temp):
	"""
	Calculate relative intensities for the S, F, and G FeII line groups
	from the Kovacevic et al. (2010) template as a function of temperature.

	Parameters
	----------
	transition : str
		FeII line group; one of 'F', 'S', or 'G'.
	center : float
		line center wavelength [A]
	gf : float
		oscillator strength (g*f) of the line
	e2 : float
		upper-level energy of the line [J]
	I2 : float
		intensity of the reference line the group is normalized to
	temp : float
		excitation temperature [K]

	Returns
	-------
	rel_int : float
		relative intensity of the line within its group.

	Raises
	------
	ValueError
		if `transition` is not one of 'F', 'S', or 'G' (the original
		implementation silently returned None in this case).
	"""
	k = 1.38064852e-23 # Boltzmann constant; m2 kg s-2 K-1
	# Reference line each group is normalized to: (center [A], gf, E2 [J]).
	#   F -> 4549.474, S -> 5018.440, G -> 5316.615
	reference = {
		'F': (4549.474, 1.10e-02, 8.896255e-19),
		'S': (5018.440, 3.98e-02, 8.589111e-19),
		'G': (5316.615, 1.17e-02, 8.786549e-19),
	}
	if transition not in reference:
		raise ValueError("transition must be one of 'F', 'S', or 'G'; got %r" % (transition,))
	ref_center, ref_gf, ref_e2 = reference[transition]
	# Boltzmann-weighted intensity ratio relative to the reference line.
	rel_int = I2*(ref_center/center)**3 * (gf/ref_gf) * np.exp(-1.0/(k*temp) * (e2 - ref_e2))
	return rel_int
##################################################################################
##################################################################################
def VW01_uv_iron_template(lam_gal, pdict, uv_iron_template, uv_iron_options, velscale, run_dir):
	"""
	Generate the UV iron model from Vestergaard & Wilkes (2001).

	If the UV iron FWHM and/or VOFF are free to vary, the convolution of the
	template with a Gauss-Hermite kernel is performed using the PPXF framework.

	Parameters
	----------
	lam_gal : array
		fitting wavelength grid [A]
	pdict : dict
		free-parameter values; may contain 'UV_IRON_AMP', 'UV_IRON_FWHM',
		'UV_IRON_VOFF' depending on uv_iron_options
	uv_iron_template : tuple
		(uv_iron_fft, npad, vsyst) pre-computed FFT of the template
	uv_iron_options : dict
		per-parameter {'bool': ..., 'uv_iron_val': ...} constancy options
	velscale : scalar
		velocity scale [km/s/pixel]
	run_dir : str
		unused here; kept for call-signature compatibility

	Returns
	-------
	template : array
		UV iron model, same length as lam_gal (zeros if the convolution
		produced NaNs).
	"""
	# Unpack the pre-computed template FFT (uv_iron_fft, npad, vsyst)
	uv_iron_fft, npad, vsyst = uv_iron_template
	# Amplitude: free parameter unless held constant by the options
	if (uv_iron_options['uv_amp_const']['bool']==False):
		uv_iron_amp = pdict['UV_IRON_AMP']
	else:
		uv_iron_amp = uv_iron_options['uv_amp_const']['uv_iron_val']
	# FWHM [km/s]: free parameter unless held constant; clamp at 0.01 so the
	# convolution kernel dispersion is always positive
	if (uv_iron_options['uv_fwhm_const']['bool']==False):
		uv_iron_fwhm = pdict['UV_IRON_FWHM']
	else:
		uv_iron_fwhm = uv_iron_options['uv_fwhm_const']['uv_iron_val']
	if uv_iron_fwhm <= 0.01: uv_iron_fwhm = 0.01
	# Velocity offset [km/s]: free parameter unless held constant
	if (uv_iron_options['uv_voff_const']['bool']==False):
		uv_iron_voff = pdict['UV_IRON_VOFF']
	else:
		uv_iron_voff = uv_iron_options['uv_voff_const']['uv_iron_val']
	# Convolve the UV iron FFT template and return the inverse Fourier transform.
	conv_temp = convolve_gauss_hermite(uv_iron_fft, npad, float(velscale),
					   [uv_iron_voff, uv_iron_fwhm/2.3548], lam_gal.shape[0],
					   velscale_ratio=1, sigma_diff=0, vsyst=vsyst)
	# Reshape, re-normalize the peak to 1, and scale by the amplitude
	conv_temp = conv_temp.reshape(-1)
	conv_temp = conv_temp/np.max(conv_temp)
	template = uv_iron_amp * conv_temp
	# Zero the model outside the template's valid range (1074-3090 A) to
	# prevent convolution loops.
	# BUGFIX: the original mask used '&', which can never be True for a
	# (<1074) and (>3090) pair, so nothing was ever zeroed; '|' selects the
	# wavelengths OUTSIDE the range as intended by the comment.
	template[(lam_gal < 1074) | (lam_gal > 3090)] = 0
	# If features were too close to the edges of the fitting region (usually
	# because the region is too small), the result can contain NaNs; return
	# zeros in that case. (The previous isinstance(int/float) checks were
	# dead code: `template` is always an ndarray here.)
	if np.isnan(np.sum(template)):
		template = np.zeros(len(lam_gal))
	return template
##################################################################################
##################################################################################
def generate_balmer_continuum(lam_gal,lam_balmer, spec_high_balmer,velscale,
	balmer_ratio, balmer_amp, balmer_fwhm, balmer_voff, balmer_Teff, balmer_tau):
	"""
	Generate the Balmer continuum (blackbody attenuated by Balmer-edge
	opacity) plus the broadened high-order Balmer emission lines,
	evaluated on the lam_gal wavelength grid.

	Parameters
	----------
	lam_gal : array
		fitting wavelength grid [A]
	lam_balmer : array
		wavelength grid of spec_high_balmer [A]
	spec_high_balmer : array
		template of high-order Balmer lines on the lam_balmer grid
	velscale : scalar
		velocity scale [km/s/pixel]
	balmer_ratio : scalar
		normalization of the high-order lines relative to the continuum
	balmer_amp : scalar
		overall amplitude applied to the final model
	balmer_fwhm : scalar
		broadening FWHM [km/s]
	balmer_voff : scalar
		velocity offset [km/s]
	balmer_Teff : scalar
		blackbody effective temperature [K]
	balmer_tau : scalar
		optical depth at the Balmer edge

	Returns
	-------
	conv_temp : array
		broadened Balmer continuum + high-order line model, same length
		as lam_gal, with peak scaled to balmer_amp.
	"""
	# We need to generate a new grid for the Balmer continuum that matches
	# that we made for the higher-order lines
	def blackbody(lam, balmer_Teff):
		# Planck function B_lambda(T).
		# NOTE(review): constants are expressed in Angstrom-scaled units so
		# that h*c/(lam*k*T) is dimensionless on an Angstrom grid; only the
		# shape matters here since the continuum is re-normalized below.
		c = 2.99792458e+18 # speed of light [A/s]
		h = 6.626196e-11 # Planck's constant [g*A2/s2 * s]
		k = 1.380649 # Boltzmann Constant [g*A2/s2 1/K]
		Blam = ((2.0*h*c**2.0)/lam**5.0)*(1.0/(np.exp((h*c)/(lam*k*balmer_Teff))-1.0))
		return Blam
	# Construct Balmer continuum from lam_balmer
	lam_edge = 3646.0 # Balmer edge wavelength [A]
	Blam = blackbody(lam_balmer, balmer_Teff) # blackbody function [erg/s]
	# Attenuate the blackbody by the Balmer-edge opacity term (1 - e^-tau(lam))
	cont = Blam * (1.0-1.0/np.exp(balmer_tau*(lam_balmer/lam_edge)**3.0))
	# Normalize the continuum peak to 1
	cont = cont / np.max(cont)
	# Set Balmer continuum to zero after (at indices past) the Balmer edge
	cont[find_nearest(lam_balmer,lam_edge)[1]:] = 0.0
	# Normalize higher-order lines at Balmer edge
	# Unsure of how Calderone et al. (2017) (QSFit) did this normalization, so we added
	# fudge factor of 1.36 to match the QSFit implementation of the Balmer continuum.
	# spec_high_balmer = spec_high_balmer/spec_high_balmer[find_nearest(lam_balmer,lam_edge+10)[1]] * balmer_ratio #* 1.36
	# Guard: only normalize when the line template is non-trivial
	# (avoids division by zero for an all-zero template).
	if (np.sum(spec_high_balmer)>0):
		spec_high_balmer = spec_high_balmer/np.max(spec_high_balmer) * balmer_ratio #* 1.36
	# Sum the two components
	full_balmer = spec_high_balmer + cont
	# Pre-compute the FFT and vsyst (velocity shift between the two grids)
	balmer_fft, balmer_npad = template_rfft(full_balmer)
	c = 299792.458 # speed of light in km/s
	vsyst = np.log(lam_balmer[0]/lam_gal[0])*c
	# Clamp the FWHM so the convolution kernel dispersion stays positive
	if balmer_fwhm<= 0.01: balmer_fwhm = 0.01
	# Broaden the higher-order Balmer lines
	conv_temp = convolve_gauss_hermite(balmer_fft, balmer_npad, float(velscale),\
				[balmer_voff, balmer_fwhm/2.3548], lam_gal.shape[0],
				velscale_ratio=1, sigma_diff=0, vsyst=vsyst)
	# Re-anchor the broadened model at the Balmer edge
	conv_temp = conv_temp/conv_temp[find_nearest(lam_gal,lam_edge)[1]] * balmer_ratio
	conv_temp = conv_temp.reshape(-1)
	# Normalize the full continuum peak to 1 and scale by the amplitude
	# norm_balmer = conv_temp[find_nearest(lam_gal,3000.0)[1]]
	# conv_temp = conv_temp/norm_balmer * balmer_amp
	conv_temp = conv_temp/np.max(conv_temp) * balmer_amp
	# Debug plotting (disabled; flip to `if 1:` to inspect the model)
	if 0:
		# Plot
		fig = plt.figure(figsize=(14,5))
		ax1 = fig.add_subplot(1,1,1)
		ax1.set_title('Balmer Continuum')
		# ax1.plot(lam_balmer, cont/np.max(cont), color='xkcd:cerulean')
		# ax1.plot(lam_balmer, spec_high_balmer/np.max(spec_high_balmer), color='xkcd:bright red')
		ax1.plot(lam_gal, conv_temp, color='xkcd:bright red',linewidth=0.75)
		ax1.axvline(lam_edge,linestyle='--',color='xkcd:red',linewidth=1.0)
		ax1.axvline(3000,linestyle='--',color='xkcd:black',linewidth=0.5)
		ax1.axhline(1.0,linestyle='--',color='xkcd:black',linewidth=0.5)
		# ax1.axhline(0.6,linestyle='--',color='xkcd:black',linewidth=0.5)
		ax1.set_ylim(0.0,)
		# ax1.set_xlim(1000,4500)
		fontsize = 16
		ax1.set_xlabel(r"Wavelength ($\lambda$)",fontsize=fontsize)
	return conv_temp
##################################################################################
#### Simple Power-Law Template ###################################################
def simple_power_law(x, amp, alpha):
	"""
	Simple power-law model for the AGN continuum
	(Calderone et al. 2017).

	Parameters
	----------
	x : array_like
		wavelength vector (angstroms)
	amp : float
		continuum amplitude (flux density units)
	alpha : float
		power-law slope

	Returns
	----------
	C : array
		AGN continuum model the same length as x
	"""
	# Pivot wavelength: the midpoint of the wavelength range
	# (expression kept in max - half-range form).
	x_pivot = np.max(x) - (0.5 * (np.max(x) - np.min(x)))
	return amp * (x / x_pivot) ** alpha
##################################################################################
#### Smoothly-Broken Power-Law Template ##########################################
def broken_power_law(x, amp, x_break, alpha_1, alpha_2, delta):
	"""
	Smoothly-broken power-law continuum model, for use when there is
	sufficient near-UV coverage.

	Follows the astropy SmoothlyBrokenPowerLaw1D parameterization
	(https://docs.astropy.org/en/stable/api/astropy.modeling.powerlaws.
	SmoothlyBrokenPowerLaw1D.html).

	Parameters
	----------
	x : array_like
		wavelength vector (angstroms)
	amp : float [0,max]
		continuum amplitude (flux density units)
	x_break : float [x_min,x_max]
		wavelength of the break
	alpha_1 : float [-4,2]
		power-law slope on blue side.
	alpha_2 : float [-4,2]
		power-law slope on red side.
	delta : float [0.001,1.0]
		smoothness of the break

	Returns
	----------
	C : array
		AGN continuum model the same length as x
	"""
	xr = x / x_break
	# Blue-side power law times a smooth transition factor that bends the
	# slope from alpha_1 to alpha_2 across the break.
	transition = (0.5 * (1.0 + xr ** (1.0 / delta))) ** ((alpha_2 - alpha_1) * delta)
	return amp * xr ** alpha_1 * transition
##################################################################################
##################################################################################
def gaussian_line_profile(lam_gal, center, amp, fwhm, voff, center_pix, fwhm_res_kms, velscale):
	"""
	Return a Gaussian emission-line profile, evaluated on the pixel grid of
	lam_gal, with the given amplitude, width, and velocity offset.
	"""
	# Fold the instrumental resolution into the line width (in quadrature)
	fwhm_tot = np.sqrt(fwhm**2 + fwhm_res_kms**2)
	sigma = fwhm_tot / 2.3548          # Gaussian dispersion in km/s
	sigma_pix = sigma / (velscale)     # dispersion in pixels (velscale = km/s/pixel)
	if sigma_pix <= 0.01:
		sigma_pix = 0.01           # floor to keep the kernel well-defined
	# Apply the velocity offset as a pixel shift of the line center
	shifted_center = center_pix + voff / (velscale)
	# Column vector of pixel indices so the Gaussian broadcasts per line center
	pix = np.array(range(len(lam_gal))).reshape((len(lam_gal), 1))
	profile = amp * np.exp(-0.5 * (pix - (shifted_center))**2 / (sigma_pix)**2)
	profile = np.sum(profile, axis=1)
	# Zero-out numerical dust and pin the edges to avoid weird edge values
	profile[(profile > -1e-6) & (profile < 1e-6)] = 0.0
	profile[0] = profile[1]
	profile[-1] = profile[-2]
	return profile
##################################################################################
def lorentzian_line_profile(lam_gal, center, amp, fwhm, voff, center_pix, fwhm_res_kms, velscale, noise):
	"""
	Return a Lorentzian emission-line profile, evaluated on the pixel grid
	of lam_gal, with wings truncated below the median noise level.
	(See: https://docs.astropy.org/en/stable/api/astropy.modeling.functional_models.Lorentz1D.html)
	"""
	# Fold the instrumental resolution into the line width (in quadrature)
	fwhm_tot = np.sqrt(fwhm**2 + fwhm_res_kms**2)
	fwhm_pix = fwhm_tot / velscale     # FWHM in pixels (velscale = km/s/pixel)
	if fwhm_pix <= 0.01:
		fwhm_pix = 0.01            # floor to keep the kernel well-defined
	# Apply the velocity offset as a pixel shift of the line center
	shifted_center = center_pix + voff / velscale
	# Column vector of pixel indices so the profile broadcasts per line center
	pix = np.array(range(len(lam_gal))).reshape((len(lam_gal), 1))
	half_width = 0.5 * fwhm_pix
	profile = amp * ((half_width**2) / (half_width**2 + (pix - shifted_center)**2))
	profile = np.sum(profile, axis=1)
	# Truncate the extended Lorentzian wings below the median noise level
	noise_floor = np.median(noise)
	profile[profile <= noise_floor] = 0.0
	profile[profile > noise_floor] -= noise_floor
	# Zero-out numerical dust and pin the edges to avoid weird edge values
	profile[(profile > -1e-6) & (profile < 1e-6)] = 0.0
	profile[0] = profile[1]
	profile[-1] = profile[-2]
	return profile
##################################################################################
def gauss_hermite_line_profile(lam_gal,center,amp,fwhm,voff,hmoments,center_pix,fwhm_res_kms,velscale,noise):
"""
Produces a Gauss-Hermite vector the length of
x with the specified parameters.
"""
# Take into account instrumental dispersion (FWHM resolution)
fwhm = np.sqrt(fwhm**2+fwhm_res_kms**2)
sigma_pix = fwhm/2.3548/velscale # dispersion in pixels (velscale = km/s/pixel)
if sigma_pix<=0.01: sigma_pix = 0.01
voff_pix = voff/velscale # velocity offset in pixels
center_pix = center_pix + voff_pix # shift the line center by voff in pixels
#
x_pix = np.array(range(len(lam_gal))) # pixels vector
x_pix | |
# shape=(nlst,ndays,ntriads,nlags=1)
if nsamples_coh > 1:
awts_shape = tuple(NP.ones(cpds[smplng]['whole']['dspec']['mean'].ndim, dtype=NP.int))
awts = NP.ones(awts_shape, dtype=NP.complex)
awts_shape = NP.asarray(awts_shape)
for caxind,caxis in enumerate(cohax):
curr_awts_shape = NP.copy(awts_shape)
curr_awts_shape[caxis] = -1
awts = awts * autoinfo['wts'][caxind].reshape(tuple(curr_awts_shape))
for stat in ['mean', 'median']:
if dpool == 'submodel':
dspec = NP.copy(cpds[smplng][dpool]['dspec'][dspec_multidim_idx])
else:
dspec = NP.copy(cpds[smplng][dpool]['dspec'][stat][dspec_multidim_idx])
if nsamples_coh > 1:
if stat == 'mean':
dspec = NP.sum(twts[twts_multidim_idx][NP.newaxis,...] * awts * dspec[dspec_multidim_idx], axis=cohax, keepdims=True) / NP.sum(twts[twts_multidim_idx][NP.newaxis,...] * awts, axis=cohax, keepdims=True)
else:
dspec = NP.median(dspec[dspec_multidim_idx], axis=cohax, keepdims=True)
if nsamples_incoh > 1:
expandax_map = {}
wts_shape = tuple(NP.ones(dspec.ndim, dtype=NP.int))
preXwts = NP.ones(wts_shape, dtype=NP.complex)
wts_shape = NP.asarray(wts_shape)
for incaxind,incaxis in enumerate(xinfo['axes']):
curr_wts_shape = NP.copy(wts_shape)
curr_wts_shape[incaxis] = -1
preXwts = preXwts * xinfo['wts']['preX'][incaxind].reshape(tuple(curr_wts_shape))
dspec1 = NP.copy(dspec)
dspec2 = NP.copy(dspec)
preXwts1 = NP.copy(preXwts)
preXwts2 = NP.copy(preXwts)
for incax in NP.sort(incohax)[::-1]:
dspec1 = NP.expand_dims(dspec1, axis=incax)
preXwts1 = NP.expand_dims(preXwts1, axis=incax)
if incax == 1:
preXwts1_outshape = list(preXwts1.shape)
preXwts1_outshape[incax+1] = dspec1.shape[incax+1]
preXwts1_outshape = tuple(preXwts1_outshape)
preXwts1 = NP.broadcast_to(preXwts1, preXwts1_outshape).copy() # For some strange reason the NP.broadcast_to() creates a "read-only" immutable array which is changed to writeable by copy()
preXwts2_tmp = NP.expand_dims(preXwts2, axis=incax)
preXwts2_shape = NP.asarray(preXwts2_tmp.shape)
preXwts2_shape[incax] = lstshifts.size
preXwts2_shape[incax+1] = preXwts1_outshape[incax+1]
preXwts2_shape = tuple(preXwts2_shape)
preXwts2 = NP.broadcast_to(preXwts2_tmp, preXwts2_shape).copy() # For some strange reason the NP.broadcast_to() creates a "read-only" immutable array which is changed to writeable by copy()
dspec2_tmp = NP.expand_dims(dspec2, axis=incax)
dspec2_shape = NP.asarray(dspec2_tmp.shape)
dspec2_shape[incax] = lstshifts.size
# dspec2_shape = NP.insert(dspec2_shape, incax, lstshifts.size)
dspec2_shape = tuple(dspec2_shape)
dspec2 = NP.broadcast_to(dspec2_tmp, dspec2_shape).copy() # For some strange reason the NP.broadcast_to() creates a "read-only" immutable array which is changed to writeable by copy()
for lstshiftind, lstshift in enumerate(lstshifts):
dspec2[:,lstshiftind,...] = NP.roll(dspec2_tmp[:,0,...], lstshift, axis=incax)
dspec2[:,lstshiftind,:lstshift,...] = NP.nan
preXwts2[:,lstshiftind,...] = NP.roll(preXwts2_tmp[:,0,...], lstshift, axis=incax)
preXwts2[:,lstshiftind,:lstshift,...] = NP.nan
else:
dspec2 = NP.expand_dims(dspec2, axis=incax+1)
preXwts2 = NP.expand_dims(preXwts2, axis=incax+1)
expandax_map[incax] = incax + NP.arange(2)
for ekey in expandax_map:
if ekey > incax:
expandax_map[ekey] += 1
result[smplng][dpool][stat] = factor.reshape((-1,)+tuple(NP.ones(dspec1.ndim-1, dtype=NP.int))) * (dspec1*U.Unit('Jy Hz') * preXwts1) * (dspec2*U.Unit('Jy Hz') * preXwts2).conj()
if xinfo['wts']['preXnorm']:
result[smplng][dpool][stat] = result[smplng][dpool][stat] / NP.nansum(preXwts1 * preXwts2.conj(), axis=NP.union1d(NP.where(logical_or(NP.asarray(preXwts1.shape)>1, NP.asarray(preXwts2.shape)>1))), keepdims=True) # Normalize by summing the weights over the expanded axes
if (len(xinfo['collapse_axes']) > 0) or (xinfo['avgcov']):
# if any one of collapsing of incoherent axes or
# averaging of full covariance is requested
diagoffsets = {} # Stores the correlation index difference along each axis.
diagweights = {} # Stores the number of points summed in the trace along the offset diagonal
for colaxind, colax in enumerate(xinfo['collapse_axes']):
if colax == 1:
shp = NP.ones(dspec.ndim, dtype=NP.int)
shp[colax] = lst_ind.size
multdim_idx = tuple([NP.arange(axdim) for axdim in shp])
diagweights[colax] = NP.sum(NP.logical_not(NP.isnan(dspec[multdim_idx]))) - lstshifts
# diagweights[colax] = result[smplng][dpool][stat].shape[expandax_map[colax][-1]] - lstshifts
if stat == 'mean':
result[smplng][dpool][stat] = NP.nanmean(result[smplng][dpool][stat], axis=expandax_map[colax][-1])
else:
result[smplng][dpool][stat] = NP.nanmedian(result[smplng][dpool][stat], axis=expandax_map[colax][-1])
diagoffsets[colax] = lstshifts
else:
pspec_unit = result[smplng][dpool][stat].si.unit
result[smplng][dpool][stat], offsets, diagwts = OPS.array_trace(result[smplng][dpool][stat].si.value, offsets=None, axis1=expandax_map[colax][0], axis2=expandax_map[colax][1], outaxis='axis1')
diagwts_shape = NP.ones(result[smplng][dpool][stat].ndim, dtype=NP.int)
diagwts_shape[expandax_map[colax][0]] = diagwts.size
diagoffsets[colax] = offsets
diagweights[colax] = NP.copy(diagwts)
result[smplng][dpool][stat] = result[smplng][dpool][stat] * pspec_unit / diagwts.reshape(diagwts_shape)
for ekey in expandax_map:
if ekey > colax:
expandax_map[ekey] -= 1
expandax_map[colax] = NP.asarray(expandax_map[colax][0]).ravel()
wts_shape = tuple(NP.ones(result[smplng][dpool][stat].ndim, dtype=NP.int))
postXwts = NP.ones(wts_shape, dtype=NP.complex)
wts_shape = NP.asarray(wts_shape)
for colaxind, colax in enumerate(xinfo['collapse_axes']):
curr_wts_shape = NP.copy(wts_shape)
curr_wts_shape[expandax_map[colax]] = -1
postXwts = postXwts * xinfo['wts']['postX'][colaxind].reshape(tuple(curr_wts_shape))
result[smplng][dpool][stat] = result[smplng][dpool][stat] * postXwts
axes_to_sum = tuple(NP.asarray([expandax_map[colax] for colax in xinfo['collapse_axes']]).ravel()) # for post-X normalization and collapse of covariance matrix
if xinfo['wts']['postXnorm']:
result[smplng][dpool][stat] = result[smplng][dpool][stat] / NP.nansum(postXwts, axis=axes_to_sum, keepdims=True) # Normalize by summing the weights over the collapsed axes
if xinfo['avgcov']:
# collapse the axes further (postXwts have already
# been applied)
diagoffset_weights = 1.0
for colaxind in zip(*sorted(zip(NP.arange(xinfo['collapse_axes'].size), xinfo['collapse_axes']), reverse=True))[0]:
# It is important to sort the collapsable axes in
# reverse order before deleting elements below,
# otherwise the axes ordering may be get messed up
diagoffset_weights_shape = NP.ones(result[smplng][dpool][stat].ndim, dtype=NP.int)
diagoffset_weights_shape[expandax_map[xinfo['collapse_axes'][colaxind]][0]] = diagweights[xinfo['collapse_axes'][colaxind]].size
diagoffset_weights = diagoffset_weights * diagweights[xinfo['collapse_axes'][colaxind]].reshape(diagoffset_weights_shape)
del diagoffsets[xinfo['collapse_axes'][colaxind]]
result[smplng][dpool][stat] = NP.nansum(result[smplng][dpool][stat]*diagoffset_weights, axis=axes_to_sum, keepdims=True) / NP.nansum(diagoffset_weights, axis=axes_to_sum, keepdims=True)
else:
result[smplng][dpool][stat] = factor.reshape((-1,)+tuple(NP.ones(dspec.ndim-1, dtype=NP.int))) * NP.abs(dspec * U.Jy)**2
diagoffsets = {}
expandax_map = {}
if units == 'Jy':
result[smplng][dpool][stat] = result[smplng][dpool][stat].to('Jy2 Mpc')
elif units == 'K':
result[smplng][dpool][stat] = result[smplng][dpool][stat].to('K2 Mpc3')
else:
raise ValueError('Input value for units invalid')
result[smplng][dpool]['diagoffsets'] = diagoffsets
result[smplng][dpool]['diagweights'] = diagweights
result[smplng][dpool]['axesmap'] = expandax_map
result[smplng][dpool]['nsamples_incoh'] = nsamples_incoh
result[smplng][dpool]['nsamples_coh'] = nsamples_coh
return result
############################################################################
def compute_power_spectrum_uncertainty(self, cpds=None, selection=None,
autoinfo=None,xinfo=None,
cosmo=cosmo100, units='K',
beamparms=None):
"""
------------------------------------------------------------------------
Compute uncertainty in the power spectrum of closure phase data. It is
in units of Mpc/h
Inputs:
cpds [dictionary] A dictionary that contains the 'oversampled' (if
resample=False) and/or 'resampled' (if resample=True) delay
spectrum information on the key 'errinfo'. If it is not
specified the attributes cPhaseDS['errinfo'] and
cPhaseDS_resampled['errinfo'] are used. Under each of these
sampling keys, it holds a dictionary that has the following
keys and values:
'freq_center' [numpy array] contains the center frequencies
(in Hz) of the frequency subbands of the subband
delay spectra. It is of size n_win. It is
roughly equivalent to redshift(s)
'freq_wts' [numpy array] Contains frequency weights applied
on each frequency sub-band during the subband
delay transform. It is of size n_win x nchan.
'bw_eff' [numpy array] contains the effective bandwidths
(in Hz) of the subbands being delay transformed.
It is of size n_win. It is roughly equivalent to
width in redshift or along line-of-sight
'shape' [string] shape of the window function applied.
Accepted values are 'rect' (rectangular), 'bhw'
(Blackman-Harris), 'bnw' (Blackman-Nuttall).
'fftpow' [scalar] the power to which the FFT of the window
was raised. The value is be a positive scalar
with default = 1.0
'npad' [scalar] Numbber of zero-padded channels before
performing the subband delay transform.
'lags' [numpy array] lags of the subband delay spectra
after padding in frequency during the transform.
It is of size nlags. The lags roughly correspond
to k_parallel.
'lag_kernel' [numpy array] delay transform of the frequency
weights under the key 'freq_wts'. It is of size
n_bl x n_win x nlags x n_t.
'lag_corr_length'
[numpy array] It is the correlation timescale
(in pixels) of the subband delay spectra. It is
proportional to inverse of effective bandwidth.
It is of size n_win. The unit size of a pixel is
determined by the difference between adjacent
pixels in lags under key 'lags' which in turn is
effectively inverse of the effective bandwidth
of the subband specified in bw_eff
'errinfo' [dictionary] It has two keys 'dspec0' and
'dspec1' each of which are dictionaries with
the following keys and values:
'twts' [numpy array] Weights for the subsample
difference. It is of shape (nlst, ndays,
ntriads, nchan)
'mean' [numpy array] Delay spectrum of the
subsample difference obtained by using
the mean statistic. It is of shape
(nspw, nlst, ndays, ntriads, nlags)
'median'
[numpy array] Delay spectrum of the
subsample difference obtained by using
the median statistic. It is of shape
(nspw, nlst, ndays, ntriads, nlags)
selection [NoneType or dictionary] Selection parameters based on which
triad, LST, and day indices will be returned. If set to None
(default), all triad, LST, and day indices will be returned.
Otherwise it must be a dictionary with the following keys
and values:
'triads' [NoneType or list of 3-element tuples] If set
to None (default), indices of all triads are
returned. Otherwise, the specific triads must
be specified such as [(1,2,3), (1,2,4), ...]
and their indices will be returned
'lst' [NoneType, list or numpy array] If set to None
(default), indices of all LST are returned.
Otherwise must be a list or numpy array
containing indices to LST.
'days' [NoneType, list or numpy array] If set to None
(default), indices of all days are returned.
Otherwise must be | |
<reponame>TugberkArkose/MLScheduler
power = {'BUSES': {'Area': 1.33155,
'Bus/Area': 1.33155,
'Bus/Gate Leakage': 0.00662954,
'Bus/Peak Dynamic': 0.0,
'Bus/Runtime Dynamic': 0.0,
'Bus/Subthreshold Leakage': 0.0691322,
'Bus/Subthreshold Leakage with power gating': 0.0259246,
'Gate Leakage': 0.00662954,
'Peak Dynamic': 0.0,
'Runtime Dynamic': 0.0,
'Subthreshold Leakage': 0.0691322,
'Subthreshold Leakage with power gating': 0.0259246},
'Core': [{'Area': 32.6082,
'Execution Unit/Area': 8.2042,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0471372,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.239712,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.25248,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.122718,
'Execution Unit/Instruction Scheduler/Area': 2.17927,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.328073,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.00115349,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.20978,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.559051,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.017004,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00962066,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00730101,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 1.00996,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00529112,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 2.07911,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.968075,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0800117,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0455351,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 4.84781,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.841232,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.000856399,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.55892,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.555218,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.0178624,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00897339,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 2.08234,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.114878,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.0641291,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.51389,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 6.35317,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0476989,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0202661,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.164281,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.14988,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.21198,
'Execution Unit/Register Files/Runtime Dynamic': 0.170146,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0442632,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00607074,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.409884,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 1.26479,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.0920413,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0345155,
'Execution Unit/Runtime Dynamic': 4.16237,
'Execution Unit/Subthreshold Leakage': 1.83518,
'Execution Unit/Subthreshold Leakage with power gating': 0.709678,
'Gate Leakage': 0.372997,
'Instruction Fetch Unit/Area': 5.86007,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00123831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00123831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00107325,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000412564,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00215304,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00570292,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0120629,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0590479,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.144083,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 6.43323,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.327336,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.489372,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 8.96874,
'Instruction Fetch Unit/Runtime Dynamic': 0.978557,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932587,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.408542,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0778918,
'L2/Runtime Dynamic': 0.0247027,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80969,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 7.48682,
'Load Store Unit/Data Cache/Runtime Dynamic': 3.0353,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0351387,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.202193,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.202193,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 8.44551,
'Load Store Unit/Runtime Dynamic': 4.23464,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.498573,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.997146,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591622,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283406,
'Memory Management Unit/Area': 0.434579,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.176945,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.178094,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00813591,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.399995,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0537231,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.864398,
'Memory Management Unit/Runtime Dynamic': 0.231817,
'Memory Management Unit/Subthreshold Leakage': 0.0769113,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0399462,
'Peak Dynamic': 29.2714,
'Renaming Unit/Area': 0.369768,
'Renaming Unit/FP Front End RAT/Area': 0.168486,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00489731,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 3.33511,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.16641,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0437281,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.024925,
'Renaming Unit/Free List/Area': 0.0414755,
'Renaming Unit/Free List/Gate Leakage': 4.15911e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0401324,
'Renaming Unit/Free List/Runtime Dynamic': 0.0305893,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000670426,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000377987,
'Renaming Unit/Gate Leakage': 0.00863632,
'Renaming Unit/Int Front End RAT/Area': 0.114751,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.00038343,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.86945,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.299534,
'Renaming Unit/Int Front End RAT/Subthreshold | |
'''
.. console - Comprehensive utility library for ANSI terminals.
.. © 2018, <NAME> - Released under the LGPL, version 3+.
Tables for known ANSI color palettes.
In order for "color palette downgrade by proximity" to work well, we need
to know what the standard 16 color palette of the platform is. Many are
recorded here. Palettes are recorded as integer values so proximity can
be calculated.
'''
# The 16 basic colors (indexes 0..15) of the classic Windows console.
# https://en.wikipedia.org/wiki/ANSI_escape_code#8-bit
cmd_palette4 = (
    (0x00, 0x00, 0x00),     # 0  black
    (0x80, 0x00, 0x00),     # 1  red
    (0x00, 0x80, 0x00),     # 2  green
    (0x80, 0x80, 0x00),     # 3  yellow
    (0x00, 0x00, 0x80),     # 4  blue
    (0x80, 0x00, 0x80),     # 5  magenta/purple
    (0x00, 0x80, 0x80),     # 6  cyan
    (0xc0, 0xc0, 0xc0),     # 7  white/grey
    (0x80, 0x80, 0x80),     # 8  bright black
    (0xff, 0x00, 0x00),     # 9  bright red
    (0x00, 0xff, 0x00),     # 10 bright green
    (0xff, 0xff, 0x00),     # 11 bright yellow
    (0x00, 0x00, 0xff),     # 12 bright blue
    (0xff, 0x00, 0xff),     # 13 bright magenta
    (0x00, 0xff, 0xff),     # 14 bright cyan
    (0xff, 0xff, 0xff),     # 15 bright white
)
# Windows' refreshed console palette — build 16299 (1709, Fall Creators
# Update, released 2017-10-17).
# https://blogs.msdn.microsoft.com/commandline/2017/08/02/updating-the-windows-console-colors/
cmd1709_palette4 = (
    (0x0c, 0x0c, 0x0c),     # 0  black
    (0xc5, 0x0f, 0x1f),     # 1  red
    (0x13, 0xa1, 0x0e),     # 2  green
    (0xc1, 0x9c, 0x00),     # 3  yellow
    (0x00, 0x37, 0xda),     # 4  blue
    (0x88, 0x17, 0x98),     # 5  magenta/purple
    (0x3a, 0x96, 0xdd),     # 6  cyan
    (0xcc, 0xcc, 0xcc),     # 7  white/grey
    (0x76, 0x76, 0x76),     # 8  bright black
    (0xe7, 0x48, 0x56),     # 9  bright red
    (0x16, 0xc6, 0x0c),     # 10 bright green
    (0xf9, 0xf1, 0xa5),     # 11 bright yellow
    (0x3b, 0x78, 0xff),     # 12 bright blue
    (0xb4, 0x00, 0x9e),     # 13 bright magenta
    (0x61, 0xd6, 0xd6),     # 14 bright cyan
    (0xf2, 0xf2, 0xf2),     # 15 bright white
)
# Linux virtual console palette, as configured in /etc/vtrgb.
linuxcon_palette4 = (
    (0x01, 0x01, 0x01),     # 0  black
    (0xde, 0x38, 0x2b),     # 1  red
    (0x39, 0xb5, 0x4a),     # 2  green
    (0xff, 0xc7, 0x06),     # 3  yellow
    (0x00, 0x6f, 0xb8),     # 4  blue
    (0x76, 0x26, 0x71),     # 5  magenta/purple
    (0x2c, 0xb5, 0xe9),     # 6  cyan
    (0xcc, 0xcc, 0xcc),     # 7  white/grey
    (0x80, 0x80, 0x80),     # 8  bright black
    (0xff, 0x00, 0x00),     # 9  bright red
    (0x00, 0xff, 0x00),     # 10 bright green
    (0xff, 0xff, 0x00),     # 11 bright yellow
    (0x00, 0x00, 0xff),     # 12 bright blue
    (0xff, 0x00, 0xff),     # 13 bright magenta
    (0x00, 0xff, 0xff),     # 14 bright cyan
    (0xff, 0xff, 0xff),     # 15 bright white
)
# iTerm2 (macOS) default ANSI palette.
iterm_palette4 = (
    (0x00, 0x00, 0x00),     # 0  black
    (0xc9, 0x1b, 0x00),     # 1  red
    (0x00, 0xc2, 0x00),     # 2  green
    (0xc7, 0xc4, 0x00),     # 3  yellow
    (0x02, 0x25, 0xc7),     # 4  blue
    (0xc9, 0x30, 0xc7),     # 5  magenta/purple
    (0x00, 0xc5, 0xc7),     # 6  cyan
    (0xc7, 0xc7, 0xc7),     # 7  white/grey
    (0x67, 0x67, 0x67),     # 8  bright black
    (0xff, 0x6d, 0x67),     # 9  bright red
    (0x5f, 0xf9, 0x67),     # 10 bright green
    (0xfe, 0xfb, 0x67),     # 11 bright yellow
    (0x68, 0x71, 0xff),     # 12 bright blue
    (0xff, 0x76, 0xff),     # 13 bright magenta
    (0x5f, 0xfd, 0xff),     # 14 bright cyan
    (0xff, 0xff, 0xfe),     # 15 bright white
)
# Tango Desktop Project terminal palette.
# https://en.wikipedia.org/wiki/Tango_Desktop_Project#Palette
# https://blogs.n1zyy.com/andrew/2009/02/02/tango-color-scheme-for-xfce-terminal/
tango_palette4 = (
    (0x2e, 0x34, 0x36),     # 0 black — Aluminium 6 #2e3436 (last byte was
                            #   0x34, a typo vs. the published Tango palette)
    (0xcc, 0x00, 0x00),     # 1 red
    (0x4e, 0x9a, 0x06),     # 2 green
    (0xc4, 0xa0, 0x00),     # 3 yellow
    (0x34, 0x65, 0xa4),     # 4 blue
    (0x75, 0x50, 0x7b),     # 5 magenta/purple
    (0x06, 0x98, 0x9a),     # 6 cyan
    (0xd3, 0xd7, 0xcf),     # 7 white/grey
    (0x55, 0x57, 0x53),     # 8 bright black
    (0xef, 0x29, 0x29),     # 9 bright red
    (0x8a, 0xe2, 0x34),     # 10 bright green
    (0xfc, 0xe9, 0x4f),     # 11 bright yellow
    (0x72, 0x9f, 0xcf),     # 12 bright blue
    (0xad, 0x7f, 0xa8),     # 13 bright magenta
    (0x34, 0xe2, 0xe2),     # 14 bright cyan
    (0xee, 0xee, 0xec),     # 15 bright white
)
# macOS Terminal.app default ANSI palette.
termapp_palette4 = (
    (0x00, 0x00, 0x00),     # 0  black
    (0xc2, 0x36, 0x21),     # 1  red
    (0x25, 0xbc, 0x24),     # 2  green
    (0xad, 0xad, 0x27),     # 3  yellow
    (0x49, 0x2e, 0xe1),     # 4  blue
    (0xd3, 0x38, 0xd3),     # 5  magenta/purple
    (0x33, 0xbb, 0xc8),     # 6  cyan
    (0xcb, 0xcc, 0xcd),     # 7  white/grey
    (0x81, 0x83, 0x83),     # 8  bright black
    (0xfc, 0x39, 0x1f),     # 9  bright red
    (0x31, 0xe7, 0x22),     # 10 bright green
    (0xea, 0xec, 0x23),     # 11 bright yellow
    (0x58, 0x33, 0xff),     # 12 bright blue
    (0xf9, 0x35, 0xf8),     # 13 bright magenta
    (0x14, 0xf0, 0xf0),     # 14 bright cyan
    (0xe9, 0xeb, 0xeb),     # 15 bright white
)
# Solarized (dark) 16-color scheme.  NOTE(review): the "bright" slots 8-15
# are not lightened versions of 0-7 — the values match Solarized's base
# tones (e.g. slot 8 is #002b36, the dark background), which appears to
# follow Solarized's standard 16-color terminal mapping; verify against
# the upstream scheme if these slots look wrong in practice.
solarized_dark_palette4 = (
    (0x07, 0x36, 0x42),  # 0 black
    (0xDC, 0x32, 0x2F),  # 1 red
    (0x85, 0x99, 0x00),  # 2 green
    (0xB5, 0x89, 0x00),  # 3 yellow
    (0x26, 0x8B, 0xD2),  # 4 blue
    (0xD3, 0x36, 0x82),  # 5 magenta/purple
    (0x2A, 0xA1, 0x98),  # 6 cyan
    (0xEE, 0xE8, 0xD5),  # 7 white/grey
    (0x00, 0x2B, 0x36),  # 8 bright black
    (0xCB, 0x4B, 0x16),  # 9 bright red
    (0x58, 0x6E, 0x75),  # 10 bright green
    (0x65, 0x7B, 0x83),  # 11 bright yellow
    (0x83, 0x94, 0x96),  # 12 bright blue
    (0x6C, 0x71, 0xC4),  # 13 bright magenta
    (0x93, 0xA1, 0xA1),  # 14 bright cyan
    (0xFD, 0xF6, 0xE3),  # 15 bright white
)
# Original IBM VGA text-mode palette.
vga_palette4 = (
    (0x00, 0x00, 0x00),     # 0  black
    (0xaa, 0x00, 0x00),     # 1  red
    (0x00, 0xaa, 0x00),     # 2  green
    (0xaa, 0x55, 0x00),     # 3  yellow (the classic VGA "brown")
    (0x00, 0x00, 0xaa),     # 4  blue
    (0xaa, 0x00, 0xaa),     # 5  magenta/purple
    (0x00, 0xaa, 0xaa),     # 6  cyan
    (0xaa, 0xaa, 0xaa),     # 7  white/grey
    (0x55, 0x55, 0x55),     # 8  bright black
    (0xff, 0x55, 0x55),     # 9  bright red
    (0x55, 0xff, 0x55),     # 10 bright green
    (0xff, 0xff, 0x55),     # 11 bright yellow
    (0x55, 0x55, 0xff),     # 12 bright blue
    (0xff, 0x55, 0xff),     # 13 bright magenta
    (0x55, 0xff, 0xff),     # 14 bright cyan
    (0xff, 0xff, 0xff),     # 15 bright white
)
# Default xterm palette.
xterm_palette4 = (
    (0, 0, 0),          # 0  black
    (205, 0, 0),        # 1  red
    (0, 205, 0),        # 2  green
    (205, 205, 0),      # 3  yellow
    (0, 0, 238),        # 4  blue
    (205, 0, 205),      # 5  magenta/purple
    (0, 205, 205),      # 6  cyan
    (229, 229, 229),    # 7  white/grey
    (127, 127, 127),    # 8  bright black
    (255, 0, 0),        # 9  bright red
    (0, 255, 0),        # 10 bright green
    (255, 255, 0),      # 11 bright yellow
    (92, 92, 255),      # 12 bright blue
    (255, 0, 255),      # 13 bright magenta
    (0, 255, 255),      # 14 bright cyan
    (255, 255, 255),    # 15 bright white
)
# template for new entry
#~ _new_palette4 = (
#~ (), # 0 black
#~ (), # 1 red
#~ (), # 2 green
#~ (), # 3 yellow
#~ (), # 4 blue
#~ (), # 5 magenta/purple
#~ (), # 6 cyan
#~ (), # 7 white/grey
#~ (), # 8 bright black
#~ (), # 9 bright red
#~ (), # 10 bright green
#~ (), # 11 bright yellow
#~ (), # 12 bright blue
#~ (), # 13 bright magenta
#~ (), # 14 bright cyan
#~ (), # 15 bright white
#~ )
# Map TERM environment-variable specs to a basic 16-color palette, mainly
# for remote (SSH) sessions where the local platform can't be inspected.
# Windows terminals generally don't set TERM, so this table rarely applies
# there.  Keys ending in '*' look like wildcard patterns — presumably
# matched with fnmatch/glob semantics by the consumer; verify against the
# lookup code.
term_palette_map = {
    'xterm*': xterm_palette4,
    'linux*': linuxcon_palette4,    # Linux virtual console
    'fbterm': linuxcon_palette4,    # framebuffer terminal, same palette
    'iterm*': iterm_palette4,
    'nsterm*': termapp_palette4,    # Terminal.app (judging by the palette name)
}

# Palette assumed when TERM matches nothing above.
DEFAULT_BASIC_PALETTE = xterm_palette4
# Extended/256 color table for finding rgb values for indexes,
# useful for color downgrade:
index_to_rgb8 = {
'0': (0, 0, 0),
'1': (128, 0, 0),
'2': (0, 128, 0),
'3': (128, 128, 0),
'4': (0, 0, 128),
'5': (128, 0, 128),
'6': (0, 128, 128),
'7': (192, 192, 192),
'8': (128, 128, 128),
'9': (255, 0, 0),
'10': (0, 255, 0),
'11': (255, 255, 0),
'12': (0, 0, 255),
'13': (255, 0, 255),
'14': (0, 255, 255),
'15': (255, 255, 255),
'16': (0, 0, 0),
'17': (0, 0, 95),
'18': (0, 0, 135),
'19': (0, 0, 175),
'20': (0, 0, 215),
'21': (0, 0, 255),
'22': (0, 95, 0),
| |
contours
for i,cnt1 in enumerate(contour_TT):
x = i
if i != NB_TT-1:
for j,cnt2 in enumerate(contour_TT[i+1:]):
x = x+1
dist = self.find_if_close(cnt1,cnt2, self.para_r1)
if dist == True:
val = min(status_TT[i],status_TT[x])
status_TT[x] = status_TT[i] = val
else:
if status_TT[x]==status_TT[i]:
status_TT[x] = i+1
#
# 2) unify neighboring contours
unified_TT = []
maximum = int(status_TT.max())+1
for i in xrange(maximum):
pos = np.where(status_TT==i)[0]
if pos.size != 0:
cont = np.vstack(contour_TT[i] for i in pos)
#hull = cv2.convexHull(cont)
unified_TT.append(cont)
#
# 3) draw contours to data_f4 to evaluate area
data_f4 = data2.astype('f')
#
i1 = 0
for contour in contour_TT:
#cv2.drawContours(data_f3,[contour],0,i1+2,-1)
cv2.drawContours(data_f4,[contour],0,status_TT[i1]+2,-1)
i1 = i1 + 1
#
# 4) sum matched groups of pixels into same unified contours (accurate area evaluation!)
i1 = 0
area_pixels_tt = []
for contour in unified_TT:
#cv2.drawContours(data_f3,[contour],0,i1+2,-1)
cnt = np.sum(data_f4==i1+2)
area_pixels_tt.append(cnt)
i1 = i1 + 1
else:
unified_TT = []
data_f4 = data2.astype('f')
#
i1 = 0
area_pixels_tt = []
for contour in contour_TT:
unified_TT.append(contour)
cv2.drawContours(data_f4,[contour],0,i1+2,-1)
cnt = np.sum(data_f4==i1+2)
area_pixels_tt.append(cnt)
i1 = i1 + 1
#
####################################################
# NEW 3A - search through grid for percent of pixels above threshold (porosity) - include original regions
msk = np.ones((self.para_r2,self.para_r2))
#
data_f5t = convolve2d(data_f1,msk,mode = 'same')
data_f5b = (data_f5t > self.para_r4)
data_f5 = (data_f5t / self.para_r3)*data_f5b
#
#if ((Nx>self.para_r2)and(Ny>self.para_r2))
# for i1 in range(self.para_r2,Nx-self.para_r2):
# for ij in range(self.para_r2,Ny-self.para_r2):
####################################################
# NEW 3B - search through grid for percent of pixels above threshold (porosity) AND remove called BW regions
msk = np.ones((self.para_r2,self.para_r2))
#
data_f13b = (data_f3 == 1)
data_f13 = data_f13b.astype('f')
#
data_f6t = convolve2d(data_f13,msk,mode = 'same')
data3 = (data_f6t > self.para_r4)
data_f6 = data3.astype('f')
####################################################
# NEW 3C - CALL SEPARATE ADA BLOB EVALUATION FOR BW MAP (TRUE AREA CALCULATION)
#contour_list = []
#N_list = 0
contour_PY = self.on_ada_findblobs(data_f6)
#
####################################################
# NEW 3D - MERGE BLOB EVALUATION FOR PY MAP (COMBINE TRUE AREA CALCULATION)
NB_PY = len(contour_PY)
status_PY = np.zeros((NB_PY,1))
#
if NB_PY > 1:
#
# 1) evaluate neighboring contours
for i,cnt1 in enumerate(contour_PY):
x = i
if i != NB_PY-1:
for j,cnt2 in enumerate(contour_PY[i+1:]):
x = x+1
dist = self.find_if_close(cnt1,cnt2, self.para_r2)
if dist == True:
val = min(status_PY[i],status_PY[x])
status_PY[x] = status_PY[i] = val
else:
if status_PY[x]==status_PY[i]:
status_PY[x] = i+1
unified_PY = []
maximum = int(status_PY.max())+1
for i in xrange(maximum):
pos = np.where(status_PY==i)[0]
if pos.size != 0:
cont = np.vstack(contour_PY[i] for i in pos)
#hull = cv2.convexHull(cont)
unified_PY.append(cont)
#
i1 = 0
area_pixels_py = []
data_f7 = data_f6.astype('f')
#
for contour in unified_PY:
cv2.drawContours(data_f7,[contour],0,i1+2,-1)
cnt = np.sum(data_f7==i1+2)
area_pixels_py.append(cnt)
i1 = i1 + 1
unified_PY = []
maximum = int(status_PY.max())+1
for i in xrange(maximum):
pos = np.where(status_PY==i)[0]
if pos.size != 0:
cont = np.vstack(contour_PY[i] for i in pos)
#hull = cv2.convexHull(cont)
unified_PY.append(cont)
else:
unified_PY = []
data_f7 = data_f6.astype('f')
#
i1 = 0
area_pixels_py = []
for contour in contour_PY:
unified_PY.append(contour)
cv2.drawContours(data_f7,[contour],0,i1+2,-1)
cnt = np.sum(data_f7==i1+2)
area_pixels_py.append(cnt)
i1 = i1 + 1
#
####################################################
# NEW 4A - MERGE ALL CALLS AND INCLUDE AREA MEASUREMENTS AND OTHER METRICS
unified = []
area_pixels = []
ADA_call = []
#
i1 = 0
for contour in unified_TT:
unified.append(contour)
area_pixels.append(area_pixels_tt[i1])
ADA_call.append(1)
i1 = i1 + 1
#
i1 = 0
for contour in unified_BW:
unified.append(contour)
area_pixels.append(area_pixels_bw[i1])
ADA_call.append(2)
i1 = i1 + 1
#
i1 = 0
for contour in unified_PY:
unified.append(contour)
area_pixels.append(area_pixels_py[i1])
ADA_call.append(4)
i1 = i1 + 1
# 4B store statistics for each region of interest (contour)
self.feat_a1 = []
self.feat_a2 = []
self.feat_bx = []
self.feat_by = []
self.feat_bw = []
self.feat_bh = []
self.feat_cx = []
self.feat_cy = []
self.call = []
#
i1 = 0
i1A = 0
for contour in unified:
# Evaluate area of regions of interest
# Evaluate bounding box of regions of interest
#feat_area = abs(cv2.contourArea(contour)+cv2.arcLength(contour, True))
#feat_area = abs(cv2.contourArea(contour))
feat_area = area_pixels[i1A]
feat_call = ADA_call[i1A]
feat_bbxy = cv2.boundingRect(contour)
(feat_bbxy_x, feat_bbxy_y, feat_bbxy_width, feat_bbxy_height) = feat_bbxy
#
if feat_area >= self.para_c1:
if feat_bbxy_width >= self.para_c2:
if feat_bbxy_height >= self.para_c3:
#if feat_bbxy_x >= self.para_c2:
#if feat_bbxy_y >= self.para_c3:
# save area feature
self.feat_a1.append(feat_area)
# save dimension features
self.feat_bx.append(feat_bbxy_x)
self.feat_by.append(feat_bbxy_y)
self.feat_bw.append(feat_bbxy_width)
self.feat_bh.append(feat_bbxy_height)
# Evaluate centroid (and possibly higher order moments)
moments1 = cv2.moments(contour)
if moments1['m00'] != 0.0:
self.feat_a2.append(moments1['m00'])
self.feat_cx.append(moments1['m10']/moments1['m00'])
self.feat_cy.append(moments1['m01']/moments1['m00'])
# Add logic to make call (default to 1 for now)
else:
self.feat_a2.append(moments1['m00'])
self.feat_cx.append(feat_bbxy_x+0.5*feat_bbxy_width)
self.feat_cy.append(feat_bbxy_y+0.5*feat_bbxy_height)
self.call.append(feat_call)
i1 = i1 + 1
i1A = i1A + 1
self.nb = i1
####################################################
# Call ADA code - Step 2 (evaluate regions that match call criteria)
self.tmp_data = np.logical_or((data_m6a < self.para_a4),(data_m3a > self.para_a3))* (data_m5a >= 2)
self.tmp_data2 = data_m4a
# OLD - CALL COMBINED ADA BLOB EVALUATION
#self.on_ada_1()
Nr = int(self.nb)
#
ikey_tmp = []
for ikey, ivalues in sorted(self.indcalls.iteritems()):
ikey_tmp.append(ikey)
Nc1 = len(ikey_tmp)
ikey_tmp = []
for ikey, ivalues in sorted(self.indmetrics.iteritems()):
ikey_tmp.append(ikey)
Nc2 = len(ikey_tmp)
Nc = Nc1 + Nc2
#
if int(self.nb) == 0:
Nr = 1
self.nb = 1
idx1 = 0
self.feat_bx.append(1)
self.feat_by.append(1)
self.feat_bw.append(1)
self.feat_bh.append(1)
self.feat_cx.append(1)
self.feat_cy.append(1)
#
model_data = np.zeros((Nr,Nc),'float')
model_data[idx1,0] = 0.0
model_data[idx1,1] = 0.0
model_data[idx1,2] = 0.0
model_data[idx1,3] = 0.0
model_data[idx1,4] = 0.0
model_data[idx1,5] = 0.0
#
model_data[idx1,6] = 0.0
model_data[idx1,7] = 0.0
model_data[idx1,8] = 0.0
#
model_data[idx1,9] = 0.0
model_data[idx1,10] = 0.0
model_data[idx1,11] = 0.0
else:
model_data = np.zeros((Nr,Nc),'float')
for idx1 in range(Nr):
#model_data[idx1,0] = ADA_call[idx1]
model_data[idx1,0] = self.call[idx1]
model_data[idx1,1] = self.feat_cx[idx1]
model_data[idx1,2] = self.feat_cy[idx1]
model_data[idx1,3] = self.feat_a1[idx1]
model_data[idx1,4] = self.feat_bw[idx1]
model_data[idx1,5] = self.feat_bh[idx1]
#
j1 = np.round(self.feat_cx[idx1])
i1 = np.round(self.feat_cy[idx1])
if j1 >= Ny:
j1 = Ny-1
elif j1 < 0:
j1 = 0
if i1 >= Nx:
i1 = Nx-1
elif i1 < 0:
i1 = 0
#
#model_data[idx1,0] = 2.0*data_f1[i1,j1] + data_f2[i1,j1]
model_data[idx1,6] = data_m3t[i1,j1]
model_data[idx1,7] = data_m3a[i1,j1]
model_data[idx1,8] = data_m4a[i1,j1]
#
model_data[idx1,9] = self.feat_bx[idx1]
model_data[idx1,10] = self.feat_by[idx1]
model_data[idx1,11] = self.feat_a2[idx1]
#self.populate_spreadsheet(self.view.output_grid, model.data)
#self.populate_spreadsheet(self.view.output_grid2, model.data)
#self.view.spreadsheet_nb.ChangeSelection(self.view.res_summary_page)
#
filename_2D = basename(filepath)
#
if self.para_dimon > 0:
trans_x_pix2units = self.axis_x_resolution
trans_y_pix2units = self.axis_y_resolution
trans_a_pix2units = self.axis_x_resolution*self.axis_x_resolution
trans_t_stp2units = self.axis_time_resolution*para_cl*0.5
#
x_len = Ny*trans_x_pix2units
y_len = Nx*trans_x_pix2units
#
for idx1 in range(Nr):
model_data[idx1,1] = model_data[idx1,1]*trans_x_pix2units
model_data[idx1,2] = model_data[idx1,2]*trans_y_pix2units
model_data[idx1,3] = model_data[idx1,3]*trans_a_pix2units
#
model_data[idx1,4] = model_data[idx1,4]*trans_x_pix2units
model_data[idx1,5] = model_data[idx1,5]*trans_y_pix2units
model_data[idx1,6] = model_data[idx1,6]*trans_t_stp2units
#
model_data[idx1,9] = model_data[idx1,9]*trans_x_pix2units
model_data[idx1,10] = model_data[idx1,10]*trans_y_pix2units
model_data[idx1,11] = model_data[idx1,11]*trans_a_pix2units
#
#
for ikey, ivalues in sorted(self.indmetrics.iteritems()):
if ivalues['index'] == '1':
ivalues['unit'] = self.axis_x_units
elif ivalues['index'] == '2':
ivalues['unit'] = self.axis_y_units
elif ivalues['index'] == '3':
ivalues['unit'] = (self.axis_x_units + "*" + self.axis_y_units)
elif ivalues['index'] == '4':
ivalues['unit'] = self.axis_x_units
elif ivalues['index'] == '5':
ivalues['unit'] = self.axis_y_units
elif ivalues['index'] == '6':
ivalues['unit'] = self.axis_time_units
elif ivalues['index'] == '9':
ivalues['unit'] = self.axis_x_units
elif ivalues['index'] == '10':
ivalues['unit'] = self.axis_y_units
elif ivalues['index'] == '11':
ivalues['unit'] = (self.axis_x_units + "*" + self.axis_y_units)
#
self.para_t1 = self.para_t1*trans_t_stp2units
self.para_t2 = self.para_t2*trans_t_stp2units
TOF_bw_median = TOF_bw_median*trans_t_stp2units
#
# Store in res_outputdata
model_res_outputdata = []
model_res_outputdata.append(data_m2a)
model_res_outputdata.append(data_m2t*trans_t_stp2units)
model_res_outputdata.append(data_m3a)
model_res_outputdata.append(data_m3t*trans_t_stp2units)
model_res_outputdata.append(data_m4a)
model_res_outputdata.append(data_m1a)
model_res_outputdata.append(data_m1t*trans_t_stp2units)
model_res_outputdata.append(data_f1)
model_res_outputdata.append(data_f2)
#model_res_outputdata.append(data_m0f) #change from data_f3
model_res_outputdata.append(data_f3)
model_res_outputdata.append(data_f4)
model_res_outputdata.append(data_f5)
model_res_outputdata.append(data_f6)
model_res_outputdata.append(data_m4t*trans_t_stp2units)
model_res_outputdata.append(data_m5a)
model_res_outputdata.append(data_m6a)
model_res_outputdata.append(ta)
else:
#
x_len = Ny
y_len = Nx
#
for ikey, ivalues in sorted(self.indmetrics.iteritems()):
if ivalues['index'] == '1':
ivalues['unit'] = '()'
elif ivalues['index'] == '2':
ivalues['unit'] = '()'
elif ivalues['index'] == '3':
ivalues['unit'] = '()'
elif ivalues['index'] == '4':
ivalues['unit'] = '()'
elif ivalues['index'] == '5':
ivalues['unit'] = '()'
elif ivalues['index'] == '6':
ivalues['unit'] = '()'
elif ivalues['index'] == '9':
ivalues['unit'] = '()'
elif ivalues['index'] == '10':
ivalues['unit'] = '()'
elif ivalues['index'] == '11':
ivalues['unit'] = '()'
# Store in res_outputdata
model_res_outputdata = []
model_res_outputdata.append(data_m2a)
model_res_outputdata.append(data_m2t)
model_res_outputdata.append(data_m3a)
model_res_outputdata.append(data_m3t)
model_res_outputdata.append(data_m4a)
model_res_outputdata.append(data_m1a)
model_res_outputdata.append(data_m1t)
model_res_outputdata.append(data_f1)
model_res_outputdata.append(data_f2)
#model_res_outputdata.append(data_m0f) #change from data_f3
model_res_outputdata.append(data_f3)
model_res_outputdata.append(data_f4)
model_res_outputdata.append(data_f5)
model_res_outputdata.append(data_f6)
model_res_outputdata.append(data_m4t)
model_res_outputdata.append(data_m5a)
model_res_outputdata.append(data_m6a)
model_res_outputdata.append(ta)
#
model_res_outputpara = []
model_res_outputpara.append(filename_2D)
model_res_outputpara.append(str(self.axis_x_resolution)+' '+self.axis_x_units)
model_res_outputpara.append(str(x_len))
model_res_outputpara.append(str(self.axis_y_resolution)+' '+self.axis_y_units)
model_res_outputpara.append(str(y_len))
model_res_outputpara.append(str(self.axis_time_resolution)+' '+self.axis_time_units)
model_res_outputpara.append(str(self.para_t1))
model_res_outputpara.append(str(self.para_t2))
model_res_outputpara.append(str(TOF_bw_median))
model_res_outputpara.append(str(datatmp_1p2))
model_res_outputpara.append(str(Vppn_tt_median))
model_res_outputpara.append(str(Vppn_bw_median))
model_res_outputpara.append(str(self.nb))
model_res_outputpara.append(str(self.dac_curve_on))
#
filename_2D_long = filename_2D + '.met'
thefile = open(filename_2D_long, 'w')
for item in model_res_outputpara:
thefile.write("%s\n" % item)
#np.savetxt(filename_2D_long, model_res_outputpara, delimiter=",")
thefile.close()
#
filename_2D_long = 'validation_study.met'
thefile = open(filename_2D_long, 'a')
item = ",".join(model_res_outputpara)
thefile.write("%s\n" % item)
thefile.close()
#
filename_2D_long | |
scores.append(temp_p)
else:
scores.append(0.000)
except:
if not lig_name:
scores.append(0.000)
else:
scores.append("None")
# Cscore
elif i_score in ['dC','C','avgC','medC']:
try:
if dist == 'dice':
temp_scores = DataStructs.BulkDiceSimilarity(c_fps[c],x)
elif dist == 'tani':
temp_scores = DataStructs.BulkTanimotoSimilarity(c_fps[c],x)
elif dist == 'cos':
temp_scores = DataStructs.BulkCosineSimilarity(c_fps[c],x)
#Cscore cutoff
temp_scores = [i for i in temp_scores if float(i) >= cscore_cutoff]
if i_score == 'dC':
temp_c = max(temp_scores)
if not lig_name:
scores.append(stats.percentileofscore(all_scores, temp_c) / 100.0)
else:
scores.append(li_bs[li_score.index(temp_c)])
elif i_score == 'C':
temp_c = max(temp_scores)
if not lig_name:
scores.append(temp_c)
else:
scores.append(li_bs[li_score.index(temp_c)])
elif i_score == 'avgC':
temp_c = np.mean(temp_scores)
if not np.isnan(temp_c):
scores.append(temp_c)
else:
scores.append(0.000)
elif i_score == 'medC':
temp_c = np.median(temp_scores)
if not np.isnan(temp_c):
scores.append(temp_c)
else:
scores.append(0.000)
except:
if not lig_name:
scores.append(0.000)
else:
scores.append("None")
return (c, scores)
def generate_signature(cmpd_file, fp="rd_ecfp4", vect="int", dist="dice", org="nrpdb", bs="coach", c_cutoff=0.0,
                       p_cutoff=0.0, percentile_cutoff=0.0, i_score="P", out_file='', out_path=".", nr_ligs=True,
                       prot_path=''):
    """!
    Generate an interaction signature for a query compound using our in-house protocol BANDOCK. Note: the parameters
    for this function MUST MATCH the parameters used to generate the matrix in use. Otherwise, the scores will be
    incompatible.
    @param cmpd_file str: filepath to an input mol file
    @param fp str: the chemical fingerprint to use (rd_ecfp4, rd_ecfp10, etc)
    @param vect str: integer "int" or binary "bit" vector for fingerprint
    @param dist str: use Sorenson-Dice "dice" for vect="int" and Tanimoto "tani" for vect="bit"
    @param org str: protein library to use ('nrpdb' or 'homo_sapien')
    @param bs str: the method to use, just use "coach"
    @param c_cutoff float: minimum Cscore (Tanimoto/Dice similarity score) to consider for scoring
    @param p_cutoff float: minimum Pscore (binding site score from COACH) to consider for scoring
    @param percentile_cutoff float: %ile cutoff for fingerprint similarity scores in 'dC' scoring protocols
    @param i_score str: the scoring protocol to use ('P', 'C', 'dC', 'CxP', dCxP')
    @param out_file str: filename of the output signature
    @param out_path str: path to the output signature
    @param nr_ligs bool: use only the non-redundant set of ligands for 'dC' scoring protocols (recommended)
    @param prot_path str: specify a local protein library for custom analyses
    @return Returns the generated signature (first column of the matrix) as a numpy array
    """
    def print_time(s):
        """Report elapsed wall time, broken into hr/min/s as needed."""
        # Fixed: the original used fractional division (m = s / 60.0) and then
        # subtracted m * 60.0, which always zeroed the seconds and misreported
        # minutes/hours. divmod yields the integral quotient and true remainder.
        if s >= 60:
            m, s = divmod(s, 60.0)
            if m >= 60.0:
                h, m = divmod(m, 60.0)
                print("Signature generation took {:.0f} hr {:.0f} min {:.0f} s to finish.".format(h, m, s))
            else:
                print("Signature generation took {:.0f} min {:.0f} s to finish.".format(m, s))
        else:
            print("signature generation took {:.0f} s to finish.".format(s))
    print("Generating CANDO signature...")
    start = time.time()
    # Data root shipped alongside this module
    pre = os.path.dirname(__file__) + "/data/v2.2+/"
    lig_path = "{}/ligs/fps/".format(pre)
    if out_file == '':
        # Fixed: the auto-generated name previously embedded out_path and was
        # later joined with out_path again, producing "out_path/out_path/..."
        # (broken for any out_path other than "."). Keep only the basename here.
        if percentile_cutoff != 0.0:
            out_file = "cmpd_0-{}-{}-{}-{}-{}-percentile{}-p{}-{}.tsv".format(fp,vect,dist,org,bs,percentile_cutoff,p_cutoff,i_score)
        else:
            out_file = "cmpd_0-{}-{}-{}-{}-{}-c{}-p{}-{}.tsv".format(fp,vect,dist,org,bs,c_cutoff,p_cutoff,i_score)
    os.makedirs(out_path, exist_ok=True)
    # Remove redundant ligands from full list
    # Especially important for percentile calculations
    if nr_ligs:
        if not os.path.exists("{}/mappings/nr_ligs.csv".format(pre)):
            url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/mappings/nr_ligs.csv'
            dl_file(url, '{}/mappings/nr_ligs.csv'.format(pre))
        # NOTE: nr_ligs is rebound from a bool flag to the array of ligand ids
        nr_ligs = pd.read_csv("{}/mappings/nr_ligs.csv".format(pre),header=None)
        nr_ligs = nr_ligs[0].values.flatten()
    # Download protein matrix if it does not exist
    if not prot_path:
        if not os.path.exists("{}/prots/{}-{}.tsv".format(pre,org,bs)):
            url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/prots/{}-{}.tsv'.format(org,bs)
            dl_file(url, '{}/prots/{}-{}.tsv'.format(pre,org,bs))
        p_matrix = pd.read_csv("{}/prots/{}-{}.tsv".format(pre,org,bs),sep='\t',header=None,index_col=0)
    else:
        p_matrix = pd.read_csv("{}/{}-{}.tsv".format(prot_path,org,bs),sep='\t',header=None,index_col=0)
    # Create dictionary of lists
    # Keys == proteins
    # Values == list of predicted bs + bs scores
    p_dict = {}
    for p in p_matrix.itertuples():
        p_dict[p[0]] = list(zip(p[1].split(','),p[2].split(',')))
    if i_score not in ['C','dC','P','CxP','dCxP','avgC','medC','avgP','medP']:
        print("{} is not an applicable interaction score.".format(i_score))
        return
    # Load and normalize the query molecule
    nc = Chem.MolFromMolFile(cmpd_file)
    nc = Chem.RemoveHs(nc)
    name = nc.GetProp("_Name")
    c_fps = {}
    # assumes fp is of the form "rd_ecfpN"/"rd_fcfpN" (radius = N/2) — TODO confirm
    rad = int(int(fp[7:])/2)
    if fp[3]=='f':
        features = True
    else:
        features = False
    if vect=='int':
        c_fps[0] = AllChem.GetMorganFingerprint(nc,rad,useFeatures=features)
    else:
        # assumes vect is of the form "<nbits>bit" (e.g. "1024bit") — TODO confirm
        bits = int(vect[:4])
        c_fps[0] = AllChem.GetMorganFingerprintAsBitVect(nc,rad,useFeatures=features,nBits=bits)
    if not os.path.exists("{}/{}-{}_vect.pickle".format(lig_path,fp,vect)):
        url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/ligs/fps/{}-{}_vect.pickle'.format(fp,vect)
        dl_file(url, '{}/{}-{}_vect.pickle'.format(lig_path,fp,vect))
    # Load ligand fingerprint pickles
    with open('{}/{}-{}_vect.pickle'.format(lig_path,fp,vect), 'rb') as f:
        l_fps = pickle.load(f)
    scores = calc_scores(0,c_fps,l_fps,p_dict,dist,p_cutoff,c_cutoff,percentile_cutoff,i_score,nr_ligs)
    # Assemble a single-column matrix indexed by protein id
    scores = {scores[0]:scores[1]}
    mat = pd.DataFrame.from_dict(scores)
    mat.sort_index(axis=1,inplace=True)
    mat.rename(index=dict(zip(range(len(p_matrix.index)), p_matrix.index)), inplace=True)
    mat.to_csv("{}/{}".format(out_path,out_file), sep='\t', index=True, header=False, float_format='%.3f')
    end = time.time()
    print("Signature written to {}/{}.".format(out_path,out_file))
    print_time(end-start)
    return(mat.iloc[:,0].values)
def add_cmpds(cmpd_list, file_type='smi', fp="rd_ecfp4", vect="int", cmpd_dir=".", v=None, map_indications='v2.3'):
"""!
Add new compounds to an existing CANDO Compound library, or create a new Compound library using our in-house protocol
BANDOCK.
@param cmpd_list str: filepath to all input compounds
@param fp str: the chemical fingerprint to use (rd_ecfp4, rd_ecfp10, etc)
@param vect str: integer "int" or binary "bit" vector for fingerprint
@param cmpd_dir str: ??
@param v str: ??
@param map_indications str: CANDO version number to string match exact names from compound file to existing ind_map
@return Returns None
"""
start = time.time()
pre = os.path.dirname(__file__) + "/data/v2.2+/"
# List of new compounds loaded into df
ncs = pd.read_csv(cmpd_list, sep='\t', header=None)
vs = ['v2.2', 'v2.3', 'v2.4', 'v2.5', 'test.0']
if v in vs:
# Redundant with future lines.
# Remove future lines and implement them into get_data()
#get_data(v=v, org=None)
curr_v = v
print("Adding new compounds to compound library {}...".format(curr_v))
t = curr_v.split('.')
t[-1] = str(int(t[-1])+1)
new_v = '.'.join(t)
print("New compound library is {}.".format(new_v))
curr_cmpd_path = "{}/cmpds/fps-{}/".format(pre, curr_v)
if not os.path.exists("{}/cmpds/fps-{}/{}-{}_vect.pickle".format(pre, curr_v, fp, vect)):
url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/cmpds/fps-{}/{}-{}_vect.pickle'.format(curr_v,
fp, vect)
dl_file(url, '{}/cmpds/fps-{}/{}-{}_vect.pickle'.format(pre, curr_v, fp, vect))
cmpd_path = "{}/cmpds/fps-{}/".format(pre, new_v)
os.makedirs(cmpd_path, exist_ok=True)
os.system("cp {0}/{2}-{3}_vect.pickle {1}/{2}-{3}_vect.pickle".format(curr_cmpd_path, cmpd_path, fp, vect))
if not os.path.exists("{}/mappings/drugbank-{}.tsv".format(pre, curr_v)):
url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/mappings/drugbank-{}.tsv'.format(curr_v)
dl_file(url, '{}/mappings/drugbank-{}.tsv'.format(pre, curr_v))
d_map = pd.read_csv("{}/mappings/drugbank-{}.tsv".format(pre, curr_v), sep='\t')
if not os.path.exists("{}/mappings/drugbank2ctd-{}.tsv".format(pre, curr_v)):
url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/mappings/drugbank2ctd-{}.tsv'.format(curr_v)
dl_file(url, '{}/mappings/drugbank2ctd-{}.tsv'.format(pre, curr_v))
os.system("cp {0}/mappings/drugbank2ctd-{1}.tsv {0}/mappings/drugbank2ctd-{2}.tsv".format(pre, curr_v, new_v))
if not os.path.exists("{}/cmpds/fps-{}/inchi_keys.pickle".format(pre, curr_v)):
url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/cmpds/fps-{}/inchi_keys.pickle'.format(curr_v)
dl_file(url, '{}/cmpds/fps-{}/inchi_keys.pickle'.format(pre, curr_v))
with open('{}/inchi_keys.pickle'.format(curr_cmpd_path), 'rb') as f:
inchi_dict = pickle.load(f)
cmpd_num = len(inchi_dict)
for c in ncs.itertuples(index=False):
try:
if file_type == 'mol':
nc = Chem.MolFromMolFile("{}/{}.mol".format(cmpd_dir, c[0]))
name = nc.GetProp("_Name")
elif file_type == 'smi':
nc = Chem.MolFromSmiles("{}".format(c[0]))
name = c[1]
nc.SetProp("_Name", name)
nc = Chem.RemoveHs(nc)
except:
print("{} cannot load this molecule.".format(c[0]))
continue
inchi_key = Chem.MolToInchiKey(nc)
try:
match = str(inchi_dict[inchi_key])
except:
match = None
if match:
print(" {} is the same as {} - {} in the library".format(name, int(match),
d_map.loc[(d_map['CANDO_ID'] == int(match)),
'GENERIC_NAME'].values[0], match))
continue
else:
print(" Adding compound {} - {}".format(cmpd_num,name))
with open('{}/inchi_keys.pickle'.format(cmpd_path), 'wb') as f:
inchi_dict[inchi_key] = cmpd_num
pickle.dump(inchi_dict, f)
d_map = d_map.append(pd.DataFrame([[cmpd_num, 'NA', name, 'other']],
columns=['CANDO_ID', 'DRUGBANK_ID', 'GENERIC_NAME', 'DRUG_GROUPS']),
ignore_index=True)
rad = int(int(fp[7:])/2)
if fp[3] == 'f':
features = True
else:
features = False
if vect == 'int':
with open('{}/{}-{}_vect.pickle'.format(cmpd_path, fp, vect), 'rb') as f:
c_fps = pickle.load(f)
c_fps[cmpd_num] = AllChem.GetMorganFingerprint(nc, rad, useFeatures=features)
with open('{}/{}-{}_vect.pickle'.format(cmpd_path, fp, vect), 'wb') as f:
pickle.dump(c_fps, f)
else:
bits = int(vect[:4])
with open('{}/{}-{}_vect.pickle'.format(cmpd_path, fp, vect), 'rb') as f:
c_fps = pickle.load(f)
c_fps[cmpd_num] = AllChem.GetMorganFingerprintAsBitVect(nc, rad, useFeatures=features, nBits=bits)
with open('{}/{}-{}_vect.pickle'.format(cmpd_path, fp, vect), 'wb') as f:
pickle.dump(c_fps, f)
cmpd_num += 1
elif v and v not in vs:
new_v = v
print("Creating new compound library {}...".format(new_v))
print("The library will be built at {}/{}.".format(os.getcwd(), new_v))
os.makedirs(new_v, exist_ok=True)
os.makedirs("{}/cmpds".format(new_v), exist_ok=True)
os.makedirs("{}/mappings".format(new_v), exist_ok=True)
cmpd_path = "{0}/cmpds/fps-{0}/".format(new_v)
os.makedirs(cmpd_path, exist_ok=True)
d_map = pd.DataFrame(columns=['CANDO_ID', 'DRUGBANK_ID', 'GENERIC_NAME', 'DRUG_GROUPS'])
cid2name = {}
cname2inds = {}
if map_indications:
if not os.path.exists("{}/mappings/drugbank-{}.tsv".format(pre, map_indications)):
url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/mappings/' \
'drugbank-{}.tsv'.format(map_indications)
dl_file(url, '{}/mappings/drugbank-{}.tsv'.format(pre, map_indications))
if not os.path.exists("{}/mappings/drugbank2ctd-{}.tsv".format(pre, map_indications)):
url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/mappings/' \
'drugbank2ctd-{}.tsv'.format(map_indications)
dl_file(url, '{}/mappings/drugbank2ctd-{}.tsv'.format(pre, map_indications))
fcm = open('{}/mappings/drugbank-{}.tsv'.format(pre, map_indications), 'r')
cmls = fcm.readlines()
fcm.close()
for cml in cmls[1:]:
cls = cml.split('\t')
cid = cls[0]
cname = cls[2]
cid2name[cid] = cname
fim = open('{}/mappings/drugbank2ctd-{}.tsv'.format(pre, map_indications), 'r')
imls = fim.readlines()
fim.close()
for iml in imls[1:]:
ils = iml.split('\t')
cid = ils[0]
indname = ils[1]
indid = ils[2]
cname = cid2name[cid]
if cname in cname2inds:
if (indname, indid) not in cname2inds[cname]:
cname2inds[cname].append((indname, indid))
else:
cname2inds[cname] = [(indname, indid)]
cmpd_num = 0
# Create new fingerprint dict and save it to pickle for future use
c_fps = {}
if vect == 'int':
with open('{}/{}-{}_vect.pickle'.format(cmpd_path, fp, vect), 'wb') as f:
pickle.dump(c_fps, f)
else:
bits = int(vect[:4])
with open('{}/{}-{}_vect.pickle'.format(cmpd_path, fp, vect), 'wb') as f:
pickle.dump(c_fps, f)
# Create new inchi dict
inchi_dict = {}
if map_indications:
foind = open("{0}/mappings/inds-{0}.tsv".format(new_v), 'w')
foind.write('CANDO_ID\tINDICATION_NAME\tMESH_ID\tINDICATION_ID\n')
ind2id = {}
curr_ind_id = 0
for c in ncs.itertuples(index=False):
try:
if file_type == 'mol':
nc = Chem.MolFromMolFile("{}/{}.mol".format(cmpd_dir, c[0]))
name = nc.GetProp("_Name")
elif file_type == 'smi':
nc = Chem.MolFromSmiles("{}".format(c[0]))
name = c[1]
nc.SetProp("_Name", name)
except:
print("{} cannot load | |
np.zeros(len(self.rfV1))
sig = np.zeros((len(H), len(k), len(self.phases)))
pws = np.zeros((len(H), len(k), len(self.phases)))
for ih in _progressbar(range(len(H)), 'Computing: ', 15):
for ik, kk in enumerate(k):
for ip, ph in enumerate(self.phases):
for i in range(len(self.rfV1)):
if self.rfV2 and (ph == 'pps' or ph == 'pss'):
rfV = self.rfV2[i].copy()
else:
rfV = self.rfV1[i].copy()
# Calculate move out for each phase and get
# median value, weighted by instantaneous phase (pws)
tt = _dtime_dip_(
rfV, H[ih], kk, vp, ph, self.strike, self.dip)
trace = _timeshift_(rfV, tt)
thilb = hilbert(trace)
tphase = np.arctan2(thilb.imag, thilb.real)
weight += np.exp(1j*tphase[0])
amp[i] = trace[0]
weight = abs(weight/np.float(len(self.rfV1)))**4
sig[ih, ik, ip] = np.var(amp)*np.real(weight)
pws[ih, ik, ip] = np.median(amp)*np.real(weight)
self.pws = pws
self.sig = sig
def average(self, typ='sum', q=0.05, err_method='amp'):
"""
Method to combine the phase-weighted stacks to produce a final
stack, from which to estimate the H and k parameters and their
associated errors.
Parameters
----------
typ : str
How the phase-weigthed stacks should be combined to produce
a final stack. Available options are: weighted sum (``typ=sum``)
or product (``typ=product``).
q : float
Confidence level for the error estimate
err_method : str
How errors should be estimated. Options are ``err_method='amp'``
to estimate errors from amplitude, or ``err_method='stats'`` to
use a statistical F test from the residuals.
"""
# Initialize arrays based on bounds
H = np.arange(self.hbound[0], self.hbound[1] + self.dh, self.dh)
k = np.arange(self.kbound[0], self.kbound[1] + self.dk, self.dk)
# Multiply pws by weights
ps = self.pws[:, :, 0]*self.weights[0]
try:
pps = self.pws[:, :, 1]*self.weights[1]
except:
pps = None
try:
pss = self.pws[:, :, 2]*self.weights[2]
except:
pss = None
# Get stacks
if typ == 'sum':
stack = (ps + pps + pss)
elif typ == 'product':
# Zero out negative values
ps[ps < 0] = 0.
if self.weights[1] != 0.:
pps[pps < 0] = 0.
else:
pps = 1.
if self.weights[2] != 0.:
pss[pss < 0] = 0.
else:
pss = 1.
stack = ps*pps*pss
else:
raise(Exception("'typ' must be either 'sum' or 'product'"))
self.typ = typ
# Find maximum within stacks
ind = np.where(stack == stack.max())
self.h0 = H[ind[0]][0]
self.k0 = k[ind[1]][0]
self.stack = stack
try:
self.error()
except:
self.err_k0 = 0.
self.err_h0 = 0.
def error(self, q=0.05, err_method='amp'):
"""
Method to determine the error on H and k estimates.
From Walsh, JGR, 2013
Parameters
----------
q : float
Confidence level for the error estimate
err_method : str
How errors should be estimated. Options are ``err_method='amp'``
to estimate errors from amplitude, or ``err_method='stats'`` to
use a statistical F test from the residuals.
"""
# Initialize arrays based on bounds
H = np.arange(self.hbound[0], self.hbound[1] + self.dh, self.dh)
k = np.arange(self.kbound[0], self.kbound[1] + self.dk, self.dk)
msf = self.stack/self.stack.max()
# Method 1 - based on stats
if err_method == 'stats':
# Get degrees of freedom
dof = _dof(self._residuals())
# print(dof)
if dof < 3:
dof = 3
print(
"Degrees of freedom < 3. Fixing to DOF = 3, which may " +
"result in accurate errors")
n_par = 2
msf = 1. - msf
# Error contour
vmin = msf.min()
vmax = msf.max()
self.err_contour = vmin*(1. + n_par/(dof - n_par) *
stats.f.ppf(1. - q, n_par, dof - n_par))
# print(vmin*(1. + n_par/(dof - n_par)*
# stats.f.ppf(1. - q, n_par, dof - n_par)))
# self.err_contour = (n_par/(dof - n_par) *
err = np.where(msf < self.err_contour)
# Method 2 - based on amplitude
elif err_method == 'amp':
self.err_contour = 0.5
err = np.where(msf > self.err_contour)
else:
raise(Exception("'err_method' must be either 'stats' or 'amp'"))
self.err_method = err_method
# Estimate uncertainty (q confidence interval)
self.err_k0 = max(0.25*(k[max(err[1])] - k[min(err[1])]), self.dk)
self.err_h0 = max(0.25*(H[max(err[0])] - H[min(err[0])]), self.dh)
def plot(self, save=False, title=None, form='png'):
"""
Method to plot H-K stacks. By default all 4 panels
are plotted: The ``ps``, ``pps`` and ``pss`` stacks, and the
final (averaged) stack. Error contours are also plotted,
as well as the position of the maximum stack values.
Parameters
----------
save : bool
Whether or not to save the Figure
title : str
Title of plot
"""
# Initialize arrays based on bounds
H = np.arange(self.hbound[0], self.hbound[1] + self.dh, self.dh)
k = np.arange(self.kbound[0], self.kbound[1] + self.dk, self.dk)
# Extent of plots
extent = (H.min(), H.max(), k.min(), k.max())
# Extract phase stacks
ps = self.pws[:, :, 0]
pps = self.pws[:, :, 1]
pss = self.pws[:, :, 2]
if self.typ == 'product':
# Zero out negative values
ps[ps < 0] = 0.
try:
pps[pps < 0] = 0.
except:
pass
try:
pss[pss < 0] = 0.
except:
pass
# Set up figure
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(
2, 2, sharex=True, sharey=True)
cmap = 'RdBu_r'
# First subplot: Ps
vmax = np.abs(max(ps.max(), ps.min(), key=abs))
im = ax1.imshow(np.rot90(ps), cmap=cmap,
extent=extent, vmin=-vmax, vmax=vmax, aspect='auto')
ax1.set_ylabel('Vp/Vs')
ax1.set_title('Ps - weight: {0:.1f}'.format(
self.weights[0]), fontsize=10)
# Second subplot: Pps
vmax = np.abs(max(pps.max(), pps.min(), key=abs))
im = ax2.imshow(np.rot90(pps), cmap=cmap,
extent=extent, vmin=-vmax, vmax=vmax, aspect='auto')
ax2.set_title('Pps - weight: {0:.1f}'.format(
self.weights[1]), fontsize=10)
# Third subplot: Pss
vmax = np.abs(max(pss.max(), pss.min(), key=abs))
im = ax3.imshow(np.rot90(pss), cmap=cmap,
extent=extent, vmin=-vmax, vmax=vmax, aspect='auto')
ax3.set_title('Pss - weight: {0:.1f}'.format(
self.weights[2]), fontsize=10)
ax3.set_ylabel('Vp/Vs')
ax3.set_xlabel('Thickness (km)')
# Fourth subplot: Average
vmax = np.abs(max(self.stack.max(), self.stack.min(), key=abs))
im = ax4.imshow(np.rot90(self.stack), cmap=cmap,
extent=extent, vmin=-vmax, vmax=vmax, aspect='auto')
ax4.set_title('Stack')
ax4.set_xlabel('Thickness (km)')
#cbar = fig.colorbar(im, ticks=[-vmax, 0, vmax])
#cbar.ax.set_yticklabels(['min', '0', 'max'])
# Get confidence intervals
if hasattr(self, 'err_contour'):
# ax.contour(np.rot90(vmax-msf), (vmax-err_cont,),
if self.err_method == 'stats':
ax4.contour(
np.rot90(1.-self.stack/self.stack.max()),
(self.err_contour,),
hold='on', colors='yellow', linewidths=1, origin='upper',
extent=extent)
elif self.err_method == 'amp':
ax4.contour(
np.rot90(self.stack/self.stack.max()),
(self.err_contour,),
hold='on', colors='yellow', linewidths=1, origin='upper',
extent=extent)
# Add star showing best fit
try:
ax4.scatter(self.h0, self.k0, 60, marker='*', color='white')
except:
print("'h0' and 'k0' are not available")
if title:
plt.suptitle(title)
else:
plt.suptitle('H-k stacks, station: ' + self.rfV1[0].stats.station)
if save:
plt.savefig('HK_PLOTS/hk.' + self.rfV1[0].stats.station +
'.' + title+'.'+self.typ+'.'+form, format=form)
else:
plt.show()
plt.close()
## JMG ##
def save(self, file):
## JMG ##
"""
Saves HkStack object to file
Parameters
----------
file : str
File name for HkStack object
"""
import pickle
output = open(file, 'wb')
pickle.dump(self, output)
output.close()
def _residuals(self):
"""
Internal method to obtain residuals between observed and predicted
receiver functions given the Moho depth and Vp/Vs obtained from
the Hk stack.
"""
from telewavesim import utils
# Simple 1-layer model over half-space
model = utils.Model(
[self.h0, 0.],
[2800., 3300.],
[self.vp, 8.0],
[self.vp/self.k0, 4.5],
['iso', 'iso'])
# Parameters for run
slow = [tr.stats.slow for tr in self.rfV1]
npts = self.rfV1[0].stats.npts
dt = self.rfV1[0].stats.delta
trR = Stream()
for sl in slow:
trxyz = utils.run_plane(model, sl, npts, dt)
tfs = utils.tf_from_xyz(
trxyz, pvh=True, vp=self.vp, vs=self.vp/self.k0)
tfs[0].data = np.fft.fftshift(tfs[0].data)
trR.append(tfs[0])
trR.filter('bandpass', freqmin=0.05, freqmax=0.5, corners=2,
zerophase=True)
# Get stream of residuals
res = trR.copy()
for i in range(len(res)):
res[i].data = self.rfV1[i].data - trR[i].data
return res
def _dof(st):
"""
Method to determine the degrees of freedom to calculate
the confidence region of the misfit function.
From Walsh, JGR, 2013
"""
dof = []
for tr in st:
F = np.abs(np.fft.fft(tr.data)[0:int(len(tr.data)/2)+1])
E2 = np.sum(F**2)
E2 -= (F[0]**2 + F[-1]**2)/2.
E4 = (1./3.)*(F[0]**4 + F[-1]**4)
for i in range(1, len(F) - 1):
E4 += (4./3.)*F[i]**4
dof.append(int(4.*E2**2/E4 - 2.))
dof_max = min(dof)
return dof_max
def _dtime_(trace, z, r, vp, ph):
"""
Method to calculate travel time for different scattered phases
"""
# Horizontal slowness
slow = trace.stats.slow
# Vertical slownesses
c1 = np.sqrt((r/vp)**2 - slow**2)
c2 = np.sqrt((1./vp)**2 - slow**2)
if ph == 'ps':
tt = z*(c1 - c2)
elif ph == 'pps':
tt = z*(c1 + c2)
elif ph == 'pss':
tt = 2.*z*c1
return tt
def _dtime_dip_(trace, z, r, vp, ph, strike, dip):
"""
Method to calculate travel time for different scattered phases
using strike and dip angles
"""
# Initialize some parameters
n = np.zeros(3)
pinc = np.zeros(3)
ai = 8.1
br = vp/r
# Get vector normal to dipping interface
dip = dip*np.pi/180.
strike = strike*np.pi/180.
n[0] = np.sin(strike)*np.sin(dip)
n[1] = -np.cos(strike)*np.sin(dip)
n[2] = np.cos(dip)
# Horizontal slowness
slow = trace.stats.slow
# Back-azimuth
baz = trace.stats.baz
# Assemble constants of incident wave
| |
<filename>news/tests/test_api_views.py<gh_stars>0
from django.core.files.uploadedfile import SimpleUploadedFile
from django.urls import reverse
from django.test import TestCase, Client, override_settings
from django.contrib.auth.models import User, Permission, Group
from django.utils import timezone
import json
from datetime import date, timedelta
from rest_framework.test import APIRequestFactory, APITestCase, APIClient
from rest_framework_jwt.settings import api_settings
from accounts.models import UserProfile
from ..models import KnowledgeCategory, DocumentF, DocQuestion, DocFile, \
NewsFile, DocumentF, News, NotificationReadFlag
from ..api.views import KnowledgeListAPIView, DocQuestionListAPIView, \
UserQuestionCreateAPIView, DocFileCreateAPIView, NewsFileCreateAPIView, \
DocumentFViewSet, NewsViewSet
# Handlers configured by rest_framework_jwt for building and signing tokens
jwt_payload_handler = api_settings.JWT_PAYLOAD_HANDLER
jwt_encode_handler = api_settings.JWT_ENCODE_HANDLER


def get_token(user):
    """Build and sign a JWT for *user* using the configured handlers."""
    return jwt_encode_handler(jwt_payload_handler(user))
class TestKnowledgeListAPIView(APITestCase):
    """Tests for KnowledgeListAPIView: permission checks and queryset filtering."""

    @classmethod
    def setUpTestData(cls):
        # Users: testuser1/testuser3 get full permissions, testuser2 gets none
        cls.test_user1 = User.objects.create_user(
            username='testuser1', password='<PASSWORD>')
        cls.test_user2 = User.objects.create_user(
            username='testuser2', password='<PASSWORD>')
        cls.test_user3 = User.objects.create_user(
            username='testuser3', password='<PASSWORD>')
        newgroup = Group.objects.create(name='testgroup')
        for each in Permission.objects.all():
            newgroup.permissions.add(each)
        cls.test_user1.groups.add(newgroup)
        cls.test_user3.groups.add(newgroup)
        cls.test_category = KnowledgeCategory.objects.create(
            title='Test Category')
        cls.test_category_2 = KnowledgeCategory.objects.create(
            title='Test Category 2')
        test_user1_userprofile = UserProfile.objects.create(
            user=cls.test_user1,
            name='Test User1',
            telephone='11',
            email='<EMAIL>',
            employee_id='2',
            departament='sal',
            location='WAW'
        )
        test_user1_userprofile.save()
        test_user2_userprofile = UserProfile.objects.create(
            user=cls.test_user2,
            name='Test User2',
            telephone='222222222',
            email='<EMAIL>',
            employee_id='3',
            departament='sal',
            location='PZN'
        )
        test_user2_userprofile.save()
        test_user3_userprofile = UserProfile.objects.create(
            user=cls.test_user3,
            name='<NAME>',
            telephone='222222222',
            email='<EMAIL>',
            employee_id='3',
            departament='sal',
            location='PZN'
        )
        test_user3_userprofile.save()
        # Document targeted at testuser3's location/department (PZN/sal)
        cls.test_document = DocumentF.objects.create(
            title="test title",
            body='test body',
            author=cls.test_user1,
            target_location="PZN",
            target_departament="sal",
            date_created='2021-07-10T18:11:11.055162Z'
        )
        cls.factory = APIRequestFactory()

    def test_GET_if_no_permission(self):
        """A user without permissions gets 403."""
        user = self.test_user2
        token = get_token(user)
        request = self.factory.get(
            '/news/api/knowledge/', HTTP_AUTHORIZATION='JWT ' + token)
        view = KnowledgeListAPIView.as_view()
        response = view(request)
        # Fixed: assertEquals is a deprecated alias removed in Python 3.12
        self.assertEqual(response.status_code, 403)

    def test_GET_if_has_permission(self):
        """A user with permissions gets 200."""
        view = KnowledgeListAPIView.as_view()
        user = self.test_user1
        token = get_token(user)
        request = self.factory.get(
            '/news/api/knowledge/', HTTP_AUTHORIZATION='JWT ' + token)
        response = view(request)
        self.assertEqual(response.status_code, 200)

    def test_view_queryset_response(self):
        """Categories are serialized with empty docs/files for a WAW user."""
        user = self.test_user1
        view = KnowledgeListAPIView.as_view()
        token = get_token(user)
        request = self.factory.get(
            '/news/api/knowledge/', HTTP_AUTHORIZATION='JWT ' + token, format='json')
        response = view(request)
        response.render()
        expected_response = [{'id': 1, 'title': 'Test Category', 'docs': [], 'files': []}, {
            'id': 2, 'title': 'Test Category 2', 'docs': [], 'files': []}]
        self.assertEqual(json.loads(response.content), expected_response)

    def test_view_queryset_filtering(self):
        """A PZN user additionally sees the document targeted at PZN/sal."""
        user = self.test_user3
        view = KnowledgeListAPIView.as_view()
        token = get_token(user)
        request = self.factory.get(
            '/news/api/knowledge/', HTTP_AUTHORIZATION='JWT ' + token, format='json')
        response = view(request)
        response.render()
        expected_response = [{'id': 1, 'title': 'Test Category', 'docs': [], 'files': []},
                             {'id': 2, 'title': 'Test Category 2', 'docs': [{'id': 1, 'title': 'test title', 'date_created': '2021-07-10T18:11:11.055162Z'}], 'files': []}]
        self.assertEqual(json.loads(response.content), expected_response)
class TestDocQuestionListAPIView(APITestCase):
    """CRUD, search and audience-filtering tests for DocQuestionListAPIView
    (/news/api/faq/).

    Fixture layout: test_user1 and test_user3 belong to the all-permissions
    'Managers' group; test_user2 has no permissions.  Question 1 has the
    default ('non') audience, question 2 targets location PZN / department
    'sal' (which matches test_user3's profile).
    """

    @classmethod
    def setUpTestData(cls):
        cls.test_user1 = User.objects.create_user(
            username='testuser1', password='<PASSWORD>')
        cls.test_user2 = User.objects.create_user(
            username='testuser2', password='<PASSWORD>')
        cls.test_user3 = User.objects.create_user(
            username='testuser3', password='<PASSWORD>')
        # Grant every permission to the Managers group.
        newgroup = Group.objects.create(name='Managers')
        for each in Permission.objects.all():
            newgroup.permissions.add(each)
        cls.test_user1.groups.add(newgroup)
        cls.test_user3.groups.add(newgroup)
        cls.test_category = KnowledgeCategory.objects.create(
            title='Test Category')
        cls.test_category_2 = KnowledgeCategory.objects.create(
            title='Test Category 2')
        test_user1_userprofile = UserProfile.objects.create(
            user=cls.test_user1,
            name='<NAME>',
            telephone='11',
            email='<EMAIL>',
            employee_id='2',
            departament='sal',
            location='WAW'
        )
        test_user1_userprofile.save()
        test_user2_userprofile = UserProfile.objects.create(
            user=cls.test_user2,
            name='<NAME>',
            telephone='222222222',
            email='<EMAIL>',
            employee_id='3',
            departament='sal',
            location='PZN'
        )
        test_user2_userprofile.save()
        test_user3_userprofile = UserProfile.objects.create(
            user=cls.test_user3,
            name='<NAME>',
            telephone='222222222',
            email='<EMAIL>',
            employee_id='3',
            departament='sal',
            location='PZN'
        )
        test_user3_userprofile.save()
        # Question visible regardless of location/department (defaults to 'non').
        cls.test_question = DocQuestion.objects.create(
            title="test title",
            body='test body',
            author=cls.test_user1,
            answer="test answer",
            category=cls.test_category,
        )
        # Question targeted at PZN / 'sal' only.
        cls.test_question2 = DocQuestion.objects.create(
            title="test title2",
            body='test body2',
            author=cls.test_user1,
            answer="test answer2",
            category=cls.test_category,
            target_location="PZN",
            target_departament="sal",
        )
        cls.factory = APIRequestFactory()

    def test_GET_if_no_permission(self):
        """GET returns 403 for a user without permissions."""
        user = self.test_user2
        token = get_token(user)
        request = self.factory.get(
            '/news/api/faq/', HTTP_AUTHORIZATION='JWT ' + token)
        view = DocQuestionListAPIView.as_view()
        response = view(request)
        # assertEqual: assertEquals is a deprecated alias (removed in Python 3.12).
        self.assertEqual(response.status_code, 403)

    def test_GET_if_has_permission(self):
        """GET returns 200 for a permitted user."""
        view = DocQuestionListAPIView.as_view()
        user = self.test_user1
        token = get_token(user)
        request = self.factory.get(
            '/news/api/faq/', HTTP_AUTHORIZATION='JWT ' + token)
        response = view(request)
        self.assertEqual(response.status_code, 200)

    def test_view_queryset_response(self):
        """A WAW user only sees the untargeted question."""
        user = self.test_user1
        view = DocQuestionListAPIView.as_view()
        token = get_token(user)
        request = self.factory.get(
            '/news/api/faq/', HTTP_AUTHORIZATION='JWT ' + token, format='json')
        response = view(request)
        response.render()
        expected_response = [
            {'id': 1, 'title': 'test title', 'body': 'test body',
             'answer': 'test answer', 'target_departament': 'non',
             'target_location': 'non', 'category': 1},
        ]
        self.assertEqual(json.loads(response.content), expected_response)

    def test_view_queryset_search(self):
        """?q= filters the queryset by search term."""
        user = self.test_user3
        view = DocQuestionListAPIView.as_view()
        token = get_token(user)
        request = self.factory.get(
            '/news/api/faq/?q=title2', HTTP_AUTHORIZATION='JWT ' + token, format='json')
        response = view(request)
        response.render()
        expected_response = [
            {'id': 2, 'title': 'test title2', 'body': 'test body2',
             'answer': 'test answer2', 'target_departament': 'sal',
             'target_location': 'PZN', 'category': 1},
        ]
        self.assertEqual(json.loads(response.content), expected_response)

    def test_view_queryset_filtering(self):
        """Audience filtering: user1 (WAW) sees one question, user3 (PZN/'sal') both."""
        user = self.test_user1
        view = DocQuestionListAPIView.as_view()
        token = get_token(user)
        request = self.factory.get(
            '/news/api/faq/', HTTP_AUTHORIZATION='JWT ' + token, format='json')
        response = view(request)
        response.render()
        expected_response = [
            {'id': 1, 'title': 'test title', 'body': 'test body',
             'answer': 'test answer', 'target_departament': 'non',
             'target_location': 'non', 'category': 1},
        ]
        self.assertEqual(json.loads(response.content), expected_response)
        # The targeted question is listed first for the matching user.
        user3 = self.test_user3
        token = get_token(user3)
        request = self.factory.get(
            '/news/api/faq/', HTTP_AUTHORIZATION='JWT ' + token, format='json')
        response = view(request)
        response.render()
        expected_response = [
            {'id': 2, 'title': 'test title2', 'body': 'test body2',
             'answer': 'test answer2', 'target_departament': 'sal',
             'target_location': 'PZN', 'category': 1},
            {'id': 1, 'title': 'test title', 'body': 'test body',
             'answer': 'test answer', 'target_departament': 'non',
             'target_location': 'non', 'category': 1},
        ]
        self.assertEqual(json.loads(response.content), expected_response)

    def test_view_object_creation(self):
        """POST by a permitted user creates a question (201)."""
        user = self.test_user1
        view = DocQuestionListAPIView.as_view()
        token = get_token(user)
        data = {
            'title': "test title",
            'body': 'test body',
            'answer': "test answer",
            'category': 1,
        }
        request = self.factory.post(
            '/news/api/faq/', data, HTTP_AUTHORIZATION='JWT ' + token)
        response = view(request)
        response.render()
        expected_response = {'id': 3, 'title': 'test title', 'body': 'test body',
                             'answer': 'test answer', 'target_departament': 'non',
                             'target_location': 'non', 'category': 1}
        self.assertEqual(json.loads(response.content), expected_response)
        self.assertEqual(DocQuestion.objects.count(), 3)
        self.assertEqual(response.status_code, 201)

    def test_view_object_creation_no_permission(self):
        """POST by an unpermitted user is rejected (403) and creates nothing."""
        user = self.test_user2
        view = DocQuestionListAPIView.as_view()
        token = get_token(user)
        data = {
            'title': "test title",
            'body': 'test body',
            'answer': "test answer",
            'category': 1,
        }
        request = self.factory.post(
            '/news/api/faq/', data, HTTP_AUTHORIZATION='JWT ' + token)
        response = view(request)
        response.render()
        expected_response = {
            'detail': 'You do not have permission to perform this action.'}
        self.assertEqual(json.loads(response.content), expected_response)
        self.assertEqual(DocQuestion.objects.count(), 2)
        self.assertEqual(response.status_code, 403)

    def test_view_object_update(self):
        """PATCH ?pk=2 updates the targeted question's answer."""
        user = self.test_user1
        view = DocQuestionListAPIView.as_view()
        token = get_token(user)
        data = {
            'title': "test title",
            'body': 'test body',
            'answer': "test answer update",
            'category': 1,
        }
        request = self.factory.patch(
            '/news/api/faq/?pk=2', data, HTTP_AUTHORIZATION='JWT ' + token)
        response = view(request)
        response.render()
        expected_response = {'id': 2, 'title': 'test title', 'body': 'test body',
                             'answer': 'test answer update',
                             'target_departament': 'sal',
                             'target_location': 'PZN', 'category': 1}
        self.assertEqual(json.loads(response.content), expected_response)
        self.assertEqual(response.status_code, 200)

    def test_view_object_update_no_permission(self):
        """PATCH by an unpermitted user is rejected (403)."""
        user = self.test_user2
        view = DocQuestionListAPIView.as_view()
        token = get_token(user)
        data = {
            'title': "test title",
            'body': 'test body',
            'answer': "test answer update",
            'category': 1,
        }
        request = self.factory.patch(
            '/news/api/faq/?pk=2', data, HTTP_AUTHORIZATION='JWT ' + token)
        response = view(request)
        response.render()
        self.assertEqual(response.status_code, 403)

    def test_view_object_delete(self):
        """DELETE ?pk=2 removes the question (204)."""
        user = self.test_user1
        view = DocQuestionListAPIView.as_view()
        token = get_token(user)
        request = self.factory.delete(
            '/news/api/faq/?pk=2', HTTP_AUTHORIZATION='JWT ' + token)
        response = view(request)
        response.render()
        self.assertEqual(DocQuestion.objects.count(), 1)
        self.assertEqual(response.status_code, 204)

    def test_view_object_delete_no_permission(self):
        """DELETE by an unpermitted user is rejected (403) and removes nothing."""
        user = self.test_user2
        view = DocQuestionListAPIView.as_view()
        token = get_token(user)
        request = self.factory.delete(
            '/news/api/faq/?pk=2', HTTP_AUTHORIZATION='JWT ' + token)
        response = view(request)
        response.render()
        self.assertEqual(DocQuestion.objects.count(), 2)
        self.assertEqual(response.status_code, 403)
class TestUserQuestionCreateAPIView(APITestCase):
    """Creation test for UserQuestionCreateAPIView (/news/api/userquestion/)."""

    @classmethod
    def setUpTestData(cls):
        cls.test_user1 = User.objects.create_user(
            username='testuser1', password='<PASSWORD>')
        # Grant every permission via the Managers group.
        newgroup = Group.objects.create(name='Managers')
        for each in Permission.objects.all():
            newgroup.permissions.add(each)
        cls.test_user1.groups.add(newgroup)
        cls.test_category = KnowledgeCategory.objects.create(
            title='Test Category')
        test_user1_userprofile = UserProfile.objects.create(
            user=cls.test_user1,
            name='Test User1',
            telephone='11',
            email='<EMAIL>',
            employee_id='2',
            departament='sal',
            location='WAW'
        )
        test_user1_userprofile.save()
        cls.factory = APIRequestFactory()

    def test_view_object_creation(self):
        """POST creates a DocQuestion; the response echoes only title and body."""
        user = self.test_user1
        view = UserQuestionCreateAPIView.as_view()
        token = get_token(user)
        data = {
            'title': "test title",
            'body': 'test body',
            'category': 1,
        }
        request = self.factory.post(
            '/news/api/userquestion/', data, HTTP_AUTHORIZATION='JWT ' + token)
        response = view(request)
        response.render()
        expected_response = {'title': 'test title', 'body': 'test body'}
        # assertEqual: assertEquals is a deprecated alias (removed in Python 3.12).
        self.assertEqual(json.loads(response.content), expected_response)
        self.assertEqual(DocQuestion.objects.count(), 1)
        self.assertEqual(response.status_code, 201)
class TestDocFileCreateAPIView(APITestCase):
    """Multipart upload tests for DocFileCreateAPIView (/news/api/uploaddocfile/)."""

    @classmethod
    def setUpTestData(cls):
        cls.test_user1 = User.objects.create_user(
            username='testuser1', password='<PASSWORD>')
        # Grant every permission via the Managers group (test_user1 only).
        newgroup = Group.objects.create(name='Managers')
        for each in Permission.objects.all():
            newgroup.permissions.add(each)
        cls.test_user1.groups.add(newgroup)
        cls.test_user2 = User.objects.create_user(
            username='testuser2', password='<PASSWORD>')
        test_user2_userprofile = UserProfile.objects.create(
            user=cls.test_user2,
            name='<NAME>',
            telephone='222222222',
            email='<EMAIL>',
            employee_id='3',
            departament='sal',
            location='PZN'
        )
        test_user2_userprofile.save()
        cls.test_category = KnowledgeCategory.objects.create(
            title='Test Category')
        test_user1_userprofile = UserProfile.objects.create(
            user=cls.test_user1,
            name='Test User1',
            telephone='11',
            email='<EMAIL>',
            employee_id='2',
            departament='sal',
            location='WAW'
        )
        test_user1_userprofile.save()
        cls.factory = APIRequestFactory()

    def test_view_object_creation_no_permission(self):
        """Upload by an unpermitted user is rejected (403) and stores nothing."""
        user = self.test_user2
        view = DocFileCreateAPIView.as_view()
        token = get_token(user)
        # NOTE: the original also built an identical unused SimpleUploadedFile
        # local; it has been removed.
        files = {
            'file': SimpleUploadedFile(
                "test_file.pdf",
                b"these are the file contents!"),
            'title': 'test title',
            'category': 1
        }
        request = self.factory.post(
            '/news/api/uploaddocfile/', data=files, HTTP_AUTHORIZATION='JWT ' + token, format='multipart')
        response = view(request)
        response.render()
        # assertEqual: assertEquals is a deprecated alias (removed in Python 3.12).
        self.assertEqual(DocFile.objects.count(), 0)
        self.assertEqual(response.status_code, 403)

    def test_view_object_creation(self):
        """Upload by a permitted user creates a DocFile (201)."""
        user = self.test_user1
        view = DocFileCreateAPIView.as_view()
        token = get_token(user)
        files = {
            'file': SimpleUploadedFile(
                "test_file.pdf",
                b"these are the file contents!"),
            'title': 'test title',
            'category': 1
        }
        request = self.factory.post(
            '/news/api/uploaddocfile/', data=files, HTTP_AUTHORIZATION='JWT ' + token, format='multipart')
        response = view(request)
        response.render()
        self.assertEqual(DocFile.objects.count(), 1)
        self.assertEqual(response.status_code, 201)
class TestNewsFileCreateAPIView(APITestCase):
    @classmethod
    def setUpTestData(cls):
        """Create one manager user, one ordinary user, their profiles, a
        category and a request factory, shared by every test in the class."""
        cls.test_user1 = User.objects.create_user(
            username='testuser1', password='<PASSWORD>')
        # Grant every permission to the Managers group; only test_user1 joins it.
        newgroup = Group.objects.create(name='Managers')
        for each in Permission.objects.all():
            newgroup.permissions.add(each)
        cls.test_user2 = User.objects.create_user(
            username='testuser2', password='<PASSWORD>')
        test_user2_userprofile = UserProfile.objects.create(
            user=cls.test_user2,
            name='Test User2',
            telephone='222222222',
            email='<EMAIL>',
            employee_id='3',
            departament='sal',
            location='PZN'
        )
        test_user2_userprofile.save()
        cls.test_user1.groups.add(newgroup)
        cls.test_category = KnowledgeCategory.objects.create(
            title='Test Category')
        test_user1_userprofile = UserProfile.objects.create(
            user=cls.test_user1,
            name='Test User1',
            telephone='11',
            email='<EMAIL>',
            employee_id='2',
            departament='sal',
            location='WAW'
        )
        test_user1_userprofile.save()
        cls.factory = APIRequestFactory()
def test_view_object_creation(self):
user = self.test_user1
view = NewsFileCreateAPIView.as_view()
token = get_token(user)
file = SimpleUploadedFile(
"test_file.pdf",
b"these are the | |
not wait for responses here.
send_all_status_requests()
#Test whether there is a command or status response ready to read,
# timeout in 5 seconds.
readable_list = get_ercs()
readable_list.append(u.get_tsd().socket)
try:
#readable, unused, unused = select.select(
# [erc.sock, u.get_tsd().socket], [], [], SELECT_TIMEOUT)
readable, unused, unused = select.select(
readable_list, [], [], SELECT_TIMEOUT)
except (socket.error, select.error), msg:
if msg.args[0] != errno.EINTR:
cleanup_networking(intf)
# erc.unsubscribe()
# erc.sock.close()
try:
sys.stderr.write("Exiting early.\n")
sys.stderr.flush()
except IOError:
pass
sys.exit(1)
#Update counts and do it again if there is nothing going on.
if not readable:
#Since we don't have much going on, let us take a moment
# to clear out stale status requests that we don't appear
# to ever be getting responses to.
drop_stale_status_requests()
if count > MAX_COUNT:
#If nothing received for 30 seconds, resubscribe.
for erc in get_ercs():
erc.subscribe()
count = 0
else:
#If nothing received for 5 seconds, up the count and
# try again.
count = count + 1
# If the display/main thread hasn't done anything in 10
# minutes, let us restart entv.
#TO DO - get_time
if enstore_display.message_queue.get_time <= \
time.time() - TEN_MINUTES:
message = "Display is stuck. Restarting entv. [1]"
Trace.trace(0, message, out_fp=sys.stderr)
restart_entv()
continue
# If the display/main thread hasn't done anything in 10
# minutes, let us restart entv.
for system_name in enstore_display.message_queue.get_queue_keys():
if enstore_display.message_queue.len_queue(system_name) > 0 and \
enstore_display.message_queue.last_get_time() <= \
time.time() - TEN_MINUTES:
message = "Display is stuck. Restarting entv. [2]"
Trace.trace(0, message, out_fp=sys.stderr)
restart_entv()
commands = []
#Read any status responses from movers or the inquisitor.
if u.get_tsd().socket in readable:
process_udp(u, Tkinter.tkinter.READABLE)
"""
for system_name in enstore_display.message_queue.get_queue_keys():
temp_commands = [] #Clear for each enstore system.
send_request_dict_copy = get_sent_request(system_name, None)
for tx_id in send_request_dict_copy.keys():
try:
mstatus = u.recv_deferred(tx_id, 0.0)
if mstatus.has_key('time_in_state'):
#We have a mover response. Since the status
# field might be for an error, we need to
# avoid using is_ok() here, so that the error
# gets displayed instead of getting the
# response ignored.
pass
else:
#We have an inquisitor response.
if not e_errors.is_ok(mstatus):
#del send_request_dict[tx_id]
set_sent_request(None, system_name, tx_id)
continue
#commands = commands + handle_status(
# send_request_dict[tx_id]['name'], mstatus)
temp_commands = temp_commands + handle_status(
get_sent_request(system_name, tx_id)['name'],
mstatus)
#del send_request_dict[tx_id]
set_sent_request(None, system_name, tx_id)
if mstatus.get('work', None) == "show":
Trace.trace(1, "Recieved ID %s from inquisitor." \
% (tx_id,))
else:
Trace.trace(1, "Recieved ID %s from mover." \
% (tx_id,))
except (socket.error, select.error,
e_errors.EnstoreError):
pass
except errno.errorcode[errno.ETIMEDOUT]:
pass
else:
#Make sure to read any messages that finally arrived
# after the record of them being sent was purged from
# send_request_dict.
try:
u.recv_deferred([], 0.0)
except (socket.error, select.error,
e_errors.EnstoreError), msg:
if msg.args[0] not in [errno.ETIMEDOUT]:
Trace.log(0,
"Error reading socket: %s" % (str(msg),))
#Those commands that use mover names need to have the
# system name appended to the name.
commands = commands + insert_system_name(temp_commands,
system_name, intf)
"""
#Remove items that are in the queue without having received a
# response.
else:
drop_stale_status_requests()
#Read the next message from the event relay.
for erc in get_ercs():
if erc in readable:
process_erc(erc, Tkinter.tkinter.READABLE)
"""
temp_commands = [] #Clear for each enstore system.
if erc.sock in readable:
try:
msg = enstore_erc_functions.read_erc(erc)
except SyntaxError:
exc, msg = sys.exc_info()[:2]
import traceback
traceback.print_tb(sys.exc_info()[2])
#Report on the error.
try:
message = "Failed to read erc message: (%s, %s)\n"
sys.stderr.write(message % (str(exc), str(msg)))
sys.stderr.flush()
except IOError:
pass
if msg and not getattr(msg, 'status', None):
#Take the message from event relay.
commands = commands + ["%s %s" % (msg.type,
msg.extra_info)]
##If read_erc is valid it is a EventRelayMessage instance. If
# it gets here it is a dictionary with a status field error.
elif getattr(msg, "status", None):
Trace.trace(1, "Event relay error: %s" % (str(msg),),
out_fp=sys.stderr)
#Those commands that use mover names need to have the
# system name appended to the name.
commands = commands + insert_system_name(temp_commands,
system_name, intf)
"""
#if not commands:
# continue
"""
put_func = enstore_display.message_queue.put_queue #Shortcut.
for command in commands:
if command:
#For normal use put everything into the queue.
put_func(command, system_name)
"""
#If necessary, handle resubscribing.
if not intf.messages_file:
now = time.time()
if now - start > TEN_MINUTES:
# resubscribe
for erc in get_ercs():
erc.subscribe()
start = now
"""
#End nicely.
if not intf.messages_file:
#Tell the event relay to stop sending us information.
erc.unsubscribe()
#Remove all of the routes that were set up to all of the movers.
for mover_name in movers:
try:
m_addr = csc.get(mover_name, {}).get('hostip', None)
#If we added a route to the mover, we should remove it.
# Most clients would prefer to leave such routes in place,
# but entv is not your normal client. It talks to many
# movers that makes the routing table huge.
host_config.unset_route(m_addr)
pass
except (socket.error, OSError):
pass
except TypeError:
# mov.server_address is equal to None
pass
"""
Trace.trace(1, "Detected stop flag in %s." % (MESSAGES_NAME,))
return
"""
def handle_messages(system_name, intf):
global u
global event_relay_messages
threading.currentThread().setName("MESSAGES-%s" % (system_name,))
#Prevent the main thread from queuing status requests.
enstore_display.acquire(enstore_display.startup_lock, "startup_lock")
#This is a time hack to get a clean output file.
if intf.generate_messages_file:
timeout_time = time.time() + intf.capture_timeout
else:
timeout_time = None
# we will get all of the info from the event relay.
if intf.messages_file:
messages_file = open(intf.messages_file, "r")
last_timestamp = -1 #Used to space the commands in real time.
else:
erc = get_erc(system_name)
if erc:
retval = erc.start([event_relay_messages.ALL])
if retval == erc.ERROR:
Trace.trace(0, "Could not contact event relay.",
out_fp=sys.stderr)
#Determine the list of movers, tell the main thread about them
# and send the movers status requests.
movers = setup_movers(system_name, get_display(system_name), intf)
#If the client fails to initialize then wait a minute and start over.
# The largest known error to occur is that socket.socket() fails
# to return a file descriptor because to many files are open.
if should_stop():
enstore_display.release(enstore_display.startup_lock,
"startup_lock") #Avoid resource leak.
Trace.trace(1, "Detected stop flag in %s messages thread." %
(system_name,))
return
start = time.time()
count = 0
#Allow the main thread to queue status requests.
enstore_display.release(enstore_display.startup_lock, "startup_lock")
while not should_stop():
# If commands are listed, use 'canned' version of entv.
if intf.messages_file:
try:
#Get the next line from the commands list file.
line = messages_file.readline()
if not line:
try:
position = messages_file.tell()
size = os.fstat(messages_file.fileno())[stat.ST_SIZE]
except (OSError, IOError), msg:
Trace.trace(0,
"Error accessing messages file: %s" %
(str(msg),),
out_fp=sys.stderr)
sys.exit(1)
if position == size:
messages_file.seek(0, 0) #Position at beginning of file.
last_timestamp = -1 #Reset this too.
#For each line strip off the timestamp information from
# the espion.py.
words = line.split()
recorded_time = string.join(words[:5])
command = string.join(words[5:])
if not command:
continue
#break #Is this correct to break here?
#Store the command into the list (in this case of 1).
commands = [command]
except (OSError, IOError, TypeError, ValueError,
KeyError, IndexError):
messages_file.seek(0, 0) #Position at beginning of file.
last_timestamp = -1 #Reset this too.
continue
try:
timestamp = time.mktime(time.strptime(recorded_time))
except ValueError:
#Other content.
continue
#Don't overwhelm the display thread. This code attempts to wait
# the same amount of time as it happened the first time.
if last_timestamp != -1:
now = time.time()
sleep_duration = timestamp - last_timestamp - (1 - math.modf(now)[0])
time.sleep(max(sleep_duration, 0))
last_timestamp = timestamp
else:
#Send any status requests to the movers or the inquisitor. This
# only sends these requests, it does not wait for responses here.
send_all_status_requests(system_name)
#Test whether there is a command or status response ready to read,
# timeout in 5 seconds.
try:
readable, unused, unused = select.select(
[erc.sock, u.get_tsd().socket], [], [], SELECT_TIMEOUT)
except (socket.error, select.error), msg:
if msg.args[0] != errno.EINTR:
erc.unsubscribe()
erc.sock.close()
try:
sys.stderr.write("Exiting early.\n")
sys.stderr.flush()
except IOError:
pass
sys.exit(1)
#Update counts and do it again if there is nothing going on.
if not readable:
#Since we don't have much going on, let us take a moment
# to clear out stale status requests that we don't appear
# to ever be getting responses to.
drop_stale_status_requests(system_name)
if count > MAX_COUNT:
#If | |
initiate_rpool(wf, config, session)
config.pipeline_setup[
'pipeline_name'] = f'longitudinal_{orig_pipe_name}'
rpool = ingress_output_dir(config, rpool, long_id,
creds_path=input_creds_path)
select_node_name = f'select_{unique_id}'
select_sess = pe.Node(Function(input_names=['session',
'output_brains',
'warps'],
output_names=['brain_path',
'warp_path'],
function=select_session),
name=select_node_name)
select_sess.inputs.session = unique_id
wf.connect(template_node, 'output_brain_list', select_sess,
'output_brains')
wf.connect(template_node, 'warp_list', select_sess, 'warps')
rpool.set_data("space-longitudinal_desc-brain_T1w",
select_sess, 'brain_path', {}, "",
select_node_name)
rpool.set_data("from-T1w_to-longitudinal_mode-image_"
"desc-linear_xfm",
select_sess, 'warp_path', {}, "",
select_node_name)
config.pipeline_setup['pipeline_name'] = orig_pipe_name
excl = ['space-template_desc-brain_T1w',
'space-T1w_desc-brain_mask']
rpool.gather_pipes(wf, config, add_excl=excl)
wf.run()
# begin single-session stuff again
for session in sub_list:
unique_id = session['unique_id']
try:
creds_path = session['creds_path']
if creds_path and 'none' not in creds_path.lower():
if os.path.exists(creds_path):
input_creds_path = os.path.abspath(creds_path)
else:
err_msg = 'Credentials path: "%s" for subject "%s" ' \
'session "%s" was not found. Check this path ' \
'and try again.' % (creds_path, subject_id,
unique_id)
raise Exception(err_msg)
else:
input_creds_path = None
except KeyError:
input_creds_path = None
wf = initialize_nipype_wf(config, sub_list[0])
wf, rpool = initiate_rpool(wf, config, session)
pipeline_blocks = [warp_longitudinal_T1w_to_template,
warp_longitudinal_seg_to_T1w]
wf = connect_pipeline(wf, config, rpool, pipeline_blocks)
rpool.gather_pipes(wf, config)
# this is going to run multiple times!
# once for every strategy!
wf.run()
# TODO check:
# 1 func alone works
# 2 anat + func works, pass anat strategy list?
def func_preproc_longitudinal_wf(subject_id, sub_list, config):
    """
    Build and run functional preprocessing for every session of one subject.

    Parameters
    ----------
    subject_id : string
        the id of the subject
    sub_list : list of dict
        this is a list of sessions for one subject and each session if the same dictionary as the one given to
        prep_workflow
    config : configuration
        a configuration object containing the information of the pipeline config. (Same as for prep_workflow)

    Returns
    -------
    strat_list_ses_list : list of list
        a list of strategies; within each strategy, a list of sessions
    """
    # NOTE(review): this DataSink node is created but never connected to the
    # workflow below — confirm whether it is dead code.
    datasink = pe.Node(nio.DataSink(), name='sinker')
    datasink.inputs.base_directory = \
        config.pipeline_setup['working_directory']['path']

    session_id_list = []
    # Maps '<subject>_<session>' -> list of strategies for that session.
    ses_list_strat_list = {}

    workflow_name = 'func_preproc_longitudinal_' + str(subject_id)
    workflow = pe.Workflow(name=workflow_name)
    workflow.base_dir = config.pipeline_setup['working_directory']['path']
    workflow.config['execution'] = {
        'hash_method': 'timestamp',
        'crashdump_dir': os.path.abspath(
            config.pipeline_setup['crash_directory']['path'])
    }

    for sub_dict in sub_list:
        # Sessions without functional data are skipped entirely.
        if 'func' in sub_dict or 'rest' in sub_dict:
            # 'rest' is the legacy key for functional runs.
            if 'func' in sub_dict:
                func_paths_dict = sub_dict['func']
            else:
                func_paths_dict = sub_dict['rest']

            unique_id = sub_dict['unique_id']
            session_id_list.append(unique_id)

            try:
                creds_path = sub_dict['creds_path']
                if creds_path and 'none' not in creds_path.lower():
                    if os.path.exists(creds_path):
                        input_creds_path = os.path.abspath(creds_path)
                    else:
                        err_msg = 'Credentials path: "%s" for subject "%s" was not ' \
                                  'found. Check this path and try again.' % (
                                      creds_path, subject_id)
                        raise Exception(err_msg)
                else:
                    input_creds_path = None
            except KeyError:
                input_creds_path = None

            strat = Strategy()
            strat_list = [strat]
            node_suffix = '_'.join([subject_id, unique_id])

            # Functional Ingress Workflow
            # add optional flag
            workflow, diff, blip, fmap_rp_list = connect_func_ingress(
                workflow,
                strat_list,
                config,
                sub_dict,
                subject_id,
                input_creds_path,
                node_suffix)

            # Functional Initial Prep Workflow
            workflow, strat_list = connect_func_init(workflow, strat_list,
                                                     config, node_suffix)

            # Functional Image Preprocessing Workflow
            workflow, strat_list = connect_func_preproc(workflow, strat_list,
                                                        config, node_suffix)

            # Distortion Correction
            workflow, strat_list = connect_distortion_correction(workflow,
                                                                 strat_list,
                                                                 config,
                                                                 diff,
                                                                 blip,
                                                                 fmap_rp_list,
                                                                 node_suffix)

            ses_list_strat_list[node_suffix] = strat_list

    # Here we have all the func_preproc set up for every session of the subject

    # TODO create a list of list ses_list_strat_list
    # a list of skullstripping strategies,
    # a list of sessions within each strategy list
    # TODO rename and reorganize dict
    # TODO update strat name
    strat_list_ses_list = {}
    strat_list_ses_list['func_default'] = []

    # Only the first (default) strategy of each session is carried forward.
    for sub_ses_id, strat_nodes_list in ses_list_strat_list.items():
        strat_list_ses_list['func_default'].append(strat_nodes_list[0])

    workflow.run()

    return strat_list_ses_list
def merge_func_preproc(working_directory):
    """
    Walk a working directory and collect preprocessed functional images.

    Parameters
    ----------
    working_directory : string
        a path to the working directory

    Returns
    -------
    brain_list : list
        sorted paths of '.nii.gz' files under 'func_get_preprocessed_median'
        directories
    skull_list : list
        sorted paths of '.nii.gz' files under 'func_get_motion_correct_median'
        directories
    """
    preprocessed_brains = []
    preprocessed_skulls = []

    for current_dir, _, file_names in os.walk(working_directory):
        for file_name in file_names:
            # Substring match (not endswith) mirrors how these interim
            # directories/files are named by the pipeline.
            if '.nii.gz' not in file_name:
                continue
            full_path = os.path.join(current_dir, file_name)
            if 'func_get_preprocessed_median' in current_dir:
                preprocessed_brains.append(full_path)
            if 'func_get_motion_correct_median' in current_dir:
                preprocessed_skulls.append(full_path)

    return sorted(preprocessed_brains), sorted(preprocessed_skulls)
def register_func_longitudinal_template_to_standard(
        longitudinal_template_node, c, workflow, strat_init, strat_name):
    """Register the functional longitudinal template to standard space.

    Adds FSL FLIRT (+ optional FNIRT) and/or ANTs registration nodes to
    ``workflow`` depending on the ``using`` option in the pipeline config,
    forking strategies so FSL-linear-only, FSL-nonlinear and ANTs variants
    can coexist.

    Parameters
    ----------
    longitudinal_template_node : nipype node
        provides 'brain_template' and 'skull_template' outputs
    c : configuration
        pipeline configuration object
    workflow : nipype Workflow
        workflow the registration nodes are added to
    strat_init : Strategy
        strategy to fork the registration strategies from
    strat_name : str
        suffix used to name the registration nodes

    Returns
    -------
    (workflow, strat_list) : updated workflow and list of strategies
    """
    sub_mem_gb, num_cores_per_sub, num_ants_cores, num_omp_cores = \
        check_config_resources(c)

    strat_init_new = strat_init.fork()

    # Expose the longitudinal template outputs under the resource-pool keys
    # the registration sub-workflows expect.
    strat_init_new.update_resource_pool({
        'functional_preprocessed_median': (
            longitudinal_template_node, 'brain_template'),
        'motion_correct_median': (
            longitudinal_template_node, 'skull_template')
    })

    strat_list = [strat_init_new]

    new_strat_list = []

    regOption = c.anatomical_preproc[
        'registration_workflow'
    ]['registration']['using']

    if 'FSL' in regOption:
        for num_strat, strat in enumerate(strat_list):
            flirt_reg_func_mni = create_fsl_flirt_linear_reg(
                'func_mni_flirt_register_%s_%d' % (strat_name, num_strat)
            )

            # Fail fast on an interpolation method FLIRT does not support.
            if c.functional_registration['2-func_registration_to_template'][
                'FNIRT_pipelines']['interpolation'] not in ["trilinear",
                                                            "sinc", "spline"]:
                err_msg = 'The selected FSL interpolation method may be in the list of values: "trilinear", "sinc", "spline"'
                raise Exception(err_msg)

            # Input registration parameters
            flirt_reg_func_mni.inputs.inputspec.interp = \
                c.functional_registration['2-func_registration_to_template'][
                    'FNIRT_pipelines']['interpolation']

            node, out_file = strat['functional_preprocessed_median']
            workflow.connect(node, out_file,
                             flirt_reg_func_mni, 'inputspec.input_brain')

            # pass the reference files
            node, out_file = strat['template_brain_for_func_preproc']
            workflow.connect(node, out_file, flirt_reg_func_mni,
                             'inputspec.reference_brain')

            # When ANTs is also enabled, fork so the un-modified strategy can
            # later take the ANTs path; the fork gets the FSL results.
            if 'ANTS' in regOption:
                strat = strat.fork()
                new_strat_list.append(strat)

            strat.append_name(flirt_reg_func_mni.name)

            strat.update_resource_pool({
                'registration_method': 'FSL',
                'func_longitudinal_to_mni_linear_xfm': (
                    flirt_reg_func_mni, 'outputspec.linear_xfm'),
                'mni_to_func_longitudinal_linear_xfm': (
                    flirt_reg_func_mni, 'outputspec.invlinear_xfm'),
                'func_longitudinal_template_to_standard': (
                    flirt_reg_func_mni, 'outputspec.output_brain')
            })

    strat_list += new_strat_list

    new_strat_list = []

    # fsl_linear_reg_only: 0 -> also run FNIRT, 1 -> keep linear-only fork.
    try:
        fsl_linear_reg_only = c.fsl_linear_reg_only
    except AttributeError:
        fsl_linear_reg_only = [0]

    if 'FSL' in regOption and 0 in fsl_linear_reg_only:
        for num_strat, strat in enumerate(strat_list):
            # Only extend strategies that took the FSL (FLIRT) path above.
            if strat.get('registration_method') == 'FSL':
                fnirt_reg_func_mni = create_fsl_fnirt_nonlinear_reg(
                    'func_mni_fnirt_register_%s_%d' % (strat_name, num_strat)
                )

                # brain input
                node, out_file = strat['functional_preprocessed_median']
                workflow.connect(node, out_file,
                                 fnirt_reg_func_mni, 'inputspec.input_brain')

                # brain reference
                node, out_file = strat['template_brain_for_func_preproc']
                workflow.connect(node, out_file,
                                 fnirt_reg_func_mni,
                                 'inputspec.reference_brain')

                # skull input
                node, out_file = strat['motion_correct_median']
                workflow.connect(node, out_file,
                                 fnirt_reg_func_mni, 'inputspec.input_skull')

                # skull reference
                node, out_file = strat['template_skull_for_func_preproc']
                workflow.connect(node, out_file,
                                 fnirt_reg_func_mni,
                                 'inputspec.reference_skull')

                node, out_file = strat['func_longitudinal_to_mni_linear_xfm']
                workflow.connect(node, out_file,
                                 fnirt_reg_func_mni, 'inputspec.linear_aff')

                node, out_file = strat['template_ref_mask']
                workflow.connect(node, out_file,
                                 fnirt_reg_func_mni, 'inputspec.ref_mask')

                # assign the FSL FNIRT config file specified in pipeline
                # config.yml
                fnirt_reg_func_mni.inputs.inputspec.fnirt_config = \
                    c.anatomical_preproc['registration_workflow']['registration'][
                        'FSL-FNIRT']['fnirt_config']

                # Keep a linear-only fork alongside the nonlinear strategy.
                if 1 in fsl_linear_reg_only:
                    strat = strat.fork()
                    new_strat_list.append(strat)

                strat.append_name(fnirt_reg_func_mni.name)

                strat.update_resource_pool({
                    'func_longitudinal_to_mni_nonlinear_xfm': (
                        fnirt_reg_func_mni, 'outputspec.nonlinear_xfm'),
                    'func_longitudinal_template_to_standard': (
                        fnirt_reg_func_mni, 'outputspec.output_brain')
                }, override=True)

    strat_list += new_strat_list

    new_strat_list = []

    for num_strat, strat in enumerate(strat_list):

        # or run ANTS anatomical-to-MNI registration instead
        if 'ANTS' in regOption and \
                strat.get('registration_method') != 'FSL':

            ants_reg_func_mni = \
                create_wf_calculate_ants_warp(
                    'func_mni_ants_register_%s_%d' % (strat_name, num_strat),
                    num_threads=num_ants_cores,
                    reg_ants_skull=
                    c.anatomical_preproc['registration_workflow'][
                        'reg_with_skull']
                )

            # Fail fast on an interpolation method ANTs does not support.
            if c.functional_registration['2-func_registration_to_template'][
                'ANTs_pipelines']['interpolation'] not in ['Linear',
                                                           'BSpline',
                                                           'LanczosWindowedSinc']:
                err_msg = 'The selected ANTS interpolation method may be in the list of values: "Linear", "BSpline", "LanczosWindowedSinc"'
                raise Exception(err_msg)

            # Input registration parameters
            ants_reg_func_mni.inputs.inputspec.interp = \
                c.functional_registration['2-func_registration_to_template'][
                    'ANTs_pipelines']['interpolation']

            # calculating the transform with the skullstripped is
            # reported to be better, but it requires very high
            # quality skullstripping. If skullstripping is imprecise
            # registration with skull is preferred
            if c.anatomical_preproc['registration_workflow'][
                'reg_with_skull']:

                # get the skull-stripped anatomical from resource pool
                node, out_file = strat['functional_preprocessed_median']

                # pass the anatomical to the workflow
                workflow.connect(node, out_file,
                                 ants_reg_func_mni, 'inputspec.moving_brain')

                # get the reorient skull-on anatomical from resource pool
                node, out_file = strat['motion_correct_median']

                # pass the anatomical to the workflow
                workflow.connect(node, out_file,
                                 ants_reg_func_mni, 'inputspec.moving_skull')

                # pass the reference file
                node, out_file = strat['template_brain_for_func_preproc']
                workflow.connect(node, out_file,
                                 ants_reg_func_mni,
                                 'inputspec.reference_brain')

                # pass the reference file
                node, out_file = strat['template_skull_for_func_preproc']
                workflow.connect(node, out_file,
                                 ants_reg_func_mni,
                                 'inputspec.reference_skull')

            else:
                node, out_file = strat['functional_preprocessed_median']

                workflow.connect(node, out_file,
                                 ants_reg_func_mni, 'inputspec.moving_brain')

                # pass the reference file
                node, out_file = strat['template_brain_for_func_preproc']
                workflow.connect(node, out_file,
                                 ants_reg_func_mni,
                                 'inputspec.reference_brain')

            # pass the reference mask file
            node, out_file = strat['template_brain_mask_for_func_preproc']
            workflow.connect(
                node, out_file,
                ants_reg_func_mni, 'inputspec.reference_mask'
            )

            # pass the reference mask file
            node, out_file = strat['functional_brain_mask']
            workflow.connect(
                node, out_file,
                ants_reg_func_mni, 'inputspec.moving_mask'
            )

            ants_reg_func_mni.inputs.inputspec.ants_para = \
                c.anatomical_preproc['registration_workflow']['registration'][
                    'ANTs']['T1_registration']
            ants_reg_func_mni.inputs.inputspec.fixed_image_mask = None

            strat.append_name(ants_reg_func_mni.name)

            strat.update_resource_pool({
                'registration_method': 'ANTS',
                'ants_initial_xfm': (
                    ants_reg_func_mni, 'outputspec.ants_initial_xfm'),
                'ants_rigid_xfm': (
                    ants_reg_func_mni, 'outputspec.ants_rigid_xfm'),
                'ants_affine_xfm': (
                    ants_reg_func_mni, 'outputspec.ants_affine_xfm'),
                'func_longitudinal_to_mni_nonlinear_xfm': (
                    ants_reg_func_mni, 'outputspec.warp_field'),
                'mni_to_func_longitudinal_nonlinear_xfm': (
                    ants_reg_func_mni, 'outputspec.inverse_warp_field'),
                'func_longitudinal_to_mni_ants_composite_xfm': (
                    ants_reg_func_mni, 'outputspec.composite_transform'),
                'func_longitudinal_template_to_standard': (
                    ants_reg_func_mni, 'outputspec.normalized_output_brain')
            })

    strat_list += new_strat_list

    '''
    # Func -> T1 Registration (Initial Linear Reg)
    workflow, strat_list, diff_complete = connect_func_to_anat_init_reg(workflow, strat_list, c)

    # Func -> T1 Registration (BBREG)
    workflow, strat_list = connect_func_to_anat_bbreg(workflow, strat_list, c, diff_complete)

    # Func -> T1/EPI Template
    workflow, strat_list = connect_func_to_template_reg(workflow, strat_list, c)
    '''

    return workflow, strat_list
def func_longitudinal_template_wf(subject_id, strat_list, config):
'''
Parameters
----------
subject_id : string
the id of the subject
strat_list : list of list
first level strategy, second level session
config : configuration
a configuration object containing the information of the pipeline config.
Returns
-------
None
'''
workflow_name = 'func_longitudinal_template_' + str(subject_id)
workflow = pe.Workflow(name=workflow_name)
workflow.base_dir = config.pipeline_setup['working_directory']['path']
workflow.config['execution'] = {
'hash_method': 'timestamp',
'crashdump_dir': os.path.abspath(
config.pipeline_setup['crash_directory']['path'])
}
# strat_nodes_list = strat_list['func_default']
strat_init = Strategy()
templates_for_resampling = [
(config.resolution_for_func_preproc,
config.template_brain_only_for_func,
'template_brain_for_func_preproc', 'resolution_for_func_preproc'),
(config.resolution_for_func_preproc, config.template_skull_for_func,
'template_skull_for_func_preproc', 'resolution_for_func_preproc'),
(config.resolution_for_func_preproc, config.ref_mask_for_func,
'template_ref_mask', 'resolution_for_func_preproc'),
# TODO check float resolution
(config.resolution_for_func_preproc,
config.functional_registration['2-func_registration_to_template'][
'target_template']['EPI_template']['template_epi'],
'template_epi', 'resolution_for_func_preproc'),
(config.resolution_for_func_derivative,
config.functional_registration['2-func_registration_to_template'][
'target_template']['EPI_template']['template_epi'],
'template_epi_derivative', 'resolution_for_func_derivative'),
| |
"brakeman",
"bramble",
"brambly",
"branch",
"branched",
"branchlike",
"brand",
"brander",
"brandish",
"brandy",
"brash",
"brashly",
"brashness",
"brass",
"brasserie",
"brassiere",
"brassily",
"brassiness",
"brassy",
"bratty",
"bratwurst",
"bravado",
"brave",
"bravely",
"braveness",
"bravery",
"bravo",
"bravura",
"brawl",
"brawler",
"brawn",
"brawniness",
"brawny",
"braze",
"brazen",
"brazenly",
"brazenness",
"brazer",
"brazier",
"breach",
"bread",
"breadbasket",
"breadboard",
"breadbox",
"breadcrumb",
"breaded",
"breadfruit",
"breadline",
"breadth",
"breadwinner",
"break",
"breakable",
"breakage",
"breakaway",
"breakdown",
"breaker",
"breakeven",
"breakfast",
"breakfront",
"breakneck",
"breakout",
"breakthrough",
"breakup",
"breakwater",
"bream",
"breast",
"breastbone",
"breastplate",
"breaststroke",
"breastwork",
"breath",
"breathable",
"breathalyze",
"breathe",
"breather",
"breathing",
"breathless",
"breathlessly",
"breathlessness",
"breathtaking",
"breathtakingly",
"breathy",
"breech",
"breeches",
"breed",
"breeder",
"breeding",
"breeze",
"breezeway",
"breezily",
"breeziness",
"breezy",
"brethren",
"breve",
"brevet",
"breviary",
"brevity",
"brewer",
"brewery",
"brewpub",
"briar",
"bribe",
"briber",
"bribery",
"brick",
"brickbat",
"bricklayer",
"bricklaying",
"brickwork",
"bridal",
"bride",
"bridegroom",
"bridesmaid",
"bridge",
"bridgeable",
"bridgehead",
"bridgework",
"bridle",
"brief",
"briefcase",
"briefing",
"briefly",
"briefness",
"briefs",
"brier",
"brigade",
"brigadier",
"brigand",
"brigandage",
"brigantine",
"bright",
"brighten",
"brightener",
"brightly",
"brightness",
"brights",
"brilliance",
"brilliancy",
"brilliant",
"brilliantine",
"brilliantly",
"brimful",
"brimless",
"brimstone",
"brindle",
"brindled",
"brine",
"bring",
"bringer",
"brininess",
"brink",
"brinkmanship",
"brinksmanship",
"briny",
"brioche",
"briquet",
"briquette",
"brisk",
"brisket",
"briskly",
"briskness",
"bristle",
"bristly",
"britches",
"brittle",
"brittleness",
"broach",
"broad",
"broadband",
"broadcast",
"broadcaster",
"broadcasting",
"broadcloth",
"broaden",
"broadloom",
"broadly",
"broadminded",
"broadness",
"broadsheet",
"broadside",
"broadsword",
"brocade",
"broccoli",
"brochette",
"brochure",
"brogan",
"brogue",
"broil",
"broiler",
"broiling",
"broke",
"broken",
"brokenhearted",
"brokenheartedly",
"brokenly",
"brokenness",
"broker",
"brokerage",
"bromide",
"bromidic",
"bromine",
"bronc",
"bronchi",
"bronchial",
"bronchitic",
"bronchitis",
"bronchus",
"bronco",
"broncobuster",
"brontosaur",
"brontosaurus",
"bronze",
"brooch",
"brood",
"brooder",
"brooding",
"broodingly",
"broodmare",
"broody",
"brook",
"brooklet",
"broom",
"broomstick",
"broth",
"brothel",
"brother",
"brotherhood",
"brotherliness",
"brotherly",
"brougham",
"brought",
"brouhaha",
"browbeat",
"browbeaten",
"brown",
"brownie",
"brownish",
"brownness",
"brownout",
"brownstone",
"browse",
"browser",
"bruin",
"bruise",
"bruised",
"bruiser",
"bruising",
"bruit",
"brunch",
"brunet",
"brunette",
"brunt",
"brush",
"brushed",
"brushoff",
"brushwood",
"brushwork",
"brusque",
"brusquely",
"brusqueness",
"brutal",
"brutality",
"brutalization",
"brutalize",
"brutally",
"brute",
"brutish",
"brutishly",
"brutishness",
"bubble",
"bubblegum",
"bubbly",
"buccaneer",
"buckaroo",
"buckboard",
"bucket",
"bucketful",
"buckeye",
"buckle",
"buckler",
"buckram",
"bucksaw",
"buckshot",
"buckskin",
"buckskins",
"buckteeth",
"bucktooth",
"bucktoothed",
"buckwheat",
"bucolic",
"bucolically",
"budding",
"buddy",
"budge",
"budgerigar",
"budget",
"budgetary",
"budgie",
"buffalo",
"buffer",
"buffered",
"buffet",
"buffoon",
"buffoonery",
"buffoonish",
"bugaboo",
"bugbear",
"bugger",
"buggy",
"bugle",
"bugler",
"build",
"builder",
"building",
"buildup",
"built",
"bulbous",
"bulge",
"bulgy",
"bulimarexia",
"bulimia",
"bulimic",
"bulkhead",
"bulkiness",
"bulky",
"bulldog",
"bulldoze",
"bulldozer",
"bullet",
"bulletin",
"bulletproof",
"bullfight",
"bullfighter",
"bullfighting",
"bullfinch",
"bullfrog",
"bullhead",
"bullheaded",
"bullheadedness",
"bullhorn",
"bullion",
"bullish",
"bullishly",
"bullishness",
"bullock",
"bullpen",
"bullring",
"bullshit",
"bullshitter",
"bully",
"bulrush",
"bulwark",
"bulwarks",
"bumble",
"bumblebee",
"bumbler",
"bumbling",
"bummed",
"bummer",
"bumper",
"bumpiness",
"bumpkin",
"bumptious",
"bumptiously",
"bumptiousness",
"bumpy",
"bunch",
"bunchy",
"bunco",
"buncombe",
"bundle",
"bungalow",
"bungee",
"bunghole",
"bungle",
"bungled",
"bungler",
"bungling",
"bunion",
"bunker",
"bunkhouse",
"bunko",
"bunkum",
"bunny",
"bunting",
"buoyancy",
"buoyant",
"buoyantly",
"burble",
"burbs",
"burden",
"burdensome",
"burdock",
"bureau",
"bureaucracy",
"bureaucrat",
"bureaucratic",
"bureaucratically",
"bureaucratization",
"bureaucratize",
"bureaux",
"burgeon",
"burgeoning",
"burger",
"burgh",
"burgher",
"burglar",
"burglarize",
"burglarproof",
"burglary",
"burgle",
"burgomaster",
"burgundy",
"burial",
"burlap",
"burled",
"burlesque",
"burliness",
"burly",
"burnable",
"burner",
"burning",
"burnish",
"burnished",
"burnisher",
"burnoose",
"burnout",
"burnt",
"burrito",
"burro",
"burrow",
"burrower",
"bursa",
"bursae",
"bursar",
"bursitis",
"burst",
"busboy",
"busby",
"busgirl",
"bushed",
"bushel",
"bushiness",
"bushing",
"bushman",
"bushmaster",
"bushwhack",
"bushwhacker",
"bushy",
"busily",
"business",
"businesslike",
"businessman",
"businessperson",
"businesswoman",
"busing",
"buskin",
"busses",
"bussing",
"busted",
"buster",
"bustle",
"bustling",
"busty",
"busybody",
"busyness",
"busywork",
"butane",
"butch",
"butcher",
"butchery",
"butler",
"butte",
"butter",
"butterball",
"buttercup",
"butterfat",
"butterfingered",
"butterfingers",
"butterfly",
"buttermilk",
"butternut",
"butterscotch",
"buttery",
"buttock",
"buttocks",
"button",
"buttonhole",
"buttonwood",
"buttress",
"buxom",
"buyback",
"buyer",
"buyout",
"buzzard",
"buzzer",
"buzzword",
"bygone",
"bygones",
"bylaw",
"byline",
"bypass",
"bypath",
"byplay",
"byproduct",
"byroad",
"bystander",
"byway",
"byword",
"byzantine",
"cabal",
"cabala",
"caballero",
"cabana",
"cabaret",
"cabbage",
"cabbie",
"cabby",
"cabdriver",
"cabin",
"cabinet",
"cabinetmaker",
"cabinetmaking",
"cabinetry",
"cabinetwork",
"cable",
"cablecast",
"cablegram",
"cabochon",
"caboodle",
"caboose",
"cabriolet",
"cabstand",
"cacao",
"cache",
"cachepot",
"cachet",
"cackle",
"cackler",
"cacophonous",
"cacophony",
"cacti",
"cactus",
"cadaver",
"cadaverous",
"caddie",
"caddish",
"caddishly",
"caddishness",
"caddy",
"cadence",
"cadenced",
"cadenza",
"cadet",
"cadge",
"cadger",
"cadmium",
"cadre",
"caducei",
"caduceus",
"caesarean",
"caesura",
"cafeteria",
"caffeine",
"caftan",
"cagey",
"cagily",
"caginess",
"cahoot",
"cahoots",
"caiman",
"cairn",
"caisson",
"caitiff",
"cajole",
"cajolement",
"cajoler",
"cajolery",
"cakewalk",
"calabash",
"calaboose",
"calamari",
"calamine",
"calamitous",
"calamitously",
"calamity",
"calcareous",
"calciferous",
"calcification",
"calcify",
"calcimine",
"calcine",
"calcite",
"calcium",
"calculable",
"calculate",
"calculated",
"calculating",
"calculatingly",
"calculation",
"calculative",
"calculator",
"calculi",
"calculus",
"caldera",
"caldron",
"calendar",
"calender",
"calfskin",
"caliber",
"calibrate",
"calibration",
"calibrator",
"calico",
"californium",
"caliper",
"calipers",
"caliph",
"caliphate",
"calisthenic",
"calisthenics",
"calla",
"callback",
"caller",
"calligrapher",
"calligraphic",
"calligraphist",
"calligraphy",
"calling",
"calliope",
"callosity",
"callous",
"calloused",
"callously",
"callousness",
"callow",
"callowness",
"callus",
"callused",
"calmly",
"calmness",
"caloric",
"calorie",
"calorific",
"calumet",
"calumniate",
"calumniation",
"calumniator",
"calumnious",
"calumny",
"calve",
"calves",
"calyces",
"calypso",
"calyx",
"camaraderie",
"camber",
"cambial",
"cambium",
"cambric",
"camcorder",
"camel",
"camelhair",
"camellia",
"cameo",
"camera",
"cameraman",
"camerawoman",
"camisole",
"camomile",
"camouflage",
"camouflager",
"campaign",
"campaigner",
"campanile",
"campanologist",
"campanology",
"camper",
"campfire",
"campground",
"camphor",
"camping",
"campsite",
"campus",
"campy",
"camshaft",
"canal",
"canalization",
"canalize",
"canape",
"canard",
"canary",
"canasta",
"cancan",
"cancel",
"canceler",
"cancellation",
"cancer",
"cancerous",
"candelabra",
"candelabrum",
"candid",
"candidacy",
"candidate",
"candidly",
"candidness",
"candied",
"candle",
"candlelight",
"candlepower",
"candler",
"candlestick",
"candor",
"candy",
"canebrake",
"caner",
"canine",
"canister",
"canker",
"cankerous",
"cannabis",
"canned",
"cannelloni",
"cannery",
"cannibal",
"cannibalism",
"cannibalistic",
"cannibalization",
"cannibalize",
"cannily",
"canniness",
"cannon",
"cannonade",
"cannonball",
"cannot",
"canny",
"canoe",
"canoeist",
"canola",
"canon",
"canonical",
"canonically",
"canonization",
"canonize",
"canopied",
"canopy",
"canst",
"cantabile",
"cantaloupe",
"cantankerous",
"cantankerously",
"cantankerousness",
"cantata",
"canteen",
"canter",
"canticle",
"cantilever",
"cantilevered",
"canto",
"canton",
"cantonal",
"cantonment",
"cantor",
"canvas",
"canvasback",
"canvass",
"canvasser",
"canyon",
"capability",
"capable",
"capably",
"capacious",
"capaciously",
"capaciousness",
"capacitance",
"capacitor",
"capacity",
"caparison",
"caped",
"caper",
"capeskin",
"capillarity",
"capillary",
"capital",
"capitalism",
"capitalist",
"capitalistic",
"capitalistically",
"capitalization",
"capitalize",
"capitally",
"capitol",
"capitulate",
"capitulation",
"caplet",
"capon",
"cappuccino",
"caprice",
"capricious",
"capriciously",
"capriciousness",
"capsicum",
"capsize",
"capstan",
"capstone",
"capsular",
"capsule",
"capsulize",
"captain",
"captaincy",
"caption",
"captious",
"captiously",
"captiousness",
"captivate",
"captivating",
"captivation",
"captivator",
"captive",
"captivity",
"captor",
"capture",
"caracul",
"carafe",
"caramel",
"caramelize",
"carapace",
"carat",
"caravan",
"caravansary",
"caravanserai",
"caravel",
"caraway",
"carbide",
"carbine",
"carbohydrate",
"carbon",
"carbonaceous",
"carbonate",
"carbonated",
"carbonation",
"carboniferous",
"carbonize",
"carboy",
"carbuncle",
"carbuncular",
"carburetor",
"carcass",
"carcinogen",
"carcinogenic",
"carcinogenicity",
"carcinoma",
"cardamom",
"cardboard",
"carder",
"cardiac",
"cardigan",
"cardinal",
"cardinally",
"cardiogram",
"cardiograph",
"cardiologist",
"cardiology",
"cardiopulmonary",
"cardiovascular",
"cards",
"cardsharp",
"cardsharper",
"careen",
"career",
"careerist",
"carefree",
"careful",
"carefully",
"carefulness",
"caregiver",
"careless",
"carelessly",
"carelessness",
"caress",
"caret",
"caretaker",
"careworn",
"carfare",
"cargo",
"carhop",
"caribou",
"caricature",
"caricaturist",
"caries",
"carillon",
"caring",
"carious",
"carjack",
"carjacker",
"carjacking",
"carload",
"carmine",
"carnage",
"carnal",
"carnality",
"carnally",
"carnation",
"carnelian",
"carnival",
"carnivore",
"carnivorous",
"carnivorously",
"carnivorousness",
"carny",
"carob",
"carol",
"caroler",
"caroller",
"carom",
"carotene",
"carotid",
"carousal",
"carouse",
"carousel",
"carouser",
"carpal",
"carpel",
"carpenter",
"carpentry",
"carper",
"carpet",
"carpetbag",
"carpetbagger",
"carpeting",
"carpi",
"carpool",
"carport",
"carpus",
"carrel",
"carriage",
"carrier",
"carrion",
"carrot",
"carrousel",
"carry",
"carryall",
"carryout",
"carryover",
"carsick",
"carsickness",
"cartage",
"cartel",
"carter",
"carthorse",
"cartilage",
"cartilaginous",
"cartload",
"cartographer",
"cartographic",
"cartography",
"carton",
"cartoon",
"cartoonist",
"cartridge",
"cartwheel",
"carve",
"carver",
"carving",
"caryatid",
"casaba",
"cascade",
"cascara",
"caseharden",
"casein",
"caseload",
"casement",
"casework",
"caseworker",
"cashew",
"cashier",
"cashless",
"cashmere",
"casing",
"casino",
"casket",
"cassava",
"casserole",
"cassette",
"cassia",
"cassock",
"cassowary",
"castanet",
"castanets",
"castaway",
"caste",
"castellated",
"caster",
"castigate",
"castigation",
"castigator",
"casting",
"castle",
"castoff",
"castor",
"castrate",
"castration",
"casual",
"casually",
"casualness",
"casualty",
"casuist",
"casuistic",
"casuistry",
"cataclysm",
"cataclysmal",
"cataclysmic",
"catacomb",
"catacombs",
"catafalque",
"catalepsy",
"cataleptic",
"catalog",
"cataloger",
"catalogue",
"cataloguer",
"catalpa",
"catalyses",
"catalysis",
"catalyst",
"catalytic",
"catalyze",
"catamaran",
"catapult",
"cataract",
"catarrh",
"catastrophe",
"catastrophic",
"catastrophically",
"catatonia",
"catatonic",
"catbird",
"catboat",
"catcall",
"catch",
"catchall",
"catcher",
"catching",
"catchment",
"catchup",
"catchword",
"catchy",
"catechism",
"catechist",
"catechize",
"categorical",
"categorically",
"categorization",
"categorize",
"category",
"cater",
"catercorner",
"caterer",
"caterpillar",
"caterwaul",
"catfish",
"catgut",
"catharses",
"catharsis",
"cathartic",
"cathedral",
"catheter",
"catheterize",
"cathode",
"cathodic",
"catholic",
"catholicity",
"cation",
"catkin",
"catlike",
"catnap",
"catnip",
"catsup",
"cattail",
"cattily",
"cattiness",
"cattle",
"cattleman",
"catty",
"catwalk",
"caucus",
"caudal",
"caudally",
"caught",
"cauldron",
"cauliflower",
"caulk",
"caulker",
"caulking",
"causal",
"causality",
"causally",
"causation",
"causative",
"cause",
"causeless",
"causer",
"causerie",
"causeway",
"caustic",
"caustically",
"causticity",
"cauterization",
"cauterize",
"caution",
"cautionary",
"cautious",
"cautiously",
"cautiousness",
"cavalcade",
"cavalier",
"cavalierly",
"cavalry",
"cavalryman",
"caveat",
"caveman",
"cavern",
"cavernous",
"cavernously",
"caviar",
"caviare",
"cavil",
"caviler",
"caving",
"cavity",
"cavort",
"cayenne",
"cayman",
"cayuse",
"cease",
"ceasefire",
"ceaseless",
"ceaselessly",
"ceaselessness",
"cecal",
"cecum",
"cedar",
"ceder",
"cedilla",
"ceiling",
"celandine",
"celebrant",
"celebrate",
"celebrated",
"celebration",
"celebrator",
"celebratory",
"celebrity",
"celerity",
"celery",
"celesta",
"celestial",
"celestially",
"celibacy",
"celibate",
"cellar",
"celled",
"cellist",
"cellmate",
"cello",
"cellophane",
"cellular",
"cellulite",
"celluloid",
"cellulose",
"cement",
"cementer",
"cementum",
"cemetery",
"cenobite",
"cenobitic",
"cenotaph",
"censer",
"censor",
"censorial",
"censorious",
"censoriously",
"censoriousness",
"censorship",
"censurable",
"censure",
"censurer",
"census",
"centaur",
"centavo",
"centenarian",
"centenary",
"centennial",
"centennially",
"center",
"centerboard",
"centered",
"centerfold",
"centerpiece",
"centigrade",
"centigram",
"centiliter",
"centime",
"centimeter",
"centipede",
"central",
"centrality",
"centralization",
"centralize",
"centralizer",
"centrally",
"centrifugal",
"centrifugally",
"centrifuge",
"centripetal",
"centripetally",
"centrism",
"centrist",
"centurion",
"century",
"cephalic",
"ceramic",
"ceramics",
"ceramist",
"cereal",
"cerebella",
"cerebellar",
"cerebellum",
"cerebra",
"cerebral",
"cerebrate",
"cerebration",
"cerebrum",
"cerement",
"ceremonial",
"ceremonially",
"ceremonious",
"ceremoniously",
"ceremoniousness",
"ceremony",
"cerise",
"cerium",
"cermet",
"certain",
"certainly",
"certainty",
"certifiable",
"certifiably",
"certificate",
"certification",
"certified",
"certify",
"certitude",
"cerulean",
"cervical",
"cervices",
"cervix",
"cesarean",
"cesium",
"cessation",
"cession",
"cesspool",
"cetacean",
"chafe",
"chaff",
"chaffinch",
"chagrin",
"chain",
"chains",
"chainsaw",
"chair",
"chairlift",
"chairman",
"chairmanship",
"chairperson",
"chairwoman",
"chaise",
"chalcedony",
"chalet",
"chalice",
| |
['int32', 'int64',
'float32', 'float64']
dims = [1, 2, 3]
ctx = self._current_context()
count = 0
shapes = [(), (17), (17, 17), (17, 17, 17)]
root_ranks = list(range(size))
for dtype, dim, root_rank in itertools.product(dtypes, dims,
root_ranks):
tensor = mx.nd.ones(shapes[dim], ctx=ctx) * rank
root_tensor = mx.nd.ones(shapes[dim], ctx=ctx) * root_rank
tensor = tensor.astype(dtype)
root_tensor = root_tensor.astype(dtype)
broadcast_tensor = hvd.broadcast(tensor, root_rank=root_rank,
name=str(count))
if rank != root_rank:
if same(tensor.asnumpy(), root_tensor.asnumpy()):
print("broadcast", count, dtype, dim,
mx.nd.max(tensor == root_tensor))
print("tensor", hvd.rank(), tensor)
print("root_tensor", hvd.rank(), root_tensor)
print("comparison", hvd.rank(), tensor == root_tensor)
assert not same(tensor.asnumpy(), root_tensor.asnumpy()), \
'hvd.broadcast modifies source tensor'
if not same(broadcast_tensor.asnumpy(), root_tensor.asnumpy()):
print("broadcast", count, dtype, dim)
print("broadcast_tensor", hvd.rank(), broadcast_tensor)
print("root_tensor", hvd.rank(), root_tensor)
print("comparison", hvd.rank(),
broadcast_tensor == root_tensor)
assert same(broadcast_tensor.asnumpy(), root_tensor.asnumpy()), \
'hvd.broadcast produces incorrect broadcasted tensor'
count += 1
def test_horovod_broadcast_inplace(self):
"""Test that the broadcast correctly broadcasts 1D, 2D, 3D tensors."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
# This test does not apply if there is only one worker.
if size == 1:
self.skipTest("Only one worker available")
dtypes = ['int32', 'int64',
'float32', 'float64']
dims = [1, 2, 3]
ctx = self._current_context()
count = 0
shapes = [(), (17), (17, 17), (17, 17, 17)]
root_ranks = list(range(size))
for dtype, dim, root_rank in itertools.product(dtypes, dims,
root_ranks):
tensor = mx.nd.ones(shapes[dim], ctx=ctx) * rank
root_tensor = mx.nd.ones(shapes[dim], ctx=ctx) * root_rank
tensor = tensor.astype(dtype)
root_tensor = root_tensor.astype(dtype)
# Only do broadcasting using broadcast_tensor
broadcast_tensor = tensor.copy()
hvd.broadcast_(broadcast_tensor, root_rank=root_rank,
name=str(count))
if rank != root_rank:
if same(tensor.asnumpy(), root_tensor.asnumpy()):
print("broadcast", count, dtype, dim,
mx.nd.max(tensor == root_tensor))
print("tensor", hvd.rank(), tensor)
print("root_tensor", hvd.rank(), root_tensor)
print("comparison", hvd.rank(), tensor == root_tensor)
assert not same(tensor.asnumpy(), root_tensor.asnumpy()), \
'hvd.broadcast modifies source tensor'
if not same(broadcast_tensor.asnumpy(), root_tensor.asnumpy()):
print("broadcast", count, dtype, dim)
print("broadcast_tensor", hvd.rank(), broadcast_tensor)
print("root_tensor", hvd.rank(), root_tensor)
print("comparison", hvd.rank(),
broadcast_tensor == root_tensor)
assert same(broadcast_tensor.asnumpy(), root_tensor.asnumpy()), \
'hvd.broadcast produces incorrect broadcasted tensor'
count += 1
def test_horovod_broadcast_parameters(self):
"""Test the correctness of broadcast_parameters."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
# This test does not apply if there is only one worker.
if size == 1:
self.skipTest("Only one worker available")
dtypes = ['int32', 'int64',
'float32', 'float64']
dims = [1, 2, 3]
ctx = self._current_context()
count = 0
shapes = [(), (17), (17, 17), (17, 17, 17)]
root_rank = 1
tensor_dict = {}
root_dict = {}
for dtype, dim, in itertools.product(dtypes, dims):
tensor_dict[count] = mx.nd.ones(shapes[dim], ctx=ctx) * rank
root_dict[count] = mx.nd.ones(shapes[dim], ctx=ctx) * root_rank
tensor_dict[count] = tensor_dict[count].astype(dtype)
root_dict[count] = root_dict[count].astype(dtype)
count += 1
hvd.broadcast_parameters(tensor_dict, root_rank=root_rank)
for i in range(count):
if not same(tensor_dict[i].asnumpy(), root_dict[i].asnumpy()):
print("broadcast", i, dtypes[i], dims[i])
print("broadcast_tensor", hvd.rank(), tensor_dict[i])
print("root_tensor", hvd.rank(), root_dict[i])
print("comparison", hvd.rank(), tensor_dict[i] == root_dict[i])
assert same(tensor_dict[i].asnumpy(), root_dict[i].asnumpy()), \
'hvd.broadcast_parameters produces incorrect broadcasted tensor'
    def test_horovod_broadcast_process_sets(self):
        """Test that the broadcast correctly broadcasts 1D, 2D, 3D tensors if restricted to non-global process sets."""
        hvd.init()
        rank = hvd.rank()
        size = hvd.size()
        # This test does not apply if there is only one worker.
        if size == 1:
            self.skipTest("Only one worker available")
        if hvd.ccl_built():
            self.skipTest("Multiple process sets currently do not support CCL.")
        # Partition all workers into two disjoint process sets by rank parity;
        # every rank falls into exactly one of the two lists.
        even_ranks = [rk for rk in range(0, size) if rk % 2 == 0]
        odd_ranks = [rk for rk in range(0, size) if rk % 2 == 1]
        even_set = hvd.add_process_set(even_ranks)
        odd_set = hvd.add_process_set(odd_ranks)
        # Select the set this worker belongs to.
        # NOTE(review): set_size is assigned but not used below — presumably
        # kept for parity with sibling tests; confirm before removing.
        if rank in even_ranks:
            set_size = len(even_ranks)
            set_ranks = even_ranks
            this_set = even_set
        elif rank in odd_ranks:
            set_size = len(odd_ranks)
            set_ranks = odd_ranks
            this_set = odd_set
        dtypes = ['int32', 'int64',
                  'float32', 'float64']
        dims = [1, 2, 3]
        ctx = self._current_context()
        count = 0
        # shapes[dim] yields a dim-dimensional shape (note (17) is an int,
        # not a tuple; mx.nd.ones appears to accept both forms).
        shapes = [(), (17), (17, 17), (17, 17, 17)]
        # Roots are drawn only from this worker's own process set.
        root_ranks = list(set_ranks)
        for dtype, dim, root_rank in itertools.product(dtypes, dims,
                                                       root_ranks):
            tensor = mx.nd.ones(shapes[dim], ctx=ctx) * rank
            root_tensor = mx.nd.ones(shapes[dim], ctx=ctx) * root_rank
            tensor = tensor.astype(dtype)
            root_tensor = root_tensor.astype(dtype)
            # Broadcast restricted to this worker's process set.
            broadcast_tensor = hvd.broadcast(tensor, root_rank=root_rank,
                                             name=str(count),
                                             process_set=this_set)
            if rank != root_rank:
                # The source tensor must not be modified by the broadcast.
                if same(tensor.asnumpy(), root_tensor.asnumpy()):
                    print("broadcast", count, dtype, dim,
                          mx.nd.max(tensor == root_tensor))
                    print("tensor", hvd.rank(), tensor)
                    print("root_tensor", hvd.rank(), root_tensor)
                    print("comparison", hvd.rank(), tensor == root_tensor)
                assert not same(tensor.asnumpy(), root_tensor.asnumpy()), \
                    'hvd.broadcast modifies source tensor'
            # The output must equal the root's tensor on every set member.
            if not same(broadcast_tensor.asnumpy(), root_tensor.asnumpy()):
                print("broadcast", count, dtype, dim)
                print("broadcast_tensor", hvd.rank(), broadcast_tensor)
                print("root_tensor", hvd.rank(), root_tensor)
                print("comparison", hvd.rank(),
                      broadcast_tensor == root_tensor)
            assert same(broadcast_tensor.asnumpy(), root_tensor.asnumpy()), \
                'hvd.broadcast produces incorrect broadcasted tensor'
            count += 1
        hvd.remove_process_set(odd_set)
        hvd.remove_process_set(even_set)
def test_horovod_broadcast_error(self):
"""Test that the broadcast returns an error if any dimension besides
the first is different among the tensors being broadcasted."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
# This test does not apply if there is only one worker.
if size == 1:
self.skipTest("Only one worker available")
ctx = self._current_context()
shape = (17, rank+1)
tensor = mx.nd.ones(shape=shape, ctx=ctx)
try:
output = hvd.broadcast(tensor, 0)
output.wait_to_read()
assert False, 'hvd.broadcast did not throw error'
except (MXNetError, RuntimeError):
pass
def test_horovod_broadcast_type_error(self):
"""Test that the broadcast returns an error if the types being broadcasted
differ among the processes"""
hvd.init()
rank = hvd.rank()
size = hvd.size()
# This test does not apply if there is only one worker.
if size == 1:
self.skipTest("Only one worker available")
ctx = self._current_context()
shape = (17, 3)
tensor = mx.nd.ones(shape=shape, ctx=ctx)
if rank % 2 == 0:
tensor = tensor.astype('int32')
else:
tensor = tensor.astype('float32')
try:
output = hvd.broadcast(tensor, 0)
output.wait_to_read()
assert False, 'hvd.broadcast did not throw error'
except (MXNetError, RuntimeError):
pass
def test_horovod_broadcast_rank_error(self):
"""Test that the broadcast returns an error if different ranks
specify different root rank."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
# This test does not apply if there is only one worker.
if size == 1:
self.skipTest("Only one worker available")
ctx = self._current_context()
shape = (17, 17, 17)
tensor = mx.nd.ones(shape=shape, ctx=ctx)
try:
output = hvd.broadcast(tensor, root_rank=rank)
output.wait_to_read()
assert False, 'hvd.broadcast did not throw rank error'
except (MXNetError, RuntimeError):
pass
def test_horovod_broadcast_deferred_init_parameters(self):
"""Test that the deferred initialized parameters are broadcasted."""
hvd.init()
root_rank = 0
rank = hvd.rank()
# This test does not apply if there is only one worker.
if hvd.size() == 1:
self.skipTest("Only one worker available")
mx.random.seed(rank)
layer = mx.gluon.nn.Conv2D(10, 2)
layer.initialize()
hvd.broadcast_parameters(layer.collect_params(), root_rank=root_rank)
x = mx.nd.ones((5, 4, 10, 10))
layer(x)
tensors = [p.data() for _, p in sorted(layer.collect_params().items())]
root_tensors = []
for tensor in tensors:
root_tensors.append(hvd.broadcast(tensor, root_rank=root_rank))
for tensor, root_tensor in zip(tensors, root_tensors):
assert same(tensor.asnumpy(), root_tensor.asnumpy()), \
'horovod did not broadcast deferred initialized parameter correctly'
def test_horovod_allgather(self):
"""Test that the allgather correctly gathers 1D, 2D, 3D tensors."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
dtypes = ['int32', 'int64',
'float32', 'float64']
dims = [1, 2, 3]
ctx = self._current_context()
for dtype, dim in itertools.product(dtypes, dims):
tensor = mx.ndarray.ones(shape=[17] * dim, dtype=dtype, ctx=ctx) * rank
gathered = hvd.allgather(tensor)
assert list(gathered.shape) == [17 * size] + [17] * (dim - 1)
for i in range(size):
rank_tensor = gathered[i * 17:(i + 1) * 17]
assert list(rank_tensor.shape) == [17] * dim, \
'hvd.allgather produces incorrect gathered shape'
assert rank_tensor.min() == i, 'hvd.allgather produces incorrect gathered tensor'
assert rank_tensor.max() == i, 'hvd.allgather produces incorrect gathered tensor'
def test_horovod_allgather_variable_size(self):
"""Test that the allgather correctly gathers 1D, 2D, 3D tensors,
even if those tensors have different sizes along the first dim."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
dtypes = ['int32', 'int64',
'float32', 'float64']
dims = [1, 2, 3]
ctx = self._current_context()
for dtype, dim in itertools.product(dtypes, dims):
# Support tests up to MPI Size of 35
if size > 35:
break
tensor_sizes = [17, 32, 81, 12, 15, 23, 22] * 5
tensor_sizes = tensor_sizes[:size]
tensor = mx.ndarray.ones(
shape=[tensor_sizes[rank]] + [17] * (dim - 1), dtype=dtype, ctx=ctx) * rank
gathered = hvd.allgather(tensor)
expected_size = sum(tensor_sizes)
assert list(gathered.shape) == [expected_size] + [17] * (dim - 1)
for i in range(size):
rank_size = [tensor_sizes[i]] + [17] * (dim - 1)
rank_tensor = gathered[sum(
tensor_sizes[:i]):sum(tensor_sizes[:i + 1])]
assert list(rank_tensor.shape) == rank_size
assert rank_tensor.min() == i
assert rank_tensor.max() == i
def test_horovod_allgather_process_sets(self):
"""Test that the allgather correctly gathers 1D, 2D, 3D tensors if restricted to non-global process sets."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
if hvd.ccl_built():
self.skipTest("Multiple process sets currently do not support CCL.")
even_ranks = [rk for rk in range(0, | |
<filename>bot.py
__author__ = 'RAEON'
from session import Session
from buffer import Buffer
from cell import Cell
import random
import time
import math
class Bot(object):
    def __init__(self, game):
        """Create a bot attached to *game* with empty network and cell state."""
        self.game = game
        # core variables
        # self.running = False # no point, is there? we have is_connected() and is_alive()
        # self.thread = None # instances are updated by their Game or Server if no game has been found yet
        self.session = Session()  # network connection to the server
        self.buffer = Buffer()    # reusable parse buffer for inbound packets
        # game information
        self.name = 'Test' #''.join([random.choice('0123456789abcdefghijlkmnopqrstuvwxyz') for i in range(8)])
        self.last_x = 0 # last sent mouse X coordinate
        self.last_y = 0 # last sent mouse Y coordinate
        self.view_x = 0 # viewport x
        self.view_y = 0 # viewport y
        self.view_w = 0 # viewport width
        self.view_h = 0 # viewport height
        # our state
        self.has_sent_init = False  # whether the init handshake was sent
        self.last_sent_spawn = 0    # timestamp of the last spawn request
        self.last_update = 0        # game timestamp of the last world update we parsed
        # cell information
        self.ids = [] # list of ids (to get cell, query id in all_cells)
        # no longer track cell ids
        # NOTE(review): ladder starts as a list here, but connect() resets it
        # to {} and packet 49 uses a dict while packet 50 uses a list — the
        # type depends on game mode; confirm before relying on either shape.
        self.ladder = []
        self.mode = 'ffa'
    def connect(self, host, port):
        """Connect to *host*:*port* and kick off the init/spawn handshake.

        Connection attempts are throttled: a new attempt is only made when
        not already connected and at least 15 seconds have passed since the
        game-wide last connect (game.last_connect is shared by all bots).
        Returns True on success, False otherwise.
        """
        if not self.is_connected() and (time.time() - self.game.last_connect > 15):
            if self.session.connect(host, port):
                print('[' + self.name + '] Connected')
                # reset game variables
                self.last_x = 0
                self.last_y = 0
                self.view_x = 0
                self.view_y = 0
                self.view_w = 0
                self.view_h = 0
                # reset some more variables
                self.game.last_connect = time.time()
                self.has_sent_init = False
                self.last_sent_spawn = 0
                # clear our lists
                self.ids = []
                # NOTE(review): __init__ uses [] for ladder; here it becomes
                # a dict — parse_packet id 49 expects a dict, id 50 a list.
                self.ladder = {}
                # begin the handshake: init, spawn, then an initial mouse
                # position so the server starts sending world updates
                self.send_init()
                self.send_spawn()
                self.send_move_relative(0, 0)  # stay in place for now
                self.n_updates = 0  # counts ticks that delivered inbound data
                return True
        print('[' + self.name + '] Failed to connect')
        return False
# version 520
def disconnect(self):
if self.is_connected():
# disconnect
self.session.disconnect()
# remove ourselves from all cell watchers
# in game cell objects
for cell in self.game.cells.values():
cell.remove_watcher(self)
# remove all bot.ids from game.ids
for id in self.ids:
self.game.remove_id(id)
if self.has_id(id):
self.remove_id(id)
self.game.remove_id(id)
# game deletes all cells w/o watchers
return True
return False
# version 520
def update(self):
# connect if not connected
if not self.is_connected():
self.connect(self.game.host, self.game.port)
return False
# spawn if not alive
if not self.is_alive():
self.send_spawn()
# dont return: if we do, we dont parse spawn packet
# get all data
all = []
all.extend(self.session.inbound)
self.session.inbound = self.session.inbound[len(all):]
if (len(all) != 0):
self.n_updates += 1
# parse all data
for data in all:
self.buffer.fill(data)
packet = self.buffer.read_byte()
self.parse_packet(packet)
if not self.last_update == self.game.timestamp:
# if we didn't receive an update this tick, we dont need to check for destroys.
return
# removing dead cells no longer happens in bot.py
# cells are only removed on a packet, or when there are no watchers (only on disconnect)
return True
    def act(self):
        """Decide and perform this bot's next action (AI hook).

        Currently a no-op placeholder.
        """
        # todo: write AI
        pass
    def parse_packet(self, id):
        """Dispatch one inbound packet by its id byte.

        The packet payload is read from self.buffer, which update() filled
        before calling this. Known ids: 16 world update, 17 spectate view,
        20 cell reset, 32 new own-cell id, 49 FFA leaderboard, 50 teams
        leaderboard, 64 viewport bounds.
        NOTE(review): the parameter `id` shadows the builtin and is
        re-bound inside the 20/32/49 branches — intentional but fragile.
        """
        b = self.buffer
        if id == 16:
            # World update: mergers (eat events), cell updates, then deaths.
            self.last_update = self.game.timestamp
            self.parse_mergers()
            self.parse_updates()
            self.parse_deaths()
        elif id == 17:
            # Spectate/view position update.
            x = b.read_float()
            y = b.read_float()
            ratio = b.read_float()
            print('[17]', x, y, ratio)
        elif id == 20:
            # Cell reset: forget every cell id we own.
            for id in self.ids:
                self.game.remove_id(id)
            self.ids = []
            print('[20] cell reset')
        elif id == 32:
            # Server assigned us a new cell id (e.g. after spawn or split).
            id = b.read_int()
            self.add_id(id)
            self.game.add_id(id)
            print('[32] ', id)
        elif id == 49:
            # FFA leaderboard: id -> name mapping.
            self.ladder = {}
            self.mode = 'ffa'
            amount = b.read_int()
            for i in range(0, amount):
                id = b.read_int()
                self.ladder[id] = b.read_string()
            self.game.ladder = self.ladder.copy()
            self.game.mode = 'ffa'
            #print('[49]')
        elif id == 50:
            # the 3rd ladder version, original was 48 (non-indexed ladder), 49 (indexed) and now 50
            # Teams leaderboard: a list of team score fractions.
            self.ladder = []
            count = b.read_int()
            for i in range(0, count):
                self.ladder.append(b.read_float())
            if len(self.game.ladder) == 0:
                self.game.ladder = self.ladder.copy()
            self.game.mode = 'teams'
            #print('[50]')
        elif id == 64:
            # World/viewport bounds, mirrored onto the shared game object.
            self.game.view_x = self.view_x = b.read_double()
            self.game.view_y = self.view_y = b.read_double()
            self.game.view_w = self.view_w = b.read_double()
            self.game.view_h = self.view_h = b.read_double()
            print('[64] viewport:', self.view_x, self.view_y, self.view_w, self.view_h)
# version 520
    def parse_mergers(self):
        """Parse merge/eat events from a world-update packet.

        Each event is a (hunter_id, prey_id) pair: the prey cell is
        removed from the global cell table, and its id is also dropped
        from this bot and the game when the eaten cell was ours.
        """
        amount = self.buffer.read_short()
        for i in range(0, amount):
            hunter, prey = self.buffer.read_int(), self.buffer.read_int()
            if self.game.has_id(hunter) and self.game.has_id(prey): # if we both know these cells
                # self.ids: our own cells ids
                # game.ids: all bot cell ids
                # game.cells: all global cell objects
                # game.cells: remove eaten cell from global cells
                cell = self.game.get_cell(prey) # prey = prey_id
                self.game.remove_cell(prey)
                # self.ids/game.ids: remove cell id from bot and game if it is our own
                if self.has_id(cell.id):
                    self.remove_id(cell.id)
                    self.game.remove_id(cell.id)
                print('[game/parse_mergers] %d ate %d' % (hunter, prey))
# version 520
    def parse_updates(self):
        """Parse per-cell state updates from a world-update packet.

        Reads cell records until a 0-id terminator.  Known cells get
        their position, velocity estimate, size and colour refreshed;
        unknown cells are created and registered globally.
        """
        b = self.buffer
        current_time = time.time()
        while True:
            id = b.read_int()
            if id == 0:
                break
            x = b.read_short()
            y = b.read_short()
            size = b.read_short()
            red = b.read_byte()
            green = b.read_byte()
            blue = b.read_byte()
            color = (red, green, blue)
            # Flag byte: bit 0 marks a virus, bit 4 "agitated".
            byte = b.read_byte()
            virus = (byte & 1)
            agitated = (byte & 16) # what is this?
            # skipping bytes, no idea what the purpose is
            if (byte & 2):
                b.skip(4)
            elif (byte & 4):
                b.skip(8)
            elif (byte & 8):
                b.skip(16)
            # read name
            name = b.read_string()
            # check if this cell is known globally
            if self.game.has_cell(id):
                # known globally
                # update global cell
                cell = self.game.get_cell(id)
                #print(str(current_time - cell.last_update))
                t = current_time - cell.last_update
                if (t > 0.0):
                    # Estimate velocity from displacement, smoothed 50/50
                    # with the previous estimate and clamped to 800.
                    vx = (float(x) - float(cell.x))/t
                    vy = (float(y) - float(cell.y))/t
                    cell.vx = (vx + cell.vx)/2.0
                    cell.vy = (vy + cell.vy)/2.0
                    v = math.sqrt(cell.vx*cell.vx + cell.vy*cell.vy)
                    max_velocity = 800
                    if v > max_velocity:
                        cell.vx *= (max_velocity/v)
                        cell.vy *= (max_velocity/v)
                    cell.x = x
                    cell.y = y
                else:
                    # No positive time delta: nudge the position by the
                    # stored velocity instead of recomputing it.
                    cell.x += cell.vx*t
                    cell.y += cell.vy*t
                cell.interpolated_x = cell.x
                cell.interpolated_y = cell.y
                cell.last_update = current_time
                cell.size = size
                cell.color = color
                cell.virus = virus
                cell.agitated = agitated
                cell.timestamp = self.game.timestamp
            else:
                # not known globally
                # create new global cell
                cell = Cell(id, x, y, size, color, virus, agitated, name)
                cell.watchers.append(self)
                cell.timestamp = self.game.timestamp
                # set owner if it is ours
                if self.has_id(id):
                    cell.owner = self
                # add cell to global cells
                self.game.add_cell(cell)
# version 520
    def parse_deaths(self):
        """Parse destroyed-cell notifications from a world-update packet.

        Drops dead cells from this bot (respawning when the last owned
        cell died) and removes them from the global cell table.
        """
        amount = self.buffer.read_int()
        for i in range(0, amount):
            id = self.buffer.read_int()
            # if it is one of ours
            if self.has_id(id):
                self.remove_id(id)
                self.game.remove_id(id)
                if len(self.ids) == 0:
                    self.send_spawn()
                    print("[bot/parse_deaths] No cells left, respawning")
            # remove cell globally
            if self.game.has_cell(id):
                cell = self.game.get_cell(id)
                cell.remove_watcher(self)
                self.game.remove_cell(id)
def send_init(self):
if self.is_connected() and not self.has_sent_init:
self.has_sent_init = True
self.buffer.write_byte(254)
self.buffer.write_int(4)
self.buffer.flush_session(self.session)
self.buffer.write_byte(255)
self.buffer.write_int(1)
self.buffer.flush_session(self.session)
return True
return False
def send_spawn(self):
if self.is_connected() and (time.time() - self.last_sent_spawn > 4):
for cell in self.game.cells.values():
cell.remove_watcher(self)
self.last_sent_spawn = time.time()
self.buffer.write_string(self.name)
self.buffer.flush_session(self.session)
return True
return False
def send_move(self, x, y):
if self.is_connected() and self.is_alive():
if not (self.last_x == x and self.last_y == y):
# update our last variables
self.last_x = x
self.last_y = y
# send new coordinates
self.buffer.write_byte(16)
self.buffer.write_double(x)
self.buffer.write_double(y)
self.buffer.write_int(0)
# flush
self.buffer.flush_session(self.session)
return True
return False
def send_move_relative(self, rel_x, rel_y):
x, y = self.get_center()
x += rel_x
y += rel_y
return self.send_move(x, y)
def send_split(self, times=1):
if self.is_connected() and self.is_alive():
for i in range(0, times):
self.buffer.write_byte(17)
self.buffer.flush_session(self.session)
return True
return False
def send_throw(self, times=1):
if self.is_connected() and self.is_alive():
for i in range(0, times):
self.buffer.write_byte(21)
self.buffer.flush_session(self.session)
return True
return False
def send_spectate(self):
if self.is_connected():
self.buffer.write_byte(1)
self.buffer.flush_session(self.session)
return True
return False
def get_center(self):
x = 0
y = 0
amount = 0
for id in self.ids:
cell = self.game.get_cell(id)
if cell:
x += cell.x
y += cell.y
amount += 1
amount = max(1, amount) # prevent div by zero
return x/amount, y/amount
def get_interpolated_center(self, current_time):
x = 0
y = 0
amount = 0
for id in self.ids:
cell = self.game.get_cell(id)
if cell:
x += cell.interpolated_x
y += cell.interpolated_y
amount += 1
amount = max(1, amount)
return x/float(amount), y/float(amount)
def get_mass(self):
mass = 0
for id in self.ids:
cell = self.game.get_cell(id)
if cell:
mass += cell.size
return mass
def is_alive(self):
return len(self.ids) > 0
    def is_connected(self):
        """Return True when the underlying network session is connected."""
        return self.session.is_connected()
    def add_id(self, id):
        """Register a cell id as owned by this bot.

        Returns True when the id was newly added; falls through (None)
        when it was already present.
        NOTE(review): the chunk appears truncated right after this
        method — confirm whether an explicit `return False` follows.
        """
        if not self.has_id(id):
            self.ids.append(id)
            return True
| |
<filename>aiosonic/__init__.py
"""Main module."""
import asyncio
import random
import re
import codecs
from functools import partial
from json import dumps
from json import loads
from ssl import SSLContext
import gzip
import zlib
from io import IOBase
from os.path import basename
from urllib.parse import urlparse
from urllib.parse import urlencode
from urllib.parse import ParseResult
from typing import Any
from typing import AsyncIterator
from typing import Callable
from typing import Dict
from typing import Iterator
from typing import Union
from typing import Optional
from typing import List
from typing import Tuple
import chardet
from aiosonic_utils.structures import CaseInsensitiveDict
from aiosonic.connection import Connection
from aiosonic.exceptions import ConnectTimeout
from aiosonic.exceptions import HttpParsingError
from aiosonic.exceptions import MaxRedirects
from aiosonic.exceptions import MissingWriterException
from aiosonic.exceptions import ReadTimeout
from aiosonic.exceptions import RequestTimeout
from aiosonic.connectors import TCPConnector
from aiosonic.exceptions import TimeoutException
from aiosonic.timeout import Timeouts
from aiosonic.utils import cache_decorator
from aiosonic.version import VERSION
# TYPES
from aiosonic.types import ParamsType
from aiosonic.types import DataType
from aiosonic.types import BodyType
from aiosonic.types import ParsedBodyType
try:
import cchardet as chardet
except ImportError:
pass
# VARIABLES
_HTTP_RESPONSE_STATUS_LINE = re.compile(
r'HTTP/(?P<version>(\d.)?(\d)) (?P<code>\d+) (?P<reason>[\w]*)')
_CHARSET_RGX = re.compile(r'charset=(?P<charset>[\w-]*);?')
_CACHE: Dict[str, Any] = {}
_LRU_CACHE_SIZE = 512
_CHUNK_SIZE = 1024 * 4 # 4kilobytes
_NEW_LINE = '\r\n'
_COMPRESSED_OPTIONS = set([b'gzip', b'deflate'])
# Functions with cache
@cache_decorator(_LRU_CACHE_SIZE)
def _get_url_parsed(url: str) -> ParseResult:
    """Get url parsed.

    With cache_decorator for the sake of speed: repeated requests to the
    same URL skip re-parsing (cache bounded to _LRU_CACHE_SIZE entries).
    """
    return urlparse(url)
# Classes
class HttpHeaders(CaseInsensitiveDict):
    """Http headers dict with case-insensitive key lookup."""
    @staticmethod
    def _clear_line(line: bytes):
        """Split a raw ``b'Key: value\\r\\n'`` header line into [key, value]."""
        return line.rstrip().split(b': ')
#: Headers
HeadersType = Union[Dict[str, str], List[Tuple[str, str]], HttpHeaders]
def _add_header(headers: HeadersType, key: str, value: str):
"""Safe add header method."""
if isinstance(headers, List):
included = [item for item in headers if item[0] == key]
if included:
headers.remove(included[0])
headers.append((key, value))
else:
headers[key] = value
class HttpResponse:
    """Custom HttpResponse class for handling responses.
    Properties:
    * status_code (int): response status code
    * headers (HttpHeaders): headers in case insensitive dict
    * raw_headers (List[Tuple[bytes, bytes]]): headers as raw format
    """
    def __init__(self):
        self.headers = HttpHeaders()
        self.raw_headers = []
        # Accumulated body bytes (decompressed if applicable).
        self.body = b''
        # Parsed status line groups: version/code/reason.
        self.response_initial = None
        # Connection the response is being read from.
        self.connection = None
        self.chunked = False
        # Value of the content-encoding header (b'gzip'/b'deflate'/b'').
        self.compressed = b''
        self.chunks_readed = False
    def _set_response_initial(self, data: bytes):
        """Parse first bytes from http response (the status line)."""
        res = re.match(_HTTP_RESPONSE_STATUS_LINE,
                       data.decode().rstrip('\r\n'))
        if not res:
            raise HttpParsingError('response line parsing error')
        self.response_initial = res.groupdict()
    def _set_header(self, key: bytes, val: bytes):
        """Record one header both in the dict and in raw order."""
        self.headers[key] = val
        self.raw_headers.append((key, val))
    def _set_connection(self, connection: Connection):
        """Attach the connection this response is being read from."""
        self.connection = connection
    @property
    def status_code(self) -> int:
        """Get status code."""
        return int(self.response_initial['code'])
    def _set_body(self, data):
        """Append body data, decompressing per the content-encoding."""
        if self.compressed == b'gzip':
            self.body += gzip.decompress(data)
        elif self.compressed == b'deflate':
            self.body += zlib.decompress(data)
        else:
            self.body += data
    def _get_encoding(self) -> str:
        """Pick a text encoding: charset header, JSON default, or sniff.

        NOTE(review): headers are stored with bytes keys elsewhere in
        this module but looked up here with a str key — confirm that
        CaseInsensitiveDict normalises bytes/str, otherwise this always
        falls through to detection.
        """
        ctype = self.headers.get('content-type', '').lower()
        res = re.findall(_CHARSET_RGX, ctype)
        encoding = ''
        if res:
            encoding = res[0]
        if encoding:
            # Discard charsets Python does not know about.
            try:
                codecs.lookup(encoding)
            except LookupError:
                encoding = ''
        if not encoding:
            if 'application' in ctype and 'json' in ctype:
                # RFC 7159 states that the default encoding is UTF-8.
                encoding = 'utf-8'
            else:
                encoding = chardet.detect(self.body)['encoding']
        if not encoding:
            encoding = 'utf-8'
        return encoding.lower()
    async def content(self) -> bytes:
        """Read response body as bytes (draining chunks if needed)."""
        if self.chunked and not self.body:
            res = b''
            async for chunk in self.read_chunks():
                res += chunk
            self._set_body(res)
        return self.body
    async def text(self) -> str:
        """Read response body decoded to str."""
        body = await self.content()
        encoding = self._get_encoding()
        return (body).decode(encoding)
    async def json(self, json_decoder=loads) -> dict:
        """Read response body parsed as JSON."""
        assert 'application/json' in self.headers['content-type'].lower()
        body = await self.content()
        return json_decoder(body)
    async def read_chunks(self) -> AsyncIterator[bytes]:
        """Read chunks from chunked response.

        Each chunk is preceded by its hex size line and followed by CRLF;
        a zero size terminates the stream and releases the connection.
        """
        while True and not self.chunks_readed:
            chunk_size = int((await
                              self.connection.reader.readline()).rstrip(), 16)
            if not chunk_size:
                # read last CRLF
                await self.connection.reader.readline()
                # free connection
                await self.connection.release()
                break
            # +2 covers the trailing CRLF, stripped before yielding.
            chunk = await self.connection.reader.readexactly(chunk_size + 2)
            yield chunk[:-2]
        self.chunks_readed = True
def _get_header_data(url: ParseResult,
                     connection: Connection,
                     method: str,
                     headers: HeadersType = None,
                     params: ParamsType = None,
                     multipart: bool = None,
                     boundary: str = None) -> Union[bytes, HeadersType]:
    """Prepare the request head.

    Returns a headers dict for HTTP/2 connections, or the fully encoded
    request-line + headers bytes for HTTP/1.1.
    """
    path = url.path or '/'
    http2conn = connection.h2conn
    if params:
        query = urlencode(params)
        # NOTE(review): when '?' is already in path the query is appended
        # without a '&' separator — confirm whether that branch is
        # reachable (urlparse keeps the query out of .path).
        path += '%s' % query if '?' in path else '?%s' % query
    get_base = '%s %s HTTP/1.1%s' % (method.upper(), path, _NEW_LINE)
    port = url.port or (443 if url.scheme == 'https' else 80)
    hostname = url.hostname
    # NOTE(review): this also appends ':443' for default-port https;
    # convention is to omit the default port from the Host header.
    if port != 80:
        hostname += ':' + str(port)
    headers_base = {}
    if http2conn:
        # HTTP/2 pseudo-headers; authority must not carry the port here.
        headers_base.update({
            ':method': method,
            ':authority': hostname.split(':')[0],
            ':scheme': 'https',
            ':path': path,
            'user-agent': 'aioload/%s' % VERSION
        })
    else:
        headers_base.update({
            'HOST': hostname,
            'Connection': 'keep-alive',
            'User-Agent': 'aioload/%s' % VERSION
        })
    if multipart:
        headers_base[
            'Content-Type'] = 'multipart/form-data; boundary="%s"' % boundary
    # Caller-supplied headers override the defaults above.
    if headers:
        headers_base.update(headers)
    if http2conn:
        return headers_base
    for key, data in headers_base.items():
        get_base += '%s: %s%s' % (key, data, _NEW_LINE)
    return (get_base + _NEW_LINE).encode()
def _setup_body_request(data: DataType,
                        headers: HeadersType) -> ParsedBodyType:
    """Get body to be sent.

    Iterators/async-iterators are streamed chunked; dicts/tuples are
    form-urlencoded; everything else is sent as-is (encoded if str).
    Content-Type and Content-Length headers are added as a side effect.
    """
    if isinstance(data, (AsyncIterator, Iterator)):
        _add_header(headers, 'Transfer-Encoding', 'chunked')
        return data
    else:
        body: BodyType = b''
        content_type = None
        if isinstance(data, (Dict, tuple)):
            body = urlencode(data)
            content_type = 'application/x-www-form-urlencoded'
        else:
            body = data
            content_type = 'text/plain'
        # NOTE(review): this membership test is case-sensitive, and for
        # list-style headers `in` compares whole (key, value) tuples —
        # confirm a caller-supplied Content-Type is never duplicated.
        if 'content-type' not in headers:
            _add_header(headers, 'Content-Type', content_type)
        body = body.encode() if isinstance(body, str) else body
        _add_header(headers, 'Content-Length', str(len(body)))
        return body
def _handle_chunk(chunk: bytes, connection: Connection):
    """Write one transfer-encoding: chunked chunk to the connection.

    Emits the lowercase hex size line, the payload and the trailing CRLF.
    """
    if not connection.writer:
        raise MissingWriterException('missing writer in connection')
    size_line = format(len(chunk), 'x') + _NEW_LINE
    connection.writer.write(size_line.encode() + chunk + _NEW_LINE.encode())
async def _send_chunks(connection: Connection, body: BodyType):
    """Send a chunked request body, finishing with the zero-size chunk.

    :raises ValueError: when body is neither an (async) iterator.
    :raises MissingWriterException: when the connection has no writer.
    """
    if isinstance(body, AsyncIterator):
        async for chunk in body:
            _handle_chunk(chunk, connection)
    elif isinstance(body, Iterator):
        for chunk in body:
            _handle_chunk(chunk, connection)
    else:
        raise ValueError('wrong body param.')
    if not connection.writer:
        raise MissingWriterException('missing writer in connection')
    # Terminator: "0\r\n\r\n".
    connection.writer.write(('0' + _NEW_LINE * 2).encode())
async def _send_multipart(data: Dict[str, str],
                          boundary: str,
                          headers: HeadersType,
                          chunk_size: int = _CHUNK_SIZE) -> bytes:
    """Build a multipart/form-data request body.

    :param data: field name -> value mapping; file objects (IOBase) are
        read in ``chunk_size`` pieces via an executor to avoid blocking
        the event loop, then closed.
    :param boundary: multipart boundary (without the leading dashes).
    :param headers: headers collection; Content-Length is added here.
    :returns: the complete encoded body.
    """
    # TODO: precalculate body size and stream request
    # precalculate file sizes by os.path.getsize
    to_send = b''
    for key, val in data.items():
        # write --boundary + field
        to_send += ('--%s%s' % (boundary, _NEW_LINE)).encode()
        if isinstance(val, IOBase):
            # TODO: Utility to accept files with multipart metadata
            # (Content-Type, custom filename, ...),
            # write Content-Disposition
            to_write = 'Content-Disposition: form-data; ' + \
                'name="%s"; filename="%s"%s%s' % (
                    key, basename(val.name), _NEW_LINE, _NEW_LINE)
            to_send += to_write.encode()
            # Read and append file chunks.  The chunk variable must NOT
            # be called `data`: the original shadowed the parameter while
            # its .items() iterator was still live.
            loop = asyncio.get_event_loop()
            while True:
                file_chunk = await loop.run_in_executor(
                    None, val.read, chunk_size)
                if not file_chunk:
                    break
                to_send += file_chunk
            val.close()
        else:
            to_send += ('Content-Disposition: form-data; name="%s"%s%s' %
                        (key, _NEW_LINE, _NEW_LINE)).encode()
            to_send += val.encode() + _NEW_LINE.encode()
    # write --boundary-- for finish
    to_send += ('--%s--' % boundary).encode()
    _add_header(headers, 'Content-Length', str(len(to_send)))
    return to_send
async def _do_request(urlparsed: ParseResult,
                      headers_data: Callable,
                      connector: TCPConnector,
                      body: Optional[ParsedBodyType],
                      verify: bool,
                      ssl: Optional[SSLContext],
                      timeouts: Optional[Timeouts],
                      http2: bool = False) -> HttpResponse:
    """Perform one HTTP exchange over a pooled connection.

    Acquires a connection, writes the request head (and body, chunked or
    plain), then parses the status line and headers of the response.
    The body itself is left for HttpResponse.content()/read_chunks().
    """
    async with (await connector.acquire(urlparsed)) as connection:
        await connection.connect(urlparsed, verify, ssl, timeouts, http2)
        # headers_data is a partial; it needs the live connection to know
        # whether to produce HTTP/2 headers or raw HTTP/1.1 bytes.
        to_send = headers_data(connection=connection)
        if connection.h2conn:
            return await connection.http2_request(to_send, body)
        if not connection.writer or not connection.reader:
            raise ConnectionError('Not connection writer or reader')
        connection.writer.write(to_send)
        if body:
            if isinstance(body, (AsyncIterator, Iterator)):
                await _send_chunks(connection, body)
            else:
                connection.writer.write(body)
        response = HttpResponse()
        # get response code and version
        try:
            response._set_response_initial(await asyncio.wait_for(
                connection.reader.readline(),
                (timeouts or connector.timeouts).sock_read))
        except TimeoutException:
            raise ReadTimeout()
        res_data = None
        # reading headers; the blank line separating headers from the
        # body has no ': ' and terminates the loop.
        while True:
            res_data = await connection.reader.readline()
            if b': ' not in res_data:
                break
            response._set_header(*HttpHeaders._clear_line(res_data))
        size = response.headers.get(b'content-length')
        chunked = response.headers.get(b'transfer-encoding', '') == b'chunked'
        keepalive = b'close' not in response.headers.get(b'connection', b'')
        response.compressed = response.headers.get(b'content-encoding', '')
        if size:
            response._set_body(await connection.reader.read(int(size)))
        if chunked:
            # Defer chunk reading to the caller; keep the connection busy.
            connection.block_until_read_chunks()
            response.chunked = True
        if keepalive:
            connection.keep_alive()
        response._set_connection(connection)
        return response
class HTTPClient:
"""aiosonic.HTTPClient class.
This class holds the client creation that will be used for requests.
"""
    def __init__(self, connector: Optional[TCPConnector] = None):
        """Initialize client options.
        Params:
            * **connector**: TCPConnector to be used if provided;
              a default TCPConnector is created otherwise.
        """
        self.connector = connector or TCPConnector()
async def _request_with_body(self,
url: str,
method: str,
data: DataType = None,
headers: HeadersType = None,
json: dict = None,
params: ParamsType = None,
json_serializer=dumps,
multipart: bool = False,
verify: bool = True,
ssl: SSLContext = None,
timeouts: Timeouts = None,
follow: bool = False,
http2: bool = False) -> HttpResponse:
"""Do post http request. """
if not data and not json:
TypeError('missing argument, either "json" or "data"')
if json:
data = json_serializer(json)
headers = headers or HttpHeaders()
_add_header(headers, 'Content-Type', 'application/json')
return await self.request(url,
method,
headers,
params,
data,
multipart,
verify=verify,
ssl=ssl,
follow=follow,
timeouts=timeouts,
http2=http2)
async def get(self,
url: str,
headers: HeadersType = None,
params: ParamsType = None,
verify: bool = True,
ssl: SSLContext = None,
timeouts: Timeouts = None,
follow: bool = False,
http2: bool = False,
encode: bool = | |
# Copyright (c) 2019 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Performance testing traffic generator library."""
from robot.api import logger
from robot.libraries.BuiltIn import BuiltIn
from .DropRateSearch import DropRateSearch
from .Constants import Constants
from .ssh import SSH, exec_cmd_no_error
from .topology import NodeType
from .topology import NodeSubTypeTG
from .topology import Topology
from .MLRsearch.AbstractMeasurer import AbstractMeasurer
from .MLRsearch.MultipleLossRatioSearch import MultipleLossRatioSearch
from .MLRsearch.ReceiveRateMeasurement import ReceiveRateMeasurement
from .PLRsearch.PLRsearch import PLRsearch
__all__ = ['TGDropRateSearchImpl', 'TrafficGenerator', 'OptimizedSearch']
class TGDropRateSearchImpl(DropRateSearch):
    """Drop Rate Search implementation."""

    def __init__(self):
        super(TGDropRateSearchImpl, self).__init__()

    def measure_loss(self, rate, frame_size, loss_acceptance,
                     loss_acceptance_type, traffic_profile, skip_warmup=False):
        """Run the traffic and evaluate the measured results.

        :param rate: Offered traffic load.
        :param frame_size: Size of frame.
        :param loss_acceptance: Permitted drop ratio or frames count.
        :param loss_acceptance_type: Type of permitted loss.
        :param traffic_profile: Module name as a traffic profile identifier.
            See resources/traffic_profiles/trex for implemented modules.
        :param skip_warmup: Start TRex without warmup traffic if true.
        :type rate: float
        :type frame_size: str
        :type loss_acceptance: float
        :type loss_acceptance_type: LossAcceptanceType
        :type traffic_profile: str
        :type skip_warmup: bool
        :returns: Drop threshold exceeded? (True/False)
        :rtype: bool
        :raises NotImplementedError: If TG is not supported.
        :raises RuntimeError: If TG is not specified.
        """
        # We need the TrafficGenerator instance Robot Framework created,
        # so the trex_stl_* keywords act on the live TG session.
        tg_instance = BuiltIn().get_library_instance(
            'resources.libraries.python.TrafficGenerator')
        subtype = tg_instance.node['subtype']
        if subtype is None:
            raise RuntimeError('TG subtype not defined')
        if subtype != NodeSubTypeTG.TREX:
            raise NotImplementedError("TG subtype not supported")
        run_kwargs = {}
        if skip_warmup:
            run_kwargs['warmup_time'] = 0.0
        unit_rate = str(rate) + self.get_rate_type_str()
        tg_instance.trex_stl_start_remote_exec(
            self.get_duration(), unit_rate, frame_size, traffic_profile,
            **run_kwargs)
        loss = tg_instance.get_loss()
        sent = tg_instance.get_sent()
        if self.loss_acceptance_type_is_percentage():
            loss = (float(loss) / float(sent)) * 100
        logger.trace("comparing: {} < {} {}".format(
            loss, loss_acceptance, loss_acceptance_type))
        # Loss within the acceptance threshold means the rate passed.
        return float(loss) <= float(loss_acceptance)

    def get_latency(self):
        """Return min/avg/max latency.

        :returns: Latency stats.
        :rtype: list
        """
        tg_instance = BuiltIn().get_library_instance(
            'resources.libraries.python.TrafficGenerator')
        return tg_instance.get_latency_int()
class TrafficGenerator(AbstractMeasurer):
"""Traffic Generator.
FIXME: Describe API."""
# TODO: Decrease friction between various search and rate provider APIs.
# TODO: Remove "trex" from lines which could work with other TGs.
# Use one instance of TrafficGenerator for all tests in test suite
ROBOT_LIBRARY_SCOPE = 'TEST SUITE'
    def __init__(self):
        # Last-run measurement results; None until a traffic run stored
        # them (presumably in the trex_stl_* methods — not in this chunk).
        self._result = None
        self._loss = None
        self._sent = None
        self._latency = None
        self._received = None
        # Topology node; set by initialize_traffic_generator().
        self._node = None
        # T-REX interface order mapping
        self._ifaces_reordered = False
        # Parameters not given by measure().
        self.frame_size = None
        self.traffic_profile = None
        self.warmup_time = None
    @property
    def node(self):
        """Getter.
        :returns: Traffic generator node; None until
            initialize_traffic_generator() has run.
        :rtype: dict
        """
        return self._node
    def get_loss(self):
        """Return number of lost packets from the last traffic run.
        :returns: Number of lost packets; None if no run happened yet.
        :rtype: str
        """
        return self._loss
    def get_sent(self):
        """Return number of sent packets from the last traffic run.
        :returns: Number of sent packets; None if no run happened yet.
        :rtype: str
        """
        return self._sent
    def get_received(self):
        """Return number of received packets from the last traffic run.
        :returns: Number of received packets; None if no run happened yet.
        :rtype: str
        """
        return self._received
    def get_latency_int(self):
        """Return rounded min/avg/max latency of the last traffic run.
        :returns: Latency stats; None if no run happened yet.
        :rtype: list
        """
        return self._latency
    def initialize_traffic_generator(
            self, tg_node, tg_if1, tg_if2, tg_if1_adj_node, tg_if1_adj_if,
            tg_if2_adj_node, tg_if2_adj_if, osi_layer, tg_if1_dst_mac=None,
            tg_if2_dst_mac=None):
        """TG initialization.

        Installs TRex over SSH, resolves addressing for the configured
        OSI layer (MACs for L2/L3, IPv4 for L7), writes /etc/trex_cfg.yaml
        and starts TRex.

        TODO: Document why do we need (and how do we use) _ifaces_reordered.
        :param tg_node: Traffic generator node.
        :param tg_if1: TG - name of first interface.
        :param tg_if2: TG - name of second interface.
        :param tg_if1_adj_node: TG if1 adjecent node.
        :param tg_if1_adj_if: TG if1 adjecent interface.
        :param tg_if2_adj_node: TG if2 adjecent node.
        :param tg_if2_adj_if: TG if2 adjecent interface.
        :param osi_layer: 'L2', 'L3' or 'L7' - OSI Layer testing type.
        :param tg_if1_dst_mac: Interface 1 destination MAC address.
        :param tg_if2_dst_mac: Interface 2 destination MAC address.
        :type tg_node: dict
        :type tg_if1: str
        :type tg_if2: str
        :type tg_if1_adj_node: dict
        :type tg_if1_adj_if: str
        :type tg_if2_adj_node: dict
        :type tg_if2_adj_if: str
        :type osi_layer: str
        :type tg_if1_dst_mac: str
        :type tg_if2_dst_mac: str
        :returns: nothing
        :raises RuntimeError: In case of issue during initialization.
        """
        if tg_node['type'] != NodeType.TG:
            raise RuntimeError('Node type is not a TG')
        self._node = tg_node
        if self._node['subtype'] == NodeSubTypeTG.TREX:
            ssh = SSH()
            ssh.connect(self._node)
            (ret, _, _) = ssh.exec_command(
                "sudo -E sh -c '{0}/resources/tools/trex/"
                "trex_installer.sh {1}'".format(Constants.REMOTE_FW_DIR,
                                                Constants.TREX_INSTALL_VERSION),
                timeout=1800)
            if int(ret) != 0:
                raise RuntimeError('TRex installation failed.')
            if1_pci = Topology().get_interface_pci_addr(self._node, tg_if1)
            if2_pci = Topology().get_interface_pci_addr(self._node, tg_if2)
            if1_addr = Topology().get_interface_mac(self._node, tg_if1)
            if2_addr = Topology().get_interface_mac(self._node, tg_if2)
            if osi_layer == 'L2':
                # L2: each side targets the other TG interface directly.
                if1_adj_addr = if2_addr
                if2_adj_addr = if1_addr
            elif osi_layer == 'L3':
                if1_adj_addr = Topology().get_interface_mac(tg_if1_adj_node,
                                                            tg_if1_adj_if)
                if2_adj_addr = Topology().get_interface_mac(tg_if2_adj_node,
                                                            tg_if2_adj_if)
            elif osi_layer == 'L7':
                # L7 (ASTF) config uses IPv4 addresses instead of MACs.
                if1_addr = Topology().get_interface_ip4(self._node, tg_if1)
                if2_addr = Topology().get_interface_ip4(self._node, tg_if2)
                if1_adj_addr = Topology().get_interface_ip4(tg_if1_adj_node,
                                                            tg_if1_adj_if)
                if2_adj_addr = Topology().get_interface_ip4(tg_if2_adj_node,
                                                            tg_if2_adj_if)
            else:
                raise ValueError("Unknown Test Type")
            # in case of switched environment we can override MAC addresses
            if tg_if1_dst_mac is not None and tg_if2_dst_mac is not None:
                if1_adj_addr = tg_if1_dst_mac
                if2_adj_addr = tg_if2_dst_mac
            # Keep interfaces in ascending PCI order, swapping all
            # associated addressing to match.
            if min(if1_pci, if2_pci) != if1_pci:
                if1_pci, if2_pci = if2_pci, if1_pci
                if1_addr, if2_addr = if2_addr, if1_addr
                if1_adj_addr, if2_adj_addr = if2_adj_addr, if1_adj_addr
                self._ifaces_reordered = True
            if osi_layer == 'L2' or osi_layer == 'L3':
                (ret, _, _) = ssh.exec_command(
                    "sudo sh -c 'cat << EOF > /etc/trex_cfg.yaml\n"
                    "- version: 2\n"
                    " interfaces: [\"{0}\",\"{1}\"]\n"
                    " port_info:\n"
                    " - dest_mac: [{2}]\n"
                    " src_mac: [{3}]\n"
                    " - dest_mac: [{4}]\n"
                    " src_mac: [{5}]\n"
                    "EOF'"\
                    .format(if1_pci, if2_pci,
                            "0x"+if1_adj_addr.replace(":", ",0x"),
                            "0x"+if1_addr.replace(":", ",0x"),
                            "0x"+if2_adj_addr.replace(":", ",0x"),
                            "0x"+if2_addr.replace(":", ",0x")))
            elif osi_layer == 'L7':
                (ret, _, _) = ssh.exec_command(
                    "sudo sh -c 'cat << EOF > /etc/trex_cfg.yaml\n"
                    "- version: 2\n"
                    " interfaces: [\"{0}\",\"{1}\"]\n"
                    " port_info:\n"
                    " - ip: [{2}]\n"
                    " default_gw: [{3}]\n"
                    " - ip: [{4}]\n"
                    " default_gw: [{5}]\n"
                    "EOF'"\
                    .format(if1_pci, if2_pci,
                            if1_addr, if1_adj_addr,
                            if2_addr, if2_adj_addr))
            else:
                raise ValueError("Unknown Test Type")
            if int(ret) != 0:
                raise RuntimeError('TRex config generation error')
            self._startup_trex(osi_layer)
    def _startup_trex(self, osi_layer):
        """Startup sequence for the TRex traffic generator.

        Retries the whole kill/unbind/configure/start cycle up to three
        times, giving the freshly started server 20 probe attempts each
        round.

        :param osi_layer: 'L2', 'L3' or 'L7' - OSI Layer testing type.
        :type osi_layer: str
        :raises RuntimeError: If node subtype is not a TREX or startup failed.
        """
        if self._node['subtype'] != NodeSubTypeTG.TREX:
            raise RuntimeError('Node subtype is not a TREX!')
        for _ in range(0, 3):
            # Kill TRex only if it is already running.
            cmd = "sh -c 'pgrep t-rex && pkill t-rex && sleep 3 || true'"
            exec_cmd_no_error(
                self._node, cmd, sudo=True, message='Kill TRex failed!')
            # Configure TRex.
            ports = ''
            for port in self._node['interfaces'].values():
                ports += ' {pci}'.format(pci=port.get('pci_address'))
            cmd = ("sh -c 'cd {dir}/scripts/ && "
                   "./dpdk_nic_bind.py -u {ports} || true'"
                   .format(dir=Constants.TREX_INSTALL_DIR, ports=ports))
            exec_cmd_no_error(
                self._node, cmd, sudo=True,
                message='Unbind PCI ports from driver failed!')
            cmd = ("sh -c 'cd {dir}/scripts/ && ./trex-cfg'"
                   .format(dir=Constants.TREX_INSTALL_DIR))
            exec_cmd_no_error(
                self._node, cmd, sudo=True, message='Config TRex failed!')
            # Start TRex (ASTF mode for L7, stateless otherwise).
            cmd = ("sh -c 'cd {dir}/scripts/ && "
                   "nohup ./t-rex-64 {mode} -i -c 7 > "
                   "/tmp/trex.log 2>&1 &' > /dev/null"
                   .format(dir=Constants.TREX_INSTALL_DIR,
                           mode='--astf' if osi_layer == 'L7' else ''))
            try:
                exec_cmd_no_error(self._node, cmd, sudo=True)
            except RuntimeError:
                # Surface the server log before giving up on this start.
                cmd = "sh -c 'cat /tmp/trex.log'"
                exec_cmd_no_error(self._node, cmd, sudo=True,
                                  message='Get TRex logs failed!')
                raise RuntimeError('Start TRex failed!')
            # Test if TRex started successfully.
            cmd = ("sh -c '{dir}/resources/tools/trex/trex_server_info.py'"
                   .format(dir=Constants.REMOTE_FW_DIR))
            try:
                exec_cmd_no_error(
                    self._node, cmd, sudo=True, message='Test TRex failed!',
                    retries=20)
            except RuntimeError:
                continue
            return
        # After max retries TRex is still not responding to API critical error
        # occurred.
        raise RuntimeError('Start TRex failed after multiple retries!')
@staticmethod
def is_trex_running(node):
"""Check if TRex is running using pidof.
:param node: Traffic generator node.
:type node: dict
:returns: True if TRex is running otherwise False.
:rtype: bool
:raises RuntimeError: If node type is not a TG.
"""
if node['type'] != NodeType.TG:
raise RuntimeError('Node type is not a TG')
ssh = SSH()
ssh.connect(node)
ret, _, _ = ssh.exec_command_sudo("pidof t-rex")
return bool(int(ret) == 0)
@staticmethod
def teardown_traffic_generator(node):
"""TG teardown.
:param node: Traffic generator node.
:type node: dict
:returns: nothing
:raises RuntimeError: If node type is not a TG,
or if TRex teardown fails.
"""
if node['type'] != NodeType.TG:
raise RuntimeError('Node type is not a TG')
if node['subtype'] == NodeSubTypeTG.TREX:
ssh = SSH()
ssh.connect(node)
(ret, _, _) = | |
a batch of builds
:param [Build] builds:
:param str project: Project ID or project name
:rtype: [Build]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
content = self._serialize.body(builds, '[Build]')
response = self._send(http_method='PATCH',
location_id='0cd358e1-9217-4d94-8269-1c1ee6f93dcf',
version='4.0',
route_values=route_values,
content=content)
return self._deserialize('[Build]', self._unwrap_collection(response))
def get_build_changes(self, project, build_id, continuation_token=None, top=None, include_source_change=None):
"""GetBuildChanges.
Gets the changes associated with a build
:param str project: Project ID or project name
:param int build_id:
:param str continuation_token:
:param int top: The maximum number of changes to return
:param bool include_source_change:
:rtype: [Change]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if build_id is not None:
route_values['buildId'] = self._serialize.url('build_id', build_id, 'int')
query_parameters = {}
if continuation_token is not None:
query_parameters['continuationToken'] = self._serialize.query('continuation_token', continuation_token, 'str')
if top is not None:
query_parameters['$top'] = self._serialize.query('top', top, 'int')
if include_source_change is not None:
query_parameters['includeSourceChange'] = self._serialize.query('include_source_change', include_source_change, 'bool')
response = self._send(http_method='GET',
location_id='54572c7b-bbd3-45d4-80dc-28be08941620',
version='4.0',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[Change]', self._unwrap_collection(response))
def get_changes_between_builds(self, project, from_build_id=None, to_build_id=None, top=None):
"""GetChangesBetweenBuilds.
[Preview API] Gets the changes associated between given builds
:param str project: Project ID or project name
:param int from_build_id:
:param int to_build_id:
:param int top: The maximum number of changes to return
:rtype: [Change]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
query_parameters = {}
if from_build_id is not None:
query_parameters['fromBuildId'] = self._serialize.query('from_build_id', from_build_id, 'int')
if to_build_id is not None:
query_parameters['toBuildId'] = self._serialize.query('to_build_id', to_build_id, 'int')
if top is not None:
query_parameters['$top'] = self._serialize.query('top', top, 'int')
response = self._send(http_method='GET',
location_id='f10f0ea5-18a1-43ec-a8fb-2042c7be9b43',
version='4.0-preview.2',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[Change]', self._unwrap_collection(response))
def get_build_controller(self, controller_id):
"""GetBuildController.
Gets a controller
:param int controller_id:
:rtype: :class:`<BuildController> <build.v4_0.models.BuildController>`
"""
route_values = {}
if controller_id is not None:
route_values['controllerId'] = self._serialize.url('controller_id', controller_id, 'int')
response = self._send(http_method='GET',
location_id='fcac1932-2ee1-437f-9b6f-7f696be858f6',
version='4.0',
route_values=route_values)
return self._deserialize('BuildController', response)
def get_build_controllers(self, name=None):
"""GetBuildControllers.
Gets controller, optionally filtered by name
:param str name:
:rtype: [BuildController]
"""
query_parameters = {}
if name is not None:
query_parameters['name'] = self._serialize.query('name', name, 'str')
response = self._send(http_method='GET',
location_id='fcac1932-2ee1-437f-9b6f-7f696be858f6',
version='4.0',
query_parameters=query_parameters)
return self._deserialize('[BuildController]', self._unwrap_collection(response))
def create_definition(self, definition, project=None, definition_to_clone_id=None, definition_to_clone_revision=None):
"""CreateDefinition.
Creates a new definition
:param :class:`<BuildDefinition> <build.v4_0.models.BuildDefinition>` definition:
:param str project: Project ID or project name
:param int definition_to_clone_id:
:param int definition_to_clone_revision:
:rtype: :class:`<BuildDefinition> <build.v4_0.models.BuildDefinition>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
query_parameters = {}
if definition_to_clone_id is not None:
query_parameters['definitionToCloneId'] = self._serialize.query('definition_to_clone_id', definition_to_clone_id, 'int')
if definition_to_clone_revision is not None:
query_parameters['definitionToCloneRevision'] = self._serialize.query('definition_to_clone_revision', definition_to_clone_revision, 'int')
content = self._serialize.body(definition, 'BuildDefinition')
response = self._send(http_method='POST',
location_id='dbeaf647-6167-421a-bda9-c9327b25e2e6',
version='4.0',
route_values=route_values,
query_parameters=query_parameters,
content=content)
return self._deserialize('BuildDefinition', response)
def delete_definition(self, definition_id, project=None):
"""DeleteDefinition.
Deletes a definition and all associated builds
:param int definition_id:
:param str project: Project ID or project name
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if definition_id is not None:
route_values['definitionId'] = self._serialize.url('definition_id', definition_id, 'int')
self._send(http_method='DELETE',
location_id='dbeaf647-6167-421a-bda9-c9327b25e2e6',
version='4.0',
route_values=route_values)
def get_definition(self, definition_id, project=None, revision=None, min_metrics_time=None, property_filters=None, include_latest_builds=None):
"""GetDefinition.
Gets a definition, optionally at a specific revision
:param int definition_id:
:param str project: Project ID or project name
:param int revision:
:param datetime min_metrics_time:
:param [str] property_filters:
:param bool include_latest_builds:
:rtype: :class:`<BuildDefinition> <build.v4_0.models.BuildDefinition>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if definition_id is not None:
route_values['definitionId'] = self._serialize.url('definition_id', definition_id, 'int')
query_parameters = {}
if revision is not None:
query_parameters['revision'] = self._serialize.query('revision', revision, 'int')
if min_metrics_time is not None:
query_parameters['minMetricsTime'] = self._serialize.query('min_metrics_time', min_metrics_time, 'iso-8601')
if property_filters is not None:
property_filters = ",".join(property_filters)
query_parameters['propertyFilters'] = self._serialize.query('property_filters', property_filters, 'str')
if include_latest_builds is not None:
query_parameters['includeLatestBuilds'] = self._serialize.query('include_latest_builds', include_latest_builds, 'bool')
response = self._send(http_method='GET',
location_id='dbeaf647-6167-421a-bda9-c9327b25e2e6',
version='4.0',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('BuildDefinition', response)
def get_definitions(self, project=None, name=None, repository_id=None, repository_type=None, query_order=None, top=None, continuation_token=None, min_metrics_time=None, definition_ids=None, path=None, built_after=None, not_built_after=None, include_all_properties=None, include_latest_builds=None, task_id_filter=None):
"""GetDefinitions.
Gets definitions, optionally filtered by name
:param str project: Project ID or project name
:param str name:
:param str repository_id:
:param str repository_type:
:param str query_order:
:param int top:
:param str continuation_token:
:param datetime min_metrics_time:
:param [int] definition_ids:
:param str path:
:param datetime built_after:
:param datetime not_built_after:
:param bool include_all_properties:
:param bool include_latest_builds:
:param str task_id_filter:
:rtype: [BuildDefinitionReference]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
query_parameters = {}
if name is not None:
query_parameters['name'] = self._serialize.query('name', name, 'str')
if repository_id is not None:
query_parameters['repositoryId'] = self._serialize.query('repository_id', repository_id, 'str')
if repository_type is not None:
query_parameters['repositoryType'] = self._serialize.query('repository_type', repository_type, 'str')
if query_order is not None:
query_parameters['queryOrder'] = self._serialize.query('query_order', query_order, 'str')
if top is not None:
query_parameters['$top'] = self._serialize.query('top', top, 'int')
if continuation_token is not None:
query_parameters['continuationToken'] = self._serialize.query('continuation_token', continuation_token, 'str')
if min_metrics_time is not None:
query_parameters['minMetricsTime'] = self._serialize.query('min_metrics_time', min_metrics_time, 'iso-8601')
if definition_ids is not None:
definition_ids = ",".join(map(str, definition_ids))
query_parameters['definitionIds'] = self._serialize.query('definition_ids', definition_ids, 'str')
if path is not None:
query_parameters['path'] = self._serialize.query('path', path, 'str')
if built_after is not None:
query_parameters['builtAfter'] = self._serialize.query('built_after', built_after, 'iso-8601')
if not_built_after is not None:
query_parameters['notBuiltAfter'] = self._serialize.query('not_built_after', not_built_after, 'iso-8601')
if include_all_properties is not None:
query_parameters['includeAllProperties'] = self._serialize.query('include_all_properties', include_all_properties, 'bool')
if include_latest_builds is not None:
query_parameters['includeLatestBuilds'] = self._serialize.query('include_latest_builds', include_latest_builds, 'bool')
if task_id_filter is not None:
query_parameters['taskIdFilter'] = self._serialize.query('task_id_filter', task_id_filter, 'str')
response = self._send(http_method='GET',
location_id='dbeaf647-6167-421a-bda9-c9327b25e2e6',
version='4.0',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[BuildDefinitionReference]', self._unwrap_collection(response))
def update_definition(self, definition, definition_id, project=None, secrets_source_definition_id=None, secrets_source_definition_revision=None):
"""UpdateDefinition.
Updates an existing definition
:param :class:`<BuildDefinition> <build.v4_0.models.BuildDefinition>` definition:
:param int definition_id:
:param str project: Project ID or project name
:param int secrets_source_definition_id:
:param int secrets_source_definition_revision:
:rtype: :class:`<BuildDefinition> <build.v4_0.models.BuildDefinition>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if definition_id is not None:
route_values['definitionId'] = self._serialize.url('definition_id', definition_id, 'int')
query_parameters = {}
if secrets_source_definition_id is not None:
query_parameters['secretsSourceDefinitionId'] = self._serialize.query('secrets_source_definition_id', secrets_source_definition_id, 'int')
if secrets_source_definition_revision is not None:
query_parameters['secretsSourceDefinitionRevision'] = self._serialize.query('secrets_source_definition_revision', secrets_source_definition_revision, 'int')
content = self._serialize.body(definition, 'BuildDefinition')
response = self._send(http_method='PUT',
location_id='dbeaf647-6167-421a-bda9-c9327b25e2e6',
version='4.0',
route_values=route_values,
query_parameters=query_parameters,
content=content)
return self._deserialize('BuildDefinition', response)
def create_folder(self, folder, project, path):
"""CreateFolder.
[Preview API] Creates a new folder
:param :class:`<Folder> <build.v4_0.models.Folder>` folder:
:param str project: Project ID or project name
:param str path:
:rtype: :class:`<Folder> <build.v4_0.models.Folder>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if path is not None:
route_values['path'] = self._serialize.url('path', path, 'str')
content = self._serialize.body(folder, 'Folder')
response = self._send(http_method='PUT',
location_id='a906531b-d2da-4f55-bda7-f3e676cc50d9',
version='4.0-preview.1',
route_values=route_values,
content=content)
return self._deserialize('Folder', response)
def delete_folder(self, project, path):
"""DeleteFolder.
[Preview API] Deletes a definition folder for given folder name and path and all it's existing definitions and it's corresponding builds
:param str project: Project ID or project name
:param str path:
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if path is not None:
route_values['path'] = self._serialize.url('path', path, 'str')
self._send(http_method='DELETE',
location_id='a906531b-d2da-4f55-bda7-f3e676cc50d9',
version='4.0-preview.1',
route_values=route_values)
def get_folders(self, project, path=None, query_order=None):
"""GetFolders.
[Preview API] Gets folders
:param str project: Project ID or project name
:param str path:
:param str query_order:
:rtype: [Folder]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if path is not None:
route_values['path'] = self._serialize.url('path', path, 'str')
query_parameters = {}
if query_order is not None:
query_parameters['queryOrder'] = self._serialize.query('query_order', query_order, 'str')
response = self._send(http_method='GET',
location_id='a906531b-d2da-4f55-bda7-f3e676cc50d9',
version='4.0-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[Folder]', self._unwrap_collection(response))
def update_folder(self, folder, project, path):
"""UpdateFolder.
[Preview API] Updates an existing folder at given existing path
:param :class:`<Folder> <build.v4_0.models.Folder>` folder:
:param str project: Project ID or project name
:param str path:
:rtype: :class:`<Folder> <build.v4_0.models.Folder>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if path is not None:
route_values['path'] = self._serialize.url('path', path, 'str')
content = self._serialize.body(folder, 'Folder')
response = self._send(http_method='POST',
location_id='a906531b-d2da-4f55-bda7-f3e676cc50d9',
version='4.0-preview.1',
route_values=route_values,
content=content)
return self._deserialize('Folder', response)
def get_build_log(self, project, build_id, log_id, start_line=None, end_line=None, **kwargs):
"""GetBuildLog.
Gets a log
:param str project: Project ID or project name
:param int build_id:
:param int log_id:
:param long start_line:
:param long end_line:
:rtype: object
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if build_id is not None:
route_values['buildId'] = self._serialize.url('build_id', build_id, 'int')
if log_id is not None:
route_values['logId'] = self._serialize.url('log_id', log_id, 'int')
query_parameters = | |
g():
yield 1
yield 2
raise StopIteration(3) # py2.7 compat.
f1 = e.map(lambda x: x, g())
assert isinstance(f1, Iterator)
start = time() # ensure that we compute eagerly
while not s.tasks:
yield gen.sleep(0.01)
assert time() < start + 5
g1 = g()
try:
while True:
f = next(f1)
n = yield f._result()
assert n == next(g1)
except StopIteration as e:
with pytest.raises(StopIteration) as exc_info:
next(g1)
assert e.args == exc_info.value.args
@gen_cluster(executor=True)
def test_map_iterator(e, s, a, b):
    """Executor.map over plain iterators returns a lazy Iterator of futures
    while tasks are nonetheless scheduled eagerly on the scheduler."""
    x = iter([1, 2, 3])
    y = iter([10, 20, 30])
    f1 = e.map(add, x, y)
    assert isinstance(f1, Iterator)
    start = time()  # ensure that we compute eagerly
    while not s.tasks:
        yield gen.sleep(0.01)
    assert time() < start + 5
    # mapping over an iterator of futures chains lazily as well
    f2 = e.map(double, f1)
    assert isinstance(f2, Iterator)
    future = next(f2)
    result = yield future._result()
    assert result == (1 + 10) * 2
    futures = list(f2)
    results = []
    for f in futures:
        r = yield f._result()
        results.append(r)
    assert results == [(2 + 20) * 2, (3 + 30) * 2]
    # also works for an iterator of tuples (enumerate)
    items = enumerate(range(10))
    futures = e.map(lambda x: x, items)
    assert isinstance(futures, Iterator)
    result = yield next(futures)._result()
    assert result == (0, 0)
    futures_l = list(futures)
    results = []
    for f in futures_l:
        r = yield f._result()
        results.append(r)
    assert results == [(i, i) for i in range(1,10)]
@gen_cluster(executor=True)
def test_map_infinite_iterators(e, s, a, b):
    """Mapping a finite sequence against an infinite iterator stops at the
    shorter input and yields only finitely many futures."""
    fs = e.map(add, [1, 2], itertools.repeat(10))
    assert len(fs) == 2
def test_map_iterator_sync(loop):
    """Executor.map over a plain iterator works through the synchronous API."""
    with cluster() as (s, [a, b]):
        with Executor(('127.0.0.1', s['port']), loop=loop) as e:
            items = enumerate(range(10))
            futures = e.map(lambda x: x, items)
            # Bug fix: the comparison result used to be discarded, so the test
            # never actually verified the computed value.
            assert next(futures).result() == (0, 0)
@gen_cluster(executor=True)
def test_map_differnet_lengths(e, s, a, b):
    """Map truncates to the shortest of its input sequences.

    NOTE: the function name contains a typo ("differnet"); it is kept as-is
    so the collected test identity does not change.
    """
    futures = e.map(add, [1, 2], [1, 2, 3])
    assert len(futures) == 2
def test_Future_exception_sync_2(loop, capsys):
    """Repeated ensure_default_get calls print only a single message, and the
    executor's get is deregistered from dask's globals after shutdown."""
    with cluster() as (s, [a, b]):
        with Executor(('127.0.0.1', s['port']), loop=loop) as e:
            ensure_default_get(e)
            ensure_default_get(e)
            ensure_default_get(e)
            ensure_default_get(e)
            assert _globals['get'] == e.get
    # four calls, but the informational message should appear exactly once
    out, err = capsys.readouterr()
    assert len(out.strip().split('\n')) == 1
    # after the executor context exits, its get is no longer the default
    assert _globals.get('get') != e.get
@gen_cluster(timeout=60, executor=True)
def test_async_persist(e, s, a, b):
    """Persisting delayed objects collapses their graphs to single keys while
    leaving the original objects' graphs intact."""
    from dask.imperative import delayed, Delayed
    x = delayed(1)
    y = delayed(inc)(x)
    z = delayed(dec)(x)
    w = delayed(add)(y, z)
    yy, ww = e.persist([y, w])
    assert type(yy) == type(y)
    assert type(ww) == type(w)
    # persisted graphs hold one key each; the source graph is untouched
    assert len(yy.dask) == 1
    assert len(ww.dask) == 1
    assert len(w.dask) > 1
    assert y._keys() == yy._keys()
    assert w._keys() == ww._keys()
    while y.key not in s.tasks and w.key not in s.tasks:
        yield gen.sleep(0.01)
    # the executor registers itself as wanting the persisted keys
    assert s.who_wants[y.key] == {e.id}
    assert s.who_wants[w.key] == {e.id}
    yyf, wwf = e.compute([yy, ww])
    yyy, www = yield e._gather([yyf, wwf])
    assert yyy == inc(1)
    assert www == add(inc(1), dec(1))
    # scalar in -> Delayed out; list in -> list/tuple out
    assert isinstance(e.persist(y), Delayed)
    assert isinstance(e.persist([y]), (list, tuple))
@gen_cluster(executor=True)
def test__persist(e, s, a, b):
    """Persisting a dask array rewrites its graph to one future per chunk and
    computes to the same values as the original."""
    pytest.importorskip('dask.array')
    import dask.array as da
    x = da.ones((10, 10), chunks=(5, 10))
    y = 2 * (x + 1)
    assert len(y.dask) == 6
    yy = e.persist(y)
    # original graph untouched; persisted graph holds a future per chunk
    assert len(y.dask) == 6
    assert len(yy.dask) == 2
    assert all(isinstance(v, Future) for v in yy.dask.values())
    assert yy._keys() == y._keys()
    g, h = e.compute([y, yy])
    gg, hh = yield e._gather([g, h])
    assert (gg == hh).all()
def test_persist(loop):
    """Synchronous counterpart of test__persist: persist a dask array and
    check the persisted collection computes to the same values."""
    pytest.importorskip('dask.array')
    import dask.array as da
    with cluster() as (s, [a, b]):
        with Executor(('127.0.0.1', s['port']), loop=loop) as e:
            x = da.ones((10, 10), chunks=(5, 10))
            y = 2 * (x + 1)
            assert len(y.dask) == 6
            yy = e.persist(y)
            # original graph untouched; persisted graph holds a future per chunk
            assert len(y.dask) == 6
            assert len(yy.dask) == 2
            assert all(isinstance(v, Future) for v in yy.dask.values())
            assert yy._keys() == y._keys()
            zz = yy.compute(get=e.get)
            z = y.compute(get=e.get)
            assert (zz == z).all()
@gen_cluster(timeout=60, executor=True)
def test_long_traceback(e, s, a, b):
    """Very deep tracebacks must be truncated so they stay cheap to serialize."""
    from distributed.core import dumps
    n = sys.getrecursionlimit()
    # lower the recursion limit so the deep call overflows quickly
    sys.setrecursionlimit(500)
    try:
        x = e.submit(deep, 1000)
        yield _wait([x])
        # the serialized traceback is bounded despite the deep recursion
        assert len(dumps(e.futures[x.key]['traceback'])) < 10000
        assert isinstance(e.futures[x.key]['exception'], RuntimeError)
    finally:
        sys.setrecursionlimit(n)
@gen_cluster(executor=True)
def test_wait_on_collections(e, s, a, b):
    """_wait on a dask collection blocks until all of its futures are done."""
    import dask.bag as db
    futures = e.map(double, [[1], [2], [3]])
    bag = db.Bag({('b', i): f for i, f in enumerate(futures)}, 'b', 3)
    yield _wait(bag)
    assert all(f.key in a.data or f.key in b.data for f in futures)
@gen_cluster(executor=True)
def test_futures_of(e, s, a, b):
    """futures_of extracts futures nested inside containers and collections."""
    x, y, z = e.map(inc, [1, 2, 3])
    all_three = {x, y, z}
    assert set(futures_of(0)) == set()
    assert set(futures_of(x)) == {x}
    assert set(futures_of([x, y, z])) == all_three
    assert set(futures_of([x, [y], [[z]]])) == all_three
    assert set(futures_of({'x': x, 'y': [y]})) == {x, y}
    import dask.bag as db
    bag = db.Bag({('b', i): f for i, f in enumerate([x, y, z])}, 'b', 3)
    assert set(futures_of(bag)) == all_three
@gen_cluster(executor=True)
def test_futures_of_cancelled_raises(e, s, a, b):
    """Any use of a cancelled future raises CancelledError, and dependents
    never reach the scheduler."""
    x = e.submit(inc, 1)
    yield e._cancel([x])
    with pytest.raises(CancelledError):
        yield x._result()
    with pytest.raises(CancelledError):
        yield e._get({'x': (inc, x), 'y': (inc, 2)}, ['x', 'y'])
    with pytest.raises(CancelledError):
        e.submit(inc, x)
    with pytest.raises(CancelledError):
        e.submit(add, 1, y=x)
    with pytest.raises(CancelledError):
        e.map(add, [1], y=x)
    # the dependent task must not have been scheduled at all
    assert 'y' not in s.tasks
@gen_cluster(ncores=[('127.0.0.1', 1)], executor=True)
def test_dont_delete_recomputed_results(e, s, w):
    """Data recomputed after its future was released must survive the
    scheduler's periodic delete cycle."""
    x = e.submit(inc, 1)  # compute first time
    yield _wait([x])
    x.__del__()  # trigger garbage collection
    xx = e.submit(inc, 1)  # compute second time
    start = time()
    while xx.key not in w.data:  # data shows up
        yield gen.sleep(0.01)
    assert time() < start + 1
    # keep checking for slightly longer than one delete interval
    while time() < start + (s.delete_interval + 100) / 1000:  # and stays
        assert xx.key in w.data
        yield gen.sleep(0.01)
@gen_cluster(ncores=[], executor=True)
def test_fatally_serialized_input(e, s):
    """Submitting an object whose serialization fails fatally must still
    result in a task reaching the scheduler rather than wedging the client."""
    bad = FatallySerializedObject()
    fut = e.submit(inc, bad)  # keep a reference so the future is not released
    while not s.tasks:
        yield gen.sleep(0.01)
@gen_cluster(executor=True)
def test_balance_tasks_by_stacks(e, s, a, b):
    """Two sequentially submitted tasks are balanced across both workers."""
    first = e.submit(inc, 1)
    yield _wait(first)
    second = e.submit(inc, 2)
    yield _wait(second)
    assert len(a.data) == len(b.data) == 1
@gen_cluster(executor=True)
def test_run(e, s, a, b):
    """Executor._run executes a function on all workers, on a selected
    subset, or on none when the worker list is empty."""
    out = yield e._run(inc, 1)
    assert out == {a.address: 2, b.address: 2}
    out = yield e._run(inc, 1, workers=[a.address])
    assert out == {a.address: 2}
    out = yield e._run(inc, 1, workers=[])
    assert out == {}
def test_run_sync(loop):
    """Synchronous Executor.run mirrors _run, including worker filtering
    and keyword-argument forwarding."""
    def add_with_default(x, y=10):
        return x + y
    with cluster() as (s, [a, b]):
        with Executor(('127.0.0.1', s['port']), loop=loop) as e:
            a_addr = '127.0.0.1:%d' % a['port']
            b_addr = '127.0.0.1:%d' % b['port']
            result = e.run(add_with_default, 1, y=2)
            assert result == {a_addr: 3, b_addr: 3}
            result = e.run(add_with_default, 1, y=2, workers=[a_addr])
            assert result == {a_addr: 3}
def test_run_exception(loop):
    """Exceptions raised by a function run on workers propagate to the caller
    with their original type and message."""
    def raise_exception(exc_type, exc_msg):
        raise exc_type(exc_msg)
    with cluster() as (s, [a, b]):
        with Executor(('127.0.0.1', s['port']), loop=loop) as e:
            for exc_type in (ValueError, RuntimeError):
                with pytest.raises(exc_type) as err:
                    e.run(raise_exception, exc_type, 'informative message')
                assert 'informative message' in str(err.value)
def test_diagnostic_ui(loop):
    """The diagnostic queries ncores/who_has/has_what accept several address
    spellings (string, list of strings, (host, port) tuple) and agree on the
    cluster's state."""
    with cluster() as (s, [a, b]):
        a_addr = '127.0.0.1:%d' % a['port']
        b_addr = '127.0.0.1:%d' % b['port']
        with Executor(('127.0.0.1', s['port']), loop=loop) as e:
            # ncores: no filter, list filter, string filter, tuple filter
            d = e.ncores()
            assert d == {a_addr: 1, b_addr: 1}
            d = e.ncores([a_addr])
            assert d == {a_addr: 1}
            d = e.ncores(a_addr)
            assert d == {a_addr: 1}
            d = e.ncores(('127.0.0.1', a['port']))
            assert d == {a_addr: 1}
            x = e.submit(inc, 1)
            y = e.submit(inc, 2)
            z = e.submit(inc, 3)
            wait([x, y, z])
            # who_has maps keys to the workers holding them
            d = e.who_has()
            assert set(d) == {x.key, y.key, z.key}
            assert all(w in [a_addr, b_addr] for v in d.values() for w in v)
            assert all(d.values())
            d = e.who_has([x, y])
            assert set(d) == {x.key, y.key}
            d = e.who_has(x)
            assert set(d) == {x.key}
            # has_what maps workers to the keys they hold
            d = e.has_what()
            assert set(d) == {a_addr, b_addr}
            assert all(k in [x.key, y.key, z.key] for v in d.values() for k in v)
            d = e.has_what([a_addr])
            assert set(d) == {a_addr}
            d = e.has_what(a_addr)
            assert set(d) == {a_addr}
            d = e.has_what(('127.0.0.1', a['port']))
            assert set(d) == {a_addr}
def test_diagnostic_nbytes_sync(loop):
    """Executor.nbytes reports memory both per key and summarized per
    task-name prefix."""
    with cluster() as (s, [a, b]):
        with Executor(('127.0.0.1', s['port']), loop=loop) as e:
            inc_futures = e.map(inc, [1, 2, 3])
            double_futures = e.map(double, [1, 2, 3])
            wait(inc_futures + double_futures)
            per_key = {f.key: sizeof(1) for f in inc_futures + double_futures}
            assert e.nbytes(summary=False) == per_key
            assert e.nbytes(summary=True) == {'inc': sizeof(1) * 3,
                                              'double': sizeof(1) * 3}
@gen_cluster(executor=True)
def test_diagnostic_nbytes(e, s, a, b):
    """Scheduler.get_nbytes mirrors the executor-level nbytes diagnostics,
    both per key and summarized per task-name prefix."""
    inc_futures = e.map(inc, [1, 2, 3])
    double_futures = e.map(double, [1, 2, 3])
    yield _wait(inc_futures + double_futures)
    per_key = {f.key: sizeof(1) for f in inc_futures + double_futures}
    assert s.get_nbytes(summary=False) == per_key
    assert s.get_nbytes(summary=True) == {'inc': sizeof(1) * 3,
                                          'double': sizeof(1) * 3}
@gen_test()
def test_worker_aliases():
    """Workers can be targeted by their `name` alias in worker restrictions."""
    s = Scheduler(validate=True)
    s.start(0)
    a = Worker(s.ip, s.port, name='alice')
    b = Worker(s.ip, s.port, name='bob')
    yield [a._start(), b._start()]
    e = Executor((s.ip, s.port), start=False)
    yield e._start()
    # restrict by alias rather than by address
    L = e.map(inc, range(10), workers='alice')
    yield _wait(L)
    # all results must land on the aliased worker only
    assert len(a.data) == 10
    assert len(b.data) == 0
    # explicit teardown: executor first, then workers, then scheduler
    yield e._shutdown()
    yield [a._close(), b._close()]
    yield s.close()
def test_persist_get_sync(loop):
    """Persisted delayed intermediates can be reused inside later delayed
    graphs: builds (1+1) + (2+2), persists it, then lazily adds 10 and
    computes through the executor's get."""
    with cluster() as (s, [a, b]):
        with Executor(('127.0.0.1', s['port']), loop=loop) as e:
            # (removed unused local `dadd = delayed(add)`)
            x, y = delayed(1), delayed(2)
            xx = delayed(add)(x, x)
            yy = delayed(add)(y, y)
            xxyy = delayed(add)(xx, yy)
            xxyy2 = e.persist(xxyy)
            xxyy3 = delayed(add)(xxyy2, 10)
            assert xxyy3.compute(get=e.get) == ((1+1) + (2+2)) + 10
@gen_cluster(executor=True)
def test_persist_get(e, | |
"""The dialog for calculating minimal cut sets"""
import io
import traceback
import scipy
from qtpy.QtCore import Qt, Slot
from qtpy.QtWidgets import (QButtonGroup, QCheckBox, QComboBox, QCompleter,
QDialog, QGroupBox, QHBoxLayout, QHeaderView,
QLabel, QLineEdit, QMessageBox, QPushButton,
QRadioButton, QTableWidget, QVBoxLayout)
import optlang_enumerator.mcs_computation as mcs_computation
import cobra
from cobra.util.solver import interface_to_str
from cnapy.appdata import AppData
import cnapy.utils as utils
from cnapy.flux_vector_container import FluxVectorContainer
class MCSDialog(QDialog):
"""A dialog to perform minimal cut set computation"""
    def __init__(self, appdata: AppData, central_widget):
        """Build the MCS computation dialog.

        Lays out, top to bottom: the target-region table, the desired-region
        table, the option/solver/search row, two scenario checkboxes and the
        Compute/Close buttons, then wires up all signals.

        Args:
            appdata: Application data holding the current COBRApy project.
            central_widget: Widget that broadcasts reaction ids on click.
        """
        QDialog.__init__(self)
        self.setWindowTitle("Minimal Cut Sets Computation")
        self.appdata = appdata
        self.central_widget = central_widget
        # Buffers for capturing solver output/errors during computation.
        self.out = io.StringIO()
        self.err = io.StringIO()
        self.layout = QVBoxLayout()
        # --- Target region(s): table of (region no, expression T, relation, rhs t) ---
        l1 = QLabel("Target Region(s)")
        self.layout.addWidget(l1)
        s1 = QHBoxLayout()
        # Completer over all reaction ids of the current model.
        completer = QCompleter(
            self.appdata.project.cobra_py_model.reactions.list_attr("id"), self)
        completer.setCaseSensitivity(Qt.CaseInsensitive)
        self.target_list = QTableWidget(1, 4)
        self.target_list.setHorizontalHeaderLabels(
            ["region no", "T", "≥/≤", "t"])
        self.target_list.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch)
        # Fixed widths for the region-number and relation columns.
        self.target_list.horizontalHeader().setSectionResizeMode(0, QHeaderView.Fixed)
        self.target_list.horizontalHeader().resizeSection(0, 100)
        self.target_list.horizontalHeader().setSectionResizeMode(2, QHeaderView.Fixed)
        self.target_list.horizontalHeader().resizeSection(2, 50)
        item = QLineEdit("1")
        self.target_list.setCellWidget(0, 0, item)
        item2 = ReceiverLineEdit(self)
        item2.setCompleter(completer)
        self.target_list.setCellWidget(0, 1, item2)
        combo = QComboBox(self.target_list)
        combo.insertItem(1, "≤")
        combo.insertItem(2, "≥")
        self.target_list.setCellWidget(0, 2, combo)
        item = QLineEdit("0")
        self.target_list.setCellWidget(0, 3, item)
        # The line edit that receives broadcast reaction ids (see receive_input).
        self.active_receiver = item2
        s1.addWidget(self.target_list)
        # +/- buttons for adding/removing target rows.
        s11 = QVBoxLayout()
        self.add_target = QPushButton("+")
        self.add_target.clicked.connect(self.add_target_region)
        self.rem_target = QPushButton("-")
        self.rem_target.clicked.connect(self.rem_target_region)
        s11.addWidget(self.add_target)
        s11.addWidget(self.rem_target)
        s1.addItem(s11)
        self.layout.addItem(s1)
        # --- Desired region(s): same structure as the target table ---
        l2 = QLabel("Desired Region(s)")
        self.layout.addWidget(l2)
        s2 = QHBoxLayout()
        self.desired_list = QTableWidget(1, 4)
        self.desired_list.setHorizontalHeaderLabels(
            ["region no", "D", "≥/≤", "d"])
        self.desired_list.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch)
        self.desired_list.horizontalHeader().setSectionResizeMode(0, QHeaderView.Fixed)
        self.desired_list.horizontalHeader().resizeSection(0, 100)
        self.desired_list.horizontalHeader().setSectionResizeMode(2, QHeaderView.Fixed)
        self.desired_list.horizontalHeader().resizeSection(2, 50)
        item = QLineEdit("1")
        self.desired_list.setCellWidget(0, 0, item)
        item2 = ReceiverLineEdit(self)
        item2.setCompleter(completer)
        self.desired_list.setCellWidget(0, 1, item2)
        combo = QComboBox(self.desired_list)
        combo.insertItem(1, "≤")
        combo.insertItem(2, "≥")
        self.desired_list.setCellWidget(0, 2, combo)
        item = QLineEdit("0")
        self.desired_list.setCellWidget(0, 3, item)
        s2.addWidget(self.desired_list)
        s21 = QVBoxLayout()
        self.add_desire = QPushButton("+")
        self.add_desire.clicked.connect(self.add_desired_region)
        self.rem_desire = QPushButton("-")
        self.rem_desire.clicked.connect(self.rem_desired_region)
        s21.addWidget(self.add_desire)
        s21.addWidget(self.rem_desire)
        s2.addItem(s21)
        self.layout.addItem(s2)
        # --- Options row: checkboxes/limits | solver group | search-mode group ---
        s3 = QHBoxLayout()
        sgx = QVBoxLayout()
        self.gen_kos = QCheckBox("Gene KOs")
        self.exclude_boundary = QCheckBox(
            "Exclude boundary\nreactions as cuts")
        sg1 = QHBoxLayout()
        # Labels column for the three numeric limits.
        s31 = QVBoxLayout()
        l = QLabel("Max. Solutions")
        s31.addWidget(l)
        l = QLabel("Max. Size")
        s31.addWidget(l)
        l = QLabel("Time Limit [sec]")
        s31.addWidget(l)
        sg1.addItem(s31)
        # Input column for the three numeric limits ("inf" means unlimited).
        s32 = QVBoxLayout()
        self.max_solu = QLineEdit("inf")
        self.max_solu.setMaximumWidth(50)
        s32.addWidget(self.max_solu)
        self.max_size = QLineEdit("7")
        self.max_size.setMaximumWidth(50)
        s32.addWidget(self.max_size)
        self.time_limit = QLineEdit("inf")
        self.time_limit.setMaximumWidth(50)
        s32.addWidget(self.time_limit)
        sg1.addItem(s32)
        sgx.addWidget(self.gen_kos)
        sgx.addWidget(self.exclude_boundary)
        sgx.addItem(sg1)
        s3.addItem(sgx)
        # Solver group: single radio button labeled with the optlang solver.
        g3 = QGroupBox("Solver")
        s33 = QVBoxLayout()
        self.bg1 = QButtonGroup()
        self.solver_optlang = QRadioButton()
        self.set_optlang_solver_text()
        self.solver_optlang.setToolTip(
            "Change solver in COBRApy configuration.")
        s33.addWidget(self.solver_optlang)
        self.bg1.addButton(self.solver_optlang)
        self.bg1.buttonClicked.connect(self.configure_solver_options)
        g3.setLayout(s33)
        s3.addWidget(g3)
        # Search-mode group: four mutually exclusive enumeration strategies.
        g4 = QGroupBox("MCS search")
        s34 = QVBoxLayout()
        self.bg2 = QButtonGroup()
        self.any_mcs = QRadioButton("any MCS (fast)")
        self.any_mcs.setChecked(True)
        s34.addWidget(self.any_mcs)
        self.bg2.addButton(self.any_mcs)
        # Search type: by cardinality only with CPLEX/Gurobi possible
        self.mcs_by_cardinality = QRadioButton("by cardinality")
        s34.addWidget(self.mcs_by_cardinality)
        self.bg2.addButton(self.mcs_by_cardinality)
        self.smalles_mcs_first = QRadioButton("smallest MCS first")
        s34.addWidget(self.smalles_mcs_first)
        self.bg2.addButton(self.smalles_mcs_first)
        # Search type: continuous search only with optlang + CPLEX/Gurobi possible
        self.mcs_continuous_search = QRadioButton("continuous search")
        s34.addWidget(self.mcs_continuous_search)
        self.bg2.addButton(self.mcs_continuous_search)
        g4.setLayout(s34)
        s3.addWidget(g4)
        self.layout.addItem(s3)
        # Disable incompatible combinations
        self.solver_optlang.setChecked(True)
        self.configure_solver_options()
        # --- Scenario options ---
        s4 = QVBoxLayout()
        self.consider_scenario = QCheckBox(
            "Consider constraint given by scenario")
        s4.addWidget(self.consider_scenario)
        self.advanced = QCheckBox(
            "Advanced: Define knockout/addition costs for genes/reactions")
        self.advanced.setEnabled(False)
        s4.addWidget(self.advanced)
        self.layout.addItem(s4)
        # --- Compute / Close buttons ---
        buttons = QHBoxLayout()
        self.compute_mcs = QPushButton("Compute MCS")
        buttons.addWidget(self.compute_mcs)
        self.cancel = QPushButton("Close")
        buttons.addWidget(self.cancel)
        self.layout.addItem(buttons)
        # max width for buttons
        self.add_target.setMaximumWidth(20)
        self.rem_target.setMaximumWidth(20)
        self.add_desire.setMaximumWidth(20)
        self.rem_desire.setMaximumWidth(20)
        self.setLayout(self.layout)
        # Connecting the signal
        self.cancel.clicked.connect(self.reject)
        self.compute_mcs.clicked.connect(self.compute)
        self.central_widget.broadcastReactionID.connect(self.receive_input)
@Slot(str)
def receive_input(self, text):
completer_mode = self.active_receiver.completer().completionMode()
# temporarily disable completer popup
self.active_receiver.completer().setCompletionMode(QCompleter.CompletionMode.InlineCompletion)
self.active_receiver.insert(text)
self.active_receiver.completer().setCompletionMode(completer_mode)
@Slot()
def set_optlang_solver_text(self):
self.optlang_solver_name = interface_to_str(self.appdata.project.cobra_py_model.problem)
self.solver_optlang.setText(f"{self.optlang_solver_name} (optlang)")
@Slot()
def configure_solver_options(self): # called when switching solver
if self.solver_optlang.isChecked():
self.gen_kos.setChecked(False)
self.gen_kos.setEnabled(False)
self.exclude_boundary.setEnabled(True)
if self.optlang_solver_name != 'cplex' and self.optlang_solver_name != 'gurobi':
if self.mcs_by_cardinality.isChecked() or self.mcs_continuous_search.isChecked():
self.any_mcs.setChecked(True)
self.mcs_by_cardinality.setEnabled(False)
self.mcs_continuous_search.setEnabled(False)
else:
self.mcs_by_cardinality.setEnabled(True)
self.mcs_continuous_search.setEnabled(True)
else:
self.gen_kos.setEnabled(True)
self.exclude_boundary.setChecked(False)
self.mcs_by_cardinality.setEnabled(False)
if self.mcs_by_cardinality.isChecked():
self.any_mcs.setChecked(True)
self.mcs_continuous_search.setEnabled(False)
if self.mcs_continuous_search.isChecked():
self.any_mcs.setChecked(True)
def add_target_region(self):
i = self.target_list.rowCount()
self.target_list.insertRow(i)
completer = QCompleter(
self.appdata.project.cobra_py_model.reactions.list_attr("id"), self)
completer.setCaseSensitivity(Qt.CaseInsensitive)
item = QLineEdit("1")
self.target_list.setCellWidget(i, 0, item)
item2 = ReceiverLineEdit(self)
item2.setCompleter(completer)
self.target_list.setCellWidget(i, 1, item2)
combo = QComboBox(self.target_list)
combo.insertItem(1, "≤")
combo.insertItem(2, "≥")
self.target_list.setCellWidget(i, 2, combo)
item = QLineEdit("0")
self.target_list.setCellWidget(i, 3, item)
def add_desired_region(self):
i = self.desired_list.rowCount()
self.desired_list.insertRow(i)
completer = QCompleter(
self.appdata.project.cobra_py_model.reactions.list_attr("id"), self)
completer.setCaseSensitivity(Qt.CaseInsensitive)
item = QLineEdit("1")
self.desired_list.setCellWidget(i, 0, item)
item2 = ReceiverLineEdit(self)
item2.setCompleter(completer)
self.desired_list.setCellWidget(i, 1, item2)
combo = QComboBox(self.desired_list)
combo.insertItem(1, "≤")
combo.insertItem(2, "≥")
self.desired_list.setCellWidget(i, 2, combo)
item = QLineEdit("0")
self.desired_list.setCellWidget(i, 3, item)
def rem_target_region(self):
i = self.target_list.rowCount()
self.target_list.removeRow(i-1)
def rem_desired_region(self):
i = self.desired_list.rowCount()
self.desired_list.removeRow(i-1)
def compute(self):
mcs_equation_errors = self.check_for_mcs_equation_errors()
if mcs_equation_errors == "":
self.compute_optlang()
else:
QMessageBox.warning(
self,
"MCS target/desired region error",
f"Cannot perform MCS calculation due to the following error(s) "
f"in the given target and/or desired regions:\n"
f"{mcs_equation_errors}"
)
def compute_optlang(self):
max_mcs_num = float(self.max_solu.text())
max_mcs_size = int(self.max_size.text())
timeout = float(self.time_limit.text())
if timeout == float('inf'):
timeout = None
if self.smalles_mcs_first.isChecked():
enum_method = 1
elif self.mcs_by_cardinality.isChecked():
enum_method = 2
elif self.any_mcs.isChecked():
enum_method = 3
elif self.mcs_continuous_search.isChecked():
enum_method = 4
with self.appdata.project.cobra_py_model as model:
update_stoichiometry_hash = False
if self.consider_scenario.isChecked(): # integrate scenario into model bounds
self.appdata.project.load_scenario_into_model(model)
if len(self.appdata.project.scen_values) > 0:
update_stoichiometry_hash = True
for r in model.reactions: # make all reactions bounded for COBRApy FVA
if r.lower_bound == -float('inf'):
r.lower_bound = cobra.Configuration().lower_bound
r.set_hash_value()
update_stoichiometry_hash = True
if r.upper_bound == float('inf'):
r.upper_bound = cobra.Configuration().upper_bound
r.set_hash_value()
update_stoichiometry_hash = True
if self.appdata.use_results_cache and update_stoichiometry_hash:
model.set_stoichiometry_hash_object()
reac_id = model.reactions.list_attr("id")
reac_id_symbols = mcs_computation.get_reac_id_symbols(reac_id)
rows = self.target_list.rowCount()
targets = dict()
for i in range(0, rows):
p1 = self.target_list.cellWidget(i, 0).text()
p2 = self.target_list.cellWidget(i, 1).text()
if len(p1) > 0 and len(p2) > 0:
if self.target_list.cellWidget(i, 2).currentText() == '≤':
p3 = "<="
else:
p3 = ">="
p4 = float(self.target_list.cellWidget(i, 3).text())
targets.setdefault(p1, []).append((p2, p3, p4))
targets = list(targets.values())
try:
targets = [mcs_computation.relations2leq_matrix(mcs_computation.parse_relations(
t, reac_id_symbols=reac_id_symbols), reac_id) for t in targets]
except ValueError:
QMessageBox.warning(self, "Failed to parse the target region(s)",
"Check that the equations are correct.")
return
rows = self.desired_list.rowCount()
desired = dict()
for i in range(0, rows):
p1 = self.desired_list.cellWidget(i, 0).text()
p2 = self.desired_list.cellWidget(i, 1).text()
if len(p1) > 0 and len(p2) > 0:
if self.desired_list.cellWidget(i, 2).currentText() == '≤':
p3 = "<="
else:
p3 = ">="
p4 = float(self.desired_list.cellWidget(i, 3).text())
desired.setdefault(p1, []).append((p2, p3, p4))
desired = list(desired.values())
try:
desired = [mcs_computation.relations2leq_matrix(mcs_computation.parse_relations(
d, reac_id_symbols=reac_id_symbols), reac_id) for d in desired]
except ValueError:
QMessageBox.warning(self, "Failed to parse the desired region(s)",
"Check that the equations are correct.")
return
self.setCursor(Qt.BusyCursor)
try:
mcs, err_val = mcs_computation.compute_mcs(model,
targets=targets, desired=desired, enum_method=enum_method,
max_mcs_size=max_mcs_size, max_mcs_num=max_mcs_num, timeout=timeout,
exclude_boundary_reactions_as_cuts=self.exclude_boundary.isChecked(),
results_cache_dir=self.appdata.results_cache_dir
if self.appdata.use_results_cache else None)
except mcs_computation.InfeasibleRegion as e:
QMessageBox.warning(self, 'Cannot calculate MCS', str(e))
return targets, desired
except Exception:
output = io.StringIO()
traceback.print_exc(file=output)
exstr = output.getvalue()
print(exstr)
utils.show_unknown_error_box(exstr)
return targets, desired
finally:
self.setCursor(Qt.ArrowCursor)
print(err_val)
if err_val == 1:
QMessageBox.warning(self, "Enumeration stopped abnormally",
"Result is probably incomplete.\nCheck console output for more information.")
elif err_val == -1:
QMessageBox.warning(self, "Enumeration terminated permaturely",
"Aborted due to excessive generation of candidates that are not cut sets.\n"
"Modify the problem or try a different enumeration setup.")
if len(mcs) == 0:
QMessageBox.information(self, 'No cut sets',
'Cut sets have not been calculated or do not exist.')
return targets, desired
# omcs = [{reac_id[i]: -1.0 for i in m} for m in mcs]
omcs = scipy.sparse.lil_matrix((len(mcs), len(reac_id)))
for i,m in enumerate(mcs):
for j in m:
omcs[i, j] = -1.0
self.appdata.project.modes = FluxVectorContainer(omcs, reac_id=reac_id)
self.central_widget.mode_navigator.current = 0
QMessageBox.information(self, 'Cut sets found',
str(len(mcs))+' Cut sets have been calculated.')
self.central_widget.mode_navigator.set_to_mcs()
self.central_widget.update_mode()
self.accept()
def check_left_mcs_equation(self, equation: str) -> str:
errors = ""
semantics = []
reaction_ids = []
last_part = ""
counter = 1
for char in equation+" ":
if (char == " ") or (char in ("*", "/", "+", "-")) or (counter == len(equation+" ")):
if last_part != "":
try:
float(last_part)
except ValueError:
reaction_ids.append(last_part)
semantics.append("reaction")
else:
semantics.append("number")
last_part = ""
if counter == len(equation+" "):
break
if char in "*":
semantics.append("multiplication")
elif char in "/":
semantics.append("division")
elif char in ("+", "-"):
semantics.append("dash")
elif char not in " ":
last_part += char
counter += 1
if len(reaction_ids) == 0:
errors += f"EQUATION ERROR in {equation}:\nNo reaction ID is given in the equation\n"
if semantics.count("division") > 1:
errors += f"ERROR in {equation}:\nAn equation must not have more than one /"
last_is_multiplication = False
last_is_division = False
last_is_dash = False
last_is_reaction = False
prelast_is_reaction = False
prelast_is_dash = False
last_is_number = False
is_start = True
for semantic in semantics:
if is_start:
if semantic in ("multiplication", "division"):
errors += f"ERROR in {equation}:\nAn equation must not start with * or /"
is_start = False
if (last_is_multiplication or last_is_division) and (semantic in ("multiplication", "division")):
errors += f"ERROR in {equation}:\n* or / must not follow on * or /\n"
if last_is_dash and (semantic in ("multiplication", "division")):
errors += f"ERROR in {equation}:\n* or / must not follow on + or -\n"
| |
<reponame>JHU-Econ-Choice-2018/brock-mirman-etc-jacalin1<gh_stars>0
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.2'
# jupytext_version: 0.8.6
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %% [markdown]
# # Capital Dynamics in a Stochastic Growth Model
# %% [markdown]
# The handout [BrockMirman](http://econ.jhu.edu/people/ccarroll/public/lecturenotes/DSGEmodels/BrockMirman) derived some facts about a special case of a representative agent DSGE model where analytical results can be obtained. This exercise asks you to explore this model and closely related ones further, numerically, by adapting tools from the [QuantEcon](https://lectures.quantecon.org/py/) treatment of [optimal growth](https://lectures.quantecon.org/py/optgrowth.html) (you should download their Jupyter notebook to get a start).
# %% [markdown]
# ## PROBLEM
# ## Calculate theoretical variance of $k$
# The handout shows that if the productivity shocks $\phi_{t}$ are iid and have variance $\sigma^{2}_{\phi}$ then
#
# $$\newcommand{\var}{\text{var}}$$
# \begin{eqnarray}
# k_{t+1} & = & \log \alpha \beta + \alpha k_{t} + \phi_{t}
# \end{eqnarray}
#
# Show that this implies that the variance of $k$ is
# \begin{eqnarray}
# \var(k) & = & \frac{\sigma^{2}_{\phi}}{1-\alpha^{2}}
# \end{eqnarray}
# %% [markdown]
# ## Solution
#
# We have:
# \begin{eqnarray}
# k_{t+1} & = & \log \alpha \beta + \alpha k_{t} + \phi_{t}
# \end{eqnarray}
#
# Thus taking the variance on both sides:
# $$ var(k_{t+1}) = var(\log \alpha \beta + \alpha k_{t} + \phi_{t}) $$
#
# $\log \alpha \beta$ is constant:
# $$ var(k_{t+1}) = \alpha ^2 var(k_{t}) + var(\phi_{t}) + 2 cov(k_{t}, \phi_{t}) $$
#
# Productivity shocks are iid and uncorrelated to $k_t$, thus using $var(k_{t}) = var(k_{t+1})$ and rearranging:
# $$ var(k) = \frac{\sigma ^2_{\phi}}{1 - \alpha^2} $$
#
# %% [markdown]
# # PROBLEM
# ## Simulate the economy and calculate $\var(k)$
#
# Now using the QuantEcon tools, simulate the economy under the baseline parameter values and show that your simulation results correspond to the theoretical result
# %% [markdown]
# ## Solution (Setup)
# %%
## SETUP
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
from interpolation import interp
from numba import njit, prange
from quantecon.optimize.scalar_maximization import brent_max
class OptimalGrowthModel:
    """Primitives of a stochastic optimal growth model.

    Holds the production function ``f``, utility ``u``, discount factor
    ``β``, lognormal shock parameters ``μ`` and ``s``, an income grid and a
    Monte Carlo draw of shocks for approximating the Bellman expectation.
    """

    def __init__(self,
                 f,                # Production function
                 u,                # Utility function
                 β=0.96,           # Discount factor
                 μ=0,              # Shock location parameter
                 s=0.1,            # Shock scale parameter
                 grid_max=4,       # Largest grid point
                 grid_size=200,    # Number of grid points
                 shock_size=250):  # Number of Monte Carlo shock draws
        self.f = f
        self.u = u
        self.β = β
        self.μ = μ
        self.s = s
        # Income grid; starts slightly above zero so log utility stays finite
        self.y_grid = np.linspace(1e-5, grid_max, grid_size)
        # Lognormal shock draws used for the Monte Carlo expectation
        self.shocks = np.exp(μ + s * np.random.randn(shock_size))
def operator_factory(og, parallel_flag=True):
    """
    A function factory for building the Bellman operator, as well as
    a function that computes greedy policies.
    Here og is an instance of OptimalGrowthModel.
    Returns the pair (T, get_greedy).
    """
    # Unpack model primitives into locals so the numba-compiled closures
    # below capture plain arrays/scalars rather than the og instance.
    f, u, β = og.f, og.u, og.β
    y_grid, shocks = og.y_grid, og.shocks
    @njit
    def objective(c, v, y):
        """
        The right hand side of the Bellman equation:
        u(c) + β E[v(f(y - c) ξ)], with the expectation taken as a
        Monte Carlo average over the stored shock draws.
        """
        # First turn v into a function via interpolation
        v_func = lambda x: interp(y_grid, v, x)
        return u(c) + β * np.mean(v_func(f(y - c) * shocks))
    @njit(parallel=parallel_flag)
    def T(v):
        """
        The Bellman operator: maximise the objective in c over (0, y]
        at every grid point (parallelised over grid points via prange).
        """
        v_new = np.empty_like(v)
        for i in prange(len(y_grid)):
            y = y_grid[i]
            # Solve for optimal v at y
            v_max = brent_max(objective, 1e-10, y, args=(v, y))[1]
            v_new[i] = v_max
        return v_new
    @njit
    def get_greedy(v):
        """
        Computes the v-greedy policy of a given function v
        (the maximising consumption level at each grid point).
        """
        σ = np.empty_like(v)
        for i in range(len(y_grid)):
            y = y_grid[i]
            # Solve for optimal c at y
            c_max = brent_max(objective, 1e-10, y, args=(v, y))[0]
            σ[i] = c_max
        return σ
    return T, get_greedy
α = 0.4 # Production function parameter (capital share in Cobb-Douglas)
@njit
def f(k):
    """
    Cobb-Douglas production function: f(k) = k**α.
    JIT-compiled so it can be called from the numba-compiled operators.
    """
    return k**α
og = OptimalGrowthModel(f=f, u=np.log)  # Baseline model: log utility, Cobb-Douglas production
T, get_greedy = operator_factory(og)  # Bellman operator and greedy-policy extractor
def solve_model(og,
                use_parallel=True,
                tol=1e-4,
                max_iter=1000,
                verbose=True,
                print_skip=25):
    """Solve *og* by value function iteration and return the value function.

    Starting from v0 = log(y_grid), apply the Bellman operator until the
    sup-norm change falls below *tol* or *max_iter* iterations elapse.
    """
    bellman, _ = operator_factory(og, parallel_flag=use_parallel)
    v = np.log(og.y_grid)  # Initial guess
    i = 0
    error = tol + 1  # force at least one iteration
    while i < max_iter and error > tol:
        v_new = bellman(v)
        error = np.max(np.abs(v - v_new))
        i += 1
        if verbose and i % print_skip == 0:
            print(f"Error at iteration {i} is {error}.")
        v = v_new
    if i == max_iter:
        print("Failed to converge!")
    if verbose and i < max_iter:
        print(f"\nConverged in {i} iterations.")
    return v_new
# %% [markdown]
# ## Solution QuantEcon Q1.
# %%
def simulate_og(σ_func, og, α, y0=0.1, ts_length=100):
    """Simulate an income path of length *ts_length* under policy σ_func.

    Each period savings y - σ(y) are run through the production function
    and hit by a fresh lognormal shock: y' = (y - σ(y))**α · exp(μ + s·ξ).
    """
    innovations = np.random.randn(ts_length - 1)
    path = np.empty(ts_length)
    path[0] = y0
    for t, ξ_t in enumerate(innovations):
        savings = path[t] - σ_func(path[t])
        path[t + 1] = savings ** α * np.exp(og.μ + og.s * ξ_t)
    return path
fig, ax = plt.subplots(figsize=(9, 6))
# Solve and simulate for several discount factors to compare income paths
for β in (0.8, 0.9, 0.98):
    og = OptimalGrowthModel(f, np.log, β=β, s=0.05)
    y_grid = og.y_grid
    v_solution = solve_model(og, verbose=False)
    σ_star = get_greedy(v_solution)  # optimal consumption on the grid
    # Late-binding of y_grid/σ_star is safe here: the lambda is used within the same iteration
    σ_func = lambda x: interp(y_grid, σ_star, x)  # Define an optimal policy function
    y = simulate_og(σ_func, og, α)
    ax.plot(y, lw=2, alpha=0.6, label=rf'$\beta = {β}$')
ax.legend(loc='lower right')
plt.show()
# %% [markdown]
# ## Solution
# %%
ts_length=500
# Module-level shock draw, shared by every call of simulate_og below so that
# repeated simulations reuse the same realisation of shocks.
ξ = np.random.randn(ts_length-1)
def simulate_og(σ_func, og, α, y0=0.1, ts_length=500):
    '''
    Compute a time series given consumption policy σ.

    Returns (y, k, shocks): y is income, k[t+1] is log capital chosen at t,
    and shocks[t] is the *level* shock exp(μ + s·ξ[t]) applied at t+1.
    NOTE(review): k[0] is never assigned (np.empty leaves it arbitrary); the
    callers discard an initial burn-in so this appears benign — confirm.
    NOTE(review): shocks stores exp(φ) rather than the log shock φ; the
    theoretical variance formula is stated for φ — for small s the two
    variances are close, but verify this is intended.
    '''
    y = np.empty(ts_length)
    k = np.empty(ts_length)
    # uses the module-level ξ (deliberately not a fresh per-call draw)
    shocks = np.empty(ts_length-1)
    y[0] = y0
    for t in range(ts_length-1):
        k[t+1] = np.log(y[t] - σ_func(y[t]))  # log of capital carried into t+1
        y[t+1] = (y[t] - σ_func(y[t]))**α * np.exp(og.μ + og.s * ξ[t])
        shocks[t] = np.exp(og.μ + og.s * ξ[t])  # level (not log) of the shock
    return y, k, shocks
β=0.96
og = OptimalGrowthModel(f, np.log, β=β, s=0.05)  # baseline: log utility, shock scale 0.05
y_grid = og.y_grid
v_solution = solve_model(og, verbose=False)  # value function iteration
σ_star = get_greedy(v_solution)  # optimal consumption on the grid
σ_func = lambda x: interp(y_grid, σ_star, x)  # Define an optimal policy function
y, k, shocks = simulate_og(σ_func, og, α)
# Drop the first 10 periods as burn-in before comparing variances
vk = round(np.var(k[10:]),5)
# Theoretical benchmark: var(k) = σ²_φ / (1 - α²).
# NOTE(review): shocks holds exp(φ), not φ; var(exp(φ)) ≈ var(φ) only for small s — confirm.
vtheoretical = round(np.var(shocks[10:])/(1 - α**2),5)
print(f"\nEmpirical variance is {vk} and theoretical variance is {vtheoretical}.")
print("Thus, simulation results correspond to the theoretical result.")
# %% [markdown] {"hidden": true}
# ## Compare the Results to a linearized approximation
#
# Now numerically confirm the result from the BrockMirman handout that
#
# \begin{eqnarray}
# y_{t+1} & = & \alpha (y_{t} + \log \alpha \beta ) + \phi_{t+1}
# \end{eqnarray}
# %%
# Residual check of the linearized law of motion y_{t+1} = α(y_t + log αβ) + φ_{t+1}
x = np.empty(ts_length)
x[0] = 0
for t in range(ts_length-1):
    # NOTE(review): the shock term is added back rather than subtracted; since the
    # result is rounded to 0 decimals the displayed residuals are ~0 either way —
    # confirm the intended sign.
    x[t+1] = round(np.log(y[t+1]) - α *(np.log(y[t]) + np.log(α*β)) + (og.μ + og.s * ξ[t]),0)
x  # displayed by the notebook cell
# %% [markdown]
# # PROBLEM
# ## Suppose $\phi_{t}$ is serially correlated
#
# Now we want to consider a case where the level of productivity $\epsilon$ is serially correlated:
#
# \begin{eqnarray}
# \phi_{t} = \zeta \phi_{t-1} + \nu_{t}
# \end{eqnarray}
#
# for some shock $\nu$ with variance $\sigma^{2}_{\nu}$ and a serial correlation coefficient $0 < \zeta < 1$. Calculate the variance of $k$ under this new assumption.
# %% [markdown]
# ## Solution
#
# We have:
# $$ var(k) = \frac{\sigma ^2_{\phi}}{1 - \alpha^2} $$
#
# As $\phi$ follows an AR(1) process with serial correlation coefficient $\zeta$:
# $$ var(\phi) = \frac{\sigma^{2}_{\nu}}{1 - \zeta^2} $$
#
# Thus:
# $$ var(k) = \frac{\sigma^{2}_{\nu}}{(1 - \alpha^2)(1 - \zeta^2)} $$
#
# %% [markdown] {"heading_collapsed": true}
# # PROBLEM
# ## Now Solve and Simulate the Model
#
# Use the tools provided on the QuantEcon website to solve the model. Then, starting with a capital stock equal to the stochastic steady state, simulate the model for 100 periods five separate times. Compare the variances you have calculated numerically to the formulas you derived analytically, and make some remarks about what this means for trying to calibrate the model to data by examining the variance of $k$ in empirical data.
# %% [markdown]
# ## Solution
# %%
ρ = 0.2  # serial correlation coefficient of the productivity shock (ζ in the handout)
def simulate_ogc(σ_func, og, α, y0=0.1, ts_length=500):
    '''
    Compute a time series given consumption policy σ when the productivity
    shock follows an AR(1) process: φ_{t+1} = ρ·φ_t + ν_{t+1}, σ_ν = 0.05.

    Returns (y, k, shocks): y is income, k[t+1] is log capital chosen at t,
    and shocks holds the log shocks φ_t.
    '''
    y = np.empty(ts_length)
    k = np.empty(ts_length)
    ξ = 0.05*np.random.randn(ts_length-1)  # innovations ν_t with σ_ν = 0.05
    shocks = np.empty(ts_length)
    y[0] = y0
    k[0] = np.nan  # pre-sample capital is undefined; avoid returning uninitialised memory
    # Bug fix: draw φ_0 from the stationary distribution, whose variance is
    # σ_ν²/(1-ρ²). An N(0, σ_ν²) draw must therefore be scaled by 1/sqrt(1-ρ²);
    # the previous code divided by (1-ρ²), giving variance σ_ν²/(1-ρ²)².
    shocks[0] = ξ[0]/np.sqrt(1-ρ**2)
    for t in range(ts_length-1):
        shocks[t+1] = ρ*shocks[t] + ξ[t]
        k[t+1] = np.log(y[t] - σ_func(y[t]))
        y[t+1] = (y[t] - σ_func(y[t]))**α * np.exp(shocks[t+1])
    return y, k, shocks
β=0.96
og = OptimalGrowthModel(f, np.log, β=β, s=0.05)
y_grid = og.y_grid
v_solution = solve_model(og, verbose=False)  # value function iteration
σ_star = get_greedy(v_solution)  # optimal consumption on the grid
σ_func = lambda x: interp(y_grid, σ_star, x)  # Define an optimal policy function
y, k, shocks = simulate_ogc(σ_func, og, α)
# Burn in 200 periods so the AR(1) shock is near its stationary distribution
vk = round(np.var(k[200:]),5)
# Analytical benchmark: var(k) = σ_ν² / ((1-α²)(1-ρ²)) with σ_ν = 0.05
vtheoretical = round(0.05**2/((1 - α**2)*(1 - ρ**2)),5)
print(f"\nEmpirical variance is {vk} and theoretical variance is {vtheoretical}.")
print("Thus, simulation results does not necessarily correspond to the theoretical result, especially if ρ is high.")
# %% [markdown] {"heading_collapsed": true, "hidden": true}
# ## Now Do a Similar Exercise for the CRRA utility Model
#
# Use the QuantEcon code to solve the model for a value of relative risk aversion $\rho = 3$. Now calculate the variance of $k$ for this new model in the same way you did for the earlier model.
#
# %%
α=0.4   # production parameter (capital share)
β=0.96  # discount factor
μ=0     # shock location
s=0.05  # shock scale
rho=3   # coefficient of relative risk aversion (distinct from the serial-correlation ρ above)
@njit
def crra(c):
    """Constant-relative-risk-aversion utility: c**(1-rho)/(1-rho)."""
    return c**(1-rho)/(1-rho) # CRRA Utility
og = | |
- 1,
vm0_region[1] + 1,
vm0_region[2] - 1,
vm0_region[3] + 1,
)
# proportion in ocean
if xlen0 <= 0 or ylen0 <= 0:
logger.debug(f"Optimising skipped for {srf_meta['name']}. 100% Land coverage")
land0 = 100
optimise = False
else:
land0 = get_vm_land_proportion(o1, o2, o3, o4, wd=temp_dir)
logger.debug(f"Land coverage found to be {land0}%")
if not optimise:
logger.info(f"No optimisation for {srf_meta['name']} : no_optimise==True")
if faultprop.Mw < 3.5:
optimise = False
logger.info(f"No optimisation for {srf_meta['name']} : mw<3.5")
if land0 >= target_land_coverage:
logger.info(
f"No optimisation for {srf_meta['name']} : land coverage >= {target_land_coverage}%"
)
optimise = False
# modify VM if necessary
if optimise:
logger.info(
f"Optimising : {srf_meta['name']} has mw>= 3.5 and land coverage < {target_land_coverage}%"
)
# rotation based on centre line bearing at this point
l1 = centre_lon(vm0_region[2])
l2 = centre_lon(vm0_region[3])
mid = geo.ll_mid(l1, vm0_region[2], l2, vm0_region[3])
bearing = round(geo.ll_bearing(mid[0], mid[1], l2, vm0_region[3]))
# wanted distance is at corners, not middle top to bottom
_, xlen1, ylen1 = determine_vm_extent(
rjb, hh, srf_meta["corners"].reshape((-1, 2)), rot=bearing, wd=temp_dir
)
logger.debug(
f"Pre-optimisation origin: {origin}, bearing: {bearing}, xlen: {xlen1}, ylen: {ylen1}"
)
# cut down ocean areas
(origin, bearing, xlen1, ylen1) = reduce_domain(
origin, bearing, xlen1, ylen1, hh, temp_dir, logger=logger
)
logger.debug(
f"After optimisation origin: {origin}, bearing: {bearing}, xlen: {xlen1}, ylen: {ylen1}"
)
try:
c1, c2, c3, c4 = geo.build_corners(origin, bearing, xlen1, ylen1)
except ValueError:
error_message = f"Error for vm {srf_meta['name']}. Raising exception."
logger.log(qclogging.NOPRINTCRITICAL, error_message)
raise ValueError(error_message)
else:
logger.debug(f"Optimised corners of the VM are: {(c1, c2, c3, c4)}")
# proportion in ocean
land1 = get_vm_land_proportion(c1, c2, c3, c4, wd=temp_dir)
logger.debug(f"Optimised land coverage found to be {land1}%")
# adjust region to fit new corners
plot_region = (
min(c4[0], c1[0], c3[0], c2[0], plot_region[0]),
max(c4[0], c1[0], c3[0], c2[0], plot_region[1]),
min(c4[1], c1[1], c3[1], c2[1], plot_region[2]),
max(c4[1], c1[1], c3[1], c2[1], plot_region[3]),
)
else: # not optimised
# store original variables as final versions
ylen1 = ylen0
xlen1 = xlen0
land1 = land0
c1, c2, c3, c4 = o1, o2, o3, o4
# not enough land in final domain
if math.floor(land1) == 0:
logger.info(
"Land coverage is less than 1%. Setting xlen and ylen to 0. Not creating VM"
)
xlen1 = 0
ylen1 = 0
success = False
# zlen is independent from xlen and ylen
zlen = round(get_max_depth(faultprop.Mw, srf_meta["dbottom"]) / hh) * hh
logger.debug(f"zlen set to {zlen}")
if xlen1 == 0 or ylen1 == 0 or zlen == 0:
logger.debug(
f"All xlen={xlen1} ylen={ylen1} zlen={zlen} should be non-zero. Not creating VM"
)
success = False
bounds_invalid = validate_vm_bounds([c1, c2, c3, c4], srf_meta["corners"].tolist())
if bounds_invalid:
logger.warning(f"Bounds not valid, not making VM: {bounds_invalid}")
success = False
# modified sim time
vm_corners = np.asarray([c1, c2, c3, c4])
initial_time = get_sim_duration(
vm_corners,
np.concatenate(srf_meta["corners"], axis=0),
ds_multiplier,
fault_depth,
logger=logger,
)
sim_duration = (initial_time // dt) * dt
logger.debug(
f"Unrounded sim duration: {initial_time}. Rounded sim duration: {sim_duration}"
)
# optimisation results
return (
success,
{
"name": srf_meta["name"],
"mag": faultprop.Mw,
"dbottom": srf_meta["dbottom"],
"zlen": zlen,
"sim_time": sim_duration,
"xlen": xlen0,
"ylen": ylen0,
"land": land0,
"zlen_mod": zlen,
"sim_time_mod": sim_duration,
"xlen_mod": xlen1,
"ylen_mod": ylen1,
"land_mod": land1,
"origin": origin,
"bearing": bearing,
"adjusted": optimise,
"plot_region": plot_region,
"path": "{:.6f}\t{:.6f}\n{:.6f}\t{:.6f}\n{:.6f}\t{:.6f}\n{:.6f}\t{:.6f}\n".format(
o1[0], o1[1], o2[0], o2[1], o3[0], o3[1], o4[0], o4[1]
),
"path_mod": "{:.6f}\t{:.6f}\n{:.6f}\t{:.6f}\n{:.6f}\t{:.6f}\n{:.6f}\t{:.6f}\n".format(
c1[0], c1[1], c2[0], c2[1], c3[0], c3[1], c4[0], c4[1]
),
},
)
def main(
    srf_meta: dict,
    ds_multiplier: float,
    dt: float,
    hh: float,
    min_vs: float,
    outdir: Path,
    pgv: float,
    vm_topo: str,
    vm_version: str,
    deep_rupture: bool = False,
    target_land_coverage: float = 99.0,
    optimise: bool = True,
    plot_enabled: bool = True,
    logger: Logger = qclogging.get_basic_logger(),
):
    """
    Orchestrates conversion from rel CSV file to vm_params.yaml

    Parameters
    ----------
    srf_meta : Data extracted from REL csv file
    ds_multiplier : Multiplier used when estimating the simulation duration
    dt : Timestep used to round the estimated simulation duration (s)
    hh : Velocity model grid spacing (km)
    min_vs : Minimum Vs; used for nzvm generation and the flo parameter (km/s)
    outdir : Directory where output files are eventually saved (eg. ..../Data/VMs/Hossack)
    pgv : Max PGV at the velocity model perimeter (estimated, cm/s)
    vm_topo : topo_type parameter for velocity model generation
    vm_version : Velocity model version to generate
    deep_rupture : If true, continue even if the rupture is too deep.
    target_land_coverage : Land coverage level (%) that triggers optimisation if not met.
    optimise : Performs area optimisation if set true
    plot_enabled : If true, plot the vm domain
    logger:
    """
    # temp directory for current process
    with TemporaryDirectory(prefix=f"_tmp_{srf_meta['name']}_", dir=outdir) as temp_dir:
        temp_dir = Path(temp_dir)
        qclogging.add_general_file_handler(
            logger, outdir / f"rel2vm_params_{srf_meta['name']}_log.txt"
        )
        vm_params_path = outdir / "vm_params.yaml"
        # Determine (and possibly shrink) the VM domain
        success, vm_params_dict_extended = optimise_vm_params(
            srf_meta,
            ds_multiplier,
            dt,
            hh,
            pgv,
            temp_dir,
            deep_rupture=deep_rupture,
            optimise=optimise,
            target_land_coverage=target_land_coverage,
            logger=logger,
        )
        if success:
            # Use the (possibly optimised) "_mod" extents for the final parameters
            xlen = float(vm_params_dict_extended["xlen_mod"])
            ylen = float(vm_params_dict_extended["ylen_mod"])
            zmax = float(vm_params_dict_extended["zlen_mod"])
            zmin = 0.0
            code = "rt"
            vm_params_dict = {
                # NOTE(review): faultprop appears to be a module-level object populated
                # elsewhere — confirm it is set before main() is called.
                "mag": float(faultprop.Mw),
                "MODEL_LAT": float(vm_params_dict_extended["origin"][1]),
                "MODEL_LON": float(vm_params_dict_extended["origin"][0]),
                "MODEL_ROT": float(vm_params_dict_extended["bearing"]),
                "hh": hh,
                "min_vs": min_vs,
                "model_version": vm_version,
                "topo_type": vm_topo,
                "extent_x": xlen,
                "extent_y": ylen,
                "extent_zmax": zmax,
                "extent_zmin": zmin,
                "sim_duration": float(vm_params_dict_extended["sim_time_mod"]),
                "flo": min_vs / (5.0 * hh),
                "nx": int(round(float(xlen) / hh)),
                "ny": int(round(float(ylen) / hh)),
                "nz": int(round(float(zmax - zmin) / hh)),
                "sufx": f"_{code}01-h{hh:.3f}",
            }
            if vm_params_path.exists():
                logger.info(
                    f"Warning: {vm_params_path} already exists and is overwritten."
                )
                os.remove(vm_params_path)
            dump_yaml(vm_params_dict, vm_params_path)
            logger.debug(f"Saved {vm_params_path}")
            print(f"Success: Wrote vm_params.yaml at {outdir}", file=sys.stderr)
            # generate a corners like NZVM would have
            logger.debug("Saving VeloModCorners.txt")
            write_corner_file(outdir, vm_params_dict_extended["path_mod"])
        else:
            logger.error("Failed: Not good VM params to proceed")
        if plot_enabled:
            # Plot is produced in both the success and failure case
            plot_vm(
                vm_params_dict_extended,
                srf_meta["corners"],
                NZ_LAND_OUTLINE,
                NZ_CENTRE_LINE,
                faultprop.Mw,
                outdir,
                temp_dir,
                logger=logger,
            )
def write_corner_file(outdir: Path, paths: str):
    """
    Write VeloModCorners.txt under *outdir*.

    Parameters
    ----------
    outdir :
    paths : str of corner coordinates "Lon1\tLat1\nLon2\tLat2\nLon3\tLat3\nLon4\tLat4" where Lat or Lon are in .6f format.
    """
    corner_path = Path(outdir) / "VeloModCorners.txt"
    header = ">Velocity model corners(python generated)\n>Lon\tLat\n"
    with open(corner_path, "w") as corner_file:
        corner_file.write(header + paths)
def load_rel(rel_file: Path, logger: Logger = qclogging.get_basic_logger()):
    """
    Extract SRF metadata from a realisation (REL) CSV file.

    Parameters
    ----------
    rel_file : Path to REL csv file
    logger :

    Returns
    -------
    SRF meta data dictionary with keys:
    name, dip, rake, dbottom, corners (np.array), mag, hdepth
    """
    # name of the fault is found from the basename
    name = get_fault_from_realisation(rel_file)  # XXXX_REL01.csv --> XXXX
    logger.debug(f"Found first REL file for {name}")
    rel_df = pd.read_csv(rel_file)

    # common attributes in all types of rel csv
    def rel_meta(attr):
        # the CSV holds a single realisation row; missing columns yield None
        return rel_df[attr].loc[0] if attr in rel_df.columns else None

    # renamed from `type` so the builtin is not shadowed
    rel_type = rel_meta("type")
    if rel_type == 1:  # point source: model as a tiny 0.1 km x 0.1 km plane
        hdepth = rel_meta("depth")
        planes = [
            {
                "centre": [rel_meta("longitude"), rel_meta("latitude")],
                "length": 0.1,
                "width": 0.1,
                "strike": rel_meta("strike"),
                "dip": rel_meta("dip"),
                "dtop": hdepth,
            }
        ]
        corners, [dbottom] = get_corners_dbottom(planes)
    elif rel_type == 2:  # finite fault single plane
        planes = [
            {
                "centre": [rel_meta("longitude"), rel_meta("latitude")],
                "length": rel_meta("flen"),
                "width": rel_meta("fwid"),
                "strike": rel_meta("strike"),
                "dip": rel_meta("dip"),
                "dtop": rel_meta("dtop"),
            }
        ]
        corners, [dbottom] = get_corners_dbottom(planes)
    else:  # type 4, multi-plane (we never have type 3)
        assert rel_type != 3
        planes = []
        num_subfaults = rel_meta("plane_count")
        for i in range(num_subfaults):
            plane = {}
            plane["centre"] = [
                rel_meta(f"clon_subfault_{i}"),
                rel_meta(f"clat_subfault_{i}"),
            ]
            plane["length"] = rel_meta(f"length_subfault_{i}")
            plane["width"] = rel_meta(f"width_subfault_{i}")
            plane["strike"] = rel_meta(f"strike_subfault_{i}")
            plane["dip"] = rel_meta(f"dip_subfault_{i}")
            plane["dtop"] = rel_meta(f"dtop_subfault_{i}")
            planes.append(plane)
        corners, _ = get_corners_dbottom(planes, dip_dir=rel_meta("dip_dir"))
        dbottom = rel_meta("dbottom")
    if rel_type >= 2:  # type 2 and 4 will have hdepth computed
        # hypocentre depth = down-dip distance projected to vertical + top depth
        hdepth = np.round(rel_meta("dhypo"), decimals=1) * np.sin(
            np.radians(rel_meta("dip"))
        ) + rel_meta("dtop")
    return {
        "name": name,
        "dip": rel_meta("dip"),
        "rake": rel_meta("rake"),
        "dbottom": dbottom,
        "corners": np.array(corners),
        "mag": rel_meta("magnitude"),
        "hdepth": hdepth,
    }
def load_args():
"""
Unpacks arguments and does basic checks
Parameters
----------
Returns
-------
Processed arguments
"""
parser = ArgumentParser()
arg = parser.add_argument
arg("rel_file", help="REL csv file")
arg(
"-o",
"--outdir",
help="output directory to place VM files "
"(if not specified, the same location as rel_file is in)",
default=None,
)
arg(
"--pgv",
help="max PGV at velocity model perimiter (estimated, cm/s)",
type=float,
default=-1.0,
)
arg("--hh", help="velocity model grid spacing (km)", type=float, default=0.4)
arg(
"--dt",
help="timestep to estimate simulation duration (s) Default: hh/20",
type=float,
default=None,
)
arg("--min-vs", help="for nzvm gen and flo (km/s)", type=float, default=0.5)
arg("--vm-version", help="velocity model version to generate", default="2.06")
arg(
"--vm-topo",
help="topo_type parameter for velocity model generation",
choices=[x.value for x in constants.VelocityModelTopographyType],
default="BULLDOZED",
)
arg(
"--no-optimise",
help="Don't try and optimise the vm if it is off shore. Removes dependency on having GMT coastline data",
action="store_false",
default=True,
dest="optimise",
)
arg(
"--deep-rupture",
help="Continue even if too deep",
action="store_true",
default=False,
)
arg(
"--target-land-coverage",
help="Land coverage level (%%) that triggers optimisation if not met (Default: 99.0)",
type=float,
default=None,
)
arg(
"--min-rjb",
help="Specify a minimum horizontal distance (in km) for the VM to span from the fault"
" - invalid VMs will still not be generated",
default=0,
)
arg(
"--ds-multiplier",
help="Sets the DS | |
'result_count': result_count,
'querystring': querystring,
'qs_combined': qs_combined,
'ordering_str': ordering_str,
'p': p,
'max_pages': max_pages,
'films': films,
})
def user_comments(request, id, name_slug):
    """Profile subpage listing a user's comments, newest first, paginated."""
    selected_user = get_object_or_404(models.KTUser, pk=id)
    (number_of_votes, number_of_comments, number_of_wishes, number_of_toplists,
     number_of_messages, number_of_articles) = _get_user_profile_numbers(request, selected_user)
    page = int(request.GET.get('p', 0))
    canonical_url = reverse('user_comments', args=(selected_user.id, selected_user.slug_cache))
    if page == 1:
        # Page 1 is canonical at the unparameterized URL
        return HttpResponseRedirect(canonical_url)
    max_pages = max(1, int(math.ceil(1.0 * selected_user.number_of_comments / COMMENTS_PER_PAGE)))
    if page == 0:
        page = 1
    if page > max_pages:
        return HttpResponseRedirect(canonical_url + '?p=' + str(max_pages))
    comments_qs = selected_user.comment_set.select_related('film', 'topic', 'poll', 'reply_to', 'reply_to__created_by')
    if max_pages > 1:
        # Comments carry a per-user serial number; slice this page out of it
        last_comment = selected_user.number_of_comments - COMMENTS_PER_PAGE * (page - 1)
        first_comment = last_comment - (COMMENTS_PER_PAGE - 1)
        comments = comments_qs.filter(serial_number_by_user__lte=last_comment, serial_number_by_user__gte=first_comment)
    else:
        comments = comments_qs.all()
    return render(request, 'ktapp/user_profile_subpages/user_comments.html', {
        'active_tab': 'comments',
        'selected_user': selected_user,
        'number_of_votes': number_of_votes,
        'number_of_comments': number_of_comments,
        'number_of_wishes': number_of_wishes,
        'number_of_toplists': number_of_toplists,
        'number_of_messages': number_of_messages,
        'number_of_articles': number_of_articles,
        'tab_width': USER_PROFILE_TAB_WIDTH[request.user.is_authenticated() and request.user.id != selected_user.id],
        'comments': comments.order_by('-created_at'),
        'p': page,
        'max_pages': max_pages,
    })
def user_wishlist(request, id, name_slug):
    """Profile subpage listing films the user wishes to see ('igen'/yes),
    does not ('nem'/no), or wants to obtain ('szerez'/get), with the
    standard film-list filters applied."""
    selected_user = get_object_or_404(models.KTUser, pk=id)
    number_of_votes, number_of_comments, number_of_wishes, number_of_toplists, number_of_messages, number_of_articles = _get_user_profile_numbers(request, selected_user)
    # Map the Hungarian URL value to the single-letter wish type stored in the DB
    wishlist_type = request.GET.get('t', 'igen')
    if wishlist_type == 'nem':
        wishlist_type = 'N'
    elif wishlist_type == 'szerez':
        wishlist_type = 'G'
    else:
        wishlist_type = 'Y'
    # Constrain the film list to this user's wishes of the selected type,
    # then apply any additional filters from the querystring
    filters = [('wished_by_id', '%s:%s' % (wishlist_type, selected_user.id))] + filmlist.get_filters_from_request(request)
    films, nice_filters = filmlist.filmlist(
        user_id=request.user.id,
        filters=filters,
        ordering=('average_rating', 'DESC'),
        films_per_page=None,
    )
    # Rebuild a querystring dict from the normalized filters so the template
    # can render links that preserve the active filters
    querystring = {}
    for filter_type, filter_value in nice_filters:
        if filter_type in {'title', 'year', 'director', 'actor', 'country', 'genre', 'keyword', 'my_rating', 'other_rating', 'my_wish'}:
            querystring[filter_type] = filter_value
        elif filter_type == 'number_of_ratings':
            # Range filters are stored as "min-max"; split back into two params
            min_value, max_value = filter_value.split('-')
            querystring['num_rating_min'] = kt_utils.coalesce(min_value, '')
            querystring['num_rating_max'] = kt_utils.coalesce(max_value, '')
        elif filter_type == 'average_rating':
            min_value, max_value = filter_value.split('-')
            querystring['avg_rating_min'] = kt_utils.coalesce(min_value, '')
            querystring['avg_rating_max'] = kt_utils.coalesce(max_value, '')
        elif filter_type == 'fav_average_rating':
            min_value, max_value = filter_value.split('-')
            querystring['fav_avg_rating_min'] = kt_utils.coalesce(min_value, '')
            querystring['fav_avg_rating_max'] = kt_utils.coalesce(max_value, '')
    if wishlist_type == 'N':
        querystring['t'] = 'nem'
    if wishlist_type == 'G':
        querystring['t'] = 'szerez'
    qs_combined = '&'.join('%s=%s' % (key, val) for key, val in querystring.iteritems())
    if qs_combined != '':
        qs_combined = '&' + qs_combined
    films = list(films)
    result_count = len(films)
    return render(request, 'ktapp/user_profile_subpages/user_wishlist.html', {
        'active_tab': 'wishlist',
        'selected_user': selected_user,
        'number_of_votes': number_of_votes,
        'number_of_comments': number_of_comments,
        'number_of_wishes': number_of_wishes,
        'number_of_toplists': number_of_toplists,
        'number_of_messages': number_of_messages,
        'number_of_articles': number_of_articles,
        'tab_width': USER_PROFILE_TAB_WIDTH[request.user.is_authenticated() and request.user.id != selected_user.id],
        'result_count': result_count,
        'querystring': querystring,
        'qs_combined': qs_combined,
        'films': films,
        'wishlist_type': wishlist_type,
        'number_of_wishes_yes': selected_user.number_of_wishes_yes,
        'number_of_wishes_no': selected_user.number_of_wishes_no,
        'number_of_wishes_get': selected_user.number_of_wishes_get,
    })
def user_toplists(request, id, name_slug):
    """Profile subpage showing the user's toplists together with their items."""
    selected_user = get_object_or_404(models.KTUser, pk=id)
    (number_of_votes, number_of_comments, number_of_wishes, number_of_toplists,
     number_of_messages, number_of_articles) = _get_user_profile_numbers(request, selected_user)
    toplist_details = []
    for toplist in models.UserToplist.objects.filter(created_by=selected_user).order_by('-created_at'):
        if toplist.toplist_type == models.UserToplist.TOPLIST_TYPE_FILM:
            # Film toplists resolve their items through the shared film-list helper
            items, _ = filmlist.filmlist(
                user_id=request.user.id,
                filters=[('usertoplist_id', toplist.id)],
                ordering='serial_number',
                films_per_page=None,
            )
        else:
            # Non-film toplists (e.g. people) store items directly
            items = models.UserToplistItem.objects.filter(usertoplist=toplist).select_related('director', 'actor').order_by('serial_number')
        toplist_list = list(items)
        with_comments = any(item.comment for item in toplist_list)
        toplist_details.append((toplist, toplist_list, with_comments))
    return render(request, 'ktapp/user_profile_subpages/user_toplists.html', {
        'active_tab': 'toplists',
        'selected_user': selected_user,
        'number_of_votes': number_of_votes,
        'number_of_comments': number_of_comments,
        'number_of_wishes': number_of_wishes,
        'number_of_toplists': number_of_toplists,
        'number_of_messages': number_of_messages,
        'number_of_articles': number_of_articles,
        'tab_width': USER_PROFILE_TAB_WIDTH[request.user.is_authenticated() and request.user.id != selected_user.id],
        'toplist_details': toplist_details,
    })
def user_articles(request, id, name_slug):
    """Render the articles subpage of a user's profile.

    Merges the user's reviews ('R'), biographies ('B') and links ('A') into a
    single list of tuples, newest first. The tuples are heterogeneous: review
    and bio entries carry 5 fields, link entries carry 9; the template
    dispatches on the type marker in position 1.
    """
    selected_user = get_object_or_404(models.KTUser, pk=id)
    number_of_votes, number_of_comments, number_of_wishes, number_of_toplists, number_of_messages, number_of_articles = _get_user_profile_numbers(request, selected_user)
    articles = [
        (review.created_at, 'R', review.film, None, review.snippet + '...')
        for review in models.Review.objects.filter(created_by=selected_user).select_related('film')
    ]
    articles.extend(
        (bio.created_at, 'B', None, bio.artist, bio.snippet + '...')
        for bio in models.Biography.objects.filter(created_by=selected_user).select_related('artist')
    )
    articles.extend(
        (
            article.created_at,
            'A',
            article.film,
            article.artist,
            article.lead,
            article.url,
            article.name,
            article.link_domain,
            article.id,
        )
        for article in models.Link.objects.filter(author=selected_user).select_related('film', 'artist')
    )
    # Sort all three kinds together by creation time, newest first.
    articles.sort(key=lambda entry: entry[0], reverse=True)
    return render(request, 'ktapp/user_profile_subpages/user_articles.html', {
        'active_tab': 'articles',
        'selected_user': selected_user,
        'number_of_votes': number_of_votes,
        'number_of_comments': number_of_comments,
        'number_of_wishes': number_of_wishes,
        'number_of_toplists': number_of_toplists,
        'number_of_articles': number_of_articles,
        'number_of_messages': number_of_messages,
        'tab_width': USER_PROFILE_TAB_WIDTH[request.user.is_authenticated() and request.user.id != selected_user.id],
        'articles': articles,
    })
def user_activity(request, id, name_slug):
    """Render the activity subpage of a user's profile.

    Builds per-month and per-year vote/comment counts for the user and turns
    them into bar lengths (0-100), normalised against the user's own busiest
    period and damped by how active the user is relative to the most active
    user on the site.
    """
    selected_user = get_object_or_404(models.KTUser, pk=id)
    number_of_votes, number_of_comments, number_of_wishes, number_of_toplists, number_of_messages, number_of_articles = _get_user_profile_numbers(request, selected_user)
    cursor = connection.cursor()
    # Site-wide maxima: the highest rating/comment counts of any single user.
    max_max_vote = models.KTUser.objects.all().aggregate(Max('number_of_ratings'))['number_of_ratings__max']
    max_max_comment = models.KTUser.objects.all().aggregate(Max('number_of_comments'))['number_of_comments__max']
    # Damping factor (x**0.3) keeps bars of less active users visible.
    # NOTE(review): this raises if the aggregate is None (no users) or 0 --
    # presumably unreachable on a live site; confirm.
    scale_vote = (1.0 * selected_user.number_of_ratings / max_max_vote)**0.3
    scale_comment = (1.0 * selected_user.number_of_comments / max_max_comment)**0.3
    # Years from signup to today, newest first.
    min_year = selected_user.date_joined.year
    max_year = datetime.date.today().year
    years = range(max_year, min_year - 1, -1)
    min_month = selected_user.date_joined.month
    max_month = datetime.date.today().month
    # Build 'YYYY-MM' buckets covering signup month .. current month, newest first.
    months = []
    if len(years) == 1:
        # Signup year is the current year: only months between signup and now.
        for month in range(max_month, min_month - 1, -1):
            months.append('%04d-%02d' % (years[0], month))
    else:
        for year in years:
            if year == max_year:
                # Current year: January .. current month.
                for month in range(max_month, 0, -1):
                    months.append('%04d-%02d' % (year, month))
            elif year == min_year:
                # Signup year: signup month .. December.
                for month in range(12, min_month - 1, -1):
                    months.append('%04d-%02d' % (year, month))
            else:
                # Full years in between.
                for month in range(12, 0, -1):
                    months.append('%04d-%02d' % (year, month))
    # Switch to string keys so they match the SQL LEFT(..., 4) prefixes below.
    years = ['%04d' % y for y in years]
    # Counts keyed by month ('m') and year ('y') bucket string.
    vote_data = {
        'm': {},
        'y': {},
    }
    comment_data = {
        'm': {},
        'y': {},
    }
    # The user's own busiest month/year, used to normalise bar lengths.
    max_vote = {
        'm': 0,
        'y': 0,
    }
    max_comment = {
        'm': 0,
        'y': 0,
    }
    # LEFT(`when`, 7) -> 'YYYY-MM' month bucket; `when` is the vote timestamp.
    cursor.execute('SELECT LEFT(`when`, 7) AS dt, COUNT(1) FROM ktapp_vote WHERE user_id = %s AND `when` IS NOT NULL GROUP BY dt', [selected_user.id])
    for row in cursor.fetchall():
        vote_data['m'][row[0]] = row[1]
        if row[1] > max_vote['m']:
            max_vote['m'] = row[1]
    # LEFT(`when`, 4) -> 'YYYY' year bucket.
    cursor.execute('SELECT LEFT(`when`, 4) AS dt, COUNT(1) FROM ktapp_vote WHERE user_id = %s AND `when` IS NOT NULL GROUP BY dt', [selected_user.id])
    for row in cursor.fetchall():
        vote_data['y'][row[0]] = row[1]
        if row[1] > max_vote['y']:
            max_vote['y'] = row[1]
    # Same two aggregations for comments.
    cursor.execute('SELECT LEFT(created_at, 7) AS dt, COUNT(1) FROM ktapp_comment WHERE created_by_id = %s AND created_at IS NOT NULL GROUP BY dt', [selected_user.id])
    for row in cursor.fetchall():
        comment_data['m'][row[0]] = row[1]
        if row[1] > max_comment['m']:
            max_comment['m'] = row[1]
    cursor.execute('SELECT LEFT(created_at, 4) AS dt, COUNT(1) FROM ktapp_comment WHERE created_by_id = %s AND created_at IS NOT NULL GROUP BY dt', [selected_user.id])
    for row in cursor.fetchall():
        comment_data['y'][row[0]] = row[1]
        if row[1] > max_comment['y']:
            max_comment['y'] = row[1]
    # Tuples: (bucket label, vote count, comment count, vote bar %, comment bar %).
    data_month = []
    for month in months:
        data_month.append((
            month,
            vote_data['m'].get(month, 0),
            comment_data['m'].get(month, 0),
            int(100.0 * scale_vote * vote_data['m'].get(month, 0) / max_vote['m']) if max_vote['m'] > 0 else 0,
            int(100.0 * scale_comment * comment_data['m'].get(month, 0) / max_comment['m']) if max_comment['m'] > 0 else 0,
        ))
    data_year = []
    for year in years:
        data_year.append((
            year,
            vote_data['y'].get(year, 0),
            comment_data['y'].get(year, 0),
            int(100.0 * scale_vote * vote_data['y'].get(year, 0) / max_vote['y']) if max_vote['y'] > 0 else 0,
            int(100.0 * scale_comment * comment_data['y'].get(year, 0) / max_comment['y']) if max_comment['y'] > 0 else 0,
        ))
    return render(request, 'ktapp/user_profile_subpages/user_activity.html', {
        'active_tab': 'activity',
        'selected_user': selected_user,
        'number_of_votes': number_of_votes,
        'number_of_comments': number_of_comments,
        'number_of_wishes': number_of_wishes,
        'number_of_toplists': number_of_toplists,
        'number_of_messages': number_of_messages,
        'number_of_articles': number_of_articles,
        'tab_width': USER_PROFILE_TAB_WIDTH[request.user.is_authenticated() and request.user.id != selected_user.id],
        'data_month': data_month,
        'data_year': data_year,
    })
@login_required()
def user_messages(request, id, name_slug):
    """Render the private-message history between the logged-in user and another user.

    Pagination: ``?p=`` selects the page; page 1 redirects to the canonical
    URL without a query string, and out-of-range pages redirect to the last page.
    """
    selected_user = get_object_or_404(models.KTUser, pk=id)
    number_of_votes, number_of_comments, number_of_wishes, number_of_toplists, number_of_messages, number_of_articles = _get_user_profile_numbers(request, selected_user)
    # Only private messages owned by the requester, exchanged with the selected user.
    messages_qs = models.Message.objects.filter(private=True).filter(owned_by=request.user).filter(
        Q(sent_by=selected_user)
        | Q(sent_to=selected_user)
    ).select_related('sent_by')
    try:
        page = int(request.GET.get('p', 0))
    except ValueError:
        page = 0
    if page == 1:
        # Canonicalise: page 1 lives at the bare URL.
        return HttpResponseRedirect(reverse('user_messages', args=(selected_user.id, selected_user.slug_cache)))
    max_pages = max(int(math.ceil(1.0 * number_of_messages / MESSAGES_PER_PAGE)), 1)
    if page == 0:
        page = 1
    if page > max_pages:
        return HttpResponseRedirect(reverse('user_messages', args=(selected_user.id, selected_user.slug_cache)) + '?p=' + str(max_pages))
    return render(request, 'ktapp/user_profile_subpages/user_messages.html', {
        'active_tab': 'messages',
        'selected_user': selected_user,
        'number_of_votes': number_of_votes,
        'number_of_comments': number_of_comments,
        'number_of_wishes': number_of_wishes,
        'number_of_toplists': number_of_toplists,
        'number_of_messages': number_of_messages,
        'number_of_articles': number_of_articles,
        'tab_width': USER_PROFILE_TAB_WIDTH[request.user.is_authenticated() and request.user.id != selected_user.id],
        'messages': messages_qs.order_by('-sent_at')[(page - 1) * MESSAGES_PER_PAGE:page * MESSAGES_PER_PAGE],
        'p': page,
        'max_pages': max_pages,
    })
@login_required()
def edit_profile(request):
def set_fav(field_name, domain, get_object_function):
old_items = set()
for item in models.UserFavourite.objects.filter(user=request.user, domain=domain):
old_items.add(item.fav_id)
new_items = set()
for name in kt_utils.strip_whitespace(request.POST.get(field_name, '')).split(','):
name = kt_utils.strip_whitespace(name)
if name:
item = get_object_function(name)
if item:
new_items.add(item.id)
for item_id in old_items - new_items:
models.UserFavourite.objects.filter(user=request.user, domain=domain, fav_id=item_id).delete()
for item_id in new_items - old_items:
models.UserFavourite.objects.create(user=request.user, domain=domain, fav_id=item_id)
next_url = request.GET.get('next', request.POST.get('next', reverse('user_profile', args=(request.user.id, request.user.slug_cache))))
if request.POST:
if request.POST.get('t', '') == 'pic':
if request.POST.get('a', '') == 'del':
if request.user.profile_pic:
request.user.profile_pic.delete()
request.user.profile_pic = None
request.user.save()
models.Event.objects.create(
user=request.user,
event_type=models.Event.EVENT_TYPE_DELETE_PROFILE_PIC,
)
else:
if 'img' in request.FILES:
picture = models.Picture.objects.create(
img=request.FILES['img'],
picture_type=models.Picture.PICTURE_TYPE_USER_PROFILE,
created_by=request.user,
user=request.user,
)
request.user.profile_pic = picture
request.user.save()
models.Event.objects.create(
user=request.user,
event_type=models.Event.EVENT_TYPE_UPLOAD_PROFILE_PIC,
)
return HttpResponseRedirect(next_url)
request.user.bio = request.POST.get('bio', '').strip()
gender = request.POST.get('gender', '')
if gender not in {'U', 'M', 'F'}:
gender = 'U'
request.user.gender = gender
try:
request.user.year_of_birth = int(request.POST.get('year_of_birth', 0))
except ValueError:
request.user.year_of_birth = 0
request.user.location = kt_utils.strip_whitespace(request.POST.get('location', ''))
request.user.public_gender = bool(request.POST.get('public_gender', ''))
request.user.public_year_of_birth = bool(request.POST.get('public_year_of_birth', ''))
request.user.public_location = bool(request.POST.get('public_location', ''))
set_fav('fav_director', models.UserFavourite.DOMAIN_DIRECTOR, models.Artist.get_artist_by_name)
set_fav('fav_actor', models.UserFavourite.DOMAIN_ACTOR, models.Artist.get_artist_by_name)
set_fav('fav_genre', models.UserFavourite.DOMAIN_GENRE, lambda name: models.Keyword.get_keyword_by_name(name, models.Keyword.KEYWORD_TYPE_GENRE))
set_fav('fav_country', models.UserFavourite.DOMAIN_COUNTRY, lambda name: models.Keyword.get_keyword_by_name(name, models.Keyword.KEYWORD_TYPE_COUNTRY))
request.user.fav_period = kt_utils.strip_whitespace(request.POST.get('fav_period', ''))
request.user.save()
models.Event.objects.create(
user=request.user,
event_type=models.Event.EVENT_TYPE_EDIT_PROFILE,
)
return HttpResponseRedirect(next_url)
number_of_votes, number_of_comments, number_of_wishes, number_of_toplists, number_of_messages, number_of_articles | |
+ w * 1j * t_values[39]))
+ (R_values[40] / (1 + w * 1j * t_values[40]))
+ (R_values[41] / (1 + w * 1j * t_values[41]))
+ (R_values[42] / (1 + w * 1j * t_values[42]))
+ (R_values[43] / (1 + w * 1j * t_values[43]))
+ (R_values[44] / (1 + w * 1j * t_values[44]))
+ (R_values[45] / (1 + w * 1j * t_values[45]))
+ (R_values[46] / (1 + w * 1j * t_values[46]))
+ (R_values[47] / (1 + w * 1j * t_values[47]))
+ (R_values[48] / (1 + w * 1j * t_values[48]))
+ (R_values[49] / (1 + w * 1j * t_values[49]))
+ (R_values[50] / (1 + w * 1j * t_values[50]))
+ (R_values[51] / (1 + w * 1j * t_values[51]))
+ (R_values[52] / (1 + w * 1j * t_values[52]))
+ (R_values[53] / (1 + w * 1j * t_values[53]))
+ (R_values[54] / (1 + w * 1j * t_values[54]))
+ (R_values[55] / (1 + w * 1j * t_values[55]))
+ (R_values[56] / (1 + w * 1j * t_values[56]))
+ (R_values[57] / (1 + w * 1j * t_values[57]))
)
def KK_RC59(w, Rs, R_values, t_values):
    """
    Kramers-Kronig Function: -RC-

    Impedance of a series resistance Rs followed by 59 parallel RC elements:

        Z(w) = Rs + sum_{i=0}^{58} R_i / (1 + j * w * t_i)

    Parameters
    ----------
    w : scalar or array-like
        Angular frequency (rad/s).
    Rs : float
        Series (ohmic) resistance.
    R_values : sequence
        Resistances of the RC elements; only the first 59 entries are used.
    t_values : sequence
        Time constants of the RC elements; only the first 59 entries are used.

    Returns
    -------
    complex (or complex array, matching w)
        The complex impedance at w.
    """
    # Replaces 59 hand-written identical terms with a loop over exactly the
    # indices (0..58) the original expression used.
    return Rs + sum(R_values[i] / (1 + w * 1j * t_values[i]) for i in range(59))
def KK_RC60(w, Rs, R_values, t_values):
"""
Kramers-Kronig Function: -RC-
<NAME> (<EMAIL> / <EMAIL>)
"""
return (
Rs
+ (R_values[0] / (1 + w * 1j * t_values[0]))
+ (R_values[1] / (1 + w * 1j * t_values[1]))
+ (R_values[2] / (1 + w * 1j * t_values[2]))
+ (R_values[3] / (1 + w * 1j * t_values[3]))
+ (R_values[4] / (1 + w * 1j * t_values[4]))
+ (R_values[5] / (1 + w * 1j * t_values[5]))
+ (R_values[6] / (1 + w * 1j * t_values[6]))
+ (R_values[7] / (1 + w * 1j * t_values[7]))
+ (R_values[8] / (1 + w * 1j * t_values[8]))
+ (R_values[9] / (1 + w * 1j * t_values[9]))
+ (R_values[10] / (1 + w * 1j * t_values[10]))
+ (R_values[11] / (1 + w * 1j * t_values[11]))
+ (R_values[12] / (1 + w * 1j * t_values[12]))
+ (R_values[13] / (1 + w * 1j * t_values[13]))
+ (R_values[14] / (1 + w * 1j * t_values[14]))
+ (R_values[15] / (1 + w * 1j * t_values[15]))
+ (R_values[16] / (1 + w * 1j * t_values[16]))
+ (R_values[17] / (1 + w * 1j * t_values[17]))
+ (R_values[18] / (1 + w * 1j * t_values[18]))
+ (R_values[19] / (1 + w * 1j * t_values[19]))
+ (R_values[20] / (1 + w * 1j * t_values[20]))
+ (R_values[21] / (1 + w * 1j * t_values[21]))
+ (R_values[22] / (1 + w * 1j * t_values[22]))
+ (R_values[23] / (1 + w * 1j * t_values[23]))
+ (R_values[24] / (1 + w * 1j * t_values[24]))
+ (R_values[25] / (1 + w * 1j * t_values[25]))
+ (R_values[26] / (1 + w * 1j * t_values[26]))
+ (R_values[27] / (1 + w * 1j * t_values[27]))
+ (R_values[28] / (1 + w * 1j * t_values[28]))
+ (R_values[29] / (1 + w * 1j * t_values[29]))
+ (R_values[30] / (1 + w * 1j * t_values[30]))
+ (R_values[31] / (1 + w * 1j * t_values[31]))
+ (R_values[32] / (1 + w * 1j * t_values[32]))
+ (R_values[33] / (1 + w * 1j * t_values[33]))
+ (R_values[34] / (1 + w * 1j * t_values[34]))
+ (R_values[35] / (1 + w * 1j * t_values[35]))
+ (R_values[36] / (1 + w * 1j * t_values[36]))
+ (R_values[37] / (1 + w * 1j * t_values[37]))
+ (R_values[38] / (1 + w * 1j * t_values[38]))
+ (R_values[39] / (1 + w * 1j * t_values[39]))
+ (R_values[40] / (1 + w * 1j * t_values[40]))
+ (R_values[41] / (1 + w * 1j * t_values[41]))
+ (R_values[42] / (1 + w * 1j * t_values[42]))
+ (R_values[43] / (1 + w * 1j * t_values[43]))
+ (R_values[44] / (1 + w * 1j * t_values[44]))
+ (R_values[45] / (1 + w * 1j * t_values[45]))
+ (R_values[46] / (1 + w * 1j * t_values[46]))
| |
to test that treats integers as the latest minor within the major."""
return request.param
@pytest.fixture(params=[
    (iati.resources.create_codelist_path, iati.resources.FILE_CODELIST_EXTENSION, iati.resources.PATH_CODELISTS),
    (iati.resources.create_ruleset_path, iati.resources.FILE_RULESET_EXTENSION, iati.resources.PATH_RULESETS),
    (iati.resources.create_schema_path, iati.resources.FILE_SCHEMA_EXTENSION, iati.resources.PATH_SCHEMAS)
])
def func_plus_expected_data(self, request):
    """Return a tuple containing a function to test, plus the extension and a component that should be present in the returned path."""
    path_func, extension, component = request.param
    output = collections.namedtuple('output', 'func_to_test expected_extension expected_component')
    return output(func_to_test=path_func, expected_extension=extension, expected_component=component)
def test_create_path_minor_known(self, filename_no_meaning, std_ver_minor_independent_mixedinst_valid_known, func_plus_expected_data):
    """Check that the expected components are present in a path from a generation function at a known minor or independent version of the Standard."""
    version = std_ver_minor_independent_mixedinst_valid_known
    full_path = func_plus_expected_data.func_to_test(filename_no_meaning, version)
    assert isinstance(full_path, str)
    assert full_path.endswith(filename_no_meaning + func_plus_expected_data.expected_extension)
    assert iati.resources.folder_name_for_version(version) in full_path
    assert func_plus_expected_data.expected_component in full_path
def test_create_path_major_known_codelists(self, filename_no_meaning_single, std_ver_major_uninst_valid_known):
    """Check that a generation function returns a value for a major version.

    This is relevant to Codelists, but not other components. These are tested separately.
    """
    full_path = iati.resources.create_codelist_path(filename_no_meaning_single, std_ver_major_uninst_valid_known)
    version_folder = iati.resources.folder_name_for_version(std_ver_major_uninst_valid_known)
    assert isinstance(full_path, str)
    assert full_path.endswith(filename_no_meaning_single + iati.resources.FILE_CODELIST_EXTENSION)
    # The version folder must appear as a full path component, not a substring.
    assert '{0}{1}{0}'.format(os.path.sep, version_folder) in full_path
    assert iati.resources.PATH_CODELISTS in full_path
def test_create_path_major_known_decimalised_integers(self, filename_no_meaning_single, std_ver_major_uninst_valid_known, func_to_test_decimalised_integers):
    """Check that a generation function returns the same value for a major version as the last minor within the major.

    This is relevant to some Standard components, though not all. As such, it uses a different fixture to other functions in this class.
    """
    last_minor = max(iati.version.versions_for_integer(std_ver_major_uninst_valid_known))
    assert func_to_test_decimalised_integers(filename_no_meaning_single, std_ver_major_uninst_valid_known) == func_to_test_decimalised_integers(filename_no_meaning_single, last_minor)
def test_create_path_no_version(self, filename_no_meaning_single, func_to_test):
    """Check that specifying a version of the Standard to create a path for is required."""
    # The version parameter has no default, so omitting it is a TypeError.
    with pytest.raises(TypeError):
        func_to_test(filename_no_meaning_single)
def test_create_path_unknown(self, filename_no_meaning_single, std_ver_all_mixedinst_valid_unknown, func_to_test):
    """Check that a ValueError is raised when using a generation function to create a path for a at an unknown version of the Standard."""
    # A syntactically valid but unknown version must be rejected.
    with pytest.raises(ValueError):
        func_to_test(filename_no_meaning_single, std_ver_all_mixedinst_valid_unknown)
def test_create_path_ver_typerr(self, filename_no_meaning_single, std_ver_all_uninst_typeerr, func_to_test):
    """Check that a TypeError is raised when using a generation function to create a path from a version of an incorrect type."""
    # Values of a type that can never represent a version must raise TypeError.
    with pytest.raises(TypeError):
        func_to_test(filename_no_meaning_single, std_ver_all_uninst_typeerr)
def test_create_path_path_valueerr(self, filepath_invalid_value, std_ver_minor_inst_valid_single, func_to_test):
    """Check that a ValueError is raised when providing a generation function a path to work from that is a string that cannot be a filepath."""
    # A string that cannot form a filepath must be rejected with ValueError.
    with pytest.raises(ValueError):
        func_to_test(filepath_invalid_value, std_ver_minor_inst_valid_single)
def test_create_path_path_typeerr(self, filepath_invalid_type, std_ver_minor_inst_valid_single, func_to_test):
    """Check that a TypeError is raised when providing a generation function a path to work from that is of a type that cannot be a filepath."""
    # A non-string path argument must be rejected with TypeError.
    with pytest.raises(TypeError):
        func_to_test(filepath_invalid_type, std_ver_minor_inst_valid_single)
class TestResourceGetCodelistPaths:
    """A container for get_codelist_paths() tests."""

    def test_find_codelist_paths(self, codelist_lengths_by_version):
        """Check that all codelist paths are being found.

        This covers major, minor and version-independent.
        """
        decimalised_version = iati.version._decimalise_integer(codelist_lengths_by_version.version)  # pylint: disable=protected-access
        expected_root = iati.resources.path_for_version(iati.resources.PATH_CODELISTS, decimalised_version)
        paths = iati.resources.get_codelist_paths(codelist_lengths_by_version.version)
        # No duplicates, and exactly the expected number of Codelists.
        assert len(set(paths)) == len(paths)
        assert len(paths) == codelist_lengths_by_version.expected_length
        for codelist_path in paths:
            assert codelist_path[-4:] == iati.resources.FILE_CODELIST_EXTENSION
            assert expected_root in codelist_path
            assert os.path.isfile(codelist_path)

    def test_get_codelist_mapping_paths_independent(self):
        """Test getting a list of version-independent Codelist files.

        Todo:
            Look to better determine how to access the different categories of Codelist.
        """
        assert iati.resources.get_codelist_paths(iati.version.STANDARD_VERSION_ANY) == []

    def test_get_codelist_paths_minor_partsupport(self, std_ver_minor_mixedinst_valid_partsupport):
        """Test getting a list of Codelist paths. The requested version is partially supported by pyIATI."""
        assert iati.resources.get_codelist_paths(std_ver_minor_mixedinst_valid_partsupport) == []

    def test_get_codelist_paths_minor_unknown(self, std_ver_all_mixedinst_valid_unknown):
        """Test getting a list of Codelist paths. The requested version is not known by pyIATI."""
        assert iati.resources.get_codelist_paths(std_ver_all_mixedinst_valid_unknown) == []
class TestResourceGetCodelistMappingPaths:
    """A container for get_codelist_mapping_paths() tests.

    Note:
        This class contains very similar tests to the equivalent for Rulesets. They are different because the Ruleset creation function takes two arguments, not one.
    """

    def test_get_codelist_mapping_paths_minor_fullsupport(self, std_ver_minor_mixedinst_valid_fullsupport):
        """Test getting a list of Codelist Mapping paths. The requested version is fully supported by pyIATI."""
        expected_path = iati.resources.create_codelist_mapping_path(std_ver_minor_mixedinst_valid_fullsupport)
        result = iati.resources.get_codelist_mapping_paths(std_ver_minor_mixedinst_valid_fullsupport)
        assert result == [expected_path]
        assert os.path.isfile(expected_path)

    def test_get_codelist_mapping_paths_independent(self):
        """Test getting a list of version-independent Codelist Mapping files."""
        assert iati.resources.get_codelist_mapping_paths(iati.version.STANDARD_VERSION_ANY) == []

    def test_get_codelist_mapping_paths_minor_partsupport(self, std_ver_minor_mixedinst_valid_partsupport):
        """Test getting a list of Codelist Mapping paths. The requested version is partially supported by pyIATI."""
        assert iati.resources.get_codelist_mapping_paths(std_ver_minor_mixedinst_valid_partsupport) == []

    def test_get_codelist_mapping_paths_minor_unknown(self, std_ver_all_mixedinst_valid_unknown):
        """Test getting a list of Codelist Mapping paths. The requested version is not known by pyIATI."""
        assert iati.resources.get_codelist_mapping_paths(std_ver_all_mixedinst_valid_unknown) == []

    def test_get_codelist_mapping_paths_major_known(self, std_ver_major_uninst_valid_known):
        """Test getting a list of Codelist Mapping paths. The requested version is a known integer version. The list should contain paths for each supported minor within the major."""
        supported_minors = [
            version
            for version in iati.version.versions_for_integer(std_ver_major_uninst_valid_known)
            if version in iati.version.STANDARD_VERSIONS_SUPPORTED
        ]
        result = iati.resources.get_codelist_mapping_paths(std_ver_major_uninst_valid_known)
        assert len(result) == len(supported_minors)
        assert all(iati.resources.create_codelist_mapping_path(version) in result for version in supported_minors)
class TestResourceGetRulesetPaths:
    """A container for get_ruleset_paths() tests."""

    def test_get_ruleset_paths_minor_fullsupport(self, std_ver_minor_mixedinst_valid_fullsupport):
        """Test getting a list of Ruleset paths. The requested version is fully supported by pyIATI."""
        expected_path = iati.resources.create_ruleset_path(iati.resources.FILE_RULESET_STANDARD_NAME, std_ver_minor_mixedinst_valid_fullsupport)
        result = iati.resources.get_ruleset_paths(std_ver_minor_mixedinst_valid_fullsupport)
        assert result == [expected_path]
        assert os.path.isfile(expected_path)

    def test_get_ruleset_paths_independent(self):
        """Test getting a list of version-independent standard Rulesets."""
        assert iati.resources.get_ruleset_paths(iati.version.STANDARD_VERSION_ANY) == []

    def test_get_ruleset_paths_minor_partsupport(self, std_ver_minor_mixedinst_valid_partsupport):
        """Test getting a list of Ruleset paths. The requested version is partially supported by pyIATI."""
        assert iati.resources.get_ruleset_paths(std_ver_minor_mixedinst_valid_partsupport) == []

    def test_get_ruleset_paths_minor_unknown(self, std_ver_all_mixedinst_valid_unknown):
        """Test getting a list of Ruleset paths. The requested version is not known by pyIATI."""
        assert iati.resources.get_ruleset_paths(std_ver_all_mixedinst_valid_unknown) == []

    def test_get_ruleset_paths_major_known(self, std_ver_major_uninst_valid_known):
        """Test getting a list of Ruleset paths. The requested version is a known integer version. The list should contain paths for each supported minor within the major."""
        supported_minors = [
            version
            for version in iati.version.versions_for_integer(std_ver_major_uninst_valid_known)
            if version in iati.version.STANDARD_VERSIONS_SUPPORTED
        ]
        result = iati.resources.get_ruleset_paths(std_ver_major_uninst_valid_known)
        assert len(result) == len(supported_minors)
        assert all(
            iati.resources.create_ruleset_path(iati.resources.FILE_RULESET_STANDARD_NAME, version) in result
            for version in supported_minors
        )
class TestResourceGetSchemaPaths:
    """A container for get_x_schema_paths() tests."""

    @pytest.fixture(params=[
        (iati.resources.get_activity_schema_paths, iati.resources.FILE_SCHEMA_ACTIVITY_NAME),
        (iati.resources.get_organisation_schema_paths, iati.resources.FILE_SCHEMA_ORGANISATION_NAME)
    ])
    def func_and_name(self, request):
        """Return a named tuple containing a function to generate the paths for a type of Schema, plus the name of the Schema."""
        path_func, schema_name = request.param
        output = collections.namedtuple('output', 'func schema_name')
        return output(func=path_func, schema_name=schema_name)

    @pytest.fixture(params=[
        iati.resources.get_all_schema_paths,
        iati.resources.get_activity_schema_paths,
        iati.resources.get_organisation_schema_paths
    ])
    def schema_path_func_all(self, request):
        """Return a function that returns a list of paths for Schema resources."""
        return request.param

    def test_get_schema_paths_minor_known(self, std_ver_minor_mixedinst_valid_known, func_and_name):
        """Test getting a list of Org or Activity Schema paths. The requested version is known by pyIATI."""
        expected_path = iati.resources.create_schema_path(func_and_name.schema_name, std_ver_minor_mixedinst_valid_known)
        result = func_and_name.func(std_ver_minor_mixedinst_valid_known)
        assert result == [expected_path]
        assert os.path.isfile(expected_path)

    def test_get_schema_paths_minor_unknown(self, std_ver_all_mixedinst_valid_unknown, schema_path_func_all):
        """Test getting a list of Org or Activity Schema paths. The requested version is not known by pyIATI."""
        assert schema_path_func_all(std_ver_all_mixedinst_valid_unknown) == []

    def test_get_schema_paths_independent(self, schema_path_func_all):
        """Test getting a list of version-independent Org or Activity Schemas."""
        assert schema_path_func_all(iati.version.STANDARD_VERSION_ANY) == []

    def test_get_schema_paths_major_known(self, std_ver_major_uninst_valid_known, func_and_name):
        """Test getting a list of Org or Activity Schema paths. The requested version is a known integer version. The list should contain paths for each supported minor within the major."""
        minor_versions = list(iati.version.versions_for_integer(std_ver_major_uninst_valid_known))
        result = func_and_name.func(std_ver_major_uninst_valid_known)
        assert len(result) == len(minor_versions)
        assert all(
            iati.resources.create_schema_path(func_and_name.schema_name, minor) in result
            for minor in minor_versions
        )

    def test_get_all_schema_paths_minor_known(self, std_ver_minor_mixedinst_valid_known):
        """Test getting a list of all Schema paths. The requested version is known by pyIATI."""
        result = iati.resources.get_all_schema_paths(std_ver_minor_mixedinst_valid_known)
        assert len(result) == 2
        assert iati.resources.get_activity_schema_paths(std_ver_minor_mixedinst_valid_known)[0] in result
        assert iati.resources.get_organisation_schema_paths(std_ver_minor_mixedinst_valid_known)[0] in result

    def test_get_all_schema_paths_major_known(self, std_ver_major_uninst_valid_known):
        """Test getting a list of all Schema paths. The requested version is a known integer version. The list should contain paths for each supported minor within the major."""
        minor_versions = list(iati.version.versions_for_integer(std_ver_major_uninst_valid_known))
        activity_paths = iati.resources.get_activity_schema_paths(std_ver_major_uninst_valid_known)
        org_paths = iati.resources.get_organisation_schema_paths(std_ver_major_uninst_valid_known)
        result = iati.resources.get_all_schema_paths(std_ver_major_uninst_valid_known)
        assert len(result) == len(minor_versions) * 2
        assert all(path in result for path in activity_paths)
        assert all(path in result for path in org_paths)
class TestResourceGetPathsNotAVersion:
"""A container for get_*_paths() tests where the function is provided a value that cannot represent a version."""
@pytest.fixture(params=[
    iati.resources.get_codelist_paths,
    iati.resources.get_codelist_mapping_paths,
    iati.resources.get_ruleset_paths,
    iati.resources.get_all_schema_paths,
    iati.resources.get_activity_schema_paths,
    iati.resources.get_organisation_schema_paths
])
def func_to_test(self, request):
    """Return a function to test the behavior of. The function takes a single argument, which takes a value that can represent a version number."""
    # Parametrises each test over every get_*_paths() function so they are all
    # checked against values that cannot represent a version.
    return request.param
def test_get_x_path_valueerr(self, std_ver_all_uninst_valueerr, func_to_test):
"""Check that a ValueError is raised when requesting paths for an value that cannot | |
from sklearn.linear_model import LinearRegression
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
from required_modules.amplitude_encoding import *
from required_modules import gaussian_decoding as gd
import tqdm
def standardize_data(X):
    '''
    This function standardizes an array: it subtracts each column's mean
    value, and then divides by that column's standard deviation (z-score).

    param 1: 2-D array of shape (rows, columns)
    return: standardized array of the same shape
    '''
    X = np.asarray(X, dtype=float)
    # Vectorized column-wise z-score. Replaces the original per-element
    # np.append loop, which rebuilt the temp array on every append
    # (O(rows**2) work per column) while producing the same values.
    return (X - X.mean(axis=0)) / X.std(axis=0)
def fidelity(s1, s2):
    """Return the fidelity |<s1|s2>|^2 between two complex state vectors."""
    overlap = np.vdot(s1, s2)
    # conj(u) * u is exactly what np.vdot(u, u) computes for a scalar u.
    return (np.conj(overlap) * overlap).real
def reset_dec_eff_anz_q_net(n_qubits):
    """(Re)build the decoder QNode on a fresh ``n_qubits``-wire device.

    The QNode is published as the module-level global ``dec_eff_anz_q_net``
    so other code (e.g. the HQA model) can call it directly.

    Args:
        n_qubits: number of wires of the simulated device.
    """
    dev = qml.device("default.qubit", wires=n_qubits)
    global dec_eff_anz_q_net
    @qml.qnode(dev, interface="torch")
    def dec_eff_anz_q_net(q_weights_flat, amplitudes=None, amplitude_wires=None, q_depth=None,
                          reg_size=None, n_qubits=None):
        """Map an amplitude-encoded state to a latent vector of Pauli-Z
        expectation values on the first ``reg_size`` wires.

        Note: the inner ``n_qubits`` parameter shadows the outer one on
        purpose — callers pass the circuit width explicitly.
        """
        # Default: load the amplitudes across all wires (None or empty list).
        if not amplitude_wires:
            amplitude_wires = list(range(n_qubits))
        qml.QubitStateVector(amplitudes, wires=amplitude_wires)
        # Reshape weights: one row of n_qubits angles per layer.
        q_weights = q_weights_flat.reshape(q_depth, n_qubits)
        vqc.H_layer(n_qubits)
        # Sequence of trainable variational layers, alternating RZ / RY
        # rotations, each followed by an entangling layer.
        for k in range(q_depth):
            if k % 2:
                vqc.RZ_layer(q_weights[k])
            else:
                vqc.RY_layer(q_weights[k])
            vqc.entangling_layer(n_qubits)
        # The latent vector: one <Z> expectation per latent dimension.
        exp_vals = [qml.expval(qml.PauliZ(i)) for i in range(reg_size)]
        return tuple(exp_vals)
def reset_enc_train_eff_anz_q_net(n_qubits):
    """(Re)build the SWAP-test training QNode on ``n_qubits`` wires.

    The QNode is published as the module-level global
    ``enc_train_eff_anz_q_net``.  Despite the "enc" in the name it is the
    *training* circuit: it prepares the target amplitudes on the second
    register, runs the variational ansatz on the first, and measures a
    SWAP test between them on the ancilla.
    """
    dev = qml.device("default.qubit", wires=n_qubits)
    global enc_train_eff_anz_q_net
    @qml.qnode(dev, interface="torch")
    def enc_train_eff_anz_q_net(q_weights_flat, amplitudes=None, init_rot=None, q_depth=None, n_qubits=None):
        """SWAP-test circuit comparing the target state to the ansatz output.

        Returns <Z> on the ancilla (wire 0); 1 indicates matching states.
        NOTE(review): ``init_rot`` is accepted but never used — the
        commented-out RY_layer call below suggests it was meant to seed an
        initial rotation; confirm before removing the parameter.
        """
        # Load the target state on the second register (wires n_qubits+1 .. 2*n_qubits).
        qml.QubitStateVector(amplitudes, wires=list(range(n_qubits + 1, n_qubits*2+1)))
        # Reshape weights: one row of n_qubits angles per layer.
        q_weights = q_weights_flat.reshape(q_depth, n_qubits)
        # Hadamards on the ancilla plus the first register (n_qubits + 1 wires).
        vqc.H_layer(n_qubits+1)
        # RY_layer(init_rot, ancilla=True)
        # Sequence of trainable variational layers (ancilla=True offsets the
        # layers past wire 0, which is reserved for the SWAP-test ancilla).
        for k in range(q_depth):
            if k % 2:
                vqc.RZ_layer(q_weights[k], ancilla=True)
            else:
                vqc.RY_layer(q_weights[k], ancilla=True)
            vqc.entangling_layer(n_qubits, ancilla=True)
        # perform the SWAP test
        # The first Hadamard was already applied to wire 0 by H_layer above.
        for k in range(1, n_qubits + 1):
            qml.CSWAP(wires=[0, k, n_qubits + k])
        qml.Hadamard(wires=0)
        return qml.expval(qml.PauliZ(0))
def reset_enc_eff_anz_q_net(n_qubits):
    """(Re)build the encoder QNode on ``n_qubits`` wires.

    The QNode is published as the module-level global ``enc_eff_anz_q_net``.
    """
    dev = qml.device("default.qubit", wires=n_qubits)
    global enc_eff_anz_q_net
    @qml.qnode(dev, interface="torch")
    def enc_eff_anz_q_net(q_weights_flat, init_rot=None, q_depth=None, n_qubits=None, op=None):
        """Run the variational ansatz and return computational-basis
        probabilities over all wires.

        NOTE(review): ``init_rot`` and ``op`` are accepted but never used
        inside the circuit — callers pass ``op`` as if it selected a
        measurement observable; confirm the intended semantics.
        """
        # Reshape weights: one row of n_qubits angles per layer.
        q_weights = q_weights_flat.reshape(q_depth, n_qubits)
        vqc.H_layer(n_qubits)
        # Sequence of trainable variational layers, alternating RZ / RY
        # rotations, each followed by an entangling layer.
        for k in range(q_depth):
            if k % 2:
                vqc.RZ_layer(q_weights[k])
            else:
                vqc.RY_layer(q_weights[k])
            vqc.entangling_layer(n_qubits)
        return qml.probs(wires=list(range(n_qubits)))
def kronecker(A, B):
    """Return the Kronecker product of two 2D tensors.

    Equivalent to the einsum-and-view formulation: block (i, j) of the
    result is ``A[i, j] * B``.
    """
    return torch.kron(A, B)
class HQA(nn.Module):
    """Hybrid quantum autoencoder.

    A quantum "decoder" circuit maps an amplitude-encoded state to a
    classical latent vector; a classical network followed by a quantum
    "encoder" circuit maps a latent vector back to a state.  Note that the
    ``encoder``/``decoder`` method names are intentionally swapped relative
    to the usual convention (see their docstrings).
    """

    name = 'HQA'

    def __init__(self, n_qubits, latent_size, num_params_enc, num_params_dec, gate_type='eff_anz',
                 interwoven=False, adv_decoder=False):
        """Build the hybrid model.

        Args:
            n_qubits: number of data qubits (states have 2**n_qubits amplitudes).
            latent_size: dimension of the classical latent space.
            num_params_enc: number of trainable encoder-circuit parameters.
            num_params_dec: number of trainable decoder-circuit parameters.
            gate_type: ansatz family; only 'eff_anz' is implemented.
            interwoven: if True, load amplitudes on every other wire.
            adv_decoder: if True, append a small classical net after the
                quantum decoder.
        """
        super().__init__()
        self.latent_size = latent_size
        self.n_qubits = n_qubits
        self.num_params_enc = num_params_enc
        self.num_params_dec = num_params_dec
        self.gate_type = gate_type
        self.q_circ = None
        self.interwoven = interwoven
        self.train_p_fuzz = None
        self.adv_decoder = adv_decoder
        # Sanity checks relating latent dimension to the qubit count.
        # (Original note: "Why is this assertion here? Uncomment if
        # something goes wrong later".)
        if interwoven:
            assert latent_size > 2 * n_qubits
        else:
            assert latent_size >= n_qubits
        if gate_type == 'eff_anz':
            # The decoder ansatz needs a whole number of layers.
            assert num_params_dec % self.latent_size == 0
        self.params_enc = nn.Parameter(0.1 * torch.randn(self.num_params_enc))
        self.params_dec = nn.Parameter(0.1 * torch.randn(self.num_params_dec))
        if adv_decoder:
            self.c_decoder_layer1 = nn.Linear(self.latent_size, 2*self.latent_size)
            self.c_decoder_layer_out = nn.Linear(2*self.latent_size, self.latent_size)
        # Classical learning parameters
        self.c_layer1 = nn.Linear(self.latent_size, 120)
        self.c_layer2 = nn.Linear(120, num_params_enc * 3)
        self.c_out = nn.Linear(num_params_enc * 3, num_params_enc)
        self.reset_q_circs()
        # Caches populated later by the various helper methods.
        self.distributions = None
        self.train_time = None
        self.last_state = None
        self.regressors = None
        self.latent_vectors = None
        self.raw_latent_vectors = None
        self.mean_std_list = None
        self.characterists_list = None
        self.df_latent_vectors = None
        self.batch_size = None
        self.distr_type = None
        self.pca = None
        self.regular_term = None
        self._last_latent_vector = None

    def forward(self, x):
        """Full autoencoder pass used during training.

        Decodes ``x`` to a latent vector, re-encodes it, and returns the
        SWAP-test expectation against the original state (1 = perfect
        reconstruction).
        """
        amplitudes = x
        assert len(x) == 2 ** self.n_qubits
        if self.interwoven:
            amplitude_wires = list(range(0, 2 * self.n_qubits, 2))
        else:
            amplitude_wires = list(range(0, self.n_qubits))
        if self.gate_type == 'eff_anz':
            q_depth = int(self.num_params_dec // self.latent_size)
            q_out = dec_eff_anz_q_net(self.params_dec, amplitudes=amplitudes, amplitude_wires=amplitude_wires,
                                      q_depth=q_depth, reg_size=self.latent_size, n_qubits=self.latent_size)
        else:
            # Unsupported ansatz families abort the process (kept as-is).
            print("'{}' gate type has not yet been implemented.".format(self.gate_type)); exit(3)
        if self.adv_decoder:
            q_out = torch.sigmoid(self.c_decoder_layer1(q_out.float()))
            q_out = self.c_decoder_layer_out(q_out.float())
        self._last_latent_vector = q_out
        # Classical head: latent vector -> encoder rotation angles in (-pi/2, pi/2).
        out = torch.sigmoid(self.c_layer1(q_out.float()))
        out = torch.sigmoid(self.c_layer2(out))
        out = torch.tanh(self.c_out(out)) * np.pi / 2.0
        if self.gate_type == 'eff_anz':
            q_depth = int(self.num_params_enc // self.n_qubits)
            out = enc_train_eff_anz_q_net(out, amplitudes=amplitudes, init_rot=None, q_depth=q_depth, n_qubits=self.n_qubits)
        # self.draw_circs()
        return out

    def test(self, x):
        """Return the reconstruction loss for a single state ``x``."""
        output = self(x)
        loss = vqc.MSELoss(1 - output, 0.0)
        return loss

    def encoder(self, z, plot=False):  # Note the naming is different
        """Go from a latent vector ``z`` to a state (probability) vector."""
        z = torch.tensor(z)
        out = torch.sigmoid(self.c_layer1(z.float()))
        out = torch.sigmoid(self.c_layer2(out))
        out = torch.tanh(self.c_out(out)) * np.pi / 2.0
        q_depth = int(self.num_params_enc // self.n_qubits)
        obs_gen = vqc.proj_meas_gen(self.n_qubits)
        if self.gate_type == 'eff_anz':
            op = next(obs_gen)
            q_out = enc_eff_anz_q_net(out, q_depth=q_depth, op=op, n_qubits=self.n_qubits)
            # NOTE(review): this loop advances ``obs_gen`` by exactly one
            # observable before breaking, so only a single extra element is
            # evaluated and concatenated below — confirm this is intended
            # (it looks like the evaluation may have been meant to live
            # inside the loop).
            for op in obs_gen:
                break
            q_out_elem = enc_eff_anz_q_net(out, q_depth=q_depth, op=op, n_qubits=self.n_qubits).unsqueeze(0)
            q_out = torch.cat((q_out, q_out_elem))
        if plot:
            plt.plot(q_out.detach().numpy())
            plt.show()
        return q_out

    def decoder(self, x):  # Note that the naming is different
        """Go from a state vector ``x`` to its latent representation."""
        amplitudes = np.array(x)
        assert len(x) == 2 ** self.n_qubits
        if self.interwoven:
            amplitude_wires = list(range(0, 2 * self.n_qubits, 2))
        else:
            amplitude_wires = list(range(0, self.n_qubits))
        if self.gate_type == 'eff_anz':
            q_depth = int(self.num_params_dec // self.latent_size)
            q_out = dec_eff_anz_q_net(self.params_dec, amplitudes=amplitudes, amplitude_wires=amplitude_wires,
                                      q_depth=q_depth, reg_size=self.latent_size, n_qubits=self.latent_size)
        if self.adv_decoder:
            q_out = torch.sigmoid(self.c_decoder_layer1(q_out.float()))
            q_out = self.c_decoder_layer_out(q_out.float())
        return q_out

    def create_gaussian_latent_regressor(self, distributions=None):
        """Fit one linear regressor per latent dimension mapping
        (mean, std) characteristics to latent coordinates."""
        if distributions is None:
            distributions = create_gaussian_distributions(2 ** self.n_qubits)
            next(distributions)
        latent_vectors, X = [], []
        for d in distributions:
            distr, m, s = d
            print("------ m={}, s={} ---------".format(m, s))
            latent_vectors.append(self.decoder(distr).detach().numpy())
            X.append([m, s])
        X = np.array(X)
        self.mean_std_list = X
        self.characterists_list = X
        Y = np.array(latent_vectors)
        self.latent_vectors = Y
        self.raw_latent_vectors = Y
        self.regressors = [LinearRegression() for _ in range(len(Y[0, :]))]
        # Fit each regressor on its own latent dimension (plain loop instead
        # of the previous side-effect list comprehension).
        for i, reg in enumerate(self.regressors):
            reg.fit(X, Y[:, i])
        self.dataframe_latent_points()

    def latent_encoder(self, x, plot=False):
        """Encode characteristics ``x`` (e.g. [mean, std]) to a state via
        the fitted latent regressors."""
        assert self.regressors is not None, "Must create latent regressor."
        out = np.array([reg.predict([x])[0] for reg in self.regressors])
        assert len(out) == self.latent_size, "Error in regression. "
        q_out = self.encoder(out)
        if plot:
            plt.plot(np.arange(-0.5*(2**self.n_qubits), 0.5*(2**self.n_qubits)), q_out.detach().numpy())
            plt.show()
        return q_out

    def save(self, loss_evol):
        """Pickle the model and its loss history under the next free
        version index in ``pickled_models/``."""
        i = 0
        filename = 'pickled_models/hqa_model_ver_{}.pickle'.format(i)
        # Find the first unused version number.
        while os.path.isfile(filename):
            i += 1
            filename = 'pickled_models/hqa_model_ver_{}.pickle'.format(i)
        # Context managers guarantee the files are closed even on error.
        with open(filename, 'wb') as dbfile:
            pickle.dump(self, dbfile)
        filename = 'pickled_models/hqa_loss_evol_ver_{}.pickle'.format(i)
        with open(filename, 'wb') as dbfile:
            pickle.dump(loss_evol, dbfile)

    def reset_q_circs(self):
        """Rebuild the three module-level QNodes sized for this model."""
        reset_dec_eff_anz_q_net(self.latent_size)
        reset_enc_train_eff_anz_q_net(2 * self.n_qubits + 1)
        reset_enc_eff_anz_q_net(self.n_qubits)

    def transition_states(self, states, iter_per_state, return_transition=False):
        """Linearly interpolate between consecutive states in latent space.

        Args:
            states: sequence of state vectors.
            iter_per_state: interpolation steps per consecutive pair.
            return_transition: if True, collect and return the decoded
                states; otherwise animate them with matplotlib.

        Returns:
            list: re-encoded interpolated states (empty when animating).
        """
        if not return_transition:
            # Creates the current figure used by plt.cla/plt.plot below.
            fig, ax = plt.subplots()
        out_list = []
        for s in range(len(states)-1):
            state1 = self.decoder(states[s])
            state2 = self.decoder(states[s + 1])
            tran_vec = (state2 - state1)/iter_per_state
            for i in range(iter_per_state + 1):
                out = self.encoder(state1 + i*tran_vec)
                if return_transition:
                    try:
                        out_list.append(out.detach().numpy())
                    except AttributeError:
                        out_list.append(out)
                else:
                    plt.cla()
                    plt.plot(np.arange(-0.5*(2**self.n_qubits), 0.5*(2**self.n_qubits)), out.detach().numpy())
                    plt.show(block=False)
                    plt.pause(0.1)
        return out_list

    def create_latent_vectors(self, distributions, characteristics):
        """Decode every distribution and cache the (characteristics,
        latent vector) pairs as numpy arrays."""
        assert len(distributions) == len(characteristics)
        self.characterists_list, self.latent_vectors = [], []
        for i, d in tqdm.tqdm(enumerate(distributions)):
            self.characterists_list.append(characteristics[i])
            self.latent_vectors.append(self.decoder(d).detach().numpy())
        self.characterists_list = np.array(self.characterists_list)
        self.latent_vectors = np.array(self.latent_vectors)
        self.raw_latent_vectors = self.latent_vectors

    def latent_landscape(self, p1, p2, p3=None, states=None):
        """Attach latent components ``p1``/``p2`` (optionally ``p3``) as
        plotting columns on ``self.df_latent_vectors``.

        ``states`` should be a DataFrame; defaults to the cached one.
        """
        # 'is None' is required here: '== None' on a DataFrame performs an
        # elementwise comparison and then raises on truth-value evaluation.
        if states is None:
            states = self.df_latent_vectors
        p1_list, p2_list, p3_list = [], [], []
        for s in range(len(states)):
            p1_list.append(states.iloc[s].latent_vector[p1])
            p2_list.append(states.iloc[s].latent_vector[p2])
            # NOTE(review): truthiness treats index 0 as "no p3" — confirm.
            if p3:
                p3_list.append(states.iloc[s].latent_vector[p3])
        self.df_latent_vectors = self.df_latent_vectors.assign(p1=p1_list)
        self.df_latent_vectors = self.df_latent_vectors.assign(p2=p2_list)
        if p3:
            self.df_latent_vectors = self.df_latent_vectors.assign(p3=p3_list)

    def pca_transform_latent_vectors(self, projection_size, print_importance=False):
        """Project the raw latent vectors onto their leading principal
        components via sklearn's PCA."""
        assert isinstance(self.latent_vectors, np.ndarray), "Need to create latent vectors."
        X = self.raw_latent_vectors
        X = standardize_data(X)
        # The eigen-decomposition is only used for the optional variance
        # report; the projection itself is delegated to sklearn below.
        covariance_matrix = np.cov(X.T)
        eigen_values, eigen_vectors = np.linalg.eig(covariance_matrix)
        if print_importance:
            print("Variance explained by each principal component:\n"
                  "{}\n".format([(i/sum(eigen_values))*100 for i in eigen_values]))
        self.pca = PCA(n_components=projection_size)
        self.latent_vectors = self.pca.fit_transform(self.raw_latent_vectors)

    def dataframe_latent_points(self, d_type=None):
        """Build ``self.df_latent_vectors`` from the cached characteristics
        and latent vectors, with columns named per distribution type."""
        assert isinstance(self.latent_vectors, np.ndarray), "Must create latent regressor."
        if d_type == 'gaussian':
            latent_points = pd.DataFrame({'mean' : self.characterists_list[:, 0], 'std': self.characterists_list[:, 1],
                                          'latent_vector': list(self.latent_vectors)})
        elif d_type == 'poly_pert_gaussian':
            latent_points = pd.DataFrame({'mean': self.characterists_list[:, 0], 'peak': self.characterists_list[:, 1],
                                          'latent_vector': list(self.latent_vectors)})
        elif d_type == 'heisenberg':  # Characteristics need to be supplied in this form with depth as the first column followed by the coupling terms
            d = {"c{}".format(i): self.characterists_list[:, i] for i in range(1, len(self.characterists_list[0]))}
            d['depth'] = self.characterists_list[:, 0]
            d['latent_vector'] = list(self.latent_vectors)
            latent_points = pd.DataFrame(d)
        else:
            d = {"c{}".format(i): self.characterists_list[:, i] for i in range(len(self.characterists_list[0]))}
            d['latent_vector'] = list(self.latent_vectors)
            latent_points = pd.DataFrame(d)
        self.df_latent_vectors = latent_points

    def draw_circs(self):
        """Print text diagrams of the three module-level QNodes."""
        print(dec_eff_anz_q_net.draw())
        print(enc_train_eff_anz_q_net.draw())
        print(enc_eff_anz_q_net.draw())
def train_hqa(model, distributions, num_iterations, loss_evol=None, batch_size=1,
regular_term=None):
since = time.time()
print("Training started:")
N = 2 ** model.n_qubits
step = 0.005
criterion = vqc.MSELoss
model.distributions = distributions
optimizer = optim.Adam(model.parameters(), lr=step)
if not loss_evol:
loss_evol = []
for ii in range(num_iterations):
print('-----iteration {}--model:{}------'.format(ii, model))
with torch.set_grad_enabled(True):
optimizer.zero_grad()
| |
# File: tensor.py
# Creation: Wednesday August 19th 2020
# Author: <NAME>
# Contact: <EMAIL>
# <EMAIL>
# --------
# Copyright (c) 2020 <NAME>
"""
Defines tensors for deep learning application. A tensor is a multi-dimensional array, similar to ``numpy`` arrays.
"""
# Basic imports
import numpy as np
try:
import cupy as cp
except ModuleNotFoundError:
pass
# NETS package
import nets
from nets.cuda import numpy_or_cupy, cuda_available
from nets.utils import BackwardCallError, CUDANotAvailableError, deprecated
def tensor2string(tensor, prefix="", precision=4, separator=', ', floatmode=None,
                  edgeitems=3, threshold=100, max_line_width=100, suppress_small=True):
    """Render a tensor's data as a string, indenting continuation lines
    with ``prefix``.

    NOTE(review): ``separator``, ``floatmode``, ``edgeitems`` and
    ``threshold`` are accepted but never forwarded — only ``precision``,
    ``max_line_width`` and ``suppress_small`` reach ``array_str`` below.
    Confirm whether they were meant for ``array2string`` instead.
    """
    # Representation: pick numpy or cupy depending on where the data lives.
    nc = numpy_or_cupy(tensor)
    array_str = nc.array_str(tensor.data,
                             precision=precision,
                             max_line_width=max_line_width,
                             suppress_small=suppress_small)
    # Prefix every continuation line so multi-line arrays align in reprs.
    array_str = f"\n{prefix}".join(array_str.split("\n"))
    return array_str
def to_numpy(arrayable):
    """Coerce *arrayable* into a ``numpy.ndarray``.

    NumPy arrays pass through unchanged, ``Tensor`` instances are
    unwrapped, CuPy arrays (when CUDA is available) are copied to host
    memory, and anything else is handed to ``np.array``.

    Args:
        arrayable: object to convert.

    Returns:
        numpy.ndarray

    Example:
        >>> import numpy as np
        >>> from nets.tensor import to_numpy
        >>> from nets import Tensor
        >>> isinstance(to_numpy([0, 1, 2, 3]), np.ndarray)
        True
        >>> isinstance(to_numpy(Tensor([0, 1, 2, 3])), np.ndarray)
        True
    """
    if isinstance(arrayable, np.ndarray):
        return arrayable
    if isinstance(arrayable, Tensor):
        return np.array(arrayable.data)
    if cuda_available() and isinstance(arrayable, cp.ndarray):
        return cp.asnumpy(arrayable)
    return np.array(arrayable)
def to_cupy(arrayable):
    """Coerce *arrayable* into a ``cupy.ndarray``.

    CuPy arrays pass through unchanged; ``Tensor`` instances are unwrapped
    before conversion; everything else (including NumPy arrays) is handed
    to ``cp.array``.

    Args:
        arrayable: object to convert.

    Returns:
        cupy.ndarray

    Raises:
        CUDANotAvailableError: if CUDA / CuPy is not available.

    Example:
        >>> import cupy as cp
        >>> from nets.tensor import to_cupy
        >>> isinstance(to_cupy([0, 1, 2, 3]), cp.ndarray)
        True
    """
    if not cuda_available():
        raise CUDANotAvailableError("Could not move a tensor to GPU because CUDA was not found.")
    if isinstance(arrayable, cp.ndarray):
        return arrayable
    source = arrayable.data if isinstance(arrayable, Tensor) else arrayable
    return cp.array(source)
# TODO: recursively check whether Tensor instances are nested inside a list/array and unwrap the nested Tensors.
def to_tensor(tensorable, **kwargs):
    """Coerce *tensorable* into a ``Tensor``.

    A ``Tensor`` passes through untouched; any other object is wrapped
    with ``Tensor(tensorable, **kwargs)``.

    Args:
        tensorable: object to convert.

    Returns:
        Tensor

    Example:
        >>> import numpy as np
        >>> from nets.tensor import to_tensor
        >>> from nets import Tensor
        >>> isinstance(to_tensor([0, 1, 2, 3]), Tensor)
        True
        >>> isinstance(to_tensor(np.array([0, 1, 2, 3])), Tensor)
        True
    """
    return tensorable if isinstance(tensorable, Tensor) else Tensor(tensorable, **kwargs)
class Tensor(object):
"""A Tensor is a multi dimensional array that tracks and records previous gradients, creating a dynamic
computational graph.
* :attr:`data` (numpy.ndarray): numpy array of the tensor's data.
* :attr:`requires_grad` (bool): if ``True``, will save hooks and create a computational graphs from all previous operations
leadings to this tensor.
* :attr:`_hooks` (nets.autograd.Hook): hook(s) leadings to this tensor. Note that this attribute should not be modified manually.
* :attr:`grad` (float): gradient for this tensor.
* :attr:`id` (int): id of the tensor, mainly for debug mode.
"""
# Objects instance are heavy-weight in Python.
# Setting slots free memory, and does not keep built-in functions (__builtin__ things)
__slots__ = '_data', 'requires_grad', '_hooks', '_grad_fn', '_grad', '_id', '_version', '_device'
# A global parameter to track how many `Tensor` have been instantiate.
# This is mainly for debugging and visualization
_COUNTER = 0
    def __init__(self, data, requires_grad=False, device="cpu", hooks=None):
        """Create a tensor on the requested device.

        Args:
            data: array-like payload; stored as numpy (CPU) or cupy (GPU).
            requires_grad (bool): track operations for autograd.
            device (str): ``'cpu'`` or a CUDA device name.
            hooks (list): backward hooks; defaults to a fresh empty list.
        """
        self._device = device
        # Load the data to the right device (either CPU or GPU)
        if device == 'cpu':
            data = to_numpy(data)
        else:
            data = to_cupy(data)
        self._data = data
        self.requires_grad = requires_grad
        self._hooks = hooks or []
        self._grad_fn = None
        self._grad = None
        # Update the tracking: version counter plus a unique id taken from
        # the class-wide instance counter (used for debugging/visualization).
        self._version = 0
        self._id = Tensor._COUNTER
        Tensor._COUNTER += 1
    @property
    def device(self):
        # Normalized (lower-case) device name, e.g. 'cpu' or 'cuda'.
        return self._device.lower()

    @property
    def grad(self):
        # Accumulated gradient, or None when detached / never computed.
        return self._grad

    @property
    def grad_fn(self):
        # Operation that produced this tensor, or None for leaves.
        return self._grad_fn

    @property
    def is_leaf(self):
        # A tensor is a leaf when no operation produced it:
        # no grad_fn and no registered hooks.
        if self._grad_fn is None and self._hooks == []:
            return True
        return False

    @property
    def data(self):
        # Raw numpy/cupy array backing this tensor.
        return self._data

    @data.setter
    def data(self, new_data):
        self._data = new_data
        # Setting the data manually means we invalidate the gradient.
        self.detach()

    # NOTE(review): `grad` and `grad_fn` are re-declared below, shadowing
    # the identical definitions above — the duplicates could be removed.
    @property
    def grad(self):
        return self._grad

    @property
    def grad_fn(self):
        return self._grad_fn
    @property
    def shape(self):
        # Tuple of array dimensions (delegates to the backing array).
        return self._data.shape

    @property
    def size(self):
        # Total number of elements.
        return self._data.size

    @property
    def ndim(self):
        # Number of dimensions.
        return self._data.ndim

    @property
    def dtype(self):
        # Element dtype of the backing array.
        return self._data.dtype

    @dtype.setter
    def dtype(self, new_dtype):
        # Casting rebinds the data, so the gradient history is invalidated.
        self._data = self._data.astype(new_dtype)
        self.detach()

    @property
    def version(self):
        # Modification counter (only initialized to 0 in __init__ here).
        return self._version

    @property
    def id(self):
        # Unique per-instance id assigned at construction.
        return self._id

    @property
    def T(self):
        # Transpose, delegated to nets.transpose.
        return nets.transpose(self)
    def register_hook(self, hook):
        """Register a backward hook on this ``Tensor``.

        Args:
            hook (Hook): hook appended to the tensor's hook list; its
                ``grad_fn`` is invoked during :meth:`backward`.
        """
        self._hooks.append(hook)
    def astype(self, new_type):
        r"""Return this ``Tensor`` cast to ``new_type``.

        The tensor is detached from the computational graph first; the
        cast itself is delegated to ``nets.astype``.

        Args:
            new_type (type): target element type.
        """
        self.detach()
        return nets.astype(self, new_type)
    def detach(self):
        r"""Unlink the ``Tensor`` from the computational graph.

        Resets ``_grad`` and ``_grad_fn`` to ``None`` and ``_hooks`` to an
        empty list, so no backward pass can flow through this tensor.
        """
        self._grad = None
        self._grad_fn = None
        self._hooks = []
def zero_grad(self):
r"""Set to a zero ``Tensor`` the gradient. This is call when initializing a ``Tensor`` that requires gradient
tracking, or re-initialize parameters's gradient after a training loop as they accumulate on each other.
"""
self._grad = nets.zeros(self.shape, device=self.device, dtype='float64')
self.detach()
self._grad = nets.zeros(
self.shape, device=self.device, dtype='float64')
    def backward(self, grad=None):
        r"""Compute a single backward pass on all ``Tensor`` linked to this one.

        The ``Tensor`` dependencies of this top-level ``Tensor`` are stored
        in the ``_hooks`` attribute.  The backward pass propagates a
        gradient to every ``Tensor`` registered in ``_hooks``, accumulating
        into each tensor's ``grad`` attribute (so a tensor used multiple
        times sums its upstream gradients).

        .. note::
            The top-level ``Tensor`` must have ``requires_grad=True``
            for the pass to be allowed.

        Args:
            grad (Tensor): upstream gradient. Default is None, which is only
                valid for a 0-dimensional tensor (it becomes ``Tensor(1.0)``).

        Raises:
            BackwardCallError: if ``requires_grad`` is False.
            RuntimeError: if ``grad`` is omitted for a non-0-d tensor.
        """
        # Check if the backward pass is legit
        if not self.requires_grad:
            raise BackwardCallError(r"called backward on non `requires_grad` tensor. Either there was no "
                                    r"`requires_grad=True` initialization or gradients were set to `None` due to an "
                                    r"inplace operation or the computational graph was split and gradients are no "
                                    r"longer linked to this branch. Graph are usually split when a new tensor is "
                                    r"created from a `numeric` function (zero, ones, eye, identity) and "
                                    r"`requires_grad` was not specified.")
        if grad is None:
            if self.shape == ():
                # Seed the pass with d(self)/d(self) = 1 for scalars.
                grad = Tensor(1.0, device=self.device)
            else:
                raise RuntimeError("grad must be specified for non-0-tensor")
        # Update the gradient
        # NOTE: the gradients accumulate !
        self._grad = grad if self._grad is None else self._grad + grad
        # Back-propagation in all dependencies.
        # NOTE(review): ``_hooks`` is initialized to a list in __init__ and
        # detach, so the ``is not None`` guard below always holds — confirm.
        hooks = self._hooks
        if hooks is not None:
            for hook in self._hooks:
                # Compute the gradient w.r.t the operation
                backward_grad = hook.grad_fn(grad)
                # Back-propagate in the tensor used in this operation
                hook.tensor.backward(backward_grad)
        # if self._grad_fn is not None:
        #     tensors = self._grad_fn.tensors
        #     grads = self._grad_fn.backward(grad)
        #     grads = grads if isinstance(grads, tuple) else (grads,)
        #     for tensor, grad in zip(tensors, grads):
        #         if tensor.requires_grad:
        #             tensor.backward(grad)
        # TODO: handle properly nodes and leaf from different hooks
        # ?: maybe add Variable class / is_leaf attributes
        # ?: and counter to skip gradients that don't need to be set
    def to(self, device):
        """Return a copy of this tensor located on ``device``.

        Args:
            device (str): target device, ``'cuda'`` or ``'cpu'``.

        Returns:
            Tensor: new tensor with the same data and ``requires_grad``
            flag; hooks and gradient are not carried over.
        """
        return Tensor(self.data, requires_grad=self.requires_grad, device=device)
    def cpu(self):
        """Move the tensor's data to the CPU, in place.

        Assigning through the ``data`` setter also detaches the tensor
        from the computational graph.
        """
        self.data = self.to('cpu').data
        self._device = 'cpu'

    def cuda(self):
        """Move the tensor's data to the GPU, in place.

        Assigning through the ``data`` setter also detaches the tensor
        from the computational graph.
        """
        self.data = self.to('cuda').data
        self._device = 'cuda'
    def item(self):
        r"""Return the Python scalar held by a 0-dimensional ``Tensor``.

        Detaches the tensor first (clearing hooks and gradient) so the
        computational graph can be garbage-collected.
        """
        self.detach()
        return self.data.item()

    def tolist(self):
        r"""Convert the ``Tensor`` data to a (possibly nested) Python list.

        Detaches the tensor from the computational graph first.
        """
        self.detach()
        return self.data.tolist()
def numpy(self):
r"""Convert the ``Tensor`` data to a | |
<gh_stars>1-10
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive", "http_jar")
load("@bazel_tools//tools/build_defs/repo:jvm.bzl", "jvm_maven_import_external")
load("@bazel_tools//tools/build_defs/repo:utils.bzl", "maybe")
load("@bazelrio//:deps_utils.bzl", "cc_library_headers", "cc_library_shared", "cc_library_sources", "cc_library_static")
def setup_wpilib_2022_1_1_beta_2_dependencies():
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpilibc_wpilibc-cpp_linuxathena",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibc/wpilibc-cpp/2022.1.1-beta-2/wpilibc-cpp-2022.1.1-beta-2-linuxathena.zip",
sha256 = "f0e13354897ddefebe552c156ba65c2b1d5c4286de5ea2ccfd4c0c62225128d4",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpilibc_wpilibc-cpp_linuxathenastatic",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibc/wpilibc-cpp/2022.1.1-beta-2/wpilibc-cpp-2022.1.1-beta-2-linuxathenastatic.zip",
sha256 = "d9de9cecf888ceee59a949fb7ff275563ee1d36051b3df06e6a88c71f8fcc42b",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpilibc_wpilibc-cpp_windowsx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibc/wpilibc-cpp/2022.1.1-beta-2/wpilibc-cpp-2022.1.1-beta-2-windowsx86-64.zip",
sha256 = "cd4578b9d3bf84d5702f862acdbd9a9e05cf54d3a2589807e2e8143aec2cde93",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpilibc_wpilibc-cpp_linuxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibc/wpilibc-cpp/2022.1.1-beta-2/wpilibc-cpp-2022.1.1-beta-2-linuxx86-64.zip",
sha256 = "7a6c2ae60403fc57540aa3af74d41dbd31d03c5f5e58fc55250944884f2356b9",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpilibc_wpilibc-cpp_osxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibc/wpilibc-cpp/2022.1.1-beta-2/wpilibc-cpp-2022.1.1-beta-2-osxx86-64.zip",
sha256 = "73bdf7d3f26cd4b593cfb00f624668246cc73475a57136b26c8ed5abe30c3e35",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpilibc_wpilibc-cpp_windowsx86-64static",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibc/wpilibc-cpp/2022.1.1-beta-2/wpilibc-cpp-2022.1.1-beta-2-windowsx86-64static.zip",
sha256 = "47eec8da0bbd5e1421f02d78ff544d13e22e13d66f42895d2204bffb6704d555",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpilibc_wpilibc-cpp_linuxx86-64static",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibc/wpilibc-cpp/2022.1.1-beta-2/wpilibc-cpp-2022.1.1-beta-2-linuxx86-64static.zip",
sha256 = "db81ada48ff44775298eac9a39d6a4644cf7ce1b659adf04441506c901c3667d",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpilibc_wpilibc-cpp_osxx86-64static",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibc/wpilibc-cpp/2022.1.1-beta-2/wpilibc-cpp-2022.1.1-beta-2-osxx86-64static.zip",
sha256 = "c41f58957708b305c36a69180e601b39f84dc75b8a29a178ad0ab247485e366f",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpilibc_wpilibc-cpp_headers",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibc/wpilibc-cpp/2022.1.1-beta-2/wpilibc-cpp-2022.1.1-beta-2-headers.zip",
sha256 = "1d3dfd94310e829c59df00b7c7d9816b37799606414bf40e3530564604773a38",
build_file_content = cc_library_headers,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpilibc_wpilibc-cpp_sources",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibc/wpilibc-cpp/2022.1.1-beta-2/wpilibc-cpp-2022.1.1-beta-2-sources.zip",
sha256 = "48db17403c13f7e6e25d389bf7509f8fb90eab453cab35db42ebdbb8c3f5a65b",
build_file_content = cc_library_sources,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_hal_hal-cpp_linuxathena",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/hal/hal-cpp/2022.1.1-beta-2/hal-cpp-2022.1.1-beta-2-linuxathena.zip",
sha256 = "97bc9d45eb42c4e0046c63cd1106d0c04d243c16387210f8753bfc4f877c7c80",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_hal_hal-cpp_linuxathenastatic",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/hal/hal-cpp/2022.1.1-beta-2/hal-cpp-2022.1.1-beta-2-linuxathenastatic.zip",
sha256 = "19830fdc7c7cfbcbbb87356b8665f1dfb78de9362814bae277e123cd6006e4c2",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_hal_hal-cpp_windowsx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/hal/hal-cpp/2022.1.1-beta-2/hal-cpp-2022.1.1-beta-2-windowsx86-64.zip",
sha256 = "8d3791a5a750553a7b502d1e8db22d93e2e2f4ab8ca1b5d37e1a8f7eb11c8abc",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_hal_hal-cpp_linuxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/hal/hal-cpp/2022.1.1-beta-2/hal-cpp-2022.1.1-beta-2-linuxx86-64.zip",
sha256 = "ed408cc739b606226900246bc722ccaf6d3d9d6823fe10a88ec165e8c844d7ad",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_hal_hal-cpp_osxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/hal/hal-cpp/2022.1.1-beta-2/hal-cpp-2022.1.1-beta-2-osxx86-64.zip",
sha256 = "04b46dee32a55cecf5e0e4a2af9970f4ee059fb0afae4c4b33fa4e53ab1d9f89",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_hal_hal-cpp_windowsx86-64static",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/hal/hal-cpp/2022.1.1-beta-2/hal-cpp-2022.1.1-beta-2-windowsx86-64static.zip",
sha256 = "5126cf98d7e80be50d1eda4e7c693c117c0313fe9e141c12739993d73a8b5630",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_hal_hal-cpp_linuxx86-64static",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/hal/hal-cpp/2022.1.1-beta-2/hal-cpp-2022.1.1-beta-2-linuxx86-64static.zip",
sha256 = "d6a6a306d1c65f080b4efa5b1a15dd032243188fe1423af0a24f238b37f8cfe9",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_hal_hal-cpp_osxx86-64static",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/hal/hal-cpp/2022.1.1-beta-2/hal-cpp-2022.1.1-beta-2-osxx86-64static.zip",
sha256 = "3ecdcbef0c7ba9f6611b990ac1282de3b74f82531d5bb87cc1b1a17be170a940",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_hal_hal-cpp_headers",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/hal/hal-cpp/2022.1.1-beta-2/hal-cpp-2022.1.1-beta-2-headers.zip",
sha256 = "8c7cb75a7dbbe85fd160379237f05eeaba481a953b24455bd34288e08c2135a3",
build_file_content = cc_library_headers,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_hal_hal-cpp_sources",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/hal/hal-cpp/2022.1.1-beta-2/hal-cpp-2022.1.1-beta-2-sources.zip",
sha256 = "5c0576b9fe09f4b25df7c9dfa5335b4c1fcba7ff61bb92f0d861ec4334ed13c6",
build_file_content = cc_library_sources,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpiutil_wpiutil-cpp_linuxathena",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpiutil/wpiutil-cpp/2022.1.1-beta-2/wpiutil-cpp-2022.1.1-beta-2-linuxathena.zip",
sha256 = "a5af8dfda0b4723a4bd77c4d96de9531452fba3c4ed8e7f850b0801880851aaf",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpiutil_wpiutil-cpp_linuxathenastatic",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpiutil/wpiutil-cpp/2022.1.1-beta-2/wpiutil-cpp-2022.1.1-beta-2-linuxathenastatic.zip",
sha256 = "eaf47c60355377bfd5754920b790d3d65d08afed2f2da93d3c2f281e1c867f6c",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpiutil_wpiutil-cpp_windowsx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpiutil/wpiutil-cpp/2022.1.1-beta-2/wpiutil-cpp-2022.1.1-beta-2-windowsx86-64.zip",
sha256 = "6bfd04ffa99473418716b31f7e8a61bd6a09c156896d88f6d56db697164dab68",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpiutil_wpiutil-cpp_linuxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpiutil/wpiutil-cpp/2022.1.1-beta-2/wpiutil-cpp-2022.1.1-beta-2-linuxx86-64.zip",
sha256 = "09b38a6118d55874f665e9c3855cc09cba64c608196d2d2d34f370e1ebb0e4dd",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpiutil_wpiutil-cpp_osxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpiutil/wpiutil-cpp/2022.1.1-beta-2/wpiutil-cpp-2022.1.1-beta-2-osxx86-64.zip",
sha256 = "a635045164580665a7390d841de1f285b5910c72c387b4cf7a9ef8ac55d1db82",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpiutil_wpiutil-cpp_windowsx86-64static",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpiutil/wpiutil-cpp/2022.1.1-beta-2/wpiutil-cpp-2022.1.1-beta-2-windowsx86-64static.zip",
sha256 = "2bcb8762b93c3f11827a03086eaf09037451d801c3395a18c875d3022d2d66b4",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpiutil_wpiutil-cpp_linuxx86-64static",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpiutil/wpiutil-cpp/2022.1.1-beta-2/wpiutil-cpp-2022.1.1-beta-2-linuxx86-64static.zip",
sha256 = "5fba49f81922019c25cbd0295f0e6c00b3a8c5fac23b3e056ecb7253ace258a9",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpiutil_wpiutil-cpp_osxx86-64static",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpiutil/wpiutil-cpp/2022.1.1-beta-2/wpiutil-cpp-2022.1.1-beta-2-osxx86-64static.zip",
sha256 = "0da4ce06077b2e1a2d2fd674129c19a8851213a77c8c077b1b6493658a9e61e3",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpiutil_wpiutil-cpp_headers",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpiutil/wpiutil-cpp/2022.1.1-beta-2/wpiutil-cpp-2022.1.1-beta-2-headers.zip",
sha256 = "f63c7086f6d900f6088f42b5d31d538c89df1c6d110f679508883fc0a69a48ee",
build_file_content = cc_library_headers,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpiutil_wpiutil-cpp_sources",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpiutil/wpiutil-cpp/2022.1.1-beta-2/wpiutil-cpp-2022.1.1-beta-2-sources.zip",
sha256 = "26cc4ec3b6846426548a8069bc8fb8047bd1a03450fe076b387adaa3b56d1a27",
build_file_content = cc_library_sources,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_ntcore_ntcore-cpp_linuxathena",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/ntcore/ntcore-cpp/2022.1.1-beta-2/ntcore-cpp-2022.1.1-beta-2-linuxathena.zip",
sha256 = "6fd6f700ae9d94d546c1f731d5823f22fbc8de05973383997beaccdc15dc9f3b",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_ntcore_ntcore-cpp_linuxathenastatic",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/ntcore/ntcore-cpp/2022.1.1-beta-2/ntcore-cpp-2022.1.1-beta-2-linuxathenastatic.zip",
sha256 = "06ce4bd18cb31d383c96b61855689f082ab82eabb2d4904d390241e7140667a4",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_ntcore_ntcore-cpp_windowsx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/ntcore/ntcore-cpp/2022.1.1-beta-2/ntcore-cpp-2022.1.1-beta-2-windowsx86-64.zip",
sha256 = "99c44a1b6aaef37b03ff6f7c41c4ccb02f3c7ed9fd8bbdb04190b6ba86112d4d",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_ntcore_ntcore-cpp_linuxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/ntcore/ntcore-cpp/2022.1.1-beta-2/ntcore-cpp-2022.1.1-beta-2-linuxx86-64.zip",
sha256 = "55f831750fc2f0e1168f6226d379ab8454244cdae584a021e7be2f6ade8c0907",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_ntcore_ntcore-cpp_osxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/ntcore/ntcore-cpp/2022.1.1-beta-2/ntcore-cpp-2022.1.1-beta-2-osxx86-64.zip",
sha256 = "e6bd27d7b1a145206a11e59518b3f19f82c73be2fc0d69404103bf46c761199a",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_ntcore_ntcore-cpp_windowsx86-64static",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/ntcore/ntcore-cpp/2022.1.1-beta-2/ntcore-cpp-2022.1.1-beta-2-windowsx86-64static.zip",
sha256 = "f1a202aa07f97f734c08759a17dff2119a73122e62258bd683fb5f8fedbbe58c",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_ntcore_ntcore-cpp_linuxx86-64static",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/ntcore/ntcore-cpp/2022.1.1-beta-2/ntcore-cpp-2022.1.1-beta-2-linuxx86-64static.zip",
sha256 = "648f6286db8bf8551440b866dba6a7dbd7613accd4e07ad14a79bc088cbb8ef9",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_ntcore_ntcore-cpp_osxx86-64static",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/ntcore/ntcore-cpp/2022.1.1-beta-2/ntcore-cpp-2022.1.1-beta-2-osxx86-64static.zip",
sha256 = "0d351c41eb50bec79e70ad209cfe00d818296285f92f0a0c3331226fd156c240",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_ntcore_ntcore-cpp_headers",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/ntcore/ntcore-cpp/2022.1.1-beta-2/ntcore-cpp-2022.1.1-beta-2-headers.zip",
sha256 = "d91ee0e81421a4b40cd2983cda4b4bf430448413284ab5b088b2bab0de846ed1",
build_file_content = cc_library_headers,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_ntcore_ntcore-cpp_sources",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/ntcore/ntcore-cpp/2022.1.1-beta-2/ntcore-cpp-2022.1.1-beta-2-sources.zip",
sha256 = "68bda76f06efac28bc0858afdd063987d9adb7d502ace491cdce7a8867cac738",
build_file_content = cc_library_sources,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpimath_wpimath-cpp_linuxathena",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpimath/wpimath-cpp/2022.1.1-beta-2/wpimath-cpp-2022.1.1-beta-2-linuxathena.zip",
sha256 = "01570c2eef2fa6da664c7eef542f52ff41154339433d9c20c51fcf20ed06f8a0",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpimath_wpimath-cpp_linuxathenastatic",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpimath/wpimath-cpp/2022.1.1-beta-2/wpimath-cpp-2022.1.1-beta-2-linuxathenastatic.zip",
sha256 = "5dcd20d8164633e6fb6c883455df20d2837f385afb1193e52457abf10d0da484",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpimath_wpimath-cpp_windowsx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpimath/wpimath-cpp/2022.1.1-beta-2/wpimath-cpp-2022.1.1-beta-2-windowsx86-64.zip",
sha256 = "14a6e0fd84a61154cf9831c62889e8a816d49149f45be8b785e0a95bae5c59da",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpimath_wpimath-cpp_linuxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpimath/wpimath-cpp/2022.1.1-beta-2/wpimath-cpp-2022.1.1-beta-2-linuxx86-64.zip",
sha256 = "54cb8d45bbdb3f1a6af5b638d666c6274d38bb31b1459eacfb009bfd83b0d4f5",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpimath_wpimath-cpp_osxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpimath/wpimath-cpp/2022.1.1-beta-2/wpimath-cpp-2022.1.1-beta-2-osxx86-64.zip",
sha256 = "2ce70b9193cf92947e39b955918e3ef00d8f4c77b03d75b3069322ab841f8fa8",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpimath_wpimath-cpp_windowsx86-64static",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpimath/wpimath-cpp/2022.1.1-beta-2/wpimath-cpp-2022.1.1-beta-2-windowsx86-64static.zip",
sha256 = "f1d470b0632352f8371b1a0ce5f65e84da8e11118c65694d689d5ba7b1b90179",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpimath_wpimath-cpp_linuxx86-64static",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpimath/wpimath-cpp/2022.1.1-beta-2/wpimath-cpp-2022.1.1-beta-2-linuxx86-64static.zip",
sha256 = "c24c4557175253487fa1fb5d99538ba8478e9256911872984d4e989da3a01630",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpimath_wpimath-cpp_osxx86-64static",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpimath/wpimath-cpp/2022.1.1-beta-2/wpimath-cpp-2022.1.1-beta-2-osxx86-64static.zip",
sha256 = "1ec2922e338373244cfc89484d9d8e369615f370a3ee1de41c43240dd64819a3",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpimath_wpimath-cpp_headers",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpimath/wpimath-cpp/2022.1.1-beta-2/wpimath-cpp-2022.1.1-beta-2-headers.zip",
sha256 = "e08c8eaf576fd8f05c12af6fb9af6364c2d8484d689d17892d34f16b040fbba4",
build_file_content = cc_library_headers,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpimath_wpimath-cpp_sources",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpimath/wpimath-cpp/2022.1.1-beta-2/wpimath-cpp-2022.1.1-beta-2-sources.zip",
sha256 = "2d5d87ee83cbe6f699eaf3c3a00614a0532e06e0d9736af17023190a8aaee729",
build_file_content = cc_library_sources,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_cameraserver_cameraserver-cpp_linuxathena",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/cameraserver/cameraserver-cpp/2022.1.1-beta-2/cameraserver-cpp-2022.1.1-beta-2-linuxathena.zip",
sha256 = "76d739ed2190285f4a3b977371f766f7640cce4c8e92435535dbf7c679004358",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_cameraserver_cameraserver-cpp_linuxathenastatic",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/cameraserver/cameraserver-cpp/2022.1.1-beta-2/cameraserver-cpp-2022.1.1-beta-2-linuxathenastatic.zip",
sha256 = "428f94e3c21dbeded4ace51da043abc88979bd5aee4571256360041a55e37318",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_cameraserver_cameraserver-cpp_windowsx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/cameraserver/cameraserver-cpp/2022.1.1-beta-2/cameraserver-cpp-2022.1.1-beta-2-windowsx86-64.zip",
sha256 = "b9c0e355e21d55339f1b488d2a076fe4a0f8c079222ecf098d8034822fb55f46",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_cameraserver_cameraserver-cpp_linuxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/cameraserver/cameraserver-cpp/2022.1.1-beta-2/cameraserver-cpp-2022.1.1-beta-2-linuxx86-64.zip",
sha256 = "a7ace64e900deb41749f0c37d6ecc37d437d24a858887ac92c51eb8b7a164684",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_cameraserver_cameraserver-cpp_osxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/cameraserver/cameraserver-cpp/2022.1.1-beta-2/cameraserver-cpp-2022.1.1-beta-2-osxx86-64.zip",
sha256 = "3646bfb7b8b665a6a3cc74f565d16200ae65afb8acbe4d7e107ba9a9af13df23",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_cameraserver_cameraserver-cpp_windowsx86-64static",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/cameraserver/cameraserver-cpp/2022.1.1-beta-2/cameraserver-cpp-2022.1.1-beta-2-windowsx86-64static.zip",
sha256 = "fbdfbffff2866829d71f1ad17b8acbfa76fec6fb13fbcc0bf406ed920a79d2af",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_cameraserver_cameraserver-cpp_linuxx86-64static",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/cameraserver/cameraserver-cpp/2022.1.1-beta-2/cameraserver-cpp-2022.1.1-beta-2-linuxx86-64static.zip",
sha256 = "a2c14e890978ba94fc1df261d7bd9ac4622ad9a481df97d559479ebfd2882714",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_cameraserver_cameraserver-cpp_osxx86-64static",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/cameraserver/cameraserver-cpp/2022.1.1-beta-2/cameraserver-cpp-2022.1.1-beta-2-osxx86-64static.zip",
sha256 = "59b08df64d74b2acdbc2be65ce208c8deb9a1b6bc75db368836f200cd9707f9c",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_cameraserver_cameraserver-cpp_headers",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/cameraserver/cameraserver-cpp/2022.1.1-beta-2/cameraserver-cpp-2022.1.1-beta-2-headers.zip",
sha256 = "feababdd10581c2b517fed56e55c8df6d8a4f977ee1849f22247befeccce9a3a",
build_file_content = cc_library_headers,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_cameraserver_cameraserver-cpp_sources",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/cameraserver/cameraserver-cpp/2022.1.1-beta-2/cameraserver-cpp-2022.1.1-beta-2-sources.zip",
sha256 = "679ffd0da3701e0c75d99b88fcc552159179cc1c08853f9565671b0349316d2d",
build_file_content = cc_library_sources,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_cscore_cscore-cpp_linuxathena",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/cscore/cscore-cpp/2022.1.1-beta-2/cscore-cpp-2022.1.1-beta-2-linuxathena.zip",
sha256 = "5ea93a4970a10696a87cfb66287b3a6b21480ff460f83f67fa8088089bcfcae0",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_cscore_cscore-cpp_linuxathenastatic",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/cscore/cscore-cpp/2022.1.1-beta-2/cscore-cpp-2022.1.1-beta-2-linuxathenastatic.zip",
sha256 = "039898fdf531d22289b1b65a3787345fafe1859c7f4e6f926bba7d5146578869",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_cscore_cscore-cpp_windowsx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/cscore/cscore-cpp/2022.1.1-beta-2/cscore-cpp-2022.1.1-beta-2-windowsx86-64.zip",
sha256 = "6ffea3ab8859c625760791243294807a9193a2e934fed7c4b8697dbc4f09eda7",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_cscore_cscore-cpp_linuxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/cscore/cscore-cpp/2022.1.1-beta-2/cscore-cpp-2022.1.1-beta-2-linuxx86-64.zip",
sha256 = "57c327380177a84fcca5788c099c3b425a6b37746c4fe18230d5a0d5c688b568",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_cscore_cscore-cpp_osxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/cscore/cscore-cpp/2022.1.1-beta-2/cscore-cpp-2022.1.1-beta-2-osxx86-64.zip",
sha256 = "62af34633f43b20b31c612ed437eb721271f5f95f9a4e370af3e5017d965c55f",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_cscore_cscore-cpp_windowsx86-64static",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/cscore/cscore-cpp/2022.1.1-beta-2/cscore-cpp-2022.1.1-beta-2-windowsx86-64static.zip",
sha256 = "f55702464cd0c51973fd95c5044bb36fb142f8b3a6b1e697f2c006ee1429716e",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_cscore_cscore-cpp_linuxx86-64static",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/cscore/cscore-cpp/2022.1.1-beta-2/cscore-cpp-2022.1.1-beta-2-linuxx86-64static.zip",
sha256 = "91cfd7180d96168132b4472825fd3786d9f43589066bd49a881f3c18a4924fc3",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_cscore_cscore-cpp_osxx86-64static",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/cscore/cscore-cpp/2022.1.1-beta-2/cscore-cpp-2022.1.1-beta-2-osxx86-64static.zip",
sha256 = "b847736263619d91132010b80002666f30f539ccf0353b0be037a9bde0324fa9",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_cscore_cscore-cpp_headers",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/cscore/cscore-cpp/2022.1.1-beta-2/cscore-cpp-2022.1.1-beta-2-headers.zip",
sha256 = "9cba89aa01769a3e324602c1769e6d4a94604d3a5736b925aa1f639bc1423de2",
build_file_content = cc_library_headers,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_cscore_cscore-cpp_sources",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/cscore/cscore-cpp/2022.1.1-beta-2/cscore-cpp-2022.1.1-beta-2-sources.zip",
sha256 = "d0a8bf9d6f31b86841d18191efa2fc4c5d48197f1420a9ba451d03e5595c0c3e",
build_file_content = cc_library_sources,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpiliboldcommands_wpiliboldcommands-cpp_linuxathena",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibOldCommands/wpilibOldCommands-cpp/2022.1.1-beta-2/wpilibOldCommands-cpp-2022.1.1-beta-2-linuxathena.zip",
sha256 = "a16e2994b61f1793471083457b19d8067dfb65d9d1ab62efc3bcb7800166cb39",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpiliboldcommands_wpiliboldcommands-cpp_linuxathenastatic",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibOldCommands/wpilibOldCommands-cpp/2022.1.1-beta-2/wpilibOldCommands-cpp-2022.1.1-beta-2-linuxathenastatic.zip",
sha256 = "fa5548d979ecdcb4d74eaa16096857ce2432b84712193f2552dd161279dac0d0",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpiliboldcommands_wpiliboldcommands-cpp_windowsx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibOldCommands/wpilibOldCommands-cpp/2022.1.1-beta-2/wpilibOldCommands-cpp-2022.1.1-beta-2-windowsx86-64.zip",
sha256 = "b19607d933659d57bb3c5d50a6d0dc6601a5a911d0cc122514a8f53188b7e66c",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpiliboldcommands_wpiliboldcommands-cpp_linuxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibOldCommands/wpilibOldCommands-cpp/2022.1.1-beta-2/wpilibOldCommands-cpp-2022.1.1-beta-2-linuxx86-64.zip",
sha256 = "20b018876798bf5eb01df876122a4f1eb8368ec71262bb1010d8f61cde4bfa74",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpiliboldcommands_wpiliboldcommands-cpp_osxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibOldCommands/wpilibOldCommands-cpp/2022.1.1-beta-2/wpilibOldCommands-cpp-2022.1.1-beta-2-osxx86-64.zip",
sha256 = "dce98a73c9a3e5ea3fe12eff0b94de3693fdd125e0c652d12fb8e92b3a1821d3",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpiliboldcommands_wpiliboldcommands-cpp_windowsx86-64static",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibOldCommands/wpilibOldCommands-cpp/2022.1.1-beta-2/wpilibOldCommands-cpp-2022.1.1-beta-2-windowsx86-64static.zip",
sha256 = "975b3b6e3eda0d4d4c76d49ab0af4ffd506f48312916dcdaf3b64c75affbfe14",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpiliboldcommands_wpiliboldcommands-cpp_linuxx86-64static",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibOldCommands/wpilibOldCommands-cpp/2022.1.1-beta-2/wpilibOldCommands-cpp-2022.1.1-beta-2-linuxx86-64static.zip",
sha256 = "80cd322b99ace21617b36f379c9b90ced08e7a5948cf663535edb3ad0bdd49d5",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpiliboldcommands_wpiliboldcommands-cpp_osxx86-64static",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibOldCommands/wpilibOldCommands-cpp/2022.1.1-beta-2/wpilibOldCommands-cpp-2022.1.1-beta-2-osxx86-64static.zip",
sha256 = "12500d989f2ceaccfcff754e06b604825b9536c12257576d70379aedb6ce86a9",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpiliboldcommands_wpiliboldcommands-cpp_headers",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibOldCommands/wpilibOldCommands-cpp/2022.1.1-beta-2/wpilibOldCommands-cpp-2022.1.1-beta-2-headers.zip",
sha256 = "772f0c036a188c39e3d9d120e893b2162b0215b7a71ecb8d1b3eb823a3ee1f07",
build_file_content = cc_library_headers,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpiliboldcommands_wpiliboldcommands-cpp_sources",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibOldCommands/wpilibOldCommands-cpp/2022.1.1-beta-2/wpilibOldCommands-cpp-2022.1.1-beta-2-sources.zip",
sha256 = "320da177b7438845f28ebe12e44a2914ceed54fd7b09ea1ea1eb803cb4e5b4ae",
build_file_content = cc_library_sources,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpilibnewcommands_wpilibnewcommands-cpp_linuxathena",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibNewCommands/wpilibNewCommands-cpp/2022.1.1-beta-2/wpilibNewCommands-cpp-2022.1.1-beta-2-linuxathena.zip",
sha256 = "8b7a916a72210092d89e61f6242199df5fd74c40f7c063d286315ccb1804d3cc",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpilibnewcommands_wpilibnewcommands-cpp_linuxathenastatic",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibNewCommands/wpilibNewCommands-cpp/2022.1.1-beta-2/wpilibNewCommands-cpp-2022.1.1-beta-2-linuxathenastatic.zip",
sha256 = "d362486f071309f5656898d15564d65e463ba6dfc0cf588f2864a3792f2be041",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpilibnewcommands_wpilibnewcommands-cpp_windowsx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibNewCommands/wpilibNewCommands-cpp/2022.1.1-beta-2/wpilibNewCommands-cpp-2022.1.1-beta-2-windowsx86-64.zip",
sha256 = "b0520c8f7ba188705b6237945cfc1eac8783fe65edadaf46b64a43baca6218ea",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpilibnewcommands_wpilibnewcommands-cpp_linuxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibNewCommands/wpilibNewCommands-cpp/2022.1.1-beta-2/wpilibNewCommands-cpp-2022.1.1-beta-2-linuxx86-64.zip",
sha256 = "e7f0f3a8634f7f08f9ec8452731a2b75bd7e1d8d2cefb788774f57a468436b14",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpilibnewcommands_wpilibnewcommands-cpp_osxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibNewCommands/wpilibNewCommands-cpp/2022.1.1-beta-2/wpilibNewCommands-cpp-2022.1.1-beta-2-osxx86-64.zip",
sha256 = "1d112f86c8836cdb0b5c400b315aa9c2e647f2dd384f66f9a9b903d9ed231be2",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpilibnewcommands_wpilibnewcommands-cpp_windowsx86-64static",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibNewCommands/wpilibNewCommands-cpp/2022.1.1-beta-2/wpilibNewCommands-cpp-2022.1.1-beta-2-windowsx86-64static.zip",
sha256 = "b92c518c84cca74f75958194e77c35cb4c735c2bb56f0197da70a8407fa245de",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpilibnewcommands_wpilibnewcommands-cpp_linuxx86-64static",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibNewCommands/wpilibNewCommands-cpp/2022.1.1-beta-2/wpilibNewCommands-cpp-2022.1.1-beta-2-linuxx86-64static.zip",
sha256 = "b2d23092a4ada243cbcd2a6424abba4f9820618ac6682291be18eb91856c2ac2",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpilibnewcommands_wpilibnewcommands-cpp_osxx86-64static",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibNewCommands/wpilibNewCommands-cpp/2022.1.1-beta-2/wpilibNewCommands-cpp-2022.1.1-beta-2-osxx86-64static.zip",
sha256 = "27a02f4346788bed6e65b5c33353b57599722afcb28d139ce6fe98061c3e120c",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpilibnewcommands_wpilibnewcommands-cpp_headers",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibNewCommands/wpilibNewCommands-cpp/2022.1.1-beta-2/wpilibNewCommands-cpp-2022.1.1-beta-2-headers.zip",
sha256 = "b1c4ce4d637cf521be55e96c90f2166ffc462009074e6761d8b2ecd0612c0cb2",
build_file_content = cc_library_headers,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpilibnewcommands_wpilibnewcommands-cpp_sources",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibNewCommands/wpilibNewCommands-cpp/2022.1.1-beta-2/wpilibNewCommands-cpp-2022.1.1-beta-2-sources.zip",
sha256 = "f66ba765b0522381bf96eda08bf6e101ee8cd18ae1ef84c330d529d151c37c3c",
build_file_content = cc_library_sources,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_halsim_halsim_ds_socket_windowsx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/halsim/halsim_ds_socket/2022.1.1-beta-2/halsim_ds_socket-2022.1.1-beta-2-windowsx86-64.zip",
sha256 = "442e3046d5e64cd9c84c54dd081b3bf00bdc1a642098b49bf0f480f3330f49a9",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_halsim_halsim_ds_socket_linuxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/halsim/halsim_ds_socket/2022.1.1-beta-2/halsim_ds_socket-2022.1.1-beta-2-linuxx86-64.zip",
sha256 = "5a7748f8135512d111556bc359aa4b84951e268ca9d3efe0882c492d67bb1b93",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_halsim_halsim_ds_socket_osxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/halsim/halsim_ds_socket/2022.1.1-beta-2/halsim_ds_socket-2022.1.1-beta-2-osxx86-64.zip",
sha256 = "8458c5f016df2ab1211dbbdd36a156dc0433b80fa990215775d13feb49672ecd",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_halsim_halsim_gui_windowsx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/halsim/halsim_gui/2022.1.1-beta-2/halsim_gui-2022.1.1-beta-2-windowsx86-64.zip",
sha256 = "04cb0dadcf6a1d6e6731148a9e3c4fc8599cf392cd163b4a6cc83dd206f2edb6",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_halsim_halsim_gui_linuxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/halsim/halsim_gui/2022.1.1-beta-2/halsim_gui-2022.1.1-beta-2-linuxx86-64.zip",
sha256 = "19fcbda099a731bd138b41ecf5c7788114b782558b404cbc8812c30632df2adb",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_halsim_halsim_gui_osxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/halsim/halsim_gui/2022.1.1-beta-2/halsim_gui-2022.1.1-beta-2-osxx86-64.zip",
sha256 = "3df6a0446cc26f3735b21eacc641cc0714c35cfa9fc5c4dcdb6e361049f746a2",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_halsim_halsim_ws_client_windowsx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/halsim/halsim_ws_client/2022.1.1-beta-2/halsim_ws_client-2022.1.1-beta-2-windowsx86-64.zip",
sha256 = "e885cc8e255d9e3d60cafd4309d64afe17ba521f85836bc6f52066d49b00c0af",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_halsim_halsim_ws_client_linuxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/halsim/halsim_ws_client/2022.1.1-beta-2/halsim_ws_client-2022.1.1-beta-2-linuxx86-64.zip",
sha256 = "c6b4a9f385cc291bf93af3919f226204bca447e8633489e3b0d4dee4e4ae8fb4",
| |
default=0.0)
def process(self, timestream):
    """Zero out entries of the `weight` dataset that fall below a threshold.

    The threshold is the maximum of ``self.absolute_threshold`` and
    ``self.relative_threshold`` times the per-baseline mean weight.

    Parameters
    ----------
    timestream : `.core.container` with `weight` attribute
        Input container whose weights will be thresholded.

    Returns
    -------
    timestream : same as input timestream
        The input container with sub-threshold weights set to zero.
    """
    timestream.redistribute(["prod", "stack"])

    local_weight = timestream.weight[:]

    # Collapse the time (axis 0) and frequency (axis 2) axes to get a
    # per-baseline average weight.
    baseline_mean = local_weight.mean(axis=2).mean(axis=0)

    # Threshold is the larger of the absolute floor and the relative cut.
    cut = np.maximum(self.absolute_threshold, self.relative_threshold * baseline_mean)
    above = local_weight > cut[np.newaxis, :, np.newaxis]

    # Report the global fraction of samples falling below the threshold.
    n_above = timestream.comm.allreduce(np.sum(above))
    frac_above = n_above / float(np.prod(local_weight.global_shape))
    self.log.info(
        "%0.5f%% of data is below the weight threshold"
        % (100.0 * (1.0 - frac_above))
    )

    # Zero the weights of everything below threshold, keep the rest.
    timestream.weight[:] = np.where(above, local_weight, 0.0)

    return timestream
class RFISensitivityMask(task.SingleTask):
"""Slightly less crappy RFI masking.
Attributes
----------
mask_type : string, optional
One of 'mad', 'sumthreshold' or 'combine'.
Default is combine, which uses the sumthreshold everywhere
except around the transits of the Sun, CasA and CygA where it
applies the MAD mask to avoid masking out the transits.
include_pol : list of strings, optional
The list of polarisations to include. Default is to use all
polarisations.
remove_median : bool, optional
Remove median accross times for each frequency?
Recomended. Default: True.
sir : bool, optional
Apply scale invariant rank (SIR) operator on top of final mask?
We find that this is advisable while we still haven't flagged
out all the static bands properly. Default: True.
sigma : float, optional
The false positive rate of the flagger given as sigma value assuming
the non-RFI samples are Gaussian.
Used for the MAD and TV station flaggers.
max_m : int, optional
Maximum size of the SumThreshold window to use.
The default (8) seems to work well with sensitivity data.
start_threshold_sigma : float, optional
The desired threshold for the SumThreshold algorythm at the
final window size (determined by max m) given as a
number of standard deviations (to be estimated from the
sensitivity map excluding weight and static masks).
The default (8) seems to work well with sensitivity data
using the default max_m.
tv_fraction : float, optional
Number of bad samples in a digital TV channel that cause the whole
channel to be flagged.
tv_base_size : [int, int]
The size of the region used to estimate the baseline for the TV channel
detection.
tv_mad_size : [int, int]
The size of the region used to estimate the MAD for the TV channel detection.
"""
mask_type = config.enum(["mad", "sumthreshold", "combine"], default="combine")
include_pol = config.list_type(str, default=None)
remove_median = config.Property(proptype=bool, default=True)
sir = config.Property(proptype=bool, default=True)
sigma = config.Property(proptype=float, default=5.0)
max_m = config.Property(proptype=int, default=8)
start_threshold_sigma = config.Property(proptype=float, default=8)
tv_fraction = config.Property(proptype=float, default=0.5)
tv_base_size = config.list_type(int, length=2, default=(11, 3))
tv_mad_size = config.list_type(int, length=2, default=(201, 51))
def process(self, sensitivity):
"""Derive an RFI mask from sensitivity data.
Parameters
----------
sensitivity : containers.SystemSensitivity
Sensitivity data to derive the RFI mask from.
Returns
-------
rfimask : containers.RFIMask
RFI mask derived from sensitivity.
"""
## Constants
# Convert MAD to RMS
MAD_TO_RMS = 1.4826
# The difference between the exponents in the usual
# scaling of the RMS (n**0.5) and the scaling used
# in the sumthreshold algorithm (n**log2(1.5))
RMS_SCALING_DIFF = np.log2(1.5) - 0.5
# Distribute over polarisation as we need all times and frequencies
# available simultaneously
sensitivity.redistribute("pol")
# Divide sensitivity to get a radiometer test
radiometer = sensitivity.measured[:] * tools.invert_no_zero(
sensitivity.radiometer[:]
)
radiometer = mpiarray.MPIArray.wrap(radiometer, axis=1)
freq = sensitivity.freq
npol = len(sensitivity.pol)
nfreq = len(freq)
static_flag = ~self._static_rfi_mask_hook(freq)
madmask = mpiarray.MPIArray(
(npol, nfreq, len(sensitivity.time)), axis=0, dtype=np.bool
)
madmask[:] = False
stmask = mpiarray.MPIArray(
(npol, nfreq, len(sensitivity.time)), axis=0, dtype=np.bool
)
stmask[:] = False
for li, ii in madmask.enumerate(axis=0):
# Only process this polarisation if we should be including it,
# otherwise skip and let it be implicitly set to False (i.e. not
# masked)
if self.include_pol and sensitivity.pol[ii] not in self.include_pol:
continue
# Initial flag on weights equal to zero.
origflag = sensitivity.weight[:, ii] == 0.0
# Remove median at each frequency, if asked.
if self.remove_median:
for ff in range(nfreq):
radiometer[ff, li] -= np.median(
radiometer[ff, li][~origflag[ff]].view(np.ndarray)
)
# Combine weights with static flag
start_flag = origflag | static_flag[:, None]
# Obtain MAD and TV masks
this_madmask, tvmask = self._mad_tv_mask(
radiometer[:, li], start_flag, freq
)
# combine MAD and TV masks
madmask[li] = this_madmask | tvmask
# Add TV channels to ST start flag.
start_flag = start_flag | tvmask
# Determine initial threshold
med = np.median(radiometer[:, li][~start_flag].view(np.ndarray))
mad = np.median(abs(radiometer[:, li][~start_flag].view(np.ndarray) - med))
threshold1 = (
mad
* MAD_TO_RMS
* self.start_threshold_sigma
* self.max_m ** RMS_SCALING_DIFF
)
# SumThreshold mask
stmask[li] = rfi.sumthreshold(
radiometer[:, li],
self.max_m,
start_flag=start_flag,
threshold1=threshold1,
correct_for_missing=True,
)
# Perform an OR (.any) along the pol axis and reform into an MPIArray
# along the freq axis
madmask = mpiarray.MPIArray.wrap(madmask.redistribute(1).any(0), 0)
stmask = mpiarray.MPIArray.wrap(stmask.redistribute(1).any(0), 0)
# Pick which of the MAD or SumThreshold mask to use (or blend them)
if self.mask_type == "mad":
finalmask = madmask
elif self.mask_type == "sumthreshold":
finalmask = stmask
else:
# Combine ST and MAD masks
madtimes = self._combine_st_mad_hook(sensitivity.time)
finalmask = stmask
finalmask[:, madtimes] = madmask[:, madtimes]
# Collect all parts of the mask onto rank 1 and then broadcast to all ranks
finalmask = mpiarray.MPIArray.wrap(finalmask, 0).allgather()
# Apply scale invariant rank (SIR) operator, if asked for.
if self.sir:
finalmask = self._apply_sir(finalmask, static_flag)
# Create container to hold mask
rfimask = containers.RFIMask(axes_from=sensitivity)
rfimask.mask[:] = finalmask
return rfimask
def _combine_st_mad_hook(self, times):
"""Override this function to add a custom blending mask between the
SumThreshold and MAD flagged data.
This is useful to use the MAD algorithm around bright source
transits, where the SumThreshold begins to remove real signal.
Parameters
----------
times : np.ndarray[ntime]
Times of the data at floating point UNIX time.
Returns
-------
combine : np.ndarray[ntime]
Mixing array as a function of time. If `True` that sample will be
filled from the MAD, if `False` use the SumThreshold algorithm.
"""
return np.ones_like(times, dtype=np.bool)
def _static_rfi_mask_hook(self, freq):
    """Override this function to apply a static RFI mask to the data.

    Parameters
    ----------
    freq : np.ndarray[nfreq]
        1D array of frequencies in the data (in MHz).

    Returns
    -------
    mask : np.ndarray[nfreq]
        Mask array. True will include a frequency channel, False masks it out.
    """
    # Default: keep every channel. `np.bool` was removed in NumPy 1.24;
    # the builtin `bool` produces the identical dtype.
    return np.ones_like(freq, dtype=bool)
def _apply_sir(self, mask, baseflag, eta=0.2):
    """Expand ``mask`` with the scale-invariant rank (SIR) operator.

    Entries in ``baseflag`` are removed before running SIR so they do not
    seed the expansion, and the full original mask is re-imposed on the
    result afterwards.
    """
    # Run SIR on the mask with the base flags zeroed out.
    sir_input = np.copy(mask)
    sir_input[baseflag] = False
    expanded = rfi.sir(sir_input[:, np.newaxis, :], eta=eta)[:, 0, :]
    # Guarantee the original mask (including baseflag) stays masked.
    return expanded | mask
def _mad_tv_mask(self, data, start_flag, freq):
    """Use the specific scattered TV channel flagging.

    Returns a ``(madmask, tvmask)`` pair: the MAD threshold mask (with the
    starting flags folded back in) and the scattered-TV channel mask.
    """
    # Operate on a copy; zero the pre-flagged samples first.
    working = np.copy(data)
    working[start_flag] = 0.0

    # Scaled absolute deviations over the configured window sizes.
    deviations = mad(
        working,
        start_flag,
        base_size=self.tv_base_size,
        mad_size=self.tv_mad_size,
    )

    # NaNs mean too much data was missing; substitute a value large enough
    # to always exceed the flagging threshold.
    deviations = np.where(np.isnan(deviations), 2 * self.sigma, deviations)

    # Reflag for scattered TV emission.
    tv_flag = tv_channels_flag(
        deviations, freq, sigma=self.sigma, f=self.tv_fraction
    )

    # Threshold the deviations and make sure the start flag stays masked.
    mad_flag = (deviations > self.sigma) | start_flag

    return mad_flag, tv_flag
class RFIMask(task.SingleTask):
"""Crappy RFI masking.
Attributes
----------
sigma : float, optional
The false positive rate of the flagger given as sigma value assuming
the non-RFI samples are Gaussian.
tv_fraction : float, optional
Number of bad samples in a digital TV channel that cause the whole
channel to be flagged.
stack_ind : int
Which stack to process to derive flags for the whole dataset.
destripe : bool, optional
Deprecated option to remove the striping.
"""
sigma = config.Property(proptype=float, default=5.0)
tv_fraction = config.Property(proptype=float, default=0.5)
stack_ind = config.Property(proptype=int)
destripe = config.Property(proptype=bool, default=False)
def process(self, sstream):
"""Apply a day time mask.
Parameters
----------
sstream : containers.SiderealStream
Unmasked sidereal stack.
Returns
-------
mstream : containers.SiderealStream
Masked sidereal stream.
"""
sstream.redistribute("stack")
ssv = sstream.vis[:]
ssw = sstream.weight[:]
# Figure out which rank actually has the requested index
lstart = ssv.local_offset[1]
| |
# -*- coding: utf-8 -*-
"""
RPC
~~~
:author: <NAME> <<EMAIL>>
:copyright: (c) <NAME>, 2014
:license: This software makes use of the MIT Open Source License.
A copy of this license is included as ``LICENSE.md`` in
the root of the project.
"""
# stdlib
import abc
import copy
# canteen
from canteen import core
from canteen import base
from canteen import model
# canteen core
from canteen.core import runtime
from canteen.core import injection
# canteen HTTP
from canteen.logic import http
# canteen util
from canteen.util import decorators
from canteen.util import struct as datastructures
with core.Library('protorpc', strict=True) as (library, protorpc):
#### ==== Dependencies ==== ####
# remote / message packages
from protorpc import remote as premote
from protorpc import registry as pregistry
# message packages
from protorpc import messages as pmessages
from protorpc.messages import Field as ProtoField
from protorpc.messages import Message as ProtoMessage
# message types
from protorpc import message_types as pmessage_types
from protorpc.message_types import VoidMessage as ProtoVoidMessage
# WSGI internals
from protorpc.wsgi import util as pwsgi_util
from protorpc.wsgi import service as pservice
## Globals
_RPC_BASE_URI = '/_rpc/'  # URL prefix under which all RPC services are mounted
#### ==== Message Fields ==== ####
class VariantField(ProtoField):

    """ Field definition for a completely variant field. Allows containment
        of any valid Python value supported by Protobuf/ProtoRPC. """

    # every wire variant this field may legally carry — effectively any
    # scalar, message or enum type ProtoRPC knows about
    VARIANTS = frozenset([pmessages.Variant.DOUBLE, pmessages.Variant.FLOAT,
                          pmessages.Variant.BOOL, pmessages.Variant.INT64,
                          pmessages.Variant.UINT64, pmessages.Variant.SINT64,
                          pmessages.Variant.INT32, pmessages.Variant.UINT32,
                          pmessages.Variant.SINT32, pmessages.Variant.STRING,
                          pmessages.Variant.BYTES, pmessages.Variant.MESSAGE,
                          pmessages.Variant.ENUM])

    # fall back to string encoding when no variant is specified
    DEFAULT_VARIANT = pmessages.Variant.STRING

    # accepted Python-side value types (Python 2: `long`, `basestring`)
    type = (int, long, bool, basestring, dict, pmessages.Message)
class StringOrIntegerField(ProtoField):

    """ Field definition for a field that can contain either a string or
        integer. Usually used for key names/IDs or message IDs/hashes. """

    # wire variants permitted for this field
    VARIANTS = frozenset([pmessages.Variant.STRING, pmessages.Variant.DOUBLE,
                          pmessages.Variant.INT64, pmessages.Variant.INT32,
                          pmessages.Variant.UINT64, pmessages.Variant.UINT32])

    # encode as a string unless a variant is given explicitly
    DEFAULT_VARIANT = pmessages.Variant.STRING

    # accepted Python-side value types (Python 2: `long`, `basestring`)
    type = (int, long, basestring, dict, pmessages.Message)
#### ==== Message Classes ==== ####
class Key(ProtoMessage):

    """ Message for a :py:class:`canteen.model.Key`.

        Mirrors the structure of a canteen model key so keys can cross the
        RPC boundary; ``parent`` nests recursively for ancestor keys. """

    encoded = pmessages.StringField(1)  # encoded (`urlsafe`) key
    kind = pmessages.StringField(2)  # kind name for key
    id = StringOrIntegerField(3)  # integer or string ID for key
    namespace = pmessages.StringField(4)  # string namespace for key
    parent = pmessages.MessageField('Key', 5)  # recursive key message
class Echo(ProtoMessage):

    """ I am rubber and you are glue...

        Trivial echo message used for testing RPC round-trips. """

    # echoed payload; defaults to a friendly greeting
    message = pmessages.StringField(1, default='Hello, world!')
## expose message classes alias
# Central catalogue of message and field classes, so downstream code can
# reference e.g. ``rpc.messages.StringField`` without importing protorpc
# directly. Writable so applications can register their own entries.
messages = datastructures.WritableObjectProxy(**{

    # canteen-provided messages
    'Key': Key,  # message class for a canteen model key
    'Echo': Echo,  # echo message defaulting to `hello, world` for testing

    # builtin messages
    'Message': ProtoMessage,  # top-level protorpc message class
    'VoidMessage': ProtoVoidMessage,  # top-level protorpc void message

    # specific types
    'Enum': pmessages.Enum,  # enum descriptor / definition class
    'Field': pmessages.Field,  # top-level protorpc field class
    'FieldList': pmessages.FieldList,  # top-level protorpc field list class

    # field types
    'VariantField': VariantField,  # generic hold-anything property
    'BooleanField': pmessages.BooleanField,  # boolean true/false field
    'BytesField': pmessages.BytesField,  # low-level binary-safe string field
    'EnumField': pmessages.EnumField,  # field for referencing an `Enum` class
    'FloatField': pmessages.FloatField,  # field for a floating point number
    'IntegerField': pmessages.IntegerField,  # field for an integer
    'MessageField': pmessages.MessageField,  # field for a sub-message
    'StringField': pmessages.StringField,  # field for unicode or ASCII
    'DateTimeField': pmessage_types.DateTimeField  # field for datetime types
})
def service_mappings(services, registry_path='/_rpc/meta', protocols=None):
    """ Generates mappings from `url -> service` for registered Canteen RPC
        services.

        Takes an iterable of URL and service mappings, wraps with appropriate
        WSGI utilities, and registers with registry service for Endpoints/meta
        integration.

        :param services: Iterable of services, preferably a ``list`` of
          ``tuples``, where each is in the format ``(url, service)``. ``url``
          should be a relative prefix for matching requests, like
          ``/_rpc/hello`` for something called ``HelloService``.

        :param registry_path: Path prefix for ``RegistryService``, which
          returns metadata about registered RPC services. Required for
          integration with Google Cloud Endpoints or the various ProtoRPC
          client-side library generation options out there.

        :param protocols: Protocols to use for dispatching services. Custom
          protocol implementations are supported and two are shipped with
          canteen - ``JSON`` and ``msgpack`` RPC formats (note: not
          necessarily affiliated with any standards that are actually called
          "msgpack-rpc" or "jsonrpc").

        :returns: WSGI application prepared by :py:mod:`protorpc`, which, upon
          dispatch, will attempt to delegate response to the first matching
          ``Service`` implementation, as governed by the mappings generated in
          this function from ``services``. """

    if not protocols:
        # load canteen builtin protocols
        from canteen.base import protocol
        protocols = protocol.Protocol.mapping

    # accept a dict of {url: service} as well as an iterable of pairs
    if isinstance(services, dict):
        services = services.iteritems()

    mappings = []
    seen_paths = set()
    registry = {} if registry_path else None

    for path, factory in services:

        # a factory may carry the concrete class at `service_class`
        klass = factory.service_class if (
            hasattr(factory, 'service_class')) else factory

        if path in seen_paths:
            raise premote.ServiceConfigurationError(
                'Path %r is already defined in service mapping' %
                path.encode('utf-8'))
        seen_paths.add(path)

        if registry is not None:
            registry[path] = klass

        mappings.append(
            pservice.service_mapping(factory, path, protocols=protocols))

    # mount the registry service last, if requested
    if registry is not None:
        mappings.append(pservice.service_mapping(
            pregistry.RegistryService.new_factory(registry), registry_path,
            protocols=protocols))

    return pwsgi_util.first_found(mappings)
@http.url('rpc', (
r'%s<string:version>/<string:service>.<string:method>' % _RPC_BASE_URI))
class ServiceHandler(base.Handler):
""" Builtin concrete :py:class:`base.Handler` for use with RPC services. As
services are bound to names, they are registered here and eventually
mapped URLs are generated (via `service_mappings`).
Normally this handler is mapped at ``<version>/<service>.<method>``,
which supports both service types ('concrete' and 'registry') at the
following URLs (with examples inline):
- concrete: ``v1/hello.hi`` for a ``HelloService`` with ``hi`` method
- meta: ``meta/registry.services`` to describe a service's methods """
__services__ = {} # holds services mapped to their names
@classmethod
def add_service(cls, name, service, config=None, **kwargs):
    """ Add a service to this handler's local dispatch registry.
        Called from ``@rpc.service`` to mount a service to dispatch.

        :param name: Simple string name for the service. For instance,
          ``hello`` for ``HelloService``.

        :param service: Service class to be registered.

        :param config: Configuration can be passed as a dictionary
          (at ``config``) or with ``kwargs``, which override items in
          ``config``.

        :returns: The service class passed at ``service``. """

    config = config or {}
    config.update(kwargs)  # kwargs override items already in `config`
    # stored as a `(service, config)` pair, keyed by short name
    cls.__services__[name] = (service, config or {})
    return service
@decorators.classproperty
def services(cls):
    """ Iterator for all locally-registered services, presented as
        a class-level property.

        :yields: Each registered service as ``name, (service, config)``
          (the value is the pair stored by ``add_service``), much like
          ``dict.iteritems``. """

    # sorted for a deterministic iteration order (Python 2 `iterkeys`)
    for name in sorted(cls.__services__.iterkeys()):
        yield name, cls.__services__[name]
@classmethod
def get_service(cls, name):
    """ Retrieve a locally-registered service by name.

        :param name: Short name for the service. For instance,
          ``hello`` for ``HelloService``.

        :returns: Registered ``rpc.Service`` class at that name,
          or ``None`` if no matching service could be located. """

    entry = cls.__services__.get(name)
    if entry is not None:
        # entries are stored as `(service, config)` pairs
        return entry[0]
@classmethod
def describe(cls, json=False,
             javascript=False,
             callable='apptools.rpc.service.factory'):
    """ Describe locally-registered services in various formats.
        Exposed to template context as ``services.describe``, so that
        frontends can easily be notified of supported RPC services.

        Omitting both ``json`` and ``javascript`` will return a ``list``
        of ``tuples`` describing each service, as ``(name, methods, config)``.
        Passing ``json`` *and* ``javascript`` is unsupported and raises
        a ``TypeError`` describing your foolishness.

        :param json: Describe the services as a JSON string, suitable for
          placement on an HTML page. Boolean, defaults to ``False``.

        :param javascript: Generate JS that calls a function (assumed to
          already be present on ``window``) with a structure describing
          locally registered services, suitable for placement on an HTML
          page. Boolean, defaults to ``False``.

        :param callable: Opportunity to change the frontend callable function
          that will be passed the service manifest. Defaults to the hard-coded
          value ``apptools.rpc.service.factory`` for backwards compatibility.
          NOTE: this parameter shadows the `callable` builtin inside this
          method.

        :returns: ``list`` of ``tuples`` if requesting structured description,
          or a JSON string of that structure if ``json=True`` is passed, or JS
          code invoked with that JSON structure if ``javascript=True`` is
          passed. """

    # build the (name, methods, config) manifest first; `cls.services`
    # yields `(name, (service, config))` pairs
    _services = []
    for name, service in cls.services:
        service, config = service
        _services.append((
            name,  # service shortname
            # service methods
            tuple((name for name in service.all_remote_methods().iterkeys())),
            # service config
            config or {}
        ))

    if json and javascript:
        raise TypeError('Please pick between "JSON" and "JavaScript"'
                        ' output for services.')

    # `json`/`javascript` params shadow the stdlib module, hence the
    # local aliased imports below
    if json:  # generate JSON only?
        import json as serializer
        return serializer.dumps(_services)

    if javascript:  # generate javascript?
        import json as serializer
        return "%s(%s);" % (callable, serializer.dumps(_services))

    return _services  # or return raw?
@decorators.classproperty
def application(cls):
""" Utility for generating a WSGI application capable of dispatching
locally-registered services on ``ServiceHandler``, exposed as a class-
level property.
Uses :py:mod:`protorpc`'s fantastic `wsgi.utils.first_found`, which
will dispatch `rpc.Service` applications one at a time until a non-404
error occurs, in which case response | |
<gh_stars>0
# -*- coding: utf-8 -*-
"""Control the execution of several Processors. In some cases it is necessary
to run several Processors in a loop. When there is a large number of tiles and
a few subsets of these tiles need to be processed with different settings,
you might want to maximize the resource use of the Processors, so one can
finish as quickly as possible. Then there probably won't be enough resources
left to run a second Processor in parallel. This is where a Controller comes in,
which can control the execution of the Processors."""
import json
import logging
import os
from shutil import copyfile
from typing import List
from io import TextIOBase
import yaml
from click import echo, secho, exceptions
from tile_processor import processor, worker, tileconfig, db, output
log = logging.getLogger(__name__)
# logging.getLogger("pykwalify").setLevel(logging.WARNING)
class ConfigurationSchema:
    """Schema for validating a configuration file.

    A schema "database" (``schemas.json``) maps registered schema names to
    YAML schema files stored alongside it in the ``schemas`` directory.

    For registering and removing configuration schemas,
    see the `register-schema` and `remove-schema` commands.
    """

    def __init__(self, name=None):
        # Name of the schema to load, or None to only load the registry.
        self.name = name
        self.dir = os.path.join(os.path.dirname(__file__), "schemas")
        self.db_path = os.path.join(
            os.path.dirname(__file__), "schemas", "schemas.json"
        )
        # Registry dict: schema name -> schema file name.
        self.db = self.fetch()
        # Parsed schema for `name` (None when no name was given).
        self.schema = self.fetch(self.name) if self.name else None

    def fetch(self, name=None):
        """Load the schema database (schema.json) or a specific schema if
        name is provided.

        :param name: Registered schema name, or None for the whole registry.
        :returns: The registry dict when `name` is None; the parsed YAML
            schema when `name` is registered; None when it is not registered.
        :raises click.exceptions.ClickException: if the schema is registered
            but its file is missing.
        """
        # Single read of the registry serves both branches (previously the
        # file was opened separately in each branch).
        with open(self.db_path, "r") as fp:
            registry = json.load(fp)
        if name is None:
            return registry
        try:
            src = os.path.join(self.dir, registry[name])
        except KeyError:
            secho(
                message=f"The configuration schema '{name}' is not "
                f"registered, but it is expected by the "
                f"Controller. You can register the schema "
                f"with the 'register-schema' command.",
                fg="red",
            )
            return None
        try:
            with open(src, "r") as cfgp:
                return yaml.load(cfgp, Loader=yaml.FullLoader)
        except FileNotFoundError:
            raise exceptions.ClickException(
                message=f"The configuration schema '{name}' is registered, "
                f"but the file {src} is not found."
            )

    def register(self, name, path):
        """Register a configuration schema in the schema database
        (schema.json).

        Copies the schema file at `path` into the schema directory and
        records it in the registry under `name`.
        """
        fname = os.path.basename(path)
        try:
            dst = os.path.join(self.dir, fname)
            copyfile(path, dst)
        except Exception as e:
            log.exception(e)
            raise
        self.db[name] = fname
        try:
            with open(self.db_path, "w") as fp:
                json.dump(self.db, fp)
        except Exception as e:
            log.exception(e)
            raise
        # Typo fix: was "configuraton" in the user-facing message.
        echo(f"Registered the configuration schema '{fname}' as '{name}'")

    def remove(self, name=None):
        """Remove a schema from the database.

        :param name: Schema to remove; defaults to ``self.name``.
        """
        if name is None:
            name = self.name
        try:
            fname = self.db[name]
            del self.db[name]
            with open(self.db_path, "w") as fp:
                json.dump(self.db, fp)
        except KeyError:
            secho(
                f"Schema '{name}' not in the database, not removing anything",
                fg="yellow",
            )
            return
        try:
            p = os.path.join(self.dir, fname)
            os.remove(p)
            echo(f"Removed the configuration schema '{name}'")
        except FileNotFoundError:
            secho(
                f"Schema file '{fname}' is not in {self.dir}, "
                f"not removing anything",
                fg="yellow",
            )
            return

    def validate_configuration(self, config):
        """Validates a configuration file against the schema.

        Schema validation (previously done with `pykwalify
        <https://pykwalify.readthedocs.io/en/master/>`_) is currently
        disabled; the configuration is only parsed as YAML.
        """
        # FIXME: do something about the schemas, either remove completely or
        # use another library for validation. But better remove completely
        if config is None:
            # F541 fix: this was an f-string without any placeholders.
            log.warning("config is None")
            return None
        return yaml.load(config, Loader=yaml.FullLoader)
class ControllerFactory:
    """Registers and instantiates a Controller that launches the Processors."""

    def __init__(self):
        # Maps a controller name to the callable that produces it.
        self._controllers = {}

    def register_controller(self, key, controller):
        """Register an controller for use.

        :param key: Name of the controller
        :param controller: Can be a function, a class, or an object that
            implements .__call__()
        """
        self._controllers[key] = controller

    def create(self, key, **kwargs):
        """Instantiate a Processor registered under `key`.

        :raises ValueError: when `key` was never registered (or maps to a
            falsy entry).
        """
        factory = self._controllers.get(key)
        if factory:
            return factory(**kwargs)
        raise ValueError(key)
class Controller:
def __init__(
self,
configuration: TextIOBase,
threads: int = 1,
monitor_log: logging.Logger = None,
monitor_interval: int = None,
config_schema: str = None,
):
self.schema = ConfigurationSchema(config_schema)
self.cfg = self.parse_configuration(
configuration, threads, monitor_log, monitor_interval
)
self.processors = {}
def parse_configuration(
self,
configuration: TextIOBase,
threads: int,
monitor_log: logging.Logger,
monitor_interval: int,
) -> dict:
"""Parse, validate and prepare the configuration file.
:param monitor_log: Logger for monitoring
:param monitor_interval: Monitoring interval in seconds
:param configuration: A text stream, containing the configuration
:param threads: Number of threads
:return: Configuration
"""
cfg = {}
if configuration is None:
log.error("Configuration file is empty")
return cfg
else:
if isinstance(configuration, TextIOBase):
try:
cfg_stream = self.schema.validate_configuration(configuration)
log.info(f"Configuration file is valid")
except Exception as e:
log.exception(e)
raise
elif isinstance(configuration, dict):
# Makea copy so we can work with frozendict-s from dagster
cfg_stream = {**configuration}
else:
raise ValueError("configuration is neither TextIOBase nor a dictionary")
cfg["threads"] = int(threads)
cfg["monitor_log"] = monitor_log
cfg["monitor_interval"] = monitor_interval
cfg["config"] = cfg_stream
return cfg
def configure(self, tiles, processor_key: str, worker_key: str = None,
worker_class = None):
"""Configure the controller.
Input-specific subclasses need to implement this.
"""
if worker_key:
worker_init = worker.factory.create(worker_key)
else:
worker_init = worker_class
self.cfg["worker"] = worker_init.execute
# Configure the tiles (DBTiles in this case)
tilescfg = tileconfig.DbTiles(
conn=db.Db(**self.cfg["config"]["database"]),
tile_index_schema=db.Schema(self.cfg["config"]["features_tiles"]),
features_schema=db.Schema(self.cfg["config"]["features"]),
)
tilescfg.configure(tiles=tiles)
out_dir = output.DirOutput(self.cfg["config"]["output"]["dir"])
# Set up logic for processing different parts. Parst are required
# for example when processing a large area that needs different tile
# configurations. For instance the Netherlands with AHN2 and AHN3.
parts = {
"part_A": tilescfg,
}
# Create a processor for each part
for part, _tilescfg in parts.items():
_tilescfg.output = output.Output(
dir=output.DirOutput(out_dir.join_path(part))
)
proc = processor.factory.create(
processor_key, name=part, tiles=_tilescfg
)
self.processors[proc] = part
log.info(f"Configured {self.__class__.__name__}")
def run(self, restart: int = 0) -> dict:
"""Run the Controller.
:return: `(processor.name : [tile ID])`
Returns the tile IDs per Processor that failed even after
restarts
"""
log.info(f"Running {self.__class__.__name__}")
results = {}
for proc in self.processors:
proc.configure(**self.cfg)
res = proc.process(restart=restart)
results[proc.name] = res
log.info(f"Done {self.__class__.__name__}. {results}")
return results
class ExampleController(Controller):
    """Controller for tiles that are stored in PostgreSQL."""

    def configure(self, tiles, processor_key: str, worker_key: str = None,
                  worker_class = None):
        """Configure the controller.

        Chooses file-based tiles for the "Example" worker and database-backed
        tiles otherwise, then creates one Processor per part.
        """
        # Resolve the worker from the registered factory (by key) or use the
        # explicitly supplied class.
        if worker_key:
            worker_init = worker.factory.create(worker_key)
        else:
            worker_init = worker_class
        self.cfg["worker"] = worker_init.execute
        if worker_key == "Example":
            # File-based tiles for the plain Example worker.
            tilescfg = tileconfig.FileTiles()
            tilescfg.configure(tiles=tiles)
            out_dir = output.DirOutput(self.cfg["config"]["output"]["dir"])
            # Set up logic
            parts = {"part_A": tilescfg, "part_B": tilescfg}
        else:
            # For the ExampleDb worker
            tilescfg = tileconfig.DbTiles(
                conn=db.Db(**self.cfg["config"]["database"]),
                tile_index_schema=db.Schema(
                    self.cfg["config"]["features_tiles"]
                ),
                features_schema=db.Schema(self.cfg["config"]["features"]),
            )
            tilescfg.configure(tiles=tiles)
            out_dir = output.DirOutput(self.cfg["config"]["output"]["dir"])
            # Set up logic
            parts = {
                "part_A": tilescfg,
            }
        # One Processor per part, each writing to its own output subdir.
        for part, _tilescfg in parts.items():
            _tilescfg.output = output.Output(
                dir=output.DirOutput(out_dir.join_path(part))
            )
            proc = processor.factory.create(
                processor_key, name=part, tiles=_tilescfg
            )
            self.processors[proc] = part
        log.info(f"Configured {self.__class__.__name__}")
class AHNController(Controller):
"""Controller for AHN when only one version of AHN need to be processed."""
def parse_configuration(
self,
configuration: TextIOBase,
threads: int,
monitor_log: logging.Logger,
monitor_interval: int,
) -> dict:
"""Parse, validate and prepare the configuration file.
:param monitor_log:
:param monitor_interval:
:param config: A text stream, containing the configuration
:param threads: Number of threads
:return: Configuration
"""
cfg = {}
if configuration is None:
log.error("Configuration is empty")
return cfg
else:
if isinstance(configuration, TextIOBase):
try:
cfg_stream = self.schema.validate_configuration(configuration)
log.info(f"Configuration file is valid")
except Exception as e:
log.exception(e)
raise
elif isinstance(configuration, dict):
# Makea copy so we can work with frozendict-s from dagster
cfg_stream = {**configuration}
else:
raise ValueError("configuration is neither TextIOBase nor a dictionary")
cfg["config"] = cfg_stream
directory_mapping = {}
for mapping in cfg_stream["elevation"]["directories"]:
dir, properties = {**mapping}.popitem()
if not os.path.isabs(dir):
raise ValueError(
f"Path {dir} is not absolute in "
f"elevation:directories"
)
directory_mapping[dir] = properties
cfg["config"]["directory_mapping"] = directory_mapping
cfg["threads"] = int(threads)
cfg["monitor_log"] = monitor_log
cfg["monitor_interval"] = monitor_interval
return cfg
def configure(
self, tiles, processor_key: str, worker_key: str = None, worker_class = None, restart: int = 0
):
"""Configure the control logic."""
if worker_key:
worker_init = worker.factory.create(worker_key)
else:
worker_init = worker_class
self.cfg["worker"] = worker_init.execute
# Configure the tiles
conn = db.Db(**self.cfg["config"]["database"])
elevation_tiles = tileconfig.DbTiles(
conn=conn,
tile_index_schema=db.Schema(self.cfg["config"]["elevation_tiles"]),
)
feature_tiles = tileconfig.DbTiles(
conn=conn,
tile_index_schema=db.Schema(self.cfg["config"]["features_tiles"]),
features_schema=db.Schema(self.cfg["config"]["features"]),
)
# Configure feature tiles with elevation from AHN3
ahntiles = tileconfig.DbTilesAHN(
conn=conn,
elevation_tiles=elevation_tiles,
feature_tiles=feature_tiles,
)
ahntiles.configure(
tiles=tiles,
version=None,
directory_mapping=self.cfg["config"]["directory_mapping"],
tin=False,
)
# Set up outputs
output_obj = output.Output()
if "database" in self.cfg["config"]["output"]:
output_obj.db = output.DbOutput(
conn=db.Db(**self.cfg["config"]["output"]["database"])
)
if "dir" in self.cfg["config"]["output"]:
# FIXME: Output.dir should be a dict maybe by default?
output_obj.dir = {}
if isinstance(self.cfg["config"]["output"]["dir"], str):
output_obj.dir["path"] = output.DirOutput(
path=self.cfg["config"]["output"]["dir"]
)
elif isinstance(self.cfg["config"]["output"]["dir"], dict):
for k,dirpath in self.cfg["config"]["output"]["dir"].items():
output_obj.dir[k] = output.DirOutput(dirpath)
else:
raise ValueError(f'Expected str or dict in {self.cfg["config"]["output"]["dir"]}')
for k, | |
('alcohol', FloatTensorType(shape=[None, 1])),
('quality', FloatTensorType(shape=[None, 1])),
('color', StringTensorType(shape=[None, 1]))
]
pipe.fit(X_train)
model_onnx = convert_sklearn(
pipe, initial_types=init_types, target_opset=TARGET_OPSET)
oinf = InferenceSession(model_onnx.SerializeToString())
pred = pipe.transform(X_train)
inputs = {c: X_train[c].values for c in X_train.columns}
inputs = {c: v.reshape((v.shape[0], 1)) for c, v in inputs.items()}
onxp = oinf.run(None, inputs)
got = onxp[0]
assert_almost_equal(pred, got)
@ignore_warnings(category=(FutureWarning, UserWarning))
def test_pipeline_tfidf_svc(self):
    """TfidfVectorizer + SVC pipeline: convert to ONNX with both a
    [None, 1] and a [None] string input shape and compare labels and
    probabilities against scikit-learn's output."""
    pipe = Pipeline([
        ('tfidf', TfidfVectorizer()),
        ('clf_svc', SVC(probability=True, kernel='linear'))])
    data = numpy.array(["first sentance", "second sentence",
                        "many sentances", "dummy sentance",
                        "no sentance at all"])
    y = numpy.array([0, 0, 1, 0, 1])
    pipe.fit(data, y)
    expected_label = pipe.predict(data)
    expected_proba = pipe.predict_proba(data)
    df = pandas.DataFrame(data)
    df.columns = ['text']

    # first conversion if shape=[None, 1]
    model_onnx = convert_sklearn(
        pipe, initial_types=[('text', StringTensorType([None, 1]))],
        target_opset=TARGET_OPSET,
        options={id(pipe): {'zipmap': False}})
    sess = InferenceSession(model_onnx.SerializeToString())
    got = sess.run(None, {'text': data.reshape((-1, 1))})
    assert_almost_equal(expected_proba, got[1])
    assert_almost_equal(expected_label, got[0])
    # sess.run(None, {'text': df}) --> failures
    # sess.run(None, {'text': df["text"]}) --> failures

    # second conversion with shape=[None]
    model_onnx = convert_sklearn(
        pipe, initial_types=[('text', StringTensorType([None]))],
        target_opset=TARGET_OPSET,
        options={id(pipe): {'zipmap': False}})
    sess = InferenceSession(model_onnx.SerializeToString())
    got = sess.run(None, {'text': data})
    assert_almost_equal(expected_proba, got[1])
    assert_almost_equal(expected_label, got[0])
    # sess.run(None, {'text': df}) failure
    # sess.run(None, {'text': df["text"]}) failure
    sess.run(None, {'text': df["text"].values})  # success
@ignore_warnings(category=(FutureWarning, UserWarning))
def test_pipeline_voting_tfidf_svc(self):
    """Soft VotingClassifier over three TF-IDF pipelines (SVC, SGD, MNB):
    check the converted ONNX model against scikit-learn's predictions."""
    pipe1 = Pipeline([
        ('tfidf1', TfidfVectorizer()),
        ('svc', SVC(probability=True, kernel='linear'))])
    pipe2 = Pipeline([
        ('tfidf2', TfidfVectorizer(norm='l2', use_idf=False)),
        ('sgd', SGDClassifier(alpha=0.0001, penalty='l2',
                              loss='modified_huber'))])
    pipe3 = Pipeline([
        ('tfidf3', TfidfVectorizer()),
        ('mnb', MultinomialNB())])
    voting = VotingClassifier(
        [('p1', pipe1), ('p2', pipe2), ('p3', pipe3)],
        voting='soft', flatten_transform=False)
    data = numpy.array(["first sentance", "second sentence",
                        "many sentances", "dummy sentance",
                        "no sentance at all"])
    y = numpy.array([0, 0, 1, 0, 1])
    voting.fit(data, y)
    expected_label = voting.predict(data)
    expected_proba = voting.predict_proba(data)
    df = pandas.DataFrame(data)
    df.columns = ['text']

    model_onnx = convert_sklearn(
        voting, initial_types=[('text', StringTensorType([None, 1]))],
        target_opset=TARGET_OPSET,
        options={id(voting): {'zipmap': False}})
    # with open("debug.onnx", "wb") as f:
    #     f.write(model_onnx.SerializeToString())
    sess = InferenceSession(model_onnx.SerializeToString())
    got = sess.run(None, {'text': data.reshape((-1, 1))})
    # probabilities compared with reduced precision (soft-vote averaging)
    assert_almost_equal(expected_proba, got[1], decimal=5)
    assert_almost_equal(expected_label, got[0])
@ignore_warnings(category=(FutureWarning, UserWarning))
def test_pipeline_pipeline_voting_tfidf_svc(self):
    """Like test_pipeline_voting_tfidf_svc, but the first pipeline nests a
    FeatureUnion of scalers after TF-IDF to exercise nested-pipeline
    conversion."""
    pipe1 = Pipeline([
        ('ntfidf1', Pipeline([
            ('tfidf1', TfidfVectorizer()),
            ('scaler', FeatureUnion([
                ('scaler2', StandardScaler(with_mean=False)),
                ('mm', MaxAbsScaler())]))])),
        ('svc', SVC(probability=True, kernel='linear'))])
    pipe2 = Pipeline([
        ('tfidf2', TfidfVectorizer(norm='l2', use_idf=False)),
        ('sgd', SGDClassifier(alpha=0.0001, penalty='l2',
                              loss='modified_huber'))])
    pipe3 = Pipeline([
        ('tfidf3', TfidfVectorizer()),
        ('mnb', MultinomialNB())])
    voting = VotingClassifier(
        [('p1', pipe1), ('p2', pipe2), ('p3', pipe3)],
        voting='soft', flatten_transform=False)
    data = numpy.array(["first sentance", "second sentence",
                        "many sentances", "dummy sentance",
                        "no sentance at all"])
    y = numpy.array([0, 0, 1, 0, 1])
    voting.fit(data, y)
    expected_label = voting.predict(data)
    expected_proba = voting.predict_proba(data)
    df = pandas.DataFrame(data)
    df.columns = ['text']

    model_onnx = convert_sklearn(
        voting, initial_types=[('text', StringTensorType([None, 1]))],
        target_opset=TARGET_OPSET,
        options={id(voting): {'zipmap': False}})
    # with open("debug.onnx", "wb") as f:
    #     f.write(model_onnx.SerializeToString())
    sess = InferenceSession(model_onnx.SerializeToString())
    got = sess.run(None, {'text': data.reshape((-1, 1))})
    assert_almost_equal(expected_proba, got[1])
    assert_almost_equal(expected_label, got[0])
@unittest.skipIf(TARGET_OPSET < 11,
                 reason="SequenceConstruct not available")
@unittest.skipIf(
    not check_scikit_version(),
    reason="Scikit 0.21 too old")
@ignore_warnings(category=(FutureWarning, UserWarning))
def test_pipeline_pipeline_rf(self):
    """ColumnTransformer (OneHotEncoder + CountVectorizer) feeding a
    MultiOutputClassifier of random forests: compare the ONNX model's
    labels and per-output probabilities against scikit-learn."""
    cat_feat = ['A', 'B']
    text_feat = 'TEXT'
    pipe = Pipeline(steps=[
        ('preprocessor', ColumnTransformer(
            transformers=[
                ('cat_tr', OneHotEncoder(handle_unknown='ignore'),
                 cat_feat),
                ('count_vect', Pipeline(steps=[
                    ('count_vect', CountVectorizer(
                        max_df=0.8, min_df=0.05, max_features=1000))]),
                 text_feat)])),
        ('classifier', MultiOutputClassifier(
            estimator=RandomForestClassifier(
                n_estimators=5, max_depth=5)))])
    # rows are samples after the transpose; columns map to A, B, TEXT
    data = numpy.array([
        ["cat1", "cat2", "cat3", "cat1", "cat2"],
        ["C1", "C2", "C3", "C3", "C4"],
        ["first sentance", "second sentence",
         "many sentances", "dummy sentance",
         "no sentance at all"]]).T
    y = numpy.array([[0, 1], [0, 1], [1, 0], [0, 1], [1, 1]])
    df = pandas.DataFrame(data, columns=['A', 'B', 'TEXT'])
    pipe.fit(df, y)
    expected_label = pipe.predict(df)
    expected_proba = pipe.predict_proba(df)

    model_onnx = convert_sklearn(
        pipe, initial_types=[
            ('A', StringTensorType([None, 1])),
            ('B', StringTensorType([None, 1])),
            ('TEXT', StringTensorType([None, 1]))],
        target_opset=TARGET_OPSET,
        options={MultiOutputClassifier: {'zipmap': False}})
    # with open("debug.onnx", "wb") as f:
    #     f.write(model_onnx.SerializeToString())
    sess = InferenceSession(model_onnx.SerializeToString())
    got = sess.run(None, {'A': data[:, :1], 'B': data[:, 1:2],
                          'TEXT': data[:, 2:]})
    # multi-output: probabilities come back as one array per output
    self.assertEqual(len(expected_proba), len(got[1]))
    for e, g in zip(expected_proba, got[1]):
        assert_almost_equal(e, g, decimal=5)
    assert_almost_equal(expected_label, got[0])
@unittest.skipIf(TARGET_OPSET < 11,
                 reason="SequenceConstruct not available")
@unittest.skipIf(
    not check_scikit_version(),
    reason="Scikit 0.21 too old")
@ignore_warnings(category=(DeprecationWarning, FutureWarning, UserWarning))
def test_issue_712_multio(self):
    """Regression test for issue #712: DataFrame-driven `to_onnx` of a
    ColumnTransformer + MultiOutputClassifier pipeline."""
    dfx = pandas.DataFrame(
        {'CAT1': ['985332', '985333', '985334', '985335', '985336'],
         'CAT2': ['1985332', '1985333', '1985334', '1985335', '1985336'],
         'TEXT': ["abc abc", "abc def", "def ghj", "abcdef", "abc ii"]})
    # two targets -> multi-output classification
    dfy = pandas.DataFrame(
        {'REAL': [5, 6, 7, 6, 5],
         'CATY': [0, 1, 0, 1, 0]})

    cat_features = ['CAT1', 'CAT2']
    categorical_transformer = OneHotEncoder(handle_unknown='ignore')
    textual_feature = 'TEXT'
    count_vect_transformer = Pipeline(steps=[
        ('count_vect', CountVectorizer(
            max_df=0.8, min_df=0.05, max_features=1000))])
    preprocessor = ColumnTransformer(
        transformers=[
            ('cat_transform', categorical_transformer, cat_features),
            ('count_vector', count_vect_transformer, textual_feature)])
    model_RF = RandomForestClassifier(random_state=42, max_depth=50)
    rf_clf = Pipeline(steps=[
        ('preprocessor', preprocessor),
        ('classifier', MultiOutputClassifier(estimator=model_RF))])
    rf_clf.fit(dfx, dfy)
    expected_label = rf_clf.predict(dfx)
    expected_proba = rf_clf.predict_proba(dfx)

    # ONNX expects 2D column inputs, hence the reshape
    inputs = {'CAT1': dfx['CAT1'].values.reshape((-1, 1)),
              'CAT2': dfx['CAT2'].values.reshape((-1, 1)),
              'TEXT': dfx['TEXT'].values.reshape((-1, 1))}
    onx = to_onnx(rf_clf, dfx, target_opset=TARGET_OPSET,
                  options={MultiOutputClassifier: {'zipmap': False}})
    sess = InferenceSession(onx.SerializeToString())
    got = sess.run(None, inputs)
    assert_almost_equal(expected_label, got[0])
    # one probability array per output
    self.assertEqual(len(expected_proba), len(got[1]))
    for e, g in zip(expected_proba, got[1]):
        assert_almost_equal(e, g, decimal=5)
@unittest.skipIf(TARGET_OPSET < 11,
reason="SequenceConstruct not available")
@unittest.skipIf(
not check_scikit_version(),
reason="Scikit 0.21 too old")
@ignore_warnings(category=(DeprecationWarning, FutureWarning, UserWarning))
def test_issue_712_svc_multio(self):
for sub_model in [LinearSVC(), SVC()]:
for method in ["sigmoid", "isotonic"]:
with self.subTest(sub_model=sub_model, method=method):
dfx = pandas.DataFrame(
{'CAT1': ['985332', '985333', '985334', '985335',
'985336', '985332', '985333', '985334',
'985335', '985336', '985336'],
'CAT2': ['1985332', '1985333', '1985334', '1985335',
'1985336', '1985332', '1985333', '1985334',
'1985335', '1985336', '1985336'],
'TEXT': ["abc abc", "abc def", "def ghj", "abcdef",
"abc ii", "abc abc", "abc def", "def ghj",
"abcdef", "abc ii", "abc abc"]})
dfy = pandas.DataFrame(
{'REAL': [5, 6, 7, 6, 5, 5, 6, 7, 5, 6, 7],
'CATY': [0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0]})
cat_features = ['CAT1', 'CAT2']
categorical_transformer = OneHotEncoder(
handle_unknown='ignore')
textual_feature = 'TEXT'
count_vect_transformer = Pipeline(steps=[
('count_vect', CountVectorizer(
max_df=0.8, min_df=0.05, max_features=1000))])
preprocessor = ColumnTransformer(
transformers=[
('cat_transform', categorical_transformer,
cat_features),
('count_vector', count_vect_transformer,
textual_feature)])
model_SVC = CalibratedClassifierCV(
sub_model, cv=2, method=method)
rf_clf = Pipeline(steps=[
('preprocessor', preprocessor),
('classifier', MultiOutputClassifier(
estimator=model_SVC))])
rf_clf.fit(dfx, dfy)
expected_label = rf_clf.predict(dfx)
expected_proba = rf_clf.predict_proba(dfx)
inputs = {'CAT1': dfx['CAT1'].values.reshape((-1, 1)),
'CAT2': dfx['CAT2'].values.reshape((-1, 1)),
'TEXT': dfx['TEXT'].values.reshape((-1, 1))}
onx = to_onnx(
rf_clf, dfx, target_opset=TARGET_OPSET,
options={MultiOutputClassifier: {'zipmap': False}})
sess = InferenceSession(onx.SerializeToString())
got = sess.run(None, inputs)
assert_almost_equal(expected_label, got[0])
self.assertEqual(len(expected_proba), len(got[1]))
for e, g in zip(expected_proba, got[1]):
if method == "isotonic" and isinstance(sub_model, SVC):
# float/double issues
assert_almost_equal(e[2:4], g[2:4], decimal=3)
else:
assert_almost_equal(e, g, decimal=5)
@unittest.skipIf(TARGET_OPSET < 11,
reason="SequenceConstruct not available")
@unittest.skipIf(
not check_scikit_version(),
reason="Scikit 0.21 too old")
@ignore_warnings(category=(DeprecationWarning, FutureWarning, UserWarning))
def test_issue_712_svc_binary0(self):
for sub_model in [LinearSVC(), SVC()]:
for method in ["sigmoid", "isotonic"]:
with self.subTest(sub_model=sub_model, method=method):
dfx = pandas.DataFrame(
{'CAT1': ['985332', '985333', '985334', '985335',
'985336', '985332', '985333', '985334',
'985335', '985336', '985336'],
'CAT2': ['1985332', '1985333', '1985334', '1985335',
'1985336', '1985332', '1985333', '1985334',
'1985335', '1985336', '1985336'],
'TEXT': ["abc abc", "abc def", "def ghj", "abcdef",
"abc ii", "abc abc", "abc def", "def ghj",
"abcdef", "abc ii", "abc abc"]})
dfy = numpy.array([0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0])
cat_features = ['CAT1', 'CAT2']
categorical_transformer = OneHotEncoder(
handle_unknown='ignore')
textual_feature = 'TEXT'
count_vect_transformer = Pipeline(steps=[
('count_vect', CountVectorizer(
max_df=0.8, min_df=0.05, max_features=1000))])
preprocessor = ColumnTransformer(
transformers=[
('cat_transform', categorical_transformer,
cat_features),
('count_vector', count_vect_transformer,
textual_feature)])
model_SVC = CalibratedClassifierCV(
sub_model, cv=2, method=method)
rf_clf = Pipeline(steps=[
('preprocessor', preprocessor),
('classifier', model_SVC)])
rf_clf.fit(dfx, dfy)
expected_label = rf_clf.predict(dfx)
expected_proba = rf_clf.predict_proba(dfx)
inputs = {'CAT1': dfx['CAT1'].values.reshape((-1, 1)),
'CAT2': dfx['CAT2'].values.reshape((-1, 1)),
'TEXT': dfx['TEXT'].values.reshape((-1, 1))}
onx = to_onnx(rf_clf, dfx, target_opset=TARGET_OPSET,
options={'zipmap': False})
sess = InferenceSession(onx.SerializeToString())
got = sess.run(None, inputs)
assert_almost_equal(expected_label, got[0])
assert_almost_equal(expected_proba, got[1], decimal=5)
@unittest.skipIf(TARGET_OPSET < 11,
reason="SequenceConstruct not available")
@unittest.skipIf(
not check_scikit_version(),
reason="Scikit 0.21 too old")
@ignore_warnings(category=(DeprecationWarning, FutureWarning, UserWarning))
def test_issue_712_svc_multi(self):
for sub_model in [SVC(), LinearSVC()]:
for method in ["isotonic", "sigmoid"]:
with self.subTest(sub_model=sub_model, method=method):
dfx = pandas.DataFrame(
{'CAT1': ['985332', '985333', '985334', '985335',
'985336', '985332', '985333', '985334',
'985335', '985336', '985336'],
'CAT2': ['1985332', '1985333', '1985334', '1985335',
'1985336', '1985332', '1985333', '1985334',
'1985335', '1985336', '1985336'],
'TEXT': ["abc abc", "abc def", "def ghj", "abcdef",
"abc ii", "abc abc", "abc def", "def ghj",
"abcdef", "abc ii", "abc abc"]})
dfy = numpy.array([5, 6, 7, 6, 5, 5, 8, 7, 5, 6, 8])
cat_features = ['CAT1', 'CAT2']
categorical_transformer = OneHotEncoder(
handle_unknown='ignore')
textual_feature = 'TEXT'
count_vect_transformer = Pipeline(steps=[
('count_vect', CountVectorizer(
max_df=0.8, min_df=0.05, max_features=1000))])
preprocessor = ColumnTransformer(
transformers=[
('cat_transform', categorical_transformer,
cat_features),
('count_vector', count_vect_transformer,
textual_feature)])
model_SVC = CalibratedClassifierCV(
sub_model, cv=2, method=method)
rf_clf = Pipeline(steps=[
('preprocessor', preprocessor),
('classifier', model_SVC)])
rf_clf.fit(dfx, dfy)
expected_label = rf_clf.predict(dfx)
expected_proba = rf_clf.predict_proba(dfx)
inputs = {'CAT1': dfx['CAT1'].values.reshape((-1, 1)),
'CAT2': dfx['CAT2'].values.reshape((-1, 1)),
'TEXT': dfx['TEXT'].values.reshape((-1, 1))}
onx = to_onnx(rf_clf, dfx, target_opset=TARGET_OPSET,
options={'zipmap': False})
sess = InferenceSession(onx.SerializeToString())
got = sess.run(None, inputs)
assert_almost_equal(expected_label, got[0])
if method == "isotonic":
# float/double issues
assert_almost_equal(
expected_proba[2:4], got[1][2:4], decimal=3)
else:
assert_almost_equal(expected_proba, got[1], decimal=5)
@unittest.skipIf(TARGET_OPSET < 11,
reason="SequenceConstruct not available")
@unittest.skipIf(
not check_scikit_version(),
reason="Scikit 0.21 too old")
@ignore_warnings(category=(FutureWarning, | |
# -*- coding: utf-8 -*-
#
# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
# holder of all proprietary rights on this computer program.
# Using this computer program means that you agree to the terms
# in the LICENSE file included with this software distribution.
# Any use not explicitly granted by the LICENSE is prohibited.
#
# Copyright©2019 Max-Planck-Gesellschaft zur Förderung
# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
# for Intelligent Systems. All rights reserved.
#
# For comments or questions, please email us at <EMAIL>
# For commercial licensing contact, please contact <EMAIL>
import os, sys
import torch
import torchvision
import torch.nn.functional as F
import torch.nn as nn
import numpy as np
from skimage.io import imread
import cv2
from .models.encoders import ResnetEncoder, MLP, HRNEncoder
from .models.moderators import TempSoftmaxFusion
from .models.FLAME import FLAMETex
from .models.SMPLX import SMPLX
from .utils import util
from .utils import rotation_converter as converter
from .utils import tensor_cropper
from .utils.config import cfg
class PIXIE(object):
def __init__(self, config=None, device='cuda:0'):
if config is None:
self.cfg = cfg
else:
self.cfg = config
self.device = device
# parameters setting
self.param_list_dict = {}
for lst in self.cfg.params.keys():
param_list = cfg.params.get(lst)
self.param_list_dict[lst] = {i:cfg.model.get('n_'+i) for i in param_list}
# Build the models
self._create_model()
# Set up the cropping modules used to generate face/hand crops from the body predictions
self._setup_cropper()
def _setup_cropper(self):
self.Cropper = {}
for crop_part in ['head', 'hand']:
data_cfg = self.cfg.dataset[crop_part]
scale_size = (data_cfg.scale_min + data_cfg.scale_max)*0.5
self.Cropper[crop_part] = tensor_cropper.Cropper(
crop_size=data_cfg.image_size,
scale=[scale_size, scale_size],
trans_scale = 0)
def _create_model(self):
self.model_dict = {}
# Build all image encoders
# Hand encoder only works for right hand, for left hand, flip inputs and flip the results back
self.Encoder = {}
for key in self.cfg.network.encoder.keys():
if self.cfg.network.encoder.get(key).type == 'resnet50':
self.Encoder[key] = ResnetEncoder().to(self.device)
elif self.cfg.network.encoder.get(key).type == 'hrnet':
self.Encoder[key] = HRNEncoder().to(self.device)
self.model_dict[f'Encoder_{key}'] = self.Encoder[key].state_dict()
# Build the parameter regressors
self.Regressor = {}
for key in self.cfg.network.regressor.keys():
n_output = sum(self.param_list_dict[f'{key}_list'].values())
channels = [2048] + self.cfg.network.regressor.get(key).channels + [n_output]
if self.cfg.network.regressor.get(key).type == 'mlp':
self.Regressor[key] = MLP(channels=channels).to(self.device)
self.model_dict[f'Regressor_{key}'] = self.Regressor[key].state_dict()
# Build the extractors
# to extract separate head/left hand/right hand feature from body feature
self.Extractor = {}
for key in self.cfg.network.extractor.keys():
channels = [2048] + self.cfg.network.extractor.get(key).channels + [2048]
if self.cfg.network.extractor.get(key).type == 'mlp':
self.Extractor[key] = MLP(channels=channels).to(self.device)
self.model_dict[f'Extractor_{key}'] = self.Extractor[key].state_dict()
# Build the moderators
self.Moderator = {}
for key in self.cfg.network.moderator.keys():
share_part = key.split('_')[0]
detach_inputs = self.cfg.network.moderator.get(key).detach_inputs
detach_feature = self.cfg.network.moderator.get(key).detach_feature
channels = [2048*2] + self.cfg.network.moderator.get(key).channels + [2]
self.Moderator[key] = <PASSWORD>(
detach_inputs=detach_inputs, detach_feature=detach_feature,
channels=channels).to(self.device)
self.model_dict[f'Moderator_{key}'] = self.Moderator[key].state_dict()
# Build the SMPL-X body model, which we also use to represent faces and
# hands, using the relevant parts only
self.smplx = SMPLX(self.cfg.model).to(self.device)
self.part_indices = self.smplx.part_indices
# Build the FLAME texture space
if self.cfg.model.use_tex:
self.flametex = FLAMETex(self.cfg.model).to(self.device)
#-- resume model
model_path = self.cfg.pretrained_modelpath
if os.path.exists(model_path):
checkpoint = torch.load(model_path)
for key in self.model_dict.keys():
util.copy_state_dict(self.model_dict[key], checkpoint[key])
else:
print(f'pixie trained model path: {model_path} does not exist!')
exit()
# eval mode
for module in [self.Encoder, self.Regressor, self.Moderator, self.Extractor]:
for net in module.values():
net.eval()
def decompose_code(self, code, num_dict):
''' Convert a flattened parameter vector to a dictionary of parameters
'''
code_dict = {}
start = 0
for key in num_dict:
end = start+int(num_dict[key])
code_dict[key] = code[:, start:end]
start = end
return code_dict
def part_from_body(self, image, part_key, points_dict, crop_joints = None):
''' crop part(head/left_hand/right_hand) out from body data, joints also change accordingly
'''
assert part_key in ['head', 'left_hand', 'right_hand']
assert 'smplx_kpt' in points_dict.keys()
if part_key == 'head':
# use face 68 kpts for cropping head image
indices_key = 'face'
elif part_key == 'left_hand':
indices_key = 'left_hand'
elif part_key == 'right_hand':
indices_key = 'right_hand'
# get points for cropping
part_indices = self.part_indices[indices_key]
if crop_joints is not None:
points_for_crop = crop_joints[:, part_indices]
else:
points_for_crop = points_dict['smplx_kpt'][:, part_indices]
# crop
cropper_key = 'hand' if 'hand' in part_key else part_key
points_scale = image.shape[-2:]
cropped_image, tform = self.Cropper[cropper_key].crop(
image,
points_for_crop,
points_scale
)
# transform points(must be normalized to [-1.1]) accordingly
cropped_points_dict = {}
for points_key in points_dict.keys():
points = points_dict[points_key]
cropped_points = self.Cropper[cropper_key].transform_points(points, tform, points_scale, normalize=True)
cropped_points_dict[points_key] = cropped_points
return cropped_image, cropped_points_dict
    @torch.no_grad()
    def encode(self, data, threthold=True, keep_local=True, copy_and_paste=False, body_only=False):
        ''' Encode images to smplx parameters
        Args:
            data: dict
                key: image_type (body/head/hand)
                value:
                    image: [bz, 3, 224, 224], range [0,1]
                    image_hd(needed if key==body): a high res version of image, only for cropping parts from body image
                    head_image: optional, well-cropped head from body image
                    left_hand_image: optional, well-cropped left hand from body image
                    right_hand_image: optional, well-cropped right hand from body image
            threthold: if True, fully trust the part feature for hands when the
                moderator weight exceeds 0.7 (parameter name kept as-is for
                backward compatibility)
            keep_local: if True, copy local detail parameters (expression,
                finger poses) from the part crops into the body prediction
            copy_and_paste: if True, always replace the body's shared part
                features with the part-crop features
            body_only: if True, return the coarse body-only estimation and
                skip part cropping/fusion
        Returns:
            param_dict: dict
                key: image_type (body/head/hand)
                value: param_dict
        '''
        for key in data.keys():
            assert key in ['body', 'head', 'hand']
        feature = {}
        param_dict = {}

        # Encode features
        for key in data.keys():
            part = key
            # encode feature
            feature[key] = {}
            feature[key][part] = self.Encoder[part](data[key]['image'])

            # for head/hand image
            if key == 'head' or key == 'hand':
                # predict head/hand-only parameters from part feature
                part_dict = self.decompose_code(self.Regressor[part](feature[key][part]), self.param_list_dict[f'{part}_list'])
                # if input is part data, skip feature fusion: share feature is the same as part feature
                # then predict share parameters
                feature[key][f'{key}_share'] = feature[key][key]
                share_dict = self.decompose_code(
                    self.Regressor[f'{part}_share'](feature[key][f'{part}_share']),
                    self.param_list_dict[f'{part}_share_list'])
                # compose parameters
                param_dict[key] = {**share_dict, **part_dict}

            # for body image
            if key == 'body':
                fusion_weight = {}
                f_body = feature['body']['body']
                # extract part feature
                for part_name in ['head', 'left_hand', 'right_hand']:
                    feature['body'][f'{part_name}_share'] = self.Extractor[f'{part_name}_share'](f_body)

                # -- check if part crops are given, if not, crop parts by coarse body estimation
                if 'head_image' not in data[key].keys() \
                    or 'left_hand_image' not in data[key].keys() \
                    or 'right_hand_image' not in data[key].keys():
                    #- run without fusion to get coarse estimation, for cropping parts
                    # body only
                    body_dict = self.decompose_code(self.Regressor[part](feature[key][part]), self.param_list_dict[part+'_list'])
                    # head share
                    head_share_dict = self.decompose_code(self.Regressor['head'+'_share'](feature[key]['head'+'_share']), self.param_list_dict['head'+'_share_list'])
                    # right hand share
                    right_hand_share_dict = self.decompose_code(self.Regressor['hand'+'_share'](feature[key]['right_hand'+'_share']), self.param_list_dict['hand'+'_share_list'])
                    # left hand share
                    left_hand_share_dict = self.decompose_code(self.Regressor['hand'+'_share'](feature[key]['left_hand'+'_share']), self.param_list_dict['hand'+'_share_list'])
                    # the hand regressor predicts "right hand" params; rename
                    # the keys for the left hand accordingly
                    left_hand_share_dict['left_hand_pose'] = left_hand_share_dict.pop('right_hand_pose')
                    left_hand_share_dict['left_wrist_pose'] = left_hand_share_dict.pop('right_wrist_pose')
                    param_dict[key] = {**body_dict, **head_share_dict, **left_hand_share_dict, **right_hand_share_dict}
                    if body_only:
                        param_dict['moderator_weight'] = None
                        return param_dict
                    prediction_body_only = self.decode(param_dict[key], param_type='body')
                    # crop each part from the high-res body image using the
                    # coarse body keypoints
                    for part_name in ['head', 'left_hand', 'right_hand']:
                        part = part_name.split('_')[-1]
                        points_dict = {
                            'smplx_kpt': prediction_body_only['smplx_kpt'],
                            'trans_verts': prediction_body_only['transformed_vertices']
                        }
                        cropped_image, cropped_joints_dict = self.part_from_body(data['body']['image_hd'], part_name, points_dict)
                        data[key][part_name+'_image'] = cropped_image

                # -- encode features from part crops, then fuse feature using the weight from moderator
                for part_name in ['head', 'left_hand', 'right_hand']:
                    part = part_name.split('_')[-1]
                    cropped_image = data[key][part_name+'_image']
                    # if left hand, flip it as if it is right hand
                    if part_name == 'left_hand':
                        cropped_image = torch.flip(cropped_image, dims=(-1,))
                    # run part regressor
                    f_part = self.Encoder[part](cropped_image)
                    part_dict = self.decompose_code(self.Regressor[part](f_part), self.param_list_dict[f'{part}_list'])
                    part_share_dict = self.decompose_code(self.Regressor[f'{part}_share'](f_part), self.param_list_dict[f'{part}_share_list'])
                    param_dict['body_'+part_name] = {**part_dict, **part_share_dict}

                    # moderator to assign weight, then integrate features
                    f_body_out, f_part_out, f_weight = self.Moderator[f'{part}_share'](feature['body'][f'{part_name}_share'], f_part, work=True)
                    if copy_and_paste:
                        # copy and paste strategy always trusts the results from part
                        feature['body'][f'{part_name}_share'] = f_part
                    elif threthold and part == 'hand':
                        # for hand, if part weight > 0.7 (very confident, then fully trust part)
                        part_w = f_weight[:,[1]]
                        part_w[part_w > 0.7] = 1.
                        f_body_out = feature['body'][f'{part_name}_share']*(1. - part_w) + f_part*part_w
                        feature['body'][f'{part_name}_share'] = f_body_out
                    else:
                        feature['body'][f'{part_name}_share'] = f_body_out
                    fusion_weight[part_name] = f_weight
                # save weights from moderator, that can be further used for optimization/running specific tasks on parts
                param_dict['moderator_weight'] = fusion_weight

                # -- predict parameters from fused body feature
                # head share
                head_share_dict = self.decompose_code(self.Regressor['head'+'_share'](feature[key]['head'+'_share']), self.param_list_dict['head'+'_share_list'])
                # right hand share
                right_hand_share_dict = self.decompose_code(self.Regressor['hand'+'_share'](feature[key]['right_hand'+'_share']), self.param_list_dict['hand'+'_share_list'])
                # left hand share
                left_hand_share_dict = self.decompose_code(self.Regressor['hand'+'_share'](feature[key]['left_hand'+'_share']), self.param_list_dict['hand'+'_share_list'])
                # change the dict name from right to left
                left_hand_share_dict['left_hand_pose'] = left_hand_share_dict.pop('right_hand_pose')
                left_hand_share_dict['left_wrist_pose'] = left_hand_share_dict.pop('right_wrist_pose')
                # NOTE(review): body_dict is only assigned in the branch above
                # that runs when part crops are missing; if the caller supplies
                # all three part crops this line raises NameError — confirm
                # callers never pass all crops, or hoist the body_dict
                # computation out of that branch.
                param_dict['body'] = {**body_dict, **head_share_dict, **left_hand_share_dict, **right_hand_share_dict}
                # copy tex param from head param dict to body param dict
                param_dict['body']['tex'] = param_dict['body_head']['tex']
                param_dict['body']['light'] = param_dict['body_head']['light']

                if keep_local:
                    # for local change that will not affect whole body and produce unnatral pose, trust part
                    param_dict[key]['exp'] = param_dict['body_head']['exp']
                    param_dict[key]['right_hand_pose'] = param_dict['body_right_hand']['right_hand_pose']
                    # left-hand crop was flipped, so its regressor output is
                    # named 'right_hand_pose'
                    param_dict[key]['left_hand_pose'] = param_dict['body_left_hand']['right_hand_pose']

        return param_dict
def convert_pose(self, param_dict, param_type):
''' Convert pose parameters to rotation matrix
Args:
param_dict: smplx parameters
param_type: should be one of body/head/hand
Returns:
param_dict: smplx parameters
'''
assert param_type in ['body', 'head', 'hand']
# convert pose representations: the output from network are | |
"""
Python script to compute lower and upper bounds for the
mnll and jsd metrics
License:
MIT License
Copyright (c) 2020 Kundaje Lab
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use, copy,
modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import logging
import numpy as np
import os
import pandas as pd
import pyBigWig
import sys
from basepairmodels.cli.argparsers import bounds_argsparser
from basepairmodels.cli.metrics import mnll, profile_cross_entropy
from mseqgen import quietexception
from scipy.ndimage import gaussian_filter1d
from scipy.spatial.distance import jensenshannon
from scipy.special import logsumexp
from scipy.stats import pearsonr, spearmanr, multinomial
from tqdm import tqdm
def get_average_profile(input_bigWig, peaks_df, peak_width):
    """
    Compute the mean base-level profile across all peaks

    Args:
        input_bigWig (str): path to bigWig file
        peaks_df (pandas.DataFrame): peaks information with 'chrom',
            'start' and 'end' as the first 3 columns; every peak must
            be exactly peak_width bases wide
        peak_width (int): width of each peak

    Returns:
        np.array: numpy array of length peak_width

    Raises:
        quietexception.QuietException: if any peak's width differs
            from peak_width
    """
    bw = pyBigWig.open(input_bigWig)

    # accumulate per-base sums over all peaks, then divide once
    total = np.zeros(peak_width)
    for _, peak in peaks_df.iterrows():
        # every peak must match the expected width
        if (peak['end'] - peak['start']) != peak_width:
            raise quietexception.QuietException(
                "Inconsistent peak width found at: {}:{}-{}".format(
                    peak['chrom'], peak['start'], peak['end']))

        # missing values in the bigWig come back as NaN; treat them as 0
        total += np.nan_to_num(
            bw.values(peak['chrom'], peak['start'], peak['end']))

    bw.close()
    return total / peaks_df.shape[0]
def gaussian1D_smoothing(input_array, sigma, window_size):
    """
    Smooth a 1D array with a Gaussian kernel restricted to a window

    Args:
        input_array (numpy.array): input array of values
        sigma (float): standard deviation of the Gaussian kernel
        window_size (int): total width of the smoothing window in samples

    Returns:
        numpy.array: smoothed output array
    """
    # scipy's `truncate` is expressed in standard deviations; convert
    # the window size (in samples) to that unit
    half_width = (window_size - 1) / 2
    truncate = (half_width - 0.5) / sigma
    return gaussian_filter1d(input_array, sigma=sigma, truncate=truncate)
def get_nonzero_pseudoreplicate_pair(true_counts):
    """
    Randomly split base-level true counts into two pseudoreplicates,
    retrying until both halves have a nonzero total

    Args:
        true_counts (numpy.array): 1D numpy array containing base
            level true counts

    Returns:
        tuple: (numpy array of counts for replicate 1,
            numpy array of counts for replicate 2)
    """
    num_positions = len(true_counts)
    while True:
        # replicate treated as the "observed" profile
        rep1 = np.zeros(num_positions)
        # replicate treated as the "predicted" profile
        rep2 = np.zeros(num_positions)

        # one fair coin toss per individual count
        tosses = np.random.binomial(1, 0.5, sum(true_counts))
        toss_idx = 0
        # walk the profile and assign each count to one replicate
        for pos in range(num_positions):
            for _ in range(true_counts[pos]):
                # heads -> rep1, tails -> rep2
                if tosses[toss_idx] == 0:
                    rep1[pos] += 1
                else:
                    rep2[pos] += 1
                toss_idx += 1

        # retry if either pseudoreplicate ended up with no counts at all
        if sum(rep1) > 0 and sum(rep2) > 0:
            return (rep1, rep2)
def bounds(input_bigWig, peaks_df, peak_width, smoothing_params=[7, 81]):
"""
Function to compute lower & upper bounds, and average profile
performance for cross entropy and jsd metrics
Args:
input_bigWig (str): path to bigWig file
peaks_df (str): pandas dataframe containing peaks
information.
The dataframe should have 'chrom', 'start', and 'end'
as first 3 columns. Each peak should have the same
width (equal to peak_width) i.e 'end' - 'start' is the
same for all rows in the dataframe.
peak_width (int): width of each peak.
smoothing_params (list): list of length 2, containing sigma
and window_size values for 1D gaussian smoothing of
profiles
Returns:
tuple: (numpy array of average profile, pandas dataframe
with bounds values in columns)
"""
# compute the average profile
print("Computing average profile ...")
avg_profile = get_average_profile(input_bigWig, peaks_df, peak_width)
# get average profile as probabilities
avg_profile_prob = avg_profile / np.sum(avg_profile)
# open the bigWig file for reading
bw = pyBigWig.open(input_bigWig)
# arrays to hold metrics values for mnll, cross entropy, jsd,
# pearson and spearman correlation of the peak profile computed
# against uniform, average and self(observed peak) profile
# mnll
mnll_uniform = np.zeros(peaks_df.shape[0])
mnll_average = np.zeros(peaks_df.shape[0])
mnll_self = np.zeros(peaks_df.shape[0])
# cross entropy
ce_uniform = np.zeros(peaks_df.shape[0])
ce_average = np.zeros(peaks_df.shape[0])
ce_self = np.zeros(peaks_df.shape[0])
# jsd
jsd_uniform = np.zeros(peaks_df.shape[0])
jsd_average = np.zeros(peaks_df.shape[0])
jsd_self = np.zeros(peaks_df.shape[0])
# pearson
pearson_uniform = np.zeros(peaks_df.shape[0])
pearson_average = np.zeros(peaks_df.shape[0])
pearson_self = np.zeros(peaks_df.shape[0])
# spearman
spearman_uniform = np.zeros(peaks_df.shape[0])
spearman_average = np.zeros(peaks_df.shape[0])
spearman_self = np.zeros(peaks_df.shape[0])
print("Computing bounds ...")
# iterate through all peaks
for idx, row in tqdm(peaks_df.iterrows(), desc='peak',
total=peaks_df.shape[0]):
# raise exception if 'end' - 'start' is not equal to peak_width
if (row['end'] - row['start']) != peak_width:
raise quietexception.QuietException(
"Inconsistent peak width found at: {}:{}-{}".format(
row['chrom'], row['start'], row['end']))
# get bigWig profile
profile = np.nan_to_num(
bw.values(row['chrom'], row['start'], row['end']))
# if we find that the profile at this peak is all zeros
if sum(profile) == 0:
print("Found 'zero' profile at {}: ({}, {})".format(
row['chrom'], row['start'], row['end']))
# assign nans to all
mnll_uniform[idx] = np.nan
mnll_average[idx] = np.nan
mnll_self[idx] = np.nan
ce_uniform[idx] = np.nan
ce_average[idx] = np.nan
ce_self[idx] = np.nan
jsd_uniform[idx] = np.nan
jsd_average[idx] = np.nan
jsd_self[idx] = np.nan
pearson_uniform[idx] = np.nan
pearson_average[idx] = np.nan
pearson_self[idx] = np.nan
spearman_uniform[idx] = np.nan
spearman_average[idx] = np.nan
spearman_self[idx] = np.nan
continue
# uniform distribution profile
uniform_profile = np.ones(peak_width) * (1.0 / peak_width)
# smoothed profile
profile_smooth = gaussian1D_smoothing(profile, smoothing_params[0],
smoothing_params[1])
# smoothed profile as probabilities
profile_smooth_prob = profile_smooth / np.sum(profile_smooth)
# profile as probabilities
profile_prob = profile / np.sum(profile)
# mnll of profile with uniform profile
mnll_uniform[idx] = mnll(profile, probs=uniform_profile)
# mnll of profile with average profile
mnll_average[idx] = mnll(profile, probs=avg_profile_prob)
# mnll of profile with itself
mnll_self[idx] = mnll(profile, probs=profile_prob)
# cross entropy of profile with uniform profile
ce_uniform[idx] = profile_cross_entropy(profile,
probs=uniform_profile)
# cross entropy of profile with average profile
ce_average[idx] = profile_cross_entropy(profile,
probs=avg_profile_prob)
# cross entropy of profile with itself
ce_self[idx] = profile_cross_entropy(profile, probs=profile_prob)
# jsd of profile with uniform profile
jsd_uniform[idx] = jensenshannon(profile_prob, uniform_profile)
# jsd of profile with average profile
jsd_average[idx] = jensenshannon(profile_prob, avg_profile_prob)
# jsd of profile with itself (upper bound)
jsd_self[idx] = 0.0
# pearson of profile with uniform profile
### nothing to do ... leave it as zeros
# pearson of profile with average profile
pearson_average[idx] = pearsonr(profile, avg_profile_prob)[0]
# pearson of profile with itself
pearson_self[idx] = pearsonr(profile, profile)[0]
# spearman of profile with uniform profile
### nothing to do ... leave it as zeros
# spearman of profile with average profile
spearman_average[idx] = spearmanr(profile, avg_profile_prob)[0]
spearman_self[idx] = spearmanr(profile, profile)[0]
# create a pandas dataframe | |
# tests/test_document.py
# coding: utf-8
import unittest
import json
import os
from xylose.scielodocument import Article, Citation, Journal, html_decode
from xylose import tools
class ToolsTests(unittest.TestCase):
    """Unit tests for the language/date helpers in ``xylose.tools``."""

    def test_get_language_without_iso_format(self):
        self.assertEqual(tools.get_language(u'xx', None), u'xx')

    def test_get_language_iso639_1_defined(self):
        self.assertEqual(tools.get_language(u'pt', u'iso 639-1'), u'pt')

    def test_get_language_iso639_1_undefined(self):
        self.assertEqual(
            tools.get_language(u'xx', u'iso 639-1'), u'#undefined xx#')

    def test_get_language_iso639_2_defined(self):
        self.assertEqual(tools.get_language(u'pt', u'iso 639-2'), u'por')

    def test_get_language_iso639_2_undefined(self):
        self.assertEqual(
            tools.get_language(u'xx', u'iso 639-2'), u'#undefined xx#')

    def test_get_publication_date_year_month_day(self):
        self.assertEqual(tools.get_publication_date('20120102'), '2012-01-02')

    def test_get_publication_date_year_month(self):
        self.assertEqual(tools.get_publication_date('20120100'), '2012-01')

    def test_get_publication_date_year(self):
        self.assertEqual(tools.get_publication_date('20120000'), '2012')

    def test_get_publication_date_year_day(self):
        self.assertEqual(tools.get_publication_date('20120001'), '2012')

    def test_get_publication_date_wrong_day(self):
        self.assertEqual(tools.get_publication_date('201201'), '2012-01')

    def test_get_publication_date_wrong_day_month(self):
        self.assertEqual(tools.get_publication_date('2012'), '2012')

    def test_get_publication_date_wrong_day_not_int(self):
        self.assertEqual(tools.get_publication_date('201201xx'), '2012-01')

    def test_get_publication_date_wrong_day_month_not_int(self):
        self.assertEqual(tools.get_publication_date('2012xxxx'), '2012')

    def test_get_publication_date_wrong_month_not_int(self):
        self.assertEqual(tools.get_publication_date('2012xx01'), '2012')
class JournalTests(unittest.TestCase):
    """Exercise the ``Journal`` wrapper against the full_document.json fixture.

    Tests mutate ``self.fulldoc['title']`` (the ISIS-style ``vNNN`` fields)
    before constructing a fresh ``Journal`` to probe each property.
    """
    def setUp(self):
        # Each test starts from a fresh copy of the JSON fixture.
        path = os.path.dirname(os.path.realpath(__file__))
        self.fulldoc = json.loads(open('%s/fixtures/full_document.json' % path).read())
        self.journal = Journal(self.fulldoc['title'])
    def test_journal(self):
        journal = self.journal
        self.assertTrue(isinstance(journal, Journal))
    # --- ISSN resolution: v400 (SciELO ISSN), v35 (type), v935 (current) ---
    def test_scielo_issn(self):
        self.fulldoc['title']['v400'] = [{u'_': u'2222-2222'}]
        journal = Journal(self.fulldoc['title'])
        self.assertEqual(journal.scielo_issn, '2222-2222')
    def test_load_issn_with_v935_without_v35(self):
        # Without v35 the ISSN type is unknown, so neither ISSN is resolved.
        del(self.fulldoc['title']['v35'])
        self.fulldoc['title']['v400'] = [{u'_': u'2222-2222'}]
        self.fulldoc['title']['v935'] = [{u'_': u'3333-3333'}]
        journal = Journal(self.fulldoc['title'])
        self.assertEqual(journal.print_issn, None)
        self.assertEqual(journal.electronic_issn, None)
    def test_load_issn_without_v935_without_v35(self):
        del(self.fulldoc['title']['v35'])
        del(self.fulldoc['title']['v935'])
        self.fulldoc['title']['v400'] = [{u'_': u'2222-2222'}]
        journal = Journal(self.fulldoc['title'])
        self.assertEqual(journal.print_issn, None)
    def test_load_issn_without_v935_and_v35_PRINT(self):
        self.fulldoc['title']['v35'] = [{u'_': u'PRINT'}]
        self.fulldoc['title']['v400'] = [{u'_': u'2222-2222'}]
        del(self.fulldoc['title']['v935'])
        journal = Journal(self.fulldoc['title'])
        self.assertEqual(journal.print_issn, u'2222-2222')
        self.assertEqual(journal.electronic_issn, None)
    def test_load_issn_without_v935_and_v35_ONLINE(self):
        self.fulldoc['title']['v35'] = [{u'_': u'ONLINE'}]
        self.fulldoc['title']['v400'] = [{u'_': u'2222-2222'}]
        del(self.fulldoc['title']['v935'])
        journal = Journal(self.fulldoc['title'])
        self.assertEqual(journal.print_issn, None)
        self.assertEqual(journal.electronic_issn, u'2222-2222')
    def test_load_issn_with_v935_and_v35_PRINT(self):
        # When v935 differs from v400, v935 takes the v35 type and v400 the other.
        self.fulldoc['title']['v35'] = [{u'_': u'PRINT'}]
        self.fulldoc['title']['v400'] = [{u'_': u'2222-2222'}]
        self.fulldoc['title']['v935'] = [{u'_': u'3333-3333'}]
        journal = Journal(self.fulldoc['title'])
        self.assertEqual(journal.print_issn, u'3333-3333')
        self.assertEqual(journal.electronic_issn, u'2222-2222')
    def test_load_issn_with_v935_and_v35_ONLINE(self):
        self.fulldoc['title']['v35'] = [{u'_': u'ONLINE'}]
        self.fulldoc['title']['v400'] = [{u'_': u'2222-2222'}]
        self.fulldoc['title']['v935'] = [{u'_': u'3333-3333'}]
        journal = Journal(self.fulldoc['title'])
        self.assertEqual(journal.print_issn, u'2222-2222')
        self.assertEqual(journal.electronic_issn, u'3333-3333')
    def test_load_issn_with_v935_equal_v400_and_v35_PRINT(self):
        # Identical v935/v400 means there is only one ISSN, of the v35 type.
        self.fulldoc['title']['v35'] = [{u'_': u'PRINT'}]
        self.fulldoc['title']['v400'] = [{u'_': u'3333-3333'}]
        self.fulldoc['title']['v935'] = [{u'_': u'3333-3333'}]
        journal = Journal(self.fulldoc['title'])
        self.assertEqual(journal.print_issn, u'3333-3333')
        self.assertEqual(journal.electronic_issn, None)
    def test_load_issn_with_v935_equal_v400_and_v35_ONLINE(self):
        self.fulldoc['title']['v35'] = [{u'_': u'ONLINE'}]
        self.fulldoc['title']['v400'] = [{u'_': u'3333-3333'}]
        self.fulldoc['title']['v935'] = [{u'_': u'3333-3333'}]
        journal = Journal(self.fulldoc['title'])
        self.assertEqual(journal.print_issn, None)
        self.assertEqual(journal.electronic_issn, u'3333-3333')
    def test_any_issn_priority_electronic(self):
        self.fulldoc['title']['v35'] = [{u'_': u'ONLINE'}]
        self.fulldoc['title']['v400'] = [{u'_': u'2222-2222'}]
        self.fulldoc['title']['v935'] = [{u'_': u'3333-3333'}]
        journal = Journal(self.fulldoc['title'])
        self.assertEqual(journal.any_issn(priority='electronic'), u'3333-3333')
    def test_any_issn_priority_electronic_without_electronic(self):
        # Falls back to the print ISSN when no electronic one exists.
        self.fulldoc['title']['v35'] = [{u'_': u'PRINT'}]
        self.fulldoc['title']['v400'] = [{u'_': u'3333-3333'}]
        self.fulldoc['title']['v935'] = [{u'_': u'3333-3333'}]
        journal = Journal(self.fulldoc['title'])
        self.assertEqual(journal.any_issn(priority='electronic'), u'3333-3333')
    def test_any_issn_priority_print(self):
        self.fulldoc['title']['v35'] = [{u'_': u'ONLINE'}]
        self.fulldoc['title']['v400'] = [{u'_': u'2222-2222'}]
        self.fulldoc['title']['v935'] = [{u'_': u'3333-3333'}]
        journal = Journal(self.fulldoc['title'])
        self.assertEqual(journal.any_issn(priority='print'), u'2222-2222')
    def test_any_issn_priority_print_without_print(self):
        self.fulldoc['title']['v35'] = [{u'_': u'ONLINE'}]
        self.fulldoc['title']['v400'] = [{u'_': u'3333-3333'}]
        self.fulldoc['title']['v935'] = [{u'_': u'3333-3333'}]
        journal = Journal(self.fulldoc['title'])
        self.assertEqual(journal.any_issn(priority='print'), u'3333-3333')
    # --- domain / URL (v690) and assorted metadata fields ---
    def test_without_scielo_domain(self):
        journal = self.journal
        del(journal.data['v690'])
        self.assertEqual(journal.scielo_domain, None)
    def test_without_scielo_domain_title_v690(self):
        journal = self.journal
        self.assertEqual(journal.scielo_domain, u'www.scielo.br')
    def test_collection_acronym(self):
        self.fulldoc['title']['v992'] = [{'_': 'scl'}]
        journal = Journal(self.fulldoc['title'])
        self.assertEqual(journal.collection_acronym, u'scl')
    def test_without_journal_url(self):
        journal = self.journal
        del(journal.data['v690'])
        self.assertEqual(journal.url(), None)
    def test_journal_url(self):
        journal = self.journal
        expected = u"http://www.scielo.br/scielo.php?script=sci_serial&pid=2179-975X&lng=en"
        self.assertEqual(journal.url(), expected)
    def test_wos_subject_areas(self):
        self.fulldoc['title']['v854'] = [{u'_': u'MARINE & FRESHWATER BIOLOGY'}, {u'_': u'OCEANOGRAPHY'}]
        journal = Journal(self.fulldoc['title'])
        self.assertEqual(journal.wos_subject_areas, [u'MARINE & FRESHWATER BIOLOGY', u'OCEANOGRAPHY'])
    def test_without_wos_subject_areas(self):
        del(self.fulldoc['title']['v854'])
        journal = Journal(self.fulldoc['title'])
        self.assertEqual(journal.wos_subject_areas, None)
    def test_journal_abbreviated_title(self):
        self.fulldoc['title']['v150'] = [{u'_': u'It is the journal title'}]
        journal = Journal(self.fulldoc['title'])
        self.assertEqual(journal.abbreviated_title, u'It is the journal title')
    def test_without_journal_abbreviated_title(self):
        del(self.fulldoc['title']['v150'])
        journal = Journal(self.fulldoc['title'])
        self.assertEqual(journal.abbreviated_title, None)
    def test_subject_areas(self):
        self.fulldoc['title']['v441'] = [{u'_': u'HEALTH SCIENCES'}, {u'_': u'BIOLOGICAL SCIENCES'}]
        journal = Journal(self.fulldoc['title'])
        self.assertEqual(journal.subject_areas, [u'HEALTH SCIENCES', u'BIOLOGICAL SCIENCES'])
    def test_without_subject_areas(self):
        del(self.fulldoc['title']['v441'])
        journal = Journal(self.fulldoc['title'])
        self.assertEqual(journal.subject_areas, None)
    def test_wos_citation_indexes(self):
        self.fulldoc['title']['v851'] = [{u'_': u'SCIE'}]
        self.fulldoc['title']['v852'] = [{u'_': u'SSCI'}]
        self.fulldoc['title']['v853'] = [{u'_': u'AHCI'}]
        journal = Journal(self.fulldoc['title'])
        self.assertEqual(journal.wos_citation_indexes, [u'SCIE', u'SSCI', u'AHCI'])
    def test_without_wos_citation_indexes(self):
        # NOTE(review): every other test passes self.fulldoc['title'] to
        # Journal; passing the whole document here still yields None (the
        # top-level dict lacks v851-v853) but looks unintentional — confirm
        # against the fixture before "fixing".
        journal = Journal(self.fulldoc)
        self.assertEqual(journal.wos_citation_indexes, None)
    def test_publisher_name(self):
        journal = self.journal
        self.assertEqual(journal.publisher_name, u'Associação Brasileira de Limnologia')
    def test_without_publisher_name(self):
        journal = self.journal
        del(journal.data['v480'])
        self.assertEqual(journal.publisher_name, None)
    def test_publisher_loc(self):
        journal = self.journal
        self.assertEqual(journal.publisher_loc, u'<NAME>')
    def test_without_publisher_loc(self):
        journal = self.journal
        del(journal.data['v490'])
        self.assertEqual(journal.publisher_loc, None)
    def test_journal_title(self):
        journal = self.journal
        self.assertEqual(journal.title, u'Acta Limnologica Brasiliensia')
    def test_without_journal_title(self):
        journal = self.journal
        del(journal.data['v100'])
        self.assertEqual(journal.title, None)
    def test_journal_acronym(self):
        journal = self.journal
        self.assertEqual(journal.acronym, u'alb')
    def test_without_journal_acronym(self):
        journal = self.journal
        del(journal.data['v68'])
        self.assertEqual(journal.acronym, None)
class ArticleTests(unittest.TestCase):
def setUp(self):
path = os.path.dirname(os.path.realpath(__file__))
self.fulldoc = json.loads(open('%s/fixtures/full_document.json' % path).read())
self.article = Article(self.fulldoc)
def test_article(self):
article = self.article
self.assertTrue(isinstance(article, Article))
def test_languages_field_v601(self):
self.fulldoc['article']['v601'] = [{'_': 'pt'}, {'_': 'es'}, {'_': 'en'}]
article = Article(self.fulldoc)
self.assertEqual(sorted(article.languages().keys()), ['en', 'es', 'pt'])
def test_languages_field_v720(self):
self.fulldoc['article']['v720'] = [
{
'v': 'ori',
'l': 'pt',
'f': 'pdf',
'u': 'http://www.scielo.br/pdf/abcd/v22n3/v22n3a01.pdf'
},
{
'v': 'ori',
'l': 'pt',
'f': 'html',
'u': 'http://www.scielo.br/scielo.php?script=sci_arttext&pid=S2179-975X2011000300002&lng=en&nrm=iso&tlng=pt'
},
{
'v': 'trd',
'l': 'es',
'f': 'pdf',
'u': 'http://www.scielo.br/pdf/abcd/v22n3/v22n3a01_es.pdf'
},
{
'v': 'trd',
'l': 'es',
'f': 'html',
'u': 'http://www.scielo.br/scielo.php?script=sci_arttext&pid=S2179-975X2011000300002&lng=en&nrm=iso&tlng=es'
},
{
'v': 'trd',
'l': 'en',
'f': 'pdf',
'u': 'http://www.scielo.br/pdf/abcd/v22n3/v22n3a01_en.pdf'
},
{
'v': 'trd',
'l': 'en',
'f': 'html',
'u': 'http://www.scielo.br/scielo.php?script=sci_arttext&pid=S2179-975X2011000300002&lng=en&nrm=iso&tlng=en'
}
]
article = Article(self.fulldoc)
result = article.languages()
self.assertEqual(sorted(result.keys()), ['en', 'es', 'pt'])
self.assertEqual(result['en']['pdf'], u'http://www.scielo.br/pdf/abcd/v22n3/v22n3a01_en.pdf')
self.assertEqual(result['en']['html'], u'http://www.scielo.br/scielo.php?script=sci_arttext&pid=S2179-975X2011000300002&lng=en&nrm=iso&tlng=en')
self.assertEqual(result['es']['pdf'], u'http://www.scielo.br/pdf/abcd/v22n3/v22n3a01_es.pdf')
self.assertEqual(result['es']['html'], u'http://www.scielo.br/scielo.php?script=sci_arttext&pid=S2179-975X2011000300002&lng=en&nrm=iso&tlng=es')
self.assertEqual(result['pt']['pdf'], u'http://www.scielo.br/pdf/abcd/v22n3/v22n3a01.pdf')
self.assertEqual(result['pt']['html'], u'http://www.scielo.br/scielo.php?script=sci_arttext&pid=S2179-975X2011000300002&lng=en&nrm=iso&tlng=pt')
def test_languages_field_v601_v720(self):
self.fulldoc['article']['v601'] = [{'_': 'pt'}, {'_': 'es'}, {'_': 'en'}]
self.fulldoc['article']['v720'] = [
{
'v': 'ori',
'l': 'pt',
'f': 'pdf',
'u': 'http://www.scielo.br/pdf/abcd/v22n3/v22n3a01.pdf'
},
{
'v': 'ori',
'l': 'pt',
'f': 'html',
'u': 'http://www.scielo.br/scielo.php?script=sci_arttext&pid=S2179-975X2011000300002&lng=en&nrm=iso&tlng=pt'
},
{
'v': 'trd',
'l': 'es',
'f': 'pdf',
'u': 'http://www.scielo.br/pdf/abcd/v22n3/v22n3a01_es.pdf'
},
{
'v': 'trd',
'l': 'es',
'f': 'html',
'u': 'http://www.scielo.br/scielo.php?script=sci_arttext&pid=S2179-975X2011000300002&lng=en&nrm=iso&tlng=es'
},
{
'v': 'trd',
'l': 'en',
'f': 'pdf',
'u': 'http://www.scielo.br/pdf/abcd/v22n3/v22n3a01_en.pdf'
},
{
'v': 'trd',
'l': 'en',
'f': 'html',
'u': 'http://www.scielo.br/scielo.php?script=sci_arttext&pid=S2179-975X2011000300002&lng=en&nrm=iso&tlng=en'
}
]
article = Article(self.fulldoc)
result = article.languages()
self.assertEqual(sorted(result.keys()), ['en', 'es', 'pt'])
self.assertEqual(result['en']['pdf'], u'http://www.scielo.br/pdf/abcd/v22n3/v22n3a01_en.pdf')
self.assertEqual(result['en']['html'], u'http://www.scielo.br/scielo.php?script=sci_arttext&pid=S2179-975X2011000300002&lng=en&nrm=iso&tlng=en')
self.assertEqual(result['es']['pdf'], u'http://www.scielo.br/pdf/abcd/v22n3/v22n3a01_es.pdf')
self.assertEqual(result['es']['html'], u'http://www.scielo.br/scielo.php?script=sci_arttext&pid=S2179-975X2011000300002&lng=en&nrm=iso&tlng=es')
self.assertEqual(result['pt']['pdf'], u'http://www.scielo.br/pdf/abcd/v22n3/v22n3a01.pdf')
self.assertEqual(result['pt']['html'], u'http://www.scielo.br/scielo.php?script=sci_arttext&pid=S2179-975X2011000300002&lng=en&nrm=iso&tlng=pt')
self.assertEqual(result['es']['xml'], u'http://www.scielo.br/scielo.php?script=sci_arttext&pid=S2179-975X2011000300002&lng=es&tlng=es')
self.assertEqual(result['en']['xml'], u'http://www.scielo.br/scielo.php?script=sci_arttext&pid=S2179-975X2011000300002&lng=en&tlng=en')
self.assertEqual(result['pt']['xml'], u'http://www.scielo.br/scielo.php?script=sci_arttext&pid=S2179-975X2011000300002&lng=pt&tlng=pt')
def test_collection_name_brazil(self):
self.fulldoc['collection'] = u'scl'
article = Article(self.fulldoc)
self.assertEqual(article.collection_name, u'Brazil')
def test_collection_name_undefined(self):
self.fulldoc['collection'] = u'xxx'
article = Article(self.fulldoc)
self.assertEqual(article.collection_name, u'Undefined: xxx')
def test_collection_acronym(self):
article = Article(self.fulldoc)
self.assertEqual(article.collection_acronym, u'scl')
def test_collection_acronym_priorizing_collection(self):
self.fulldoc['collection'] = u'yyy'
self.fulldoc['article']['v992'] = [{u'_': u'xxx'}]
article = Article(self.fulldoc)
self.assertEqual(article.collection_acronym, u'yyy')
def test_collection_acronym_retrieving_v992(self):
del(self.fulldoc['collection'])
self.fulldoc['article']['v992'] = [{u'_': u'xxx'}]
article = Article(self.fulldoc)
self.assertEqual(article.collection_acronym, u'xxx')
def test_without_collection_acronym(self):
del(self.fulldoc['collection'])
article = Article(self.fulldoc)
self.assertEqual(article.collection_acronym, None)
def test_subject_areas(self):
self.fulldoc['title']['v441'] = [{u'_': u'HEALTH SCIENCES'}, {u'_': u'BIOLOGICAL SCIENCES'}]
article = Article(self.fulldoc)
self.assertEqual(article.journal.subject_areas, [u'HEALTH SCIENCES', u'BIOLOGICAL SCIENCES'])
def test_without_subject_areas(self):
del(self.fulldoc['title']['v441'])
article = Article(self.fulldoc)
self.assertEqual(article.journal.subject_areas, None)
def test_wos_citation_indexes(self):
self.fulldoc['title']['v851'] = [{u'_': u'SCIE'}]
self.fulldoc['title']['v852'] = [{u'_': u'SSCI'}]
self.fulldoc['title']['v853'] = [{u'_': u'AHCI'}]
article = Article(self.fulldoc)
self.assertEqual(article.journal.wos_citation_indexes, [u'SCIE', u'SSCI', u'AHCI'])
def test_without_wos_citation_indexes(self):
article = Article(self.fulldoc)
self.assertEqual(article.journal.wos_citation_indexes, None)
def test_file_code(self):
article = Article(self.fulldoc)
self.assertEqual(article.file_code, 'alb_aop_230302')
def test_file_code_crazy_slashs_1(self):
self.fulldoc['article']['v702'] = [{u'_': u'file://r\\\\x//y//z\\\\file.html'}]
article = Article(self.fulldoc)
self.assertEqual(article.file_code, 'file')
def test_file_code_crazy_slashs_2(self):
self.fulldoc['article']['v702'] = [{"_": "rsp/v47n4/0034-8910-rsp-47-04-0675.xml"}]
article = Article(self.fulldoc)
self.assertEqual(article.file_code, '0034-8910-rsp-47-04-0675')
def test_wos_subject_areas(self):
self.fulldoc['title']['v854'] = [{u'_': u'MARINE & FRESHWATER BIOLOGY'}, {u'_': u'OCEANOGRAPHY'}]
article = Article(self.fulldoc)
self.assertEqual(article.journal.wos_subject_areas, [u'MARINE & FRESHWATER BIOLOGY', u'OCEANOGRAPHY'])
def test_without_wos_subject_areas(self):
del(self.fulldoc['title']['v854'])
article = Article(self.fulldoc)
self.assertEqual(article.journal.wos_subject_areas, None)
def test_journal_abbreviated_title(self):
self.fulldoc['title']['v150'] = [{u'_': u'It is the journal title'}]
article = Article(self.fulldoc)
self.assertEqual(article.journal.abbreviated_title, u'It is the journal title')
def test_without_journal_abbreviated_title(self):
del(self.fulldoc['title']['v150'])
self.assertEqual(self.article.journal.abbreviated_title, None)
def test_original_language_iso639_2(self):
article = self.article
self.assertEqual(article.original_language(iso_format='iso 639-2'), u'eng')
def test_original_language_invalid_iso639_2(self):
article = self.article
article.data['article']['v40'][0]['_'] = u'XXX'
self.assertEqual(article.original_language(iso_format='iso 639-2'), u'#undefined XXX#')
def test_original_language_original(self):
article = self.article
self.assertEqual(article.original_language(iso_format=None), u'en')
def test_publisher_name(self):
article = self.article
self.assertEqual(article.journal.publisher_name, u'Associação Brasileira de Limnologia')
def test_without_publisher_name(self):
article = self.article
del(article.data['title']['v480'])
self.assertEqual(article.journal.publisher_name, None)
def test_publisher_loc(self):
article = self.article
self.assertEqual(article.journal.publisher_loc, u'<NAME>')
def test_without_publisher_loc(self):
article = self.article
del(article.data['title']['v490'])
self.assertEqual(article.journal.publisher_loc, None)
def test_journal_title(self):
article = self.article
self.assertEqual(article.journal.title, u'Acta Limnologica Brasiliensia')
def test_without_journal_title(self):
article = self.article
del(article.data['title']['v100'])
self.assertEqual(article.journal.title, None)
def test_journal_acronym(self):
article = self.article
self.assertEqual(article.journal.acronym, u'alb')
def test_without_journal_acronym(self):
article = self.article
del(article.data['title']['v68'])
self.assertEqual(article.journal.acronym, None)
def test_publication_date(self):
article = self.article
article.data['article']['v65'] = [{u'_': u'20120102'}]
self.assertEqual(article.publication_date, '2012-01-02')
def test_without_publication_date(self):
article = self.article
del(article.data['article']['v65'])
with self.assertRaises(KeyError):
article.publication_date
def test_processing_date(self):
article = self.article
article.data['article']['v91'] = [{u'_': u'20120419'}]
self.assertEqual(article.processing_date, '2012-04-19')
def test_without_processing_date(self):
article = self.article
del(article.data['article']['v91'])
with self.assertRaises(KeyError):
article.processing_date
def test_receive_date(self):
article = self.article
article.data['article']['v112'] = [{u'_': u'20110706'}]
self.assertEqual(article.receive_date, '2011-07-06')
def test_whitwout_receive_date(self):
article = self.article
del(article.data['article']['v112'])
self.assertEqual(article.receive_date, None)
def test_acceptance_date(self):
article = self.article
article.data['article']['v114'] = [{u'_': u'20111214'}]
self.assertEqual(article.acceptance_date, '2011-12-14')
def test_whitwout_acceptance_date(self):
article = self.article
del(article.data['article']['v114'])
self.assertEqual(article.acceptance_date, None)
def test_review_date(self):
article = self.article
article.data['article']['v116'] = [{u'_': u'20111215'}]
self.assertEqual(article.review_date, '2011-12-15')
def test_whitwout_review_date(self):
article = self.article
self.assertEqual(article.review_date, None)
def test_ahead_publication_date(self):
article = self.article
article.data['article']['v223'] = [{u'_': u'20131125'}]
self.assertEqual(article.ahead_publication_date, '2013-11-25')
def test_whitwout_ahead_publication_date(self):
article = self.article
del(article.data['article']['v223'])
self.assertEqual(article.ahead_publication_date, None)
def test_publication_contract(self):
self.fulldoc['article']['v60'] = [{u'_': u'2009/53056-8'}]
article = Article(self.fulldoc)
self.assertEqual(article.contract, u'2009/53056-8')
def test_without_publication_contract(self):
del(self.fulldoc['article']['v60'])
self.assertEqual(self.article.contract, None)
def test_project_name(self):
self.fulldoc['article']['v59'] = [{u'_': u'Projeto ABCD'}]
article = Article(self.fulldoc)
self.assertEqual(article.project_name, u'Projeto ABCD')
def test_without_project_name(self):
self.assertEqual(self.article.project_name, None)
def test_project_sponsors(self):
self.fulldoc['article']['v58'] = [{u'_': u'Sponsor name', u'd': u'divisão 1'},
{u'_': u'Sponsor name'},
| |
# -*- coding: utf-8 -*-
__author__ = 'Chenjun'
from gensim.models import Word2Vec
import numpy as np
import cPickle as pickle
from gensim.models.keyedvectors import KeyedVectors
from sklearn.utils import shuffle
def get_w2v_from_model(model_path,emb_path):
    """Export a trained w2v model's vectors to a word2vec-format text file.

    :param model_path: path of the saved gensim ``Word2Vec`` model.
    :param emb_path: destination path for the plain-text embedding file.
    :return:
    """
    trained_model = Word2Vec.load(model_path)
    trained_model.wv.save_word2vec_format(emb_path, binary=False)
def word2vec(word_emb, word, vec_dim, scale):
    """
    Look up the embedding vector for a word, with a random fallback.

    :param word_emb: trained word->vector mapping (e.g. gensim KeyedVectors).
    :param word: input word; byte strings are decoded as UTF-8, already
        decoded text is used as-is (the original unconditional ``decode``
        broke on unicode input).
    :param vec_dim: embedding dimension, used only for the random fallback.
    :param scale: half-width of the uniform range for the random fallback.
    :return: (vector, flag) where flag is 0 for a known word, 1 for unknown.
    """
    if isinstance(word, bytes):  # tolerate both encoded and decoded input
        word = word.decode("utf8")
    if word in word_emb:
        return word_emb[word], 0
    # Draw the random fallback lazily: the original drew it for every call,
    # consuming RNG state even when the word was found.
    return np.random.uniform(-scale, scale, vec_dim), 1
def _register_sentence(tokens, word2index, next_index):
    """Map tokens to indices, assigning fresh indices to unseen words.

    Mutates word2index in place; returns (index_list, next_free_index).
    """
    indices = []
    for token in tokens:
        if token not in word2index:
            word2index[token] = next_index
            next_index += 1
        indices.append(word2index[token])
    return indices, next_index


def _index_corpus(file_path, word2index, next_index):
    """Convert a tab-separated query/answer/label file into index triples."""
    data = []
    for line in open(file_path, 'r'):
        query, answer, label = line.split("\t")
        query_index, next_index = _register_sentence(query.split(" "), word2index, next_index)
        answer_index, next_index = _register_sentence(answer.split(" "), word2index, next_index)
        data.append([query_index, answer_index, int(label)])
    return data, next_index


def sentence2index(train_data_file_path, test_data_file_path, word2index_save_path, index_train_data_save_path, index_test_data_save_path):
    """
    build a word2index dict, replace words in data with index and save them.
    :param train_data_file_path: train data file path
    :param test_data_file_path: test data file path
    :param word2index_save_path: word2index dict file path
    :param index_train_data_save_path: index data file path(train)
    :param index_test_data_save_path: index data file path(test)
    :return:
    """
    word2index = {}
    # Index 0 is reserved for padding, so word indices start at 1.
    # The train and test corpora share (and jointly extend) one vocabulary;
    # the original duplicated this whole loop for each corpus.
    train_data, next_index = _index_corpus(train_data_file_path, word2index, 1)
    test_data, _ = _index_corpus(test_data_file_path, word2index, next_index)
    print("word2index size: %d" % len(word2index))
    print("data size: %d %d" % (len(train_data), len(test_data)))
    # Binary mode keeps the pickles valid on both Python 2 and 3.
    pickle.dump(word2index, open(word2index_save_path, 'wb'))
    pickle.dump(train_data, open(index_train_data_save_path, 'wb'))
    pickle.dump(test_data, open(index_test_data_save_path, 'wb'))
def index2vector(word2index_file_path,emb_file_path,index2vec_save_path,dim,scale):
    """
    build the index2emb-vec matrix and save it.
    Row i holds the embedding of the word with index i; row 0 stays the
    zero vector used for padding. Words absent from the pre-trained
    embeddings receive a random vector and are counted as unknown.
    :param word2index_file_path: word2index file path
    :param emb_file_path: trained embedding file path
    :param index2vec_save_path: index2vec file path
    :param dim: vec dimension
    :param scale: random value scale
    :return:
    """
    unk_count = 0
    word2index = pickle.load(open(word2index_file_path,"r"))
    word_emb = KeyedVectors.load_word2vec_format(emb_file_path,binary=False)
    vocab_size = len(word2index)
    # +1 row because index 0 is reserved for the pad token
    index2vec = np.zeros((vocab_size + 1,dim),dtype="float32")
    index2vec[0] = np.zeros(dim) # vector 0 used for words to fill the blank(pad 0).
    for word in word2index:
        index = word2index[word]
        # word2vec() returns flag=1 when the word was not in word_emb
        vec,flag = word2vec(word_emb,word,dim,scale)
        index2vec[index] = vec
        unk_count += flag
    print "emb vocab size: ",len(word_emb.vocab)
    print "unknown words count: ",unk_count
    print "index2vec size: ",len(index2vec)
    pickle.dump(index2vec,open(index2vec_save_path,"w"))
def split(data_file_path, group_file, index_train_data_file_path, index_valid_data_file_path, train_group_file_path, valid_group_file_path, alpha=0.8,):
    """
    Shuffle whole question groups and split them into train/valid sets.
    Keeps each question's candidate answers together by grouping the flat
    rows according to the per-group sizes in ``group_file``.
    :param data_file_path: pickled index-data file (one row per QA pair)
    :param group_file: text file with one group size per line
    :param index_train_data_file_path: output pickle for the train rows
    :param index_valid_data_file_path: output pickle for the valid rows
    :param train_group_file_path: output pickle for the train group sizes
    :param valid_group_file_path: output pickle for the valid group sizes
    :param alpha: fraction of groups assigned to the train split
    :return:
    """
    data = pickle.load(open(data_file_path,"r"))
    group = [int(x.strip()) for x in open(group_file, "r")]
    # regroup the flat rows: rows step..step+len-1 belong to group i
    data_collection = []
    step = 0
    for i in xrange(len(group)):
        group_length = group[i]
        group_data = []
        for j in xrange(step, step + group_length):
            group_data.append(data[j])
        step += group_length
        data_collection.append(group_data)
    print "group data length: ", len(data_collection)
    # sklearn's shuffle permutes both lists with the same permutation
    data_collection,group = shuffle(data_collection,group)
    _train_data = data_collection[:int(alpha*len(data_collection))]
    train_data = []
    for group_data in _train_data:
        for data in group_data:
            train_data.append(data)
    train_group = group[:int(alpha*len(data_collection))]
    _valid_data = data_collection[int(alpha*len(data_collection)):]
    valid_data = []
    for group_data in _valid_data:
        for data in group_data:
            valid_data.append(data)
    valid_group = group[int(alpha*len(data_collection)):]
    print "train data length: ", len(train_data)
    print "valid data length: ", len(valid_data)
    pickle.dump(train_data,open(index_train_data_file_path,"w"))
    pickle.dump(valid_data,open(index_valid_data_file_path,"w"))
    pickle.dump(train_group, open(train_group_file_path, "w"))
    pickle.dump(valid_group, open(valid_group_file_path, "w"))
def data_process(path,vec_dim,scale):
    """
    data process
    End-to-end pipeline: build the vocabulary and index data, build the
    index-to-vector matrix, then split the train data into train/valid.
    :param path: project root containing the data/, save_models/ and pkl/ dirs
    :param vec_dim: vec dimension
    :param scale: random value scale
    :return:
    """
    #word2index index2vec index_data
    train_data_file_path = path + "data/dbqa-data-train.txt"
    test_data_file_path = path + "data/dbqa-data-test.txt"
    emb_file_path = path + "save_models/wiki-w2v-" + str(vec_dim) + ".emb"
    print "--- index_data ---"
    # word2index index2vec index_data
    word2index_file_path = path + "pkl/word2index.pkl"
    index2vec_file_path = path + "pkl/index2vec.pkl"
    index_train_data_file_path = path + "pkl/data-train.pkl"
    index_valid_data_file_path = path + "pkl/data-valid.pkl"
    index_test_data_file_path = path + "pkl/data-test.pkl"
    train_group_file = path + "data/dbqa-data-train.txt.group"
    train_group = path + "pkl/train_group.pkl"
    valid_group = path + "pkl/valid_group.pkl"
    sentence2index(train_data_file_path, test_data_file_path, word2index_file_path, index_train_data_file_path,
                   index_test_data_file_path)
    index2vector(word2index_file_path, emb_file_path, index2vec_file_path, vec_dim, scale)
    print "--- split_data ---"
    # NOTE(review): split() reads data-train.pkl and then writes the train
    # subset back to the same path, overwriting the full file — confirm the
    # in-place overwrite is intended.
    split(index_train_data_file_path, train_group_file, index_train_data_file_path, index_valid_data_file_path,
          train_group, valid_group, alpha=0.8)
def data_for_max_length_backward(data_file_path, save_file_path, data_length):
    """
    change index-data to a fixed-length data, pad with zeroes.
    Sequences longer than ``data_length`` are truncated; shorter ones are
    right-padded (zeros appended at the end).
    :param data_file_path: index-data file path
    :param save_file_path: fixed-length data path
    :param data_length: fixed data length
    :return:
    """
    print("pad backward with zero...")
    nn_data = []
    # Binary file modes keep the pickles valid on both Python 2 and 3
    # (the original text-mode 'r'/'w' breaks under Python 3).
    for data in pickle.load(open(data_file_path, 'rb')):
        # appending data_length zeros then slicing covers both the
        # truncation and the padding case in one expression
        q_data = (data[0] + [0] * data_length)[:data_length]
        a_data = (data[1] + [0] * data_length)[:data_length]
        nn_data.append([q_data, a_data, data[2]])
    pickle.dump(nn_data, open(save_file_path, 'wb'))
def _fix_length_front(seq, data_length):
    """Truncate seq to data_length, or left-pad it with zeros up to it."""
    if len(seq) >= data_length:
        return seq[:data_length]
    return [0] * (data_length - len(seq)) + seq


def data_for_max_length_forward(data_file_path, save_file_path, data_length):
    """
    change index-data to a fixed-length data, pad with zeroes.
    Sequences longer than ``data_length`` are truncated; shorter ones are
    left-padded (zeros inserted at the front).
    :param data_file_path: index-data file path
    :param save_file_path: fixed-length data path
    :param data_length: fixed data length
    :return:
    """
    print("pad forward with zero...")
    nn_data = []
    # Binary file modes keep the pickles valid on both Python 2 and 3
    # (the original text-mode 'r'/'w' breaks under Python 3).
    for data in pickle.load(open(data_file_path, 'rb')):
        nn_data.append([_fix_length_front(data[0], data_length),
                        _fix_length_front(data[1], data_length),
                        data[2]])
    pickle.dump(nn_data, open(save_file_path, 'wb'))
def get_pair_data(data_file, group_file, save_path):
data = pickle.load(open(data_file,"r"))
group = pickle.load(open(group_file,"r"))
pair_data = []
step = 0
for i in xrange(len(group)):
group_length = group[i]
group_data = []
for j in xrange(step,step+group_length):
group_data.append(data[j])
step += group_length
pair_data += group_pair_data(group_data)
print "pair data length: ",len(pair_data)
pickle.dump(pair_data, open(save_path, "w"))
def group_pair_data(group_data):
    """Combine every positive answer with every negative one into triples.

    Each element of group_data is [query_indices, answer_indices, label];
    the result holds one [query, positive_answer, negative_answer] triple
    per positive/negative combination, positives in outer order.
    """
    positives = [entry for entry in group_data if entry[2] == 1]
    negatives = [entry for entry in group_data if entry[2] == 0]
    pairs = []
    for pos in positives:
        for neg in negatives:
            pairs.append([pos[0], pos[1], neg[1]])
    return pairs  # shuffle?
def _binary_overlap(seq, other, length):
    """1.0 at positions where seq[j] is a non-pad token also present in other."""
    return [1.0 if seq[j] != 0 and seq[j] in other else 0.0 for j in range(length)]


def calculate_overlap_train(data_file, overlap_file, length):
    """
    Compute binary word-overlap features for (query, pos, neg) triples.

    For each triple, emits four length-sized 0/1 vectors: query vs each
    answer and each answer vs the query (the original repeated this loop
    four times inline). Saved as a float32 array of shape (n, 4, length).

    :param data_file: pickled fixed-length pair data ([q, a1, a2] rows)
    :param overlap_file: output pickle for the overlap features
    :param length: fixed sequence length of the padded data
    :return:
    """
    # Binary file modes keep the pickles valid on both Python 2 and 3.
    data = pickle.load(open(data_file, "rb"))
    print("overlap train data length: %d" % len(data))
    overlap = []
    for entry in data:
        q_seq, a1_seq, a2_seq = entry[0], entry[1], entry[2]
        overlap.append([
            _binary_overlap(q_seq, a1_seq, length),
            _binary_overlap(q_seq, a2_seq, length),
            _binary_overlap(a1_seq, q_seq, length),
            _binary_overlap(a2_seq, q_seq, length),
        ])
    print("Overlap train file(0,1) saved to " + overlap_file)
    pickle.dump(np.asarray(overlap, dtype='float32'), open(overlap_file, "wb"))
def calculate_overlap_test(data_file, overlap_file, length):
    """
    Compute binary word-overlap features for test (query, answer) pairs.

    For each pair, marks the positions whose non-pad token also appears in
    the other sequence. Saved as a float32 array of shape (n, 2, length).

    :param data_file: pickled fixed-length index data ([q, a, label] rows)
    :param overlap_file: output pickle for the overlap features
    :param length: fixed sequence length of the padded data
    :return:
    """
    # Binary file modes keep the pickles valid on both Python 2 and 3.
    data = pickle.load(open(data_file, "rb"))
    print("overlap test data length: %d" % len(data))
    overlap = []
    for entry in data:
        q_seq, a_seq = entry[0], entry[1]
        q_overlap = [1.0 if q_seq[j] != 0 and q_seq[j] in a_seq else 0.0 for j in range(length)]
        a_overlap = [1.0 if a_seq[j] != 0 and a_seq[j] in q_seq else 0.0 for j in range(length)]
        overlap.append([q_overlap, a_overlap])
    print("Overlap test file(0,1) saved to " + overlap_file)
    pickle.dump(np.asarray(overlap, dtype='float32'), open(overlap_file, "wb"))
def calculate_lcs_test(data_file,lcs_file,length):
    """
    Compute a longest-common-subsequence feature per (query, answer) pair.

    Fills a standard LCS dynamic-programming table (pad tokens, index 0,
    never match) and then delegates to ``getLcs`` (defined elsewhere) to
    backtrack the match positions into ``res``.
    :param data_file: pickled fixed-length index data ([q, a, label] rows)
    :param lcs_file: output pickle for the LCS features
    :param length: fixed sequence length of the padded data
    :return:
    """
    data = pickle.load(open(data_file,"r"))
    print "test lcs data length: ",len(data)
    lcs = []
    for index in xrange(len(data)):
        # (length+1) x (length+1) DP table plus a backtracking direction table
        lcs_mat = [[0 for i in range(length + 1)] for j in range(length + 1)]
        flag = [[0 for i in range(length + 1)] for j in range(length + 1)]
        q_data = data[index][0]
        a_data = data[index][1]
        for i in xrange(length):
            for j in xrange(length):
                if q_data[i] == a_data[j] and q_data[i]!=0 and a_data[j]!=0:
                    lcs_mat[i + 1][j + 1] = lcs_mat[i][j] + 1
                    flag[i + 1][j + 1] = 'ok'
                elif lcs_mat[i + 1][j] > lcs_mat[i][j + 1]:
                    lcs_mat[i + 1][j + 1] = lcs_mat[i + 1][j]
                    flag[i + 1][j + 1] = 'left'
                else:
                    lcs_mat[i + 1][j + 1] = lcs_mat[i][j + 1]
                    flag[i + 1][j + 1] = 'up'
        res = [0.] * length
        # getLcs mutates res in place with the backtracked LCS markers
        getLcs(flag, q_data, length, length, res)
        # NOTE(review): the same res is stored for both the query and the
        # answer side — confirm this duplication is intended.
        lcs.append([res, res])
    print "test LCS file(0,1) saved to " + lcs_file
    pickle.dump(np.asarray(lcs, dtype='float32'), open(lcs_file, "w"))
def calculate_lcs_train(data_file,lcs_file,length):
data = pickle.load(open(data_file,"r"))
print "train lcs data length: ",len(data)
lcs = []
| |
has state_dict lets get it
if hasattr(module, "state_dict") and hasattr(
local_module, "load_state_dict"
):
info("loading remote state dict")
sd_ptr = module.state_dict()
# get a blocking copy of the state_dict
info(f" Downloading remote layer: {layer_name}")
state_dict = sd_ptr.get(
request_block=request_block,
reason=reason,
timeout_secs=timeout_secs,
delete_obj=delete_obj,
)
# We have to recreate the OrderedDict for load_state_dict to work
ordered_state_dict = OrderedDict()
for elem, item in state_dict.items():
ordered_state_dict[str(elem)] = item
# iterate through the key, values
# weights and biases should be in there
if state_dict is not None:
# TODO: support torch.nn.modules.module._IncompatibleKeys
local_module.load_state_dict(ordered_state_dict)
else:
info(
f" Failed to get {layer_name} state_dict, skipping layer."
)
except Exception as e:
critical(f" Failed to download remote state for {layer_name}.")
traceback_and_raise(e)
info("\n> Finished downloading remote model <\n\n")
self.local_model = local_model
return self.local_model
# zero them so we know they are copied
def zero_layers(self) -> None:
for m in self.modules.values():
if hasattr(m, "weight"):
m.weight.requires_grad_(False).zero_()
if hasattr(m, "bias"):
m.bias.requires_grad_(False).zero_()
# easy way to check the weights have changed
def debug_sum_layers(self) -> None:
info("> Summing layers for debugging: ")
for n, m in self.modules.items():
if hasattr(m, "state_dict"):
if self.is_local:
state_dict = m.state_dict()
else:
state_dict = m.state_dict().get()
for k, v in state_dict.items():
if hasattr(v, "sum"):
s = v.sum().item()
info(f" Layer {n} sum({k}): {s}")
def object2proto(obj: torch.nn.Module, is_child: bool = False) -> Module_PB:
    """Serialize a torch.nn.Module (or user-defined SyModule tree) to Module_PB.

    Stock torch layers are stored by class name; user-defined modules get a
    marker prefix and carry their traced forward plan.
    """
    proto = Module_PB()
    # Distinguish stock torch layers from user-defined modules by module path.
    if "torch.nn." in type(obj).__module__:
        proto.module_type = type(obj).__name__
    else:
        proto.module_type = f"_USER_DEFINED_MODULE_{type(obj).__name__}"
        proto.forward.CopyFrom(sy.serialize(obj._forward_plan))

    # extra_repr() holds the constructor arguments; repr_to_kwargs reverses it.
    proto.module_repr = obj.extra_repr()

    if hasattr(obj, "_uid2attr"):
        proto._uid2attr.CopyFrom(sy.serialize(SyOrderedDict(obj._uid2attr)))

    proto.parameters.CopyFrom(sy.serialize(SyOrderedDict(obj._parameters)))

    # Recurse into submodules, remembering each child's attribute name.
    for child_name, child in obj.named_children():
        serialized_child = object2proto(child, is_child=True)
        serialized_child.module_name = child_name
        proto.children.append(serialized_child)

    return proto
def proto2object(proto: Module_PB) -> torch.nn.Module:
    """Rebuild a torch.nn.Module (or user-defined SyModule) from a Module_PB.

    Reverses object2proto: reconstructs the class, restores parameters,
    re-attaches the forward plan and children, then recompiles user-defined
    modules so plan actions point at the freshly restored attributes.
    """
    # User-defined modules were serialized with a marker prefix on the type name.
    is_userdefined = proto.module_type.startswith("_USER_DEFINED_MODULE_")
    if is_userdefined:
        # Create a fresh torch.nn.Module subclass carrying the original name.
        obj_type = type(
            proto.module_type.replace("_USER_DEFINED_MODULE_", ""),
            (torch.nn.Module,),
            {},
        )
    else:
        # Stock torch layer: look the class up on torch.nn.
        obj_type = getattr(torch.nn, proto.module_type)
    # Recover constructor arguments from the stored extra_repr() string.
    args, kwargs = repr_to_kwargs(repr_str=proto.module_repr)
    obj = obj_type(*args, **kwargs)

    # Restore parameters by attribute name.
    for name, param in sy.deserialize(proto.parameters).items():
        # if we don't do this check, some torch.nn layers fail ( e.g. Conv2D with bias=False)
        if not isinstance(param, _SyNone):
            setattr(obj, str(name), param)

    # Only user-defined modules serialize a forward plan.
    if proto.HasField("forward"):
        forward_plan = sy.deserialize(proto.forward)
        obj._forward_plan = forward_plan
        # Wrap the plan so it is recompiled against current state on each call.
        compile_and_forward = create_compile_and_forward_fn(obj)
        obj.__call__ = compile_and_forward
        obj.forward = compile_and_forward
        # obj.__call__ = forward_plan
        # obj.forward = forward_plan

    # Recursively restore children under their original attribute names.
    for child_proto in proto.children:
        setattr(obj, str(child_proto.module_name), sy.deserialize(child_proto))

    if proto.HasField("_uid2attr"):
        obj._uid2attr = sy.deserialize(proto._uid2attr)

    # Re-point plan actions at the freshly restored attributes.
    if is_userdefined:
        recompile(obj)
    return obj
def create_compile_and_forward_fn(obj: "SyModule") -> Callable:
    """Return a callable that recompiles *obj*'s forward plan, then executes it.

    Args:
        obj (SyModule): the module whose plan is refreshed before each call
    """

    def _compile_and_forward(*args, **kwargs):  # type: ignore
        # Sync the plan with the module's current attribute values first.
        recompile(obj)
        return obj._forward_plan(*args, **kwargs)

    return _compile_and_forward
def recompile(sy_module: "SyModule") -> None:
    """Refresh stale plan actions with the module's current attribute values.

    If the module state changed after the forward plan was traced, the plan's
    stored objects are updated here so execution sees the current tensors.

    Args:
        sy_module (SyModule): the module to compile
    """
    if not hasattr(sy_module, "_forward_plan"):
        return
    for action in sy_module._forward_plan.actions:  # type: ignore
        if not isinstance(action, SaveObjectAction):
            continue
        # Only actions that correspond to a tracked attribute get refreshed.
        if action.obj.id in sy_module._uid2attr:
            action.obj.data = getattr(
                sy_module, str(sy_module._uid2attr[action.obj.id])
            )
# Register (de)serialization hooks so torch.nn.Module instances can travel
# through the syft serde machinery as Module_PB protobufs.
GenerateWrapper(
    wrapped_type=torch.nn.Module,
    import_path="torch.nn.Module",
    protobuf_scheme=Module_PB,
    type_object2proto=object2proto,
    type_proto2object=proto2object,
)
class ForwardToPlanConverter(type):
    """Metaclass that builds the forward plan right after instantiation.

    Ensures that:
    1) the object is initialized when calling Object()
    2) obj._make_forward_plan() is called after initialization
    """

    def __call__(cls: Any, *args, **kwargs) -> Any:  # type: ignore
        # TODO: check if contains input_size
        instance = super().__call__(*args, **kwargs)
        instance._make_forward_plan()
        return instance
class SyModule(torch.nn.Module, metaclass=ForwardToPlanConverter):
    """A `SyModule` is the pointable equivalent of a torch.nn.Module. In order to make
    SyModule remotely executable, its `.forward` method is converted into a `Plan` object
    when initializing a `SyModule` object. This object has two "modes", in which it behaves
    differently. During the "forward plan building stage" it transforms parameters and submodules
    into pointer when the user retrieves them. After plan building the model behaves more
    like a regular torch.nn.Module, but instead of running a forward method, the user executes
    a `Plan`. As the user does not need to understand the building stage, and the .forward API
    is fairly similar to a regular torch.nn.Module, there is no need to understand all internals
    to use this module.
    """

    def __init__(self, *args, input_size: Optional[Tuple[int]] = None, **kwargs) -> None:  # type: ignore
        """Initializes an empty SyModule

        Args:
            input_size (Tuple[Int], optional): input_size of the Module, needs to be defined or inferrable.
                Defaults to None.
        """
        super().__init__(*args, **kwargs)
        # True only while the forward Plan is being traced; __getattr__ keys off this.
        self.building_forward = False
        # attribute name -> Pointer for params/submodules sent to the plan builder VM.
        self._parameter_pointers: Dict[str, Pointer] = dict()
        self.input_size = input_size

    def _make_forward_plan(self) -> None:
        """Convert forward function into a `Plan` object

        Raises:
            ValueError: `.forward` method must be defined
        """
        # BUGFIX: was `getattr(self.forward, __name__, None)` — the unquoted,
        # module-level `__name__` was used as the attribute name, so this guard
        # could never detect a missing forward. The attribute name must be the
        # string literal "__name__".
        if getattr(self.forward, "__name__", None) == "_forward_unimplemented":  # type: ignore
            raise ValueError("Missing .forward() method for Module")

        inputs = self._get_forward_inputs()
        self.building_forward = True
        plan = make_plan(self.forward, inputs=inputs)  # type: ignore
        # After tracing, calls run the plan (with recompilation) instead of forward.
        self.forward = self._local_forward
        self._forward_plan = plan
        # NOTE(review): assigning __call__ on the instance does not change call
        # syntax (dunders resolve on the type); kept as-is for parity.
        self.__call__ = plan
        self._create_uid2attr()
        self.building_forward = False
        self._remove_plan_action_data()

    def _remove_plan_action_data(self) -> None:
        """
        Sets `action.obj.data` for each symodule action in `self._forward_plan` to `None`.
        This greatly reduces the proto memory footprint;
        The whole state of `self` is saved in the action, which will be recompiled anyway.
        """
        # Remove module action data
        for action in self._forward_plan.actions:
            if isinstance(action, SaveObjectAction) and action.obj.id in self._uid2attr:
                action.obj.data = downcast(None)

    def _local_forward(self, *args, **kwargs):  # type: ignore
        # Re-sync the plan with current attribute values, then execute it.
        recompile(self)
        return self._forward_plan(*args, **kwargs)

    def _create_uid2attr(self) -> None:
        # Invert _parameter_pointers: pointer location id -> attribute name.
        self._uid2attr = {
            param.id_at_location: attr_name
            for attr_name, param in self._parameter_pointers.items()
        }

    def __getattr__(self, name: str) -> Any:
        """A custom getattr method. When retrieving a torch.nn.Module or a torch.nn.Parameter
        *during forward plan building*, SyModule instead returns a Pointer to this attribute.
        The first time an attribute is retrieved, we send it to the plan builder VM, and store
        it in self._parameters_pointers, which will be used for plan Recompilation during
        *deserialization*. If an attribute is requested again, we return the pointer from
        `_parameters_pointers`

        Args:
            name (str): name of the attr

        Returns:
            Any: Attribute value or Pointer to it
        """
        # this is __getattr__ instead of __getattribute__ because of the structure of torch.nn.Module
        if name in self._parameter_pointers and self.building_forward:
            return self._parameter_pointers[name]

        res = super().__getattr__(name)
        if (
            isinstance(res, (torch.nn.Module, torch.nn.Parameter))
            and self.building_forward
        ):
            # First access during tracing: ship to the builder VM and cache the pointer.
            res_ptr = res.send(ROOT_CLIENT)
            self._parameter_pointers[name] = res_ptr
            return res_ptr
        else:
            return res

    def _get_inp_key(self) -> str:
        """Get key for the `.forward` argument

        Returns:
            str: input key
        """
        forward_signature = inspect.signature(self.forward)
        args = list(forward_signature.parameters.items())
        if len(args) == 0 or len(args) > 1:
            raise ValueError(
                "SyModules accept only *precisely 1* argument and no kwargs"
            )
        k, v = args[0]
        # A default value would make the parameter a kwarg, which is unsupported.
        if v.default is not inspect.Parameter.empty:
            raise ValueError("SyModules accept only args, not kwargs")
        inp_key = k
        return inp_key

    def _get_inp_size(self) -> Tuple[int]:
        """Get input size for this module

        Returns:
            Tuple[Int]: input size for `.forward`
        """
        if not hasattr(self, "input_size") or not isinstance(
            self.input_size, (tuple, list)
        ):
            raise ValueError(
                "SyModule needs `input_size`: Tuple(Int) as kwarg to trace the forward plan."
                "Also, make sure to call **super().__init__(**kwargs)** in ALL your SyModules"
                ""
            )
        return self.input_size

    def _get_forward_inputs(self) -> Dict[str, Pointer]:
        """Get the dummy inputs for generating the .forward `Plan`

        Returns:
            Dict[str: Any]: inputs for .forward
        """
        input_size = self._get_inp_size()
        inp_key = self._get_inp_key()

        # SySequential's generated forward always takes its input as "x".
        if isinstance(self, SySequential):
            inp_key = "x"

        # Random dummy tensor shipped to the builder VM drives the tracing.
        inputs = {inp_key: torch.randn(input_size).send(ROOT_CLIENT)}
        return inputs
class SySequential(SyModule):
"""The Syft equivalent of torch.nn.Sequential"""
    def __init__(self, *args, input_size: Optional[Tuple[int]] = None):  # type: ignore
        """initializes SySequential and stores the submodules

        Args:
            input_size (Tuple[Int], optional): input_size of the Module, needs to be defined or inferrable.
                Defaults to None.
        """
        super().__init__(input_size=input_size)
        # Register each submodule under its positional index, mirroring
        # torch.nn.Sequential's attribute naming ("0", "1", ...).
        for idx, module in enumerate(args):
            setattr(self, str(idx), module)
        # Count of stored submodules; used by iteration and index validation.
        self.n_modules = len(args)
def __iter__(self): # type: ignore
if self.building_forward:
return iter([getattr(self, str(i)) for i in range(self.n_modules)])
else:
return iter(self._modules.values())
def _get_item_by_idx(self, iterator: Iterator, idx: int) -> SyModule:
"""Get the idx-th item of the iterator"""
size = self.n_modules
if not -size <= idx < size:
raise IndexError(f"index {idx} is out of range")
return next(islice(iterator, idx, None))
def __getitem__(self, idx: int) -> SyModule:
if isinstance(idx, slice): # type: ignore
raise ValueError("SySequential does not support slices")
else:
return self._get_item_by_idx(self._modules.values(), idx)
def __setitem__(self, idx: int, module: Module) -> None:
key = self._get_item_by_idx(self._modules.keys(), idx)
return setattr(self, key, module)
def __delitem__(self, idx: Union[slice, int]) -> None:
if isinstance(idx, slice):
raise ValueError("SySequential does not support slices")
else:
key = self._get_item_by_idx(self._modules.keys(), idx)
delattr(self, key)
def forward(self, x: Any = None) -> Any: # type: ignore
"""Sequentially call submodule.forward
Args:
x (Any, optional): input. Defaults to None.
Returns:
Any: Module output
"""
out = | |
from tech import drc
import debug
import design
from math import log
from math import sqrt
import math
import contact
from pnand2 import pnand2
from pnand3 import pnand3
from pinv import pinv
from hierarchical_predecode2x4 import hierarchical_predecode2x4 as pre2x4
from hierarchical_predecode3x8 import hierarchical_predecode3x8 as pre3x8
from vector import vector
from globals import OPTS
class hierarchical_decoder(design.design):
"""
Dynamically generated hierarchical decoder.
"""
    def __init__(self, rows):
        """Create a hierarchical decoder sized for the given number of rows."""
        design.design.__init__(self, "hierarchical_decoder_{0}rows".format(rows))

        # Load the configured bitcell module so row pitch can match its height.
        # NOTE(review): relies on the Python 2 builtin reload().
        c = reload(__import__(OPTS.bitcell))
        self.mod_bitcell = getattr(c, OPTS.bitcell)
        self.bitcell_height = self.mod_bitcell.height

        # Predecoder instances are appended here as they are placed.
        self.pre2x4_inst = []
        self.pre3x8_inst = []

        self.rows = rows
        # Number of address bits; assumes rows is a power of two.
        self.num_inputs = int(math.log(self.rows, 2))
        (self.no_of_pre2x4,self.no_of_pre3x8)=self.determine_predecodes(self.num_inputs)

        self.create_layout()
        self.DRC_LVS()
    def create_layout(self):
        """Top-level layout flow: modules, constants, pins, placement, routing."""
        self.add_modules()
        self.setup_layout_constants()
        self.add_pins()
        self.create_pre_decoder()
        self.create_row_decoder()
        self.create_vertical_rail()
        self.route_vdd_gnd()
    def add_modules(self):
        """Instantiate and register the gate and predecoder master cells."""
        self.inv = pinv()
        self.add_mod(self.inv)
        self.nand2 = pnand2()
        self.add_mod(self.nand2)
        self.nand3 = pnand3()
        self.add_mod(self.nand3)

        # CREATION OF PRE-DECODER
        self.pre2_4 = pre2x4()
        self.add_mod(self.pre2_4)
        self.pre3_8 = pre3x8()
        self.add_mod(self.pre3_8)
def determine_predecodes(self,num_inputs):
"""Determines the number of 2:4 pre-decoder and 3:8 pre-decoder
needed based on the number of inputs"""
if (num_inputs == 2):
return (1,0)
elif (num_inputs == 3):
return(0,1)
elif (num_inputs == 4):
return(2,0)
elif (num_inputs == 5):
return(1,1)
elif (num_inputs == 6):
return(3,0)
elif (num_inputs == 7):
return(2,1)
elif (num_inputs == 8):
return(1,2)
elif (num_inputs == 9):
return(0,3)
else:
debug.error("Invalid number of inputs for hierarchical decoder",-1)
    def setup_layout_constants(self):
        """Compute vertical-rail pitch and group the predecoder output lines."""
        # Vertical metal rail gap definition
        self.metal2_extend_contact = (contact.m1m2.second_layer_height - contact.m1m2.contact_width) / 2
        self.metal2_spacing = self.metal2_extend_contact + self.m2_space
        self.metal2_pitch = self.metal2_spacing + self.m2_width
        self.via_shift = (contact.m1m2.second_layer_width - contact.m1m2.first_layer_width) / 2

        self.predec_groups = []  # This array is a 2D array.

        # Distributing vertical rails to different groups. One group belongs to one pre-decoder.
        # For example, for two 2:4 pre-decoder and one 3:8 pre-decoder, we will
        # have total 16 output lines out of these 3 pre-decoders and they will
        # be distributed as [ [0,1,2,3] ,[4,5,6,7], [8,9,10,11,12,13,14,15] ]
        # in self.predec_groups
        index = 0
        for i in range(self.no_of_pre2x4):
            lines = []
            for j in range(4):
                lines.append(index)
                index = index + 1
            self.predec_groups.append(lines)

        for i in range(self.no_of_pre3x8):
            lines = []
            for j in range(8):
                lines.append(index)
                index = index + 1
            self.predec_groups.append(lines)

        self.calculate_dimensions()
def add_pins(self):
""" Add the module pins """
for i in range(self.num_inputs):
self.add_pin("A[{0}]".format(i))
for j in range(self.rows):
self.add_pin("decode[{0}]".format(j))
self.add_pin("vdd")
self.add_pin("gnd")
    def calculate_dimensions(self):
        """ Calculate the overal dimensions of the hierarchical decoder """

        # If we have 4 or fewer rows, the predecoder is the decoder itself
        if self.num_inputs>=4:
            self.total_number_of_predecoder_outputs = 4*self.no_of_pre2x4 + 8*self.no_of_pre3x8
        else:
            self.total_number_of_predecoder_outputs = 0
            debug.error("Not enough rows for a hierarchical decoder. Non-hierarchical not supported yet.",-1)

        # Calculates height and width of pre-decoder,
        # the widest predecoder style present sets the overall width.
        if(self.no_of_pre3x8 > 0):
            self.predecoder_width = self.pre3_8.width
        else:
            self.predecoder_width = self.pre2_4.width
        self.predecoder_height = self.pre2_4.height*self.no_of_pre2x4 + self.pre3_8.height*self.no_of_pre3x8

        # Calculates height and width of row-decoder:
        # 4-5 address bits use NAND2 rows; more use NAND3 rows.
        if (self.num_inputs == 4 or self.num_inputs == 5):
            nand_width = self.nand2.width
        else:
            nand_width = self.nand3.width
        self.routing_width = self.metal2_pitch*self.total_number_of_predecoder_outputs
        self.row_decoder_width = nand_width + self.routing_width + self.inv.width
        self.row_decoder_height = self.inv.height * self.rows

        # Calculates height and width of hierarchical decoder
        self.height = self.predecoder_height + self.row_decoder_height
        self.width = self.predecoder_width + self.routing_width
def create_pre_decoder(self):
""" Creates pre-decoder and places labels input address [A] """
for i in range(self.no_of_pre2x4):
self.add_pre2x4(i)
for i in range(self.no_of_pre3x8):
self.add_pre3x8(i)
def add_pre2x4(self,num):
""" Add a 2x4 predecoder """
if (self.num_inputs == 2):
base = vector(self.routing_width,0)
mirror = "RO"
index_off1 = index_off2 = 0
else:
base= vector(self.routing_width+self.pre2_4.width, num * self.pre2_4.height)
mirror = "MY"
index_off1 = num * 2
index_off2 = num * 4
pins = []
for input_index in range(2):
pins.append("A[{0}]".format(input_index + index_off1))
for output_index in range(4):
pins.append("out[{0}]".format(output_index + index_off2))
pins.extend(["vdd", "gnd"])
self.pre2x4_inst.append(self.add_inst(name="pre[{0}]".format(num),
mod=self.pre2_4,
offset=base,
mirror=mirror))
self.connect_inst(pins)
self.add_pre2x4_pins(num)
    def add_pre2x4_pins(self,num):
        """ Add the input pins to the 2x4 predecoder """
        for i in range(2):
            # Placement comes from the placed instance's pin...
            pin = self.pre2x4_inst[num].get_pin("in[{}]".format(i))
            pin_offset = pin.ll()
            # ...while width/height are taken from the master cell's pin.
            pin = self.pre2_4.get_pin("in[{}]".format(i))
            self.add_layout_pin(text="A[{0}]".format(i + 2*num ),
                                layer="metal2",
                                offset=pin_offset,
                                width=pin.width(),
                                height=pin.height())
    def add_pre3x8(self,num):
        """ Add 3x8 numbered predecoder """
        if (self.num_inputs == 3):
            # The single predecoder IS the decoder; place it unmirrored.
            offset = vector(self.routing_width,0)
            mirror ="R0"
        else:
            # Stack above any 2:4 predecoders, mirrored about Y.
            height = self.no_of_pre2x4*self.pre2_4.height + num*self.pre3_8.height
            offset = vector(self.routing_width+self.pre3_8.width, height)
            mirror="MY"

        # If we had 2x4 predecodes, those are used as the lower
        # decode output bits
        in_index_offset = num * 3 + self.no_of_pre2x4 * 2
        out_index_offset = num * 8 + self.no_of_pre2x4 * 4

        # Three address inputs, eight one-hot outputs, then supplies.
        pins = []
        for input_index in range(3):
            pins.append("A[{0}]".format(input_index + in_index_offset))
        for output_index in range(8):
            pins.append("out[{0}]".format(output_index + out_index_offset))
        pins.extend(["vdd", "gnd"])

        self.pre3x8_inst.append(self.add_inst(name="pre3x8[{0}]".format(num),
                                              mod=self.pre3_8,
                                              offset=offset,
                                              mirror=mirror))
        self.connect_inst(pins)

        # The 3x8 predecoders will be stacked, so use yoffset
        self.add_pre3x8_pins(num,offset)
    def add_pre3x8_pins(self,num,offset):
        """ Add the input pins to the 3x8 predecoder at the given offset """
        for i in range(3):
            # Pin geometry is read from the placed instance.
            pin = self.pre3x8_inst[num].get_pin("in[{}]".format(i))
            pin_offset = pin.ll()
            # Input index is offset past any 2:4 predecoder inputs.
            self.add_layout_pin(text="A[{0}]".format(i + 3*num + 2*self.no_of_pre2x4),
                                layer="metal2",
                                offset=pin_offset,
                                width=pin.width(),
                                height=pin.height())
def create_row_decoder(self):
""" Create the row-decoder by placing NAND2/NAND3 and Inverters
and add the primary decoder output pins. """
if (self.num_inputs >= 4):
self.add_decoder_nand_array()
self.add_decoder_inv_array()
self.route_decoder()
    def add_decoder_nand_array(self):
        """ Add a column of NAND gates for final decode """

        # Row Decoder NAND GATE array for address inputs <5.
        if (self.num_inputs == 4 or self.num_inputs == 5):
            self.add_nand_array(nand_mod=self.nand2)
            # FIXME: Can we convert this to the connect_inst with checks?
            # Each NAND2 combines one line from each of the two predecode
            # groups; Z index enumerates (i, j) pairs in row-major order.
            for i in range(len(self.predec_groups[0])):
                for j in range(len(self.predec_groups[1])):
                    pins =["out[{0}]".format(i),
                           "out[{0}]".format(j + len(self.predec_groups[0])),
                           "Z[{0}]".format(len(self.predec_groups[1])*i + j),
                           "vdd", "gnd"]
                    self.connect_inst(args=pins, check=False)

        # Row Decoder NAND GATE array for address inputs >5.
        elif (self.num_inputs > 5):
            self.add_nand_array(nand_mod=self.nand3,
                                correct=drc["minwidth_metal1"])
            # This will not check that the inst connections match.
            # Each NAND3 combines one line from each of the three predecode
            # groups; Z index enumerates (i, j, k) triples in row-major order.
            for i in range(len(self.predec_groups[0])):
                for j in range(len(self.predec_groups[1])):
                    for k in range(len(self.predec_groups[2])):
                        Z_index = len(self.predec_groups[1])*len(self.predec_groups[2]) * i \
                                  + len(self.predec_groups[2])*j + k
                        pins = ["out[{0}]".format(i),
                                "out[{0}]".format(j + len(self.predec_groups[0])),
                                "out[{0}]".format(k + len(self.predec_groups[0]) + len(self.predec_groups[1])),
                                "Z[{0}]".format(Z_index),
                                "vdd", "gnd"]
                        self.connect_inst(args=pins, check=False)
def add_nand_array(self, nand_mod, correct=0):
""" Add a column of NAND gates for the decoder above the predecoders."""
self.nand_inst = []
for row in range(self.rows):
name = "DEC_NAND[{0}]".format(row)
if ((row % 2) == 0):
y_off = self.predecoder_height + nand_mod.height*row
y_dir = 1
mirror = "R0"
else:
y_off = self.predecoder_height + nand_mod.height*(row + 1)
y_dir = -1
mirror = "MX"
self.nand_inst.append(self.add_inst(name=name,
mod=nand_mod,
offset=[self.routing_width, y_off],
mirror=mirror))
def add_decoder_inv_array(self):
"""Add a column of INV gates for the decoder above the predecoders
and to the right of the NAND decoders."""
z_pin = self.inv.get_pin("Z")
if (self.num_inputs == 4 or self.num_inputs == 5):
x_off = self.routing_width + self.nand2.width
else:
x_off = self.routing_width + self.nand3.width
self.inv_inst = []
for row in range(self.rows):
name = "DEC_INV_[{0}]".format(row)
if (row % 2 == 0):
inv_row_height = self.inv.height * row
mirror = "R0"
y_dir = 1
else:
inv_row_height = self.inv.height * (row + 1)
mirror = "MX"
y_dir = -1
y_off = self.predecoder_height + inv_row_height
offset = vector(x_off,y_off)
self.inv_inst.append(self.add_inst(name=name,
mod=self.inv,
offset=offset,
mirror=mirror))
# This will not check that the inst connections match.
self.connect_inst(args=["Z[{0}]".format(row),
"decode[{0}]".format(row),
"vdd", "gnd"],
check=False)
    def route_decoder(self):
        """ Route the nand to inverter in the decoder and add the pins. """
        for row in range(self.rows):
            # route nand output to output inv input
            zr_pos = self.nand_inst[row].get_pin("Z").rc()
            al_pos = self.inv_inst[row].get_pin("A").lc()
            # ensure the bend is in the middle
            mid1_pos = vector(0.5*(zr_pos.x+al_pos.x), zr_pos.y)
            mid2_pos = vector(0.5*(zr_pos.x+al_pos.x), al_pos.y)
            self.add_path("metal1", [zr_pos, mid1_pos, mid2_pos, al_pos])

            # Expose the inverter output as this row's decode[] pin.
            z_pin = self.inv_inst[row].get_pin("Z")
            self.add_layout_pin(text="decode[{0}]".format(row),
                                layer="metal1",
                                offset=z_pin.ll(),
                                width=z_pin.width(),
                                height=z_pin.height())
    def create_vertical_rail(self):
        """ Creates vertical metal 2 rails to connect predecoder and decoder stages."""
        # This is not needed for inputs <4 since they have no pre/decode stages.
        if (self.num_inputs >= 4):
            # Array for saving the X offsets of the vertical rails. These rail
            # offsets are accessed with indices.
            self.rail_x_offsets = []
            for i in range(self.total_number_of_predecoder_outputs):
                # The offsets go into the negative x direction
                # assuming the predecodes are placed at (self.routing_width,0)
                x_offset = self.metal2_pitch * i
                # Store the rail centerline for later hookups.
                self.rail_x_offsets.append(x_offset+0.5*self.m2_width)
                self.add_rect(layer="metal2",
                              offset=vector(x_offset,0),
                              width=drc["minwidth_metal2"],
                              height=self.height)

            self.connect_rails_to_predecodes()
            self.connect_rails_to_decoder()
def connect_rails_to_predecodes(self):
""" Iterates through all of the predecodes and connects to the rails including the offsets """
for pre_num in range(self.no_of_pre2x4):
for i in range(4):
index = pre_num * 4 + i
out_name = "out[{}]".format(i)
pin = self.pre2x4_inst[pre_num].get_pin(out_name)
self.connect_rail(index, pin)
for pre_num in range(self.no_of_pre3x8):
for i in range(8):
index = pre_num * 8 + i + self.no_of_pre2x4 | |
# <reponame>encryptogroup/RAID-PIR<gh_stars>10-100
"""
<Author>
<NAME>
(inspired from upPIR by <NAME> et al.)
(inspired from a previous version by <NAME>)
<Date>
December 2014
<Description>
Lots of helper code for RAID-PIR. Much of this code will be used multiple
places, but some many not. Anything that is at least somewhat general will
live here.
"""
import sys
# used for os.path.exists, os.path.join and os.walk
import os
# only need ceil
import math
import socket
# use this to turn the stream abstraction into a message abstraction...
import session
try:
# for packing more complicated messages
import msgpack
except ImportError:
print("Requires MsgPack module (http://msgpack.org/)")
sys.exit(1)
# Check the python version. It's pretty crappy to do this from a library,
# but it's an easy way to check this universally
if sys.version_info[0] != 3 or sys.version_info[1] < 5:
print("Requires Python >= 3.5")
sys.exit(1)
import hashlib
from Crypto.Cipher import AES
from Crypto.Util import Counter
import time
_timer = time.perf_counter
pirversion = "v0.9.5"
# Exceptions...

class FileNotFound(Exception):
  """The file could not be found"""

class IncorrectFileContents(Exception):
  """The contents of the file do not match the manifest"""

# these keys must exist in a manifest dictionary.
# The 'regular' variant additionally requires the per-block hash list
# ('blockhashlist'); the plain list is the minimal common set.
_required_manifest_keys_regular = ['manifestversion', 'blocksize', 'blockcount', 'blockhashlist', 'hashalgorithm', 'vendorhostname', 'vendorport', 'fileinfolist']
_required_manifest_keys = ['manifestversion', 'blocksize', 'blockcount', 'hashalgorithm', 'vendorhostname', 'vendorport', 'fileinfolist']
# the original implementation, used in mirrors that hold data in RAM
def _compute_block_hashlist_fromdatastore(xordatastore, blockcount, blocksize, hashalgorithm):
"""private helper, used both the compute and check hashes"""
currenthashlist = []
# skip hash calculation if that is desired
if hashalgorithm == 'noop' or hashalgorithm == 'none' or hashalgorithm == None:
for _ in range(blockcount):
currenthashlist.append('')
return currenthashlist
# Now I'll check the blocks have the right hash...
for blocknum in range(blockcount):
# read the block ...
thisblock = xordatastore.get_data(blocksize * blocknum, blocksize)
# ... and check its hash
currenthashlist.append(find_hash(thisblock, hashalgorithm))
return currenthashlist
# implementation to read every file from disk to prevent ram from filling up. used for creating nogaps manifest.
def _compute_block_hashlist_fromdisk(offsetdict, blockcount, blocksize, hashalgorithm):
  """private helper, used both the compute and check hashes

  Streams the files in offsetdict (byte offset -> filename) in offset order,
  concatenating their contents and hashing one blocksize-sized slice per block.
  """
  print("[INFO] Calculating block hashes with algorithm", hashalgorithm, "...")

  # skip hash calculation if that is desired
  if hashalgorithm in ['noop', 'none', None]:
    currenthashlist = ['']*blockcount
    return currenthashlist

  currenthashlist = []
  # lastoffset is the next expected file start; thisblock buffers bytes not
  # yet consumed by a full block.
  lastoffset = 0
  thisblock = b''

  # progress is printed roughly every 5% of the blocks
  pt = int(blockcount / 20)
  nextprint = pt

  for blocknum in range(blockcount):
    if blockcount > 99 and blocknum >= nextprint:
      print(blocknum, "/", blockcount,\
          "("+str(int(round(blocknum*1.0/blockcount*100)))+"%) done...")
      nextprint = nextprint + pt

    # fill the buffer until at least one full block is available
    while len(thisblock) < blocksize:
      if lastoffset in offsetdict:
        fd = open(offsetdict[lastoffset], 'rb')
        print("[INFO] reading", offsetdict[lastoffset])
        thisfilecontents = fd.read()
        fd.close()
        lastlen = len(thisfilecontents)
        lastoffset = lastoffset + lastlen
        thisblock = thisblock + thisfilecontents
        # free the large buffer before the next read
        del fd
        del thisfilecontents
      else:
        # NOTE(review): no file starts at this offset, so the block is
        # zero-padded and lastoffset is NOT advanced. This assumes a
        # "nogaps" layout where the only gap is after the last file —
        # confirm, otherwise a mid-store gap would pad forever.
        thisblock = thisblock + blocksize * b'\0'

    # ... and check its hash
    currenthashlist.append(find_hash(thisblock[:blocksize], hashalgorithm))
    # keep any leftover bytes for the next block
    thisblock = thisblock[blocksize:]

  print("[INFO] All blocks done.")
  return currenthashlist
def _validate_manifest(manifest):
"""private function that validates the manifest is okay"""
# it raises a TypeError if it's not valid for some reason
if type(manifest) != dict:
raise TypeError("Manifest must be a dict!")
# check for the required keys
for key in _required_manifest_keys:
if key not in manifest:
raise TypeError("Manifest must contain key: " + key + "!")
# check specific things
if len(manifest['blockhashlist']) != manifest['blockcount']:
raise TypeError("There must be a hash for every manifest block")
# otherwise, I guess I'll let this slide. I don't want the checking to
# be too version specific
# JAC: Is this a dumb idea? Should I just check it all? Do I want
# this to fail later? Can the version be used as a proxy check for this?
# Hash algorithms accepted in manifest 'hashalgorithm' fields.
_supported_hashalgorithms = ['md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512']

# Output encodings: 'hex' -> hexdigest string, 'raw' -> binary digest.
_supported_hashencodings = ['hex', 'raw']

def find_hash(contents, algorithm):
  """Helper function for hashing.

  Args:
    contents: the bytes to hash.
    algorithm: "<alg>" or "<alg>-<encoding>", e.g. "sha256" or "sha1-raw".
      Without an explicit encoding, 'hex' is used. The special values
      'noop'/'none'/None skip hashing entirely.

  Returns:
    The digest as a hex string ('hex'), raw bytes ('raw'), or '' for noop.
  """
  # first, if it's a noop, do nothing. For testing and debugging only.
  if algorithm == 'noop' or algorithm == "none" or algorithm == None:
    return ''

  # accept things like: "sha1", "sha256-raw", etc. Before the '-' is one of
  # the types known to hashlib; after it is the output encoding.
  if '-' in algorithm:
    # yes, this will raise an exception in some cases...
    hashalgorithmname, hashencoding = algorithm.split('-')
  else:
    # BUGFIX: the no-dash case previously left these names unbound and the
    # checks below raised NameError; default the encoding to 'hex'.
    hashalgorithmname, hashencoding = algorithm, 'hex'

  # check the args
  if hashalgorithmname not in _supported_hashalgorithms:
    raise TypeError("Do not understand hash algorithm: '" + algorithm + "'")

  if hashencoding not in _supported_hashencodings:
    raise TypeError("Do not understand hash encoding: '" + algorithm + "'")

  # sha256 fast path; everything else goes through hashlib.new.
  if hashalgorithmname == 'sha256':
    hashobj = hashlib.sha256(contents)
  else:
    hashobj = hashlib.new(hashalgorithmname)
    hashobj.update(contents)

  if hashencoding == 'raw':
    return hashobj.digest()
  elif hashencoding == 'hex':
    return hashobj.hexdigest()
  else:
    raise Exception("Internal Error! Unknown hashencoding '" + hashencoding + "'")
def transmit_mirrorinfo(mirrorinfo, vendorlocation, defaultvendorport=62293):
  """
  <Purpose>
    Advertise this mirror's information to a vendor.

  <Arguments>
    mirrorinfo: dictionary describing this mirror.
    vendorlocation: A string that contains the vendor location. This can be of the form "IP:port", "hostname:port", "IP", or "hostname"
    defaultvendorport: the port to use if the vendorlocation does not include one.

  <Exceptions>
    TypeError if the args are the wrong types or malformed...
    various socket errors if the connection fails.
    ValueError if vendor does not accept the mirrorinfo

  <Side Effects>
    Contacts the vendor and retrieves data from it

  <Returns>
    None
  """
  if type(mirrorinfo) != dict:
    raise TypeError("Mirror information must be a dictionary")

  # Ship the advertisement and check the vendor's acknowledgement.
  request = b"MIRRORADVERTISE" + msgpack.packb(mirrorinfo, use_bin_type=True)
  reply = _remote_query_helper(vendorlocation, request, defaultvendorport)

  if reply != b"OK":
    raise ValueError(reply)
def retrieve_rawmanifest(vendorlocation, defaultvendorport=62293):
  """
  <Purpose>
    Retrieves the manifest data from a vendor without parsing it in any way.

  <Arguments>
    vendorlocation: A string that contains the vendor location. This can be of the form "IP:port", "hostname:port", "IP", or "hostname"
    defaultvendorport: the port to use if the vendorlocation does not include one.

  <Exceptions>
    TypeError if the vendorlocation is the wrong type or malformed.
    various socket errors if the connection fails.

  <Side Effects>
    Contacts the vendor and retrieves data from it

  <Returns>
    A string containing the manifest data (unprocessed). It is a good idea
    to use parse_manifest to ensure this data is correct.
  """
  rawmanifest = _remote_query_helper(vendorlocation, b"GET MANIFEST", defaultvendorport)
  return rawmanifest
def retrieve_xorblock(socket, bitstring):
  """
  <Purpose>
    Retrieves a block from a mirror.

  <Arguments>
    socket: an open socket to the mirror
    bitstring: a bit string that contains an appropriately sized request that specifies which blocks to combine.

  <Exceptions>
    TypeError if the arguments are the wrong types. ValueError if the
    bitstring is the wrong size
    various socket errors if the connection fails.

  <Side Effects>
    Contacts the mirror and retrieves data from it

  <Returns>
    Binary data of size of 1 block. Several blocks XORed together.
  """
  response = _remote_query_helper_sock(socket, b"X" + bitstring)
  # BUGFIX: the session protocol carries bytes (cf. the b"OK" check above);
  # comparing against a str could never match, so the error was returned
  # to the caller as if it were block data.
  if response == b'Invalid request length':
    raise ValueError(response)
  return response
# only request a xorblock, without receiving it
def request_xorblock(socket, bitstring):
  """Send an 'X' (simple XOR) request; the reply must be read separately."""
  session.sendmessage(socket, b"X" + bitstring)
def retrieve_xorblock_chunked(socket, chunks):
  """Retrieve a XOR block using the chunked ('C') query scheme.

  Raises ValueError if the mirror reports a malformed request.
  """
  response = _remote_query_helper_sock(socket, b"C" + msgpack.packb(chunks, use_bin_type=True))
  # BUGFIX: compare against bytes — the mirror speaks a bytes protocol, so
  # the previous str comparison could never match.
  if response == b'Invalid request length':
    raise ValueError(response)
  return response
# only request a xorblock, without receiving it
def request_xorblock_chunked(socket, chunks):
  """Send a 'C' (chunked) request; the reply must be read separately."""
  session.sendmessage(socket, b"C" + msgpack.packb(chunks, use_bin_type=True))
def retrieve_xorblock_chunked_rng(socket, chunks):
  """Retrieve a XOR block using the chunked-with-RNG ('R') query scheme.

  Raises ValueError if the mirror reports a malformed request.
  """
  response = _remote_query_helper_sock(socket, b"R" + msgpack.packb(chunks, use_bin_type=True))
  # BUGFIX: compare against bytes — the mirror speaks a bytes protocol, so
  # the previous str comparison could never match.
  if response == b'Invalid request length':
    raise ValueError(response)
  return response
# only request a xorblock, without receiving it
def request_xorblock_chunked_rng(socket, chunks):
  """Send an 'R' (chunked + RNG) request; the reply must be read separately."""
  session.sendmessage(socket, b"R" + msgpack.packb(chunks, use_bin_type=True))
def retrieve_xorblock_chunked_rng_parallel(socket, chunks):
  """Retrieve a XOR block using the parallel chunked-RNG ('M') query scheme.

  Raises ValueError if the mirror reports a malformed request.
  """
  response = _remote_query_helper_sock(socket, b"M" + msgpack.packb(chunks, use_bin_type=True))
  # BUGFIX: compare against bytes — the mirror speaks a bytes protocol, so
  # the previous str comparison could never match.
  if response == b'Invalid request length':
    raise ValueError(response)
  return response
# only request a xorblock, without receiving it
def request_xorblock_chunked_rng_parallel(socket, chunks):
  """Send an 'M' (parallel chunked + RNG) request; the reply must be read separately."""
  session.sendmessage(socket, b"M" + msgpack.packb(chunks, use_bin_type=True))
def retrieve_mirrorinfolist(vendorlocation, defaultvendorport=62293):
  """
  <Purpose>
    Retrieves the mirrorinfolist from a vendor.

  <Arguments>
    vendorlocation: A string that contains the vendor location. This can be
        of the form "IP:port", "hostname:port", "IP", or "hostname"

    defaultvendorport: the port to use if the vendorlocation does not include
        one.

  <Exceptions>
    TypeError if the vendorlocation is the wrong type or malformed.

    various socket errors if the connection fails.

    SessionEOF or ValueError may be raised if the other end is not speaking the
    correct protocol

  <Side Effects>
    Contacts the vendor and retrieves data from it

  <Returns>
    A list of mirror information dictionaries.
  """
  rawmirrordata = _remote_query_helper(vendorlocation, b"GET MIRRORLIST", defaultvendorport)

  mirrorinfolist = msgpack.unpackb(rawmirrordata, raw=False)

  # the mirrorinfolist must be a list (duh)
  if type(mirrorinfolist) != list:
    raise TypeError("Malformed mirror list from vendor.   Is a " + str(type(mirrorinfolist)) + " not a list")

  for mirrorlocation in mirrorinfolist:
    # each entry must be a dict of mirror info
    if type(mirrorlocation) != dict:
      raise TypeError("Malformed mirrorlocation from vendor.   Is a " + str(type(mirrorlocation)) + " not a dict")

  # everything checked out
  return mirrorinfolist
# when a socket is already opened
def _remote_query_helper_sock(socket, command):
    """Send *command* over an already-open session socket and return the raw reply."""
    # issue the relevant command, then block until the peer answers
    session.sendmessage(socket, command)
    return session.recvmessage(socket)
# opens a new socket each time...
def _remote_query_helper(serverlocation, command, defaultserverport):
# private function that contains the guts of server communication. It
# issues a single query and then closes the connection. This is used
# both to talk to the vendor and also to talk to mirrors
if type(serverlocation) != str and type(serverlocation) != str:
raise TypeError("Server location must be a string, not " + str(type(serverlocation)))
# now let's split it and ensure there are 0 or 1 colons
splitlocationlist = serverlocation.split(':')
if len(splitlocationlist) > 2:
raise TypeError("Server location may not contain more than one colon")
# now either set the port or use the default
if len(splitlocationlist) == 2:
serverport = int(splitlocationlist[1])
else:
serverport = defaultserverport
# check that this port is in the right range
if serverport <= 0 or serverport > 65535:
raise TypeError("Server location's port is not in the allowed range")
serverhostname = splitlocationlist[0]
# now we actually download the information...
# first open the socket
serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
serversocket.connect((serverhostname, serverport))
# then issue the relevant command
session.sendmessage(serversocket, | |
if made > 0:
# Open the post list
plist += '\n[color=#333333][size=small][font=Arial]The posts that were included in this count are:\n[list]\n'
# Build a list of posts that were included in this count
for row in cursor:
# Generate the post URL
url = g_PostURL.format(tid=int(row[1]), pid=int(row[0]))
# Generate the element list and append it
plist += '[*][url={0}]{1}[/url]\n'.format(url, row[2])
# Close the list
plist += '\n[/list]\n[/font][/size][/color]\n'
# Generate the message and return it
return """[font=Arial][color=#333333][size=small]Dear respected FreeVPS Directory & Discussion Forum Member & VPS Owner!
[/size][/color][/font]
[color=#333333][size=small][font=Arial]I am contacting you because you've failed to complete posts that had to be made between ([b]{bdate}[/b]) and ([b]{edate}[/b]) to keep your VPS for the month ([b]{nextm} {nexty}[/b])
[/font][/size][/color]
[color=#333333][size=small][font=Arial]Amount: [b]{posts}[/b] Post(s). Required: [b]{required}[/b] Post(s).
[/font][/size][/color]
{postlist}
[color=#333333][size=small][font=Arial]If you believe you have received this message in error, eg, you have already made up your posts, or the posts were counted incorrectly please let me know as soon as possible and I will double check your posts.[/font][/size][/color]
[color=#333333][size=small][font=Arial]This was the final warning to complete your posts. Meaning that [b]your VPS will soon be terminated[/b]. If you have anything of importance stored or associated with the VPS we highly recommend you to backup your data and/or take the necessary precautions.[/font][/size][/color]
[color=#333333][size=small][font=Arial]Note that posts in the "SPAM/Testing" and "Introductions" forums DO NOT count towards your post count as we've disabled the post count there a long time ago.[/font][/size][/color]
[color=#333333][size=small][font=Arial]Only real and valid excuses with proper proof will be accepted. Do not use this to get out of making posts unless you have a genuine and applicable reason for doing so.
[/font][/size][/color]
[color=#333333][size=small][font=Arial]Yours sincerely,[/font][/size][/color]
[color=#333333][size=small][font=Arial][b]{manager}[/b][/font][/size][/color]
[font=Arial][color=#333333][size=small]Giveaway Manager[/size][/color][/font]
[font=Arial][color=#333333][size=small]FreeVPS Directory & Discussion Staff & Administration[/size][/color][/font]""".format(
bdate=bdate.strftime("%d/%B/%Y %H:%M:%S"),
edate=edate.strftime("%d/%B/%Y %H:%M:%S"),
ndate=bdate.strftime("%d/%B/%Y %H:%M:%S"),
nextm=ndate.strftime("%B"),
nexty=ndate.year,
posts=made,
required=need,
manager=admin,
postlist=plist
)
# -----------------------------------------------------------------------------------------------
# Run a complete count and generate an administrator report.
def AdminReport(adminid, bdt, edt, silent, warn):
    """Run a complete post count for every VPS owner and build an admin report.

    Args:
        adminid: user id of the administrator issuing the report.
        bdt, edt: datetime bounds of the counting period.
        silent: when True, only count — never PM the users.
        warn: True sends a "warning" PM, False an "alert" PM.

    Returns:
        The result of GenerateAdminReport(), or False if the owner list
        could not be refreshed.
    """
    # Import globals
    global g_Db, g_PostCount, g_OwnerList, g_MultiList, g_IgnoredForums
    # Add one day to end date for the next month
    ndt = edt + datetime.timedelta(days=1)
    # Reset to the beginning of the month.  BUG FIX: datetime objects are
    # immutable — replace() returns a *new* instance.  The original discarded
    # that return value, so ndt silently stayed mid-month.
    ndt = ndt.replace(day=1, hour=0, minute=0, second=0, microsecond=0)
    # Grab the time-stamp from the specified date-time
    begin = int(time.mktime(bdt.timetuple()))
    end = int(time.mktime(edt.timetuple()))
    # Attempt to retrieve the name of the administrator
    admin = FetchUserName(adminid)
    # Make sure the owner list is up to date
    if UpdateOwnersList() != True:
        # Specify that we failed to deliver
        return False
    # The list of processed users
    status = []
    # Start processing owner identifiers
    for userid in g_OwnerList:
        # Obtain a database cursor and proceed to query the database
        with closing(g_Db.cursor()) as cursor:
            # Select the post count for the specified date range.
            # NOTE(review): the query is built with str.format; userid/begin/end
            # are forced to ints by {:d}, but g_IgnoredForums is interpolated
            # verbatim — confirm it is trusted configuration, not user input.
            try:
                cursor.execute("SELECT pid, tid, subject FROM mybb_posts WHERE uid = {:d} AND dateline BETWEEN {:d} AND {:d} AND fid NOT IN ('{:s}')".format(userid, begin, end, g_IgnoredForums))
            except Exception as e:
                # Display information
                print('Failed generate staff report: {:s}'.format(str(e)))
                # Specify that we failed to send a notice to this user
                status.append({'error': True, 'userid': userid, 'reason': str(e)})
                # Proceed to the next user
                continue
            # The number of posts that are included in this count
            made = 0 if not cursor.rowcount else int(cursor.rowcount)
            # The number of posts that the user must complete (doubled for
            # users holding multiple VPSes)
            need = g_PostCount * 2 if userid in g_MultiList else g_PostCount
            # Did the user complete his posts? And are we supposed to send a report?
            if made >= need or silent == True:
                # Specify that it was not required to send a notice to this user
                status.append({'error': False, 'userid': userid, 'made': made, 'need': need, 'reason': 'Unknown'})
                # We don't need to send a notice to this user
                continue
            # Generate the user report
            msg = GenerateWarningMessage(admin, bdt, edt, ndt, made, need, cursor) if warn == True else GenerateAlertMessage(admin, bdt, edt, ndt, made, need, cursor)
            # Finally, send the PM to the user and move on to the next user
            result = SendPrivateMessage(adminid, userid, '[{0}] Missing {1} post(s) between {2} and {3}'.format('WARNING' if warn == True else 'ALERT', need - made, bdt, edt), msg)
            # Store the status of this user
            status.append({'error': not result, 'userid': userid, 'made': made, 'need': need, 'reason': 'Failed to send PM'})
    # Finally, generate the administrator report and return the result
    return GenerateAdminReport(adminid, status, warn, bdt, edt)
# -----------------------------------------------------------------------------------------------
# Process submitted commands.
def ProgramPulse():
# Import globals
global g_LastShoutID, g_Db, g_RunLoop
# Obtain a database cursor and proceed to query the database
with closing(g_Db.cursor()) as cursor:
# Prevent the cursor from giving us cached results
g_Db.begin()
# Select any shout messages that might have been posted while we were busy
cursor.execute("SELECT id, uid, text FROM mybb_dvz_shoutbox WHERE id > {:d}".format(g_LastShoutID))
# Was there any new shout message?
if not cursor.rowcount:
# Take a break
try:
time.sleep(1)
except:
# Stop the loop
g_RunLoop = False
# I know. nasty. so what?
pass
# Try again
return
# Iterate over the returned rows
for row in cursor:
# Grab the shout identifier
shoutid, userid = int(row[0]), int(row[1])
# Exclude it on the next loop
if shoutid > g_LastShoutID:
g_LastShoutID = shoutid
# Is this a command?
try:
if row[2].index('#') != 0:
# Just raise and exception and continue the loop when catching it
raise Exception('Not a command!')
except:
continue
# Start with an empty command
cmd_list = None
# Attempt to extract a clean command
try:
cmd_list = re.sub(r'[^0-9a-z.]', '', row[2].lower()).split('.')
except:
continue
# Import globals
global g_ManagerID, g_OwnerList
# Grab the number of command sections
sections = len(cmd_list)
# Is there a command to process?
if sections < 1 or cmd_list[0] != 'pcbot':
continue
# Is this user allowed to send commands?
elif userid not in g_OwnerList and userid not in g_ManagerID:
# Update the shout message to display the warning
cursor.execute("UPDATE mybb_dvz_shoutbox SET text = 'You do not have the privilege to send commands', modified = {:d} WHERE id = {:d}".format(int(time.time()), shoutid))
# Skip the command
continue
# Is this a valid command?
elif sections < 2:
# Update the shout message to display the warning
cursor.execute("UPDATE mybb_dvz_shoutbox SET text = 'Incomplete Command: {:s}', modified = {:d} WHERE id = {:d}".format('.'.join(cmd_list), int(time.time()), shoutid))
# Skip the command
continue
# Grab the main command
cmd = cmd_list[1]
# Command: a user requesting a count
if cmd == 'count':
# Grab the sub command, if any, or default to 'current'
scmd = cmd_list[2] if sections > 2 else 'current'
# The reply message to replace the command shout
reply = None
# Command: count posts from current month
if scmd == 'current':
# Grab the current time and move to the beginning of the month
dtm = datetime.datetime.now().replace(day=1,hour=0,minute=0,second=0,microsecond=0)
# Attempt to count the user posts
pcount = CountUserPostsAfter(userid, dtm)
# Now generate the reply message
reply = '{:d} posts made after ({:s})'.format(pcount, str(dtm))
# Command: count posts from previous month
elif scmd == 'previous':
# Subtract 1 second from the beginning of the current month so we jump to the end of previous month
edt = datetime.datetime.now().replace(day=1,hour=0,minute=0,second=0,microsecond=0) - datetime.timedelta(seconds=1)
# Now use that to obtain the beginning of the previous month as well
bdt = edt.replace(day=1,hour=0,minute=0,second=0)
# Attempt to count the user posts
pcount = CountUserPostsBetween(userid, bdt, edt)
# Now generate the reply message
reply = '{:d} posts made between ({:s}) and ({:s})'.format(pcount, str(bdt), str(edt))
# Command: count posts from today
elif scmd == 'today':
# Grab the current time and move to the beginning of the day
bdt = datetime.datetime.now().replace(hour=0,minute=0,second=0,microsecond=0)
# Grab the current time and move to the end of the day
edt = datetime.datetime.now().replace(hour=23,minute=59,second=59,microsecond=0)
# Attempt to count the user posts
pcount = CountUserPostsBetween(userid, bdt, edt)
# Now generate the reply message
reply = '{:d} posts made between ({:s}) and ({:s})'.format(pcount, str(bdt), str(edt))
# Command: count posts from yesterday
elif scmd == 'yesterday':
# Grab the current time and move to the beginning of the day
dtm = datetime.datetime.now().replace(hour=0,minute=0,second=0,microsecond=0)
# Subtract one second to obtain the end | |
# <gh_stars>1-10  (dataset extraction artifact — not part of the Python source)
#!/usr/bin/env python3
import importlib
import json
import logging
import os
import sys
import requests
import traceback
from os import listdir
from os.path import isdir
from os.path import isfile
from os.path import join
from future.backports.urllib.parse import parse_qs, urlparse
from mako.lookup import TemplateLookup
from oic import rndstr
from oic.utils.http_util import BadRequest
from oic.utils.http_util import extract_from_request
from oic.utils.http_util import get_or_post
from oic.utils.http_util import NotFound
from oic.utils.http_util import Response
from oic.utils.http_util import ServiceError
from oic.utils.http_util import SeeOther
from oic.utils import http_util
from otest.events import Events
from otest.events import EV_REQUEST
from otest.events import EV_RESPONSE
from otest.flow import FlowState
from otest.session import SessionHandler
from otest.rp.endpoints import static_mime
from otest.rp.handling import WebIh
from otest.rp.setup import as_arg_setup
from otest.rp.tool import WebTester
try:
from requests.packages import urllib3
except ImportError:
pass
else:
urllib3.disable_warnings()
__author__ = 'roland'
ROOT = './'
LOOKUP = TemplateLookup(directories=[ROOT + 'htdocs'],
module_directory=ROOT + 'modules',
input_encoding='utf-8', output_encoding='utf-8')
logger = logging.getLogger("")
def setup_logging(logfile_name, log_level=logging.DEBUG):
    """Attach a file handler writing to *logfile_name* to the module logger."""
    file_handler = logging.FileHandler(logfile_name)
    file_handler.setFormatter(logging.Formatter(
        "%(asctime)s %(name)s:%(levelname)s %(message)s"))
    logger.addHandler(file_handler)
    logger.setLevel(log_level)
class JLog(object):
    """Logger adapter that emits JSON records tagged with a session id.

    Each level method takes a dict payload, merges in the session id, and
    forwards the JSON-serialized record to the wrapped logger at the
    corresponding level.
    """

    def __init__(self, log, sid):
        self.logger = log
        self.id = sid

    def _record(self, info):
        # Build the serialized record once; 'id' is inserted first so it
        # leads the JSON output, then the caller's payload is merged in.
        _dict = {'id': self.id}
        _dict.update(info)
        return json.dumps(_dict)

    def info(self, info):
        self.logger.info(self._record(info))

    def debug(self, info):
        self.logger.debug(self._record(info))

    def exception(self, info):
        self.logger.exception(self._record(info))

    def error(self, info):
        self.logger.error(self._record(info))

    def warning(self, info):
        self.logger.warning(self._record(info))
# def css(environ, event_db):
# try:
# info = open(environ["PATH_INFO"]).read()
# resp = Response(info)
# except (OSError, IOError):
# resp = NotFound(environ["PATH_INFO"])
#
# return resp
def start_page(environ, start_response, target):
    """Serve the static start page with *target* substituted into it.

    Reads ``start_page.html`` from the working directory on every request.
    """
    # Context manager closes the file handle; the original left it open
    # until garbage collection.
    with open('start_page.html') as fh:
        msg = fh.read().format(target=target)
    resp = Response(msg)
    return resp(environ, start_response)
def make_entity(provider_cls, **kw_args):
    """Instantiate *provider_cls* with the supplied keyword arguments."""
    instance = provider_cls(**kw_args)
    return instance
def absolute_url(url, startpage):
    """Resolve *url* against *startpage*'s scheme and host when it is relative.

    A URL already starting with ``http`` is returned untouched; anything else
    is treated as a path on the start page's origin.
    """
    if url.startswith('http'):
        return url
    parts = urlparse(startpage)
    return '{}://{}{}'.format(parts.scheme, parts.netloc, url)
# =============================================================================
class Application(object):
    """WSGI application driving the OP test tool.

    Routes incoming paths either to tool pages (``list``, ``config``,
    ``display``, ``opresult``, flow names, ...) or, for paths of the form
    ``<session_id>/<endpoint>``, to the provider endpoints of the matching
    test session stored in ``self.session_conf``.
    """

    def __init__(self, base_url, **kwargs):
        self.base_url = base_url
        self.kwargs = kwargs
        self.events = Events()
        self.endpoints = {}
        # Maps generated session id -> SessionHandler for URL-addressed sessions.
        self.session_conf = {}
        self.internal = kwargs['internal']

    def store_response(self, response):
        """Record an outgoing response in the event log."""
        self.events.store(EV_RESPONSE, response.info())

    def wsgi_wrapper(self, environ, func, **kwargs):
        """Call *func* with arguments extracted from the request; log both sides.

        *func* may return either ``(response, state)`` or a bare response;
        the TypeError branch handles the non-tuple case.
        """
        kwargs = extract_from_request(environ, kwargs)
        self.events.store(EV_REQUEST, kwargs)
        args = func(**kwargs)
        try:
            resp, state = args
            self.store_response(resp)
            return resp
        except TypeError:
            resp = args
            self.store_response(resp)
            return resp
        except Exception as err:
            logger.error("%s" % err)
            raise

    def handle(self, environ, tester, sid, path, qs=''):
        """Feed one endpoint request into the tester's flow state machine."""
        _sh = tester.sh
        if qs:
            msg = qs
        else:
            try:
                msg = get_or_post(environ)
            except AttributeError:
                msg = {}
        filename = self.kwargs['profile_handler'](_sh).log_path(
            sid=sid, test_id=_sh['conv'].test_id)
        _sh['conv'].entity_id = sid
        return tester.do_next(msg, filename,
                              profile_handler=self.kwargs['profile_handler'],
                              path=path)

    @staticmethod
    def pick_grp(name):
        """Return the group component (second ``-``-separated field) of a test id."""
        return name.split('-')[1]

    @staticmethod
    def see_other_to_get(resp, sh):
        """Follow a SeeOther redirect with a GET through the conversation entity."""
        loc = resp.message
        res = sh['conv'].entity.server.http_request(loc, 'GET')
        return res

    def store_session_handler(self, sh):
        """Register *sh* under a fresh random session id and return the id."""
        sid = rndstr(24)
        while sid in self.session_conf:
            sid = rndstr(24)
        sh['sid'] = sid
        self.session_conf[sid] = sh
        return sid

    def init_session(self, tester, sh, test_id=''):
        """(Re-)register the session and show the configuration page."""
        sid = self.store_session_handler(sh)
        # session['session_info'] = sh
        # Drop any flow state left over from a previous run.
        try:
            del self.session_conf[sid]['flow']
        except KeyError:
            pass
        try:
            args = sh['test_conf']
        except:  # NOTE(review): bare except kept — confirm only KeyError expected
            args = {}
        args['test_id'] = test_id
        return tester.do_config(sid, **args)

    def run_test(self, tester, _path, _sid, environ, start_response):
        """Run one test flow step; return (final_response, 0) or (0, next_path).

        A ``(0, path)`` return means the tester redirected back to this host
        and the caller should continue dispatching at *path*.
        """
        _op = '{} {}'.format(environ['REQUEST_METHOD'], _path)
        resp = tester.run(_path, sid=_sid, op=_op, **self.kwargs)
        if resp:
            logger.info(
                'Response class: {}'.format(resp.__class__.__name__))
            # NOTE(review): a truthy resp that is neither a requests.Response
            # nor falsy falls through this branch and the method returns
            # None — confirm that case cannot occur.
            if isinstance(resp, requests.Response):
                try:
                    loc = resp.headers['location']
                except KeyError:
                    logger.info(
                        'Response type: {}, missing location'.format(
                            type(resp)))
                    resp = ServiceError(
                        'Wrong response: {}:{}'.format(resp.status_code,
                                                       resp.text))
                    return resp(environ, start_response), 0
                else:
                    try:
                        tester.conv.events.store('Cookie',
                                                 resp.headers['set-cookie'])
                    except KeyError:
                        pass
                    # For me !
                    if loc.startswith(tester.base_url):
                        _path = loc[len(tester.base_url):]
                        if _path[0] == '/':
                            _path = _path[1:]
                        return 0, _path
                    else:
                        if self.internal:
                            _url = absolute_url(loc,
                                                tester.sh['test_conf'][
                                                    'start_page'])
                            logging.info('Redirect not to me => {}'.format(_url))
                            res = tester.conv.entity.server.http_request(_url)
                            logging.info('{} response'.format(res.status_code))
                            logging.debug('txt: {}'.format(res.text))
                            res = tester.display_test_list()
                            return res, 0
                        else:
                            res = SeeOther(loc)
                            return res(environ, start_response), 0
        elif resp is True or resp is False or resp is None:
            return tester.display_test_list(), 0
        else:
            return resp(environ, start_response), 0

    # publishes the OP endpoints
    def application(self, environ, start_response):
        """Main WSGI entry point: dispatch PATH_INFO to the matching handler."""
        session = environ['beaker.session']
        jlog = JLog(logger, session.id)
        path = environ.get('PATH_INFO', '').lstrip('/')
        jlog.info({"remote_addr": environ["REMOTE_ADDR"],
                   "path": path})
        # self.events.store(EV_REQUEST, path)
        try:
            sh = session['session_info']
        except KeyError:
            sh = SessionHandler(**self.kwargs)
            # sh.session_init()
            session['session_info'] = sh
        info = WebIh(session=sh, **self.kwargs)
        info.environ = environ
        info.start_response = start_response
        tester = WebTester(info, sh, **self.kwargs)
        if 'path' in self.kwargs and path.startswith(self.kwargs['path']):
            # BUG FIX: the original indexed the undefined local name
            # ``kwargs`` here, raising NameError whenever a path prefix was
            # configured; it must read self.kwargs.
            _path = path[len(self.kwargs['path']) + 1:]
        else:
            _path = path
        if _path == "robots.txt":
            return static_mime("static/robots.txt", environ, start_response)
        elif _path.startswith("static/"):
            return static_mime(_path, environ, start_response)
        if _path == "list":
            try:
                qs = parse_qs(get_or_post(environ))
            except Exception as err:
                jlog.error({'message': err})
                qs = {}
            else:
                if qs:
                    sh['test_conf'] = dict([(k, v[0]) for k, v in qs.items()])
                    # self.session_conf[sh['sid']] = sh
                else:
                    return self.init_session(tester, sh)
            logger.info('test_conf: {}'.format(sh['test_conf']))
            if 'start_page' not in sh['test_conf']:
                resp = BadRequest('You MUST provide a start_page')
                return resp(environ, start_response)
            info.profile = tester.sh.profile = qs['response_type'][0]
            sh.session_init()
            if 'test_id' in qs:
                (res, _path) = self.run_test(tester, qs['test_id'][0],
                                             sh['sid'], environ,
                                             start_response)
                if res:
                    return res
            else:
                res = tester.display_test_list()
                return res
        elif _path == '' or _path == 'config':
            return self.init_session(tester, sh)
        elif _path in self.kwargs['flows'].keys():  # Run flow
            # Will use the same test configuration
            try:
                _ = tester.sh['test_conf']
            except KeyError:
                return self.init_session(tester, sh)
            try:
                _sid = tester.sh['sid']
            except KeyError:
                _sid = self.store_session_handler(sh)
            # First time around this should not be set
            try:
                _ = self.session_conf[_sid]['flow']
            except KeyError:
                pass
            else:
                return self.init_session(tester, sh, _path)
            (res, _path) = self.run_test(tester, _path, _sid, environ,
                                         start_response)
            if res:
                return res
        elif _path == 'display':
            return info.flow_list()
        elif _path == "opresult":
            try:
                _display_path = '/{}/display'.format(self.kwargs['path'])
            except KeyError:
                _display_path = '/display'
            resp = SeeOther(
                "{}#{}".format(_display_path,
                               self.pick_grp(sh['conv'].test_id)))
            return resp(environ, start_response)
        elif _path.startswith("test_info"):
            p = _path.split("/")
            try:
                return info.test_info(p[1])
            except KeyError:
                return info.not_found()
        elif _path == 'all':
            for test_id in sh['flow_names']:
                resp = tester.run(test_id, **self.kwargs)
                if resp is True or resp is False:
                    continue
                elif resp:
                    return resp(environ, start_response)
                else:
                    resp = ServiceError('Unkown service error')
                    return resp(environ, start_response)
            return tester.display_test_list()
        # Whatever gets here should be of the form <session_id>/<path>
        try:
            sid, _path = _path.split('/', 1)
        except ValueError:
            # NOTE(review): when there is no '/', sid stays unbound; the
            # endpoints loop below would then raise NameError on self.handle —
            # confirm self.endpoints is only populated for split-able paths.
            pass
        else:
            if _path.startswith("static/"):
                return static_mime(_path, environ, start_response)
            try:
                _sh = self.session_conf[sid]
            except KeyError:
                resp = ServiceError("Unknown session")
                return resp(environ, start_response)
            tester.sh = _sh
            if 'HTTP_AUTHORIZATION' in environ:
                _sh['conv'].events.store('HTTP_AUTHORIZATION',
                                         environ['HTTP_AUTHORIZATION'])
            _p = _path.split('?')
            _sh['conv'].events.store('http request', '{} /{}'.format(
                environ['REQUEST_METHOD'], _path))
            if _p[0] in _sh['conv'].entity.endpoints():
                resp = self.handle(environ, tester, sid, *_p)
                self.session_conf[sid] = tester.sh
                # The only redirect should be the one to the redirect_uri
                if isinstance(resp, SeeOther):
                    if self.internal:
                        # res = self.see_other_to_get(resp, sh)
                        # res is probably a redirect
                        # send the user back to the test list page
                        return info.flow_list()
                    else:
                        return resp(environ, start_response)
                elif isinstance(resp, Response):
                    return resp(environ, start_response)
                else:
                    return resp
        for endpoint, service in self.endpoints.items():
            if _path == endpoint:
                jlog.info({"service": service})
                try:
                    resp = self.handle(environ, tester, sid, service)
                    return resp(environ, start_response)
                except Exception as err:
                    print("%s" % err)
                    message = traceback.format_exception(*sys.exc_info())
                    print(message)
                    # NOTE(review): JLog.exception merges its argument into a
                    # dict, so passing the exception object directly likely
                    # fails — confirm intended payload shape.
                    jlog.exception(err)
                    resp = ServiceError("%s" % err)
                    # BUG FIX: the original called resp(environ) without
                    # start_response, which cannot satisfy the WSGI callable.
                    return resp(environ, start_response)
        jlog.debug({"unknown side": path})
        resp = NotFound("Couldn't find the side you asked for!")
        return resp(environ, start_response)
def key_handling(key_dir):
    """Ensure *key_dir* exists and (re)generate an RSA PEM key per file in it.

    If the directory is missing or empty, a single ``one.pem`` is created.
    NOTE(review): every file listed is overwritten with a freshly generated
    key on each call (existing keys are not reused) — this mirrors the
    original behaviour; confirm it is intended.  ``RSA`` is imported in the
    ``__main__`` block, so this is only callable when run as a script.

    Returns:
        {key_dir: list of key file names}
    """
    if isdir(key_dir):
        only_files = [f for f in listdir(key_dir) if isfile(join(key_dir, f))]
    else:
        os.makedirs(key_dir)
        only_files = []
    if not only_files:
        only_files = ['one.pem']
    for fil in only_files:
        key = RSA.generate(2048)
        # Context manager guarantees the handle is closed even if the write
        # fails (the original leaked it on error).
        with open(join(key_dir, fil), 'w') as f:
            f.write(key.exportKey('PEM').decode('utf8'))
    return {key_dir: only_files}
# def find_allowed_algorithms(metadata_file, ic):
# mds = MetadataStore(ic.attribute_converters, ic,
# disable_ssl_certificate_validation=True)
#
# mds.imp([{
# "class": "saml2.mdstore.MetaDataFile",
# "metadata": [(metadata_file,)]}])
#
# md = mds.metadata[metadata_file]
# ed = list(md.entity.values())[0]
# res = {"digest_algorithms":[], "signing_algorithms":[]}
#
# for elem in ed['extensions']['extension_elements']:
# if elem['__class__'] == '{}&DigestMethod'.format(
# algsupport.NAMESPACE):
# res['digest_algorithms'].append(elem['algorithm'])
# elif elem['__class__'] == '{}&SigningMethod'.format(
# algsupport.NAMESPACE):
# res['signing_algorithms'].append(elem['algorithm'])
#
# return res
if __name__ == '__main__':
import argparse
from beaker.middleware import SessionMiddleware
from Cryptodome.PublicKey import RSA
from cherrypy import wsgiserver
from cherrypy.wsgiserver.ssl_builtin import BuiltinSSLAdapter
parser = argparse.ArgumentParser()
parser.add_argument('-d', dest='debug', action='store_true')
parser.add_argument(
'-k', dest='insecure', action='store_true',
help='whether or not TLS certificate verification should be performed')
parser.add_argument(
'-H', dest='hostname',
help='If running behind a proxy this is the external name of the host')
parser.add_argument('-s', dest='tls', action='store_true',
help='Whether the server should handle SSL/TLS')
parser.add_argument(
'-f', dest='flowdir',
help='Directory where test descriptions in JSON format can be found')
parser.add_argument('-r', dest='rsa_key_dir', default='keys')
parser.add_argument('-l', dest='logfile')
parser.add_argument(
'-i', dest='internal', action='store_true',
help='Whether the server should handle all communication internally')
parser.add_argument('-m', dest='path2port')
parser.add_argument('-w', dest='cwd', help='change working directory')
parser.add_argument(
'-P', dest='port', help='Which port the test instance should listen at')
parser.add_argument('-O', dest='op_profiles',
help='Possible OP (=test tool) profiles')
parser.add_argument(
'-c', dest="ca_certs",
help="CA certs to use to verify HTTPS server certificates, "
"if HTTPS is used and no server CA certs are defined then "
"no cert verification will be done")
parser.add_argument(
'-x', dest='xport', help='ONLY for testing')
parser.add_argument(dest="config")
args = parser.parse_args()
if args.logfile:
setup_logging(args.logfile)
else:
setup_logging('tt.log')
session_opts = | |
_deploy_cluster(zone):
if not xmlobject.has_element(zone, "clusters.cluster"):
return
if zone.duplication__ == None:
zone_duplication = 1
else:
zone_duplication = int(zone.duplication__)
for zone_ref in range(zone_duplication):
for cluster in xmlobject.safe_list(zone.clusters.cluster):
if cluster_name and cluster_name != cluster.name_:
continue
if cluster.duplication__ == None:
cluster_duplication = 1
else:
cluster_duplication = int(cluster.duplication__)
for cluster_ref in range(cluster_duplication):
action = api_actions.CreateClusterAction()
action.sessionUuid = session_uuid
action.name = generate_dup_name(generate_dup_name(cluster.name_, zone_ref, 'z'), cluster_ref, 'c')
action.description = generate_dup_name(generate_dup_name(cluster.description__, zone_ref, 'z'), cluster_ref, 'c')
action.hypervisorType = cluster.hypervisorType_
zone_name = generate_dup_name(zone.name_, zone_ref, 'z')
zinvs = res_ops.get_resource(res_ops.ZONE, session_uuid, name=zone_name)
zinv = get_first_item_from_list(zinvs, 'Zone', zone_name, 'Cluster')
action.zoneUuid = zinv.uuid
thread = threading.Thread(target=_add_cluster, args=(action, zone_ref, cluster, cluster_ref, ))
wait_for_thread_queue()
thread.start()
for zone in xmlobject.safe_list(deployConfig.zones.zone):
if zone_name and zone_name != zone.name_:
continue
_deploy_cluster(zone)
wait_for_thread_done()
#Add Host
def add_host(deployConfig, session_uuid, host_ip = None, zone_name = None, \
        cluster_name = None):
    '''
    Add hosts based on an xml deploy config object.

    If zone_name, cluster_name or host_ip is given, only the matching hosts
    are added.  One worker thread is started per host action; the function
    waits for all of them before returning.
    '''
    if not xmlobject.has_element(deployConfig, "zones.zone"):
        return

    def _deploy_host(cluster, zone_ref, cluster_ref):
        # Add every host defined under *cluster*, resolving the (possibly
        # duplicated) cluster name to its uuid first.
        if not xmlobject.has_element(cluster, "hosts.host"):
            return
        if zone_ref == 0 and cluster_ref == 0:
            cluster_name = cluster.name_
        else:
            # Duplicated zones/clusters get 'z'/'c' ordinal suffixes.
            cluster_name = generate_dup_name(generate_dup_name(cluster.name_, zone_ref, 'z'), cluster_ref, 'c')
        cinvs = res_ops.get_resource(res_ops.CLUSTER, session_uuid, name=cluster_name)
        cinv = get_first_item_from_list(cinvs, 'Cluster', cluster_name, 'L3 network')
        for host in xmlobject.safe_list(cluster.hosts.host):
            if host_ip and host_ip != host.managementIp_:
                continue
            if host.duplication__ == None:
                host_duplication = 1
            else:
                host_duplication = int(host.duplication__)
            for i in range(host_duplication):
                if cluster.hypervisorType_ == inventory.KVM_HYPERVISOR_TYPE:
                    action = api_actions.AddKVMHostAction()
                    action.username = host.username_
                    # BUG FIX: this line held the redaction placeholder
                    # "<PASSWORD>" (a syntax error); restore the credential
                    # from the host XML element, mirroring username_ above.
                    action.password = host.password_
                    action.timeout = AddKVMHostTimeOut
                elif cluster.hypervisorType_ == inventory.SIMULATOR_HYPERVISOR_TYPE:
                    action = api_actions.AddSimulatorHostAction()
                    action.cpuCapacity = host.cpuCapacity_
                    action.memoryCapacity = sizeunit.get_size(host.memoryCapacity_)
                # NOTE(review): an unrecognised hypervisorType_ leaves
                # ``action`` unbound and raises NameError below — confirm
                # whether an explicit error message is wanted instead.
                action.sessionUuid = session_uuid
                action.clusterUuid = cinv.uuid
                action.hostTags = host.hostTags__
                if zone_ref == 0 and cluster_ref == 0 and i == 0:
                    action.name = host.name_
                    action.description = host.description__
                    action.managementIp = host.managementIp_
                else:
                    action.name = generate_dup_name(generate_dup_name(generate_dup_name(host.name_, zone_ref, 'z'), cluster_ref, 'c'), i, 'h')
                    action.description = generate_dup_name(generate_dup_name(generate_dup_name(host.description__, zone_ref, 'z'), cluster_ref, 'c'), i, 'h')
                    action.managementIp = generate_dup_host_ip(host.managementIp_, zone_ref, cluster_ref, i)
                thread = threading.Thread(target=_thread_for_action, args = (action, ))
                wait_for_thread_queue()
                thread.start()

    for zone in xmlobject.safe_list(deployConfig.zones.zone):
        if zone_name and zone_name != zone.name_:
            continue
        if not xmlobject.has_element(zone, 'clusters.cluster'):
            continue
        if zone.duplication__ == None:
            zone_duplication = 1
        else:
            zone_duplication = int(zone.duplication__)
        for zone_ref in range(zone_duplication):
            for cluster in xmlobject.safe_list(zone.clusters.cluster):
                if cluster_name and cluster_name != cluster.name_:
                    continue
                if cluster.duplication__ == None:
                    cluster_duplication = 1
                else:
                    cluster_duplication = int(cluster.duplication__)
                for cluster_ref in range(cluster_duplication):
                    _deploy_host(cluster, zone_ref, cluster_ref)
    wait_for_thread_done()
    test_util.test_logger('All add KVM host actions are done.')
#Add L3 network
def add_l3_network(deployConfig, session_uuid, l3_name = None, l2_name = None, \
        zone_name = None):
    '''
    add_l3_network will add L3 network and also add related DNS, IpRange and
    network services.

    If l3_name, l2_name or zone_name is given, only the matching networks
    are deployed.  One worker thread is started per L3 network.
    '''
    if not xmlobject.has_element(deployConfig, "zones.zone"):
        return

    def _deploy_l3_network(l2, zone_ref, cluster_ref):
        # Spawn one deployment thread per L3 network defined under *l2*.
        if not xmlobject.has_element(l2, "l3Networks.l3BasicNetwork"):
            return
        if not l2.duplication__:
            l2_dup = 1
        else:
            l2_dup = int(l2.duplication__)
        for l2_num in range(l2_dup):
            for l3 in xmlobject.safe_list(l2.l3Networks.l3BasicNetwork):
                if l3_name and l3_name != l3.name_:
                    continue
                # Duplicated zones/clusters/L2 networks get 'z'/'c'/'n'
                # ordinal suffixes appended to the configured names.
                l2Name = generate_dup_name(generate_dup_name(generate_dup_name(l2.name_, zone_ref, 'z'), cluster_ref, 'c'), l2_num, 'n')
                l3Name = generate_dup_name(generate_dup_name(generate_dup_name(l3.name_, zone_ref, 'z'), cluster_ref, 'c'), l2_num, 'n')
                l2invs = res_ops.get_resource(res_ops.L2_NETWORK, \
                        session_uuid, \
                        name=l2Name)
                l2inv = get_first_item_from_list(l2invs, \
                        'L2 Network', l2Name, 'L3 Network')
                thread = threading.Thread(target=_do_l3_deploy, \
                        args=(l3, l2inv.uuid, l3Name, session_uuid, ))
                wait_for_thread_queue()
                thread.start()

    def _do_l3_deploy(l3, l2inv_uuid, l3Name, session_uuid):
        # Worker: create one L3 network, then attach its DNS entries, IP
        # ranges and network services.
        action = api_actions.CreateL3NetworkAction()
        action.sessionUuid = session_uuid
        action.description = l3.description__
        if l3.system__ and l3.system__ != 'False':
            action.system = 'true'
        action.l2NetworkUuid = l2inv_uuid
        action.name = l3Name
        action.type = inventory.L3_BASIC_NETWORK_TYPE
        if l3.domain_name__:
            action.dnsDomain = l3.domain_name__
        try:
            evt = action.run()
        except:
            # NOTE(review): the failure is recorded for the main thread, but
            # ``evt`` is then unbound and the next statement raises
            # NameError inside this worker — confirm intended behaviour.
            exc_info.append(sys.exc_info())
        test_util.test_logger(jsonobject.dumps(evt))
        l3_inv = evt.inventory
        #add dns
        if xmlobject.has_element(l3, 'dns'):
            for dns in xmlobject.safe_list(l3.dns):
                action = api_actions.AddDnsToL3NetworkAction()
                action.sessionUuid = session_uuid
                action.dns = dns.text_
                action.l3NetworkUuid = l3_inv.uuid
                try:
                    evt = action.run()
                except:
                    exc_info.append(sys.exc_info())
                test_util.test_logger(jsonobject.dumps(evt))
        #add ip range.
        if xmlobject.has_element(l3, 'ipRange'):
            do_add_ip_range(l3.ipRange, l3_inv.uuid, session_uuid)
        #add network service.
        providers = {}
        action = api_actions.QueryNetworkServiceProviderAction()
        action.sessionUuid = session_uuid
        action.conditions = []
        try:
            reply = action.run()
        except:
            # NOTE(review): on failure ``reply`` is unbound and the loop
            # below raises NameError — confirm intended behaviour.
            exc_info.append(sys.exc_info())
        for pinv in reply:
            providers[pinv.name] = pinv.uuid
        if xmlobject.has_element(l3, 'networkService'):
            do_add_network_service(l3.networkService, l3_inv.uuid, \
                    providers, session_uuid)

    for zone in xmlobject.safe_list(deployConfig.zones.zone):
        if zone_name and zone_name != zone.name_:
            continue
        # Collect both vlan and no-vlan L2 networks of this zone.
        l2networks = []
        if xmlobject.has_element(zone, 'l2Networks.l2NoVlanNetwork'):
            l2networks.extend(xmlobject.safe_list(zone.l2Networks.l2NoVlanNetwork))
        if xmlobject.has_element(zone, 'l2Networks.l2VlanNetwork'):
            l2networks.extend(xmlobject.safe_list(zone.l2Networks.l2VlanNetwork))
        for l2 in l2networks:
            if l2_name and l2_name != l2.name_:
                continue
            if zone.duplication__ == None:
                duplication = 1
            else:
                duplication = int(zone.duplication__)
            for zone_ref in range(duplication):
                for cluster in xmlobject.safe_list(zone.clusters.cluster):
                    if cluster.duplication__ == None:
                        cluster_duplication = 1
                    else:
                        cluster_duplication = int(cluster.duplication__)
                    for cluster_ref in range(cluster_duplication):
                        # NOTE(review): resetting both refs to 0 when both
                        # equal 1 re-deploys under the base (undup) names and
                        # also affects later loop iterations — confirm this
                        # is intentional and not a leftover workaround.
                        if zone_ref == 1 and cluster_ref == 1:
                            zone_ref = 0
                            cluster_ref = 0
                        _deploy_l3_network(l2, zone_ref, cluster_ref)
    wait_for_thread_done()
    test_util.test_logger('All add L3 Network actions are done.')
#Add Iprange
def add_ip_range(deployConfig, session_uuid, ip_range_name=None,
                 zone_name=None, l3_name=None):
    '''
    Add the IP ranges declared in the deploy config to their L3 networks.

    Called when only adding an IP range. If the IP range is in the L3
    config, add_l3_network will add the ip range directly.
    deployConfig is a xmlobject. If using standard net_operation, please
    check net_operations.add_ip_range(test_util.IpRangeOption())

    Optional filters: ip_range_name, zone_name and l3_name each restrict
    processing to entries with a matching name.
    '''
    if not xmlobject.has_element(deployConfig, "zones.zone"):
        return
    # NOTE(review): l3networks is never reset per zone, so each zone's
    # pass also re-processes L3s gathered from earlier zones -- confirm
    # this accumulation is intended.
    l3networks = []
    for zone in xmlobject.safe_list(deployConfig.zones.zone):
        if zone_name and zone_name != zone.name_:
            continue
        l2networks = []
        if xmlobject.has_element(zone, 'l2Networks.l2NoVlanNetwork'):
            l2networks += xmlobject.safe_list(zone.l2Networks.l2NoVlanNetwork)
        if xmlobject.has_element(zone, 'l2Networks.l2VlanNetwork'):
            l2networks += xmlobject.safe_list(zone.l2Networks.l2VlanNetwork)
        for l2 in l2networks:
            if xmlobject.has_element(l2, 'l3Networks.l3BasicNetwork'):
                l3networks += xmlobject.safe_list(l2.l3Networks.l3BasicNetwork)
        if zone.duplication__ is None:
            dup_count = 1
        else:
            dup_count = int(zone.duplication__)
        for dup_idx in range(dup_count):
            for l3 in l3networks:
                if l3_name and l3_name != l3.name_:
                    continue
                if not xmlobject.has_element(l3, 'ipRange'):
                    continue
                # Duplicated zones get a suffixed L3 name; the first copy
                # keeps the original name.
                if dup_idx == 0:
                    target_l3_name = l3.name_
                else:
                    target_l3_name = generate_dup_name(l3.name_, dup_idx, 'z')
                candidates = res_ops.get_resource(
                        res_ops.L3_NETWORK, session_uuid, name=target_l3_name)
                l3_inv = get_first_item_from_list(
                        candidates, 'L3 Network', target_l3_name, 'IP range')
                do_add_ip_range(l3.ipRange, l3_inv.uuid, session_uuid,
                        ip_range_name)
def do_add_ip_range(ip_range_xml_obj, l3_uuid, session_uuid,
        ip_range_name=None):
    '''
    Add every IP range described by ip_range_xml_obj to an L3 network.

    Args:
        ip_range_xml_obj: xmlobject (or list of them) describing ranges.
        l3_uuid: uuid of the target L3 network.
        session_uuid: API session uuid.
        ip_range_name: if given, only the range with this name is added.

    Raises:
        Whatever action.run() raises; the failure is also recorded in
        the module-level exc_info list before re-raising.
    '''
    for ir in xmlobject.safe_list(ip_range_xml_obj):
        if ip_range_name and ip_range_name != ir.name_:
            continue
        action = api_actions.AddIpRangeAction()
        action.sessionUuid = session_uuid
        action.description = ir.description__
        action.endIp = ir.endIp_
        action.gateway = ir.gateway_
        action.l3NetworkUuid = l3_uuid
        action.name = ir.name_
        action.netmask = ir.netmask_
        action.startIp = ir.startIp_
        try:
            evt = action.run()
        except Exception:
            exc_info.append(sys.exc_info())
            # Bare `raise` (not `raise e`) re-raises with the original
            # traceback intact.
            raise
        test_util.test_logger(jsonobject.dumps(evt))
#Add Network Service
def add_network_service(deployConfig, session_uuid):
    '''
    Attach the network services declared in the deploy config to their
    L3 networks.

    deployConfig is a xmlobject. For every zone, the network service
    providers are queried and each L3 network carrying a
    <networkService> element gets its services attached via
    do_add_network_service.
    '''
    if not xmlobject.has_element(deployConfig, "zones.zone"):
        return
    # NOTE(review): l3networks is never reset per zone, so later zones
    # re-process L3s gathered from earlier zones -- confirm intended.
    l3networks = []
    for zone in xmlobject.safe_list(deployConfig.zones.zone):
        l2networks = []
        if xmlobject.has_element(zone, 'l2Networks.l2NoVlanNetwork'):
            l2networks.extend(xmlobject.safe_list(zone.l2Networks.l2NoVlanNetwork))
        if xmlobject.has_element(zone, 'l2Networks.l2VlanNetwork'):
            l2networks.extend(xmlobject.safe_list(zone.l2Networks.l2VlanNetwork))
        for l2 in l2networks:
            if xmlobject.has_element(l2, 'l3Networks.l3BasicNetwork'):
                l3networks.extend(xmlobject.safe_list(l2.l3Networks.l3BasicNetwork))
        # Build the provider name -> uuid map. The query is repeated per
        # zone to preserve the original call ordering, although the
        # result presumably does not depend on the zone.
        providers = {}
        action = api_actions.QueryNetworkServiceProviderAction()
        action.sessionUuid = session_uuid
        action.conditions = []
        try:
            reply = action.run()
        except Exception:
            exc_info.append(sys.exc_info())
            # Bare `raise` preserves the original traceback.
            raise
        for pinv in reply:
            providers[pinv.name] = pinv.uuid
        if zone.duplication__ is None:
            duplication = 1
        else:
            duplication = int(zone.duplication__)
        for zone_duplication in range(duplication):
            for l3 in l3networks:
                if not xmlobject.has_element(l3, 'networkService'):
                    continue
                # Duplicated zones use a suffixed L3 name.
                if zone_duplication == 0:
                    l3_name = l3.name_
                else:
                    l3_name = generate_dup_name(l3.name_, zone_duplication, 'z')
                l3_invs = res_ops.get_resource(res_ops.L3_NETWORK, session_uuid, name=l3_name)
                l3_inv = get_first_item_from_list(l3_invs, 'L3 Network', l3_name, 'Network Service')
                do_add_network_service(l3.networkService, l3_inv.uuid,
                        providers, session_uuid)
def do_add_network_service(net_service_xml_obj, l3_uuid, providers,
        session_uuid):
    '''
    Attach the network services described by net_service_xml_obj to an
    L3 network.

    Args:
        net_service_xml_obj: xmlobject (or list) of <networkService>
            elements, each naming a provider and its serviceType list.
        l3_uuid: uuid of the target L3 network.
        providers: dict mapping provider name -> provider uuid.
        session_uuid: API session uuid.

    Raises:
        test_util.TestError: when a provider named in the config is not
            present in `providers`.
    '''
    allservices = {}
    for ns in xmlobject.safe_list(net_service_xml_obj):
        puuid = providers.get(ns.provider_)
        if not puuid:
            raise test_util.TestError('cannot find network service provider[%s], it may not have been added' % ns.provider_)
        servs = [nst.text_ for nst in xmlobject.safe_list(ns.serviceType)]
        # Merge rather than assign, so multiple <networkService> entries
        # for the same provider all survive (a plain assignment would
        # silently drop the earlier entries).
        allservices.setdefault(puuid, []).extend(servs)
    action = api_actions.AttachNetworkServiceToL3NetworkAction()
    action.sessionUuid = session_uuid
    action.l3NetworkUuid = l3_uuid
    action.networkServices = allservices
    try:
        evt = action.run()
    except Exception:
        exc_info.append(sys.exc_info())
        # Bare `raise` preserves the original traceback.
        raise
    test_util.test_logger(jsonobject.dumps(evt))
#Add Image
def add_image(deployConfig, session_uuid):
    '''
    Add every image declared in deployConfig (images.image) to its
    backup storage(s): one AddImageAction per (image, backupStorageRef)
    pair, each run on its own worker thread.

    Failures are recorded in the module-level exc_info list rather than
    raised (deliberate best-effort). Thread accounting goes through the
    module-level increase_image_thread / decrease_image_thread /
    wait_for_image_thread_queue / wait_for_thread_done helpers.
    '''
    def _add_image(action):
        # Worker body: account for this thread while it runs, stash any
        # failure for the caller, and always release the slot.
        increase_image_thread()
        try:
            evt = action.run()
            test_util.test_logger(jsonobject.dumps(evt))
        except:
            exc_info.append(sys.exc_info())
        finally:
            decrease_image_thread()
    if not xmlobject.has_element(deployConfig, 'images.image'):
        return
    for i in xmlobject.safe_list(deployConfig.images.image):
        for bsref in xmlobject.safe_list(i.backupStorageRef):
            bss = res_ops.get_resource(res_ops.BACKUP_STORAGE, session_uuid, name=bsref.text_)
            bs = get_first_item_from_list(bss, 'backup storage', bsref.text_, 'image')
            action = api_actions.AddImageAction()
            action.sessionUuid = session_uuid
            #TODO: account uuid will be removed later.
            action.accountUuid = inventory.INITIAL_SYSTEM_ADMIN_UUID
            action.backupStorageUuids = [bs.uuid]
            action.bits = i.bits__
            if not action.bits:
                # Default to 64-bit when the config omits 'bits'.
                action.bits = 64
            action.description = i.description__
            action.format = i.format_
            action.mediaType = i.mediaType_
            action.guestOsType = i.guestOsType__
            if not action.guestOsType:
                action.guestOsType = 'unknown'
            action.hypervisorType = i.hypervisorType__
            action.name = i.name_
            action.url = i.url_
            # 1800000 -- presumably milliseconds (30 min); confirm unit.
            action.timeout = 1800000
            thread = threading.Thread(target = _add_image, args = (action, ))
            print 'before add image1: %s' % i.url_
            # Presumably blocks until a worker slot is free -- confirm.
            wait_for_image_thread_queue()
            print 'before add image2: %s' % i.url_
            thread.start()
            print 'add image: %s' % i.url_
    print 'all images add command are executed'
    wait_for_thread_done(True)
    print 'all images have been added'
#Add Disk Offering
def add_disk_offering(deployConfig, session_uuid):
def _add_disk_offering(disk_offering_xml_obj, session_uuid):
action = api_actions.CreateDiskOfferingAction()
action.sessionUuid = session_uuid
action.name = | |
setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'id'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_identity" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'id' is set
if self.api_client.client_side_validation and ('id' not in local_var_params or # noqa: E501
local_var_params['id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `id` when calling `delete_identity`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in local_var_params:
path_params['id'] = local_var_params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/identities/{id}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def get_identity(self, id, **kwargs):  # noqa: E501
    """Get an Identity  # noqa: E501

    Fetch one identity by its ID and return only the deserialized body.
    Learn how identities work in [ORY Kratos' User And Identity Model Documentation](https://www.ory.sh/docs/next/kratos/concepts/identity-user-model).  # noqa: E501

    Synchronous by default; pass async_req=True to get the request
    thread back instead:

    >>> thread = api.get_identity(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str id: ID must be set to the ID of identity you want to get (required)
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: Identity
             If the method is called asynchronously,
             returns the request thread.
    """
    # Ask the low-level variant for the body only (no status/headers
    # tuple), overriding any caller-supplied value for this flag.
    call_options = dict(kwargs, _return_http_data_only=True)
    return self.get_identity_with_http_info(id, **call_options)  # noqa: E501
def get_identity_with_http_info(self, id, **kwargs):  # noqa: E501
    """Get an Identity  # noqa: E501

    Low-level variant: performs the GET /identities/{id} call and, by
    default, returns the full (data, status, headers) tuple.
    Learn how identities work in [ORY Kratos' User And Identity Model Documentation](https://www.ory.sh/docs/next/kratos/concepts/identity-user-model).  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.get_identity_with_http_info(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str id: ID must be set to the ID of identity you want to get (required)
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(Identity, status_code(int), headers(HTTPHeaderDict))
        If the method is called asynchronously,
        returns the request thread.
    """
    # Snapshot the arguments first: at this point locals() holds exactly
    # {'self', 'id', 'kwargs'}, since no other local exists yet. The
    # snapshot is then used (and mutated) as a plain dict.
    local_var_params = locals()

    all_params = [
        'id'
    ]
    # Per-request options accepted by every generated method.
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )

    # Reject unknown keyword arguments, then flatten accepted ones into
    # local_var_params alongside 'id'.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_identity" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'id' is set
    if self.api_client.client_side_validation and ('id' not in local_var_params or  # noqa: E501
                                                   local_var_params['id'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `id` when calling `get_identity`")  # noqa: E501

    collection_formats = {}

    # Substituted into the '/identities/{id}' path template.
    path_params = {}
    if 'id' in local_var_params:
        path_params['id'] = local_var_params['id']  # noqa: E501

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = []  # noqa: E501

    return self.api_client.call_api(
        '/identities/{id}', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='Identity',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def get_schema(self, id, **kwargs):  # noqa: E501
    """get_schema  # noqa: E501

    Get a Traits Schema Definition, returning only the deserialized
    body.  # noqa: E501

    Synchronous by default; pass async_req=True to get the request
    thread back instead:

    >>> thread = api.get_schema(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str id: ID must be set to the ID of schema you want to get (required)
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: object
             If the method is called asynchronously,
             returns the request thread.
    """
    # Ask the low-level variant for the body only (no status/headers
    # tuple), overriding any caller-supplied value for this flag.
    call_options = dict(kwargs, _return_http_data_only=True)
    return self.get_schema_with_http_info(id, **call_options)  # noqa: E501
def get_schema_with_http_info(self, id, **kwargs):  # noqa: E501
    """get_schema  # noqa: E501

    Low-level variant: performs the GET /schemas/{id} call and, by
    default, returns the full (data, status, headers) tuple.
    Get a Traits Schema Definition  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.get_schema_with_http_info(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str id: ID must be set to the ID of schema you want to get (required)
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(object, status_code(int), headers(HTTPHeaderDict))
        If the method is called asynchronously,
        returns the request thread.
    """
    # Snapshot the arguments first: at this point locals() holds exactly
    # {'self', 'id', 'kwargs'}, since no other local exists yet. The
    # snapshot is then used (and mutated) as a plain dict.
    local_var_params = locals()

    all_params = [
        'id'
    ]
    # Per-request options accepted by every generated method.
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )

    # Reject unknown keyword arguments, then flatten accepted ones into
    # local_var_params alongside 'id'.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_schema" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'id' is set
    if self.api_client.client_side_validation and ('id' not in local_var_params or  # noqa: E501
                                                   local_var_params['id'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `id` when calling `get_schema`")  # noqa: E501

    collection_formats = {}

    # Substituted into the '/schemas/{id}' path template.
    path_params = {}
    if 'id' in local_var_params:
        path_params['id'] = local_var_params['id']  # noqa: E501

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = []  # noqa: E501

    return self.api_client.call_api(
        '/schemas/{id}', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='object',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def get_self_service_error(self, error, **kwargs):  # noqa: E501
    """Get User-Facing Self-Service Errors  # noqa: E501

    Return the error container associated with a user-facing self
    service error, deserialized body only. This endpoint supports stub
    values to help you implement the error UI: `?error=stub:500` -
    returns a stub 500 (Internal Server Error) error. More information
    can be found at [ORY Kratos User User Facing Error Documentation](https://www.ory.sh/docs/kratos/self-service/flows/user-facing-errors).  # noqa: E501

    Synchronous by default; pass async_req=True to get the request
    thread back instead:

    >>> thread = api.get_self_service_error(error, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str error: Error is the container's ID (required)
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: ErrorContainer
             If the method is called asynchronously,
             returns the request thread.
    """
    # Ask the low-level variant for the body only (no status/headers
    # tuple), overriding any caller-supplied value for this flag.
    call_options = dict(kwargs, _return_http_data_only=True)
    return self.get_self_service_error_with_http_info(error, **call_options)  # noqa: E501
def get_self_service_error_with_http_info(self, error, **kwargs): # noqa: E501
"""Get User-Facing Self-Service Errors # noqa: E501
This endpoint returns the error associated with a user-facing self service errors. This | |
import numpy as np
import scipy.sparse as sparse
import scipy.sparse.linalg as linalg
from landlab import Component
# Things to add: 1. Explicit stability check.
# 2. Implicit handling of scenarios where kappa*dt exceeds critical step -
# subdivide dt automatically.
class PerronNLDiffuse(Component):
"""Nonlinear diffusion, following Perron (2011).
This module uses Taylor Perron's implicit (2011) method to solve the
nonlinear hillslope diffusion equation across a rectangular, regular grid
for a single timestep. Note it works with the mass flux implicitly, and
thus does not actually calculate it. Grid must be at least 5x5.
Boundary condition handling assumes each edge uses the same BC for each of
its nodes.
This component cannot yet handle looped boundary conditions, but all others
should be fine.
This component has KNOWN STABILITY ISSUES which will be resolved in a
future release; use at your own risk.
The primary method of this class is :func:`run_one_step`.
Examples
--------
>>> from landlab.components import PerronNLDiffuse
>>> from landlab import RasterModelGrid
>>> import numpy as np
>>> mg = RasterModelGrid((5, 5))
>>> z = mg.add_zeros("topographic__elevation", at="node")
>>> nl = PerronNLDiffuse(mg, nonlinear_diffusivity=1.)
>>> dt = 100.
>>> nt = 20
>>> uplift_rate = 0.001
>>> for i in range(nt):
... z[mg.core_nodes] += uplift_rate*dt
... nl.run_one_step(dt)
>>> z_target = np.array(
... [ 0. , 0. , 0. , 0. , 0. ,
... 0. , 0.00778637, 0.0075553 , 0.00778637, 0. ,
... 0. , 0.0075553 , 0.0078053 , 0.0075553 , 0. ,
... 0. , 0.00778637, 0.0075553 , 0.00778637, 0. ,
... 0. , 0. , 0. , 0. , 0. ])
>>> np.allclose(z, z_target)
True
References
----------
**Required Software Citation(s) Specific to this Component**
None Listed
**Additional References**
Perron, J. T. (2011). Numerical methods for nonlinear hillslope transport laws.
Journal of Geophysical Research 116(F2), 23 - 13.
https://dx.doi.org/10.1029/2010jf001801
"""
# Component metadata (landlab convention): the component's registered
# name and the grid fields it touches. 'inout' intent indicates the
# elevation field is both read and updated in place -- per landlab's
# Component field convention; confirm against the base class docs.
_name = "PerronNLDiffuse"

_info = {
    "topographic__elevation": {
        "dtype": float,
        "intent": "inout",
        "optional": False,
        "units": "m",
        "mapping": "node",
        "doc": "Land surface topographic elevation",
    }
}
def __init__(
    self,
    grid,
    nonlinear_diffusivity=0.01,
    S_crit=33.0 * np.pi / 180.0,
    rock_density=2700.0,
    sed_density=2700.0,
):
    """Initialise the diffuser and precompute grid index maps and masks.

    Parameters
    ----------
    grid : RasterModelGrid
        A Landlab raster grid
    nonlinear_diffusivity : float, array or field name
        The nonlinear diffusivity
    S_crit : float (radians)
        The critical hillslope angle
    rock_density : float (kg*m**-3)
        The density of intact rock
    sed_density : float (kg*m**-3)
        The density of the mobile (sediment) layer
    """
    super(PerronNLDiffuse, self).__init__(grid)
    # Remember the grid's BC code so a later BC change can be detected
    # and updated_boundary_conditions() re-run.
    self._bc_set_code = self._grid.bc_set_code
    self._values_to_diffuse = "topographic__elevation"
    self._kappa = nonlinear_diffusivity
    self._rock_density = rock_density
    self._sed_density = sed_density
    self._S_crit = S_crit
    self._uplift = 0.0
    # Cache grid spacings plus their reciprocals and squared reciprocals
    # for repeated use.
    self._delta_x = grid.dx
    self._delta_y = grid.dy
    self._one_over_delta_x = 1.0 / self._delta_x
    self._one_over_delta_y = 1.0 / self._delta_y
    self._one_over_delta_x_sqd = self._one_over_delta_x ** 2.0
    self._one_over_delta_y_sqd = self._one_over_delta_y ** 2.0
    # 1/S_crit**2, a constant of the nonlinear flux law (Perron, 2011).
    self._b = 1.0 / self._S_crit ** 2.0
    ncols = grid.number_of_node_columns
    self._ncols = ncols
    nrows = grid.number_of_node_rows
    self._nrows = nrows
    nnodes = grid.number_of_nodes
    self._nnodes = nnodes
    ninteriornodes = grid.number_of_interior_nodes
    # Core nodes = interior nodes minus the one-node-wide ring just
    # inside the boundary: 2*((ncols-2)+(nrows-2)) - 4 = 2*(ncols+nrows-6).
    ncorenodes = ninteriornodes - 2 * (ncols + nrows - 6)
    self._ninteriornodes = ninteriornodes
    self._interior_grid_width = ncols - 2
    self._core_cell_width = ncols - 4
    # Interior corner nodes as real IDs, ordered (BL, BR, TL, TR) --
    # matching the stacking order used for the corner masks below.
    self._interior_corners = np.array(
        [ncols + 1, 2 * ncols - 2, nnodes - 2 * ncols + 1, nnodes - ncols - 2]
    )
    # Interior-edge node IDs (edges of the interior ring, corners excluded).
    _left_list = np.array(range(2 * ncols + 1, nnodes - 2 * ncols, ncols))
    # ^these are still real IDs
    _right_list = np.array(range(3 * ncols - 2, nnodes - 2 * ncols, ncols))
    _bottom_list = np.array(range(ncols + 2, 2 * ncols - 2))
    _top_list = np.array(range(nnodes - 2 * ncols + 2, nnodes - ncols - 2))
    self._left_list = _left_list
    self._right_list = _right_list
    self._bottom_list = _bottom_list
    self._top_list = _top_list
    self._core_nodes = self._coreIDtoreal(np.arange(ncorenodes, dtype=int))
    self._corenodesbyintIDs = self._realIDtointerior(self._core_nodes)
    self._ncorenodes = len(self._core_nodes)
    self._corner_interior_IDs = self._realIDtointerior(self._interior_corners)
    # ^i.e., interior corners as interior IDs
    self._bottom_interior_IDs = self._realIDtointerior(np.array(_bottom_list))
    self._top_interior_IDs = self._realIDtointerior(np.array(_top_list))
    self._left_interior_IDs = self._realIDtointerior(np.array(_left_list))
    self._right_interior_IDs = self._realIDtointerior(np.array(_right_list))
    # build an ID map to let us easily map the variables of the core nodes
    # onto the operating matrix:
    # This array is ninteriornodes long, but the IDs it contains are
    # REAL IDs
    operating_matrix_ID_map = np.empty((ninteriornodes, 9))
    self._interior_IDs_as_real = self._interiorIDtoreal(np.arange(ninteriornodes))
    for j in range(ninteriornodes):
        i = self._interior_IDs_as_real[j]
        # 9-point stencil around node i: itself plus its 8 neighbours,
        # row-major from bottom-left (i-ncols-1) to top-right (i+ncols+1).
        operating_matrix_ID_map[j, :] = np.array(
            [
                (i - ncols - 1),
                (i - ncols),
                (i - ncols + 1),
                (i - 1),
                i,
                (i + 1),
                (i + ncols - 1),
                (i + ncols),
                (i + ncols + 1),
            ]
        )
    self._operating_matrix_ID_map = operating_matrix_ID_map
    self._operating_matrix_core_int_IDs = self._realIDtointerior(
        operating_matrix_ID_map[self._corenodesbyintIDs, :]
    )
    # ^shape(ncorenodes,9)
    # see below for corner and edge maps
    # Build masks for the edges and corners to be applied to the operating
    # matrix map.
    # Antimasks are the boundary nodes, masks are "normal"
    # Each mask/antimask lists positions within the 9-slot stencil above:
    # mask = interior slots, antimask = slots falling on the boundary.
    self._topleft_mask = [1, 2, 4, 5]
    topleft_antimask = [0, 3, 6, 7, 8]
    self._topright_mask = [0, 1, 3, 4]
    topright_antimask = [2, 5, 6, 7, 8]
    self._bottomleft_mask = [4, 5, 7, 8]
    bottomleft_antimask = [0, 1, 2, 3, 6]
    self._bottomright_mask = [3, 4, 6, 7]
    bottomright_antimask = [0, 1, 2, 5, 8]
    self._corners_masks = np.vstack(
        (
            self._bottomleft_mask,
            self._bottomright_mask,
            self._topleft_mask,
            self._topright_mask,
        )
    )
    # ^(each_corner,mask_for_each_corner)
    self._corners_antimasks = np.vstack(
        (
            bottomleft_antimask,
            bottomright_antimask,
            topleft_antimask,
            topright_antimask,
        )
    )
    # ^so shape becomes (4,5)
    self._left_mask = [1, 2, 4, 5, 7, 8]
    self._left_antimask = [0, 3, 6]
    self._top_mask = [0, 1, 2, 3, 4, 5]
    self._top_antimask = [6, 7, 8]
    self._right_mask = [0, 1, 3, 4, 6, 7]
    self._right_antimask = [2, 5, 8]
    self._bottom_mask = [3, 4, 5, 6, 7, 8]
    self._bottom_antimask = [0, 1, 2]
    self._antimask_corner_position = [0, 2, 2, 4]
    # ^this is the position w/i the corner antimasks that the true corner
    # actually occupies
    # Real-node-ID offset for each of the 9 stencil slots (same row-major
    # order as operating_matrix_ID_map).
    self._modulator_mask = np.array(
        [-ncols - 1, -ncols, -ncols + 1, -1, 0, 1, ncols - 1, ncols, ncols + 1]
    )
    self.updated_boundary_conditions()
def updated_boundary_conditions(self):
"""Call if grid BCs are updated after component instantiation."""
grid = self._grid
nrows = self._nrows
ncols = self._ncols
# ^Set up terms for BC handling (still feels very clumsy)
bottom_edge = grid.nodes_at_bottom_edge[1:-1]
top_edge = grid.nodes_at_top_edge[1:-1]
left_edge = grid.nodes_at_left_edge[1:-1]
right_edge = grid.nodes_at_right_edge[1:-1]
self._bottom_flag = 1
self._top_flag = 1
self._left_flag = 1
self._right_flag = 1
# self._corner_flags = [1,1,1,1] #In ID order, so BL,BR,TL,TR
if np.all(grid.status_at_node[bottom_edge] == 4):
# ^This should be all of them, or none of them
self._bottom_flag = 4
elif np.all(grid.status_at_node[bottom_edge] == 3):
self._bottom_flag = 3
elif np.all(grid.status_at_node[bottom_edge] == 2):
self._bottom_flag = 2
elif np.all(grid.status_at_node[bottom_edge] == 1):
pass
else:
raise NameError(
"Different cells on the same grid edge have "
"different boundary statuses"
)
# Note this could get fraught if we need to open a cell to let
# water flow out...
if np.all(grid.status_at_node[top_edge] == 4):
self._top_flag = 4
elif np.all(grid.status_at_node[top_edge] == 3):
self._top_flag = 3
elif np.all(grid.status_at_node[top_edge] == 2):
self._top_flag = 2
elif np.all(grid.status_at_node[top_edge] == 1):
pass
else:
raise NameError(
"Different cells on the same grid edge have "
"different boundary statuses"
)
if np.all(grid.status_at_node[left_edge] == 4):
self._left_flag = 4
elif np.all(grid.status_at_node[left_edge] == 3):
self._left_flag = 3
elif np.all(grid.status_at_node[left_edge] == 2):
self._left_flag = 2
elif np.all(grid.status_at_node[left_edge] == 1):
pass
else:
raise NameError(
"Different cells on the same grid edge have "
"different boundary statuses"
)
if np.all(grid.status_at_node[right_edge] == 4):
self._right_flag = 4
elif np.all(grid.status_at_node[right_edge] == 3):
self._right_flag = 3
elif np.all(grid.status_at_node[right_edge] == 2):
self._right_flag = 2
elif np.all(grid.status_at_node[right_edge] == 1):
pass
else:
raise NameError(
"Different cells on the same grid edge have "
"different boundary statuses"
)
self._fixed_grad_BCs_present = (
self._bottom_flag == 2
or self._top_flag == 2
or self._left_flag == 2
or self._right_flag == 2
)
self._looped_BCs_present = (
self._bottom_flag == 3
or self._top_flag == 3
or self._left_flag == 3
or self._right_flag == 3
)
if self._fixed_grad_BCs_present:
if self._values_to_diffuse != grid.fixed_gradient_of:
raise ValueError(
"Boundary conditions set in the grid don't "
"apply to the data the diffuser is trying to "
"work with"
)
if np.any(grid.status_at_node == 2):
self._fixed_grad_offset_map = np.empty(nrows * ncols, dtype=float)
self._fixed_grad_anchor_map = np.empty_like(self._fixed_grad_offset_map)
self._fixed_grad_offset_map[
grid.fixed_gradient_node_properties["boundary_node_IDs"]
] = grid.fixed_gradient_node_properties["values_to_add"]
self._corner_flags = grid.status_at_node[[0, ncols - 1, -ncols, -1]]
op_mat_just_corners = self._operating_matrix_ID_map[
self._corner_interior_IDs, :
]
op_mat_cnr0 = op_mat_just_corners[0, self._bottomleft_mask]
op_mat_cnr1 = op_mat_just_corners[1, self._bottomright_mask]
op_mat_cnr2 | |
Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api(resource_path, 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1beta1StorageClass',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
collection_formats=collection_formats)
def read_storage_v1beta1_storage_class(self, name, **kwargs):
    """
    read the specified StorageClass

    Synchronous by default; supply a `callback` callable to make the
    request asynchronously, in which case the request thread is
    returned and the callback receives the response.

    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.read_storage_v1beta1_storage_class(name, callback=callback_function)

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str name: name of the StorageClass (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'
    :param bool export: Should this value be exported. Export strips fields that a user can not specify.
    :return: V1beta1StorageClass
             If the method is called asynchronously,
             returns the request thread.
    """
    # Always request just the response body from the low-level call.
    # Both the sync path (data) and the async path (request thread)
    # simply hand that call's result straight back.
    kwargs['_return_http_data_only'] = True
    return self.read_storage_v1beta1_storage_class_with_http_info(name, **kwargs)
def read_storage_v1beta1_storage_class_with_http_info(self, name, **kwargs):
    """
    read the specified StorageClass

    Low-level variant: performs the GET call against
    /apis/storage.k8s.io/v1beta1/storageclasses/{name}.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.

    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.read_storage_v1beta1_storage_class_with_http_info(name, callback=callback_function)

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str name: name of the StorageClass (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'
    :param bool export: Should this value be exported. Export strips fields that a user can not specify.
    :return: V1beta1StorageClass
             If the method is called asynchronously,
             returns the request thread.
    """
    all_params = ['name', 'pretty', 'exact', 'export']
    all_params.append('callback')
    all_params.append('_return_http_data_only')

    # locals() is taken AFTER all_params is built, so the snapshot also
    # contains 'all_params' -- harmless, since only specific keys are
    # read. It is then used (and mutated) as a plain dict.
    params = locals()
    # Reject unknown keyword arguments, then flatten accepted ones into
    # params alongside 'name'.
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method read_storage_v1beta1_storage_class" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'name' is set
    if ('name' not in params) or (params['name'] is None):
        raise ValueError("Missing the required parameter `name` when calling `read_storage_v1beta1_storage_class`")

    collection_formats = {}

    resource_path = '/apis/storage.k8s.io/v1beta1/storageclasses/{name}'.replace('{format}', 'json')
    path_params = {}
    if 'name' in params:
        path_params['name'] = params['name']

    # Only forward query options the caller actually supplied.
    query_params = {}
    if 'pretty' in params:
        query_params['pretty'] = params['pretty']
    if 'exact' in params:
        query_params['exact'] = params['exact']
    if 'export' in params:
        query_params['export'] = params['export']

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None

    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
    if not header_params['Accept']:
        del header_params['Accept']

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['*/*'])

    # Authentication setting
    auth_settings = ['BearerToken']

    return self.api_client.call_api(resource_path, 'GET',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='V1beta1StorageClass',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    collection_formats=collection_formats)
def replace_storage_v1beta1_storage_class(self, name, body, **kwargs):
    """
    replace the specified StorageClass

    Synchronous by default; supply a `callback` callable to make the
    request asynchronously, in which case the request thread is
    returned and the callback receives the response.

    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.replace_storage_v1beta1_storage_class(name, body, callback=callback_function)

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str name: name of the StorageClass (required)
    :param V1beta1StorageClass body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :return: V1beta1StorageClass
             If the method is called asynchronously,
             returns the request thread.
    """
    # Always request just the response body from the low-level call.
    # Both the sync path (data) and the async path (request thread)
    # simply hand that call's result straight back.
    kwargs['_return_http_data_only'] = True
    return self.replace_storage_v1beta1_storage_class_with_http_info(name, body, **kwargs)
def replace_storage_v1beta1_storage_class_with_http_info(self, name, body, **kwargs):
    """
    replace the specified StorageClass
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.replace_storage_v1beta1_storage_class_with_http_info(name, body, callback=callback_function)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str name: name of the StorageClass (required)
    :param V1beta1StorageClass body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :return: V1beta1StorageClass
        If the method is called asynchronously,
        returns the request thread.
    """
    all_params = {'name', 'body', 'pretty', 'callback', '_return_http_data_only'}
    # Collect positional plus keyword arguments, rejecting unknown keywords.
    # dict.items() replaces the Py2-only iteritems helper the original
    # relied on (imported elsewhere in this module); behavior is identical.
    params = {'name': name, 'body': body}
    for key, val in kwargs.items():
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method replace_storage_v1beta1_storage_class" % key
            )
        params[key] = val
    # verify the required parameter 'name' is set
    if params['name'] is None:
        raise ValueError("Missing the required parameter `name` when calling `replace_storage_v1beta1_storage_class`")
    # verify the required parameter 'body' is set
    if params['body'] is None:
        raise ValueError("Missing the required parameter `body` when calling `replace_storage_v1beta1_storage_class`")
    collection_formats = {}
    resource_path = '/apis/storage.k8s.io/v1beta1/storageclasses/{name}'.replace('{format}', 'json')
    path_params = {'name': params['name']}
    query_params = {}
    if 'pretty' in params:
        query_params['pretty'] = params['pretty']
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = params['body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
    if not header_params['Accept']:
        del header_params['Accept']
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['*/*'])
    # Authentication setting
    auth_settings = ['BearerToken']
    return self.api_client.call_api(resource_path, 'PUT',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='V1beta1StorageClass',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    collection_formats=collection_formats)
def watch_storage_v1beta1_storage_class(self, name, **kwargs):
    """Watch changes to an object of kind StorageClass.

    Synchronous by default. Pass a ``callback`` callable to make the
    request asynchronously; the request thread is then returned instead
    of the response data.

    >>> def callback_function(response):
    >>>     pprint(response)
    >>> thread = api.watch_storage_v1beta1_storage_class(name, callback=callback_function)

    :param callback function: The callback function for asynchronous request. (optional)
    :param str name: name of the StorageClass (required)
    :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
    :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
    :param str pretty: If 'true', then the output is pretty printed.
    :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history.
    :param int timeout_seconds: Timeout for the list/watch call.
    :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
    :return: VersionedEvent, or the request thread when asynchronous.
    """
    # Both paths delegate to the *_with_http_info variant; with
    # _return_http_data_only set, the sync path already yields just the
    # deserialized data, so a single return covers both cases.
    kwargs['_return_http_data_only'] = True
    return self.watch_storage_v1beta1_storage_class_with_http_info(name, **kwargs)
def watch_storage_v1beta1_storage_class_with_http_info(self, name, **kwargs):
"""
watch changes to an object of kind StorageClass
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.watch_storage_v1beta1_storage_class_with_http_info(name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str name: name of the StorageClass (required)
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history.
:param int timeout_seconds: Timeout for the list/watch call.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: VersionedEvent
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'field_selector', 'label_selector', 'pretty', 'resource_version', 'timeout_seconds', 'watch']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method watch_storage_v1beta1_storage_class" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' | |
from collections import namedtuple
import argparse
import pdb
import traceback
import sys
import os
from qca_hex_analyzer import WmiCtrlAnalyzer, HtcCtrlAnalyzer, HttAnalyzer, AllAnalyzer
import hexfilter
# Help and description strings for the argparse command-line interface.
# Typo fixes vs. the previous revision: "--output -file" -> "--output-file"
# (three occurrences) and "WMI controli" -> "WMI control".
description = \
    "Tool used to analyze hexdumps produced by a qca wireless kernel " \
    "driver (such as ath6kl, ath10k or qcacld2.0). " \
    "The hexdumps are assumed to contain dumps of the traffic " \
    "between the driver and the target. " \
    "No special preprocessing of the log files is required. " \
    "Filter strings (description strings) can be used to limit the output " \
    "(only RX or TX etc.). " \
    "The driver must of course be configured to log all necessary debug " \
    "data (for ath6kl and ath10k this means a proper debug mask). "

# wmi-ctrl subcommand
wmi_ctrl_help = \
    "Subcommand for WMI control message parsing. " \
    "This subcommand is used to extract WMI control messages from the input. "
wmi_ctrl_description = \
    "Extracts WMI control message hexdata from an input (--input-file). " \
    "The extracted messages will be printed to the output (--output-file). " \
    "--ep-id is used to determine from which HTC endpoint the data will " \
    "be extracted (see description of that option below). " \
    "All valid WMI control message ID's will be printed together with the " \
    "message enum string (from ath6kl source code). " \
    "The --wmi-old option must be used if the driver does not use the WMI " \
    "unified protocol (ath6kl). " \
    "The WMI control message payload will also be printed together with " \
    "message ID's if the --print-data option is used."

# htc-ctrl subcommand
htc_ctrl_help = \
    "Subcommand for HTC control message parsing. " \
    "This subcommand is used to extract HTC control messages from the input. "
htc_ctrl_description = \
    "Extracts HTC control message hexdata from an input (--input-file). " \
    "The extracted messages will be printed to the output (--output-file). " \
    "All valid HTC control message ID's will be printed together with the " \
    "message enum string (from ath6kl source code). " \
    "The message payload will also be printed together with the " \
    "message ID's if the --print-data option is used. " \
    "HTC control messages will always be extracted from endpoint 0."

# htt subcommand
htt_help = \
    "Subcommand for HTT message parsing. " \
    "This subcommand is used to extract HTT messages from the input. "
htt_description = \
    "Extracts HTT message hexdata from an input (--input-file). " \
    "The extracted messages will be printed to the output (--output-file). " \
    "--ep-id is used to determine from which HTC endpoint the data will " \
    "be extracted (see description of that option below). " \
    "All valid HTT message ID's will be printed together with the " \
    "message enum string (from ath10k source code). " \
    "The message payload will also be printed together with " \
    "message ID's if the --print-data option is used."

# all subcommand
all_help = \
    "Subcommand for parsing of all supported message types. " \
    "This subcommand is used to extract both WMI control, " \
    "HTC control and HTT messages from the input. "
all_description = \
    "Extracts message hexdata from an input (--input-file). " \
    "The extracted messages will be printed to the output (--output-file). " \
    "The messages can be any of the supported message types " \
    "(currently only WMI control, HTC control and HTT). " \
    "--wmi-ctrl-ep-id and --htt-ep-id is used to determine from which " \
    "endpoints WMI and HTT data will be extracted " \
    "(see description of those options below). " \
    "HTC control messages will always be extracted from ep 0. " \
    "All valid message ID's will be printed together " \
    "with a corresponding message enum string. " \
    "The message payload will also be printed together with " \
    "message ID's if the --print-data option is used."
def auto_int(x):
    """Parse *x* as an integer, auto-detecting the base from its prefix.

    Accepts plain decimals as well as 0x/0o/0b prefixed literals, for use
    as an argparse ``type=`` converter.
    """
    return int(x, base=0)
def load_options():
global parsed_args
base_parser = argparse.ArgumentParser(add_help=False)
base_parser.add_argument('-i', '--input-file',
help="Input (log) file. If omitted, "
"stdin will be read.")
base_parser.add_argument('-o', '--output-file',
help="Output file. If omitted, "
"the output will be written to stdout.")
base_parser.add_argument('-n', '--no-timestamps', action="store_true",
help="Specifies whether or not the input file "
"contains timestamps. ")
base_parser.add_argument('-d', '--desc-str', nargs='+', type=str,
help="Description string(s) of the dumps. "
"Only dumps with a prefix "
"matching any of the provided desc strings "
"will be analyzed. "
"If no --desc-str option is given, no "
"description filtering will be performed. "
"The prefix of a hexdump is the short "
"description string before the address "
"in each line of the dump, i.e the hexdump "
"prefix. "
"--desc-str is normally used to select "
"between RX and TX logs and should be "
"combined with a proper --data-direction "
"option.")
base_parser.add_argument('-a', '--data-direction', nargs=1, type=str,
help="This option is used to specify how the "
"hexdata should be interpreted. "
"Valid values are: "
"t2h (target to host) or h2t (host to target). "
"With t2h, RX trailers will be printed if "
"--print-data is used. h2t is default. "
"This option should be combined with an "
"applicable --desc-str option. ")
base_parser.add_argument('-v', '--desc-str-invert', nargs='+', type=str,
help="Description string(s) of the dumps to be. "
"excluded. Similar to --desc-str, but all "
"matching prefixes will be excluded from "
"the analysis.")
base_parser.add_argument('-s', '--short-htc-header', action="store_true",
help="Use 6 byte HTC header (\"old\" format) "
"instead of 8 bytes.")
base_parser.add_argument('-t', '--keep-timestamps', action="store_true",
help="Keep the timestamps associated with each "
"hexdump in the output. "
"This option will only have effect if the "
"log file contains timestamps.")
parser = argparse.ArgumentParser(prog="qca_hex_analyzer",
description=description,
parents=[base_parser])
subparsers = parser.add_subparsers(dest="subparser_name")
parser_wmi_ctrl = subparsers.add_parser('wmi-ctrl',
help=wmi_ctrl_help,
description=wmi_ctrl_description,
parents=[base_parser])
parser_wmi_ctrl.add_argument('--wmi-old', action="store_true",
help="Specifies whether or not the WMI messages "
"are according to the \"old\" WMI protocol. "
"If not set, the messages will be interpreted "
"according to the unified WMI format")
parser_wmi_ctrl.add_argument('-p', '--print-data', action="store_true",
help="Print WMI data message payload (and not just "
"WMI message ID) for all encountered messages. ")
parser_wmi_ctrl.add_argument('-e', '--ep-id', metavar='ID', nargs=1,
type=int, default=[2],
help="WMI control service endpoint ID. "
"This is the endpoint where the WMI control data is "
"expected to be present. Make sure the endpoint "
"matches the endpoint id associated with the "
"control service endpoint (service id 0x100) "
"of the driver (the endpoint received from the "
"target in the HTC service connect response). "
"If this option is omitted a default value of 2 "
"will be used.")
parser_wmi_ctrl.add_argument('--tlv', action="store_true",
help="TLV analysis."
"Each WMI message will be interpreted as a TLV "
"message and the content of the message will be. "
"written out in text (instead of hexdump). "
"If the encountered message is not supported by "
"the parser, the hex data will be printed instead.")
parser_wmi_ctrl.add_argument('--id', '--msg-id', metavar='ID',
nargs='+', type=auto_int,
help="WMI message id filter. "
"Only WMI messages with an id matching any of the "
"provided id's will be included in the output. "
"If no --id | --msg-id option is given, no "
"filtering will be performed. ")
parser_wmi_ctrl.add_argument('--skip-id', '--skip-msg-id', metavar='ID',
nargs='+', type=auto_int,
help="WMI message id exclude filter. "
"Similar to --id | --msg-id, but all matching "
"id's will be excluded from the output. ")
parser_htc_ctrl = subparsers.add_parser('htc-ctrl',
help=htc_ctrl_help,
description=htc_ctrl_description,
parents=[base_parser])
parser_htc_ctrl.add_argument('-p', '--print-data', action="store_true",
help="Print HTC ctrl data message payload (and not just "
"message ID) for all encountered messages. ")
parser_htt = subparsers.add_parser('htt',
help=htt_help,
description=htt_description,
parents=[base_parser])
parser_htt.add_argument('-p', '--print-data', action="store_true",
help="Print HTT data message payload (and not just "
"HTT message ID) for all encountered messages. ")
parser_htt.add_argument('-e', '--ep-id', metavar='ID', nargs=1,
type=int, default=[1],
help="HTT service endpoint ID. "
"This is the endpoint where the HTT data is "
"expected to be present. Make sure the endpoint "
"matches the endpoint id associated with the "
"HTT endpoint (service id 0x300) "
"of the driver (the endpoint received from the "
"target in the HTC service connect response). "
"If | |
data
self.logger.info('TextViewController: saveRig Signal received.')
self.rigs['defaultRig'] = self._componentData
@Slot()
def buildRig(self):
    """Toggle the rig's built state and notify the view via onBuiltStateChange.

    Refuses to toggle (and shows an error instead) when any component has
    no deform targets.
    """
    # .values() replaces the Py2-only iteritems() (the key was unused) and
    # any() short-circuits instead of scanning every component.
    missing_targets = any(
        len(com['deformTargets']) < 1 for com in self._componentData.values()
    )
    if missing_targets:
        self._showError('Not enough deform targets!')
        return
    self._built = not self.built
    self.onBuiltStateChange.emit(self.built)
@Slot()
def bakeRig(self):
    """Flip the baked flag and tell the view so it can update its buttons."""
    new_state = not self.baked
    self._baked = new_state
    self.onBakedStateChange.emit(self.baked)
@Slot()
def refreshRig(self):
    # Tell the model to refresh the rig.
    # NOTE(review): not implemented yet -- this slot is currently a no-op.
    pass
@Slot()
def bindRig(self):
    """Flip the bound flag and tell the view so it can update its buttons."""
    new_state = not self.bound
    self._bound = new_state
    self.onBoundStateChange.emit(self.bound)
@Slot(str)
def switchActiveRig(self, rigName):
    # Tell the model to switch the active rig.
    # NOTE(review): not implemented yet -- calling this slot always raises.
    raise NotImplementedError
##### private properties #####
@property
def componentData(self):
    """The model's latest component data dictionary (read-only view)."""
    return self._componentData
@property
def bound(self):
    """Whether the rig is currently bound (read-only)."""
    return self._bound
@property
def built(self):
    """Whether the rig is currently built (read-only)."""
    return self._built
@property
def baked(self):
    """Whether the rig is currently baked (read-only)."""
    return self._baked
##############################
# UI Windows #
##############################
class MainComponentWindow(QtWidgets.QMainWindow):
# Event Signals for main button presses
onPreviewClicked = Signal()
onBindClicked = Signal()
onRemoveClicked = Signal()
# A signal to alert the controller when the window has been close
onWindowClosed = Signal()
# A signal for creating a rig
# The string is the directory
onCreateNewRigClicked = Signal()
# A signal for loading rig
# The string is the directory
onLoadRigClicked = Signal(str)
# A signal for saving the current rig
onSaveRigClicked = Signal()
# A signal for saving the rig as
onSaveRigAsClicked = Signal(str)
# A signal for adding a component
# The string is the name of the component type
onAddComponentClicked = Signal(str)
# A signal for removing a component
# The string is the id of the component
onRemoveComponentClicked = Signal(str)
onDuplicateComponentClicked = Signal(str)
# A signal for moving a component
# The string is the id, the bool is whether it should move up or not
onMoveComponentClicked = Signal(str, bool)
# A signal to let the control know it should add selected joints to a component
# The str is the id of the component to update
onAddSelectedClicked = Signal(str)
# A signal to let the controller know debug mode should be on
onDebugToggled = Signal(bool)
onAdvancedToggled = Signal(bool)
onLogToggled = Signal(bool)
# A signal to let the control know it should switch to bake mode
onBakeToggled = Signal(bool)
# A signal to let the control know a new rig was selected
onRigSwitched = Signal(str)
# Widget Signals
# These are sent to slots in widget this window creates
onUpdateComponentWidgets = Signal(dict)
# This is sent to widgets to update their name list
onUpdateNameList = Signal(str, str)
def __init__(self, parent=None):
    """Create the main rigloo window and build its widget hierarchy."""
    super(MainComponentWindow, self).__init__(parent=parent)
    # Logger named after the concrete class.
    self.logger = addLogger(type(self).__name__)
    # Per-component widgets; rebuilt on every refresh.
    self._componentWidgets = []
    # Placeholders until _setup() creates the real UI / a save happens.
    self.main_widget = None
    self._directory = None
    # Build the window chrome and widgets.
    self._setup()
def _setup(self):
    """Configure window chrome (title, icon, size) and build the UI."""
    self.setWindowTitle('rigloo ' + RIGLOO_VERSION)
    # Window icon lives next to this module.
    base_path = os.path.dirname(os.path.realpath(__file__))
    self.setWindowIcon(QtGui.QIcon(base_path + '/icons/logo-black.png'))
    # Starting size.
    self.resize(300, 700)
    # Menu bar first, then the central widget (which adds the rig selector).
    self._createMenuBar()
    self._createMainWidget()
def _createMenuBar(self):
    """Build the File and Settings menus and wire their actions.

    The repetitive QAction construction is factored into two local
    helpers: one for plain actions, one for checkable toggles.
    """
    def _action(text, status_tip, slot):
        # Plain (non-checkable) menu action wired to `slot` on trigger.
        act = QtWidgets.QAction(text, self)
        act.setStatusTip(status_tip)
        act.triggered.connect(slot)
        return act

    def _toggle(text, status_tip, slot):
        # Checkable menu action, unchecked by default, wired on toggle.
        act = QtWidgets.QAction(text, self)
        act.setCheckable(True)
        act.setChecked(False)
        act.setStatusTip(status_tip)
        act.toggled.connect(slot)
        return act

    newAction = _action('New Rig', 'New Rig', self.onCreateNewRigClicked)
    saveAction = _action('Save Rig', 'Save the current rig', self.onSave)
    saveAsAction = _action('Save Rig as...', 'Save the current rig as...', self.onSaveAs)
    loadAction = _action('Load Rig', 'Load a rig', self.onLoadRig)
    debugAction = _toggle('Debug Mode', 'Toggle Debug Mode', self.onDebugToggled)
    advancedAction = _toggle('Advanced Settings', 'Toggle Advanced Settings',
                             self.onAdvancedToggled)
    # Fixed copy-paste status tip: this action toggles console logging,
    # not debug mode.
    logAction = _toggle('Log to console', 'Toggle logging to console',
                        self.onLogToggled)
    bakeAction = _toggle('Bake To Animation', 'Toggle animation baking on bind',
                         self.onBakeToggled)

    self.statusBar()
    menubar = self.menuBar()
    fileMenu = menubar.addMenu('File')
    settingsMenu = menubar.addMenu('Settings')
    for act in (newAction, saveAction, saveAsAction, loadAction):
        fileMenu.addAction(act)
    # Keep the original Settings ordering: debug, bake, advanced, log.
    for act in (debugAction, bakeAction, advancedAction, logAction):
        settingsMenu.addAction(act)
def _createMainWidget(self):
    """(Re)create the central widget and its vertical layout."""
    # Drop any previous central widget before rebuilding.
    if self.main_widget is not None:
        del self.main_widget
        self.main_widget = None
    widget = QtWidgets.QWidget(self)
    widget.setSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
    widget.setMinimumWidth(350)
    self.main_widget = widget
    layout = QtWidgets.QVBoxLayout(widget)
    layout.setSpacing(5)
    self.main_layout = layout
    # Install as the window's central widget, then add the rig selector row.
    self.setCentralWidget(widget)
    self._addRigSelector()
def _showComponentDataWidget(self):
    # Rebuild the central widget, then populate it: scroll area holding
    # the component widgets on top, action-button row underneath.
    self._createMainWidget()
    # Add the scroll area
    self._addScrollWidget()
    # Add the buttons to the bottom
    self._addButtonWidget()
def _addButtonWidget(self):
    """Create the Add/Remove/Preview/Bind button row at the window bottom."""
    grid = QtWidgets.QGridLayout()
    grid.setSpacing(5)
    self.main_layout.addLayout(grid)
    # 'Add Component' gets its popup menu later (updateAddComponentMenus).
    self.addButton = QtWidgets.QPushButton('Add Component')
    grid.addWidget(self.addButton, 0, 0, 1, 0)
    # 'Remove' button.
    self.removeButton = QtWidgets.QPushButton('Remove')
    self.removeButton.setIcon(QtGui.QIcon(':/deleteActive.png'))
    self.removeButton.clicked.connect(self.onRemoveClicked)
    grid.addWidget(self.removeButton, 1, 0)
    # 'Preview' button (blue).
    self.previewButton = QtWidgets.QPushButton('Preview')
    self.previewButton.setIcon(QtGui.QIcon(':/rebuild.png'))
    self.previewButton.setStyleSheet('QPushButton {background-color: #5285a6}')
    self.previewButton.clicked.connect(self.onPreviewClicked)
    grid.addWidget(self.previewButton, 1, 1, 1, 2)
    # 'Bind' button (red).
    self.bindButton = QtWidgets.QPushButton('Bind')
    self.bindButton.setStyleSheet('QPushButton {background-color: #cc3333}')
    self.bindButton.clicked.connect(self.onBindClicked)
    grid.addWidget(self.bindButton, 1, 3)
def _addRigSelector(self):
    """Add the rig-selection combo box row at the top of the main layout."""
    selectorLayout = QtWidgets.QFormLayout()
    self.main_layout.addLayout(selectorLayout)
    self.rigComboBox = QtWidgets.QComboBox(self.main_widget)
    selectorLayout.addRow('Rig:', self.rigComboBox)
    # Bug fix: QComboBox.activated carries the row *index*, but
    # onRigSwitched is declared as Signal(str). Route through the
    # _onRigSwitched wrapper, which emits the selected rig *name*
    # (currentText) instead. The wrapper was previously unused.
    self.rigComboBox.activated.connect(self._onRigSwitched)
def _onRigSwitched(self):
    """Re-emit the combo box selection as an onRigSwitched(str) signal."""
    rig_name = self.rigComboBox.currentText()
    self.onRigSwitched.emit(rig_name)
def _addScrollWidget(self):
    """Add the scrollable container that will host the component widgets."""
    area = QtWidgets.QScrollArea(self.main_widget)
    area.setWidgetResizable(True)
    # Vertical bar always visible, horizontal never.
    area.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
    area.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOn)
    self.main_layout.addWidget(area)
    self.scrollWidget = area
def _addHorizontalLine(self):
    """Return a sunken horizontal separator line widget (not yet added)."""
    separator = QtWidgets.QFrame(self.main_widget)
    separator.setFrameShadow(QtWidgets.QFrame.Sunken)
    separator.setFrameShape(QtWidgets.QFrame.HLine)
    separator.setSizePolicy(QtWidgets.QSizePolicy.Expanding,
                            QtWidgets.QSizePolicy.Fixed)
    return separator
def closeEvent(self, event):
    # Qt close hook: notify the controller before the window goes away,
    # then accept the close unconditionally.
    self.logger.debug('Window close event received')
    self.onWindowClosed.emit()
    event.accept()
# This is called by the controller to update the rig selector
def _refreshActiveRigs(self, rigNames, activeRigName):
    """Repopulate the rig combo box and select the active rig."""
    self.logger.debug('Refreshing the active rigs. Active Rig: %s, Rig Names: %s', activeRigName, rigNames)
    # Clear the combobox, then repopulate it.
    self.rigComboBox.clear()
    self.rigComboBox.addItems(rigNames)
    # Robustness fix: rigNames.index() previously raised ValueError when
    # the active rig was missing from the list; fall back to the first
    # entry instead of crashing the refresh.
    try:
        active_index = rigNames.index(activeRigName)
    except ValueError:
        self.logger.warning('Active rig %s not found in rig list; defaulting to first entry', activeRigName)
        active_index = 0
    self.rigComboBox.setCurrentIndex(active_index)
##### Controller Slots #####
@Slot(dict, list, list, dict, list, str)
def refreshComponentWidgets(self, componentData, componentTypeData, controlTypeData, componentSettings,
                            activeRigs, activeRig):
    """Regenerate the per-component UI from an updated component data dict.

    Called by the controller whenever the model's component data changes.
    Rebuilds the scroll-area contents, preserving the scroll position.
    """
    self._refreshActiveRigs(activeRigs, activeRig)
    self.updateAddComponentMenus(componentTypeData)
    self.logger.debug('Refreshing component widgets')
    # Remember the scroll position so the rebuild doesn't jump the view.
    scrollValue = self.scrollWidget.verticalScrollBar().value()
    # Clear the component widget list in place.
    del self._componentWidgets[:]
    # Fresh container widget for the components.
    self.componentWidget = QtWidgets.QWidget()
    layout = QtWidgets.QVBoxLayout()
    layout.setAlignment(QtCore.Qt.AlignTop)
    layout.setSpacing(0)
    layout.setContentsMargins(0, 0, 0, 0)
    self.componentWidget.setLayout(layout)
    # Leading separator line.
    layout.addWidget(self._addHorizontalLine())
    # .items() replaces the Py2-only iteritems(); componentId replaces a
    # loop variable that shadowed the builtin `id`.
    for componentId, data in componentData.items():
        try:
            index = data['index']
        except KeyError:
            self.logger.info('%s Component did not have an index value, assigning a new one.', data['type'])
            # NOTE(review): len(componentData) + 1 can assign the same
            # index to several components missing one -- confirm intended.
            index = len(componentData) + 1
        # Build the widget for this component and wire its signals.
        widget = ComponentWidget(data['name'], componentData, self, componentId,
                                 componentTypeData,
                                 controlTypeData,
                                 componentSettings,
                                 index)
        widget.onAddSelected.connect(self.onAddSelectedClicked)
        widget.onRemoveComponentClicked.connect(self.onRemoveComponentClicked)
        widget.onMoveComponentClicked.connect(self.onMoveComponentClicked)
        widget.onDuplicateComponentClicked.connect(self.onDuplicateComponentClicked)
        self.onUpdateNameList.connect(widget.onUpdateNameList)
        widget.onNameChanged.connect(self.onUpdateNameList)
        self._componentWidgets.append(widget)
    # Sort by index, then lay the widgets out with separator lines.
    # (Renamed from `list`, which shadowed the builtin.)
    orderedWidgets = sorted(self._componentWidgets, key=lambda w: w.index)
    for widget in orderedWidgets:
        layout.addWidget(widget)
        layout.addWidget(self._addHorizontalLine())
    # Notify every widget that cares about component data, install the new
    # container and restore the scroll position.
    self.onUpdateComponentWidgets.emit(componentData)
    self.scrollWidget.setWidget(self.componentWidget)
    self.scrollWidget.verticalScrollBar().setValue(scrollValue)
    self.logger.debug('refreshed components successfully')
@Slot(list)
def updateControlTypeData(self, controlTypeData):
    # Forward new control type data to the component widgets.
    self.logger.debug('Updating component widgets with new control type data.')
    # NOTE(review): onUpdateControlTypeWidgets is not among the Signal
    # declarations visible at the top of this class in this file --
    # emitting it will raise AttributeError unless it is defined
    # elsewhere. Confirm.
    self.onUpdateControlTypeWidgets.emit(controlTypeData)
@Slot(list)
def updateComponentTypeData(self, componentTypeData):
    # Forward new component type data to the component widgets.
    self.logger.debug('Updating component widgets with new component type data.')
    # NOTE(review): onUpdateComponentTypeWidgets is not among the Signal
    # declarations visible at the top of this class in this file --
    # emitting it will raise AttributeError unless it is defined
    # elsewhere. Confirm.
    self.onUpdateComponentTypeWidgets.emit(componentTypeData)
@Slot(list)
def updateAddComponentMenus(self, componentTypeData):
    """Rebuild the 'Add Component' button's popup menu from the type list."""
    self.logger.debug('Updating add component menus with new component type data.')
    menu = QtWidgets.QMenu(self.main_widget)
    for typeName in componentTypeData:
        entry = QtWidgets.QAction(self.addButton)
        entry.setText(typeName)
        # Each entry triggers a per-type callback from the generator.
        entry.triggered.connect(self._onAddComponentGenerator(typeName))
        menu.addAction(entry)
    self.addButton.setMenu(menu)
@Slot()
def createRigWidget(self):
    # Controller slot: show the component editor UI for the current rig.
    self._showComponentDataWidget()
@Slot()
def onSave(self):
if self._directory is None:
self.logger.info('No directory set, running a save | |
"squeamishness",
"squeegee",
"squeezable",
"squeeze",
"squeezer",
"squelch",
"squelchy",
"squib",
"squid",
"squiggle",
"squiggly",
"squint",
"squire",
"squirm",
"squirmy",
"squirrel",
"squirt",
"squish",
"squishy",
"stabber",
"stabbing",
"stability",
"stabilization",
"stabilize",
"stabilizer",
"stable",
"stableman",
"stably",
"staccato",
"stack",
"stacked",
"stacks",
"stadia",
"stadium",
"staff",
"staffer",
"staffing",
"stage",
"stagecoach",
"stagecraft",
"stagehand",
"stagestruck",
"stagflation",
"stagger",
"staggering",
"staggeringly",
"staggers",
"staging",
"stagnancy",
"stagnant",
"stagnantly",
"stagnate",
"stagnation",
"stagy",
"staid",
"staidly",
"staidness",
"stain",
"stainless",
"stair",
"staircase",
"stairs",
"stairway",
"stairwell",
"stake",
"stakeholder",
"stakeout",
"stalactite",
"stalagmite",
"stale",
"stalemate",
"staleness",
"stalk",
"stalked",
"stalker",
"stalking",
"stall",
"stallion",
"stalwart",
"stamen",
"stamina",
"stammer",
"stammerer",
"stammeringly",
"stamp",
"stampede",
"stamper",
"stance",
"stanch",
"stanchion",
"stand",
"standalone",
"standard",
"standardization",
"standardize",
"standards",
"standby",
"standee",
"stander",
"standing",
"standings",
"standoff",
"standoffish",
"standout",
"standpipe",
"standpoint",
"stands",
"standstill",
"standup",
"stank",
"stanza",
"staph",
"staphylococcal",
"staphylococci",
"staphylococcus",
"staple",
"stapler",
"starboard",
"starch",
"starched",
"starchily",
"starchiness",
"starchy",
"stardom",
"stardust",
"stare",
"starer",
"starfish",
"stargaze",
"stargazer",
"stark",
"starkly",
"starkness",
"starless",
"starlet",
"starlight",
"starling",
"starlit",
"starry",
"stars",
"start",
"starter",
"startle",
"startled",
"startling",
"starvation",
"starve",
"starveling",
"stash",
"state",
"statecraft",
"statehood",
"statehouse",
"stateless",
"statelessness",
"stateliness",
"stately",
"statement",
"stateroom",
"stateside",
"statesman",
"statesmanlike",
"statesmanship",
"stateswoman",
"static",
"statically",
"station",
"stationary",
"stationer",
"stationery",
"statistic",
"statistical",
"statistically",
"statistician",
"statistics",
"stats",
"statuary",
"statue",
"statuesque",
"statuette",
"stature",
"status",
"statute",
"statutory",
"staunch",
"staunchly",
"staunchness",
"stave",
"staves",
"stead",
"steadfast",
"steadfastly",
"steadfastness",
"steadily",
"steadiness",
"steady",
"steak",
"steakhouse",
"steal",
"stealth",
"stealthily",
"stealthiness",
"stealthy",
"steam",
"steamboat",
"steamer",
"steamfitter",
"steamfitting",
"steaminess",
"steaming",
"steamroll",
"steamroller",
"steamship",
"steamy",
"steed",
"steel",
"steeliness",
"steelworker",
"steelworks",
"steely",
"steelyard",
"steep",
"steepen",
"steeple",
"steeplechase",
"steeplejack",
"steeply",
"steepness",
"steer",
"steerable",
"steerage",
"steering",
"steersman",
"stegosaurus",
"stein",
"stellar",
"stemless",
"stemmed",
"stemware",
"stench",
"stencil",
"steno",
"stenographer",
"stenographic",
"stenography",
"stentorian",
"stepbrother",
"stepchild",
"stepchildren",
"stepdaughter",
"stepfather",
"stepladder",
"stepmother",
"stepparent",
"steppe",
"stepper",
"steppingstone",
"steps",
"stepsister",
"stepson",
"stereo",
"stereophonic",
"stereoscope",
"stereoscopic",
"stereotype",
"stereotyped",
"stereotypical",
"sterile",
"sterility",
"sterilization",
"sterilize",
"sterilizer",
"sterling",
"stern",
"sterna",
"sternly",
"sternness",
"sternum",
"steroid",
"steroidal",
"stertorous",
"stethoscope",
"stetson",
"stevedore",
"steward",
"stewardess",
"stewardship",
"stick",
"sticker",
"stickily",
"stickiness",
"stickleback",
"stickler",
"stickpin",
"sticks",
"stickup",
"sticky",
"stiff",
"stiffen",
"stiffener",
"stiffening",
"stiffly",
"stiffness",
"stifle",
"stifling",
"stiflingly",
"stigma",
"stigmata",
"stigmatic",
"stigmatization",
"stigmatize",
"stile",
"stiletto",
"still",
"stillbirth",
"stillborn",
"stillness",
"stilt",
"stilted",
"stilts",
"stimulant",
"stimulate",
"stimulating",
"stimulation",
"stimulative",
"stimuli",
"stimulus",
"sting",
"stinger",
"stingily",
"stinginess",
"stingray",
"stingy",
"stink",
"stinkbug",
"stinker",
"stinking",
"stinky",
"stint",
"stinting",
"stipend",
"stipple",
"stippling",
"stipulate",
"stipulation",
"stirrer",
"stirring",
"stirringly",
"stirrup",
"stitch",
"stitchery",
"stitching",
"stoat",
"stock",
"stockade",
"stockbreeder",
"stockbroker",
"stockbroking",
"stockholder",
"stockily",
"stockiness",
"stockinette",
"stocking",
"stockpile",
"stockpot",
"stockroom",
"stocks",
"stocktaking",
"stocky",
"stockyard",
"stodgily",
"stodginess",
"stodgy",
"stogie",
"stogy",
"stoic",
"stoical",
"stoically",
"stoicism",
"stoke",
"stoked",
"stoker",
"stole",
"stolen",
"stolid",
"stolidity",
"stolidly",
"stolon",
"stomach",
"stomachache",
"stomacher",
"stomp",
"stone",
"stoned",
"stonemason",
"stonewall",
"stoneware",
"stonewashed",
"stonework",
"stonily",
"stoniness",
"stony",
"stood",
"stooge",
"stool",
"stoop",
"stopcock",
"stopgap",
"stoplight",
"stopover",
"stoppage",
"stopper",
"stopple",
"stopwatch",
"storage",
"store",
"storefront",
"storehouse",
"storekeeper",
"storeroom",
"storey",
"storied",
"stork",
"storm",
"stormily",
"storminess",
"stormy",
"story",
"storyboard",
"storybook",
"storyteller",
"storytelling",
"stoup",
"stout",
"stouthearted",
"stoutly",
"stoutness",
"stove",
"stovepipe",
"stowage",
"stowaway",
"straddle",
"straddler",
"strafe",
"straggle",
"straggler",
"straggly",
"straight",
"straightaway",
"straightedge",
"straighten",
"straightener",
"straightforward",
"straightforwardly",
"straightforwardness",
"straightjacket",
"straightly",
"straightness",
"straightway",
"strain",
"strained",
"strainer",
"strait",
"straiten",
"straitened",
"straitjacket",
"straitlaced",
"straits",
"strand",
"stranded",
"strange",
"strangely",
"strangeness",
"stranger",
"strangle",
"stranglehold",
"strangler",
"strangulate",
"strangulation",
"strap",
"strapless",
"strapped",
"strapping",
"strata",
"stratagem",
"strategic",
"strategically",
"strategics",
"strategist",
"strategy",
"strati",
"stratification",
"stratified",
"stratify",
"stratosphere",
"stratospheric",
"stratum",
"stratus",
"straw",
"strawberry",
"stray",
"streak",
"streaker",
"streaky",
"stream",
"streamer",
"streamline",
"streamlined",
"street",
"streetcar",
"streetlight",
"streetwalker",
"streetwise",
"strength",
"strengthen",
"strengthener",
"strenuous",
"strenuously",
"strenuousness",
"strep",
"streptococcal",
"streptococci",
"streptococcus",
"streptomycin",
"stress",
"stressed",
"stressful",
"stretch",
"stretchable",
"stretcher",
"stretchy",
"strew",
"strewn",
"stria",
"striae",
"striated",
"striation",
"stricken",
"strict",
"strictly",
"strictness",
"stricture",
"stridden",
"stride",
"stridency",
"strident",
"stridently",
"strife",
"strike",
"strikebreaker",
"strikeout",
"striker",
"striking",
"strikingly",
"string",
"stringed",
"stringency",
"stringent",
"stringently",
"stringer",
"stringiness",
"strings",
"stringy",
"strip",
"stripe",
"striped",
"stripling",
"stripper",
"striptease",
"stripteaser",
"strive",
"striven",
"strobe",
"stroboscope",
"stroboscopic",
"strode",
"stroke",
"stroll",
"stroller",
"strong",
"strongbox",
"stronghold",
"strongly",
"strongman",
"strontium",
"strop",
"strophe",
"strophic",
"strove",
"struck",
"structural",
"structurally",
"structure",
"structured",
"strudel",
"struggle",
"strum",
"strumpet",
"strung",
"strut",
"strychnine",
"stubble",
"stubbly",
"stubborn",
"stubbornly",
"stubbornness",
"stubby",
"stucco",
"stuck",
"studbook",
"studded",
"studding",
"student",
"studied",
"studiedly",
"studies",
"studio",
"studious",
"studiously",
"studiousness",
"study",
"stuff",
"stuffily",
"stuffiness",
"stuffing",
"stuffy",
"stultification",
"stultify",
"stumble",
"stumbler",
"stump",
"stumpy",
"stung",
"stunk",
"stunning",
"stunningly",
"stunt",
"stupefaction",
"stupefied",
"stupefy",
"stupefying",
"stupendous",
"stupendously",
"stupid",
"stupidity",
"stupidly",
"stupor",
"sturdily",
"sturdiness",
"sturdy",
"sturgeon",
"stutter",
"stutterer",
"style",
"styli",
"styling",
"stylish",
"stylishly",
"stylishness",
"stylist",
"stylistic",
"stylistically",
"stylize",
"stylized",
"stylus",
"stymie",
"styptic",
"suasion",
"suave",
"suavely",
"suaveness",
"suavity",
"subaltern",
"subarctic",
"subarea",
"subatomic",
"subbranch",
"subcategory",
"subcommittee",
"subcompact",
"subconscious",
"subconsciously",
"subconsciousness",
"subcontinent",
"subcontinental",
"subcontract",
"subcontractor",
"subculture",
"subcutaneous",
"subcutaneously",
"subdivide",
"subdivision",
"subdue",
"subdued",
"subfamily",
"subfreezing",
"subgroup",
"subhead",
"subheading",
"subhuman",
"subject",
"subjection",
"subjective",
"subjectively",
"subjectivity",
"subjoin",
"subjugate",
"subjugation",
"subjunctive",
"sublease",
"sublet",
"sublimate",
"sublimation",
"sublime",
"sublimely",
"subliminal",
"sublimity",
"submarginal",
"submarine",
"submariner",
"submerge",
"submerged",
"submergence",
"submerse",
"submersible",
"submersion",
"submicroscopic",
"submission",
"submissive",
"submissively",
"submissiveness",
"submit",
"subnormal",
"suborbital",
"suborder",
"subordinate",
"subordination",
"suborn",
"subornation",
"subplot",
"subpoena",
"subprofessional",
"subroutine",
"subscribe",
"subscriber",
"subscript",
"subscription",
"subsection",
"subsequent",
"subsequently",
"subservience",
"subservient",
"subserviently",
"subset",
"subside",
"subsidence",
"subsidiary",
"subsidization",
"subsidize",
"subsidized",
"subsidy",
"subsist",
"subsistence",
"subsoil",
"subsonic",
"subspecies",
"substance",
"substandard",
"substantial",
"substantially",
"substantiate",
"substantiation",
"substantive",
"substantively",
"substation",
"substitute",
"substitution",
"substrata",
"substrate",
"substratum",
"substructure",
"subsume",
"subsurface",
"subsystem",
"subteen",
"subtenancy",
"subtenant",
"subterfuge",
"subterranean",
"subtext",
"subtitle",
"subtitled",
"subtitles",
"subtle",
"subtlety",
"subtly",
"subtopic",
"subtotal",
"subtract",
"subtraction",
"subtrahend",
"subtropical",
"subtropics",
"suburb",
"suburban",
"suburbanite",
"suburbia",
"suburbs",
"subvention",
"subversion",
"subversive",
"subversively",
"subversiveness",
"subvert",
"subway",
"subzero",
"succeed",
"succeeding",
"success",
"successful",
"successfully",
"succession",
"successive",
"successively",
"successor",
"succinct",
"succinctly",
"succinctness",
"succor",
"succotash",
"succulence",
"succulent",
"succumb",
"suchlike",
"sucker",
"suckle",
"suckling",
"sucrose",
"suction",
"sudden",
"suddenly",
"suddenness",
"sudsy",
"suede",
"suffer",
"sufferance",
"sufferer",
"suffering",
"suffice",
"sufficiency",
"sufficient",
"sufficiently",
"suffix",
"suffixation",
"suffocate",
"suffocation",
"suffragan",
"suffrage",
"suffragette",
"suffragist",
"suffuse",
"suffusion",
"sugar",
"sugarcane",
"sugarcoat",
"sugared",
"sugarless",
"sugarplum",
"sugary",
"suggest",
"suggestibility",
"suggestible",
"suggestion",
"suggestive",
"suggestively",
"suggestiveness",
"suicidal",
"suicide",
"suitability",
"suitable",
"suitableness",
"suitably",
"suitcase",
"suite",
"suiting",
"suitor",
"sukiyaki",
"sulfa",
"sulfate",
"sulfide",
"sulfur",
"sulfuric",
"sulfurous",
"sulkily",
"sulkiness",
"sulky",
"sullen",
"sullenly",
"sullenness",
"sully",
"sulphur",
"sultan",
"sultana",
"sultanate",
"sultrily",
"sultriness",
"sultry",
"sumac",
"sumach",
"summarily",
"summarize",
"summary",
"summation",
"summer",
"summerhouse",
"summertime",
"summery",
"summit",
"summitry",
"summon",
"summoner",
"summons",
"sumptuous",
"sumptuously",
"sumptuousness",
"sunbath",
"sunbathe",
"sunbather",
"sunbathing",
"sunbeam",
"sunblock",
"sunbonnet",
"sunburn",
"sunburned",
"sunburnt",
"sunburst",
"sundae",
"sunder",
"sundial",
"sundown",
"sundries",
"sundry",
"sunfish",
"sunflower",
"sunglasses",
"sunken",
"sunlamp",
"sunless",
"sunlight",
"sunlit",
"sunniness",
"sunny",
"sunrise",
"sunroof",
"sunscreen",
"sunset",
"sunshade",
"sunshine",
"sunshiny",
"sunspot",
"sunstroke",
"suntan",
"suntanned",
"sunup",
"super",
"superabundance",
"superabundant",
"superannuate",
"superannuated",
"superannuation",
"superb",
"superbly",
"supercargo",
"supercharge",
"supercharger",
"supercilious",
"superciliously",
"superciliousness",
"supercity",
"supercomputer",
"superconducting",
"superconductive",
"superconductivity",
"superconductor",
"superego",
"supererogation",
"supererogatory",
"superficial",
"superficiality",
"superficially",
"superfine",
"superfluity",
"superfluous",
"superfluously",
"superfluousness",
"superhero",
"superhighway",
"superhuman",
"superimpose",
"superimposition",
"superintend",
"superintendence",
"superintendency",
"superintendent",
"superior",
"superiority",
"superlative",
"superlatively",
"superman",
"supermarket",
"supermom",
"supernal",
"supernatural",
"supernaturally",
"supernova",
"supernovae",
"supernumerary",
"superpose",
"superposition",
"superpower",
"supersaturate",
"supersaturation",
"superscribe",
"superscript",
"superscription",
"supersede",
"supersonic",
"superstar",
"superstition",
"superstitious",
"superstitiously",
"superstore",
"superstructure",
"supertanker",
"supervene",
"supervention",
"supervise",
"supervision",
"supervisor",
"supervisory",
"superwoman",
"supine",
"supper",
"supplant",
"supple",
"supplement",
"supplemental",
"supplementary",
"supplementation",
"suppleness",
"suppliant",
"supplicant",
"supplicate",
"supplication",
"supplier",
"supplies",
"supply",
"support",
"supportable",
"supporter",
"supportive",
"suppose",
"supposed",
"supposedly",
"supposing",
"supposition",
"suppository",
"suppress",
"suppressant",
"suppressible",
"suppression",
"suppressor",
"suppurate",
"suppuration",
"supra",
"supranational",
"supremacist",
"supremacy",
"supreme",
"supremely",
"surcease",
"surcharge",
"surcingle",
"surefire",
"surefooted",
"surely",
"sureness",
"surety",
"surface",
"surfboard",
"surfeit",
"surfer",
"surfing",
"surge",
"surgeon",
"surgery",
"surgical",
"surgically",
"surliness",
"surly",
"surmise",
"surmount",
"surmountable",
"surname",
"surpass",
"surpassing",
"surplice",
"surplus",
"surprise",
"surprised",
"surprising",
"surprisingly",
"surreal",
"surrealism",
"surrealist",
"surrealistic",
"surrealistically",
"surrender",
"surreptitious",
"surreptitiously",
"surreptitiousness",
"surrey",
"surrogacy",
"surrogate",
"surround",
"surrounding",
"surroundings",
"surtax",
"surveillance",
"survey",
"surveying",
"surveyor",
"survival",
"survivalist",
"survive",
"survivor",
"susceptibility",
"susceptible",
"sushi",
"suspect",
"suspend",
"suspender",
"suspenders",
"suspense",
"suspenseful",
"suspension",
"suspicion",
"suspicious",
"suspiciously",
"sustain",
"sustainable",
"sustained",
"sustenance",
"sutler",
"suture",
"suzerain",
"suzerainty",
"svelte",
"swaddle",
"swagger",
"swain",
"swallow",
"swallowtail",
"swami",
"swamp",
"swampland",
"swampy",
"swank",
"swankily",
"swankiness",
"swanky",
"swansdown",
"sward",
"swarm",
"swarthy",
"swash",
"swashbuckler",
"swashbuckling",
"swastika",
"swatch",
"swath",
"swathe",
"swatter",
"swayback",
"swaybacked",
"swear",
"swearer",
"swearword",
"sweat",
"sweatband",
"sweater",
"sweatpants",
"sweats",
"sweatshirt",
"sweatshop",
"sweaty",
"sweep",
"sweeper",
"sweeping",
"sweepingly",
"sweepings",
"sweepstake",
"sweepstakes",
"sweet",
"sweetbread",
"sweetbrier",
"sweeten",
"sweetener",
"sweetening",
"sweetheart",
"sweetie",
"sweetish",
"sweetly",
"sweetmeat",
"sweetness",
"sweets",
"swell",
"swellhead",
"swellheaded",
"swelling",
"swelter",
"sweltering",
"swept",
"sweptback",
"swerve",
"swift",
"swiftly",
"swiftness",
"swill",
"swimmer",
"swimming",
"swimmingly",
"swimsuit",
"swindle",
"swindler",
"swine",
"swineherd",
"swing",
"swinger",
"swinging",
"swinish",
"swipe",
"swirl",
"swirly",
"swish",
"switch",
"switchback",
"switchblade",
"switchboard",
"switcher",
"swivel",
"swollen",
"swoon",
"swoop",
"swoosh",
"sword",
"swordfish",
"swordplay",
"swordsman",
"swordsmanship",
"swore",
"sworn",
"swung",
"sybarite",
"sybaritic",
"sycamore",
"sycophancy",
"sycophant",
"sycophantic",
"syllabi",
"syllabic",
"syllabicate",
"syllabication",
"syllabification",
"syllabify",
"syllable",
"syllabus",
"syllogism",
"syllogistic",
"sylph",
| |
/* [MS-OFFCRYPTO] 2.3.7.1 Binary Document Password Verifier Derivation */
/* Derive the 16-bit XOR-obfuscation password verifier ([MS-OFFCRYPTO] 2.3.7.1). */
function crypto_CreatePasswordVerifier_Method1(Password) {
	var bytes = _JS2ANSI(Password);
	var count = bytes.length + 1;
	/* buffer layout: [password length, byte0, byte1, ...] */
	var buf = new_buf(count);
	buf[0] = bytes.length;
	for(var j = 1; j < count; ++j) buf[j] = bytes[j - 1];
	var Verifier = 0x0000;
	/* walk the buffer backwards: rotate the 15-bit verifier left, mix in each byte */
	for(var i = count - 1; i >= 0; --i) {
		var rotated = ((Verifier << 1) & 0x7FFF) | (((Verifier & 0x4000) === 0x0000) ? 0 : 1);
		Verifier = rotated ^ buf[i];
	}
	return Verifier ^ 0xCE4B;
}
/* [MS-OFFCRYPTO] 2.3.7.2 Binary Document XOR Array Initialization */
/* Build the 16-byte XOR obfuscation array from a password ([MS-OFFCRYPTO] 2.3.7.2).
   BUGFIX: the password-to-ANSI conversion call had been corrupted to `_<PASSWORD>`;
   restored to `_JS2ANSI` (the same helper used by the verifier derivation above). */
var crypto_CreateXorArray_Method1 = (function() {
	var PadArray = [0xBB, 0xFF, 0xFF, 0xBA, 0xFF, 0xFF, 0xB9, 0x80, 0x00, 0xBE, 0x0F, 0x00, 0xBF, 0x0F, 0x00];
	var InitialCode = [0xE1F0, 0x1D0F, 0xCC9C, 0x84C0, 0x110C, 0x0E10, 0xF1CE, 0x313E, 0x1872, 0xE139, 0xD40F, 0x84F9, 0x280C, 0xA96A, 0x4EC3];
	var XorMatrix = [0xAEFC, 0x4DD9, 0x9BB2, 0x2745, 0x4E8A, 0x9D14, 0x2A09, 0x7B61, 0xF6C2, 0xFDA5, 0xEB6B, 0xC6F7, 0x9DCF, 0x2BBF, 0x4563, 0x8AC6, 0x05AD, 0x0B5A, 0x16B4, 0x2D68, 0x5AD0, 0x0375, 0x06EA, 0x0DD4, 0x1BA8, 0x3750, 0x6EA0, 0xDD40, 0xD849, 0xA0B3, 0x5147, 0xA28E, 0x553D, 0xAA7A, 0x44D5, 0x6F45, 0xDE8A, 0xAD35, 0x4A4B, 0x9496, 0x390D, 0x721A, 0xEB23, 0xC667, 0x9CEF, 0x29FF, 0x53FE, 0xA7FC, 0x5FD9, 0x47D3, 0x8FA6, 0x0F6D, 0x1EDA, 0x3DB4, 0x7B68, 0xF6D0, 0xB861, 0x60E3, 0xC1C6, 0x93AD, 0x377B, 0x6EF6, 0xDDEC, 0x45A0, 0x8B40, 0x06A1, 0x0D42, 0x1A84, 0x3508, 0x6A10, 0xAA51, 0x4483, 0x8906, 0x022D, 0x045A, 0x08B4, 0x1168, 0x76B4, 0xED68, 0xCAF1, 0x85C3, 0x1BA7, 0x374E, 0x6E9C, 0x3730, 0x6E60, 0xDCC0, 0xA9A1, 0x4363, 0x86C6, 0x1DAD, 0x3331, 0x6662, 0xCCC4, 0x89A9, 0x0373, 0x06E6, 0x0DCC, 0x1021, 0x2042, 0x4084, 0x8108, 0x1231, 0x2462, 0x48C4];
	/* rotate a byte right by one bit */
	var Ror = function(Byte) { return ((Byte/2) | (Byte*128)) % 0x100; };
	var XorRor = function(byte1, byte2) { return Ror(byte1 ^ byte2); };
	/* fold the password bits through XorMatrix to get the 16-bit XOR key */
	var CreateXorKey_Method1 = function(Password) {
		var XorKey = InitialCode[Password.length - 1];
		var CurrentElement = 0x68;
		for(var i = Password.length-1; i >= 0; --i) {
			var Char = Password[i];
			for(var j = 0; j != 7; ++j) {
				if(Char & 0x40) XorKey ^= XorMatrix[CurrentElement];
				Char *= 2; --CurrentElement;
			}
		}
		return XorKey;
	};
	return function(password) {
		var Password = _JS2ANSI(password); /* was corrupted to `_<PASSWORD>(password)` */
		var XorKey = CreateXorKey_Method1(Password);
		var Index = Password.length;
		var ObfuscationArray = new_buf(16);
		for(var i = 0; i != 16; ++i) ObfuscationArray[i] = 0x00;
		var Temp, PasswordLastChar, PadIndex;
		/* odd-length passwords: seed the last slot from the pad and the final char */
		if((Index % 2) === 1) {
			Temp = XorKey >> 8;
			ObfuscationArray[Index] = XorRor(PadArray[0], Temp);
			--Index;
			Temp = XorKey & 0xFF;
			PasswordLastChar = Password[Password.length - 1];
			ObfuscationArray[Index] = XorRor(PasswordLastChar, Temp);
		}
		/* fill the low slots from password bytes, two at a time */
		while(Index > 0) {
			--Index;
			Temp = XorKey >> 8;
			ObfuscationArray[Index] = XorRor(Password[Index], Temp);
			--Index;
			Temp = XorKey & 0xFF;
			ObfuscationArray[Index] = XorRor(Password[Index], Temp);
		}
		/* pad the remaining high slots from PadArray */
		Index = 15;
		PadIndex = 15 - Password.length;
		while(PadIndex > 0) {
			Temp = XorKey >> 8;
			ObfuscationArray[Index] = XorRor(PadArray[PadIndex], Temp);
			--Index;
			--PadIndex;
			Temp = XorKey & 0xFF;
			ObfuscationArray[Index] = XorRor(Password[Index], Temp);
			--Index;
			--PadIndex;
		}
		return ObfuscationArray;
	};
})();
/* [MS-OFFCRYPTO] 2.3.7.3 Binary Document XOR Data Transformation Method 1 */
var crypto_DecryptData_Method1 = function(password, Data, XorArrayIndex, XorArray, O) {
/* If XorArray is set, use it; if O is not set, make changes in-place */
if(!O) O = Data;
if(!XorArray) XorArray = crypto_CreateXorArray_Method1(password);
var Index, Value;
for(Index = 0; Index != Data.length; ++Index) {
Value = Data[Index];
Value ^= XorArray[XorArrayIndex];
Value = ((Value>>5) | (Value<<3)) & 0xFF;
O[Index] = Value;
++XorArrayIndex;
}
return [O, XorArrayIndex, XorArray];
};
/* Build a stateful decryptor closure that keeps the XOR rotor position across calls. */
var crypto_MakeXorDecryptor = function(password) {
	var XorArray = crypto_CreateXorArray_Method1(password);
	var XorArrayIndex = 0;
	return function(Data) {
		var result = crypto_DecryptData_Method1(null, Data, XorArrayIndex, XorArray);
		XorArrayIndex = result[1]; /* remember where the rotor stopped */
		return result[0];
	};
};
/* 2.5.343 */
/* 2.5.343 XORObfuscation: read the stored key/verifier pair and, when a
   password was supplied, validate it and install an in-situ decryptor. */
function parse_XORObfuscation(blob, length, opts, out) {
	var key = parseuint16(blob);
	var verificationBytes = parseuint16(blob);
	var o = { key: key, verificationBytes: verificationBytes };
	if(opts.password) o.verifier = crypto_CreatePasswordVerifier_Method1(opts.password);
	out.valid = o.verificationBytes === o.verifier;
	if(out.valid) out.insitu_decrypt = crypto_MakeXorDecryptor(opts.password);
	return o;
}
/* 2.4.117 */
/* 2.4.117 FilePass header: peek the 2-byte version word, rewind, and dispatch
   to the matching RC4 header parser (which re-reads the word itself). */
function parse_FilePassHeader(blob, length, oo) {
	var o = oo || {};
	o.Info = blob.read_shift(2);
	blob.l -= 2; /* sub-parser re-reads the version word */
	o.Data = (o.Info === 1) ? parse_RC4Header(blob, length) : parse_RC4CryptoHeader(blob, length);
	return o;
}
/* 2.4.117 FilePass record: wEncryptionType 0 = XOR obfuscation, nonzero = RC4 */
function parse_FilePass(blob, length, opts) {
	var o = { Type: blob.read_shift(2) };
	if(o.Type === 0) parse_XORObfuscation(blob, length - 2, opts, o);
	else parse_FilePassHeader(blob, length - 2, o);
	return o;
}
/* Small helpers */
function parseread(l) { return function(blob, length) { blob.l+=l; return; }; }
function parseread1(blob, length) { blob.l+=1; return; }
/* Rgce Helpers */
/* 2.5.51 */
function parse_ColRelU(blob, length) {
var c = blob.read_shift(2);
return [c & 0x3FFF, (c >> 14) & 1, (c >> 15) & 1];
}
/* 172.16.31.10 */
/* RgceArea: rectangular range -- two rows followed by two ColRelU fields. */
function parse_RgceArea(blob, length) {
	var rowFirst = blob.read_shift(2);
	var rowLast = blob.read_shift(2);
	var colFirst = parse_ColRelU(blob, 2);
	var colLast = parse_ColRelU(blob, 2);
	return {
		s: { r: rowFirst, c: colFirst[0], cRel: colFirst[1], rRel: colFirst[2] },
		e: { r: rowLast, c: colLast[0], cRel: colLast[1], rRel: colLast[2] }
	};
}
/* 172.16.31.10 TODO */
/* RgceAreaRel: same wire layout as RgceArea (TODO: relative-reference semantics). */
function parse_RgceAreaRel(blob, length) {
	var r1 = blob.read_shift(2), r2 = blob.read_shift(2);
	var c1 = parse_ColRelU(blob, 2);
	var c2 = parse_ColRelU(blob, 2);
	return {
		s: { r: r1, c: c1[0], cRel: c1[1], rRel: c1[2] },
		e: { r: r2, c: c2[0], cRel: c2[1], rRel: c2[2] }
	};
}
/* 172.16.17.32 */
/* RgceLoc: single cell location (row word + ColRelU column field). */
function parse_RgceLoc(blob, length) {
	var row = blob.read_shift(2);
	var col = parse_ColRelU(blob, 2);
	return { r: row, c: col[0], cRel: col[1], rRel: col[2] };
}
/* 192.168.127.12 */
function parse_RgceLocRel(blob, length) {
var r = blob.read_shift(2);
var cl = blob.read_shift(2);
var cRel = (cl & 0x8000) >> 15, rRel = (cl & 0x4000) >> 14;
cl &= 0x3FFF;
if(cRel !== 0) while(cl >= 0x100) cl -= 0x100;
return {r:r,c:cl,cRel:cRel,rRel:rRel};
}
/* Ptg Tokens */
/* 172.16.31.10 */
/* PtgArea: area reference token; bits 5-6 of the ptg byte carry the data type. */
function parse_PtgArea(blob, length) {
	var ptgByte = blob[blob.l++];
	var dataType = (ptgByte & 0x60) >> 5;
	return [dataType, parse_RgceArea(blob, 8)];
}
/* 192.168.127.12 */
/* PtgArea3d: area on another sheet; ixti indexes the XTI (external sheet) table. */
function parse_PtgArea3d(blob, length) {
	var dataType = (blob[blob.l] & 0x60) >> 5;
	blob.l += 1;
	var sheetIndex = blob.read_shift(2);
	return [dataType, sheetIndex, parse_RgceArea(blob, 8)];
}
/* 192.168.3.11 */
function parse_PtgAreaErr(blob, length) {
var type = (blob[blob.l++] & 0x60) >> 5;
blob.l += 8;
return [type];
}
/* 172.16.31.10 */
function parse_PtgAreaErr3d(blob, length) {
var type = (blob[blob.l++] & 0x60) >> 5;
var ixti = blob.read_shift(2);
blob.l += 8;
return [type, ixti];
}
/* 172.16.17.32 */
/* PtgAreaN: relative area reference (shared formulas / defined names). */
function parse_PtgAreaN(blob, length) {
	var dataType = (blob[blob.l] & 0x60) >> 5;
	blob.l += 1;
	return [dataType, parse_RgceAreaRel(blob, 8)];
}
/* 192.168.127.12 -- ignore this and look in PtgExtraArray for shape + values */
function parse_PtgArray(blob, length) {
var type = (blob[blob.l++] & 0x60) >> 5;
blob.l += 7;
return [type];
}
/* 192.168.127.123 */
function parse_PtgAttrBaxcel(blob, length) {
var bitSemi = blob[blob.l+1] & 0x01; /* 1 = volatile */
var bitBaxcel = 1;
blob.l += 4;
return [bitSemi, bitBaxcel];
}
/* 192.168.3.11 */
function parse_PtgAttrChoose(blob, length) {
blob.l +=2;
var offset = blob.read_shift(2);
var o = [];
/* offset is 1 less than the number of elements */
for(var i = 0; i <= offset; ++i) o.push(blob.read_shift(2));
return o;
}
/* 172.16.31.10 */
function parse_PtgAttrGoto(blob, length) {
var bitGoto = (blob[blob.l+1] & 0xFF) ? 1 : 0;
blob.l += 2;
return [bitGoto, blob.read_shift(2)];
}
/* 172.16.17.32 */
function parse_PtgAttrIf(blob, length) {
var bitIf = (blob[blob.l+1] & 0xFF) ? 1 : 0;
blob.l += 2;
return [bitIf, blob.read_shift(2)];
}
/* 172.16.31.10 */
function parse_PtgAttrSemi(blob, length) {
var bitSemi = (blob[blob.l+1] & 0xFF) ? 1 : 0;
blob.l += 4;
return [bitSemi];
}
/* 192.168.3.110 (used by PtgAttrSpace and PtgAttrSpaceSemi) */
function parse_PtgAttrSpaceType(blob, length) {
var type = blob.read_shift(1), cch = blob.read_shift(1);
return [type, cch];
}
/* 192.168.127.128 */
/* PtgAttrSpace: skip the 2-byte attr header, then read the space descriptor. */
function parse_PtgAttrSpace(blob, length) {
	blob.read_shift(2);
	return parse_PtgAttrSpaceType(blob, 2);
}
/* 172.16.31.10 */
/* PtgAttrSpaceSemi: identical wire layout to PtgAttrSpace. */
function parse_PtgAttrSpaceSemi(blob, length) {
	blob.read_shift(2);
	return parse_PtgAttrSpaceType(blob, 2);
}
/* 172.16.31.10 TODO */
/* PtgRef: single cell reference token. Returns [data type, RgceLoc].
   Cleanup: the low-5-bit `ptg` local was computed but never used (dispatch
   already identified the token), so it was removed. */
function parse_PtgRef(blob, length) {
	var type = (blob[blob.l] & 0x60) >> 5;
	blob.l += 1;
	var loc = parse_RgceLoc(blob, 4);
	return [type, loc];
}
/* 172.16.31.10 TODO */
/* PtgRefN: relative cell reference token. Returns [data type, RgceLocRel].
   Cleanup: removed the unused `ptg` local (same dead read as parse_PtgRef). */
function parse_PtgRefN(blob, length) {
	var type = (blob[blob.l] & 0x60) >> 5;
	blob.l += 1;
	var loc = parse_RgceLocRel(blob, 4);
	return [type, loc];
}
/* 192.168.127.12 TODO */
/* PtgRef3d: cell reference on another sheet. Returns [data type, ixti, RgceLoc].
   Cleanup: removed the unused `ptg` local (dead read). */
function parse_PtgRef3d(blob, length) {
	var type = (blob[blob.l] & 0x60) >> 5;
	blob.l += 1;
	var ixti = blob.read_shift(2); // XtiIndex
	var loc = parse_RgceLoc(blob, 4);
	return [type, ixti, loc];
}
/* 172.16.58.32 TODO */
/* PtgFunc: fixed-arity builtin function call; iftab indexes the Ftab name/arity
   tables. Cleanup: removed the unused `ptg` and `type` locals (dead reads). */
function parse_PtgFunc(blob, length) {
	blob.l += 1; /* skip the ptg byte; its class bits are not needed here */
	var iftab = blob.read_shift(2);
	return [FtabArgc[iftab], Ftab[iftab]];
}
/* 172.16.31.10 TODO */
/* PtgFuncVar: variable-arity call; tab[0] selects the builtin (Ftab) or
   command (Cetab) name table. Returns [parameter count, function name]. */
function parse_PtgFuncVar(blob, length) {
	blob.l++;
	var cparams = blob.read_shift(1);
	var tab = parsetab(blob);
	var nameTable = (tab[0] === 0) ? Ftab : Cetab;
	return [cparams, nameTable[tab[1]]];
}
function parsetab(blob, length) {
return [blob[blob.l+1]>>7, blob.read_shift(2) & 0x7FFF];
}
/* 172.16.58.3 */
/* PtgAttrSum: SUM shortcut attribute -- no payload to interpret, skip 4 bytes */
var parse_PtgAttrSum = parseread(4);
/* 172.16.31.10 */
/* PtgConcat: concatenation operator token -- single byte, nothing to parse */
var parse_PtgConcat = parseread1;
/* 172.16.31.10 */
function parse_PtgExp(blob, length) {
blob.l++;
var row = blob.read_shift(2);
var col = blob.read_shift(2);
return [row, col];
}
/* 172.16.58.3 */
/* PtgErr: error literal; map the error code byte through the BERR table. */
function parse_PtgErr(blob, length) {
	blob.l++;
	return BERR[blob.read_shift(1)];
}
/* 172.16.31.10 TODO */
function parse_PtgInt(blob, length) { blob.l++; return blob.read_shift(2); }
/* 172.16.58.3 */
function parse_PtgBool(blob, length) { blob.l++; return blob.read_shift(1)!==0;}
/* 172.16.31.10 */
/* PtgNum: 8-byte floating point literal (delegated to parse_Xnum). */
function parse_PtgNum(blob, length) {
	blob.l++;
	return parse_Xnum(blob, 8);
}
/* 172.16.31.10 */
/* PtgStr: short unicode string literal (delegated to the string parser). */
function parse_PtgStr(blob, length) {
	blob.l++;
	return parse_ShortXLUnicodeString(blob);
}
/* 192.168.3.11 + 172.16.17.32{3,4,5,6,7} */
/* Parse one serialized array constant (SerAr). Returns [type byte, value];
   each fixed-size variant advances the blob pointer past its 8-byte payload,
   while the Num/Str variants let their sub-parsers consume the bytes. */
function parse_SerAr(blob) {
	var val = [];
	switch((val[0] = blob.read_shift(1))) {
		/* 172.16.58.3 */
		case 0x04: /* SerBool -- boolean */
			val[1] = parsebool(blob, 1) ? 'TRUE' : 'FALSE';
			blob.l += 7; break;
		/* 172.16.17.32 */
		case 0x10: /* SerErr -- error */
			val[1] = BERR[blob[blob.l]];
			blob.l += 8; break;
		/* 192.168.127.12 */
		case 0x00: /* SerNil -- honestly, I'm not sure how to reproduce this */
			blob.l += 8; break;
		/* 192.168.127.12 */
		case 0x01: /* SerNum -- Xnum */
			val[1] = parse_Xnum(blob, 8); break;
		/* 172.16.17.32 */
		case 0x02: /* SerStr -- XLUnicodeString (<256 chars) */
			val[1] = parse_XLUnicodeString(blob); break;
		// default: throw "Bad SerAr: " + val[0]; /* Unreachable */
	}
	return val;
}
/* 192.168.3.11 */
/* PtgExtraMem: count-prefixed list of Ref8U range structures. */
function parse_PtgExtraMem(blob, cce) {
	var count = blob.read_shift(2);
	var refs = [];
	while(refs.length < count) refs.push(parse_Ref8U(blob, 8));
	return refs;
}
/* 192.168.3.119 */
function parse_PtgExtraArray(blob) {
var cols = 1 + blob.read_shift(1); //DColByteU
var rows = 1 + blob.read_shift(2); //DRw
for(var i = 0, o=[]; i != rows && (o[i] = []); ++i)
for(var | |
0x0f) << 4) | (self.theValue[0] & 0x0f)
# flags to be done in 2nd half
return v & andMask
if self.function == SoftFunction.RLD2:
# 2nd half of the RLD operation
# A[7..4] <- A[7..4], A[3..0] <- (HL)[7..4]
# translated (HL -> TMP -> ALU2, A -> ACT - > ALU1)
# (ALU.OUT)[7..4] <- (ALU1)[7..4], (ALU.OUT)[3..0] <- (ALU2)[7..4]
v = (self.theValue[0] & 0xf0) | ((self.theValue[1] & 0xf0) >> 4)
# only some flags affected
self.setSingleOutFlag( SoftFlag.HALFCARRY, False )
self.setSingleOutFlag( SoftFlag.ADDSUB, False )
self.setSingleOutFlag( SoftFlag.PARITYOVER, v % 2 == 0 )
self.setSingleOutFlag( SoftFlag.ZERO, v == 0 )
self.setSingleOutFlag( SoftFlag.SIGN, v > 127 or v < -128 )
return v & andMask
if self.function == SoftFunction.RRD1:
# 1st half of the RRD operation
# (HL)[7..4] <- A[3..0], (HL)[3..0] <- (HL)[7..4]
# translated (HL -> TMP -> ALU2, A -> ACT - > ALU1)
# (ALU.OUT)[7..4] <- (ALU1)[3..0], (ALU.OUT)[3..0] <- (ALU2)[7..4]
v = ((self.theValue[0] & 0x0f) << 4) | ((self.theValue[1] & 0xf0) >> 4)
# flags to be done in 2nd half
return v & andMask
if self.function == SoftFunction.RRD2:
# 2nd half of the RRD operation
# A[7..4] <- A[7..4], A[3..0] <- (HL)[3..0]
# translated (HL -> TMP -> ALU2, A -> ACT - > ALU1)
# (ALU.OUT)[7..4] <- (ALU1)[7..4], (ALU.OUT)[3..0] <- (ALU2)[3..0]
v = (self.theValue[0] & 0xf0) | (self.theValue[1] & 0x0f)
# only some flags affected
self.setSingleOutFlag( SoftFlag.HALFCARRY, False )
self.setSingleOutFlag( SoftFlag.ADDSUB, False )
self.setSingleOutFlag( SoftFlag.PARITYOVER, v % 2 == 0 )
self.setSingleOutFlag( SoftFlag.ZERO, v == 0 )
self.setSingleOutFlag( SoftFlag.SIGN, v > 127 or v < -128 )
return v & andMask
if self.function == SoftFunction.ADD_TWO_COMPL_OP2:
return self.theValue[0] + Helper.fromTwosComplement(self.theValue[1])
if self.function == SoftFunction.BIT:
# pass back noting
v1 = ((self.theValue[0] & andMask) & 0x38) >> 3
v2 = 1 << (v1 & 0x03)
v = (self.theValue[1] & andMask) & v2
self.setSingleOutFlag( SoftFlag.ADDSUB, False )
# self.setSingleOutFlag( SoftFlag.CARRY, v < 0 )
# self.setSingleOutFlag( SoftFlag.PARITYOVER, v > 127 or v < -128 )
self.setSingleOutFlag( SoftFlag.HALFCARRY, True )
self.setSingleOutFlag( SoftFlag.ZERO, v == 0 )
# self.setSingleOutFlag( SoftFlag.SIGN, v & 0x80 > 0 )
return self.theValue[0]
if self.function == SoftFunction.BIT:
# pass back noting
v1 = ((self.theValue[0] & andMask) & 0x38) >> 3
v2 = 1 << (v1 & 0x03)
v = (self.theValue[1] & andMask) & v2
self.setSingleOutFlag( SoftFlag.ADDSUB, False )
# self.setSingleOutFlag( SoftFlag.CARRY, v < 0 )
# self.setSingleOutFlag( SoftFlag.PARITYOVER, v > 127 or v < -128 )
self.setSingleOutFlag( SoftFlag.HALFCARRY, True )
self.setSingleOutFlag( SoftFlag.ZERO, v == 0 )
# self.setSingleOutFlag( SoftFlag.SIGN, v & 0x80 > 0 )
return self.theValue[0]
if self.function == SoftFunction.SET:
v1 = ((self.theValue[0] & andMask) & 0x38) >> 3
v2 = 1 << (v1 & 0x03)
v = (self.theValue[1] & andMask) | v2
return v
def output(self, byteIdx=-1):
# calculation and HI/LO are mutually exclusive
if self.function == SoftFunction.NONE:
# register behaviour and 16bit special case
v = self.theValue
if self.compositeLow is not None:
v = self.compositeLow.theValue
if self.compositeHi is not None:
v = v + (self.compositeHi.theValue << 8)
else:
# ALU / INCer
v = self.performFunction()
# byte selector?
if byteIdx == 0:
return v & 0x00ff
elif byteIdx == 1:
return (v & 0xff00) >> 8
return v
    def setFunction(self, function: SoftFunction):
        """Select the ALU/INCer operation this register applies on output."""
        self.function = function
    @property
    def value(self):
        """Current register value (computed via output() for function registers)."""
        return self.output()
    @value.setter
    def value(self, newval):
        """Latch a new value into the register."""
        self.latch(newval)
    @property
    def flags(self):
        """Flag bits produced by the last operation (output side)."""
        return self.outFlags
    @flags.setter
    def flags(self, newval):
        # note the asymmetry: the getter reads outFlags, the setter feeds inFlags
        """Feed flag bits into the unit (input side)."""
        self.inFlags = newval
class SoftCPU:
""" Software emulated CPU. Pre-stage towards FPGA-CPU, therefore this emulation
strives to do the things much hardware-alike """
    def addRegister(self, r: SoftRegister):
        """Add register `r` to the CPU's register bank, keyed by its name."""
        self.registers[r.name] = r
def registerInfo(self):
""" Short debug string """
# keys = list(self.registers.keys())
# keys.sort()
keys = "<KEY> ACT TMP I R ALU DISP SP INC2 ABUS DBUS ABUF DBUF".split(sep=' ')
res = ""
for k in keys:
r = self.registers[k]
if r.name == 'F':
res += " F "
res += 'C' if ( r.value & SoftFlag.CARRY > 0 ) else '-'
res += 'N' if ( r.value & SoftFlag.ADDSUB > 0 ) else '-'
res += 'PV' if ( r.value & SoftFlag.PARITYOVER > 0 ) else '--'
res += 'H' if ( r.value & SoftFlag.HALFCARRY > 0 ) else '-'
res += 'Z' if ( r.value & SoftFlag.ZERO > 0 ) else '-'
res += 'S' if ( r.value & SoftFlag.SIGN > 0 ) else '-'
res += " "
elif r.value is not None and isinstance(r.value, int):
res += "" + r.name + " {num:{fill}{width}x} ".format(num=r.value, fill='0', width=int(r.bits/4))
else:
res += "" + r.name + " ?? "
return res
def __init__(self):
# allocated register bank
# endianness: https://stackoverflow.com/questions/21639597/z80-register-endianness
self.registers = {}
self.addRegister(SoftRegister('A'))
self.addRegister(SoftRegister("A'"))
self.addRegister(SoftRegister('F'))
self.addRegister(SoftRegister("F'"))
self.addRegister(SoftRegister('B'))
self.addRegister(SoftRegister('C'))
self.addRegister(SoftRegister('D'))
self.addRegister(SoftRegister('E'))
self.addRegister(SoftRegister('E'))
self.addRegister(SoftRegister('H'))
self.addRegister(SoftRegister('L'))
self.addRegister(SoftRegister("B'"))
self.addRegister(SoftRegister("C'"))
self.addRegister(SoftRegister("D'"))
self.addRegister(SoftRegister("E'"))
self.addRegister(SoftRegister("E'"))
self.addRegister(SoftRegister("H'"))
self.addRegister(SoftRegister("L'"))
self.addRegister(SoftRegister('BC', self.registers['C'], self.registers['B'], bits=16))
self.addRegister(SoftRegister('DE', self.registers['E'], self.registers['D'], bits=16))
self.addRegister(SoftRegister('HL', self.registers['L'], self.registers['H'], bits=16))
self.addRegister(SoftRegister('INSTR'))
self.addRegister(SoftRegister('I'))
self.addRegister(SoftRegister('R'))
self.addRegister(SoftRegister('PC', bits=16))
self.addRegister(SoftRegister('SP', bits=16))
self.addRegister(SoftRegister('IX', bits=16))
self.addRegister(SoftRegister('IY', bits=16))
self.addRegister(SoftRegister('ACT'))
self.addRegister(SoftRegister('TMP'))
self.addRegister(SoftRegister('ALU', latchNum=2, function=SoftFunction.NONE))
self.addRegister(SoftRegister('DISP', latchNum=2, function=SoftFunction.ADD_TWO_COMPL_OP2, bits=16))
self.addRegister(SoftRegister('INC2', function=SoftFunction.PURE_INC, bits=16))
self.addRegister(SoftRegister('ABUF', bits=16))
self.addRegister(SoftRegister('CBUF'))
self.addRegister(SoftRegister('DBUF'))
self.addRegister(SoftRegister('ABUS', bits=16))
self.addRegister(SoftRegister('DBUS'))
self.totalCycleCount = 0
self.memory = bytearray()
def setMemory(self, orgstart: int, ba: bytearray):
""" Add a portion of memory to the soft CPU ones """
if len(self.memory) < orgstart:
# add a spacing
self.memory.extend(map(lambda k:0, range(orgstart - len(self.memory))))
self.memory.extend(ba)
else:
for i in range(len(ba)):
if orgstart+i < len(self.memory):
self.memory[orgstart+i] = ba[i]
else:
self.memory.append(ba[i])
def memoryRead(self, adr: int):
""" Read byte """
if adr < 0 or adr >= len(self.memory):
OPTIONS.debug(1, "Invalid memory read access to $%04x" % adr)
return 0
else:
return self.memory[adr]
def memoryWrite(self, adr: int, value: int):
""" Write byte """
if adr < 0 or adr >= len(self.memory):
OPTIONS.debug(1, "Invalid memory write access to $%04x" % adr)
else:
self.memory[adr] = value & 0x00ff
def performCycle(self, operations: str):
""" Performs one HW emulation cycle. `operations` contains a comma divided list of
operation labels, such as `DBUF.L.IN`, which would be: "latch data bus buffer inward enabled" """
self.totalCycleCount += 1
OPTIONS.debug(2, "performCycle (%d) for operations %s" % (self.totalCycleCount, operations))
if OPTIONS.markAtLineNo is not None:
if self.totalCycleCount == OPTIONS.markAtLineNo:
print("*MARK@SOFTCPU*")
# split operations
if isinstance(operations, str):
ops = map(lambda k: k.strip().upper(), operations.split(sep=','))
elif isinstance(operations,list):
ops = operations
else:
ops = []
# shortcut
r = self.registers
# this level of emulation is pretty simple, nearly stupid
# there is an implicit execution order, which is
# basically: address bus, data bus in, data bus out
for op in ops:
# address bus
if op == "BC.OE":
r['ABUS'].value = r['BC'].value
elif op == "DE.OE":
r['ABUS'].value = r['DE'].value
elif op == "HL.OE":
r['ABUS'].value = r['HL'].value
elif op == "PC.OE":
r['ABUS'].value = r['PC'].value
elif op == "SP.OE":
r['ABUS'].value = r['SP'].value
elif op == "IX.OE":
r['ABUS'].value = r['IX'].value
elif op == "IY.OE":
r['ABUS'].value = r['IY'].value
elif op == "INC2.P":
r['INC2'].setFunction(SoftFunction.PURE_INC)
elif op == "INC2.N":
r['INC2'].setFunction(SoftFunction.PURE_DEC)
elif op == "INC2.L":
r['INC2'].value = r['ABUS'].value
elif op == "INC2.OE":
r['ABUS'].value = r['INC2'].value
elif op == "DISP.L.X":
r['DISP'].latch(r['IX'].value, latchIdx=0)
elif op == "DISP.L.X":
r['DISP'].latch(r['IX'].value, latchIdx=0)
elif op == "DISP.L.Y":
r['DISP'].latch(r['IY'].value, latchIdx=0)
elif op == "DISP.L.DBUS":
r['DISP'].latch(r['DBUS'].value, latchIdx=1)
elif op == "DISP.OE":
r['ABUS'].value = r['DISP'].value
elif op == 'ABUF.L':
OPTIONS.debug(2, ".. set external adress bus: $%x" % r['ABUS'].value)
r['ABUF'].value = r['ABUS'].value
# data bus in?
# TODO: check, if "ignoring" DBUS.value is right approach
elif op == 'DBUF.L.IN':
data = self.memoryRead(r['ABUF'].value)
OPTIONS.debug(2, ".. perform read memory $%04x will be: $%02x" % (r['ABUF'].value, data))
r['DBUF'].value = data
r['DBUS'].value = data
# ALU might also generate data for the dbus
elif op == "TMP.OE.ALU":
r['ALU'].latch(r['TMP'].value, latchIdx=1)
elif op == "ACT.OE":
r['ALU'].latch(r['ACT'].value, latchIdx=0)
elif op == "ALU.OP.ADD":
r['ALU'].flags = r['F'].value
r['ALU'].setFunction(SoftFunction.ADD)
elif op == "ALU.OP.ADC":
r['ALU'].flags = r['F'].value
r['ALU'].setFunction(SoftFunction.ADC)
elif op == "ALU.OP.SUB":
r['ALU'].flags = r['F'].value
r['ALU'].setFunction(SoftFunction.SUB)
elif op == "ALU.OP.SBC":
r['ALU'].flags = r['F'].value
r['ALU'].setFunction(SoftFunction.SBC)
elif op == "ALU.OP.CP":
r['ALU'].flags = r['F'].value
r['ALU'].setFunction(SoftFunction.CP)
elif op == "ALU.OP.INC":
r['ALU'].flags = r['F'].value
r['ALU'].setFunction(SoftFunction.INC)
elif op | |
('30',
'Still patient or expected to return for outpatient services (i.e. still a patient)'),
('31 ... 39',
'Still patient to be defined at state level, if necessary (i.e. still a patient)'),
('40', 'Expired (i.e. died) at home'),
('41',
'Expired (i.e. died) in a medical facility; e.g., hospital, SNF, ICF, or free standing '
'hospice'),
('42', 'Expired (i.e. died) - place unknown'))),
'HL70116': ('Bed status',
(('C', 'Closed'),
('H', 'Housekeeping'),
('I', 'Isolated'),
('K', 'Contaminated'),
('O', 'Occupied'),
('U', 'Unoccupied'))),
'HL70121': ('Response flag',
(('D', 'Same as R, also other associated segments'),
('E', 'Report exceptions only'),
('F', 'Same as D, plus confirmations explicitly'),
('N', 'Only the MSA segment is returned'),
('R', 'Same as E, also Replacement and Parent-Child'))),
'HL70122': ('Charge type',
(('CH', 'Charge'),
('CO', 'Contract'),
('CR', 'Credit'),
('DP', 'Department'),
('GR', 'Grant'),
('NC', 'No Charge'),
('PC', 'Professional'),
('RS', 'Research'))),
'HL70123': ('Result status',
(('A', 'Some, but not all, results available'),
('C', 'Correction to results'),
('F',
'Final results; results stored and verified. Can only be changed with a corrected result.'),
('I',
'No results available; specimen received, procedure incomplete'),
('O', 'Order received; specimen not yet received'),
('P',
'Preliminary: A verified early result is available, final results not yet obtained'),
('R', 'Results stored; not yet verified'),
('S',
'No results available; procedure scheduled, but not done'),
('X', 'No results available; Order canceled.'),
('Y',
'No order on record for this test. (Used only on queries)'),
('Z', 'No record of this patient. (Used only on queries)'))),
'HL70124': ('Transportation mode',
(('CART', 'Cart - patient travels on cart or gurney'),
('PORT', "The examining device goes to patient's location"),
('WALK', 'Patient walks to diagnostic service'),
('WHLC', 'Wheelchair'))),
'HL70125': ('Value type',
(('AD', 'Address'),
('CE', 'Coded Entry'),
('CF', 'Coded Element With Formatted Values'),
('CK', 'Composite ID With Check Digit'),
('CN', 'Composite ID And Name'),
('CP', 'Composite Price'),
('CX', 'Extended Composite ID With Check Digit'),
('DT', 'Date'),
('ED', 'Encapsulated Data'),
('FT', 'Formatted Text (Display)'),
('MO', 'Money'),
('NM', 'Numeric'),
('PN', 'Person Name'),
('RP', 'Reference Pointer'),
('SN', 'Structured Numeric'),
('ST', 'String Data.'),
('TM', 'Time'),
('TN', 'Telephone Number'),
('TS', 'Time Stamp (Date & Time)'),
('TX', 'Text Data (Display)'),
('XAD', 'Extended Address'),
('XCN', 'Extended Composite Name And Number For Persons'),
('XON',
'Extended Composite Name And Number For Organizations'),
('XPN', 'Extended Person Name'),
('XTN', 'Extended Telecommunications Number'))),
'HL70126': ('Quantity limited request',
(('CH', 'Characters'),
('LI', 'Lines'),
('PG', 'Pages'),
('RD', 'Records'),
('ZO', 'Locally defined'))),
'HL70127': ('Allergen type',
(('AA', 'Animal Allergy'),
('DA', 'Drug allergy'),
('EA', 'Environmental Allergy'),
('FA', 'Food allergy'),
('LA', 'Pollen Allergy'),
('MA', 'Miscellaneous allergy'),
('MC', 'Miscellaneous contraindication'),
('PA', 'Plant Allergy'))),
'HL70128': ('Allergy severity',
(('MI', 'Mild'),
('MO', 'Moderate'),
('SV', 'Severe'),
('U', 'Unknown'))),
'HL70130': ('Visit user code',
(('HO', 'Home'),
('MO', 'Mobile Unit'),
('PH', 'Phone'),
('TE', 'Teaching'))),
'HL70133': ('Procedure practitioner identifier code type',
(('AN', 'Anesthesiologist/Anesthetist'),
('AS', 'Assistant Surgeon'),
('CM', 'Certified Nurse Midwife'),
('NP', 'Nurse Practitioner'),
('PR', 'Procedure MD/ Surgeon'),
('PS', 'Primary Surgeon'),
('RD', 'Radiologist'),
('RS', 'Resident'),
('SN', 'Scrub Nurse'))),
'HL70135': ('Assignment of benefits',
(('M', 'Modified assignment'), ('N', 'No'), ('Y', 'Yes'))),
'HL70136': ('Yes/no indicator', (('N', 'No'), ('Y', 'Yes'))),
'HL70137': ('Mail claim party',
(('E', 'Employer'),
('G', 'Guarantor'),
('I', 'Insurance company'),
('O', 'Other'),
('P', 'Patient'))),
'HL70140': ('Military service',
(('AUSA', 'Australian Army'),
('AUSAF', 'Australian Air Force'),
('AUSN', 'Australian Navy'),
('NATO', 'North Atlantic Treaty Organization'),
('NOAA', 'National Oceanic and Atmospheric Administration'),
('USA', 'U.S. Army'),
('USAF', 'U.S. Air Force'),
('USCG', 'U.S. Coast Guard'),
('USMC', 'U.S. Marines'),
('USN', 'U.S. Navy'),
('USPHS', 'U.S. Public Health Service'))),
'HL70141': ('Military rank/grade',
(('E1 ... E9', 'Enlisted'),
('O1 ... O10', 'Officers'),
('W1 ... W4', 'Warrant Officers'))),
'HL70142': ('Military status',
(('ACT', 'Active duty'),
('DEC', 'Deceased'),
('RET', 'Retired'))),
'HL70144': ('Eligibility source',
(('1', 'Insurance company'),
('2', 'Employer'),
('3', 'Insured presented policy'),
('4', 'Insured presented card'),
('5', 'Signed statement on file'),
('6', 'Verbal information'),
('7', 'None'))),
'HL70145': ('Room type',
(('2ICU', 'Second intensive care unit'),
('2PRI', 'Second private room'),
('2SPR', 'Second semi-private room'),
('ICU', 'Intensive care unit'),
('PRI', 'Private room'),
('SPR', 'Semi-private room'))),
'HL70146': ('Amount type',
(('DF', 'Differential'),
('LM', 'Limit'),
('PC', 'Percentage'),
('RT', 'Rate'),
('UL', 'Unlimited'))),
'HL70147': ('Policy type',
(('2ANC', 'Second ancillary'),
('2MMD', 'Second major medical'),
('3MMD', 'Third major medical'),
('ANC', 'Ancillary'),
('MMD', 'Major medical'))),
'HL70148': ('Penalty type',
(('AT', 'Currency amount'), ('PC', 'Percentage'))),
'HL70149': ('Day type',
(('AP', 'Approved'), ('DE', 'Denied'), ('PE', 'Pending'))),
'HL70150': ('Pre-certification patient type',
(('ER', 'Emergency'),
('IPE', 'Inpatient elective'),
('OPE', 'Outpatient elective'),
('UR', 'Urgent'))),
'HL70153': ('Value code',
(('01', 'Most common semi-private rate'),
('02', 'Hospital has no semi-private rooms'),
('04',
'Inpatient professional component charges which are combined billed'),
('05',
'Professional component included in charges and also billed separate to carrier'),
('06', 'Medicare blood deductible'),
('08',
'Medicare life time reserve amount in the first calendar year'),
('09',
'Medicare co-insurance amount in the first calendar year'),
('10', 'Lifetime reserve amount in the second calendar year'),
('11', 'Co-insurance amount in the second calendar year'),
('12',
'Working aged beneficiary/spouse with employer group health plan'),
('13',
'ESRD beneficiary in a Medicare coordination period with an employer group health plan'),
('14', 'No Fault including auto/other'),
('15', "Worker's Compensation"),
('16', 'PHS, or other federal agency'),
('17', 'Payer code'),
('21', 'Catastrophic'),
('22', 'Surplus'),
('23', 'Recurring monthly incode'),
('24', 'Medicaid rate code'),
('30', 'Pre-admission testing'),
('31', 'Patient liability amount'),
('37', 'Pints of blood furnished'),
('38', 'Blood deductible pints'),
('39', 'Pints of blood replaced'),
('40',
'New coverage not implemented by HMO (for inpatient service only)'),
('41', 'Black lung'),
('42', 'VA'),
('43', 'Disabled beneficiary under age 64 with LGHP'),
('44',
'Amount provider agreed to accept from primary payer when this amount is less than charges but '
'higher than payment received,, then a Medicare secondary payment is due'),
('45', 'Accident hour'),
('46', 'Number of grace days'),
('47', 'Any liability insurance'),
('48', 'Hemoglobin reading'),
('49', 'Hematocrit reading'),
('50', 'Physical therapy visits'),
('51', 'Occupational therapy visits'),
('52', 'Speech therapy visits'),
('53', 'Cardiac rehab visits'),
('56', 'Skilled nurse - home visit hours'),
('57', 'Home health aide - home visit hours'),
('58', 'Arterial blood gas'),
('59', 'Oxygen saturation'),
('60', 'HHA branch MSA'),
('67', 'Peritoneal dialysis'),
('68', 'EPO-drug'),
('70 ... 72', 'Payer codes'),
('75 ... 79', 'Payer codes'),
('80', 'Psychiatric visits'),
('81', 'Visits subject to co-payment'),
('A1', 'Deductible payer A'),
('A2', 'Coinsurance payer A'),
('A3', 'Estimated responsibility payer A'),
('X0', 'Service excluded on primary policy'),
('X4', 'Supplemental coverage'))),
'HL70155': ('Accept/application acknowledgment conditions',
(('AL', 'Always'),
('ER', 'Error/reject conditions only'),
('NE', 'Never'),
('SU', 'Successful completion only'))),
'HL70156': ('Which date/time qualifier',
(('ANY', 'Any date/time within a range'),
('COL',
'Collection date/time, equivalent to film or sample collection date/time'),
('ORD', 'Order date/time'),
('RCT',
'Specimen receipt date/time, receipt of specimen in filling ancillary (Lab)'),
('REP',
'Report date/time, report date/time at filing ancillary (i.e., Lab)'),
('SCHED', 'Schedule date/time'))),
'HL70157': ('Which date/time status qualifier',
(('ANY', 'Any status'),
('CFN', 'Current final value, whether final or corrected'),
('COR', 'Corrected only (no final with corrections)'),
('FIN', 'Final only (no corrections)'),
('PRE', 'Preliminary'),
('REP', 'Report completion date/time'))),
'HL70158': ('Date/time selection qualifier',
(('1ST', 'First value within range'),
('ALL', 'All values within the range'),
('LST', 'Last value within the range'),
('REV',
'All values within the range returned in reverse chronological order (This is the default if '
'not otherwise specified.)'))),
'HL70159': ('Diet code specification type',
(('D', 'Diet'), ('P', 'Preference'), ('S', 'Supplement'))),
'HL70160': ('Tray type',
(('EARLY', 'Early tray'),
('GUEST', 'Guest tray'),
('LATE', 'Late tray'),
('MSG', 'Tray message only'),
('NO', 'No tray'))),
'HL70161': ('Allow substitution',
(('G', 'Allow generic substitutions.'),
('N',
'Substitutions are NOT authorized. (This is the default - null.)'),
('T', 'Allow therapeutic substitutions'))),
'HL70162': ('Route of administration',
(('AP', 'Apply Externally'),
('B', 'Buccal'),
('DT', 'Dental'),
('EP', 'Epidural'),
('ET', 'Endotrachial Tube*'),
('GTT', 'Gastrostomy Tube'),
('GU', 'GU Irrigant'),
('IA', 'Intra-arterial'),
('IB', 'Intrabursal'),
('IC', 'Intracardiac'),
('ICV', 'Intracervical (uterus)'),
('ID', 'Intradermal'),
('IH', 'Inhalation'),
('IHA', 'Intrahepatic Artery'),
('IM', 'Intramuscular'),
('IMR', 'Immerse (Soak) Body Part'),
('IN', 'Intranasal'),
('IO', 'Intraocular'),
('IP', 'Intraperitoneal'),
('IS', 'Intrasynovial'),
('IT', 'Intrathecal'),
('IU', 'Intrauterine'),
('IV', 'Intravenous'),
('MM', 'Mucous Membrane'),
('MTH', 'Mouth/Throat'),
('NG', 'Nasogastric'),
('NP', 'Nasal Prongs*'),
('NS', 'Nasal'),
('NT', 'Nasotrachial Tube'),
('OP', 'Ophthalmic'),
('OT', 'Otic'),
('OTH', 'Other/Miscellaneous'),
('PF', 'Perfusion'),
('PO', 'Oral'),
('PR', 'Rectal'),
('RM', 'Rebreather Mask*'),
('SC', 'Subcutaneous'),
('SD', 'Soaked Dressing'),
('SL', 'Sublingual'),
('TD', 'Transdermal'),
('TL', 'Translingual'),
('TP', 'Topical'),
| |
size and physical extent (``boxsize`` and ``boxcenter``) that ``mesh1``.
edgesin : dict, array, list
An array of :math:`k`-edges which defines the theory :math:`k`-binning; corresponding derivatives will be computed
(see ``edgesin_type``); or a dictionary of such array for each theory projection.
Else a list of derivatives (callable) of theory correlation function w.r.t. each theory basis vector, e.g. each in :math:`k`-bin;
or a dictionary of such list for each theory projection.
If ``periodic`` is ``True``, this should correspond to the derivatives of theory *power spectrum* (instead of correlation function)
w.r.t. each theory basis vector, e.g. each in :math:`k` bin.
projsin : list, default=None
List of :class:`Projection` instances or (multipole, wide-angle order) tuples.
If ``None``, and ``power_ref`` is provided, the list of projections is set
to be able to compute window convolution of theory power spectrum multipoles of orders ``power_ref.ells``.
power_ref : CatalogFFTPower, MeshFFTPower, PowerSpectrumWedges, PowerSpectrumMultipoles, default=None
"Reference" power spectrum estimation, e.g. of the actual data.
It is used to set default values for ``edges``, ``ells``, ``los``, ``boxcenter``, ``compensations`` and ``wnorm`` if those are ``None``.
edges : tuple, array, default=None
If ``los`` is local (``None``), :math:`k`-edges for :attr:`poles`.
Else, one can also provide :math:`\mu`-edges (hence a tuple ``(kedges, muedges)``) for :attr:`wedges`.
If ``kedges`` is ``None``, defaults to edges containing unique :math:`k` (norm) values, see :func:`find_unique_edges`.
``kedges`` may be a dictionary, with keys 'min' (minimum :math:`k`, defaults to 0), 'max' (maximum :math:`k`, defaults to ``np.pi/(boxsize/nmesh)``),
'step' (if not provided :func:`find_unique_edges` is used to find unique :math:`k` (norm) values between 'min' and 'max').
For both :math:`k` and :math:`\mu`, binning is inclusive on the low end and exclusive on the high end, i.e. ``edges[i] <= x < edges[i+1]``.
However, last :math:`\mu`-bin is inclusive on both ends: ``edges[-2] <= mu <= edges[-1]``.
Therefore, with e.g. :math:`\mu`-edges ``[0.2, 0.4, 1.0]``, the last :math:`\mu`-bin includes modes at :math:`\mu = 1.0`.
Similarly, with :math:`\mu`-edges ``[0.2, 0.4, 0.8]``, the last :math:`\mu`-bin includes modes at :math:`\mu = 0.8`.
If ``None``, defaults to the edges used in estimation of ``power_ref``.
ells : list, tuple, default=(0, 2, 4)
Output multipole orders.
If ``None``, defaults to the multipoles used in estimation of ``power_ref``.
los : string, array, default=None
If ``los`` is 'firstpoint' (resp. 'endpoint'), use local (varying) first point (resp. end point) line-of-sight.
Else, may be 'x', 'y' or 'z', for one of the Cartesian axes.
Else, a 3-vector.
If ``None``, defaults to the line-of-sight used in estimation of ``power_ref``.
periodic : bool, default=False
If ``True``, selection function is assumed uniform, periodic.
In this case, ``mesh1`` may be ``None``; in this case ``nmesh`` and ``boxsize`` default to that of ``power_ref``,
else may be set with ``kwargs``.
boxcenter : float, array, default=None
Box center; defaults to 0.
Used only if provided ``mesh1`` and ``mesh2`` are not ``CatalogMesh``.
If ``None``, defaults to the value used in estimation of ``power_ref``.
compensations : list, tuple, string, default=None
Compensations to apply to mesh to (optionally) correct for particle-mesh assignment scheme;
e.g. 'cic' (resp. 'cic-sn') for cic assignment scheme, with (resp. without) interlacing.
In case ``mesh2`` is not ``None`` (cross-correlation), provide a list (or tuple) of two such strings
(for ``mesh1`` and ``mesh2``, respectively).
Used only if provided ``mesh1`` or ``mesh2`` are not ``CatalogMesh``.
wnorm : float, default=None
Window function normalization.
If ``None``, defaults to the value used in estimation of ``power_ref``,
rescaled to the input random weights --- which yields a correct normalization of the window function
for the power spectrum estimation ``power_ref``.
        If ``power_ref`` is not provided, use internal estimate obtained with :func:`normalization` --- which is wrong
        (the normalization :attr:`poles.wnorm` can be reset a posteriori using the above recipe).
shotnoise : float, default=None
Window function shot noise, to use instead of internal estimate, which is 0 in case of cross-correlation
or both ``mesh1`` and ``mesh2`` are :class:`pmesh.pm.RealField`,
and in case of auto-correlation is obtained by dividing :meth:`CatalogMesh.unnormalized_shotnoise`
of ``mesh1`` by window function normalization.
edgesin_type : str, default='smooth'
Technique to transpose ``edgesin`` to Fourier space, relevant only if ``periodic`` is ``False``.
'smooth' uses :func:`get_correlation_function_tophat_derivative`;
'fourier-grid' paints ``edgesin`` on the Fourier mesh (akin to the periodic case), then takes the FFT.
kwargs : dict
Arguments for :class:`ParticleMesh` in case ``mesh1`` is not provided (as may be the case if ``periodic`` is ``True``),
typically ``boxsize``, ``nmesh``, ``mpicomm``.
"""
t0 = time.time()
if power_ref is not None:
if edges is None: edges = _get_attr_in_inst(power_ref, 'edges', insts=(None, 'wedges', 'poles'))
attrs_ref = _get_attr_in_inst(power_ref, 'attrs', insts=(None, 'wedges', 'poles'))
if los is None:
los_type = attrs_ref['los_type']
los = attrs_ref['los']
if los_type != 'global': los = los_type
if boxcenter is None: boxcenter = attrs_ref['boxcenter']
if compensations is None: compensations = attrs_ref['compensations']
if ells is None: ells = _get_attr_in_inst(power_ref, 'ells', insts=(None, 'poles'))
self._set_los(los)
self._set_ells(ells)
self._set_periodic(periodic)
if mesh1 is None:
if not self.periodic:
raise ValueError('mesh1 can be "None" only if periodic = True')
attrs_pm = {'dtype': 'f8', 'mpicomm': mpi.COMM_WORLD}
if power_ref is not None:
attrs_pm.update(boxsize=attrs_ref['boxsize'], nmesh=attrs_ref['nmesh'], dtype=attrs_ref.get('dtype', attrs_pm['dtype']))
attrs_pm.update(kwargs)
translate = {'boxsize': 'BoxSize', 'nmesh': 'Nmesh', 'mpicomm': 'comm'}
attrs_pm = {translate.get(key, key): value for key, value in attrs_pm.items()}
mesh1 = ParticleMesh(**attrs_pm)
self._set_compensations(compensations)
self._set_mesh(mesh1, mesh2=mesh2, boxcenter=boxcenter)
self._set_projsin(projsin)
self._set_edges(edges)
self._set_xin(edgesin, edgesin_type=edgesin_type)
self.wnorm = wnorm
if wnorm is None:
if self.periodic:
self.wnorm = 1.
else:
if power_ref is not None:
ialpha2 = np.prod([self.attrs[name] / power_ref.attrs[name] for name in ['sum_data_weights1', 'sum_data_weights2']])
self.wnorm = ialpha2 * _get_attr_in_inst(power_ref, 'wnorm', insts=(None, 'wedges', 'poles'))
else:
self.wnorm = normalization(mesh1, mesh2)
self.shotnoise = shotnoise
if shotnoise is None:
self.shotnoise = 0.
# Shot noise is non zero only if we can estimate it
if self.autocorr and isinstance(mesh1, CatalogMesh):
self.shotnoise = mesh1.unnormalized_shotnoise() / self.wnorm
self.attrs.update(self._get_attrs())
t1 = time.time()
if self.mpicomm.rank == 0:
self.log_info('Meshes prepared in elapsed time {:.2f} s.'.format(t1 - t0))
self.log_info('Running mesh calculation.')
self.run()
t2 = time.time()
if self.mpicomm.rank == 0:
self.log_info('Mesh calculations performed in elapsed time {:.2f} s.'.format(t2 - t1))
self.log_info('Window function computed in elapsed time {:.2f} s.'.format(t2 - t0))
def _set_periodic(self, periodic=False):
self.periodic = periodic
if self.periodic and self.los_type != 'global':
raise ValueError('Cannot set "periodic" if line-of-sight is local.')
def _set_mesh(self, mesh1, mesh2=None, boxcenter=None):
if self.periodic:
self.attrs = {}
self.autocorr = True
if isinstance(mesh1, ParticleMesh):
self.pm = mesh1
else:
self.pm = mesh1.pm
self.mpicomm = self.pm.comm
self.boxcenter = _make_array(boxcenter if boxcenter is not None else 0., 3, dtype='f8')
else:
super(MeshFFTWindow, self)._set_mesh(mesh1, mesh2=mesh2, boxcenter=boxcenter)
def _set_projsin(self, projsin):
if projsin is None:
if self.ells is None:
raise ValueError('If no output multipoles requested, provide "projsin"')
ellmax = max(self.ells)
projsin = [(ell, 0) for ell in range(0, ellmax + 1, 2)]
if self.los_type in ['firstpoint', 'endpoint']:
projsin += PowerSpectrumOddWideAngleMatrix.propose_out(projsin, wa_orders=1)
self.projsin = [Projection(proj) for proj in projsin]
if self.los_type == 'global' and any(proj.wa_order != 0 for proj in self.projsin):
raise ValueError('With global line-of-sight, input wide_angle order = 0 only is supported')
def _set_xin(self, edgesin, edgesin_type='fourier-grid'):
self.edgesin_type = edgesin_type.lower()
allowed_edgesin_types = ['smooth']
if self.los_type == 'global': allowed_edgesin_types.append('fourier-grid')
if self.edgesin_type not in allowed_edgesin_types:
raise ValueError('edgesin_type must be one of {}'.format(allowed_edgesin_types))
if not isinstance(edgesin, dict):
edgesin = {proj: edgesin for proj in self.projsin}
else:
edgesin = {Projection(proj): edge for proj, edge in edgesin.items()}
self.xin, self.deriv = {}, {}
for proj in self.projsin:
if proj not in edgesin:
raise ValueError('Projection {} not in edgesin'.format(proj))
iscallable = [callable(f) for f in edgesin[proj]]
if any(iscallable):
if not all(iscallable): raise ValueError('Provide callables or floats only for edgesin')
self.deriv[proj] = edgesin[proj]
self.xin[proj] = np.arange(len(self.deriv[proj]))
else:
edges = np.asarray(edgesin[proj])
self.xin[proj] = 3. / 4. * (edges[1:]**4 - edges[:-1]**4) / (edges[1:]**3 - edges[:-1]**3)
if self.periodic or self.edgesin_type == 'fourier-grid':
def _make_fun(low, high):
return lambda k: 1. * ((k >= low) & (k < high))
self.deriv[proj] = [_make_fun(*lh) for lh in zip(edges[:-1], edges[1:])]
else:
self.deriv[proj] = get_correlation_function_tophat_derivative(edges, ell=proj.ell)
def _get_q(self, ellout, mout, projin):
# Called for local (varying) line-of-sight only
# This corresponds to Q defined in https://fr.overleaf.com/read/hpgbwqzmtcxn
# ellout is \ell, mout is m, projin = (\ell^\prime, m^\prime)
Ylmout = get_real_Ylm(ellout, mout)
Ylmins = [get_real_Ylm(projin.ell, m) for m in range(-projin.ell, projin.ell + 1)]
rfield = RealField(self.pm)
| |
<filename>layers/spec.py
#!/usr/bin/python -i
import sys
try:
import urllib.request as urllib2
except ImportError:
import urllib2
from bs4 import BeautifulSoup
import json
import vuid_mapping
#############################
# spec.py script
#
# Overview - this script is intended to generate validation error codes and message strings from the json spec file
# that contains all of the valid usage statements. In addition to generating the header file, it provides a number of
# corollary services to aid in generating/updating the header.
#
# Ideal flow - Pull the valid usage text and IDs from the spec json, pull the IDs from the validation error database,
# then update the database with any new IDs from the json file and generate new database and header file.
#
# TODO:
# 1. When VUs go away (in error DB, but not in json) need to report them and remove from DB as deleted
#
#############################
out_filename = "vk_validation_error_messages.h" # can override w/ '-out <filename>' option
db_filename = "vk_validation_error_database.txt" # can override w/ '-gendb <filename>' option
json_filename = None # can pass in w/ '-json <filename>' option
gen_db = False # set to True when '-gendb <filename>' option provided
json_compare = False # compare existing DB to json file input
json_url = "https://www.khronos.org/registry/vulkan/specs/1.0-extensions/validation/validusage.json" # default (web) source for valid-usage json
read_json = False # set to True when '-json' option provided
# This is the root spec link that is used in error messages to point users to spec sections
#old_spec_url = "https://www.khronos.org/registry/vulkan/specs/1.0/xhtml/vkspec.html"
spec_url = "https://www.khronos.org/registry/vulkan/specs/1.0-extensions/html/vkspec.html"
core_url = "https://www.khronos.org/registry/vulkan/specs/1.0/html/vkspec.html"
ext_url = "https://www.khronos.org/registry/vulkan/specs/1.0-extensions/html/vkspec.html"
# After the custom validation error message, this is the prefix for the standard message that includes the
# spec valid usage language as well as the link to nearest section of spec to that language
error_msg_prefix = "The spec valid usage text states "
validation_error_enum_name = "VALIDATION_ERROR_"
def printHelp():
    """Print command-line usage information for spec.py."""
    print ("Usage: python spec.py [-out <headerfile.h>] [-gendb <databasefile.txt>] [-update] [-json <json_file>] [-help]")
    print ("\n Default script behavior is to parse the specfile and generate a header of unique error enums and corresponding error messages based on the specfile.\n")
    print (" Default specfile is from online at %s" % (spec_url))
    print (" Default headerfile is %s" % (out_filename))
    print (" Default databasefile is %s" % (db_filename))
    print ("\nIf '-gendb' option is specified then a database file is generated to default file or <databasefile.txt> if supplied. The database file stores")
    print (" the list of enums and their error messages.")
    print ("\nIf '-update' option is specified this triggers the master flow to automate updating header and database files using default db file as baseline")
    print (" and online spec file as the latest. The default header and database files will be updated in-place for review and commit to the git repo.")
    # fixed grammar of the two lines below ("is used trigger", "option is it will point")
    print ("\nIf '-json' option is used, trigger the script to load in data from a json file.")
    print ("\nIf '-json-file' option is specified it will point to a local json file, else '%s' is used from the web." % (json_url))
def get8digithex(dec_num):
    """Convert a decimal # into an 8-digit zero-padded lowercase hex string (no '0x' prefix).

    Exits the script if the value does not fit in 32 bits.
    """
    if dec_num > 4294967295:
        print ("ERROR: Decimal # %d can't be represented in 8 hex digits" % (dec_num))
        sys.exit()
    # '%08x' zero-pads in one step; also avoids the trailing 'L' that
    # hex() appends to longs on Python 2 (this script keeps py2 compatibility).
    return "%08x" % dec_num
class Specification:
    def __init__(self):
        """Initialize empty tracking state for spec parsing and error-database bookkeeping."""
        self.tree = None # parsed spec tree; presumably populated by a parse step -- TODO confirm
        self.error_db_dict = {} # dict of previous error values read in from database file
        self.delimiter = '~^~' # delimiter for db file
        # Global dicts used for tracking spec updates from old to new VUs
        self.orig_no_link_msg_dict = {} # Pair of API,Original msg w/o spec link to ID list mapping
        self.orig_core_msg_dict = {} # Pair of API,Original core msg (no link or section) to ID list mapping
        self.last_mapped_id = -10 # start as negative so we don't hit an accidental sequence
        self.orig_test_imp_enums = set() # Track old enums w/ tests and/or implementation to flag any that aren't carried fwd
        # Dict of data from json DB
        # Key is API,<short_msg> which leads to dict w/ following values
        # 'ext' -> <core|<ext_name>>
        # 'string_vuid' -> <string_vuid>
        # 'number_vuid' -> <numerical_vuid>
        self.json_db = {}
        self.json_missing = 0 # count of error-DB entries whose vuid_string is absent from the json (see compareJSON)
        self.struct_to_func_map = {} # Map structs to the API func that they fall under in the spec
        self.duplicate_json_key_count = 0 # NOTE(review): maintained elsewhere; not touched in this chunk
        self.copyright = """/* THIS FILE IS GENERATED. DO NOT EDIT. */
/*
 * Vulkan
 *
 * Copyright (c) 2016 Google Inc.
 * Copyright (c) 2016 LunarG, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Author: <NAME> <<EMAIL>>
 */"""
def readJSON(self):
"""Read in JSON file"""
if json_filename is not None:
with open(json_filename) as jsf:
self.json_data = json.load(jsf, encoding='utf-8')
else:
response = urllib2.urlopen(json_url).read().decode('utf-8')
self.json_data = json.loads(response)
def parseJSON(self):
"""Parse JSON VUIDs into data struct"""
# Format of JSON file is:
# "API": { "core|EXT": [ {"vuid": "<id>", "text": "<VU txt>"}]},
# "VK_KHX_external_memory" & "VK_KHX_device_group" - extension case (vs. "core")
for top_level in sorted(self.json_data):
if "validation" == top_level:
for api in sorted(self.json_data[top_level]):
for ext in sorted(self.json_data[top_level][api]):
for vu_txt_dict in self.json_data[top_level][api][ext]:
print ("Looking at dict for api:ext entry %s:%s" % (api, ext))
vuid = vu_txt_dict['vuid']
vutxt = vu_txt_dict['text']
#print ("%s:%s:%s:%s" % (api, ext, vuid, vutxt))
#print ("VUTXT orig:%s" % (vutxt))
just_txt = BeautifulSoup(vutxt, 'html.parser')
#print ("VUTXT only:%s" % (just_txt.get_text()))
num_vuid = vuid_mapping.convertVUID(vuid)
self.json_db[vuid] = {}
self.json_db[vuid]['ext'] = ext
self.json_db[vuid]['number_vuid'] = num_vuid
self.json_db[vuid]['struct_func'] = api
just_txt = just_txt.get_text().strip()
unicode_map = {
u"\u2019" : "'",
u"\u2192" : "->",
}
for um in unicode_map:
just_txt = just_txt.replace(um, unicode_map[um])
self.json_db[vuid]['vu_txt'] = just_txt.replace("\\", "")
print ("Spec vu txt:%s" % (self.json_db[vuid]['vu_txt']))
#sys.exit()
    def compareJSON(self):
        """Compare parsed json file with existing data read in from DB file"""
        json_db_set = set()
        for vuid in self.json_db: # pull entries out and see which fields we're missing from error_db
            json_db_set.add(vuid)
        # Walk the existing error DB: refresh entries that are still in the json,
        # warn about those that have disappeared. Entries left in json_db_set
        # afterwards are new VUs that need to be added to the error DB.
        for enum in self.error_db_dict:
            vuid_string = self.error_db_dict[enum]['vuid_string']
            if vuid_string not in self.json_db:
                #print ("Full string for %s is:%s" % (enum, full_error_string))
                print ("WARN: Couldn't find vuid_string in json db:%s" % (vuid_string))
                self.json_missing = self.json_missing + 1
                self.error_db_dict[enum]['ext'] = 'core'
                # TODO: Currently GL843 tracks 2 VUs that are missing from json incorrectly
                # Fix will land in 1.0.51 spec. After that we should take some alternative
                # action here to indicate that VUs have gone away.
                # Can have a removed_enums set that we add to and report to user
                #sys.exit()
            else:
                json_db_set.remove(vuid_string)
                self.error_db_dict[enum]['ext'] = self.json_db[vuid_string]['ext']
                # '!' marks a composite core/extension tag, which links into the core spec
                if 'core' == self.json_db[vuid_string]['ext'] or '!' in self.json_db[vuid_string]['ext']:
                    spec_link = "%s#%s" % (core_url, vuid_string)
                else:
                    spec_link = "%s#%s" % (ext_url, vuid_string)
                self.error_db_dict[enum]['error_msg'] = "%s'%s' (%s)" % (error_msg_prefix, self.json_db[vuid_string]['vu_txt'], spec_link)
                print ("Updated error_db error_msg:%s" % (self.error_db_dict[enum]['error_msg']))
            #sys.exit()
        print ("These json DB entries are not in error DB:")
        for extra_vuid in json_db_set:
            print ("\t%s" % (extra_vuid))
            # Add these missing entries into the error_db
            # Create link into core or ext spec as needed
            if 'core' == self.json_db[extra_vuid]['ext'] or '!' in self.json_db[extra_vuid]['ext']:
                spec_link = "%s#%s" % (core_url, extra_vuid)
            else:
                spec_link = "%s#%s" % (ext_url, extra_vuid)
            # New enum name is the prefix plus the 8-digit hex of the numeric vuid
            error_enum = "%s%s" % (validation_error_enum_name, get8digithex(self.json_db[extra_vuid]['number_vuid']))
            self.error_db_dict[error_enum] = {}
            self.error_db_dict[error_enum]['check_implemented'] = 'N'
            self.error_db_dict[error_enum]['testname'] = 'None'
            self.error_db_dict[error_enum]['api'] = self.json_db[extra_vuid]['struct_func']
            self.error_db_dict[error_enum]['vuid_string'] = extra_vuid
            self.error_db_dict[error_enum]['error_msg'] = "%s'%s' (%s)" % (error_msg_prefix, self.json_db[extra_vuid]['vu_txt'], spec_link)
            self.error_db_dict[error_enum]['note'] = ''
            self.error_db_dict[error_enum]['ext'] = self.json_db[extra_vuid]['ext']
            # Implicit VUs end in a known tag rather than a numeric id
            implicit = False
            last_segment = extra_vuid.split("-")[-1]
            if last_segment in vuid_mapping.implicit_type_map:
                implicit = True
            elif not last_segment.isdigit(): # Explicit ids should only have digits in last segment
                print ("ERROR: Found last segment of val error ID that isn't in implicit map and doesn't have numbers in last segment: %s" % (last_segment))
                sys.exit()
            self.error_db_dict[error_enum]['implicit'] = implicit
def genHeader(self, header_file):
"""Generate a header file based on the contents of a parsed spec"""
print ("Generating header %s..." % (header_file))
file_contents = []
file_contents.append(self.copyright)
file_contents.append('\n#pragma once')
file_contents.append('\n// Disable auto-formatting for generated file')
file_contents.append('// clang-format off')
file_contents.append('\n#include <unordered_map>')
file_contents.append('\n// enum values for unique validation error codes')
file_contents.append('// Corresponding | |
#!/usr/bin/env python
# coding=utf-8
"""
"""
from __future__ import division
import os
import pickle
import copy
import numpy as np
import matplotlib.pylab as plt
import pycity_calc.cities.scripts.energy_network_generator as enetgen
import pycity_calc.cities.scripts.energy_sys_generator as esysgen
import pycity_calc.environments.germanmarket as gmarket
import pycity_calc.simulation.energy_balance.city_eb_calc as citeb
import pycity_calc.economic.annuity_calculation as annu
import pycity_calc.economic.city_economic_calc as citecon
def do_wm_comp(city, dhw_scale=True, eeg_pv_limit=False):
# Generate german market instance (if not already included in environment)
ger_market = gmarket.GermanMarket()
# Add GermanMarket object instance to city
city.environment.prices = ger_market
# Scenario 1: Add CHP dim. with single LHN
# #####################################################################
city_scen_1 = copy.deepcopy(city)
# Connect all building nodes to local heating network
dict_e_net_data = {1: {'type': 'heating',
'method': 2,
'nodelist': [1001, 1002, 1003, 1004, 1005, 1006,
1007]}}
# Add energy networks to city
enetgen.add_energy_networks_to_city(city=city_scen_1,
dict_data=dict_e_net_data)
# Generate one feeder with CHP, boiler and TES
list_esys = [(1005, 1, 1)]
# Generate energy systems
esysgen.gen_esys_for_city(city=city_scen_1,
list_data=list_esys,
dhw_scale=dhw_scale)
# Scenario 2: Add decentral CHPs
# #####################################################################
city_scen_2 = copy.deepcopy(city)
# Generate one feeder with CHP, boiler and TES
list_esys = [(1001, 1, 1),
(1002, 1, 1),
(1003, 1, 1),
(1004, 1, 1),
(1005, 1, 1),
(1006, 1, 1),
(1007, 1, 1)]
# Generate energy systems
esysgen.gen_esys_for_city(city=city_scen_2,
list_data=list_esys,
dhw_scale=dhw_scale)
# Scenario 3: Boilers, only
# #####################################################################
city_scen_3 = copy.deepcopy(city)
# Generate one feeder with CHP, boiler and TES
list_esys = [(1001, 0, 1),
(1002, 0, 1),
(1003, 0, 1),
(1004, 0, 1),
(1005, 0, 1),
(1006, 0, 1),
(1007, 0, 1)]
# Generate energy systems
esysgen.gen_esys_for_city(city=city_scen_3,
list_data=list_esys,
dhw_scale=dhw_scale)
# Scenario 4: Boilers and TES, only
# #####################################################################
city_scen_4 = copy.deepcopy(city)
# Generate one feeder with CHP, boiler and TES
list_esys = [(1001, 0, 2),
(1002, 0, 2),
(1003, 0, 2),
(1004, 0, 2),
(1005, 0, 2),
(1006, 0, 2),
(1007, 0, 2)]
# Generate energy systems
esysgen.gen_esys_for_city(city=city_scen_4,
list_data=list_esys,
dhw_scale=dhw_scale)
# Scenario 5: Air/water heat pumps
# #####################################################################
city_scen_5 = copy.deepcopy(city)
# Generate one feeder with CHP, boiler and TES
list_esys = [(1001, 2, 1),
(1002, 2, 1),
(1003, 2, 1),
(1004, 2, 1),
(1005, 2, 1),
(1006, 2, 1),
(1007, 2, 1)]
# Generate energy systems
esysgen.gen_esys_for_city(city=city_scen_5,
list_data=list_esys,
dhw_scale=dhw_scale)
city_scen_5.nodes[1006]['entity'].bes.electricalHeater.qNominal *= 5
# Scenario 6: Air/water heat pumps
# #####################################################################
city_scen_6 = copy.deepcopy(city)
# Generate one feeder with CHP, boiler and TES
list_esys = [(1001, 2, 2),
(1002, 2, 2),
(1003, 2, 2),
(1004, 2, 2),
(1005, 2, 2),
(1006, 2, 2),
(1007, 2, 2)]
# Generate energy systems
esysgen.gen_esys_for_city(city=city_scen_6,
list_data=list_esys,
dhw_scale=dhw_scale)
city_scen_6.nodes[1006]['entity'].bes.electricalHeater.qNominal *= 5
# Scenario 7: Boilers with small PV
# #####################################################################
city_scen_7 = copy.deepcopy(city)
# Generate one feeder with CHP, boiler and TES
list_esys = [(1001, 0, 1),
(1002, 0, 1),
(1003, 0, 1),
(1004, 0, 1),
(1005, 0, 1),
(1006, 0, 1),
(1007, 0, 1),
(1001, 3, 30),
(1002, 3, 30),
(1003, 3, 30),
(1004, 3, 30),
(1005, 3, 30),
(1006, 3, 30),
(1007, 3, 30)
]
# Generate energy systems
esysgen.gen_esys_for_city(city=city_scen_7,
list_data=list_esys,
dhw_scale=dhw_scale)
# Scenario 8: Boilers with medium PV
# #####################################################################
city_scen_8 = copy.deepcopy(city)
# Generate one feeder with CHP, boiler and TES
list_esys = [(1001, 0, 1),
(1002, 0, 1),
(1003, 0, 1),
(1004, 0, 1),
(1005, 0, 1),
(1006, 0, 1),
(1007, 0, 1),
(1001, 3, 60),
(1002, 3, 60),
(1003, 3, 60),
(1004, 3, 60),
(1005, 3, 60),
(1006, 3, 60),
(1007, 3, 60)
]
# Generate energy systems
esysgen.gen_esys_for_city(city=city_scen_8,
list_data=list_esys,
dhw_scale=dhw_scale)
# Scenario 9: Boilers with large PV
# #####################################################################
city_scen_9 = copy.deepcopy(city)
# Generate one feeder with CHP, boiler and TES
list_esys = [(1001, 0, 1),
(1002, 0, 1),
(1003, 0, 1),
(1004, 0, 1),
(1005, 0, 1),
(1006, 0, 1),
(1007, 0, 1),
(1001, 3, 80),
(1002, 3, 80),
(1003, 3, 80),
(1004, 3, 80),
(1005, 3, 80),
(1006, 3, 80),
(1007, 3, 80)
]
# Generate energy systems
esysgen.gen_esys_for_city(city=city_scen_9,
list_data=list_esys,
dhw_scale=dhw_scale)
# Scenario 10: Boilers with PV (over 10 kW peak)
# #####################################################################
city_scen_10 = copy.deepcopy(city)
# Generate one feeder with CHP, boiler and TES
list_esys = [(1001, 0, 1),
(1002, 0, 1),
(1003, 0, 1),
(1004, 0, 1),
(1005, 0, 1),
(1006, 0, 1),
(1007, 0, 1),
(1001, 3, 100),
(1002, 3, 100),
(1003, 3, 100),
(1004, 3, 100),
(1005, 3, 100),
(1006, 3, 100),
(1007, 3, 100)
]
# Generate energy systems
esysgen.gen_esys_for_city(city=city_scen_10,
list_data=list_esys,
dhw_scale=dhw_scale)
# Scenario 11: CHP with PV
# #####################################################################
city_scen_11 = copy.deepcopy(city)
# Connect all building nodes to local heating network
dict_e_net_data = {1: {'type': 'heating',
'method': 2,
'nodelist': [1001, 1002, 1003, 1004, 1005, 1006,
1007]}}
# Add energy networks to city
enetgen.add_energy_networks_to_city(city=city_scen_11,
dict_data=dict_e_net_data)
# Generate one feeder with CHP, boiler and TES (plus PV)
list_esys = [(1005, 1, 1),
(1001, 3, 60),
(1002, 3, 60),
(1003, 3, 60),
(1004, 3, 60),
(1005, 3, 60),
(1006, 3, 60),
(1007, 3, 60)
]
# Generate energy systems
esysgen.gen_esys_for_city(city=city_scen_11,
list_data=list_esys,
dhw_scale=dhw_scale)
# Scenario 12: CHP with PV
# #####################################################################
city_scen_12 = copy.deepcopy(city)
# Connect all building nodes to local heating network
dict_e_net_data = {1: {'type': 'heating',
'method': 2,
'nodelist': [1001, 1002, 1003, 1004, 1005, 1006,
1007]}}
# Add energy networks to city
enetgen.add_energy_networks_to_city(city=city_scen_12,
dict_data=dict_e_net_data)
# Generate one feeder with CHP, boiler and TES (plus PV)
list_esys = [(1005, 1, 1),
(1001, 3, 80),
(1002, 3, 80),
(1003, 3, 80),
(1004, 3, 80),
(1005, 3, 80),
(1006, 3, 80),
(1007, 3, 80)
]
# Generate energy systems
esysgen.gen_esys_for_city(city=city_scen_12,
list_data=list_esys,
dhw_scale=dhw_scale)
# Scenario 13: Boilers with PV (over 10 kW peak)
# #####################################################################
city_scen_13 = copy.deepcopy(city)
# Connect all building nodes to local heating network
dict_e_net_data = {1: {'type': 'heating',
'method': 2,
'nodelist': [1001, 1002, 1003, 1004, 1005, 1006,
1007]}}
# Add energy networks to city
enetgen.add_energy_networks_to_city(city=city_scen_13,
dict_data=dict_e_net_data)
# Generate one feeder with CHP, boiler and TES (plus PV)
list_esys = [(1005, 1, 1),
(1001, 3, 100),
(1002, 3, 100),
(1003, 3, 100),
(1004, 3, 100),
(1005, 3, 100),
(1006, 3, 100),
(1007, 3, 100)
]
# Generate energy systems
esysgen.gen_esys_for_city(city=city_scen_13,
list_data=list_esys,
dhw_scale=dhw_scale)
# #####################################################################
# Generate object instances
# #####################################################################
# Generate annuity object instance
annuity_obj1 = annu.EconomicCalculation()
annuity_obj2 = annu.EconomicCalculation()
annuity_obj3 = annu.EconomicCalculation()
annuity_obj4 = annu.EconomicCalculation()
annuity_obj5 = annu.EconomicCalculation()
annuity_obj6 = annu.EconomicCalculation()
annuity_obj7 = annu.EconomicCalculation()
annuity_obj8 = annu.EconomicCalculation()
annuity_obj9 = annu.EconomicCalculation()
annuity_obj10 = annu.EconomicCalculation()
annuity_obj11 = annu.EconomicCalculation()
annuity_obj12 = annu.EconomicCalculation()
annuity_obj13 = annu.EconomicCalculation()
# Generate energy balance object for city
energy_balance1 = citeb.CityEBCalculator(city=city_scen_1)
energy_balance2 = citeb.CityEBCalculator(city=city_scen_2)
energy_balance3 = citeb.CityEBCalculator(city=city_scen_3)
energy_balance4 = citeb.CityEBCalculator(city=city_scen_4)
energy_balance5 = citeb.CityEBCalculator(city=city_scen_5)
energy_balance6 = citeb.CityEBCalculator(city=city_scen_6)
energy_balance7 = citeb.CityEBCalculator(city=city_scen_7)
energy_balance8 = citeb.CityEBCalculator(city=city_scen_8)
energy_balance9 = citeb.CityEBCalculator(city=city_scen_9)
energy_balance10 = citeb.CityEBCalculator(city=city_scen_10)
energy_balance11 = citeb.CityEBCalculator(city=city_scen_11)
energy_balance12 = citeb.CityEBCalculator(city=city_scen_12)
energy_balance13 = citeb.CityEBCalculator(city=city_scen_13)
# Generate city economic calculator instances
city_eco_calc1 = citecon.CityAnnuityCalc(annuity_obj=annuity_obj1,
energy_balance=energy_balance1)
city_eco_calc2 = citecon.CityAnnuityCalc(annuity_obj=annuity_obj2,
energy_balance=energy_balance2)
city_eco_calc3 = citecon.CityAnnuityCalc(annuity_obj=annuity_obj3,
energy_balance=energy_balance3)
city_eco_calc4 = citecon.CityAnnuityCalc(annuity_obj=annuity_obj4,
energy_balance=energy_balance4)
city_eco_calc5 = citecon.CityAnnuityCalc(annuity_obj=annuity_obj5,
energy_balance=energy_balance5)
city_eco_calc6 = citecon.CityAnnuityCalc(annuity_obj=annuity_obj6,
energy_balance=energy_balance6)
city_eco_calc7 = citecon.CityAnnuityCalc(annuity_obj=annuity_obj7,
energy_balance=energy_balance7)
city_eco_calc8 = citecon.CityAnnuityCalc(annuity_obj=annuity_obj8,
energy_balance=energy_balance8)
city_eco_calc9 = citecon.CityAnnuityCalc(annuity_obj=annuity_obj9,
energy_balance=energy_balance9)
city_eco_calc10 = citecon.CityAnnuityCalc(annuity_obj=annuity_obj10,
energy_balance=energy_balance10)
city_eco_calc11 = citecon.CityAnnuityCalc(annuity_obj=annuity_obj11,
energy_balance=energy_balance11)
city_eco_calc12 = citecon.CityAnnuityCalc(annuity_obj=annuity_obj12,
energy_balance=energy_balance12)
city_eco_calc13 = citecon.CityAnnuityCalc(annuity_obj=annuity_obj13,
energy_balance=energy_balance13)
list_ann = []
list_co2 = []
# Perform energy balance and annuity calculations for all scenarios
(total_annuity_1, co2_1) = city_eco_calc1. \
perform_overall_energy_balance_and_economic_calc(eeg_pv_limit=
eeg_pv_limit)
list_ann.append(total_annuity_1)
list_co2.append(co2_1)
(total_annuity_2, co2_2) = city_eco_calc2. \
perform_overall_energy_balance_and_economic_calc(eeg_pv_limit=
eeg_pv_limit)
list_ann.append(total_annuity_2)
list_co2.append(co2_2)
(total_annuity_3, co2_3) = city_eco_calc3. \
perform_overall_energy_balance_and_economic_calc(eeg_pv_limit=
eeg_pv_limit)
list_ann.append(total_annuity_3)
list_co2.append(co2_3)
(total_annuity_4, co2_4) = city_eco_calc4. \
perform_overall_energy_balance_and_economic_calc(eeg_pv_limit=
eeg_pv_limit)
list_ann.append(total_annuity_4)
list_co2.append(co2_4)
(total_annuity_5, co2_5) = city_eco_calc5. \
perform_overall_energy_balance_and_economic_calc(eeg_pv_limit=
eeg_pv_limit)
list_ann.append(total_annuity_5)
list_co2.append(co2_5)
(total_annuity_6, co2_6) = city_eco_calc6. \
perform_overall_energy_balance_and_economic_calc(eeg_pv_limit=
eeg_pv_limit)
list_ann.append(total_annuity_6)
list_co2.append(co2_6)
(total_annuity_7, co2_7) = city_eco_calc7. \
perform_overall_energy_balance_and_economic_calc(eeg_pv_limit=
eeg_pv_limit)
list_ann.append(total_annuity_7)
list_co2.append(co2_7)
(total_annuity_8, co2_8) = city_eco_calc8. \
perform_overall_energy_balance_and_economic_calc(eeg_pv_limit=
eeg_pv_limit)
list_ann.append(total_annuity_8)
list_co2.append(co2_8)
(total_annuity_9, co2_9) = city_eco_calc9. \
perform_overall_energy_balance_and_economic_calc(eeg_pv_limit=
eeg_pv_limit)
list_ann.append(total_annuity_9)
list_co2.append(co2_9)
(total_annuity_10, co2_10) = city_eco_calc10. \
perform_overall_energy_balance_and_economic_calc(eeg_pv_limit=
eeg_pv_limit)
list_ann.append(total_annuity_10)
list_co2.append(co2_10)
(total_annuity_11, co2_11) = city_eco_calc11. \
perform_overall_energy_balance_and_economic_calc(eeg_pv_limit=
eeg_pv_limit)
list_ann.append(total_annuity_11)
list_co2.append(co2_11)
(total_annuity_12, co2_12) = city_eco_calc12. \
perform_overall_energy_balance_and_economic_calc(eeg_pv_limit=
eeg_pv_limit)
list_ann.append(total_annuity_12)
list_co2.append(co2_12)
(total_annuity_13, co2_13) = city_eco_calc13. \
perform_overall_energy_balance_and_economic_calc(eeg_pv_limit=
eeg_pv_limit)
list_ann.append(total_annuity_13)
list_co2.append(co2_13)
plt.plot([total_annuity_1], [co2_1], label='Scen. 1 (CHP/LHN)', marker='o')
plt.plot([total_annuity_2], [co2_2], label='Scen. 2 (dec. CHP)',
marker='o')
plt.plot([total_annuity_3], [co2_3], label='Scen. 3 (BOI)', marker='o')
plt.plot([total_annuity_4], [co2_4], label='Scen. 4 (BOI/TES)', marker='o')
plt.plot([total_annuity_5], [co2_5], label='Scen. 5 (HP (aw))', marker='o')
plt.plot([total_annuity_6], [co2_6], label='Scen. 6 (HP (ww))', marker='o')
plt.plot([total_annuity_7], [co2_7], label='Scen. 7 (BOI/small PV)',
| |
import pandas as pd
import pathlib
import numpy as np
import sys
import os
from Se_lit_based_dictionary import se_wFGD_dictionary
# For frozen code
fileDir = pathlib.Path(__file__).parents[1]
# For Original Python Code
# fileDir = pathlib.Path(__file__).parents[2]
# Arsenic (As) phase-partitioning coefficients for each air pollution control
# device (APCD). For every device the 'gas', 'solid' and 'liquid' lists are
# parallel: entry i of each list comes from the same literature study and gives
# the fraction of incoming As leaving the device in that phase.
as_dict = { # Studies for bottom ash partitioning coefficients (in order): Cheng et al. (2009), Rubin (1999),
            # Klein et al. (1975), Swanson et al. (2013), Sander (1991), Maier (1990), 3 units from Yokoyama et al (1991),
            # Cheng et al. (2009), Otero-Rey et al. (2003), 4 units from Zheng et al. (2017).
            'Bottom_Ash': {'gas': [0.936, 0.986, 1, 0.99, 0.893, 0.996, 0.9924, 0.9969, 0.9905, 0.96, 0.995, 0.987,
                                   0.972, 0.974, 0.974],
                           'solid': [0.064, 0.014, 0, 0.01, 0.107, 0.004, 0.0076, 0.0031, 0.0095, 0.04, 0.005, 0.013,
                                     0.028, 0.026, 0.026],
                           'liquid': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]},
            # Studies for csESP partitioning coefficients (in order): Helble et al. (2000), Klein et al. (1975), Rubin
            # (1999), Swanson et al. (2013), Ottero-Rey et al. (2003), 2 units from Weng et al. (2017), Swanson et al. (2013),
            # Yokoyama et al. (1991), Chent et al. (2009), Zhao et al. (2016), Zheng et al. (2017), Zhao et al. (2017).
            'csESP': {'gas': [0.04, 0.18, 0.03, 0.01, 0.0413, 0.0178, 0.5, 0.003426, 0.05644, 0.000494, 0.176, 0.000393],
                      'solid': [0.96, 0.82, 0.97, 0.99, 0.9587, 0.9822, 0.5, 0.996574, 0.94356, 0.9995, 0.824, 0.9996],
                      'liquid': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]},
            # Studies for hsESP partitioning coefficients (in order): 2 units from Yokoyama et al. (1991)
            'hsESP': {'gas': [0.01324, 0.0104],
                      'solid': [0.9868, 0.9896],
                      'liquid': [0, 0]},
            # Studies for fabric filter partitioning coefficients (in order): 3 different loads from Zhao et al. (2017)
            'FF': {'gas': [0.00146, 0.00091, 0.00098],
                   'solid': [0.99854, 0.99909, 0.99902],
                   'liquid': [0, 0, 0]},
            # Selective catalytic reduction: all As stays in the gas phase (no studies listed)
            'SCR': {'gas': [1],
                    'solid': [0],
                    'liquid': [0]},
            # Activated carbon injection: all As stays in the gas phase (no studies listed)
            'ACI': {'gas': [1],
                    'solid': [0],
                    'liquid': [0]},
            # Direct sorbent injection: all As stays in the gas phase (no studies listed)
            'DSI': {'gas': [1],
                    'solid': [0],
                    'liquid': [0]},
            # Studies for wetFGD partitioning coefficients (in order): Ondov et al. (1979), Zhu et al. (2014),
            # Alvarez-Ayuso (2006), Zhao et al. (2017), Gutberlet et al. (1985)
            'wetFGD': {'gas': [0, 0.07, 0, 0.727273, 0],
                       'solid': [1-6.5e-5, 0.93, 1, 0.17198, 0.941558],
                       'liquid': [6.5e-5, 0, 0, 0.10075, 0.058442]},
            # Studies for dryFGD partitioning coefficients (in order): Karlsson (1986) and Sander (1991)
            'dryFGD': {'gas': [0.0051, 0.11],
                       'solid': [0.995, 0.89],
                       'liquid': [0, 0]},
            # No control device: everything stays in the flue gas
            'not installed': {'gas': [1],
                              'solid': [0],
                              'liquid': [0]}}
# Chlorine (Cl) phase-partitioning coefficients per APCD. Same layout as
# as_dict: the 'gas'/'solid'/'liquid' lists are parallel, one entry per study.
cl_dict = { # Studies for bottom ash partitioning coefficients (in order): Cheng et al. (2009), Rubin (1999) and Klein et al. (1975), Otero-Rey et al. (2003)
            'Bottom_Ash': {'gas': [0.9982, 0.999, 0.994, 0.98],
                           'solid': [0.0018, 0.001, 0.006, 0.02],
                           'liquid': [0, 0, 0, 0]},
            # Studies for csESP partitioning coefficients (in order): Klein et al. (1975), Rubin (1999), Otero-Rey et al. (2003), Cheng et al. (2009).
            'csESP': {'gas': [0.98, 1, 0.85, 0.96377],
                      'solid': [0.02, 0, 0.15, 0.03623],
                      'liquid': [0, 0, 0, 0]},
            # hsESP: all Cl stays in the gas phase (no studies listed)
            'hsESP': {'gas': [1],
                      'solid': [0],
                      'liquid': [0]},
            # Fabric filter: all Cl stays in the gas phase (no studies listed)
            'FF': {'gas': [1],
                   'solid': [0],
                   'liquid': [0]},
            # Selective catalytic reduction: all Cl stays in the gas phase (no studies listed)
            'SCR': {'gas': [1],
                    'solid': [0],
                    'liquid': [0]},
            # Activated carbon injection: all Cl stays in the gas phase (no studies listed)
            'ACI': {'gas': [1],
                    'solid': [0],
                    'liquid': [0]},
            # Direct sorbent injection: all Cl stays in the gas phase (no studies listed)
            'DSI': {'gas': [1],
                    'solid': [0],
                    'liquid': [0]},
            # Studies for wetFGD partitioning coefficients (in order): Ondov et al. (1979).
            'wetFGD': {'gas': [0.04],
                       'solid': [0],
                       'liquid': [0.96]},
            # dryFGD: all Cl stays in the gas phase (no studies listed)
            'dryFGD': {'gas': [1],
                       'solid': [0],
                       'liquid': [0]},
            # No control device: everything stays in the flue gas
            'not installed': {'gas': [1],
                              'solid': [0],
                              'liquid': [0]}}
# Selenium (Se) phase-partitioning coefficients per APCD. Same layout as
# as_dict: the 'gas'/'solid'/'liquid' lists are parallel, one entry per study.
se_dict = { # Studies for bottom ash partitioning coefficients (in order): Cheng et al. (2009), Rubin (1999), Klein et
            # al. (1975), Swanson et al. (2013), Sander (1991), Maier (1990), Yokoyama et al. (1991), Otero-Rey et al. (2003),
            # and 4 studies from Zheng et al. (2017).
            'Bottom_Ash': {'gas': [0.931, 0.985, 0.837, 1, 0.998, 0.994226, 0.9963, 0.988, 0.96375, 0.959, 0.971, 0.919,
                                   0.9999, 0.965],
                           'solid': [0.069, 0.015, 0.163, 0, 0.002, 0.005774, 0.0037, 0.012, 0.03625, 0.041, 0.029,
                                     0.081, 0.0001, 0.035],
                           'liquid': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]},
            # Studies for csESP partitioning coefficients (in order): Brekke et al. (1995), Helble et al. (2000), Klein
            # et al. (1975), Rubin (1999), Swanson et al. (2013), Guo et al. (2004, 2007), Otero-Rey (2003), Cheng et al. (2009),
            # and Zheng et al. (2017).
            'csESP': {'gas': [0.80, 0.51, 0.03, 0.39, 0.80, 0.02, 0.04, 0.50967, 0.165],
                      'solid': [0.20, 0.49, 0.97, 0.61, 0.20, 0.98, 0.96, 0.49033, 0.835],
                      'liquid': [0, 0, 0, 0, 0, 0, 0, 0, 0]},
            # Studies for hsESP partitioning coefficients (in order): Swanson et al. (2013) and 2 studies from Yokoyama et al. (1991)
            'hsESP': {'gas': [0.80, 0.46462, 0.51802],
                      'solid': [0.20, 0.53538, 0.48198],
                      'liquid': [0, 0, 0]},
            # Studies for fabric filter partitioning coefficients (in order): Brekke et al. (1995)
            'FF': {'gas': [0.35],
                   'solid': [0.65],
                   'liquid': [0]},
            # Selective catalytic reduction: all Se stays in the gas phase (no studies listed)
            'SCR': {'gas': [1],
                    'solid': [0],
                    'liquid': [0]},
            # Activated carbon injection: all Se stays in the gas phase (no studies listed)
            'ACI': {'gas': [1],
                    'solid': [0],
                    'liquid': [0]},
            # Direct sorbent injection: all Se stays in the gas phase (no studies listed)
            'DSI': {'gas': [1],
                    'solid': [0],
                    'liquid': [0]},
            # Studies for wetFGD partitioning coefficients (in order): Ondov et al. (1979), Zhu et al. (2014),
            # Alvarez-Ayuso et al. (1960), Gutberlet et al. (1985).
            # NOTE(review): values are kept as reported; per-study columns may not sum exactly to 1.
            'wetFGD': {'gas': [0.01, 0.03, 0, 0.40],
                       'solid': [0.99, 0.96, 0.98, 0.55],
                       'liquid': [0.002, 0.01, 0.02, 0.049]},
            # Studies for dryFGD partitioning coefficients (in order): Sander (1991)
            'dryFGD': {'gas': [0.010],
                       'solid': [0.990],
                       'liquid': [0]},
            # No control device: everything stays in the flue gas
            'not installed': {'gas': [1],
                              'solid': [0],
                              'liquid': [0]}}
# Selenium partitioning data specific to wet FGD systems, imported from
# Se_lit_based_dictionary (presumably keyed by wFGD configuration — see that
# module for the exact structure).
se_wFGD_dict = se_wFGD_dictionary
# Studies for limestone forced oxidation systems partitioning coefficients: EPRI 1019870(2010)
# Other wFGD systems: EPRI 1017952(2009)
hg_dict = { # Studies for bottom ash partitioning coefficeints (in order): Cheng et al. (2009), Rubin (1999), Klein et
# al. (1975), Devito et al. (2002), Swanson et al. (2013), Sander (1991), Maier (1990), Yokoyama et al. (1991),
# Otero-Rey et al. (2003), 5 studies by Wang et al. (2008), 4 studies by Zheng et al. (2017).
'Bottom_Ash': {'gas': [0.99818, 0.992, 0.981, 0.971, 1, 0.998, 1, 0.999, 0.9967, 0.9942, 0.996, 0.9999,
0.9999, 0.9951, 0.99997, 0.982, 0.999, 0.9996, 0.99998, 0.999],
'solid': [0.00182, 0.008, 0.019, 0.029, 0, 0.002, 0, 0.001, 0.0033, 0.0058, 0.004, 0.0001,
0.0001, 0.005, 0.00003, 0.018, 0.001, 0.0004, 0.00002, 0.001],
'liquid': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]},
# Studies for csESp partitioning coefficients (in order): Brekke et al. (1995), Brown et al. (1999), Helble
# (2000), Klein et al. (1975), Rubin (1999), .Swanson et al. (2013), Aunela-Tapola (1998), Goodarzi (2004), Guo et
# al. (2004), Otero-Rey et al. (2003), 3 studies by Wang et al. (2008), 4 studies by Zheng et al. (2017).
'csESP': {'gas': [0.70, 0.29, 0.71, 0.94, 0.74, 0.98, 0.39, 0.62, 0.86, 0.77, 0.98, 0.66657, 0.85415,
0.44491, 0.82888, 0.9394, 0.788, 0.952, 0.481, 0.659, 0.579, 0.585],
'solid': [0.30, 0.67, 0.29, 0.06, 0.26, 0.02, 0.56, 0.38, 0.14, 0.27, 0.02, 0.33343, 0.14585,
0.55509, 0.17112, 0.061, 0.211, 0.0484, 0.519, 0.341, 0.421, 0.415],
'liquid': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]},
#Studies for hsESP partitioning coeffients (in order): Goodarzi (2004) and Swanson et al. (2013)
'hsESP': {'gas': [0.97, 0.98, 0.74255, 0.97968],
'solid': [0.03, 0.02, 0.25745, 0.02032],
'liquid': [0, 0, 0, 0]},
#Studies for fabric filter partitioning coeffients (in order): Brown et al. (1999), Chu and Porcella (1995), Brekke et al. (2015), 2 studies by Wang et al. (2008)
'FF': {'gas': [0.23, 0.70, 0.40, 0.130, 0.737],
'solid': [0.77, 0.30, 0.60, 0.870, 0.263],
'liquid': [0, 0, 0, 0, 0]},
#Studies for selective catalytic reduction coefficients (in order):
'SCR': {'gas': [1],
'solid': [0],
'liquid': [0]},
#Studies for activated carbon | |
# -*- coding: utf-8 -*-
__author__ = 'ooo'
__date__ = '2019/1/15 12:17'
"""
只有双层耦合的 WaveResNet, 只有 SingleCouple 模块
"""
import math, torch
import torch.nn as nn
import torch.nn.functional as F
class ViewLayer(nn.Module):
    """Flatten everything after the batch dimension with ``Tensor.view``.

    ``dim`` is passed straight to ``view`` as the second size; the default
    ``-1`` lets PyTorch infer the flattened length.
    """

    def __init__(self, dim=-1):
        super(ViewLayer, self).__init__()
        self.dim = dim

    def forward(self, x):
        batch = x.size(0)
        return x.view(batch, self.dim)
class AdaAvgPool(nn.Module):
    """Average pooling with a size switch.

    size == -1: identity (input returned untouched);
    size == 0: global average pool (input must be spatially square);
    size >= 1: average pool with a (size x size) kernel;
    anything else raises NotImplementedError.
    """

    def __init__(self, size=0):
        super(AdaAvgPool, self).__init__()
        self.size = size

    def forward(self, x):
        if self.size == -1:
            return x
        if self.size == 0:
            height, width = x.size(2), x.size(3)
            assert height == width
            return F.avg_pool2d(x, kernel_size=(height, width))
        if self.size >= 1:
            return F.avg_pool2d(x, kernel_size=(self.size, self.size))
        raise NotImplementedError('check the avg kernel size !')
class AvgMaxPool(nn.Module):
    """Spatial pooling selected by name.

    Args:
        method: 'avg' for nn.AvgPool2d or 'max' for nn.MaxPool2d.
        ksize: kernel size (and implicit stride) of the pooling window.

    Raises:
        NotImplementedError: for an unknown method name. (Previously an
        unknown name left ``self.method`` unset and only failed later with an
        AttributeError in forward(); raising here matches the sibling
        ``Activate`` class.)
    """

    def __init__(self, method='avg', ksize=2):
        super(AvgMaxPool, self).__init__()
        if method == 'avg':
            self.method = nn.AvgPool2d(ksize)
        elif method == 'max':
            self.method = nn.MaxPool2d(ksize)
        else:
            raise NotImplementedError('--->%s' % method)

    def forward(self, x):
        return self.method(x)
class Activate(nn.Module):
    """Activation module selected by name: 'relu', 'sigmoid' or 'leaky_relu'.

    Raises NotImplementedError for any other name.
    """

    def __init__(self, method='relu'):
        super(Activate, self).__init__()
        factories = {
            'relu': lambda: nn.ReLU(inplace=True),
            'sigmoid': nn.Sigmoid,
            'leaky_relu': lambda: nn.LeakyReLU(negative_slope=0.02),
        }
        if method not in factories:
            raise NotImplementedError('--->%s' % method)
        self.method = factories[method]()

    def forward(self, x):
        return self.method(x)
class DownSampleA(nn.Module):
    """Halve the spatial resolution: BN-ReLU-3x3conv then a 2x2 pooling
    (avg or max per ``pool``), optionally followed by a second
    BN-ReLU-3x3conv when ``double`` is set."""

    def __init__(self, indepth, outdepth, pool='avg', double=False):
        super(DownSampleA, self).__init__()
        self.double = double
        self.bn1 = nn.BatchNorm2d(indepth)
        self.conv1 = nn.Conv2d(indepth, outdepth, 3, stride=1, padding=1, bias=False)
        self.pool = AvgMaxPool(pool, 2)
        if double:
            self.bn2 = nn.BatchNorm2d(outdepth)
            self.conv2 = nn.Conv2d(outdepth, outdepth, 3, stride=1, padding=1, bias=False)

    def forward(self, x):
        out = self.pool(self.conv1(F.relu(self.bn1(x), inplace=True)))
        if not self.double:
            return out
        return self.conv2(F.relu(self.bn2(out), inplace=True))
class DownSampleB(nn.Module):
    """Halve the spatial resolution with one stride-2 BN-ReLU-3x3conv,
    optionally followed by a stride-1 BN-ReLU-3x3conv when ``double`` is set.
    ``pool`` is accepted for interface parity with the other DownSample
    variants but is unused here."""

    def __init__(self, indepth, outdepth, pool='avg', double=False):
        super(DownSampleB, self).__init__()
        self.double = double
        self.bn1 = nn.BatchNorm2d(indepth)
        self.conv1 = nn.Conv2d(indepth, outdepth, 3, stride=2, padding=1, bias=False)
        if double:
            self.bn2 = nn.BatchNorm2d(outdepth)
            self.conv2 = nn.Conv2d(outdepth, outdepth, 3, stride=1, padding=1, bias=False)

    def forward(self, x):
        out = self.conv1(F.relu(self.bn1(x), inplace=True))
        if self.double:
            out = self.conv2(F.relu(self.bn2(out), inplace=True))
        return out
class DownSampleC(nn.Module):
    """Halve the spatial resolution with two BN-ReLU-3x3convs; the second
    conv carries the stride of 2. ``pool`` and ``double`` are accepted for
    interface parity with the other DownSample variants but are ignored."""

    def __init__(self, indepth, outdepth, pool='avg', double=True):
        super(DownSampleC, self).__init__()
        self.bn1 = nn.BatchNorm2d(indepth)
        self.conv1 = nn.Conv2d(indepth, outdepth, 3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(outdepth)
        self.conv2 = nn.Conv2d(outdepth, outdepth, 3, stride=2, padding=1, bias=False)

    def forward(self, x):
        out = F.relu(self.bn1(x), inplace=True)
        out = self.conv1(out)
        out = F.relu(self.bn2(out), inplace=True)
        return self.conv2(out)
class SweetBlock(nn.Module):
    """Two-branch coupling block.

    Operates on a 3-tuple ``(x1, x2, pred)`` = (big feature map, small feature
    map, list of auxiliary predictions). A stride-2 conv couples big -> small
    and a stride-2 transposed conv couples small -> big; ``slink`` selects how
    the cross-branch residual additions are wired (see forward()).
    """

    # Maps the downsample-method tag to its implementation; __init__ rebinds
    # self.down_func to the selected class (the instance attribute shadows
    # this class-level dict).
    down_func = {'A': DownSampleA, 'B': DownSampleB, 'C': DownSampleC}

    # performance: C >> B > A; C costs extra parameters
    def __init__(self, depth, inter=1, classify=0, nclass=1000,
                 downsamp=('A', False), downexp=2, downsize=False,
                 slink='A', pool='avg'):
        super(SweetBlock, self).__init__()
        # downsamp = (method tag 'A'/'B'/'C', double-conv flag)
        self.down_func = self.down_func[downsamp[0]]
        self.downsize = downsize
        self.classify = classify
        self.slink = slink  # performance: A ≥ C >> B
        self.pool = pool
        self.bn1 = nn.BatchNorm2d(depth)
        # big -> small: stride-2 conv, channel width scaled by `inter`
        self.conv1 = nn.Conv2d(depth, depth * inter, 3, stride=2, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(depth * inter)
        # small -> big: stride-2 transposed conv restores the resolution
        self.deconv2 = nn.ConvTranspose2d(depth * inter, depth, 3, stride=2, padding=1, output_padding=1, bias=False)
        if downsize:
            # one downsampler per branch, expanding channels by `downexp`
            self.down1 = self.down_func(depth, depth * downexp, pool, double=downsamp[1])
            self.down2 = self.down_func(depth, depth * downexp, pool, double=downsamp[1])
        if classify > 0:
            # auxiliary classifier fed from the small-branch features (res1)
            self.classifier = nn.Sequential(
                nn.BatchNorm2d(depth * inter),
                nn.ReLU(),
                AdaAvgPool(),
                ViewLayer(dim=-1),
                nn.Linear(depth * inter, nclass)
            )

    def forward(self, x):
        # Unpack the running state; a bare tensor is tolerated on entry.
        if isinstance(x, (list, tuple)):
            assert len(x) == 3, 'len of x is: %s ...' % len(x)
            x1, x2, pred = x  # (big, small, pred)
        else:
            x1, x2, pred = x, None, None
        if self.slink == 'A':
            # A: residuals added to both branches after both couplings.
            res1 = self.conv1(F.relu(self.bn1(x1)))
            res2 = self.deconv2(F.relu(self.bn2(res1)))
            if self.classify > 0:
                pred.append(self.classifier(res1))
            res1 = res1 + x2
            res2 = res2 + x1
            if self.downsize:
                res2 = self.down2(res2)
                res1 = self.down1(res1)
            # utils.print_size([res2, res1])
            return res2, res1, pred
        elif self.slink == 'B':
            # B: the small-branch residual is added *before* the up-coupling,
            # so x2 feeds the transposed conv as well.
            res1 = self.conv1(F.relu(self.bn1(x1)))
            if self.classify > 0:
                pred.append(self.classifier(res1))
            res1 = res1 + x2
            res2 = self.deconv2(F.relu(self.bn2(res1)))
            res2 = res2 + x1
            if self.downsize:
                res2 = self.down2(res2)
                res1 = self.down1(res1)
            # utils.print_size([res2, res1])
            return res2, res1, pred
        elif self.slink == 'C':
            # C: no residual into the small branch (x2 is unused here).
            res1 = self.conv1(F.relu(self.bn1(x1)))
            res2 = self.deconv2(F.relu(self.bn2(res1)))
            if self.classify > 0:
                pred.append(self.classifier(res1))
            res2 = res2 + x1
            if self.downsize:
                res2 = self.down2(res2)
                res1 = self.down1(res1)
            # utils.print_size([res2, res1])
            return res2, res1, pred
        else:
            raise NotImplementedError('check the slink: %s ...' % self.slink)
class SumaryBlock(nn.Module):
    """Final classification head(s).

    classify == 1 appends one prediction computed from the big branch;
    classify == 2 appends predictions from both branches (small first, then
    big). Any other value raises NotImplementedError at forward time. The
    ``avgpool`` argument is kept for interface compatibility but unused.
    """

    def __init__(self, depth, classify=1, avgpool=True, active='relu', nclass=1000):
        super(SumaryBlock, self).__init__()
        self.classify = classify

        def head():
            # BN -> activation -> global average pool -> flatten -> linear
            return nn.Sequential(
                nn.BatchNorm2d(depth),
                Activate(active),
                AdaAvgPool(),
                ViewLayer(),
                nn.Linear(depth, nclass)
            )

        if self.classify >= 1:
            self.classifier1 = head()
        if self.classify >= 2:
            self.classifier2 = head()

    def forward(self, x):
        if isinstance(x, (list, tuple)):
            x1, x2, pred = x
        else:
            x1, x2, pred = x, None, None
        if self.classify == 1:
            pred.extend([self.classifier1(x1)])
        elif self.classify == 2:
            pred.extend([self.classifier2(x2), self.classifier1(x1)])
        else:
            raise NotImplementedError
        return pred
class RockBlock(nn.Module):
    """Network stem: builds the 'big' branch from the RGB input and, when
    branch >= 2, a max-pooled 'small' branch at half its resolution.

    forward() returns ``(big, small_or_None, pred)`` where ``pred`` is a new
    empty list that downstream blocks fill with auxiliary predictions.
    """

    def __init__(self, outdepth, branch=2, dataset='cifar'):
        super(RockBlock, self).__init__()
        self.branch = branch
        if dataset == 'cifar':
            # 3x3 stem keeps the input resolution
            self.branch1 = nn.Sequential(
                nn.Conv2d(3, outdepth, kernel_size=3, stride=1, padding=1, bias=False),
            )
            if branch >= 2:
                self.branch2 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        elif dataset == 'imagenet':
            # 7x7/2 conv plus max-pool: 4x total downsampling in the stem
            self.branch1 = nn.Sequential(
                nn.Conv2d(3, outdepth, kernel_size=7, stride=2, padding=3, bias=False),
                nn.BatchNorm2d(outdepth),
                nn.ReLU(inplace=True),
                nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
            )
            self.branch2 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

    def forward(self, x):
        pred = []
        if self.branch == 1:
            return self.branch1(x), None, pred
        if self.branch == 2:
            big = self.branch1(x)
            small = self.branch2(big)
            return big, small, pred
        raise ValueError('check branch must be in [1, 2, 3]!')
class SweetNet(nn.Module):
    """ImageNet-scale network: a RockBlock stem, four SweetBlock stages and a
    SumaryBlock classification head. Returns the list of predictions collected
    by the auxiliary classifiers plus the final head(s)."""

    def __init__(self, branch=2, depth=64, layers=(2, 3, 3, 3), expand=(1, 2, 4, 8), inter=(1, 1, 1, 1),
                 classify=(1, 1, 1, 1, 2), downsamp=('A', False), downexp=2, downlast=False,
                 slink='A', pool='avg', active='relu', nclass=1000):
        super(SweetNet, self).__init__()
        self.layers = layers
        self.layer0 = RockBlock(depth, branch, dataset='imagenet')
        # The first three stages always downsize at their end; the last stage
        # only does so when `downlast` is set.
        stage_down = (True, True, True, downlast)
        for i in range(4):
            stage = self._make_sweet_layer(SweetBlock, layers[i], depth * expand[i], inter[i],
                                           classify[i], nclass, downsamp, downexp, stage_down[i],
                                           slink, pool)
            setattr(self, 'layer%d' % (i + 1), stage)
        # Head width depends on whether the last stage expanded its channels.
        indepth = depth * expand[3] * downexp if downlast else depth * expand[3]
        self.classifier = SumaryBlock(indepth, classify[4], avgpool=True, active=active, nclass=nclass)

    def _make_sweet_layer(self, block, nums, depth, inter, cfy, nclass,
                          downsamp, downexp, down=True, slink='A', pool='avg'):
        # nums-1 resolution-preserving blocks, then one block that may downsize.
        blocks = [block(depth, inter, cfy, nclass, downsamp, downexp,
                        downsize=False, slink=slink, pool=pool)
                  for _ in range(nums - 1)]
        blocks.append(block(depth, inter, cfy, nclass, downsamp, downexp,
                            downsize=down, slink=slink, pool=pool))
        return nn.Sequential(*blocks)

    def _make_trans_layer(self, block, indepth, outdepth):
        return block(indepth, outdepth)

    def forward(self, x):
        out = self.layer0(x)
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            out = stage(out)
        return self.classifier(out)
class CifarSweetNet(nn.Module):
def __init__(self, branch=2, depth=16, layers=(2, 3, 3), expand=(1, 2, 4), inter=(1, 1, 1),
classify=(1, 1, 1, 2), downsamp=('A', False), downexp=2, downlast=False,
slink='A', pool='avg', active='relu', nclass=10):
super(CifarSweetNet, self).__init__()
assert branch <= 2 and branch >= 1, 'branch !!!'
self.layers = layers
self.layer0 = RockBlock(depth, branch, dataset='cifar')
self.layer1 = self._make_sweet_layer(SweetBlock, layers[0], depth * expand[0], inter[0], classify[0],
nclass, downsamp, downexp, True, slink, pool)
self.layer2 = self._make_sweet_layer(SweetBlock, layers[1], depth * expand[1], inter[1], classify[1],
nclass, downsamp, downexp, True, slink, pool)
self.layer3 = self._make_sweet_layer(SweetBlock, layers[2], depth * expand[2], inter[2], classify[2],
nclass, downsamp, downexp, downlast, slink, pool)
if downlast:
indepth = depth * expand[2] * downexp
else:
indepth = depth * expand[2]
self.classifier = SumaryBlock(indepth, classify[3], avgpool=True, active=active, nclass=nclass)
self.softmax = nn.Softmax(dim=1)
self.cross = nn.CrossEntropyLoss()
self.kldiv = nn.KLDivLoss()
def _make_sweet_layer(self, block, nums, depth, inter, cfy, nclass,
downsamp, downexp, down=True, slink='A', pool='avg'):
layers = []
for i in range(nums - 1):
layers.append(block(depth, inter, cfy, nclass,
downsamp, downexp, downsize=False, slink=slink, pool=pool))
layers.append(block(depth, inter, cfy, nclass,
downsamp, downexp, downsize=down, slink=slink, pool=pool))
return nn.Sequential(*layers)
def _make_trans_layer(self, block, indepth, outdepth):
return block(indepth, outdepth)
def forward(self, x):
x = self.layer0(x)
# utils.print_size(x)
x = self.layer1(x)
# utils.print_size(x)
x = self.layer2(x)
# utils.print_size(x)
x = self.layer3(x)
# utils.print_size(x)
x = self.classifier(x)
# utils.print_size(x)
return x
def kldiv_loss(self, x, sumit=False):
pred = [self.softmax(o) for o in x if o is not None]
loss = [self.kldiv(o, pred[-1]) for o in pred[:-1]]
if sumit:
loss = sum(loss)
return loss
def kldiv_cross_loss(self, x, labels, sumit=False):
klloss = self.kldiv_loss(x, False)
cross = self.cross(x[-1], labels)
loss = klloss.append(cross)
if sumit:
loss = sum(loss)
return loss
if __name__ == '__main__':
import xtils
torch.manual_seed(9528)
criterion = nn.CrossEntropyLoss()
kldiv = nn.KLDivLoss()
# model = SweetNet(branch=2, depth=64, layers=(2, 3, 3, 3), expand=(1, 2, 4, 8), inter=(1, 1, 1, 1),
# downsamp=('A', False), | |
<filename>VCD/vc_dynamics.py
import os
import os.path as osp
import copy
import cv2
import json
import wandb
import numpy as np
import scipy
from tqdm import tqdm
from chester import logger
import torch
import torch_geometric
from softgym.utils.visualization import save_numpy_as_gif
from VCD.models import GNN
from VCD.dataset import ClothDataset
from VCD.utils.data_utils import AggDict
from VCD.utils.utils import extract_numbers, pc_reward_model, visualize
from VCD.utils.camera_utils import get_matrix_world_to_camera, project_to_image
class VCDynamics(object):
    def __init__(self, args, env, vcd_edge=None):
        """Set up the dynamics GNN(s), their optimizers/schedulers and dataloaders.

        Args:
            args: experiment configuration namespace (train_mode, cuda_idx, lr,
                beta1, batch_size, num_workers, wandb options, ...).
            env: simulation environment, forwarded to the datasets.
            vcd_edge: optional edge-prediction model shared with the datasets.
        """
        # Create Models
        self.args = args
        self.env = env
        self.train_mode = args.train_mode
        self.device = torch.device(self.args.cuda_idx)
        # 'graph_imit' trains a full-mesh teacher and a partial-view student
        # together; any other mode builds just the single requested model type.
        self.input_types = ['full', 'vsbl'] if self.train_mode == 'graph_imit' else [self.train_mode]
        self.models, self.optims, self.schedulers = {}, {}, {}
        for m in self.input_types:
            self.models[m] = GNN(args, decoder_output_dim=3, name=m, use_reward=False if self.train_mode == 'vsbl' else True)  # Predict acceleration
            # Allow a per-model learning rate (e.g. args.vsbl_lr); fall back to args.lr.
            lr = getattr(self.args, m + '_lr') if hasattr(self.args, m + '_lr') else self.args.lr
            # NOTE(review): GNN.param() — presumably a project-defined accessor;
            # the standard nn.Module method is parameters(). Confirm.
            self.optims[m] = torch.optim.Adam(self.models[m].param(), lr=lr, betas=(self.args.beta1, 0.999))
            self.schedulers[m] = torch.optim.lr_scheduler.ReduceLROnPlateau(self.optims[m], 'min', factor=0.8,
                                                                            patience=3, verbose=True)
            self.models[m].to(self.device)
        self.vcd_edge = vcd_edge
        self.load_model(self.args.load_optim)
        print("VCD dynamics models created")
        if self.train_mode == 'graph_imit' and not args.tune_teach:
            # Teacher ('full') weights stay frozen unless explicitly fine-tuned.
            self.models[self.input_types[0]].freeze()
        # Create Dataloaders
        self.datasets = {phase: ClothDataset(args, self.input_types, phase, env) for phase in ['train', 'valid']}
        for phase in ['train', 'valid']: self.datasets[phase].vcd_edge = self.vcd_edge
        # follow_batch lets PyG keep per-model-type batch vectors for the x_* keys.
        follow_batch = ['x_{}'.format(t) for t in self.input_types]
        self.dataloaders = {x: torch_geometric.data.DataLoader(
            self.datasets[x], batch_size=args.batch_size, follow_batch=follow_batch,
            shuffle=True if x == 'train' else False, drop_last=True,
            num_workers=args.num_workers, pin_memory=True, prefetch_factor=5 if args.num_workers > 0 else 2)
            for x in ['train', 'valid']}
        self.mse_loss = torch.nn.MSELoss()
        self.log_dir = logger.get_dir()
        if self.args.use_wandb and args.eval == 0:
            # To use wandb, you need to create an account and run 'wandb login'.
            wandb.init(project='VCD', name=args.exp_name, resume='allow',
                       id=None, settings=wandb.Settings(start_method='thread'))
            print('Weights & Biases is initialized with run name {}'.format(args.exp_name))
            wandb.config.update(args, allow_val_change=True)
def retrieve_data(self, data, key):
""" vsbl: [vsbl], full: [full], dual :[vsbl, full] """
identifier = '_{}'.format(key)
out_data = {k.replace(identifier, ''): v for k, v in data.items() if identifier in k}
return out_data
def generate_dataset(self):
os.system('mkdir -p ' + self.args.dataf)
for phase in ['train', 'valid']:
self.datasets[phase].generate_dataset()
print('Dataset generated in', self.args.dataf)
    def resume_training(self):
        """Placeholder; resuming from checkpoints is handled by load_model()."""
        pass
    def load_model(self, load_optim=False):
        """Load model weights according to the current training mode.

        * 'vsbl' / 'full': resume the corresponding dynamics model from
          ``args.partial_dyn_path`` / ``args.full_dyn_path`` and recover the
          starting epoch from the numbers embedded in the checkpoint path.
        * 'graph_imit': load the teacher ('full') checkpoint, then copy the
          sub-modules named in ``args.copy_teach`` into the student ('vsbl');
          training restarts from epoch 0.

        Args:
            load_optim: whether to also restore optimizer state (ignored in
                graph-imitation mode).
        """
        if self.train_mode == 'vsbl' and self.args.partial_dyn_path is not None:  # Resume training of partial model
            self.models['vsbl'].load_model(self.args.partial_dyn_path, load_optim=load_optim, optim=self.optims['vsbl'])
            # The epoch number is the last integer in the checkpoint filename.
            self.load_epoch = int(extract_numbers(self.args.partial_dyn_path)[-1])
        if self.train_mode == 'full' and self.args.full_dyn_path is not None:  # Resume training of full model
            self.models['full'].load_model(self.args.full_dyn_path, load_optim=load_optim, optim=self.optims['full'])
            self.load_epoch = int(extract_numbers(self.args.full_dyn_path)[-1])
        if self.train_mode == 'graph_imit' and self.args.full_dyn_path is not None:
            # Imitating the full model using a partial model.
            # Need to first load the full model, and then copy weights to the partial model
            self.models['full'].load_model(self.args.full_dyn_path, load_optim=False)
            self.models['vsbl'].load_model(self.args.full_dyn_path, load_optim=False, load_names=self.args.copy_teach)
            self.load_epoch = 0
    def load_data_and_rollout(self, m_name, traj_id, phase):
        """Roll out model ``m_name`` on one stored trajectory and score it.

        Loads the initial transition and the ground-truth action/position
        sequence from the ``phase`` dataset, runs :meth:`rollout`, and
        compares predictions against the stored ground truth.

        Returns:
            dict with predicted and ground-truth particle positions, picker
            shape positions, config id, mesh edges, per-step position errors,
            the mean squared reward-prediction error, and the squared error of
            the final predicted return ('planning_error').
        """
        idx = traj_id * (self.args.time_step - self.args.n_his)
        dataset = self.datasets[phase]
        data = dataset.prepare_transition(idx, eval=True)
        data = dataset.remove_suffix(data, m_name)
        traj_id = data[
            'idx_rollout']  # This can be different from traj_id as some trajectory may not load due to filtering
        config_id = int(data['scene_params'][3])
        # load action sequences and true particle positions
        traj_particle_pos, actions, gt_rewards = [], [], []
        pred_time_interval = self.args.pred_time_interval
        for t in range(max(0, self.args.n_his - pred_time_interval), self.args.time_step - pred_time_interval,
                       pred_time_interval):
            t_data = dataset.load_rollout_data(traj_id, t)
            if m_name == 'vsbl':
                # Partial model: keep only the observable (mapped) subset of particles.
                traj_particle_pos.append(t_data['positions'][t_data['downsample_idx']][data['partial_pc_mapped_idx']])
            else:
                traj_particle_pos.append(t_data['positions'][t_data['downsample_idx']])
            gt_rewards.append(t_data['gt_reward_crt'])
            actions.append(t_data['action'])
        res = self.rollout(
            dict(model_input_data=copy.deepcopy(data), actions=actions, reward_model=pc_reward_model, m_name=m_name))
        model_positions = res['model_positions']
        shape_positions = res['shape_positions']
        mesh_edges = res['mesh_edges']
        pred_rewards = res['pred_rewards']
        gt_pos_rewards = res['gt_pos_rewards']
        pos_errors = []
        reward_pred_errors = []
        for i in range(len(actions)):
            # Mean Euclidean distance between predicted and true particle positions.
            pos_error = np.mean(np.linalg.norm(model_positions[i] - traj_particle_pos[i], axis=1))
            pos_errors.append(pos_error)
            reward_pred_errors.append((pred_rewards[i] - gt_pos_rewards[i]) ** 2)
        reward_pred_error = np.mean(reward_pred_errors)  # measure only reward prediction error
        planning_error = (pred_rewards[-1] - gt_rewards[-1]) ** 2  # measure predicted return error
        return {'model_positions': model_positions,
                'gt_positions': traj_particle_pos,
                'shape_positions': shape_positions,
                'config_id': config_id,
                'mesh_edges': mesh_edges,
                'reward_pred_error': reward_pred_error,
                'planning_error': planning_error,
                'rollout_pos_error': pos_errors}
    def train(self):
        """Main training/validation loop.

        For every epoch and phase: one pass over the dataloader computing
        acceleration (and, depending on mode, reward and graph-imitation)
        losses, then a small set of full-trajectory rollout evaluations with
        GIF visualizations, checkpointing, and metric logging.
        """
        # Training loop
        st_epoch = self.load_epoch if hasattr(self, 'load_epoch') else 0
        print('st epoch ', st_epoch)
        best_valid_loss = {m_name: np.inf for m_name in self.models}
        # In evaluation-only runs, skip the training phase entirely.
        phases = ['train', 'valid'] if self.args.eval == 0 else ['valid']
        for epoch in range(st_epoch, self.args.n_epoch):
            for phase in phases:
                self.set_mode(phase)
                # Log all the useful metrics
                epoch_infos = {m: AggDict(is_detach=True) for m in self.models}
                epoch_len = len(self.dataloaders[phase])
                for i, data in tqdm(enumerate(self.dataloaders[phase]), desc=f'Epoch {epoch}, phase {phase}'):
                    data = data.to(self.device).to_dict()
                    iter_infos = {m_name: AggDict(is_detach=False) for m_name in self.models}
                    preds = {}
                    # Global feature vector fed to the GNN ('u'); zeros each batch.
                    last_global = torch.zeros(self.args.batch_size, self.args.global_size, dtype=torch.float32,
                                              device=self.device)
                    with torch.set_grad_enabled(phase == 'train'):
                        for (m_name, model), iter_info in zip(self.models.items(), iter_infos.values()):
                            inputs = self.retrieve_data(data, m_name)
                            inputs['u'] = last_global
                            pred = model(inputs)
                            preds[m_name] = pred
                            iter_info.add_item('accel_loss', self.mse_loss(pred['accel'], inputs['gt_accel']))
                            iter_info.add_item('sqrt_accel_loss', torch.sqrt(iter_info['accel_loss']))
                            if self.train_mode != 'vsbl':
                                iter_info.add_item('reward_loss',
                                                   self.mse_loss(pred['reward_nxt'].squeeze(), inputs['gt_reward_nxt']))
                    if self.args.train_mode == 'graph_imit':  # Graph imitation
                        # Student ('vsbl') imitates the teacher's detached node and latent outputs.
                        iter_infos['vsbl'].add_item('imit_node_loss', self.mse_loss(preds['vsbl']['n_nxt'],
                                                                                   preds['full']['n_nxt'].detach()))
                        iter_infos['vsbl'].add_item('imit_lat_loss', self.mse_loss(preds['vsbl']['lat_nxt'],
                                                                                  preds['full']['lat_nxt'].detach()))
                    for m_name in self.models:
                        iter_info = iter_infos[m_name]
                        for feat in ['n_nxt', 'lat_nxt']:  # Node and global output
                            iter_info.add_item(feat + '_norm', torch.norm(preds[m_name][feat], dim=1).mean())
                        if self.args.train_mode == 'vsbl':  # Student loss
                            iter_info.add_item('total_loss', iter_info['accel_loss'])
                        elif self.args.train_mode == 'graph_imit' and m_name == 'vsbl':  # Student loss
                            iter_info.add_item('imit_loss',
                                               iter_info['imit_lat_loss'] * self.args.imit_w_lat + iter_info[
                                                   'imit_node_loss'])
                            iter_info.add_item('total_loss',
                                               iter_info['accel_loss'] + self.args.imit_w * iter_info[
                                                   'imit_loss'] +
                                               + self.args.reward_w * iter_info['reward_loss'])
                        else:  # Teacher loss or no graph imitation
                            iter_info.add_item('total_loss',
                                               iter_info['accel_loss'] + self.args.reward_w * iter_info['reward_loss'])
                        if phase == 'train':
                            # A frozen teacher in graph-imitation mode gets no optimizer step.
                            if not (self.train_mode == 'graph_imit' and m_name == 'full' and not self.args.tune_teach):
                                self.optims[m_name].zero_grad()
                                iter_info['total_loss'].backward()
                                self.optims[m_name].step()
                        epoch_infos[m_name].update_by_add(iter_infos[m_name])  # Aggregate info
                # rollout evaluation
                nstep_eval_rollout = self.args.nstep_eval_rollout
                data_folder = osp.join(self.args.dataf, phase)
                traj_ids = np.random.permutation(len(os.listdir(data_folder)))[:nstep_eval_rollout]
                rollout_infos = {}
                for m_name in self.models:
                    rollout_info = AggDict()
                    for idx, traj_id in enumerate(traj_ids):
                        with torch.no_grad():
                            self.set_mode('eval')
                            traj_rollout_info = self.load_data_and_rollout(m_name, traj_id, phase)
                        rollout_info.update_by_add(
                            dict(rollout_pos_error=np.array(traj_rollout_info['rollout_pos_error']).mean(),
                                 reward_pred_error=np.array(traj_rollout_info['reward_pred_error']).mean(),
                                 planning_error=np.array(traj_rollout_info['planning_error']).mean()))
                        frames_model = visualize(self.datasets[phase].env, traj_rollout_info['model_positions'],
                                                 traj_rollout_info['shape_positions'],
                                                 traj_rollout_info['config_id'])
                        frames_gt = visualize(self.datasets[phase].env, traj_rollout_info['gt_positions'],
                                              traj_rollout_info['shape_positions'],
                                              traj_rollout_info['config_id'])
                        mesh_edges = traj_rollout_info['mesh_edges']
                        if mesh_edges is not None:  # Visualization of mesh edges on the predicted model
                            frames_edge_visual = copy.deepcopy(frames_model)
                            matrix_world_to_camera = get_matrix_world_to_camera()[:3, :]  # 3 x 4
                            for t in range(len(frames_edge_visual)):
                                # Project the predicted particles into the image and draw each edge.
                                u, v = project_to_image(matrix_world_to_camera, traj_rollout_info['model_positions'][t])
                                for edge_idx in range(mesh_edges.shape[1]):
                                    s = mesh_edges[0][edge_idx]
                                    r = mesh_edges[1][edge_idx]
                                    start = (u[s], v[s])
                                    end = (u[r], v[r])
                                    color = (255, 0, 0)
                                    thickness = 1
                                    image = cv2.line(frames_edge_visual[t], start, end, color, thickness)
                                    frames_edge_visual[t] = image
                            combined_frames = [np.hstack([frame_gt, frame_model, frame_edge])
                                               for (frame_gt, frame_model, frame_edge) in
                                               zip(frames_gt, frames_model, frames_edge_visual)]
                        else:
                            combined_frames = [np.hstack([frame_gt, frame_model]) for (frame_gt, frame_model) in
                                               zip(frames_gt, frames_model)]
                        if idx < 5:
                            save_numpy_as_gif(np.array(combined_frames),
                                              osp.join(self.log_dir,
                                                       '{}-{}-{}-{}.gif'.format(m_name, phase, epoch, idx)))
                    rollout_infos[m_name] = rollout_info.get_mean(f"{m_name}/{phase}/", len(traj_ids))
                if phase == 'train' and epoch % self.args.save_model_interval == 0:
                    for m_name, model in self.models.items():
                        suffix = '{}'.format(epoch)
                        model.save_model(self.log_dir, m_name, suffix, self.optims[m_name])
                if phase == 'valid':
                    for m_name, model in self.models.items():
                        epoch_info = epoch_infos[m_name]
                        cur_loss = epoch_info[f"{m_name}/{phase}/" + 'total_loss']
                        if not self.args.fixed_lr:
                            self.schedulers[m_name].step(cur_loss)
                        if cur_loss < best_valid_loss[m_name]:
                            best_valid_loss[m_name] = cur_loss
                            # NOTE(review): state_dict aliases args.__dict__, so
                            # best_epoch/best_valid_loss are injected into args — confirm intended.
                            state_dict = self.args.__dict__
                            state_dict['best_epoch'] = epoch
                            state_dict['best_valid_loss'] = cur_loss
                            with open(osp.join(self.log_dir, 'best_state.json'), 'w') as f:
                                json.dump(state_dict, f, indent=2, sort_keys=True)
                            model.save_model(self.log_dir, m_name, 'best', self.optims[m_name])
                # logging
                logger.record_tabular(phase + '/epoch', epoch)
                for m_name in self.models:
                    epoch_info, rollout_info = epoch_infos[m_name], rollout_infos[m_name]
                    epoch_info = epoch_info.get_mean(f"{m_name}/{phase}/", epoch_len)
                    epoch_info['lr'] = self.optims[m_name].param_groups[0]['lr']
                    logger.log(
                        f'{phase} [{epoch}/{self.args.n_epoch}] Loss: {epoch_info[f"{m_name}/{phase}/total_loss"]:.4f}',
                        best_valid_loss[m_name])
                    for k, v in epoch_info.items():
                        logger.record_tabular(k, v)
                    for k, v in rollout_info.items():
                        logger.record_tabular(k, v)
                    if self.args.use_wandb and self.args.eval == 0:
                        wandb.log(epoch_info, step=epoch)
                        wandb.log(rollout_info, step=epoch)
                logger.dump_tabular()
def set_mode(self, mode='train'):
for model in self.models.values():
model.set_mode('train' if mode == 'train' else 'eval')
def to(self, cuda_idx):
for model in self.models.values():
model.to(torch.device("cuda:{}".format(cuda_idx)))
def rollout(self, args):
"""
args need to contain the following contents:
model_input_data: current point cloud, velocity history, picked point, picker position, etc
actions: rollout actions
reward_model: reward function
cuda_idx (optional): default 0
robot_exp (optional): default False
return a dict:
final_ret: final reward of the rollout
model_positions: model predicted point cloud positions
shape_positions: positions of the pickers, for visualization
mesh_edges: predicted mesh edge
time_cost: time cost for different parts of the rollout function
"""
model_input_data = args['model_input_data']
actions = args['actions'] # NOTE: sequence of actions to rollout
reward_model = args['reward_model']
m_name = args['m_name']
dataset = self.datasets['train'] # Both train and valid are the same during inference
H = len(actions) # Planning horizon
cuda_idx = args.get('cuda_idx', 0)
robot_exp = args.get('robot_exp', False)
self.set_mode('eval')
self.to(cuda_idx)
self.device = torch.device(cuda_idx)
pc_pos = model_input_data['pointcloud']
pc_vel_his = model_input_data['vel_his']
picker_pos = model_input_data['picker_position']
# picked_particles = model_input_data['picked_points']
scene_params = model_input_data['scene_params']
observable_particle_index = model_input_data['partial_pc_mapped_idx']
rest_dist = model_input_data.get('rest_dist', None)
mesh_edges = model_input_data.get('mesh_edges', None)
assert rest_dist is None # The rest_dist will be computed from the initial_particle_pos?
# record model predicted point cloud positions
model_positions = np.zeros((H, len(pc_pos), 3))
shape_positions = np.zeros((H, | |
the values
# in column are non-numeric. This is a known bug in
# pandas: https://github.com/pydata/pandas/issues/9589
# Therefore, we need add an additional check after this.
df_excluded['raw'] = pd.to_numeric(df_excluded['raw'], errors='coerce').astype(float)
# filter out the non-numeric machine scores from the rest of the data
newdf, newdf_excluded = self.filter_on_column(df_filtered,
'raw',
'spkitemid',
exclude_zeros=False)
del df_filtered
df_filtered_pred = newdf
# make sure that the remaining data frame is not empty
if len(df_filtered_pred) == 0:
raise ValueError("No responses remaining after filtering out "
"non-numeric machine scores. No further analysis "
"can be run. ")
with np.errstate(divide='ignore'):
df_excluded = pd.concat([df_excluded, newdf_excluded], sort=True)
# if requested, exclude the candidates with less than X responses
# left after filtering
if exclude_listwise:
(df_filtered_candidates,
df_excluded_candidates) = self.select_candidates(df_filtered_pred,
min_items_per_candidate)
# check that there are still responses left for analysis
if len(df_filtered_candidates) == 0:
raise ValueError("After filtering non-numeric human and system scores "
"there were "
"no candidates with {} or more responses "
"left for analysis".format(str(min_items_per_candidate)))
# redefine df_filtered_pred
df_filtered_pred = df_filtered_candidates.copy()
# update df_excluded
df_excluded = pd.concat([df_excluded, df_excluded_candidates], sort=True)
df_excluded = df_excluded[['spkitemid'] + [column for column in df_excluded
if column != 'spkitemid']]
# set default values for scaling
scale_pred_mean = 0
scale_pred_sd = 1
scale_human_mean = 0
scale_human_sd = 1
if data_container_obj.get_frame('scale') is not None:
if ('sc1' not in data_container_obj.scale.columns and
'prediction' not in data_container_obj.scale.columns):
raise KeyError('The CSV file specified for scaling ',
'must have the "prediction" and the "sc1" '
'columns.')
else:
scale_pred_mean, scale_pred_sd = (data_container_obj.scale['prediction'].mean(),
data_container_obj.scale['prediction'].std())
scale_human_mean, scale_human_sd = (data_container_obj.scale['sc1'].mean(),
data_container_obj.scale['sc1'].std())
logging.info('Processing predictions')
df_pred_processed = self.process_predictions(df_filtered_pred,
scale_pred_mean,
scale_pred_sd,
scale_human_mean,
scale_human_sd,
spec_trim_min,
spec_trim_max,
spec_trim_tolerance)
if not scale_with:
expected_score_types = ['raw', 'raw_trim', 'raw_trim_round']
elif scale_with == 'asis':
expected_score_types = ['scale', 'scale_trim', 'scale_trim_round']
else:
expected_score_types = ['raw', 'raw_trim', 'raw_trim_round',
'scale', 'scale_trim', 'scale_trim_round']
# extract separated data frames that we will write out
# as separate files
not_other_columns = set()
prediction_columns = ['spkitemid', 'sc1'] + expected_score_types
df_predictions_only = df_pred_processed[prediction_columns]
not_other_columns.update(prediction_columns)
metadata_columns = ['spkitemid'] + subgroups
if candidate_column:
metadata_columns.append('candidate')
df_test_metadata = df_filtered_pred[metadata_columns]
not_other_columns.update(metadata_columns)
df_test_human_scores = pd.DataFrame()
human_score_columns = ['spkitemid', 'sc1', 'sc2']
if second_human_score_column and 'sc2' in df_filtered_pred:
df_test_human_scores = df_filtered_pred[human_score_columns].copy()
not_other_columns.update(['sc2'])
# filter out any non-numeric values nows
# as well as zeros, if we were asked to
df_test_human_scores['sc2'] = pd.to_numeric(df_test_human_scores['sc2'],
errors='coerce').astype(float)
if exclude_zero_scores:
df_test_human_scores['sc2'] = df_test_human_scores['sc2'].replace(0, np.nan)
# remove 'spkitemid' from `not_other_columns`
# because we want that in the other columns
# data frame
not_other_columns.remove('spkitemid')
# extract all of the other columns in the predictions file
other_columns = [column for column in df_filtered_pred.columns
if column not in not_other_columns]
df_pred_other_columns = df_filtered_pred[other_columns]
# add internal configuration options that we need
new_config_obj = config_obj.copy()
internal_options_dict = {'pred_file_location': pred_file_location,
'exclude_listwise': exclude_listwise,
'use_scaled_predictions': use_scaled_predictions,
'chosen_notebook_files': chosen_notebook_files}
for key, value in internal_options_dict.items():
new_config_obj[key] = value
# we need to make sure that `spkitemid` is the first column
df_excluded = df_excluded[['spkitemid'] + [column for column in df_excluded
if column != 'spkitemid']]
frames = [df_predictions_only,
df_test_metadata,
df_pred_other_columns,
df_test_human_scores,
df_excluded,
df_responses_with_excluded_flags]
names = ['pred_test',
'test_metadata',
'test_other_columns',
'test_human_scores',
'test_excluded',
'test_responses_with_excluded_flags']
new_container = [{'name': name, 'frame': frame}
for frame, name in zip(frames, names)]
new_container = DataContainer(new_container)
return new_config_obj, new_container
    def process_data_rsmpredict(self, config_obj, data_container_obj):
        """
        Process data for RSM predict.

        Validates the configured columns, renames them to the internal
        defaults, preprocesses the features, generates (optionally expected)
        score predictions, post-processes them, and packages the results.

        Parameters
        ----------
        config_obj : configuration_parser.Configuration
            A configuration object.
        data_container_obj : container.DataContainer
            A data container object.

        Returns
        -------
        config_obj : configuration_parser.Configuration
            A new configuration object.
        data_container : container.DataContainer
            A new data container object.

        Raises
        ------
        KeyError
            If columns in the config file do not exist in the data.
        ValueError
            If data contains duplicate response IDs.
        """
        df_input = data_container_obj.input_features
        df_feature_info = data_container_obj.feature_info
        df_postproc_params = data_container_obj.postprocessing_params
        # get the column name that will hold the ID
        id_column = config_obj['id_column']
        # get the column name for human score (if any)
        human_score_column = config_obj['human_score_column']
        # get the column name for second human score (if any)
        second_human_score_column = config_obj['second_human_score_column']
        # get the column name for subgroups (if any)
        subgroups = config_obj['subgroups']
        # get the model
        model = config_obj['model']
        # should features be standardized?
        standardize_features = config_obj.get('standardize_features', True)
        # should we predict expected scores
        predict_expected_scores = config_obj['predict_expected_scores']
        # get the column names for flag columns (if any)
        flag_column_dict = config_obj.check_flag_column(partition='test')
        # get the name for the candidate_column (if any)
        candidate_column = config_obj['candidate_column']
        # make sure that the columns specified in the config file actually exist
        columns_to_check = [id_column] + subgroups + list(flag_column_dict.keys())
        # add subgroups and the flag columns to the list of columns
        # that will be added to the final file
        columns_to_copy = subgroups + list(flag_column_dict.keys())
        # human_score_column will be set to sc1 by default
        # we only raise an error if it's set to something else.
        # However, since we cannot distinguish whether the column was set
        # to sc1 by default or specified as such in the config file
        # we append it to output anyway as long as
        # it is in the input file
        if human_score_column != 'sc1' or 'sc1' in df_input.columns:
            columns_to_check.append(human_score_column)
            columns_to_copy.append('sc1')
        if candidate_column:
            columns_to_check.append(candidate_column)
            columns_to_copy.append('candidate')
        if second_human_score_column:
            columns_to_check.append(second_human_score_column)
            columns_to_copy.append('sc2')
        missing_columns = set(columns_to_check).difference(df_input.columns)
        if missing_columns:
            raise KeyError("Columns {} from the config file "
                           "do not exist in the data.".format(missing_columns))
        # rename all columns
        df_input = self.rename_default_columns(df_input,
                                               [],
                                               id_column,
                                               human_score_column,
                                               second_human_score_column,
                                               None,
                                               None,
                                               candidate_column=candidate_column)
        # check that the id_column contains unique values
        if df_input['spkitemid'].size != df_input['spkitemid'].unique().size:
            raise ValueError("The data contains repeated response IDs in {}. "
                             "Please make sure all response IDs are unique and "
                             "re-run the tool.".format(id_column))
        (df_features_preprocessed,
         df_excluded) = self.preprocess_new_data(df_input,
                                                 df_feature_info,
                                                 standardize_features)
        trim_min = df_postproc_params['trim_min'].values[0]
        trim_max = df_postproc_params['trim_max'].values[0]
        h1_mean = df_postproc_params['h1_mean'].values[0]
        h1_sd = df_postproc_params['h1_sd'].values[0]
        # if we are using a newly trained model, use trim_tolerance from the
        # df_postproc_params. If not, set it to the default value and show
        # warning
        if 'trim_tolerance' in df_postproc_params:
            trim_tolerance = df_postproc_params['trim_tolerance'].values[0]
        else:
            trim_tolerance = 0.4998
            logging.warning("The tolerance for trimming scores will be assumed to be 0.4998, "
                            "the default value in previous versions of RSMTool. "
                            "We recommend re-training the model to ensure future "
                            "compatibility.")
        # now generate the predictions for the features using this model
        logged_str = 'Generating predictions'
        logged_str += ' (expected scores).' if predict_expected_scores else '.'
        logging.info(logged_str)
        # compute minimum and maximum score for expected predictions
        min_score = int(np.rint(trim_min - trim_tolerance))
        max_score = int(np.rint(trim_max + trim_tolerance))
        df_predictions = model.predict(df_features_preprocessed,
                                       min_score,
                                       max_score,
                                       predict_expected=predict_expected_scores)
        train_predictions_mean = df_postproc_params['train_predictions_mean'].values[0]
        train_predictions_sd = df_postproc_params['train_predictions_sd'].values[0]
        df_predictions = self.process_predictions(df_predictions,
                                                  train_predictions_mean,
                                                  train_predictions_sd,
                                                  h1_mean,
                                                  h1_sd,
                                                  trim_min, trim_max,
                                                  trim_tolerance)
        # add back the columns that we were requested to copy if any
        if len(columns_to_copy) > 0:
            df_predictions_with_metadata = pd.merge(df_predictions,
                                                    df_input[['spkitemid'] + columns_to_copy])
            # sanity check: the merge on 'spkitemid' must neither drop nor duplicate rows
            assert(len(df_predictions) == len(df_predictions_with_metadata))
        else:
            df_predictions_with_metadata = df_predictions.copy()
        # we need to make sure that `spkitemid` is the first column
        df_excluded = df_excluded[['spkitemid'] + [column for column in df_excluded
                                                   if column != 'spkitemid']]
        datasets = [{'name': 'features_processed', 'frame': df_features_preprocessed},
                    {'name': 'excluded', 'frame': df_excluded},
                    {'name': 'predictions_with_metadata', 'frame': df_predictions_with_metadata},
                    {'name': 'predictions', 'frame': df_predictions}]
        return config_obj, DataContainer(datasets)
def process_data(self, config_obj, data_container_obj, context='rsmtool'):
"""
Process the date for a given context.
Parameters
----------
config_obj : configuration_parser.Configuration
A configuration object.
data_container_obj : container.DataContainer
A data container object.
context : {'rsmtool', 'rsmeval', 'rsmpredict'}
The context of the tool.
Returns
-------
config_obj : configuration_parser.Configuration
A new configuration object.
data_congtainer : container.DataContainer
A new data container object.
Raises
------
ValueError
If the the context is not in {'rsmtool', 'rsmeval', 'rsmpredict'}
"""
if context == 'rsmtool':
return self.process_data_rsmtool(config_obj, data_container_obj)
elif context == 'rsmeval':
return self.process_data_rsmeval(config_obj, data_container_obj)
elif context == 'rsmpredict':
return self.process_data_rsmpredict(config_obj, data_container_obj)
else:
raise ValueError("The `context` argument must be in the set: "
"{'rsmtool', 'rsmeval', 'rsmpredict'}. "
"You passed `{}`.".format(context))
def preprocess_new_data(self,
df_input,
df_feature_info,
standardize_features=True):
"""
Process a data frame with feature values by applying
:ref:`preprocessing parameters <preprocessing_parameters>`
stored in `df_feature_info`.
Parameters
----------
df_input : pandas DataFrame
Data frame with raw feature values that will be used to generate
the scores. Each feature is stored in a separate column. Each row
corresponds to one response. There should also be a column named
`spkitemid` containing a unique ID for each response.
df_feature_info : pandas DataFrame
Data frame | |
if the sz is odd
Otherwise shifts everything by 0.5*spacing
:param sz: just the spatial dimensions, i.e., XxYxZ
:param spacing: list with spacing information [sx,sy,sz]
:param dtype: numpy data-type ('float32', 'float64', ...)
:return: returns the identity map of dimension dimxXxYxZ
"""
dim = len(sz)
if dim == 1:
id = np.mgrid[0:sz[0]]
elif dim == 2:
id = np.mgrid[0:sz[0], 0:sz[1]]
elif dim == 3:
id = np.mgrid[0:sz[0], 0:sz[1], 0:sz[2]]
else:
raise ValueError('Only dimensions 1-3 are currently supported for the identity map')
# now get it into range [0,(sz-1)*spacing]^d
id = np.array(id.astype(dtype))
if dim == 1:
id = id.reshape(1, sz[0]) # add a dummy first index
for d in range(dim):
id[d] *= spacing[d]
if sz[d]%2==0:
#even
id[d] -= spacing[d]*(sz[d]//2)
else:
#odd
id[d] -= spacing[d]*((sz[d]+1)//2)
# and now store it in a dim+1 array
if dim == 1:
idnp = np.zeros([1, sz[0]], dtype=dtype)
idnp[0, :] = id[0]
elif dim == 2:
idnp = np.zeros([2, sz[0], sz[1]], dtype=dtype)
idnp[0, :, :] = id[0]
idnp[1, :, :] = id[1]
elif dim == 3:
idnp = np.zeros([3, sz[0], sz[1], sz[2]], dtype=dtype)
idnp[0, :, :, :] = id[0]
idnp[1, :, :, :] = id[1]
idnp[2, :, :, :] = id[2]
else:
raise ValueError('Only dimensions 1-3 are currently supported for the centered identity map')
return idnp
#
# def centered_min_normalized_identity_map(sz, spacing, dtype='float32'):
# """
# Returns a centered identity map (with 0 in the middle) if the sz is odd
# Otherwise shifts everything by 0.5*spacing
#
# :param sz: just the spatial dimensions, i.e., XxYxZ
# :param spacing: list with spacing information [sx,sy,sz]
# :param dtype: numpy data-type ('float32', 'float64', ...)
# :return: returns the identity map of dimension dimxXxYxZ
# """
# dim = len(sz)
# if dim == 1:
# id = np.mgrid[0:sz[0]]
# elif dim == 2:
# id = np.mgrid[0:sz[0], 0:sz[1]]
# elif dim == 3:
# id = np.mgrid[0:sz[0], 0:sz[1], 0:sz[2]]
# else:
# raise ValueError('Only dimensions 1-3 are currently supported for the identity map')
#
# min_spacing = np.min(spacing)
# spacing_ratio = spacing/min_spacing
#
#
# # now get it into range [0,(sz-1)*spacing]^d
# id = np.array(id.astype(dtype))
# if dim == 1:
# id = id.reshape(1, sz[0]) # add a dummy first index
#
# for d in range(dim):
# id[d] *= spacing[d]
# if sz[d]%2==0:
# #even
# id[d] -= spacing[d]*(sz[d]//2)
# else:
# #odd
# id[d] -= spacing[d]*((sz[d]+1)//2)
#
# # and now store it in a dim+1 array and rescale by the ratio
# if dim == 1:
# idnp = np.zeros([1, sz[0]], dtype=dtype)
# idnp[0, :] = id[0] * spacing_ratio[0]
# elif dim == 2:
# idnp = np.zeros([2, sz[0], sz[1]], dtype=dtype)
# idnp[0, :, :] = id[0] * spacing_ratio[0]
# idnp[1, :, :] = id[1] * spacing_ratio[1]
# elif dim == 3:
# idnp = np.zeros([3, sz[0], sz[1], sz[2]], dtype=dtype)
# idnp[0, :, :, :] = id[0] * spacing_ratio[0]
# idnp[1, :, :, :] = id[1] * spacing_ratio[1]
# idnp[2, :, :, :] = id[2] * spacing_ratio[2]
# else:
# raise ValueError('Only dimensions 1-3 are currently supported for the centered identity map')
#
# return idnp
#
# def tranfrom_var_list_into_min_normalized_space(var_list,spacing,do_transform=True):
# if do_transform:
# min_spacing = np.min(spacing)
# spacing_ratio =min_spacing/spacing
# dim = spacing.size
# spacing_ratio_t = AdaptVal(torch.Tensor(spacing_ratio))
# sp_sz = [1]+[dim] +[1]*dim
# spacing_ratio_t = spacing_ratio_t.view(*sp_sz)
# new_var_list = [var*spacing_ratio_t if var is not None else None for var in var_list]
# else:
# new_var_list = var_list
# return new_var_list
# def recover_var_list_from_min_normalized_space(var_list,spacing,do_transform=True):
# if do_transform:
# min_spacing = np.min(spacing)
# spacing_ratio =spacing/min_spacing
# dim = spacing.size
# spacing_ratio_t = AdaptVal(torch.Tensor(spacing_ratio))
# sp_sz = [1]+[dim] +[1]*dim
# spacing_ratio_t = spacing_ratio_t.view(*sp_sz)
# new_var_list = [var*spacing_ratio_t if var is not None else None for var in var_list]
# else:
# new_var_list = var_list
# return new_var_list
#
def identity_map(sz, spacing, dtype='float32'):
    """
    Returns an identity map.

    :param sz: just the spatial dimensions, i.e., XxYxZ
    :param spacing: list with spacing information [sx,sy,sz]
    :param dtype: numpy data-type ('float32', 'float64', ...)
    :return: returns the identity map of dimension dimxXxYxZ
    """
    dim = len(sz)
    if dim == 1:
        grid = np.mgrid[0:sz[0]]
    elif dim == 2:
        grid = np.mgrid[0:sz[0], 0:sz[1]]
    elif dim == 3:
        grid = np.mgrid[0:sz[0], 0:sz[1], 0:sz[2]]
    else:
        raise ValueError('Only dimensions 1-3 are currently supported for the identity map')

    # Bring the integer index grid into physical coordinates [0,(sz-1)*spacing]^d.
    grid = np.array(grid.astype(dtype))
    if dim == 1:
        grid = grid.reshape(1, sz[0])  # add a dummy first index so grid is dim x spatial
    for d in range(dim):
        grid[d] *= spacing[d]

    # Copy into a fresh (dim)x(spatial) array of the requested dtype.
    idnp = np.zeros([dim] + list(sz), dtype=dtype)
    for d in range(dim):
        idnp[d, ...] = grid[d]
    return idnp
def omt_boundary_weight_mask(img_sz, spacing, mask_range=5, mask_value=5, smoother_std=0.05):
    """Generate a smooth boundary weight mask for the OMT term.

    Interior voxels get weight 1 while a border band of width ``mask_range``
    keeps the (typically larger) ``mask_value``; the hard step is then
    softened with a Gaussian smoother.

    :param img_sz: spatial size of the image (2D or 3D supported)
    :param spacing: image spacing
    :param mask_range: width of the boundary band in voxels
    :param mask_value: weight assigned to the boundary band
    :param smoother_std: standard deviation of the Gaussian smoother
    :return: detached smoothed mask of shape 1x1x(spatial)
    """
    dim = len(img_sz)
    weight_mask = AdaptVal(torch.ones(1, 1, *img_sz)) * mask_value
    if dim in (2, 3):
        interior = (slice(None), slice(None)) + (slice(mask_range, -mask_range),) * dim
        weight_mask[interior] = 1
    smoother = get_single_gaussian_smoother(smoother_std, img_sz, spacing)
    weight_mask = smoother.smooth(weight_mask)
    return weight_mask.detach()
def momentum_boundary_weight_mask(img_sz, spacing, mask_range=5, smoother_std=0.05, pow=2):
    """Generate a smooth boundary weight mask for the momentum.

    Interior voxels get weight 1 and a border band of width ``mask_range``
    gets weight 0; the mask is Gaussian-smoothed and optionally raised to a
    power to sharpen the falloff towards the boundary.

    :param img_sz: spatial size of the image (2D or 3D supported)
    :param spacing: image spacing
    :param mask_range: width of the boundary band in voxels
    :param smoother_std: standard deviation of the Gaussian smoother
    :param pow: 2 squares the smoothed mask, 3 cubes it; other values leave it unchanged
    :return: smoothed mask of shape 1x1x(spatial)
    """
    dim = len(img_sz)
    weight_mask = AdaptVal(torch.zeros(1, 1, *img_sz))
    if dim in (2, 3):
        interior = (slice(None), slice(None)) + (slice(mask_range, -mask_range),) * dim
        weight_mask[interior] = 1
    smoother = get_single_gaussian_smoother(smoother_std, img_sz, spacing)
    weight_mask = smoother.smooth(weight_mask)
    if pow == 2:
        weight_mask = weight_mask ** 2
    if pow == 3:
        weight_mask = weight_mask * weight_mask * weight_mask
    return weight_mask
# def compute_omt_const(stds,param,dim):
# omt_power = param['forward_model']['smoother']['omt_power']
# omt_weight_penalty = param['forward_model']['smoother']['omt_weight_penalty']
# min_std = torch.min(stds)
# max_std = torch.max(stds)
# omt_const = torch.abs(torch.log(max_std/stds))**omt_power
# omt_const = omt_const/(torch.abs(torch.log(max_std / min_std)) ** omt_power)
# omt_const = omt_const*omt_weight_penalty/(EV.reg_factor_in_mermaid*2)
# sz = [1]+ [len(stds)] +[1]*(dim+1)
# return omt_const.view(*sz)
def get_single_gaussian_smoother(gaussian_std, sz, spacing):
    """Create a Gaussian smoother with a fixed standard deviation.

    :param gaussian_std: standard deviation of the Gaussian kernel
    :param sz: spatial size of the images the smoother is applied to
    :param spacing: image spacing
    :return: smoother instance produced by the smoother factory
    """
    params = pars.ParameterDict()
    params['smoother']['type'] = 'gaussian'
    params['smoother']['gaussian_std'] = gaussian_std
    return sf.SmootherFactory(sz, spacing).create_smoother(params)
def get_warped_label_map(label_map, phi, spacing, sched='nn'):
    """Warp a label map with a transformation map.

    :param label_map: label image to warp
    :param phi: transformation map used for the warping
    :param spacing: image spacing
    :param sched: warping schedule; only 'nn' (nearest-neighbor via
        zeroth-order splines) is currently implemented
    :return: the warped label map
    :raises ValueError: if an unsupported schedule is requested
    """
    if sched == 'nn':
        warped_label_map = compute_warped_image_multiNC(label_map, phi, spacing, spline_order=0, zero_boundary=True)
        # Bug fix: sum the *absolute* deviations from integer labels. The old
        # abs(torch.sum(diff)) let positive and negative deviations cancel,
        # so a badly interpolated map could still pass the check.
        assert torch.sum(torch.abs(warped_label_map.data - warped_label_map.data.round())) < 0.1, \
            "nn interpolation is not precise"
    else:
        raise ValueError(" the label warping method is not implemented")
    return warped_label_map
def t2np(v):
    """
    Takes a torch array and returns it as a numpy array on the cpu

    :param v: torch array
    :return: numpy array
    """
    detached = v.detach()
    return detached.cpu().numpy()
def cxyz_to_xyzc(v):
    """
    Moves the channel dimension of a BxCx(spatial) torch tensor to the end,
    i.e. returns the data in Bx(spatial)xC layout (2D and 3D spatial only;
    other ranks are returned unchanged).

    :param v: torch tensor of shape BxCxXxY or BxCxXxYxZ
    :return: permuted tensor of shape BxXxYxC or BxXxYxZxC
    """
    spatial_dim = v.dim() - 2
    if spatial_dim == 2:
        return v.permute(0, 2, 3, 1)
    if spatial_dim == 3:
        return v.permute(0, 2, 3, 4, 1)
    return v
def get_scalar(v):
    """Convert a float or a single-element numpy array to a plain float.

    :param v: value to convert
    :return: the float value, or ``None`` for any other input
        (made explicit here; the original fell through implicitly)
    """
    if isinstance(v, float):
        return v
    if isinstance(v, np.ndarray) and v.size == 1:
        return float(v)
    return None
def checkNan(x):
    """
    Count the NaN entries in each tensor of a list.

    :param x: list of torch tensors
    :return: list with the number of NaN entries per input tensor
    """
    counts = []
    for elem in x:
        values = elem.detach().cpu().numpy()
        counts.append(len(np.argwhere(np.isnan(values))))
    return counts
def noramlized_spacing_to_smallest(spacing):
    """
    Clamp every spacing entry down to the smallest one.

    NOTE: the input array is modified *in place* and also returned
    (callers may rely on either behavior).

    :param spacing: numpy array of spacings
    :return: the same array with every entry equal to the minimum spacing
    """
    smallest = np.min(spacing)
    spacing[spacing > smallest] = smallest
    return spacing
def time_warped_function(f):
    """Decorator that prints the CUDA execution time of each call to ``f``.

    Timing uses CUDA events, so calling the wrapped function requires a CUDA
    device; the elapsed time in milliseconds is printed to stdout.

    Fixes over the previous version: the wrapper now forwards arbitrary
    positional and keyword arguments (the old one accepted only a single
    ``input`` argument) and preserves ``f``'s metadata via ``functools.wraps``.

    :param f: function to wrap
    :return: wrapped function returning ``f``'s result
    """
    from functools import wraps

    @wraps(f)
    def __time_warped_function(*args, **kwargs):
        start = torch.cuda.Event(enable_timing=True)
        end = torch.cuda.Event(enable_timing=True)
        start.record()
        output = f(*args, **kwargs)
        end.record()
        # Wait for all queued kernels so the end event has a valid timestamp.
        torch.cuda.synchronize()
        print(start.elapsed_time(end))
        return output
    return __time_warped_function
def interoplate_boundary_right(tensor):
    """Linearly extrapolate the rightmost slice along each spatial axis, in place.

    The last entry along every spatial dimension is replaced by
    ``2*x[-2] - x[-3]``, the linear extrapolation of its two inner neighbors.

    Bug fixes over the previous version:
    * dim==1 indexed ``tensor[:, :-2]`` (missing the spatial index), which
      addressed the channel axis instead of the last spatial entry;
    * dim==3 used six indices on a five-dimensional BxCxXxYxZ tensor
      (IndexError) and mismatched four-index slices on the right-hand side.

    :param tensor: tensor of shape BxCxX, BxCxXxY or BxCxXxYxZ (modified in place)
    """
    dim = len(tensor.shape) - 2
    if dim == 1:
        tensor[:, :, -1] = tensor[:, :, -2] + tensor[:, :, -2] - tensor[:, :, -3]
    if dim == 2:
        tensor[:, :, -1, :] = tensor[:, :, -2, :] + tensor[:, :, -2, :] - tensor[:, :, -3, :]
        tensor[:, :, :, -1] = tensor[:, :, :, -2] + tensor[:, :, :, -2] - tensor[:, :, :, -3]
    if dim == 3:
        tensor[:, :, -1, :, :] = tensor[:, :, -2, :, :] + tensor[:, :, -2, :, :] - tensor[:, :, -3, :, :]
        tensor[:, :, :, -1, :] = tensor[:, :, :, -2, :] + tensor[:, :, :, -2, :] - tensor[:, :, :, -3, :]
        tensor[:, :, :, :, -1] = tensor[:, :, :, :, -2] + tensor[:, :, :, :, -2] - tensor[:, :, :, :, -3]
def get_resampled_image(I, spacing, desiredSize, spline_order=1, zero_boundary=False, identity_map=None):
    """
    Resample an image tensor to a desired size.

    :param I: image tensor (B C X Y Z)
    :param spacing: spatial spacing (spx spy spz); when None it is derived
        from the image size assuming a unit domain
    :param desiredSize: desired output size (B C X Y Z)
    :param spline_order: interpolation spline order
    :param zero_boundary: whether to use zero boundary conditions
    :param identity_map: optional precomputed identity map
    :return: the resampled image (the new spacing is discarded)
    """
    if spacing is None:
        img_sz = I.shape[2:]
        spacing = 1. / (np.array(img_sz) - 1)
    if identity_map is not None:  # todo will remove, currently fix for symmetric training
        # Trim the identity map (and the batch entry of desiredSize) when the
        # image has a smaller batch than the precomputed map.
        if I.shape[0] != identity_map.shape[0]:
            batch_size = I.shape[0]
            desiredSize = desiredSize.copy()
            desiredSize[0] = batch_size
            identity_map = identity_map[:batch_size]
    resampled_image, _ = resample_image(I, spacing, desiredSize,
                                        spline_order=spline_order,
                                        zero_boundary=zero_boundary,
                                        identity_map=identity_map)
    return resampled_image
def resample_image(I, spacing, desiredSize, spline_order=1, zero_boundary=False, identity_map=None):
"""
Resample an image to a given desired size
:param I: Input image (expected to be of BxCxXxYxZ format)
:param spacing: array describing the spatial spacing
:param desiredSize: array for the desired size (excluding B and C, i.e, 1 entry for 1D, 2 for 2D, and 3 for 3D)
:return: returns a tuple: the downsampled image, the new spacing after | |
#!/usr/bin/env python3
#
# ******************************************************************
# |docname| - Create a Docker container for the Runestone webservers
# ******************************************************************
# This script provides a user-friendly install process for creating a multi-container Docker application running the Runestone system. It's planned to contain all Docker-related command-line scripts.
#
#
# Build approach
# ==============
# To build a container, this script walks through three steps:
#
# #. The first step occurs when this script is invoked from the terminal/command line, outside Docker. It does some preparation, then invokes the Docker build.
# #. Next, Docker invokes this script from the ``../Dockerfile``. This script installs everything it can.
# #. Finally, Docker invokes this when the container is run. On the first run, this script completes container configuration, then runs the servers. After that, it only runs the servers, since the first-run configuration step is time-consuming.
#
# Since some files are built into the container in step 1 or run only once in step 2, simply editing a file in this repo may not update the file inside the container. Look through the source here to see which files this applies to.
#
# No loops
# --------
# Unlike earlier approaches, this script doesn't cause the container to restart if something goes wrong. Instead, it catches all errors in a try/except block then stops executing so you can see what happened.
#
#
# venvs
# -----
# All Python installs are placed in a virtual environment -- ``/srv/venv`` and also (for dev builds) in a venv managed by Poetry. Before running Python scripts, be sure to activate the relevant venv.
#
#
# Imports
# =======
# These are listed in the order prescribed by PEP 8, with exceptions noted below.
#
# There's a fair amount of bootstrap code here to download and install required imports and their dependencies.
#
# Standard library
# ----------------
from pathlib import Path
import re
import subprocess
import sys
from time import sleep
from traceback import print_exc
from typing import Dict, Tuple
from textwrap import dedent
# Local application
# -----------------
# Everything after this depends on Unix utilities.
# Bail out early on native Windows: every later step relies on Unix
# utilities (sudo, apt-get, shell scripts).
if sys.platform == "win32":
    print("Run this program in WSL/VirtualBox/VMWare/etc.")
    sys.exit()
# Check to see if a program is installed; if not, install it.
def check_install(
    # The command to run to check if the program is installed.
    check_cmd: str,
    # The name of the package containing this program.
    install_package: str
) -> None:
    check_list = check_cmd.split()
    print(f"Checking for {check_list[0]}...")
    try:
        subprocess.run(check_list, check=True)
    # Bug fix: the previous bare ``except:`` also swallowed KeyboardInterrupt
    # and SystemExit. Only a missing binary (FileNotFoundError) or a failing
    # invocation (CalledProcessError) should trigger an install.
    except (FileNotFoundError, subprocess.CalledProcessError):
        print("Not found. Installing...")
        subprocess.run(["sudo", "apt-get", "install", "-y", install_package], check=True)
    else:
        print("Found.")
# We need curl for some (possibly missing) imports -- make sure it's installed.
def check_install_curl() -> None:
    """Ensure the ``curl`` binary is available, installing it when missing."""
    check_install("curl --version", "curl")
# The working directory of this script.
wd = Path(__file__).resolve().parent
# Make ../tests importable so ci_utils can be found when running from the repo.
sys.path.append(str(wd / "../tests"))
try:
    # This unused import triggers the script download if it's not present.
    import ci_utils
except ImportError:
    # ci_utils.py is missing (e.g. this script was downloaded standalone);
    # fetch it from the main repository. curl must be installed first.
    check_install_curl()
    print("Downloading supporting script ci_utils.py...")
    subprocess.run([
        "curl",
        "-fsSLO",
        "https://raw.githubusercontent.com/RunestoneInteractive/RunestoneServer/master/tests/ci_utils.py",
    ], check=True)
# Import the helpers used throughout the rest of this script.
from ci_utils import chdir, env, is_linux, mkdir, xqt
# Third-party
# -----------
# This comes after importing ``ci_utils``, since we use that to install click if necessary.
# True when running inside a virtual environment (venv/virtualenv).
in_venv = sys.prefix != sys.base_prefix
try:
    import click
except ImportError:
    print("Installing click...")
    # Outside a venv, install locally.
    user = '' if in_venv else '--user'
    xqt(
        f"{sys.executable} -m pip install {user} --upgrade pip",
        f"{sys.executable} -m pip install {user} --upgrade click",
    )
    # If pip is upgraded, it won't find click. `Re-load sys.path <https://stackoverflow.com/a/25384923/16038919>`_ to fix this.
    import site
    from importlib import reload
    reload(site)
    import click
# ``build`` command
# =================
# Create a series of subcommands for this CLI.
@click.group()
def cli() -> None:
    # Root click command group; subcommands (e.g. ``build``) attach to it
    # via the ``@cli.command()`` decorator.
    pass
@cli.command()
# Allow users to pass args directly to the underlying ``docker build`` command -- see the `click docs <https://click.palletsprojects.com/en/8.0.x/arguments/#option-like-arguments>`_.
@click.argument("passthrough", nargs=-1, type=click.UNPROCESSED)
@click.option("--arm/--no-arm", default=False, help="Install the ARMv7 toolchain.")
@click.option("--dev/--no-dev", default=False, help="Install tools needed for development with the Runestone.")
@click.option("--pic24/--no-pic24", default=False, help="Install tools needed for development with the PIC24/dsPIC33 family of microcontrollers.")
@click.option("--rust/--no-rust", default=False, help="Install the Rust toolchain.")
@click.option("--tex/--no-tex", default=False, help="Instal LaTeX and related tools.")
def build(arm: bool, dev: bool, passthrough: Tuple, pic24: bool, tex: bool, rust: bool) -> None:
"""
When executed outside a Docker build, build a Docker container for the Runestone webservers.
PASSTHROUGH: These arguments are passed directly to the underlying "docker build" command. To pass options to this command, prefix this argument with "--". For example, use "docker_tools.py build -- -no-cache" instead of "docker_tools.py build -no-cache" (which produces an error).
Inside a Docker build, install all dependencies as root.
"""
# Are we inside the Docker build?
phase = env.IN_DOCKER
if not phase:
# No -- this is the first step in the install.
assert not in_docker()
# Step 1: prepare to run the Docker build
# ---------------------------------------
# Did we add the current user to a group?
did_group_add = False
# Do we need to use ``sudo`` to execute Docker?
docker_sudo = False
# Check to make sure Docker is installed.
try:
xqt("docker --version")
except subprocess.CalledProcessError as e:
check_install_curl()
print(f"Unable to run docker: {e} Installing Docker...")
# Use the `convenience script <https://docs.docker.com/engine/install/ubuntu/#install-using-the-convenience-script>`_.
xqt(
"curl -fsSL https://get.docker.com -o get-docker.sh",
"sudo sh ./get-docker.sh",
"rm get-docker.sh",
# This follows the `Docker docs <https://docs.docker.com/engine/install/linux-postinstall/#manage-docker-as-a-non-root-user>`__.`
"sudo usermod -aG docker ${USER}",
)
# The group add doesn't take effect until the user logs out then back in. Work around it for now.
did_group_add = True
docker_sudo = True
# ...and docker-compose.
try:
xqt("docker-compose --version")
except subprocess.CalledProcessError as e:
print("Unable to run docker-compose: {e} Installing...")
# This is from the `docker-compose install instructions <https://docs.docker.com/compose/install/#install-compose-on-linux-systems>`_.
xqt(
'sudo curl -L "https://github.com/docker/compose/releases/download/1.29.2/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose',
"sudo chmod +x /usr/local/bin/docker-compose",
)
# Are we inside the Runestone repo?
if not (wd / "uwsgi").is_dir():
change_dir = True
# No, we must be running from a downloaded script. Clone the runestone repo.
try:
xqt("git --version")
except Exception as e:
print(f"Unable to run git: {e} Installing...")
xqt("sudo apt-get install -y git")
print("Didn't find the runestone repo. Cloning...")
# Make this in a path that can eventually include web2py.
mkdir("web2py/applications", parents=True)
chdir("web2py/applications")
xqt("git clone https://github.com/RunestoneInteractive/RunestoneServer.git runestone")
chdir("runestone")
else:
# Make sure we're in the root directory of the web2py repo.
chdir(wd.parent)
change_dir = False
# Make sure the ``docker/.env`` file exists.
if not Path(".env").is_file():
xqt("cp docker/.env.prototype .env")
# Do the same for ``1.py``.
one_py = Path("models/1.py")
if not one_py.is_file():
# add a new setting so that institutions can run using a base book like thinkcspy as their course. On Runestone.academy we don't let anyone be an instructor for the base courses because they are open to anyone. This makes for a much less complicated deployment strategy for an institution that just wants to run their own server and use one or two books.
one_py.write_text(dedent("""\
settings.docker_institution_mode = True
settings.jobe_key = ''
settings.jobe_server = 'http://jobe'
settings.bks = "ns"
settings.python_interpreter = "/srv/venv/bin/python3"
# This must match the secret in the BookServer's ``config.py`` ``settings.secret``.
settings.secret = "supersecret"
"""))
# For development, include extra volumes.
dc = Path("docker-compose.override.yml")
if dev and not dc.is_file():
dc.write_text(dedent("""\
version: "3"
services:
runestone:
# Set up for VNC.
environment:
DISPLAY: ${DISPLAY}
ports:
- "5900:5900"
volumes:
- ../../../RunestoneComponents/:/srv/RunestoneComponents
- ../../../BookServer/:/srv/BookServer
# To make Chrome happy.
- /dev/shm:/dev/shm
"""))
# Ensure the user is in the ``www-data`` group.
print("Checking to see if the current user is in the www-data group...")
if "www-data" not in xqt("groups", capture_output=True, text=True).stdout:
xqt('sudo usermod -a -G www-data "$USER"')
did_group_add = True
if dev:
if is_linux:
# To allow VNC access to the container. Not available on OS X.
check_install("gvncviewer -h", "gvncviewer")
# Allow VS Code / remote access to the container. dpkg isn't available on OS X .
check_install("dpkg -l openssh-server", "openssh-server")
# Run the Docker build.
xqt(f'ENABLE_BUILDKIT=1 {"sudo" if docker_sudo else ""} docker build -t runestone/server . --build-arg DOCKER_BUILD_ARGS="{" ".join(sys.argv[1:])}" --progress plain {" ".join(passthrough)}')
# Print thesse messages last; otherwise, it will be lost in all the build noise.
if change_dir:
print('\nDownloaded the RunestoneServer repo. You must "cd web2py/applications/runestone" before running this script again.')
if did_group_add:
print('\nAdded the current user to the www-data and/or docker group(s). You must log out and log back in for this to take effect, or run "su -s ${USER}".')
return
# Step 3 - startup script for container.
if phase == "2":
try:
_build_phase2(arm, dev, pic24, | |
81)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 82)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 83)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 84)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 85)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 86)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 87)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 88)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 89)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 90)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 91)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 92)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 93)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 94)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 95)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 96)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 97)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 98)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 99)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.FMIRR = 2
oe.ALPHA = 0
oe.FHIT_C = 0
oe.F_EXT = 0
oe.F_DEFAULT = 0
oe.SSOUR = 2900.0
oe.SIMAG = 1000.0
oe.THETA = 2.0002
oe.F_CONVEX = 0
oe.FCYL = 1
oe.CIL_ANG = 90.0
oe.T_INCIDENCE = 2.0
oe.T_REFLECTION = 2.0
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 28.5
beam.traceOE(oe, 100)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 101)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 102)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 103)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 104)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 105)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 106)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 107)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 108)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 109)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 110)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 111)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 112)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 113)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 114)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 115)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 116)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 117)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 118)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 119)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 120)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 121)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 122)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 123)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 124)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 125)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 126)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 127)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 128)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 129)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 130)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 131)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 132)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 133)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 134)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 135)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 136)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 137)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 138)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 139)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 140)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 141)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 142)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 143)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 144)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 145)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 146)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 147)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 148)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 149)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 150)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 151)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 152)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 153)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 154)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 155)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 156)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 157)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 158)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 159)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 160)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 161)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 162)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 163)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 164)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 165)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 166)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 167)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 168)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 169)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE = 3
oe.T_IMAGE = 0.0
oe.T_SOURCE = 1.0
beam.traceOE(oe, 170)
pos = calculate_stats(pos, oe)
oe = Shadow.OE()
oe.DUMMY = 1.0
oe.set_empty(ALPHA=0)
oe.FWRITE |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.