Dataset schema (one entry per column: name, type, observed range; ⌀ marks nullable columns):

- hexsha: string (length 40)
- size: int64 (2 – 1.02M)
- ext: string (10 classes)
- lang: string (1 value)
- max_stars_repo_path: string (length 4 – 245)
- max_stars_repo_name: string (length 6 – 130)
- max_stars_repo_head_hexsha: string (length 40)
- max_stars_repo_licenses: list (length 1 – 10)
- max_stars_count: int64 (1 – 191k) ⌀
- max_stars_repo_stars_event_min_datetime: string (length 24) ⌀
- max_stars_repo_stars_event_max_datetime: string (length 24) ⌀
- max_issues_repo_path: string (length 4 – 245)
- max_issues_repo_name: string (length 6 – 130)
- max_issues_repo_head_hexsha: string (length 40)
- max_issues_repo_licenses: list (length 1 – 10)
- max_issues_count: int64 (1 – 67k) ⌀
- max_issues_repo_issues_event_min_datetime: string (length 24) ⌀
- max_issues_repo_issues_event_max_datetime: string (length 24) ⌀
- max_forks_repo_path: string (length 4 – 245)
- max_forks_repo_name: string (length 6 – 130)
- max_forks_repo_head_hexsha: string (length 40)
- max_forks_repo_licenses: list (length 1 – 10)
- max_forks_count: int64 (1 – 105k) ⌀
- max_forks_repo_forks_event_min_datetime: string (length 24) ⌀
- max_forks_repo_forks_event_max_datetime: string (length 24) ⌀
- content: string (length 2 – 1.02M)
- avg_line_length: float64 (1 – 417k)
- max_line_length: int64 (1 – 987k)
- alphanum_fraction: float64 (0 – 1)
- content_no_comment: string (length 0 – 1.01M)
- is_comment_constant_removed: bool (1 class)
- is_sharp_comment_removed: bool (1 class)
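A minimal sketch of how records with this schema might be consumed. The dataset identifier "user/code-dataset" is a placeholder (the real name is not given on this page); the column accesses simply follow the listing above, using standard Hugging Face `datasets` calls.

# Hypothetical usage sketch; "user/code-dataset" is a placeholder name,
# not the actual dataset identifier.
from datasets import load_dataset

ds = load_dataset("user/code-dataset", split="train", streaming=True)
for record in ds:
    assert len(record["hexsha"]) == 40          # full git blob hash
    source = record["content"]                  # raw file text
    stripped = record["content_no_comment"]     # comment-stripped variant
    stars = record["max_stars_count"] or 0      # nullable (⌀) columns yield None
    if record["lang"] == "Python" and stars > 0:
        print(record["max_stars_repo_name"], record["max_stars_repo_path"])
    break  # inspect only the first record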
Record 1:
hexsha: 1c4181a23401c75c1fd869a4dcc6e5a4b784b918 | size: 2,170 | ext: py | lang: Python
max_stars_repo_path: test/actions-multiple/gyptest-all.py | max_stars_repo_name: uilianries/gyp | max_stars_repo_head_hexsha: d995c5b0906571e0037869e3c9b008f344e8ca92 | max_stars_repo_licenses: ["BSD-3-Clause"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: test/actions-multiple/gyptest-all.py | max_issues_repo_name: uilianries/gyp | max_issues_repo_head_hexsha: d995c5b0906571e0037869e3c9b008f344e8ca92 | max_issues_repo_licenses: ["BSD-3-Clause"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: test/actions-multiple/gyptest-all.py | max_forks_repo_name: uilianries/gyp | max_forks_repo_head_hexsha: d995c5b0906571e0037869e3c9b008f344e8ca92 | max_forks_repo_licenses: ["BSD-3-Clause"] | max_forks_count: 3 | max_forks_repo_forks_event_min_datetime: 2018-11-20T12:04:16.000Z | max_forks_repo_forks_event_max_datetime: 2019-07-01T02:52:04.000Z
content:
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies two actions can be attached to the same input files.
"""
import sys
import TestGyp
test = TestGyp.TestGyp()
if test.format == 'xcode-ninja':
test.skip(bug=527)
test.run_gyp('actions.gyp', chdir='src')
test.relocate('src', 'relocate/src')
# Test of fine-grained dependencies for generators that can build individual
# files on demand.
# In particular:
# - TargetA depends on TargetB.
# - TargetA and TargetB are 'none' type with actions attached.
# - TargetA has multiple actions.
# - An output from one of the actions in TargetA (not the first listed),
# is requested as the build target.
# Ensure that TargetB gets built.
#
# This sub-test can only be done with generators/build tools that can
# be asked to build individual files rather than whole targets (make, ninja).
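# For orientation, a sketch of what the actions.gyp used here plausibly
# looks like (the real file is not part of this record; target and file
# names below are illustrative guesses, not the actual contents):
#
#   {'targets': [
#     {'target_name': 'target_a', 'type': 'none',
#      'dependencies': ['target_b'],
#      'actions': [
#        {'action_name': 'action_1', 'inputs': ['input.txt'],
#         'outputs': ['multi1.txt'], 'action': ['python', 'copy.py', ...]},
#        {'action_name': 'action_2', 'inputs': ['input.txt'],
#         'outputs': ['multi2.txt'], 'action': ['python', 'copy.py', ...]},
#      ]},
#     {'target_name': 'target_b', 'type': 'none',
#      'actions': [
#        {'action_name': 'dep_action', 'inputs': ['input.txt'],
#         'outputs': ['multi_dep.txt'], 'action': ['python', 'copy.py', ...]},
#      ]},
#   ]}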
if test.format in ['make', 'ninja']:
# Select location of target based on generator.
if test.format == 'make':
target = 'multi2.txt'
elif test.format == 'ninja':
if sys.platform in ['win32', 'cygwin']:
target = '..\\..\\multi2.txt'
else:
target = '../../multi2.txt'
else:
assert False
test.build('actions.gyp', chdir='relocate/src', target=target)
test.must_contain('relocate/src/multi2.txt', 'hello there')
test.must_contain('relocate/src/multi_dep.txt', 'hello there')
# Test that two actions can be attached to the same inputs.
test.build('actions.gyp', test.ALL, chdir='relocate/src')
test.must_contain('relocate/src/output1.txt', 'hello there')
test.must_contain('relocate/src/output2.txt', 'hello there')
test.must_contain('relocate/src/output3.txt', 'hello there')
test.must_contain('relocate/src/output4.txt', 'hello there')
# Test that process_outputs_as_sources works in conjunction with merged
# actions.
test.run_built_executable(
'multiple_action_source_filter',
chdir='relocate/src',
stdout=(
'{\n'
'bar\n'
'car\n'
'dar\n'
'ear\n'
'}\n'
),
)
test.pass_test()
avg_line_length: 28.552632 | max_line_length: 77 | alphanum_fraction: 0.689401
content_no_comment:
import sys
import TestGyp
test = TestGyp.TestGyp()
if test.format == 'xcode-ninja':
test.skip(bug=527)
test.run_gyp('actions.gyp', chdir='src')
test.relocate('src', 'relocate/src')
if test.format in ['make', 'ninja']:
if test.format == 'make':
target = 'multi2.txt'
elif test.format == 'ninja':
if sys.platform in ['win32', 'cygwin']:
target = '..\\..\\multi2.txt'
else:
target = '../../multi2.txt'
else:
assert False
test.build('actions.gyp', chdir='relocate/src', target=target)
test.must_contain('relocate/src/multi2.txt', 'hello there')
test.must_contain('relocate/src/multi_dep.txt', 'hello there')
test.build('actions.gyp', test.ALL, chdir='relocate/src')
test.must_contain('relocate/src/output1.txt', 'hello there')
test.must_contain('relocate/src/output2.txt', 'hello there')
test.must_contain('relocate/src/output3.txt', 'hello there')
test.must_contain('relocate/src/output4.txt', 'hello there')
test.run_built_executable(
'multiple_action_source_filter',
chdir='relocate/src',
stdout=(
'{\n'
'bar\n'
'car\n'
'dar\n'
'ear\n'
'}\n'
),
)
test.pass_test()
is_comment_constant_removed: true | is_sharp_comment_removed: true
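The three numeric columns in this record are easy to recompute from `content`. The sketch below assumes the obvious definitions (mean and max of per-line lengths, alphanumeric characters over total characters); the dataset's actual build pipeline may differ in rounding or edge cases.

# Assumed definitions; not taken from the dataset's build code.
def line_stats(content: str) -> tuple[float, int, float]:
    lines = content.splitlines()
    avg_line_length = sum(len(line) for line in lines) / max(len(lines), 1)
    max_line_length = max((len(line) for line in lines), default=0)
    alphanum_fraction = sum(c.isalnum() for c in content) / max(len(content), 1)
    return avg_line_length, max_line_length, alphanum_fraction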
Record 2:
hexsha: 1c4181d2be11ed270ed6292731e8d25169755c2d | size: 42,380 | ext: py | lang: Python
max_stars_repo_path: tests/test_regressions.py | max_stars_repo_name: takizuka/drf-spectacular | max_stars_repo_head_hexsha: 208429b7ace8c37a79e5e51bd8532dfaa8e0c853 | max_stars_repo_licenses: ["BSD-3-Clause"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: tests/test_regressions.py | max_issues_repo_name: takizuka/drf-spectacular | max_issues_repo_head_hexsha: 208429b7ace8c37a79e5e51bd8532dfaa8e0c853 | max_issues_repo_licenses: ["BSD-3-Clause"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: tests/test_regressions.py | max_forks_repo_name: takizuka/drf-spectacular | max_forks_repo_head_hexsha: 208429b7ace8c37a79e5e51bd8532dfaa8e0c853 | max_forks_repo_licenses: ["BSD-3-Clause"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
import uuid
from unittest import mock
import pytest
from django.core import validators
from django.db import models
from django.db.models import fields
from django.urls import path, re_path
from rest_framework import (
filters, generics, mixins, pagination, parsers, routers, serializers, views, viewsets,
)
from rest_framework.authentication import TokenAuthentication
from rest_framework.decorators import action, api_view
from rest_framework.views import APIView
from drf_spectacular.extensions import OpenApiSerializerExtension
from drf_spectacular.generators import SchemaGenerator
from drf_spectacular.hooks import preprocess_exclude_path_format
from drf_spectacular.openapi import AutoSchema
from drf_spectacular.types import OpenApiTypes
from drf_spectacular.utils import (
OpenApiParameter, extend_schema, extend_schema_field, extend_schema_serializer,
extend_schema_view, inline_serializer,
)
from drf_spectacular.validation import validate_schema
from tests import generate_schema, get_request_schema, get_response_schema
from tests.models import SimpleModel, SimpleSerializer
def test_primary_key_read_only_queryset_not_found(no_warnings):
# the culprit - looks like a feature not a bug.
# https://github.com/encode/django-rest-framework/blame/4d9f9eb192c5c1ffe4fa9210b90b9adbb00c3fdd/rest_framework/utils/field_mapping.py#L271
class M1(models.Model):
pass # pragma: no cover
class M2(models.Model):
m1_r = models.ForeignKey(M1, on_delete=models.CASCADE)
m1_rw = models.ForeignKey(M1, on_delete=models.CASCADE)
class M2Serializer(serializers.ModelSerializer):
class Meta:
fields = ['m1_rw', 'm1_r']
read_only_fields = ['m1_r'] # this produces the bug
model = M2
class M2Viewset(viewsets.ReadOnlyModelViewSet):
serializer_class = M2Serializer
queryset = M2.objects.none()
schema = generate_schema('m2', M2Viewset)
props = schema['components']['schemas']['M2']['properties']
assert props['m1_rw']['type'] == 'integer'
assert props['m1_r']['type'] == 'integer'
def test_path_implicit_required(no_warnings):
class M2Serializer(serializers.Serializer):
pass # pragma: no cover
class M2Viewset(viewsets.GenericViewSet):
serializer_class = M2Serializer
@extend_schema(parameters=[OpenApiParameter('id', str, 'path')])
def retrieve(self, request, *args, **kwargs):
pass # pragma: no cover
generate_schema('m2', M2Viewset)
def test_free_form_responses(no_warnings):
class XAPIView(APIView):
@extend_schema(responses={200: OpenApiTypes.OBJECT})
def get(self, request):
pass # pragma: no cover
class YAPIView(APIView):
@extend_schema(responses=OpenApiTypes.OBJECT)
def get(self, request):
pass # pragma: no cover
generator = SchemaGenerator(patterns=[
re_path(r'^x$', XAPIView.as_view(), name='x'),
re_path(r'^y$', YAPIView.as_view(), name='y'),
])
schema = generator.get_schema(request=None, public=True)
validate_schema(schema)
@mock.patch(
target='drf_spectacular.settings.spectacular_settings.APPEND_COMPONENTS',
new={'schemas': {'SomeExtraComponent': {'type': 'integer'}}}
)
def test_append_extra_components(no_warnings):
class XSerializer(serializers.Serializer):
id = serializers.UUIDField()
class XAPIView(APIView):
@extend_schema(responses={200: XSerializer})
def get(self, request):
pass # pragma: no cover
generator = SchemaGenerator(patterns=[
re_path(r'^x$', XAPIView.as_view(), name='x'),
])
schema = generator.get_schema(request=None, public=True)
assert len(schema['components']['schemas']) == 2
validate_schema(schema)
def test_serializer_retrieval_from_view(no_warnings):
class UnusedSerializer(serializers.Serializer):
pass # pragma: no cover
class XSerializer(serializers.Serializer):
id = serializers.UUIDField()
class YSerializer(serializers.Serializer):
id = serializers.UUIDField()
class X1Viewset(mixins.ListModelMixin, viewsets.GenericViewSet):
serializer_class = UnusedSerializer
def get_serializer(self):
return XSerializer()
class X2Viewset(mixins.ListModelMixin, viewsets.GenericViewSet):
def get_serializer_class(self):
return YSerializer
router = routers.SimpleRouter()
router.register('x1', X1Viewset, basename='x1')
router.register('x2', X2Viewset, basename='x2')
generator = SchemaGenerator(patterns=router.urls)
schema = generator.get_schema(request=None, public=True)
validate_schema(schema)
assert len(schema['components']['schemas']) == 2
assert 'Unused' not in schema['components']['schemas']
def test_retrieve_on_apiview_get(no_warnings):
class XSerializer(serializers.Serializer):
id = serializers.UUIDField()
class XApiView(APIView):
authentication_classes = []
@extend_schema(
parameters=[OpenApiParameter('id', OpenApiTypes.INT, OpenApiParameter.PATH)],
responses={200: XSerializer},
)
def get(self, request):
pass # pragma: no cover
schema = generate_schema('x', view=XApiView)
operation = schema['paths']['/x']['get']
assert operation['operationId'] == 'x_retrieve'
operation_schema = get_response_schema(operation)
assert '$ref' in operation_schema and 'type' not in operation_schema
def test_list_on_apiview_get(no_warnings):
class XSerializer(serializers.Serializer):
id = serializers.UUIDField()
class XApiView(APIView):
authentication_classes = []
@extend_schema(
parameters=[OpenApiParameter('id', OpenApiTypes.INT, OpenApiParameter.PATH)],
responses={200: XSerializer(many=True)},
)
def get(self, request):
pass # pragma: no cover
schema = generate_schema('x', view=XApiView)
operation = schema['paths']['/x']['get']
assert operation['operationId'] == 'x_list'
operation_schema = get_response_schema(operation)
assert operation_schema['type'] == 'array'
def test_multi_method_action(no_warnings):
class DummySerializer(serializers.Serializer):
id = serializers.UUIDField()
class UpdateSerializer(serializers.Serializer):
id = serializers.UUIDField()
class CreateSerializer(serializers.Serializer):
id = serializers.UUIDField()
class XViewset(viewsets.GenericViewSet):
serializer_class = DummySerializer
# basic usage
@extend_schema(request=UpdateSerializer, methods=['PUT'])
@extend_schema(request=CreateSerializer, methods=['POST'])
@action(detail=False, methods=['PUT', 'POST'])
def multi(self, request, *args, **kwargs):
pass # pragma: no cover
# bolt-on decorator variation
@extend_schema(request=CreateSerializer)
@action(detail=False, methods=['POST'])
def multi2(self, request, *args, **kwargs):
pass # pragma: no cover
@extend_schema(request=UpdateSerializer)
@multi2.mapping.put
def multi2put(self, request, *args, **kwargs):
pass # pragma: no cover
schema = generate_schema('x', XViewset)
def get_req_body(s):
return s['requestBody']['content']['application/json']['schema']['$ref']
assert get_req_body(schema['paths']['/x/multi/']['put']) == '#/components/schemas/Update'
assert get_req_body(schema['paths']['/x/multi/']['post']) == '#/components/schemas/Create'
assert get_req_body(schema['paths']['/x/multi2/']['put']) == '#/components/schemas/Update'
assert get_req_body(schema['paths']['/x/multi2/']['post']) == '#/components/schemas/Create'
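# Note: the two variants above are equivalent. Stacking
# @extend_schema(..., methods=[...]) on a single multi-method @action and
# attaching a separate @extend_schema to the handler registered via
# @multi2.mapping.put both route PUT to the Update component and POST to
# the Create component, as the assertions verify.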
def test_serializer_class_on_apiview(no_warnings):
class XSerializer(serializers.Serializer):
field = serializers.UUIDField()
class XView(views.APIView):
serializer_class = XSerializer # not supported by DRF but pick it up anyway
def get(self, request):
pass # pragma: no cover
def post(self, request):
pass # pragma: no cover
schema = generate_schema('x', view=XView)
comp = '#/components/schemas/X'
assert get_response_schema(schema['paths']['/x']['get'])['$ref'] == comp
assert get_response_schema(schema['paths']['/x']['post'])['$ref'] == comp
assert schema['paths']['/x']['post']['requestBody']['content']['application/json']['schema']['$ref'] == comp
def test_customized_list_serializer():
class X(models.Model):
position = models.IntegerField()
class XSerializer(serializers.ModelSerializer):
class Meta:
model = X
fields = ("id", "position")
class XListUpdateSerializer(serializers.ListSerializer):
child = XSerializer()
class XAPIView(generics.GenericAPIView):
model = X
serializer_class = XListUpdateSerializer
def put(self, request, *args, **kwargs):
pass # pragma: no cover
schema = generate_schema('x', view=XAPIView)
operation = schema['paths']['/x']['put']
comp = '#/components/schemas/X'
assert get_request_schema(operation)['type'] == 'array'
assert get_request_schema(operation)['items']['$ref'] == comp
assert get_response_schema(operation)['type'] == 'array'
assert get_response_schema(operation)['items']['$ref'] == comp
assert operation['operationId'] == 'x_update'
assert len(schema['components']['schemas']) == 1 and 'X' in schema['components']['schemas']
def test_api_view_decorator(no_warnings):
@extend_schema(responses=OpenApiTypes.FLOAT)
@api_view(['GET'])
def pi(request):
pass # pragma: no cover
schema = generate_schema('x', view_function=pi)
operation = schema['paths']['/x']['get']
assert get_response_schema(operation)['type'] == 'number'
def test_api_view_decorator_multi(no_warnings):
@extend_schema(request=OpenApiTypes.FLOAT, responses=OpenApiTypes.INT, methods=['POST'])
@extend_schema(responses=OpenApiTypes.FLOAT, methods=['GET'])
@api_view(['GET', 'POST'])
def pi(request):
pass # pragma: no cover
schema = generate_schema('x', view_function=pi)
operation = schema['paths']['/x']['get']
assert get_response_schema(operation)['type'] == 'number'
operation = schema['paths']['/x']['post']
assert get_request_schema(operation)['type'] == 'number'
assert get_response_schema(operation)['type'] == 'integer'
def test_pk_and_no_id(no_warnings):
class XModel(models.Model):
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
class YModel(models.Model):
x = models.OneToOneField(XModel, primary_key=True, on_delete=models.CASCADE)
class YSerializer(serializers.ModelSerializer):
class Meta:
model = YModel
fields = '__all__'
class YViewSet(viewsets.ReadOnlyModelViewSet):
serializer_class = YSerializer
queryset = YModel.objects.all()
schema = generate_schema('y', YViewSet)
assert schema['components']['schemas']['Y']['properties']['x']['format'] == 'uuid'
@pytest.mark.parametrize('allowed', [None, ['json', 'NoRendererAvailable']])
def test_drf_format_suffix_parameter(no_warnings, allowed):
from rest_framework.urlpatterns import format_suffix_patterns
@extend_schema(responses=OpenApiTypes.FLOAT)
@api_view(['GET'])
def view_func(request, format=None):
pass # pragma: no cover
urlpatterns = [
path('pi/', view_func),
path('pi/subpath', view_func),
path('pick', view_func),
]
urlpatterns = format_suffix_patterns(urlpatterns, allowed=allowed)
generator = SchemaGenerator(patterns=urlpatterns)
schema = generator.get_schema(request=None, public=True)
validate_schema(schema)
# Only six alternatives are created, as /pi/{format} would be
# /pi/.json which is not supported.
assert list(schema['paths'].keys()) == [
'/pi/',
'/pi{format}',
'/pi/subpath',
'/pi/subpath{format}',
'/pick',
'/pick{format}',
]
assert schema['paths']['/pi/']['get']['operationId'] == 'pi_retrieve'
assert schema['paths']['/pi{format}']['get']['operationId'] == 'pi_formatted_retrieve'
format_parameter = schema['paths']['/pi{format}']['get']['parameters'][0]
assert format_parameter['name'] == 'format'
assert format_parameter['required'] is True
assert format_parameter['in'] == 'path'
assert format_parameter['schema']['type'] == 'string'
# When allowed is not specified, all of the default formats are possible.
# Even if other values are provided, only formats with an available
# renderer end up in the enum, hence the single '.json' entry.
@mock.patch(
'drf_spectacular.settings.spectacular_settings.PREPROCESSING_HOOKS',
[preprocess_exclude_path_format]
)
def test_drf_format_suffix_parameter_exclude(no_warnings):
from rest_framework.urlpatterns import format_suffix_patterns
@extend_schema(responses=OpenApiTypes.FLOAT)
@api_view(['GET'])
def view_func(request, format=None):
pass # pragma: no cover
urlpatterns = format_suffix_patterns([
path('pi', view_func),
])
generator = SchemaGenerator(patterns=urlpatterns)
schema = generator.get_schema(request=None, public=True)
validate_schema(schema)
assert list(schema['paths'].keys()) == ['/pi']
def test_regex_path_parameter_discovery(no_warnings):
@extend_schema(responses=OpenApiTypes.FLOAT)
@api_view(['GET'])
def pi(request, foo):
pass # pragma: no cover
urlpatterns = [re_path(r'^/pi/<int:precision>', pi)]
generator = SchemaGenerator(patterns=urlpatterns)
schema = generator.get_schema(request=None, public=True)
validate_schema(schema)
parameter = schema['paths']['/pi/{precision}']['get']['parameters'][0]
assert parameter['name'] == 'precision'
assert parameter['in'] == 'path'
assert parameter['schema']['type'] == 'integer'
def test_lib_serializer_naming_collision_resolution(no_warnings):
""" parity test in tests.test_warnings.test_serializer_name_reuse """
def x_lib1():
class XSerializer(serializers.Serializer):
x = serializers.UUIDField()
return XSerializer
def x_lib2():
class XSerializer(serializers.Serializer):
x = serializers.IntegerField()
return XSerializer
x_lib1, x_lib2 = x_lib1(), x_lib2()
class XAPIView(APIView):
@extend_schema(request=x_lib1, responses=x_lib2)
def post(self, request):
pass # pragma: no cover
class Lib2XSerializerRename(OpenApiSerializerExtension):
target_class = x_lib2 # also accepts import strings
def get_name(self):
return 'RenamedLib2X'
schema = generate_schema('x', view=XAPIView)
operation = schema['paths']['/x']['post']
assert get_request_schema(operation)['$ref'] == '#/components/schemas/X'
assert get_response_schema(operation)['$ref'] == '#/components/schemas/RenamedLib2X'
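# The extension renames only the second library's serializer; the first
# XSerializer keeps the default 'X' component name, which resolves the
# naming collision between the two identically named classes.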
def test_owned_serializer_naming_override_with_ref_name(no_warnings):
def x_owned1():
class XSerializer(serializers.Serializer):
x = serializers.UUIDField()
return XSerializer
def x_owned2():
class XSerializer(serializers.Serializer):
x = serializers.IntegerField()
class Meta:
ref_name = 'Y'
return XSerializer
x_owned1, x_owned2 = x_owned1(), x_owned2()
class XAPIView(APIView):
@extend_schema(request=x_owned1, responses=x_owned2)
def post(self, request):
pass # pragma: no cover
schema = generate_schema('x', view=XAPIView)
operation = schema['paths']['/x']['post']
assert get_request_schema(operation)['$ref'] == '#/components/schemas/X'
assert get_response_schema(operation)['$ref'] == '#/components/schemas/Y'
def test_custom_model_field_from_typed_field(no_warnings):
class CustomIntegerField(fields.IntegerField):
pass # pragma: no cover
class CustomTypedFieldModel(models.Model):
custom_int_field = CustomIntegerField()
class XSerializer(serializers.ModelSerializer):
class Meta:
model = CustomTypedFieldModel
fields = '__all__'
class XAPIView(APIView):
@extend_schema(responses=XSerializer)
def get(self, request):
pass # pragma: no cover
schema = generate_schema('x', view=XAPIView)
component = schema['components']['schemas']['X']
assert component['properties']['custom_int_field']['type'] == 'integer'
def test_custom_model_field_from_base_field(no_warnings):
class CustomIntegerField(fields.Field):
def get_internal_type(self):
return 'IntegerField'
class CustomBaseFieldModel(models.Model):
custom_int_field = CustomIntegerField()
class XSerializer(serializers.ModelSerializer):
class Meta:
model = CustomBaseFieldModel
fields = '__all__'
class XAPIView(APIView):
@extend_schema(responses=XSerializer)
def get(self, request):
pass # pragma: no cover
schema = generate_schema('x', view=XAPIView)
component = schema['components']['schemas']['X']
assert component['properties']['custom_int_field']['type'] == 'integer'
def test_follow_field_source_through_intermediate_property_or_function(no_warnings):
class FieldSourceTraversalModel2(models.Model):
x = models.IntegerField(choices=[(1, '1'), (2, '2')])
y = models.IntegerField(choices=[(1, '1'), (2, '2'), (3, '3')])
class FieldSourceTraversalModel1(models.Model):
@property
def prop(self) -> FieldSourceTraversalModel2: # property is required for traversal
return # pragma: no cover
def func(self) -> FieldSourceTraversalModel2: # return type hint is required for traversal
return # pragma: no cover
class XSerializer(serializers.ModelSerializer):
prop = serializers.ReadOnlyField(source='prop.x')
func = serializers.ReadOnlyField(source='func.y')
class Meta:
model = FieldSourceTraversalModel1
fields = '__all__'
class XAPIView(APIView):
@extend_schema(responses=XSerializer)
def get(self, request):
pass # pragma: no cover
# this checks if field type is correctly estimated AND field was initialized
# with the model parameters (choices)
schema = generate_schema('x', view=XAPIView)
assert schema['components']['schemas']['X']['properties']['func']['readOnly'] is True
assert schema['components']['schemas']['X']['properties']['prop']['readOnly'] is True
assert 'enum' in schema['components']['schemas']['PropEnum']
assert 'enum' in schema['components']['schemas']['FuncEnum']
assert schema['components']['schemas']['PropEnum']['type'] == 'integer'
assert schema['components']['schemas']['FuncEnum']['type'] == 'integer'
def test_viewset_list_with_envelope(no_warnings):
class XSerializer(serializers.Serializer):
x = serializers.IntegerField()
def enveloper(serializer_class, list):
@extend_schema_serializer(many=False)
class EnvelopeSerializer(serializers.Serializer):
status = serializers.BooleanField()
data = XSerializer(many=list)
class Meta:
ref_name = 'Enveloped{}{}'.format(
serializer_class.__name__.replace("Serializer", ""),
"List" if list else "",
)
return EnvelopeSerializer
class XViewset(mixins.ListModelMixin, mixins.RetrieveModelMixin, viewsets.GenericViewSet):
@extend_schema(responses=enveloper(XSerializer, True))
def list(self, request, *args, **kwargs):
return super().list(request, *args, **kwargs) # pragma: no cover
@extend_schema(
responses=enveloper(XSerializer, False),
parameters=[OpenApiParameter('id', int, OpenApiParameter.PATH)],
)
def retrieve(self, request, *args, **kwargs):
return super().retrieve(request, *args, **kwargs) # pragma: no cover
schema = generate_schema('x', viewset=XViewset)
operation_list = schema['paths']['/x/']['get']
assert operation_list['operationId'] == 'x_list'
assert get_response_schema(operation_list)['$ref'] == '#/components/schemas/EnvelopedXList'
operation_retrieve = schema['paths']['/x/{id}/']['get']
assert operation_retrieve['operationId'] == 'x_retrieve'
assert get_response_schema(operation_retrieve)['$ref'] == '#/components/schemas/EnvelopedX'
@mock.patch('drf_spectacular.settings.spectacular_settings.COMPONENT_SPLIT_REQUEST', True)
def test_component_split_request():
class XSerializer(serializers.Serializer):
ro = serializers.IntegerField(read_only=True)
rw = serializers.IntegerField()
wo = serializers.IntegerField(write_only=True)
@extend_schema(request=XSerializer, responses=XSerializer)
@api_view(['POST'])
def pi(request, format=None):
pass # pragma: no cover
schema = generate_schema('/x', view_function=pi)
operation = schema['paths']['/x']['post']
assert get_response_schema(operation)['$ref'] == '#/components/schemas/X'
assert get_request_schema(operation)['$ref'] == '#/components/schemas/XRequest'
assert len(schema['components']['schemas']['X']['properties']) == 2
assert 'wo' not in schema['components']['schemas']['X']['properties']
assert len(schema['components']['schemas']['XRequest']['properties']) == 2
assert 'ro' not in schema['components']['schemas']['XRequest']['properties']
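# With COMPONENT_SPLIT_REQUEST enabled, the serializer is emitted as two
# components: 'X' for responses (read-only and read-write fields, 'wo'
# dropped) and 'XRequest' for request bodies (write-only and read-write
# fields, 'ro' dropped), which is what the assertions above check.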
def test_list_api_view(no_warnings):
class XSerializer(serializers.Serializer):
id = serializers.IntegerField()
class XView(generics.ListAPIView):
serializer_class = XSerializer
schema = generate_schema('/x', view=XView)
operation = schema['paths']['/x']['get']
assert operation['operationId'] == 'x_list'
assert get_response_schema(operation)['type'] == 'array'
@mock.patch('drf_spectacular.settings.spectacular_settings.COMPONENT_SPLIT_REQUEST', True)
def test_file_field_duality_on_split_request(no_warnings):
class XSerializer(serializers.Serializer):
file = serializers.FileField()
class XView(generics.ListCreateAPIView):
serializer_class = XSerializer
parser_classes = [parsers.MultiPartParser]
schema = generate_schema('/x', view=XView)
assert get_response_schema(
schema['paths']['/x']['get']
)['items']['$ref'] == '#/components/schemas/X'
assert get_request_schema(
schema['paths']['/x']['post'], content_type='multipart/form-data'
)['$ref'] == '#/components/schemas/XRequest'
assert schema['components']['schemas']['X']['properties']['file']['format'] == 'uri'
assert schema['components']['schemas']['XRequest']['properties']['file']['format'] == 'binary'
@mock.patch('drf_spectacular.settings.spectacular_settings.COMPONENT_SPLIT_REQUEST', True)
def test_component_split_nested_ro_wo_serializer(no_warnings):
class RoSerializer(serializers.Serializer):
ro_field = serializers.IntegerField(read_only=True)
class WoSerializer(serializers.Serializer):
wo_field = serializers.IntegerField(write_only=True)
class XSerializer(serializers.Serializer):
ro = RoSerializer()
wo = WoSerializer()
class XView(generics.ListCreateAPIView):
serializer_class = XSerializer
schema = generate_schema('/x', view=XView)
assert 'RoRequest' not in schema['components']['schemas']
assert 'Wo' not in schema['components']['schemas']
assert len(schema['components']['schemas']['X']['properties']) == 1
assert len(schema['components']['schemas']['XRequest']['properties']) == 1
@mock.patch('drf_spectacular.settings.spectacular_settings.COMPONENT_SPLIT_REQUEST', True)
def test_component_split_nested_explicit_ro_wo_serializer(no_warnings):
class NestedSerializer(serializers.Serializer):
field = serializers.IntegerField()
class XSerializer(serializers.Serializer):
ro = NestedSerializer(read_only=True)
wo = NestedSerializer(write_only=True, required=False)
class XView(generics.ListCreateAPIView):
serializer_class = XSerializer
schema = generate_schema('/x', view=XView)
assert 'NestedRequest' in schema['components']['schemas']
assert 'Nested' in schema['components']['schemas']
assert len(schema['components']['schemas']['X']['properties']) == 1
assert len(schema['components']['schemas']['XRequest']['properties']) == 1
def test_read_only_many_related_field(no_warnings):
class ManyRelatedTargetModel(models.Model):
field = models.IntegerField()
class ManyRelatedModel(models.Model):
field_m2m = models.ManyToManyField(ManyRelatedTargetModel)
field_m2m_ro = models.ManyToManyField(ManyRelatedTargetModel)
class XSerializer(serializers.ModelSerializer):
class Meta:
model = ManyRelatedModel
fields = '__all__'
read_only_fields = ['field_m2m_ro']
class XAPIView(APIView):
@extend_schema(responses=XSerializer)
def get(self, request):
pass # pragma: no cover
schema = generate_schema('x', view=XAPIView)
assert schema['components']['schemas']['X']['properties']['field_m2m_ro']['readOnly'] is True
# readOnly only needed on outer object, not in items
assert 'readOnly' not in schema['components']['schemas']['X']['properties']['field_m2m_ro']['items']
assert 'readOnly' not in schema['components']['schemas']['X']['properties']['field_m2m']
def test_extension_subclass_discovery(no_warnings):
from rest_framework.authentication import TokenAuthentication
class CustomAuth(TokenAuthentication):
pass
class XSerializer(serializers.Serializer):
field = serializers.IntegerField()
class XAPIView(APIView):
authentication_classes = [CustomAuth]
@extend_schema(responses=XSerializer)
def get(self, request):
pass # pragma: no cover
generate_schema('x', view=XAPIView)
def test_extend_schema_no_req_no_res(no_warnings):
class XAPIView(APIView):
@extend_schema(request=None, responses=None)
def post(self, request):
pass # pragma: no cover
schema = generate_schema('/x', view=XAPIView)
operation = schema['paths']['/x']['post']
assert 'requestBody' not in operation
assert len(operation['responses']['200']) == 1
assert 'description' in operation['responses']['200']
def test_extend_schema_field_exclusion(no_warnings):
@extend_schema_field(None)
class CustomField(serializers.IntegerField):
pass # pragma: no cover
class XSerializer(serializers.Serializer):
id = serializers.IntegerField()
hidden = CustomField()
class XView(generics.CreateAPIView):
serializer_class = XSerializer
schema = generate_schema('/x', view=XView)
assert 'hidden' not in schema['components']['schemas']['X']['properties']
def test_extend_schema_serializer_field_exclusion(no_warnings):
@extend_schema_serializer(exclude_fields=['hidden1', 'hidden2'])
class XSerializer(serializers.Serializer):
integer = serializers.IntegerField()
hidden1 = serializers.IntegerField()
hidden2 = serializers.CharField()
class XView(generics.ListCreateAPIView):
serializer_class = XSerializer
schema = generate_schema('/x', view=XView)
assert 'integer' in schema['components']['schemas']['X']['properties']
assert 'hidden1' not in schema['components']['schemas']['X']['properties']
assert 'hidden2' not in schema['components']['schemas']['X']['properties']
def test_schema_contains_only_urlpatterns_first_match(no_warnings):
class XSerializer(serializers.Serializer):
integer = serializers.IntegerField()
class XAPIView(APIView):
@extend_schema(responses=XSerializer)
def get(self, request):
pass # pragma: no cover
class YSerializer(serializers.Serializer):
integer = serializers.DateTimeField()
class YAPIView(APIView):
@extend_schema(responses=YSerializer)
def get(self, request):
pass # pragma: no cover
urlpatterns = [
path('api/x/', XAPIView.as_view()), # only first occurrence is used
path('api/x/', YAPIView.as_view()),
]
generator = SchemaGenerator(patterns=urlpatterns)
schema = generator.get_schema(request=None, public=True)
validate_schema(schema)
assert len(schema['components']['schemas']) == 1
assert 'X' in schema['components']['schemas']
operation = schema['paths']['/api/x/']['get']
assert '#/components/schemas/X' in get_response_schema(operation)['$ref']
def test_auto_schema_and_extend_parameters(no_warnings):
class CustomAutoSchema(AutoSchema):
def get_override_parameters(self):
return [
OpenApiParameter("id", str, OpenApiParameter.PATH),
OpenApiParameter("foo", str, deprecated=True),
OpenApiParameter("bar", str),
]
class XSerializer(serializers.Serializer):
id = serializers.IntegerField()
with mock.patch('rest_framework.settings.api_settings.DEFAULT_SCHEMA_CLASS', CustomAutoSchema):
class XViewSet(viewsets.GenericViewSet):
serializer_class = XSerializer
@extend_schema(parameters=[OpenApiParameter("bar", int)])
def list(self, request, *args, **kwargs):
pass # pragma: no cover
schema = generate_schema('x', XViewSet)
parameters = schema['paths']['/x/']['get']['parameters']
assert parameters[0]['name'] == 'bar' and parameters[0]['schema']['type'] == 'integer'
assert parameters[1]['name'] == 'foo' and parameters[1]['schema']['type'] == 'string'
assert parameters[1]['deprecated'] is True
assert parameters[2]['name'] == 'id'
def test_list_serializer_with_field_child():
class XSerializer(serializers.Serializer):
field = serializers.ListSerializer(child=serializers.IntegerField())
class XAPIView(views.APIView):
serializer_class = XSerializer
def post(self, request, *args, **kwargs):
pass # pragma: no cover
# assumption on Serializer functionality
assert XSerializer({'field': [1, 2, 3]}).data['field'] == [1, 2, 3]
schema = generate_schema('x', view=XAPIView)
assert get_request_schema(schema['paths']['/x']['post'])['$ref'] == '#/components/schemas/X'
assert get_response_schema(schema['paths']['/x']['post'])['$ref'] == '#/components/schemas/X'
properties = schema['components']['schemas']['X']['properties']
assert properties['field']['type'] == 'array'
assert properties['field']['items']['type'] == 'integer'
def test_list_serializer_with_field_child_on_extend_schema(no_warnings):
class XAPIView(APIView):
@extend_schema(
request=serializers.ListSerializer(child=serializers.IntegerField()),
responses=serializers.ListSerializer(child=serializers.IntegerField()),
)
def post(self, request):
pass # pragma: no cover
schema = generate_schema('x', view=XAPIView)
req_schema = get_request_schema(schema['paths']['/x']['post'])
res_schema = get_response_schema(schema['paths']['/x']['post'])
for s in [req_schema, res_schema]:
assert s['type'] == 'array'
assert s['items']['type'] == 'integer'
def test_list_serializer_with_pagination(no_warnings):
class GenreSerializer(serializers.Serializer):
genre = serializers.CharField()
class XViewSet(viewsets.GenericViewSet):
pagination_class = pagination.LimitOffsetPagination
@extend_schema(responses=GenreSerializer(many=True))
@action(methods=["GET"], detail=False)
def genre(self, request, *args, **kwargs):
pass # pragma: no cover
schema = generate_schema('/x', XViewSet)
response = get_response_schema(schema['paths']['/x/genre/']['get'])
assert response['$ref'] == '#/components/schemas/PaginatedGenreList'
assert 'PaginatedGenreList' in schema['components']['schemas']
assert 'Genre' in schema['components']['schemas']
def test_inline_serializer(no_warnings):
@extend_schema(
responses=inline_serializer(
name='InlineOneOffSerializer',
fields={
'char': serializers.CharField(),
'choice': serializers.ChoiceField(choices=(('A', 'A'), ('B', 'B'))),
'nested_inline': inline_serializer(
name='NestedInlineOneOffSerializer',
fields={
'char': serializers.CharField(),
'int': serializers.IntegerField(),
},
allow_null=True,
)
}
)
)
@api_view(['GET'])
def one_off(request, foo):
pass # pragma: no cover
schema = generate_schema('x', view_function=one_off)
assert get_response_schema(schema['paths']['/x']['get'])['$ref'] == (
'#/components/schemas/InlineOneOff'
)
assert len(schema['components']['schemas']) == 3
one_off = schema['components']['schemas']['InlineOneOff']
one_off_nested = schema['components']['schemas']['NestedInlineOneOff']
assert len(one_off['properties']) == 3
assert one_off['properties']['nested_inline']['nullable'] is True
assert one_off['properties']['nested_inline']['allOf'][0]['$ref'] == (
'#/components/schemas/NestedInlineOneOff'
)
assert len(one_off_nested['properties']) == 2
@mock.patch('drf_spectacular.settings.spectacular_settings.CAMELIZE_NAMES', True)
def test_camelize_names(no_warnings):
@extend_schema(responses=OpenApiTypes.FLOAT)
@api_view(['GET'])
def view_func(request, format=None):
pass # pragma: no cover
schema = generate_schema('/multi/step/path/<str:some_name>/', view_function=view_func)
operation = schema['paths']['/multi/step/path/{someName}/']['get']
assert operation['parameters'][0]['name'] == 'someName'
assert operation['operationId'] == 'multiStepPathRetrieve'
def test_mocked_request_with_get_queryset_get_serializer_class(no_warnings):
class XViewset(viewsets.ReadOnlyModelViewSet):
def get_serializer_class(self):
assert not self.request.user.is_authenticated
assert self.action in ['retrieve', 'list']
return SimpleSerializer
def get_queryset(self):
assert not self.request.user.is_authenticated
assert self.request.method == 'GET'
return SimpleModel.objects.none()
generate_schema('x', XViewset)
def test_queryset_filter_and_ordering_only_on_list(no_warnings):
class XViewset(viewsets.ReadOnlyModelViewSet):
queryset = SimpleModel.objects.none()
serializer_class = SimpleSerializer
filter_backends = (filters.SearchFilter, filters.OrderingFilter)
schema = generate_schema('x', XViewset)
list_parameters = schema['paths']['/x/']['get']['parameters']
assert len(list_parameters) == 2
assert list_parameters[0]['name'] == 'ordering'
assert list_parameters[1]['name'] == 'search'
retrieve_parameters = schema['paths']['/x/{id}/']['get']['parameters']
assert len(retrieve_parameters) == 1
assert retrieve_parameters[0]['name'] == 'id'
def test_pagination(no_warnings):
class XViewset(viewsets.ReadOnlyModelViewSet):
queryset = SimpleModel.objects.none()
serializer_class = SimpleSerializer
pagination_class = pagination.LimitOffsetPagination
schema = generate_schema('x', XViewset)
# query params only on list
list_parameters = schema['paths']['/x/']['get']['parameters']
assert len(list_parameters) == 2
assert list_parameters[0]['name'] == 'limit'
assert list_parameters[1]['name'] == 'offset'
# no query params on retrieve
retrieve_parameters = schema['paths']['/x/{id}/']['get']['parameters']
assert len(retrieve_parameters) == 1
assert retrieve_parameters[0]['name'] == 'id'
# substituted component on list
assert 'Simple' in schema['components']['schemas']
assert 'PaginatedSimpleList' in schema['components']['schemas']
substitution = schema['components']['schemas']['PaginatedSimpleList']
assert substitution['type'] == 'object'
assert substitution['properties']['results']['items']['$ref'] == '#/components/schemas/Simple'
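# The substituted component follows LimitOffsetPagination's envelope,
# roughly {count, next, previous, results: [Simple, ...]}, with the item
# schema referenced from #/components/schemas/Simple.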
def test_pagination_reusage(no_warnings):
class XViewset(viewsets.ReadOnlyModelViewSet):
queryset = SimpleModel.objects.all()
serializer_class = SimpleSerializer
pagination_class = pagination.LimitOffsetPagination
@extend_schema(responses={'200': SimpleSerializer(many=True)})
@action(methods=['GET'], detail=False)
def custom_action(self):
pass # pragma: no cover
class YViewset(XViewset):
serializer_class = SimpleSerializer
router = routers.SimpleRouter()
router.register('x', XViewset, basename='x')
router.register('y', YViewset, basename='y')
generator = SchemaGenerator(patterns=router.urls)
schema = generator.get_schema(request=None, public=True)
validate_schema(schema)
@mock.patch(
'drf_spectacular.settings.spectacular_settings.SECURITY',
[{'apiKeyAuth': []}]
)
@mock.patch(
'drf_spectacular.settings.spectacular_settings.APPEND_COMPONENTS',
{"securitySchemes": {"apiKeyAuth": {"type": "apiKey", "in": "header", "name": "Authorization"}}}
)
def test_manual_security_method_addition(no_warnings):
@extend_schema(responses=OpenApiTypes.FLOAT)
@api_view(['GET'])
def view_func(request, format=None):
pass # pragma: no cover
schema = generate_schema('/x/', view_function=view_func)
operation_security = schema['paths']['/x/']['get']['security']
schema_security = schema['components']['securitySchemes']
assert len(operation_security) == 4 and any(['apiKeyAuth' in os for os in operation_security])
assert len(schema_security) == 3 and 'apiKeyAuth' in schema_security
def test_basic_viewset_without_queryset_with_explicit_pk_typing(no_warnings):
class XSerializer(serializers.Serializer):
field = fields.IntegerField()
class XViewset(viewsets.ViewSet):
serializer_class = XSerializer
def retrieve(self, request, *args, **kwargs):
pass # pragma: no cover
urlpatterns = [
path("api/<path:some_var>/<uuid:pk>/", XViewset.as_view({"get": "retrieve"}))
]
generator = SchemaGenerator(patterns=urlpatterns)
schema = generator.get_schema(request=None, public=True)
validate_schema(schema)
operation = schema['paths']['/api/{some_var}/{id}/']['get']
assert operation['parameters'][0]['name'] == 'id'
assert operation['parameters'][0]['schema']['format'] == 'uuid'
def test_multiple_media_types(no_warnings):
@extend_schema(responses={
(200, 'application/json'): OpenApiTypes.OBJECT,
(200, 'application/pdf'): OpenApiTypes.BINARY,
})
class XAPIView(APIView):
def get(self, request):
pass # pragma: no cover
schema = generate_schema('x', view=XAPIView)
content = schema['paths']['/x']['get']['responses']['200']['content']
assert content['application/pdf']['schema']['format'] == 'binary'
assert content['application/json']['schema']['type'] == 'object'
def test_token_auth_with_bearer_keyword(no_warnings):
class CustomTokenAuthentication(TokenAuthentication):
keyword = 'Bearer'
@extend_schema(responses=OpenApiTypes.FLOAT)
@api_view(['GET'])
def view_func(request, format=None):
pass # pragma: no cover
view_func.cls.authentication_classes = [CustomTokenAuthentication]
schema = generate_schema('x', view_function=view_func)
assert schema['components']['securitySchemes']['tokenAuth']['scheme'] == 'bearer'
@pytest.mark.parametrize('responses', [
str,
OpenApiTypes.STR,
{'200': str},
{'200': OpenApiTypes.STR},
])
def test_string_response_variations(no_warnings, responses):
@extend_schema(responses=responses)
@api_view(['GET'])
def view_func(request, format=None):
pass # pragma: no cover
schema = generate_schema('x', view_function=view_func)
assert get_response_schema(schema['paths']['/x']['get'])['type'] == 'string'
def test_exclude_discovered_parameter(no_warnings):
@extend_schema_view(list=extend_schema(parameters=[
# keep 'offset', remove 'limit', and add 'random'
OpenApiParameter('limit', exclude=True),
OpenApiParameter('random', bool),
]))
class XViewset(viewsets.ReadOnlyModelViewSet):
queryset = SimpleModel.objects.all()
serializer_class = SimpleSerializer
pagination_class = pagination.LimitOffsetPagination
schema = generate_schema('x', XViewset)
parameters = schema['paths']['/x/']['get']['parameters']
assert len(parameters) == 2
assert parameters[0]['name'] == 'offset'
assert parameters[1]['name'] == 'random'
def test_manual_decimal_validator():
# manually test this validator as it is not part of the default workflow
class XSerializer(serializers.Serializer):
field = serializers.CharField(
validators=[validators.DecimalValidator(max_digits=4, decimal_places=2)]
)
@extend_schema(request=XSerializer, responses=XSerializer)
@api_view(['POST'])
def view_func(request, format=None):
pass # pragma: no cover
schema = generate_schema('x', view_function=view_func)
field = schema['components']['schemas']['X']['properties']['field']
assert field['maximum'] == 100
assert field['minimum'] == -100
avg_line_length: 36.724437 | max_line_length: 143 | alphanum_fraction: 0.674445
content_no_comment:
import uuid
from unittest import mock
import pytest
from django.core import validators
from django.db import models
from django.db.models import fields
from django.urls import path, re_path
from rest_framework import (
filters, generics, mixins, pagination, parsers, routers, serializers, views, viewsets,
)
from rest_framework.authentication import TokenAuthentication
from rest_framework.decorators import action, api_view
from rest_framework.views import APIView
from drf_spectacular.extensions import OpenApiSerializerExtension
from drf_spectacular.generators import SchemaGenerator
from drf_spectacular.hooks import preprocess_exclude_path_format
from drf_spectacular.openapi import AutoSchema
from drf_spectacular.types import OpenApiTypes
from drf_spectacular.utils import (
OpenApiParameter, extend_schema, extend_schema_field, extend_schema_serializer,
extend_schema_view, inline_serializer,
)
from drf_spectacular.validation import validate_schema
from tests import generate_schema, get_request_schema, get_response_schema
from tests.models import SimpleModel, SimpleSerializer
def test_primary_key_read_only_queryset_not_found(no_warnings):
class M1(models.Model):
pass
class M2(models.Model):
m1_r = models.ForeignKey(M1, on_delete=models.CASCADE)
m1_rw = models.ForeignKey(M1, on_delete=models.CASCADE)
class M2Serializer(serializers.ModelSerializer):
class Meta:
fields = ['m1_rw', 'm1_r']
read_only_fields = ['m1_r']
model = M2
class M2Viewset(viewsets.ReadOnlyModelViewSet):
serializer_class = M2Serializer
queryset = M2.objects.none()
schema = generate_schema('m2', M2Viewset)
props = schema['components']['schemas']['M2']['properties']
assert props['m1_rw']['type'] == 'integer'
assert props['m1_r']['type'] == 'integer'
def test_path_implicit_required(no_warnings):
class M2Serializer(serializers.Serializer):
pass
class M2Viewset(viewsets.GenericViewSet):
serializer_class = M2Serializer
@extend_schema(parameters=[OpenApiParameter('id', str, 'path')])
def retrieve(self, request, *args, **kwargs):
pass
generate_schema('m2', M2Viewset)
def test_free_form_responses(no_warnings):
class XAPIView(APIView):
@extend_schema(responses={200: OpenApiTypes.OBJECT})
def get(self, request):
pass
class YAPIView(APIView):
@extend_schema(responses=OpenApiTypes.OBJECT)
def get(self, request):
pass
generator = SchemaGenerator(patterns=[
re_path(r'^x$', XAPIView.as_view(), name='x'),
re_path(r'^y$', YAPIView.as_view(), name='y'),
])
schema = generator.get_schema(request=None, public=True)
validate_schema(schema)
@mock.patch(
target='drf_spectacular.settings.spectacular_settings.APPEND_COMPONENTS',
new={'schemas': {'SomeExtraComponent': {'type': 'integer'}}}
)
def test_append_extra_components(no_warnings):
class XSerializer(serializers.Serializer):
id = serializers.UUIDField()
class XAPIView(APIView):
@extend_schema(responses={200: XSerializer})
def get(self, request):
pass
generator = SchemaGenerator(patterns=[
re_path(r'^x$', XAPIView.as_view(), name='x'),
])
schema = generator.get_schema(request=None, public=True)
assert len(schema['components']['schemas']) == 2
validate_schema(schema)
def test_serializer_retrieval_from_view(no_warnings):
class UnusedSerializer(serializers.Serializer):
pass
class XSerializer(serializers.Serializer):
id = serializers.UUIDField()
class YSerializer(serializers.Serializer):
id = serializers.UUIDField()
class X1Viewset(mixins.ListModelMixin, viewsets.GenericViewSet):
serializer_class = UnusedSerializer
def get_serializer(self):
return XSerializer()
class X2Viewset(mixins.ListModelMixin, viewsets.GenericViewSet):
def get_serializer_class(self):
return YSerializer
router = routers.SimpleRouter()
router.register('x1', X1Viewset, basename='x1')
router.register('x2', X2Viewset, basename='x2')
generator = SchemaGenerator(patterns=router.urls)
schema = generator.get_schema(request=None, public=True)
validate_schema(schema)
assert len(schema['components']['schemas']) == 2
assert 'Unused' not in schema['components']['schemas']
def test_retrieve_on_apiview_get(no_warnings):
class XSerializer(serializers.Serializer):
id = serializers.UUIDField()
class XApiView(APIView):
authentication_classes = []
@extend_schema(
parameters=[OpenApiParameter('id', OpenApiTypes.INT, OpenApiParameter.PATH)],
responses={200: XSerializer},
)
def get(self, request):
pass
schema = generate_schema('x', view=XApiView)
operation = schema['paths']['/x']['get']
assert operation['operationId'] == 'x_retrieve'
operation_schema = get_response_schema(operation)
assert '$ref' in operation_schema and 'type' not in operation_schema
def test_list_on_apiview_get(no_warnings):
class XSerializer(serializers.Serializer):
id = serializers.UUIDField()
class XApiView(APIView):
authentication_classes = []
@extend_schema(
parameters=[OpenApiParameter('id', OpenApiTypes.INT, OpenApiParameter.PATH)],
responses={200: XSerializer(many=True)},
)
def get(self, request):
pass
schema = generate_schema('x', view=XApiView)
operation = schema['paths']['/x']['get']
assert operation['operationId'] == 'x_list'
operation_schema = get_response_schema(operation)
assert operation_schema['type'] == 'array'
def test_multi_method_action(no_warnings):
class DummySerializer(serializers.Serializer):
id = serializers.UUIDField()
class UpdateSerializer(serializers.Serializer):
id = serializers.UUIDField()
class CreateSerializer(serializers.Serializer):
id = serializers.UUIDField()
class XViewset(viewsets.GenericViewSet):
serializer_class = DummySerializer
@extend_schema(request=UpdateSerializer, methods=['PUT'])
@extend_schema(request=CreateSerializer, methods=['POST'])
@action(detail=False, methods=['PUT', 'POST'])
def multi(self, request, *args, **kwargs):
pass
@extend_schema(request=CreateSerializer)
@action(detail=False, methods=['POST'])
def multi2(self, request, *args, **kwargs):
pass
@extend_schema(request=UpdateSerializer)
@multi2.mapping.put
def multi2put(self, request, *args, **kwargs):
pass
schema = generate_schema('x', XViewset)
def get_req_body(s):
return s['requestBody']['content']['application/json']['schema']['$ref']
assert get_req_body(schema['paths']['/x/multi/']['put']) == '#/components/schemas/Update'
assert get_req_body(schema['paths']['/x/multi/']['post']) == '#/components/schemas/Create'
assert get_req_body(schema['paths']['/x/multi2/']['put']) == '#/components/schemas/Update'
assert get_req_body(schema['paths']['/x/multi2/']['post']) == '#/components/schemas/Create'
def test_serializer_class_on_apiview(no_warnings):
class XSerializer(serializers.Serializer):
field = serializers.UUIDField()
class XView(views.APIView):
serializer_class = XSerializer
def get(self, request):
pass
def post(self, request):
pass
schema = generate_schema('x', view=XView)
comp = '#/components/schemas/X'
assert get_response_schema(schema['paths']['/x']['get'])['$ref'] == comp
assert get_response_schema(schema['paths']['/x']['post'])['$ref'] == comp
assert schema['paths']['/x']['post']['requestBody']['content']['application/json']['schema']['$ref'] == comp
def test_customized_list_serializer():
class X(models.Model):
position = models.IntegerField()
class XSerializer(serializers.ModelSerializer):
class Meta:
model = X
fields = ("id", "position")
class XListUpdateSerializer(serializers.ListSerializer):
child = XSerializer()
class XAPIView(generics.GenericAPIView):
model = X
serializer_class = XListUpdateSerializer
def put(self, request, *args, **kwargs):
pass
schema = generate_schema('x', view=XAPIView)
operation = schema['paths']['/x']['put']
comp = '#/components/schemas/X'
assert get_request_schema(operation)['type'] == 'array'
assert get_request_schema(operation)['items']['$ref'] == comp
assert get_response_schema(operation)['type'] == 'array'
assert get_response_schema(operation)['items']['$ref'] == comp
assert operation['operationId'] == 'x_update'
assert len(schema['components']['schemas']) == 1 and 'X' in schema['components']['schemas']
def test_api_view_decorator(no_warnings):
@extend_schema(responses=OpenApiTypes.FLOAT)
@api_view(['GET'])
def pi(request):
pass
schema = generate_schema('x', view_function=pi)
operation = schema['paths']['/x']['get']
assert get_response_schema(operation)['type'] == 'number'
def test_api_view_decorator_multi(no_warnings):
@extend_schema(request=OpenApiTypes.FLOAT, responses=OpenApiTypes.INT, methods=['POST'])
@extend_schema(responses=OpenApiTypes.FLOAT, methods=['GET'])
@api_view(['GET', 'POST'])
def pi(request):
pass
schema = generate_schema('x', view_function=pi)
operation = schema['paths']['/x']['get']
assert get_response_schema(operation)['type'] == 'number'
operation = schema['paths']['/x']['post']
assert get_request_schema(operation)['type'] == 'number'
assert get_response_schema(operation)['type'] == 'integer'
def test_pk_and_no_id(no_warnings):
class XModel(models.Model):
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
class YModel(models.Model):
x = models.OneToOneField(XModel, primary_key=True, on_delete=models.CASCADE)
class YSerializer(serializers.ModelSerializer):
class Meta:
model = YModel
fields = '__all__'
class YViewSet(viewsets.ReadOnlyModelViewSet):
serializer_class = YSerializer
queryset = YModel.objects.all()
schema = generate_schema('y', YViewSet)
assert schema['components']['schemas']['Y']['properties']['x']['format'] == 'uuid'
@pytest.mark.parametrize('allowed', [None, ['json', 'NoRendererAvailable']])
def test_drf_format_suffix_parameter(no_warnings, allowed):
from rest_framework.urlpatterns import format_suffix_patterns
@extend_schema(responses=OpenApiTypes.FLOAT)
@api_view(['GET'])
def view_func(request, format=None):
pass
urlpatterns = [
path('pi/', view_func),
path('pi/subpath', view_func),
path('pick', view_func),
]
urlpatterns = format_suffix_patterns(urlpatterns, allowed=allowed)
generator = SchemaGenerator(patterns=urlpatterns)
schema = generator.get_schema(request=None, public=True)
validate_schema(schema)
assert list(schema['paths'].keys()) == [
'/pi/',
'/pi{format}',
'/pi/subpath',
'/pi/subpath{format}',
'/pick',
'/pick{format}',
]
assert schema['paths']['/pi/']['get']['operationId'] == 'pi_retrieve'
assert schema['paths']['/pi{format}']['get']['operationId'] == 'pi_formatted_retrieve'
format_parameter = schema['paths']['/pi{format}']['get']['parameters'][0]
assert format_parameter['name'] == 'format'
assert format_parameter['required'] is True
assert format_parameter['in'] == 'path'
assert format_parameter['schema']['type'] == 'string'
assert format_parameter['schema']['enum'] == ['.json']
@mock.patch(
'drf_spectacular.settings.spectacular_settings.PREPROCESSING_HOOKS',
[preprocess_exclude_path_format]
)
def test_drf_format_suffix_parameter_exclude(no_warnings):
from rest_framework.urlpatterns import format_suffix_patterns
@extend_schema(responses=OpenApiTypes.FLOAT)
@api_view(['GET'])
def view_func(request, format=None):
pass
urlpatterns = format_suffix_patterns([
path('pi', view_func),
])
generator = SchemaGenerator(patterns=urlpatterns)
schema = generator.get_schema(request=None, public=True)
validate_schema(schema)
assert list(schema['paths'].keys()) == ['/pi']
def test_regex_path_parameter_discovery(no_warnings):
@extend_schema(responses=OpenApiTypes.FLOAT)
@api_view(['GET'])
def pi(request, foo):
pass
urlpatterns = [re_path(r'^/pi/<int:precision>', pi)]
generator = SchemaGenerator(patterns=urlpatterns)
schema = generator.get_schema(request=None, public=True)
validate_schema(schema)
parameter = schema['paths']['/pi/{precision}']['get']['parameters'][0]
assert parameter['name'] == 'precision'
assert parameter['in'] == 'path'
assert parameter['schema']['type'] == 'integer'
def test_lib_serializer_naming_collision_resolution(no_warnings):
def x_lib1():
class XSerializer(serializers.Serializer):
x = serializers.UUIDField()
return XSerializer
def x_lib2():
class XSerializer(serializers.Serializer):
x = serializers.IntegerField()
return XSerializer
x_lib1, x_lib2 = x_lib1(), x_lib2()
class XAPIView(APIView):
@extend_schema(request=x_lib1, responses=x_lib2)
def post(self, request):
pass
class Lib2XSerializerRename(OpenApiSerializerExtension):
target_class = x_lib2
def get_name(self):
return 'RenamedLib2X'
schema = generate_schema('x', view=XAPIView)
operation = schema['paths']['/x']['post']
assert get_request_schema(operation)['$ref'] == '#/components/schemas/X'
assert get_response_schema(operation)['$ref'] == '#/components/schemas/RenamedLib2X'
def test_owned_serializer_naming_override_with_ref_name(no_warnings):
def x_owned1():
class XSerializer(serializers.Serializer):
x = serializers.UUIDField()
return XSerializer
def x_owned2():
class XSerializer(serializers.Serializer):
x = serializers.IntegerField()
class Meta:
ref_name = 'Y'
return XSerializer
x_owned1, x_owned2 = x_owned1(), x_owned2()
class XAPIView(APIView):
@extend_schema(request=x_owned1, responses=x_owned2)
def post(self, request):
pass
schema = generate_schema('x', view=XAPIView)
operation = schema['paths']['/x']['post']
assert get_request_schema(operation)['$ref'] == '#/components/schemas/X'
assert get_response_schema(operation)['$ref'] == '#/components/schemas/Y'
def test_custom_model_field_from_typed_field(no_warnings):
class CustomIntegerField(fields.IntegerField):
pass
class CustomTypedFieldModel(models.Model):
custom_int_field = CustomIntegerField()
class XSerializer(serializers.ModelSerializer):
class Meta:
model = CustomTypedFieldModel
fields = '__all__'
class XAPIView(APIView):
@extend_schema(responses=XSerializer)
def get(self, request):
pass
schema = generate_schema('x', view=XAPIView)
component = schema['components']['schemas']['X']
assert component['properties']['custom_int_field']['type'] == 'integer'
def test_custom_model_field_from_base_field(no_warnings):
class CustomIntegerField(fields.Field):
def get_internal_type(self):
return 'IntegerField'
class CustomBaseFieldModel(models.Model):
custom_int_field = CustomIntegerField()
class XSerializer(serializers.ModelSerializer):
class Meta:
model = CustomBaseFieldModel
fields = '__all__'
class XAPIView(APIView):
@extend_schema(responses=XSerializer)
def get(self, request):
pass
schema = generate_schema('x', view=XAPIView)
component = schema['components']['schemas']['X']
assert component['properties']['custom_int_field']['type'] == 'integer'
def test_follow_field_source_through_intermediate_property_or_function(no_warnings):
class FieldSourceTraversalModel2(models.Model):
x = models.IntegerField(choices=[(1, '1'), (2, '2')])
y = models.IntegerField(choices=[(1, '1'), (2, '2'), (3, '3')])
class FieldSourceTraversalModel1(models.Model):
@property
def prop(self) -> FieldSourceTraversalModel2:
return
def func(self) -> FieldSourceTraversalModel2:
return
class XSerializer(serializers.ModelSerializer):
prop = serializers.ReadOnlyField(source='prop.x')
func = serializers.ReadOnlyField(source='func.y')
class Meta:
model = FieldSourceTraversalModel1
fields = '__all__'
class XAPIView(APIView):
@extend_schema(responses=XSerializer)
def get(self, request):
pass
schema = generate_schema('x', view=XAPIView)
assert schema['components']['schemas']['X']['properties']['func']['readOnly'] is True
assert schema['components']['schemas']['X']['properties']['prop']['readOnly'] is True
assert 'enum' in schema['components']['schemas']['PropEnum']
assert 'enum' in schema['components']['schemas']['FuncEnum']
assert schema['components']['schemas']['PropEnum']['type'] == 'integer'
assert schema['components']['schemas']['FuncEnum']['type'] == 'integer'
def test_viewset_list_with_envelope(no_warnings):
class XSerializer(serializers.Serializer):
x = serializers.IntegerField()
def enveloper(serializer_class, list):
@extend_schema_serializer(many=False)
class EnvelopeSerializer(serializers.Serializer):
status = serializers.BooleanField()
data = XSerializer(many=list)
class Meta:
ref_name = 'Enveloped{}{}'.format(
serializer_class.__name__.replace("Serializer", ""),
"List" if list else "",
)
return EnvelopeSerializer
class XViewset(mixins.ListModelMixin, mixins.RetrieveModelMixin, viewsets.GenericViewSet):
@extend_schema(responses=enveloper(XSerializer, True))
def list(self, request, *args, **kwargs):
return super().list(request, *args, **kwargs)
@extend_schema(
responses=enveloper(XSerializer, False),
parameters=[OpenApiParameter('id', int, OpenApiParameter.PATH)],
)
def retrieve(self, request, *args, **kwargs):
return super().retrieve(request, *args, **kwargs)
schema = generate_schema('x', viewset=XViewset)
operation_list = schema['paths']['/x/']['get']
assert operation_list['operationId'] == 'x_list'
assert get_response_schema(operation_list)['$ref'] == '#/components/schemas/EnvelopedXList'
operation_retrieve = schema['paths']['/x/{id}/']['get']
assert operation_retrieve['operationId'] == 'x_retrieve'
assert get_response_schema(operation_retrieve)['$ref'] == '#/components/schemas/EnvelopedX'
@mock.patch('drf_spectacular.settings.spectacular_settings.COMPONENT_SPLIT_REQUEST', True)
def test_component_split_request():
class XSerializer(serializers.Serializer):
ro = serializers.IntegerField(read_only=True)
rw = serializers.IntegerField()
wo = serializers.IntegerField(write_only=True)
@extend_schema(request=XSerializer, responses=XSerializer)
@api_view(['POST'])
def pi(request, format=None):
pass
schema = generate_schema('/x', view_function=pi)
operation = schema['paths']['/x']['post']
assert get_response_schema(operation)['$ref'] == '#/components/schemas/X'
assert get_request_schema(operation)['$ref'] == '#/components/schemas/XRequest'
assert len(schema['components']['schemas']['X']['properties']) == 2
assert 'wo' not in schema['components']['schemas']['X']['properties']
assert len(schema['components']['schemas']['XRequest']['properties']) == 2
assert 'ro' not in schema['components']['schemas']['XRequest']['properties']
def test_list_api_view(no_warnings):
class XSerializer(serializers.Serializer):
id = serializers.IntegerField()
class XView(generics.ListAPIView):
serializer_class = XSerializer
schema = generate_schema('/x', view=XView)
operation = schema['paths']['/x']['get']
assert operation['operationId'] == 'x_list'
assert get_response_schema(operation)['type'] == 'array'
@mock.patch('drf_spectacular.settings.spectacular_settings.COMPONENT_SPLIT_REQUEST', True)
def test_file_field_duality_on_split_request(no_warnings):
class XSerializer(serializers.Serializer):
file = serializers.FileField()
class XView(generics.ListCreateAPIView):
serializer_class = XSerializer
parser_classes = [parsers.MultiPartParser]
schema = generate_schema('/x', view=XView)
assert get_response_schema(
schema['paths']['/x']['get']
)['items']['$ref'] == '#/components/schemas/X'
assert get_request_schema(
schema['paths']['/x']['post'], content_type='multipart/form-data'
)['$ref'] == '#/components/schemas/XRequest'
assert schema['components']['schemas']['X']['properties']['file']['format'] == 'uri'
assert schema['components']['schemas']['XRequest']['properties']['file']['format'] == 'binary'
@mock.patch('drf_spectacular.settings.spectacular_settings.COMPONENT_SPLIT_REQUEST', True)
def test_component_split_nested_ro_wo_serializer(no_warnings):
class RoSerializer(serializers.Serializer):
ro_field = serializers.IntegerField(read_only=True)
class WoSerializer(serializers.Serializer):
wo_field = serializers.IntegerField(write_only=True)
class XSerializer(serializers.Serializer):
ro = RoSerializer()
wo = WoSerializer()
class XView(generics.ListCreateAPIView):
serializer_class = XSerializer
schema = generate_schema('/x', view=XView)
assert 'RoRequest' not in schema['components']['schemas']
assert 'Wo' not in schema['components']['schemas']
assert len(schema['components']['schemas']['X']['properties']) == 1
assert len(schema['components']['schemas']['XRequest']['properties']) == 1
@mock.patch('drf_spectacular.settings.spectacular_settings.COMPONENT_SPLIT_REQUEST', True)
def test_component_split_nested_explicit_ro_wo_serializer(no_warnings):
class NestedSerializer(serializers.Serializer):
field = serializers.IntegerField()
class XSerializer(serializers.Serializer):
ro = NestedSerializer(read_only=True)
wo = NestedSerializer(write_only=True, required=False)
class XView(generics.ListCreateAPIView):
serializer_class = XSerializer
schema = generate_schema('/x', view=XView)
assert 'NestedRequest' in schema['components']['schemas']
assert 'Nested' in schema['components']['schemas']
assert len(schema['components']['schemas']['X']['properties']) == 1
assert len(schema['components']['schemas']['XRequest']['properties']) == 1
def test_read_only_many_related_field(no_warnings):
class ManyRelatedTargetModel(models.Model):
field = models.IntegerField()
class ManyRelatedModel(models.Model):
field_m2m = models.ManyToManyField(ManyRelatedTargetModel)
field_m2m_ro = models.ManyToManyField(ManyRelatedTargetModel)
class XSerializer(serializers.ModelSerializer):
class Meta:
model = ManyRelatedModel
fields = '__all__'
read_only_fields = ['field_m2m_ro']
class XAPIView(APIView):
@extend_schema(responses=XSerializer)
def get(self, request):
pass
schema = generate_schema('x', view=XAPIView)
assert schema['components']['schemas']['X']['properties']['field_m2m_ro']['readOnly'] is True
assert 'readOnly' not in schema['components']['schemas']['X']['properties']['field_m2m_ro']['items']
assert 'readOnly' not in schema['components']['schemas']['X']['properties']['field_m2m']
def test_extension_subclass_discovery(no_warnings):
from rest_framework.authentication import TokenAuthentication
class CustomAuth(TokenAuthentication):
pass
class XSerializer(serializers.Serializer):
        field = serializers.IntegerField()
class XAPIView(APIView):
authentication_classes = [CustomAuth]
@extend_schema(responses=XSerializer)
def get(self, request):
pass
generate_schema('x', view=XAPIView)
def test_extend_schema_no_req_no_res(no_warnings):
class XAPIView(APIView):
@extend_schema(request=None, responses=None)
def post(self, request):
pass
schema = generate_schema('/x', view=XAPIView)
operation = schema['paths']['/x']['post']
assert 'requestBody' not in operation
assert len(operation['responses']['200']) == 1
assert 'description' in operation['responses']['200']
def test_extend_schema_field_exclusion(no_warnings):
@extend_schema_field(None)
class CustomField(serializers.IntegerField):
pass
class XSerializer(serializers.Serializer):
id = serializers.IntegerField()
hidden = CustomField()
class XView(generics.CreateAPIView):
serializer_class = XSerializer
schema = generate_schema('/x', view=XView)
assert 'hidden' not in schema['components']['schemas']['X']['properties']
def test_extend_schema_serializer_field_exclusion(no_warnings):
@extend_schema_serializer(exclude_fields=['hidden1', 'hidden2'])
class XSerializer(serializers.Serializer):
integer = serializers.IntegerField()
hidden1 = serializers.IntegerField()
hidden2 = serializers.CharField()
class XView(generics.ListCreateAPIView):
serializer_class = XSerializer
schema = generate_schema('/x', view=XView)
assert 'integer' in schema['components']['schemas']['X']['properties']
assert 'hidden1' not in schema['components']['schemas']['X']['properties']
assert 'hidden2' not in schema['components']['schemas']['X']['properties']
def test_schema_contains_only_urlpatterns_first_match(no_warnings):
class XSerializer(serializers.Serializer):
integer = serializers.IntegerField()
class XAPIView(APIView):
@extend_schema(responses=XSerializer)
def get(self, request):
pass
class YSerializer(serializers.Serializer):
integer = serializers.DateTimeField()
class YAPIView(APIView):
@extend_schema(responses=YSerializer)
def get(self, request):
pass
urlpatterns = [
path('api/x/', XAPIView.as_view()),
path('api/x/', YAPIView.as_view()),
]
generator = SchemaGenerator(patterns=urlpatterns)
schema = generator.get_schema(request=None, public=True)
validate_schema(schema)
assert len(schema['components']['schemas']) == 1
assert 'X' in schema['components']['schemas']
operation = schema['paths']['/api/x/']['get']
assert '#/components/schemas/X' in get_response_schema(operation)['$ref']
def test_auto_schema_and_extend_parameters(no_warnings):
class CustomAutoSchema(AutoSchema):
def get_override_parameters(self):
return [
OpenApiParameter("id", str, OpenApiParameter.PATH),
OpenApiParameter("foo", str, deprecated=True),
OpenApiParameter("bar", str),
]
class XSerializer(serializers.Serializer):
id = serializers.IntegerField()
with mock.patch('rest_framework.settings.api_settings.DEFAULT_SCHEMA_CLASS', CustomAutoSchema):
class XViewSet(viewsets.GenericViewSet):
serializer_class = XSerializer
@extend_schema(parameters=[OpenApiParameter("bar", int)])
def list(self, request, *args, **kwargs):
pass
schema = generate_schema('x', XViewSet)
parameters = schema['paths']['/x/']['get']['parameters']
assert parameters[0]['name'] == 'bar' and parameters[0]['schema']['type'] == 'integer'
assert parameters[1]['name'] == 'foo' and parameters[1]['schema']['type'] == 'string'
assert parameters[1]['deprecated'] is True
assert parameters[2]['name'] == 'id'
def test_list_serializer_with_field_child():
class XSerializer(serializers.Serializer):
field = serializers.ListSerializer(child=serializers.IntegerField())
class XAPIView(views.APIView):
serializer_class = XSerializer
def post(self, request, *args, **kwargs):
pass
assert XSerializer({'field': [1, 2, 3]}).data['field'] == [1, 2, 3]
schema = generate_schema('x', view=XAPIView)
assert get_request_schema(schema['paths']['/x']['post'])['$ref'] == '#/components/schemas/X'
assert get_response_schema(schema['paths']['/x']['post'])['$ref'] == '#/components/schemas/X'
properties = schema['components']['schemas']['X']['properties']
assert properties['field']['type'] == 'array'
assert properties['field']['items']['type'] == 'integer'
def test_list_serializer_with_field_child_on_extend_schema(no_warnings):
class XAPIView(APIView):
@extend_schema(
request=serializers.ListSerializer(child=serializers.IntegerField()),
responses=serializers.ListSerializer(child=serializers.IntegerField()),
)
def post(self, request):
pass
schema = generate_schema('x', view=XAPIView)
req_schema = get_request_schema(schema['paths']['/x']['post'])
res_schema = get_response_schema(schema['paths']['/x']['post'])
for s in [req_schema, res_schema]:
assert s['type'] == 'array'
assert s['items']['type'] == 'integer'
def test_list_serializer_with_pagination(no_warnings):
class GenreSerializer(serializers.Serializer):
genre = serializers.CharField()
class XViewSet(viewsets.GenericViewSet):
pagination_class = pagination.LimitOffsetPagination
@extend_schema(responses=GenreSerializer(many=True))
@action(methods=["GET"], detail=False)
def genre(self, request, *args, **kwargs):
pass
schema = generate_schema('/x', XViewSet)
response = get_response_schema(schema['paths']['/x/genre/']['get'])
assert response['$ref'] == '#/components/schemas/PaginatedGenreList'
assert 'PaginatedGenreList' in schema['components']['schemas']
assert 'Genre' in schema['components']['schemas']
def test_inline_serializer(no_warnings):
@extend_schema(
responses=inline_serializer(
name='InlineOneOffSerializer',
fields={
'char': serializers.CharField(),
'choice': serializers.ChoiceField(choices=(('A', 'A'), ('B', 'B'))),
'nested_inline': inline_serializer(
name='NestedInlineOneOffSerializer',
fields={
'char': serializers.CharField(),
'int': serializers.IntegerField(),
},
allow_null=True,
)
}
)
)
@api_view(['GET'])
def one_off(request, foo):
pass
schema = generate_schema('x', view_function=one_off)
assert get_response_schema(schema['paths']['/x']['get'])['$ref'] == (
'#/components/schemas/InlineOneOff'
)
assert len(schema['components']['schemas']) == 3
one_off = schema['components']['schemas']['InlineOneOff']
one_off_nested = schema['components']['schemas']['NestedInlineOneOff']
assert len(one_off['properties']) == 3
assert one_off['properties']['nested_inline']['nullable'] is True
assert one_off['properties']['nested_inline']['allOf'][0]['$ref'] == (
'#/components/schemas/NestedInlineOneOff'
)
assert len(one_off_nested['properties']) == 2
@mock.patch('drf_spectacular.settings.spectacular_settings.CAMELIZE_NAMES', True)
def test_camelize_names(no_warnings):
@extend_schema(responses=OpenApiTypes.FLOAT)
@api_view(['GET'])
def view_func(request, format=None):
pass
schema = generate_schema('/multi/step/path/<str:some_name>/', view_function=view_func)
operation = schema['paths']['/multi/step/path/{someName}/']['get']
assert operation['parameters'][0]['name'] == 'someName'
assert operation['operationId'] == 'multiStepPathRetrieve'
def test_mocked_request_with_get_queryset_get_serializer_class(no_warnings):
class XViewset(viewsets.ReadOnlyModelViewSet):
def get_serializer_class(self):
assert not self.request.user.is_authenticated
assert self.action in ['retrieve', 'list']
return SimpleSerializer
def get_queryset(self):
assert not self.request.user.is_authenticated
assert self.request.method == 'GET'
return SimpleModel.objects.none()
generate_schema('x', XViewset)
def test_queryset_filter_and_ordering_only_on_list(no_warnings):
class XViewset(viewsets.ReadOnlyModelViewSet):
queryset = SimpleModel.objects.none()
serializer_class = SimpleSerializer
filter_backends = (filters.SearchFilter, filters.OrderingFilter)
schema = generate_schema('x', XViewset)
    list_parameters = schema['paths']['/x/']['get']['parameters']
    assert len(list_parameters) == 2
    assert list_parameters[0]['name'] == 'ordering'
    assert list_parameters[1]['name'] == 'search'
    retrieve_parameters = schema['paths']['/x/{id}/']['get']['parameters']
    assert len(retrieve_parameters) == 1
    assert retrieve_parameters[0]['name'] == 'id'
def test_pagination(no_warnings):
class XViewset(viewsets.ReadOnlyModelViewSet):
queryset = SimpleModel.objects.none()
serializer_class = SimpleSerializer
pagination_class = pagination.LimitOffsetPagination
schema = generate_schema('x', XViewset)
    list_parameters = schema['paths']['/x/']['get']['parameters']
    assert len(list_parameters) == 2
    assert list_parameters[0]['name'] == 'limit'
    assert list_parameters[1]['name'] == 'offset'
    retrieve_parameters = schema['paths']['/x/{id}/']['get']['parameters']
    assert len(retrieve_parameters) == 1
    assert retrieve_parameters[0]['name'] == 'id'
assert 'Simple' in schema['components']['schemas']
assert 'PaginatedSimpleList' in schema['components']['schemas']
substitution = schema['components']['schemas']['PaginatedSimpleList']
assert substitution['type'] == 'object'
assert substitution['properties']['results']['items']['$ref'] == '#/components/schemas/Simple'
def test_pagination_reusage(no_warnings):
class XViewset(viewsets.ReadOnlyModelViewSet):
queryset = SimpleModel.objects.all()
serializer_class = SimpleSerializer
pagination_class = pagination.LimitOffsetPagination
@extend_schema(responses={'200': SimpleSerializer(many=True)})
@action(methods=['GET'], detail=False)
def custom_action(self):
pass
class YViewset(XViewset):
serializer_class = SimpleSerializer
router = routers.SimpleRouter()
router.register('x', XViewset, basename='x')
router.register('y', YViewset, basename='y')
generator = SchemaGenerator(patterns=router.urls)
schema = generator.get_schema(request=None, public=True)
validate_schema(schema)
@mock.patch(
'drf_spectacular.settings.spectacular_settings.SECURITY',
[{'apiKeyAuth': []}]
)
@mock.patch(
'drf_spectacular.settings.spectacular_settings.APPEND_COMPONENTS',
{"securitySchemes": {"apiKeyAuth": {"type": "apiKey", "in": "header", "name": "Authorization"}}}
)
def test_manual_security_method_addition(no_warnings):
@extend_schema(responses=OpenApiTypes.FLOAT)
@api_view(['GET'])
def view_func(request, format=None):
pass
schema = generate_schema('/x/', view_function=view_func)
operation_security = schema['paths']['/x/']['get']['security']
schema_security = schema['components']['securitySchemes']
assert len(operation_security) == 4 and any(['apiKeyAuth' in os for os in operation_security])
assert len(schema_security) == 3 and 'apiKeyAuth' in schema_security
def test_basic_viewset_without_queryset_with_explicit_pk_typing(no_warnings):
class XSerializer(serializers.Serializer):
field = fields.IntegerField()
class XViewset(viewsets.ViewSet):
serializer_class = XSerializer
def retrieve(self, request, *args, **kwargs):
pass
urlpatterns = [
path("api/<path:some_var>/<uuid:pk>/", XViewset.as_view({"get": "retrieve"}))
]
generator = SchemaGenerator(patterns=urlpatterns)
schema = generator.get_schema(request=None, public=True)
validate_schema(schema)
operation = schema['paths']['/api/{some_var}/{id}/']['get']
assert operation['parameters'][0]['name'] == 'id'
assert operation['parameters'][0]['schema']['format'] == 'uuid'
def test_multiple_media_types(no_warnings):
@extend_schema(responses={
(200, 'application/json'): OpenApiTypes.OBJECT,
(200, 'application/pdf'): OpenApiTypes.BINARY,
})
class XAPIView(APIView):
def get(self, request):
pass
schema = generate_schema('x', view=XAPIView)
content = schema['paths']['/x']['get']['responses']['200']['content']
assert content['application/pdf']['schema']['format'] == 'binary'
assert content['application/json']['schema']['type'] == 'object'
def test_token_auth_with_bearer_keyword(no_warnings):
class CustomTokenAuthentication(TokenAuthentication):
keyword = 'Bearer'
@extend_schema(responses=OpenApiTypes.FLOAT)
@api_view(['GET'])
def view_func(request, format=None):
pass
view_func.cls.authentication_classes = [CustomTokenAuthentication]
schema = generate_schema('x', view_function=view_func)
assert schema['components']['securitySchemes']['tokenAuth']['scheme'] == 'bearer'
@pytest.mark.parametrize('responses', [
str,
OpenApiTypes.STR,
{'200': str},
{'200': OpenApiTypes.STR},
])
def test_string_response_variations(no_warnings, responses):
@extend_schema(responses=responses)
@api_view(['GET'])
def view_func(request, format=None):
pass
schema = generate_schema('x', view_function=view_func)
assert get_response_schema(schema['paths']['/x']['get'])['type'] == 'string'
def test_exclude_discovered_parameter(no_warnings):
@extend_schema_view(list=extend_schema(parameters=[
OpenApiParameter('limit', exclude=True),
OpenApiParameter('random', bool),
]))
class XViewset(viewsets.ReadOnlyModelViewSet):
queryset = SimpleModel.objects.all()
serializer_class = SimpleSerializer
pagination_class = pagination.LimitOffsetPagination
schema = generate_schema('x', XViewset)
parameters = schema['paths']['/x/']['get']['parameters']
assert len(parameters) == 2
assert parameters[0]['name'] == 'offset'
assert parameters[1]['name'] == 'random'
def test_manual_decimal_validator():
class XSerializer(serializers.Serializer):
field = serializers.CharField(
validators=[validators.DecimalValidator(max_digits=4, decimal_places=2)]
)
@extend_schema(request=XSerializer, responses=XSerializer)
@api_view(['POST'])
def view_func(request, format=None):
pass
schema = generate_schema('x', view_function=view_func)
field = schema['components']['schemas']['X']['properties']['field']
assert field['maximum'] == 100
assert field['minimum'] == -100
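    # The bounds follow from DecimalValidator(max_digits=4, decimal_places=2):
    # at most 4 - 2 = 2 integer digits, so abs(value) < 10 ** 2 = 100, which the
    # generated schema expresses as maximum=100 / minimum=-100.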
1c41823bc3beaf18ca16772937ec5ad26af8e9c7 | 4019 | py | Python | examples/seismic/poroelastic/poroelastic_example.py | rwalkerlewis/devito | 262364e5f2855ad01a281d517d400704b7667420 | ["MIT"]
import numpy as np
from argparse import ArgumentParser
from devito.logger import info
from examples.seismic.poroelastic import PoroelasticWaveSolver, demo_model
from examples.seismic import AcquisitionGeometry
def poroelastic_setup(shape=(50, 50), spacing=(15.0, 15.0), tn=500., num=200, space_order=4, nbpml=10,
constant=False, **kwargs):
nrec = 2*shape[0]
preset = 'constant-poroelastic' if constant else 'layers-poroelastic'
model = demo_model(preset, space_order=space_order, shape=shape, nbpml=nbpml,
dtype=kwargs.pop('dtype', np.float32), spacing=spacing)
# Source and receiver geometries
src_coordinates = np.empty((1, len(spacing)))
src_coordinates[0, :] = np.array(model.domain_size) * .5
if len(shape) > 1:
src_coordinates[0, -1] = model.origin[-1] + 2 * spacing[-1]
rec_coordinates = np.empty((nrec, len(spacing)))
rec_coordinates[:, 0] = np.linspace(0., model.domain_size[0], num=nrec)
if len(shape) > 1:
rec_coordinates[:, 1] = np.array(model.domain_size)[1] * .5
rec_coordinates[:, -1] = model.origin[-1] + 2 * spacing[-1]
# Source frequency is in Hz
geometry = AcquisitionGeometry(model, rec_coordinates, src_coordinates,
t0=0.0, tn=tn, src_type='Ricker', f0=40)
# Create solver object to provide relevant operators
solver = PoroelasticWaveSolver(model, geometry, space_order=space_order, **kwargs)
return solver
def run(shape=(50, 50), spacing=(20.0, 20.0), tn=1000.0, num=200,
space_order=4, nbpml=40, autotune=False, constant=False, **kwargs):
solver = poroelastic_setup(shape=shape, spacing=spacing, nbpml=nbpml, tn=tn,
num=num, space_order=space_order, constant=constant, **kwargs)
info("Applying Forward")
# Define receiver geometry (spread across x, just below surface)
rec1, rec2, vx, vz, qx, qz, txx, tzz, txz, p, summary = solver.forward(autotune=autotune)
# iPython debug option
#import matplotlib.pyplot as plt
#from IPython import embed;embed()
return rec1, rec2, vx, vz, qx, qz, txx, tzz, txz, p, summary
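# Quick interactive use (a sketch; shape/spacing/tn values are illustrative):
#   rec1, rec2, *fields, summary = run(shape=(101, 101), spacing=(10., 10.), tn=250.0)
# rec1/rec2 are Receiver objects (their .data arrays hold the recorded traces);
# summary carries the operator performance information.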
if __name__ == "__main__":
description = ("Example script for a set of poroelastic operators.")
parser = ArgumentParser(description=description)
parser.add_argument('--2d', dest='dim2', default=True, action='store_true',
help="Preset to determine the physical problem setup")
parser.add_argument('-a', '--autotune', default=False, action='store_true',
help="Enable autotuning for block sizes")
parser.add_argument("-so", "--space_order", default=4,
type=int, help="Space order of the simulation")
parser.add_argument("--nbpml", default=40,
type=int, help="Number of PML layers around the domain")
parser.add_argument("-dse", default="advanced",
choices=["noop", "basic", "advanced",
"speculative", "aggressive"],
help="Devito symbolic engine (DSE) mode")
parser.add_argument("-dle", default="advanced",
choices=["noop", "advanced", "speculative"],
help="Devito loop engine (DLEE) mode")
parser.add_argument("--constant", default=True, action='store_true',
help="Constant velocity model, default is a constant velocity model")
args = parser.parse_args()
# 2D preset parameters
if args.dim2:
shape = (251, 641)
spacing = (0.5, 0.5)
num = 800
dt = 1.0e-4
tn = 0.05 #(num-1)*dt
    # 3D preset parameters
    else:
        shape = (150, 150, 150)
        spacing = (10.0, 10.0, 10.0)
        num = 200  # matches the poroelastic_setup default
        tn = 1250.0
run(shape=shape, spacing=spacing, nbpml=args.nbpml, tn=tn, num=num, dle=args.dle,
space_order=args.space_order, autotune=args.autotune, constant=args.constant,
dse=args.dse)
1c418584edcca91d53edc9629e0784c93c3b3636 | 2550 | py | Python | sprites/vertex.py | lmason98/PyGraph | 22d734cfd97333578c91ba4e331716df0aec668e | ["MIT"]
"""
File: sprites/vertex.py
Author: Luke Mason
Description: A graph vertex pygame sprite
"""
from settings import COLOR
from pygame.sprite import Sprite
from pygame import Surface, draw
TEXT = COLOR.get('white')
class Vertex(Sprite):
def __init__(self, x: int, y: int, color: (int, int, int) = TEXT, radius: int = 10) -> None:
"""
Inits the vertex sprite
"""
Sprite.__init__(self)
self.drag = False # Drag state for the vertex (will be true when vertex is being moved)
self.selected = False
self.color = color
self.radius = radius
self.edges = []
self.connected_vertices = []
        # Draw the vertex as a circle: the surface starts out black, the colorkey
        # makes that black background transparent, and the circle goes on top.
        self.image = Surface((self.radius * 2, self.radius * 2))
        self.image.set_colorkey((0, 0, 0))  # assumes the vertex color is never pure black
        draw.circle(self.image, self.color, (self.radius, self.radius), self.radius)  # We want circular vertices
        # The position of the sprite; move the vertex by updating self.rect
        self.rect = self.image.get_rect(center=(x, y))
def __str__(self):
"""
Converts the vertex class to string for print()
"""
x, y = self.get_pos()
return f'(x={x}, y={y})'
    def set_pos(self, x: int, y: int) -> None:
        """
        Sets the position of the vertex; this should only be called when placing or moving a vertex
        """
        self.rect.center = (x, y)
def get_pos(self) -> (int, int):
"""
Returns vertex position
"""
return self.rect.x, self.rect.y
    def set_color(self, color: (int, int, int)) -> None:
        """
        Sets the vertex color and redraws the circle
        """
        self.color = color
        draw.circle(self.image, self.color, (self.radius, self.radius), self.radius)
def add_connected_vertex(self, v) -> None:
"""
        Adds a vertex to the connected vertices list; this is used for tracking edges.
        self.connected_vertices = [{'vertex': v0, 'count': 1}, {'vertex': v1, 'count': 3}]
        For any count > 1, there are parallel edges
"""
found = False
for cv in self.connected_vertices:
# If vertex exists, update count
if cv.get('vertex') == v:
cv.update({'count': cv.get('count', 0) + 1})
found = True
# Otherwise insert with count=1
if not found:
self.connected_vertices.append({'vertex': v, 'count': 1})
    def remove_connected_vertex(self, v) -> bool:
        """
        Removes a vertex from the connected vertices list; this is used for tracking edges.
        Returns True/False based on whether the passed vertex actually existed in the list
        """
found = False
for cv in self.connected_vertices:
if cv.get('vertex') == v:
self.connected_vertices.remove(cv)
found = True
break
return found
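# Usage sketch (hypothetical; assumes pygame is initialised and COLOR defines 'white'):
#   v0, v1 = Vertex(100, 100), Vertex(200, 150)
#   v0.add_connected_vertex(v1)     # connected_vertices -> [{'vertex': v1, 'count': 1}]
#   v0.add_connected_vertex(v1)     # count becomes 2, i.e. a parallel edge
#   v0.remove_connected_vertex(v1)  # returns True and drops the entry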
1c41858c96b1c576aa3a7fdf95abe38872adc09e | 5381 | py | Python | scons/scons-local-2.5.0/SCons/Tool/gdc.py | emamanto/Soar | 72d2bc095068dd87ac78dad4f48938f6edc0353a | ["BSD-2-Clause"] | stars 72, issues 9, forks 14
"""SCons.Tool.gdc
Tool-specific initialization for the GDC compiler.
(https://github.com/D-Programming-GDC/GDC)
Developed by Russel Winder (russel@winder.org.uk)
2012-05-09 onwards
Compiler variables:
DC - The name of the D compiler to use. Defaults to gdc.
DPATH - List of paths to search for import modules.
DVERSIONS - List of version tags to enable when compiling.
DDEBUG - List of debug tags to enable when compiling.
Linker related variables:
LIBS - List of library files to link in.
DLINK - Name of the linker to use. Defaults to gdc.
DLINKFLAGS - List of linker flags.
Lib tool variables:
DLIB - Name of the lib tool to use. Defaults to lib.
DLIBFLAGS - List of flags to pass to the lib tool.
LIBS - Same as for the linker. (libraries to pull into the .lib)
"""
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/gdc.py rel_2.5.0:3543:937e55cd78f7 2016/04/09 11:29:54 bdbaddog"
import SCons.Action
import SCons.Defaults
import SCons.Tool
import SCons.Tool.DCommon
import SCons.Util
def generate(env):
static_obj, shared_obj = SCons.Tool.createObjBuilders(env)
static_obj.add_action('.d', SCons.Defaults.DAction)
shared_obj.add_action('.d', SCons.Defaults.ShDAction)
static_obj.add_emitter('.d', SCons.Defaults.StaticObjectEmitter)
shared_obj.add_emitter('.d', SCons.Defaults.SharedObjectEmitter)
env['DC'] = env.Detect('gdc')
env['DCOM'] = '$DC $_DINCFLAGS $_DVERFLAGS $_DDEBUGFLAGS $_DFLAGS -c -o $TARGET $SOURCES'
env['_DINCFLAGS'] = '${_concat(DINCPREFIX, DPATH, DINCSUFFIX, __env__, RDirs, TARGET, SOURCE)}'
env['_DVERFLAGS'] = '${_concat(DVERPREFIX, DVERSIONS, DVERSUFFIX, __env__)}'
env['_DDEBUGFLAGS'] = '${_concat(DDEBUGPREFIX, DDEBUG, DDEBUGSUFFIX, __env__)}'
env['_DFLAGS'] = '${_concat(DFLAGPREFIX, DFLAGS, DFLAGSUFFIX, __env__)}'
env['SHDC'] = '$DC'
env['SHDCOM'] = '$SHDC $_DINCFLAGS $_DVERFLAGS $_DDEBUGFLAGS $_DFLAGS -fPIC -c -o $TARGET $SOURCES'
env['DPATH'] = ['#/']
env['DFLAGS'] = []
env['DVERSIONS'] = []
env['DDEBUG'] = []
if env['DC']:
SCons.Tool.DCommon.addDPATHToEnv(env, env['DC'])
env['DINCPREFIX'] = '-I'
env['DINCSUFFIX'] = ''
env['DVERPREFIX'] = '-version='
env['DVERSUFFIX'] = ''
env['DDEBUGPREFIX'] = '-debug='
env['DDEBUGSUFFIX'] = ''
env['DFLAGPREFIX'] = '-'
env['DFLAGSUFFIX'] = ''
env['DFILESUFFIX'] = '.d'
env['DLINK'] = '$DC'
env['DLINKFLAGS'] = SCons.Util.CLVar('')
env['DLINKCOM'] = '$DLINK -o $TARGET $DLINKFLAGS $__RPATH $SOURCES $_LIBDIRFLAGS $_LIBFLAGS'
env['DSHLINK'] = '$DC'
env['DSHLINKFLAGS'] = SCons.Util.CLVar('$DLINKFLAGS -shared')
env['SHDLINKCOM'] = '$DLINK -o $TARGET $DSHLINKFLAGS $__DSHLIBVERSIONFLAGS $__RPATH $SOURCES $_LIBDIRFLAGS $_LIBFLAGS'
env['DLIB'] = 'lib' if env['PLATFORM'] == 'win32' else 'ar cr'
env['DLIBCOM'] = '$DLIB $_DLIBFLAGS {0}$TARGET $SOURCES $_DLINKLIBFLAGS'.format('-c ' if env['PLATFORM'] == 'win32' else '')
env['_DLIBFLAGS'] = '${_concat(DLIBFLAGPREFIX, DLIBFLAGS, DLIBFLAGSUFFIX, __env__)}'
env['DLIBFLAGPREFIX'] = '-'
env['DLIBFLAGSUFFIX'] = ''
env['DLINKFLAGPREFIX'] = '-'
env['DLINKFLAGSUFFIX'] = ''
# __RPATH is set to $_RPATH in the platform specification if that
# platform supports it.
env['RPATHPREFIX'] = '-Wl,-rpath='
env['RPATHSUFFIX'] = ''
env['_RPATH'] = '${_concat(RPATHPREFIX, RPATH, RPATHSUFFIX, __env__)}'
# Support for versioned libraries
env['_DSHLIBVERSIONFLAGS'] = '$DSHLIBVERSIONFLAGS -Wl,-soname=$_DSHLIBSONAME'
env['_DSHLIBSONAME'] = '${DShLibSonameGenerator(__env__,TARGET)}'
# NOTE: this is a quick hack, the soname will only work if there is
# c/c++ linker loaded which provides callback for the ShLibSonameGenerator
env['DShLibSonameGenerator'] = SCons.Tool.ShLibSonameGenerator
# NOTE: this is only for further reference, currently $DSHLIBVERSION does
# not work, the user must use $SHLIBVERSION
env['DSHLIBVERSION'] = '$SHLIBVERSION'
env['DSHLIBVERSIONFLAGS'] = '$SHLIBVERSIONFLAGS'
SCons.Tool.createStaticLibBuilder(env)
def exists(env):
return env.Detect('gdc')
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
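# Example SConstruct usage (a sketch; the tool list and file names are assumptions):
#   env = Environment(tools=['gdc', 'link'])
#   env.Program('hello', 'hello.d', DVERSIONS=['Demo'], DPATH=['#/imports'])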
1c41860d2a7923dca193e3a89e401e2e5bd2cf72 | 33054 | py | Python | boto/vpc/__init__.py | yola/boto | dccded53cc1eedd565fa50b32cadbdba3990225a | ["MIT"]
# Copyright (c) 2009 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Represents a connection to the EC2 service.
"""
from boto.ec2.connection import EC2Connection
from boto.resultset import ResultSet
from boto.vpc.vpc import VPC
from boto.vpc.customergateway import CustomerGateway
from boto.vpc.routetable import RouteTable
from boto.vpc.internetgateway import InternetGateway
from boto.vpc.vpngateway import VpnGateway, Attachment
from boto.vpc.dhcpoptions import DhcpOptions
from boto.vpc.subnet import Subnet
from boto.vpc.vpnconnection import VpnConnection
from boto.ec2 import RegionData
from boto.regioninfo import RegionInfo
def regions(**kw_params):
"""
Get all available regions for the EC2 service.
You may pass any of the arguments accepted by the VPCConnection
object's constructor as keyword arguments and they will be
passed along to the VPCConnection object.
:rtype: list
:return: A list of :class:`boto.ec2.regioninfo.RegionInfo`
"""
regions = []
for region_name in RegionData:
region = RegionInfo(name=region_name,
endpoint=RegionData[region_name],
connection_cls=VPCConnection)
regions.append(region)
return regions
def connect_to_region(region_name, **kw_params):
"""
Given a valid region name, return a
:class:`boto.vpc.VPCConnection`.
Any additional parameters after the region_name are passed on to
the connect method of the region object.
    :type region_name: str
:param region_name: The name of the region to connect to.
:rtype: :class:`boto.vpc.VPCConnection` or ``None``
:return: A connection to the given region, or None if an invalid region
name is given
"""
for region in regions(**kw_params):
if region.name == region_name:
return region.connect(**kw_params)
return None
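# Example (a sketch; assumes boto credentials are configured):
#   from boto.vpc import connect_to_region
#   c = connect_to_region('us-east-1')
#   vpc = c.create_vpc('10.0.0.0/16')
#   subnet = c.create_subnet(vpc.id, '10.0.0.0/24')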
class VPCConnection(EC2Connection):
# VPC methods
def get_all_vpcs(self, vpc_ids=None, filters=None):
"""
Retrieve information about your VPCs. You can filter results to
return information only about those VPCs that match your search
parameters. Otherwise, all VPCs associated with your account
are returned.
:type vpc_ids: list
:param vpc_ids: A list of strings with the desired VPC ID's
:type filters: list of tuples
:param filters: A list of tuples containing filters. Each tuple
consists of a filter key and a filter value.
Possible filter keys are:
* *state* - a list of states of the VPC (pending or available)
* *cidrBlock* - a list CIDR blocks of the VPC
* *dhcpOptionsId* - a list of IDs of a set of DHCP options
:rtype: list
:return: A list of :class:`boto.vpc.vpc.VPC`
"""
params = {}
if vpc_ids:
self.build_list_params(params, vpc_ids, 'VpcId')
if filters:
self.build_filter_params(params, dict(filters))
return self.get_list('DescribeVpcs', params, [('item', VPC)])
def create_vpc(self, cidr_block):
"""
Create a new Virtual Private Cloud.
:type cidr_block: str
:param cidr_block: A valid CIDR block
:rtype: The newly created VPC
:return: A :class:`boto.vpc.vpc.VPC` object
"""
params = {'CidrBlock' : cidr_block}
return self.get_object('CreateVpc', params, VPC)
def delete_vpc(self, vpc_id):
"""
Delete a Virtual Private Cloud.
:type vpc_id: str
:param vpc_id: The ID of the vpc to be deleted.
:rtype: bool
:return: True if successful
"""
params = {'VpcId': vpc_id}
return self.get_status('DeleteVpc', params)
# Route Tables
def get_all_route_tables(self, route_table_ids=None, filters=None):
"""
Retrieve information about your routing tables. You can filter results
to return information only about those route tables that match your
search parameters. Otherwise, all route tables associated with your
account are returned.
:type route_table_ids: list
:param route_table_ids: A list of strings with the desired route table
IDs.
:type filters: list of tuples
:param filters: A list of tuples containing filters. Each tuple
consists of a filter key and a filter value.
:rtype: list
:return: A list of :class:`boto.vpc.routetable.RouteTable`
"""
params = {}
if route_table_ids:
self.build_list_params(params, route_table_ids, "RouteTableId")
if filters:
self.build_filter_params(params, dict(filters))
return self.get_list('DescribeRouteTables', params,
[('item', RouteTable)])
def associate_route_table(self, route_table_id, subnet_id):
"""
Associates a route table with a specific subnet.
:type route_table_id: str
:param route_table_id: The ID of the route table to associate.
:type subnet_id: str
:param subnet_id: The ID of the subnet to associate with.
:rtype: str
:return: The ID of the association created
"""
params = {
'RouteTableId': route_table_id,
'SubnetId': subnet_id
}
result = self.get_object('AssociateRouteTable', params, ResultSet)
return result.associationId
def disassociate_route_table(self, association_id):
"""
Removes an association from a route table. This will cause all subnets
that would've used this association to now use the main routing
association instead.
:type association_id: str
:param association_id: The ID of the association to disassociate.
:rtype: bool
:return: True if successful
"""
params = { 'AssociationId': association_id }
return self.get_status('DisassociateRouteTable', params)
def create_route_table(self, vpc_id):
"""
Creates a new route table.
:type vpc_id: str
:param vpc_id: The VPC ID to associate this route table with.
:rtype: The newly created route table
:return: A :class:`boto.vpc.routetable.RouteTable` object
"""
params = { 'VpcId': vpc_id }
return self.get_object('CreateRouteTable', params, RouteTable)
def delete_route_table(self, route_table_id):
"""
Delete a route table.
:type route_table_id: str
:param route_table_id: The ID of the route table to delete.
:rtype: bool
:return: True if successful
"""
params = { 'RouteTableId': route_table_id }
return self.get_status('DeleteRouteTable', params)
def create_route(self, route_table_id, destination_cidr_block,
gateway_id=None, instance_id=None):
"""
Creates a new route in the route table within a VPC. The route's target
can be either a gateway attached to the VPC or a NAT instance in the
VPC.
:type route_table_id: str
:param route_table_id: The ID of the route table for the route.
:type destination_cidr_block: str
:param destination_cidr_block: The CIDR address block used for the
destination match.
:type gateway_id: str
:param gateway_id: The ID of the gateway attached to your VPC.
:type instance_id: str
:param instance_id: The ID of a NAT instance in your VPC.
:rtype: bool
:return: True if successful
"""
params = {
'RouteTableId': route_table_id,
'DestinationCidrBlock': destination_cidr_block
}
if gateway_id is not None:
params['GatewayId'] = gateway_id
elif instance_id is not None:
params['InstanceId'] = instance_id
return self.get_status('CreateRoute', params)
def replace_route(self, route_table_id, destination_cidr_block,
gateway_id=None, instance_id=None, interface_id=None):
"""
Replaces an existing route within a route table in a VPC.
:type route_table_id: str
:param route_table_id: The ID of the route table for the route.
:type destination_cidr_block: str
:param destination_cidr_block: The CIDR address block used for the
destination match.
:type gateway_id: str
:param gateway_id: The ID of the gateway attached to your VPC.
:type instance_id: str
:param instance_id: The ID of a NAT instance in your VPC.
:type interface_id: str
:param interface_id: Allows routing to network interface attachments.
:rtype: bool
:return: True if successful
"""
params = {
'RouteTableId': route_table_id,
'DestinationCidrBlock': destination_cidr_block
}
if gateway_id is not None:
params['GatewayId'] = gateway_id
elif instance_id is not None:
params['InstanceId'] = instance_id
elif interface_id is not None:
params['NetworkInterfaceId'] = interface_id
return self.get_status('ReplaceRoute', params)
def delete_route(self, route_table_id, destination_cidr_block):
"""
Deletes a route from a route table within a VPC.
:type route_table_id: str
:param route_table_id: The ID of the route table with the route.
:type destination_cidr_block: str
:param destination_cidr_block: The CIDR address block used for
destination match.
:rtype: bool
:return: True if successful
"""
params = {
'RouteTableId': route_table_id,
'DestinationCidrBlock': destination_cidr_block
}
return self.get_status('DeleteRoute', params)
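    # A route's target is mutually exclusive: pass either gateway_id or
    # instance_id (replace_route additionally accepts interface_id).
    # Sketch: c.create_route(rt.id, '0.0.0.0/0', gateway_id=igw.id)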
# Internet Gateways
def get_all_internet_gateways(self, internet_gateway_ids=None,
filters=None):
"""
Get a list of internet gateways. You can filter results to return information
about only those gateways that you're interested in.
:type internet_gateway_ids: list
:param internet_gateway_ids: A list of strings with the desired gateway IDs.
:type filters: list of tuples
:param filters: A list of tuples containing filters. Each tuple
consists of a filter key and a filter value.
"""
params = {}
if internet_gateway_ids:
self.build_list_params(params, internet_gateway_ids,
'InternetGatewayId')
if filters:
self.build_filter_params(params, dict(filters))
return self.get_list('DescribeInternetGateways', params,
[('item', InternetGateway)])
def create_internet_gateway(self):
"""
Creates an internet gateway for VPC.
:rtype: Newly created internet gateway.
:return: `boto.vpc.internetgateway.InternetGateway`
"""
return self.get_object('CreateInternetGateway', {}, InternetGateway)
def delete_internet_gateway(self, internet_gateway_id):
"""
Deletes an internet gateway from the VPC.
:type internet_gateway_id: str
:param internet_gateway_id: The ID of the internet gateway to delete.
:rtype: Bool
:return: True if successful
"""
params = { 'InternetGatewayId': internet_gateway_id }
return self.get_status('DeleteInternetGateway', params)
def attach_internet_gateway(self, internet_gateway_id, vpc_id):
"""
Attach an internet gateway to a specific VPC.
:type internet_gateway_id: str
        :param internet_gateway_id: The ID of the internet gateway to attach.
:type vpc_id: str
:param vpc_id: The ID of the VPC to attach to.
:rtype: Bool
:return: True if successful
"""
params = {
'InternetGatewayId': internet_gateway_id,
'VpcId': vpc_id
}
return self.get_status('AttachInternetGateway', params)
def detach_internet_gateway(self, internet_gateway_id, vpc_id):
"""
Detach an internet gateway from a specific VPC.
:type internet_gateway_id: str
:param internet_gateway_id: The ID of the internet gateway to detach.
:type vpc_id: str
        :param vpc_id: The ID of the VPC to detach from.
:rtype: Bool
:return: True if successful
"""
params = {
'InternetGatewayId': internet_gateway_id,
'VpcId': vpc_id
}
return self.get_status('DetachInternetGateway', params)
# Customer Gateways
def get_all_customer_gateways(self, customer_gateway_ids=None,
filters=None):
"""
Retrieve information about your CustomerGateways. You can filter
results to return information only about those CustomerGateways that
match your search parameters. Otherwise, all CustomerGateways
associated with your account are returned.
:type customer_gateway_ids: list
:param customer_gateway_ids: A list of strings with the desired
CustomerGateway ID's.
:type filters: list of tuples
:param filters: A list of tuples containing filters. Each tuple
consists of a filter key and a filter value.
Possible filter keys are:
- *state*, the state of the CustomerGateway
(pending,available,deleting,deleted)
- *type*, the type of customer gateway (ipsec.1)
- *ipAddress* the IP address of customer gateway's
                        internet-routable external interface
:rtype: list
:return: A list of :class:`boto.vpc.customergateway.CustomerGateway`
"""
params = {}
if customer_gateway_ids:
self.build_list_params(params, customer_gateway_ids,
'CustomerGatewayId')
if filters:
self.build_filter_params(params, dict(filters))
return self.get_list('DescribeCustomerGateways', params,
[('item', CustomerGateway)])
def create_customer_gateway(self, type, ip_address, bgp_asn):
"""
Create a new Customer Gateway
:type type: str
        :param type: Type of VPN Connection. Only valid value currently is 'ipsec.1'
:type ip_address: str
:param ip_address: Internet-routable IP address for customer's gateway.
Must be a static address.
:type bgp_asn: str
:param bgp_asn: Customer gateway's Border Gateway Protocol (BGP)
Autonomous System Number (ASN)
:rtype: The newly created CustomerGateway
:return: A :class:`boto.vpc.customergateway.CustomerGateway` object
"""
params = {'Type' : type,
'IpAddress' : ip_address,
'BgpAsn' : bgp_asn}
return self.get_object('CreateCustomerGateway', params, CustomerGateway)
def delete_customer_gateway(self, customer_gateway_id):
"""
Delete a Customer Gateway.
:type customer_gateway_id: str
:param customer_gateway_id: The ID of the customer_gateway to be deleted.
:rtype: bool
:return: True if successful
"""
params = {'CustomerGatewayId': customer_gateway_id}
return self.get_status('DeleteCustomerGateway', params)
# VPN Gateways
def get_all_vpn_gateways(self, vpn_gateway_ids=None, filters=None):
"""
Retrieve information about your VpnGateways. You can filter results to
return information only about those VpnGateways that match your search
parameters. Otherwise, all VpnGateways associated with your account
are returned.
:type vpn_gateway_ids: list
:param vpn_gateway_ids: A list of strings with the desired VpnGateway ID's
:type filters: list of tuples
:param filters: A list of tuples containing filters. Each tuple
consists of a filter key and a filter value.
Possible filter keys are:
- *state*, a list of states of the VpnGateway
(pending,available,deleting,deleted)
                        - *type*, a list of types of customer gateway (ipsec.1)
- *availabilityZone*, a list of Availability zones the
VPN gateway is in.
:rtype: list
:return: A list of :class:`boto.vpc.customergateway.VpnGateway`
"""
params = {}
if vpn_gateway_ids:
self.build_list_params(params, vpn_gateway_ids, 'VpnGatewayId')
if filters:
self.build_filter_params(params, dict(filters))
return self.get_list('DescribeVpnGateways', params,
[('item', VpnGateway)])
def create_vpn_gateway(self, type, availability_zone=None):
"""
Create a new Vpn Gateway
:type type: str
        :param type: Type of VPN Connection. Only valid value currently is 'ipsec.1'
:type availability_zone: str
:param availability_zone: The Availability Zone where you want the VPN gateway.
:rtype: The newly created VpnGateway
:return: A :class:`boto.vpc.vpngateway.VpnGateway` object
"""
params = {'Type' : type}
if availability_zone:
params['AvailabilityZone'] = availability_zone
return self.get_object('CreateVpnGateway', params, VpnGateway)
def delete_vpn_gateway(self, vpn_gateway_id):
"""
Delete a Vpn Gateway.
:type vpn_gateway_id: str
:param vpn_gateway_id: The ID of the vpn_gateway to be deleted.
:rtype: bool
:return: True if successful
"""
params = {'VpnGatewayId': vpn_gateway_id}
return self.get_status('DeleteVpnGateway', params)
def attach_vpn_gateway(self, vpn_gateway_id, vpc_id):
"""
Attaches a VPN gateway to a VPC.
:type vpn_gateway_id: str
:param vpn_gateway_id: The ID of the vpn_gateway to attach
:type vpc_id: str
:param vpc_id: The ID of the VPC you want to attach the gateway to.
:rtype: An attachment
:return: a :class:`boto.vpc.vpngateway.Attachment`
"""
params = {'VpnGatewayId': vpn_gateway_id,
'VpcId' : vpc_id}
return self.get_object('AttachVpnGateway', params, Attachment)
# Subnets
def get_all_subnets(self, subnet_ids=None, filters=None):
"""
Retrieve information about your Subnets. You can filter results to
return information only about those Subnets that match your search
parameters. Otherwise, all Subnets associated with your account
are returned.
:type subnet_ids: list
        :param subnet_ids: A list of strings with the desired Subnet IDs
:type filters: list of tuples
:param filters: A list of tuples containing filters. Each tuple
consists of a filter key and a filter value.
Possible filter keys are:
- *state*, a list of states of the Subnet
(pending,available)
              - *vpcId*, a list of IDs of the VPC the subnet is in.
- *cidrBlock*, a list of CIDR blocks of the subnet
- *availabilityZone*, list of the Availability Zones
the subnet is in.
:rtype: list
:return: A list of :class:`boto.vpc.subnet.Subnet`
"""
params = {}
if subnet_ids:
self.build_list_params(params, subnet_ids, 'SubnetId')
if filters:
self.build_filter_params(params, dict(filters))
return self.get_list('DescribeSubnets', params, [('item', Subnet)])
def create_subnet(self, vpc_id, cidr_block, availability_zone=None):
"""
Create a new Subnet
:type vpc_id: str
:param vpc_id: The ID of the VPC where you want to create the subnet.
:type cidr_block: str
:param cidr_block: The CIDR block you want the subnet to cover.
:type availability_zone: str
:param availability_zone: The AZ you want the subnet in
:rtype: The newly created Subnet
        :return: A :class:`boto.vpc.subnet.Subnet` object
"""
params = {'VpcId' : vpc_id,
'CidrBlock' : cidr_block}
if availability_zone:
params['AvailabilityZone'] = availability_zone
return self.get_object('CreateSubnet', params, Subnet)
def delete_subnet(self, subnet_id):
"""
Delete a subnet.
:type subnet_id: str
:param subnet_id: The ID of the subnet to be deleted.
:rtype: bool
:return: True if successful
"""
params = {'SubnetId': subnet_id}
return self.get_status('DeleteSubnet', params)
# DHCP Options
def get_all_dhcp_options(self, dhcp_options_ids=None):
"""
Retrieve information about your DhcpOptions.
:type dhcp_options_ids: list
        :param dhcp_options_ids: A list of strings with the desired DhcpOptions IDs
:rtype: list
:return: A list of :class:`boto.vpc.dhcpoptions.DhcpOptions`
"""
params = {}
if dhcp_options_ids:
self.build_list_params(params, dhcp_options_ids, 'DhcpOptionsId')
return self.get_list('DescribeDhcpOptions', params,
[('item', DhcpOptions)])
def create_dhcp_options(self, domain_name=None, domain_name_servers=None,
ntp_servers=None, netbios_name_servers=None,
netbios_node_type=None):
"""
Create a new DhcpOption
This corresponds to
http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/ApiReference-query-CreateDhcpOptions.html
:type domain_name: str
:param domain_name: A domain name of your choice (for example,
example.com)
:type domain_name_servers: list of strings
:param domain_name_servers: The IP address of a domain name server. You
can specify up to four addresses.
:type ntp_servers: list of strings
:param ntp_servers: The IP address of a Network Time Protocol (NTP)
server. You can specify up to four addresses.
:type netbios_name_servers: list of strings
:param netbios_name_servers: The IP address of a NetBIOS name server.
You can specify up to four addresses.
:type netbios_node_type: str
:param netbios_node_type: The NetBIOS node type (1, 2, 4, or 8). For
more information about the values, see RFC 2132. We recommend you
only use 2 at this time (broadcast and multicast are currently not
supported).
        :rtype: The newly created DhcpOptions
        :return: A :class:`boto.vpc.dhcpoptions.DhcpOptions` object
"""
key_counter = 1
params = {}
def insert_option(params, name, value):
params['DhcpConfiguration.%d.Key' % (key_counter,)] = name
if isinstance(value, (list, tuple)):
for idx, value in enumerate(value, 1):
key_name = 'DhcpConfiguration.%d.Value.%d' % (
key_counter, idx)
params[key_name] = value
else:
key_name = 'DhcpConfiguration.%d.Value.1' % (key_counter,)
params[key_name] = value
return key_counter + 1
if domain_name:
key_counter = insert_option(params,
'domain-name', domain_name)
if domain_name_servers:
key_counter = insert_option(params,
'domain-name-servers', domain_name_servers)
if ntp_servers:
key_counter = insert_option(params,
'ntp-servers', ntp_servers)
if netbios_name_servers:
key_counter = insert_option(params,
'netbios-name-servers', netbios_name_servers)
if netbios_node_type:
key_counter = insert_option(params,
'netbios-node-type', netbios_node_type)
return self.get_object('CreateDhcpOptions', params, DhcpOptions)
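    # Illustration (hypothetical values): with domain_name='example.com'
    # and ntp_servers=['10.0.0.1', '10.0.0.2'], the insert_option helper
    # above builds wire parameters of the form:
    #   DhcpConfiguration.1.Key     = 'domain-name'
    #   DhcpConfiguration.1.Value.1 = 'example.com'
    #   DhcpConfiguration.2.Key     = 'ntp-servers'
    #   DhcpConfiguration.2.Value.1 = '10.0.0.1'
    #   DhcpConfiguration.2.Value.2 = '10.0.0.2'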
def delete_dhcp_options(self, dhcp_options_id):
"""
Delete a DHCP Options
:type dhcp_options_id: str
:param dhcp_options_id: The ID of the DHCP Options to be deleted.
:rtype: bool
:return: True if successful
"""
params = {'DhcpOptionsId': dhcp_options_id}
return self.get_status('DeleteDhcpOptions', params)
def associate_dhcp_options(self, dhcp_options_id, vpc_id):
"""
Associate a set of Dhcp Options with a VPC.
:type dhcp_options_id: str
:param dhcp_options_id: The ID of the Dhcp Options
:type vpc_id: str
:param vpc_id: The ID of the VPC.
:rtype: bool
:return: True if successful
"""
params = {'DhcpOptionsId': dhcp_options_id,
'VpcId' : vpc_id}
return self.get_status('AssociateDhcpOptions', params)
# VPN Connection
def get_all_vpn_connections(self, vpn_connection_ids=None, filters=None):
"""
        Retrieve information about your VPN connections. You can filter results
        to return information only about those VPN connections that match your
        search parameters. Otherwise, all VPN connections associated with your
        account are returned.
        :type vpn_connection_ids: list
        :param vpn_connection_ids: A list of strings with the desired VpnConnection IDs
:type filters: list of tuples
:param filters: A list of tuples containing filters. Each tuple
consists of a filter key and a filter value.
Possible filter keys are:
              - *state*, a list of states of the VpnConnection
                (pending,available,deleting,deleted)
- *type*, a list of types of connection, currently 'ipsec.1'
- *customerGatewayId*, a list of IDs of the customer gateway
associated with the VPN
- *vpnGatewayId*, a list of IDs of the VPN gateway associated
with the VPN connection
:rtype: list
        :return: A list of :class:`boto.vpc.vpnconnection.VpnConnection`
"""
params = {}
if vpn_connection_ids:
self.build_list_params(params, vpn_connection_ids,
                                   'VpnConnectionId')
if filters:
self.build_filter_params(params, dict(filters))
return self.get_list('DescribeVpnConnections', params,
[('item', VpnConnection)])
def create_vpn_connection(self, type, customer_gateway_id, vpn_gateway_id):
"""
Create a new VPN Connection.
:type type: str
:param type: The type of VPN Connection. Currently only 'ipsec.1'
is supported
:type customer_gateway_id: str
:param customer_gateway_id: The ID of the customer gateway.
:type vpn_gateway_id: str
:param vpn_gateway_id: The ID of the VPN gateway.
:rtype: The newly created VpnConnection
:return: A :class:`boto.vpc.vpnconnection.VpnConnection` object
"""
params = {'Type' : type,
'CustomerGatewayId' : customer_gateway_id,
'VpnGatewayId' : vpn_gateway_id}
return self.get_object('CreateVpnConnection', params, VpnConnection)
def delete_vpn_connection(self, vpn_connection_id):
"""
Delete a VPN Connection.
:type vpn_connection_id: str
:param vpn_connection_id: The ID of the vpn_connection to be deleted.
:rtype: bool
:return: True if successful
"""
params = {'VpnConnectionId': vpn_connection_id}
return self.get_status('DeleteVpnConnection', params)
def disable_vgw_route_propagation(self, route_table_id, gateway_id):
"""
Disables a virtual private gateway (VGW) from propagating routes to the
routing tables of an Amazon VPC.
:type route_table_id: str
:param route_table_id: The ID of the routing table.
:type gateway_id: str
:param gateway_id: The ID of the virtual private gateway.
:rtype: bool
:return: True if successful
"""
params = {
'RouteTableId': route_table_id,
'GatewayId': gateway_id,
}
        return self.get_status('DisableVgwRoutePropagation', params)
def enable_vgw_route_propagation(self, route_table_id, gateway_id):
"""
Enables a virtual private gateway (VGW) to propagate routes to the
routing tables of an Amazon VPC.
:type route_table_id: str
:param route_table_id: The ID of the routing table.
:type gateway_id: str
:param gateway_id: The ID of the virtual private gateway.
:rtype: bool
:return: True if successful
"""
params = {
'RouteTableId': route_table_id,
'GatewayId': gateway_id,
}
        return self.get_status('EnableVgwRoutePropagation', params)
def create_vpn_connection_route(self, destination_cidr_block,
vpn_connection_id):
"""
Creates a new static route associated with a VPN connection between an
existing virtual private gateway and a VPN customer gateway. The static
route allows traffic to be routed from the virtual private gateway to
the VPN customer gateway.
:type destination_cidr_block: str
:param destination_cidr_block: The CIDR block associated with the local
subnet of the customer data center.
:type vpn_connection_id: str
:param vpn_connection_id: The ID of the VPN connection.
:rtype: bool
:return: True if successful
"""
params = {
'DestinationCidrBlock': destination_cidr_block,
'VpnConnectionId': vpn_connection_id,
}
        return self.get_status('CreateVpnConnectionRoute', params)
def delete_vpn_connection_route(self, destination_cidr_block,
vpn_connection_id):
"""
Deletes a static route associated with a VPN connection between an
existing virtual private gateway and a VPN customer gateway. The static
route allows traffic to be routed from the virtual private gateway to
the VPN customer gateway.
:type destination_cidr_block: str
:param destination_cidr_block: The CIDR block associated with the local
subnet of the customer data center.
:type vpn_connection_id: str
:param vpn_connection_id: The ID of the VPN connection.
:rtype: bool
:return: True if successful
"""
params = {
'DestinationCidrBlock': destination_cidr_block,
'VpnConnectionId': vpn_connection_id,
}
        return self.get_status('DeleteVpnConnectionRoute', params)
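A minimal end-to-end sketch of the calls above; it assumes boto credentials are already configured, and every CIDR, address, and ASN below is illustrative:

from boto.vpc import VPCConnection

conn = VPCConnection()  # credentials come from the boto config/environment
vpc = conn.create_vpc('10.0.0.0/16')
subnet = conn.create_subnet(vpc.id, '10.0.0.0/24')
vgw = conn.create_vpn_gateway('ipsec.1')
conn.attach_vpn_gateway(vgw.id, vpc.id)
cgw = conn.create_customer_gateway('ipsec.1', '198.51.100.1', '65000')
vpn = conn.create_vpn_connection('ipsec.1', cgw.id, vgw.id)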
| 36.283205
| 110
| 0.621438
|
from boto.ec2.connection import EC2Connection
from boto.resultset import ResultSet
from boto.vpc.vpc import VPC
from boto.vpc.customergateway import CustomerGateway
from boto.vpc.routetable import RouteTable
from boto.vpc.internetgateway import InternetGateway
from boto.vpc.vpngateway import VpnGateway, Attachment
from boto.vpc.dhcpoptions import DhcpOptions
from boto.vpc.subnet import Subnet
from boto.vpc.vpnconnection import VpnConnection
from boto.ec2 import RegionData
from boto.regioninfo import RegionInfo
def regions(**kw_params):
regions = []
for region_name in RegionData:
region = RegionInfo(name=region_name,
endpoint=RegionData[region_name],
connection_cls=VPCConnection)
regions.append(region)
return regions
def connect_to_region(region_name, **kw_params):
for region in regions(**kw_params):
if region.name == region_name:
return region.connect(**kw_params)
return None
class VPCConnection(EC2Connection):
def get_all_vpcs(self, vpc_ids=None, filters=None):
params = {}
if vpc_ids:
self.build_list_params(params, vpc_ids, 'VpcId')
if filters:
self.build_filter_params(params, dict(filters))
return self.get_list('DescribeVpcs', params, [('item', VPC)])
def create_vpc(self, cidr_block):
params = {'CidrBlock' : cidr_block}
return self.get_object('CreateVpc', params, VPC)
def delete_vpc(self, vpc_id):
params = {'VpcId': vpc_id}
return self.get_status('DeleteVpc', params)
def get_all_route_tables(self, route_table_ids=None, filters=None):
params = {}
if route_table_ids:
self.build_list_params(params, route_table_ids, "RouteTableId")
if filters:
self.build_filter_params(params, dict(filters))
return self.get_list('DescribeRouteTables', params,
[('item', RouteTable)])
def associate_route_table(self, route_table_id, subnet_id):
params = {
'RouteTableId': route_table_id,
'SubnetId': subnet_id
}
result = self.get_object('AssociateRouteTable', params, ResultSet)
return result.associationId
def disassociate_route_table(self, association_id):
params = { 'AssociationId': association_id }
return self.get_status('DisassociateRouteTable', params)
def create_route_table(self, vpc_id):
params = { 'VpcId': vpc_id }
return self.get_object('CreateRouteTable', params, RouteTable)
def delete_route_table(self, route_table_id):
params = { 'RouteTableId': route_table_id }
return self.get_status('DeleteRouteTable', params)
def create_route(self, route_table_id, destination_cidr_block,
gateway_id=None, instance_id=None):
params = {
'RouteTableId': route_table_id,
'DestinationCidrBlock': destination_cidr_block
}
if gateway_id is not None:
params['GatewayId'] = gateway_id
elif instance_id is not None:
params['InstanceId'] = instance_id
return self.get_status('CreateRoute', params)
def replace_route(self, route_table_id, destination_cidr_block,
gateway_id=None, instance_id=None, interface_id=None):
params = {
'RouteTableId': route_table_id,
'DestinationCidrBlock': destination_cidr_block
}
if gateway_id is not None:
params['GatewayId'] = gateway_id
elif instance_id is not None:
params['InstanceId'] = instance_id
elif interface_id is not None:
params['NetworkInterfaceId'] = interface_id
return self.get_status('ReplaceRoute', params)
def delete_route(self, route_table_id, destination_cidr_block):
params = {
'RouteTableId': route_table_id,
'DestinationCidrBlock': destination_cidr_block
}
return self.get_status('DeleteRoute', params)
def get_all_internet_gateways(self, internet_gateway_ids=None,
filters=None):
params = {}
if internet_gateway_ids:
self.build_list_params(params, internet_gateway_ids,
'InternetGatewayId')
if filters:
self.build_filter_params(params, dict(filters))
return self.get_list('DescribeInternetGateways', params,
[('item', InternetGateway)])
def create_internet_gateway(self):
return self.get_object('CreateInternetGateway', {}, InternetGateway)
def delete_internet_gateway(self, internet_gateway_id):
params = { 'InternetGatewayId': internet_gateway_id }
return self.get_status('DeleteInternetGateway', params)
def attach_internet_gateway(self, internet_gateway_id, vpc_id):
params = {
'InternetGatewayId': internet_gateway_id,
'VpcId': vpc_id
}
return self.get_status('AttachInternetGateway', params)
def detach_internet_gateway(self, internet_gateway_id, vpc_id):
params = {
'InternetGatewayId': internet_gateway_id,
'VpcId': vpc_id
}
return self.get_status('DetachInternetGateway', params)
def get_all_customer_gateways(self, customer_gateway_ids=None,
filters=None):
params = {}
if customer_gateway_ids:
self.build_list_params(params, customer_gateway_ids,
'CustomerGatewayId')
if filters:
self.build_filter_params(params, dict(filters))
return self.get_list('DescribeCustomerGateways', params,
[('item', CustomerGateway)])
def create_customer_gateway(self, type, ip_address, bgp_asn):
params = {'Type' : type,
'IpAddress' : ip_address,
'BgpAsn' : bgp_asn}
return self.get_object('CreateCustomerGateway', params, CustomerGateway)
def delete_customer_gateway(self, customer_gateway_id):
params = {'CustomerGatewayId': customer_gateway_id}
return self.get_status('DeleteCustomerGateway', params)
def get_all_vpn_gateways(self, vpn_gateway_ids=None, filters=None):
params = {}
if vpn_gateway_ids:
self.build_list_params(params, vpn_gateway_ids, 'VpnGatewayId')
if filters:
self.build_filter_params(params, dict(filters))
return self.get_list('DescribeVpnGateways', params,
[('item', VpnGateway)])
def create_vpn_gateway(self, type, availability_zone=None):
params = {'Type' : type}
if availability_zone:
params['AvailabilityZone'] = availability_zone
return self.get_object('CreateVpnGateway', params, VpnGateway)
def delete_vpn_gateway(self, vpn_gateway_id):
params = {'VpnGatewayId': vpn_gateway_id}
return self.get_status('DeleteVpnGateway', params)
def attach_vpn_gateway(self, vpn_gateway_id, vpc_id):
params = {'VpnGatewayId': vpn_gateway_id,
'VpcId' : vpc_id}
return self.get_object('AttachVpnGateway', params, Attachment)
def get_all_subnets(self, subnet_ids=None, filters=None):
params = {}
if subnet_ids:
self.build_list_params(params, subnet_ids, 'SubnetId')
if filters:
self.build_filter_params(params, dict(filters))
return self.get_list('DescribeSubnets', params, [('item', Subnet)])
def create_subnet(self, vpc_id, cidr_block, availability_zone=None):
params = {'VpcId' : vpc_id,
'CidrBlock' : cidr_block}
if availability_zone:
params['AvailabilityZone'] = availability_zone
return self.get_object('CreateSubnet', params, Subnet)
def delete_subnet(self, subnet_id):
params = {'SubnetId': subnet_id}
return self.get_status('DeleteSubnet', params)
def get_all_dhcp_options(self, dhcp_options_ids=None):
params = {}
if dhcp_options_ids:
self.build_list_params(params, dhcp_options_ids, 'DhcpOptionsId')
return self.get_list('DescribeDhcpOptions', params,
[('item', DhcpOptions)])
def create_dhcp_options(self, domain_name=None, domain_name_servers=None,
ntp_servers=None, netbios_name_servers=None,
netbios_node_type=None):
key_counter = 1
params = {}
def insert_option(params, name, value):
params['DhcpConfiguration.%d.Key' % (key_counter,)] = name
if isinstance(value, (list, tuple)):
for idx, value in enumerate(value, 1):
key_name = 'DhcpConfiguration.%d.Value.%d' % (
key_counter, idx)
params[key_name] = value
else:
key_name = 'DhcpConfiguration.%d.Value.1' % (key_counter,)
params[key_name] = value
return key_counter + 1
if domain_name:
key_counter = insert_option(params,
'domain-name', domain_name)
if domain_name_servers:
key_counter = insert_option(params,
'domain-name-servers', domain_name_servers)
if ntp_servers:
key_counter = insert_option(params,
'ntp-servers', ntp_servers)
if netbios_name_servers:
key_counter = insert_option(params,
'netbios-name-servers', netbios_name_servers)
if netbios_node_type:
key_counter = insert_option(params,
'netbios-node-type', netbios_node_type)
return self.get_object('CreateDhcpOptions', params, DhcpOptions)
def delete_dhcp_options(self, dhcp_options_id):
params = {'DhcpOptionsId': dhcp_options_id}
return self.get_status('DeleteDhcpOptions', params)
def associate_dhcp_options(self, dhcp_options_id, vpc_id):
params = {'DhcpOptionsId': dhcp_options_id,
'VpcId' : vpc_id}
return self.get_status('AssociateDhcpOptions', params)
def get_all_vpn_connections(self, vpn_connection_ids=None, filters=None):
params = {}
if vpn_connection_ids:
self.build_list_params(params, vpn_connection_ids,
                                   'VpnConnectionId')
if filters:
self.build_filter_params(params, dict(filters))
return self.get_list('DescribeVpnConnections', params,
[('item', VpnConnection)])
def create_vpn_connection(self, type, customer_gateway_id, vpn_gateway_id):
params = {'Type' : type,
'CustomerGatewayId' : customer_gateway_id,
'VpnGatewayId' : vpn_gateway_id}
return self.get_object('CreateVpnConnection', params, VpnConnection)
def delete_vpn_connection(self, vpn_connection_id):
params = {'VpnConnectionId': vpn_connection_id}
return self.get_status('DeleteVpnConnection', params)
def disable_vgw_route_propagation(self, route_table_id, gateway_id):
params = {
'RouteTableId': route_table_id,
'GatewayId': gateway_id,
}
        return self.get_status('DisableVgwRoutePropagation', params)
def enable_vgw_route_propagation(self, route_table_id, gateway_id):
params = {
'RouteTableId': route_table_id,
'GatewayId': gateway_id,
}
        return self.get_status('EnableVgwRoutePropagation', params)
def create_vpn_connection_route(self, destination_cidr_block,
vpn_connection_id):
params = {
'DestinationCidrBlock': destination_cidr_block,
'VpnConnectionId': vpn_connection_id,
}
        return self.get_status('CreateVpnConnectionRoute', params)
def delete_vpn_connection_route(self, destination_cidr_block,
vpn_connection_id):
params = {
'DestinationCidrBlock': destination_cidr_block,
'VpnConnectionId': vpn_connection_id,
}
        return self.get_status('DeleteVpnConnectionRoute', params)
| true
| true
|
1c4186e2439425542f19dd835149f20ff44767fe
| 234
|
py
|
Python
|
saleor/product_max_min/error_codes.py
|
hoangtuananh97/saleor
|
94ad493ef61302fb458822868fc2b4a884ec2065
|
[
"CC-BY-4.0"
] | null | null | null |
saleor/product_max_min/error_codes.py
|
hoangtuananh97/saleor
|
94ad493ef61302fb458822868fc2b4a884ec2065
|
[
"CC-BY-4.0"
] | 4
|
2021-09-06T03:55:32.000Z
|
2021-10-15T08:47:58.000Z
|
saleor/product_max_min/error_codes.py
|
hoangtuananh97/saleor
|
94ad493ef61302fb458822868fc2b4a884ec2065
|
[
"CC-BY-4.0"
] | null | null | null |
from enum import Enum
class ProductMaxMinErrorCode(Enum):
ALREADY_EXISTS = "already_exists"
GRAPHQL_ERROR = "graphql_error"
INVALID = "invalid"
NOT_FOUND = "not_found"
REQUIRED = "required"
UNIQUE = "unique"
| 21.272727
| 37
| 0.696581
|
from enum import Enum
class ProductMaxMinErrorCode(Enum):
ALREADY_EXISTS = "already_exists"
GRAPHQL_ERROR = "graphql_error"
INVALID = "invalid"
NOT_FOUND = "not_found"
REQUIRED = "required"
UNIQUE = "unique"
| true
| true
|
1c4187ab85b90cb3e63425592c1c4a9c55e75c74
| 2,908
|
py
|
Python
|
src/primaires/vehicule/__init__.py
|
vlegoff/tsunami
|
36b3b974f6eefbf15cd5d5f099fc14630e66570b
|
[
"BSD-3-Clause"
] | 14
|
2015-08-21T19:15:21.000Z
|
2017-11-26T13:59:17.000Z
|
src/primaires/vehicule/__init__.py
|
vincent-lg/tsunami
|
36b3b974f6eefbf15cd5d5f099fc14630e66570b
|
[
"BSD-3-Clause"
] | 20
|
2015-09-29T20:50:45.000Z
|
2018-06-21T12:58:30.000Z
|
src/primaires/vehicule/__init__.py
|
vlegoff/tsunami
|
36b3b974f6eefbf15cd5d5f099fc14630e66570b
|
[
"BSD-3-Clause"
] | 3
|
2015-05-02T19:42:03.000Z
|
2018-09-06T10:55:00.000Z
|
# -*- coding: utf-8 -*-
# Copyright (c) 2010-2017 DAVY Guillaume
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant le module primaire vehicule"""
from abstraits.module import *
from .vehicule import Vehicule
from .vecteur import Vecteur
import time
# Number of virtual seconds that elapse in one real second
VIRTSEC = 1
class Module(BaseModule):
"""Classe utilisée pour gérer des véhicules.
"""
def __init__(self, importeur):
"""Constructeur du module"""
BaseModule.__init__(self, importeur, "vehicule", "primaire")
self.commandes = []
self.vehicules = []
self.temps_precedant = time.time()
self.map = {}
def ajouter_vehicule(self, vehicule):
self.vehicules.append(vehicule)
def boucle(self):
"""A chaque tour de boucle synchro, on fait avancer les vehicules
"""
seconde_virtuelle = (time.time() - self.temps_precedant) * VIRTSEC
self.map = {}
for vehicule in self.vehicules:
masque = vehicule.get_prochaine_coordonnees(seconde_virtuelle)
impact = [x for x in masque if x in self.map]
if len(impact):
vehicule.collision(impact)
vehicule.avancer(seconde_virtuelle)
for coords in masque:
self.map[coords] = vehicule
self.temps_precedant = time.time()
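A standalone sketch of the coordinate-map collision scheme used in boucle above; StubVehicule and its methods are stand-ins for the real Vehicule API, which lives in .vehicule and is only assumed here:

class StubVehicule:
    """Minimal stand-in exposing the interface boucle relies on."""
    def __init__(self, coords):
        self.coords = coords
    def get_prochaine_coordonnees(self, dt):
        # Cells the vehicle will occupy next; this stub never moves.
        return self.coords
    def collision(self, impact):
        print("collision at", impact)
    def avancer(self, dt):
        pass

occupied = {}
for vehicule in (StubVehicule([(0, 0)]), StubVehicule([(0, 0)])):
    masque = vehicule.get_prochaine_coordonnees(1.0)
    impact = [c for c in masque if c in occupied]
    if impact:
        vehicule.collision(impact)  # the second vehicle hits the first's cell
    vehicule.avancer(1.0)
    for coords in masque:
        occupied[coords] = vehicule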
| 37.766234
| 79
| 0.69945
|
from abstraits.module import *
from .vehicule import Vehicule
from .vecteur import Vecteur
import time
VIRTSEC = 1
class Module(BaseModule):
def __init__(self, importeur):
BaseModule.__init__(self, importeur, "vehicule", "primaire")
self.commandes = []
self.vehicules = []
self.temps_precedant = time.time()
self.map = {}
def ajouter_vehicule(self, vehicule):
self.vehicules.append(vehicule)
def boucle(self):
seconde_virtuelle = (time.time() - self.temps_precedant) * VIRTSEC
self.map = {}
for vehicule in self.vehicules:
masque = vehicule.get_prochaine_coordonnees(seconde_virtuelle)
impact = [x for x in masque if x in self.map]
if len(impact):
vehicule.collision(impact)
vehicule.avancer(seconde_virtuelle)
for coords in masque:
self.map[coords] = vehicule
self.temps_precedant = time.time()
| true
| true
|
1c4187b0631d6e15249459ffc9c5679b90301371
| 2,560
|
py
|
Python
|
modules/generator/aligner.py
|
vliu15/tts-gan
|
6246c584a83f67dedaa25155c3b1491b99658319
|
[
"MIT"
] | 12
|
2021-02-17T23:37:52.000Z
|
2021-09-05T08:24:58.000Z
|
modules/generator/aligner.py
|
vliu15/tts-gan
|
6246c584a83f67dedaa25155c3b1491b99658319
|
[
"MIT"
] | null | null | null |
modules/generator/aligner.py
|
vliu15/tts-gan
|
6246c584a83f67dedaa25155c3b1491b99658319
|
[
"MIT"
] | 2
|
2021-04-27T12:41:58.000Z
|
2021-08-18T08:31:32.000Z
|
# Copyright (c) 2020 Vincent Liu
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
""" Contains aligner module for GAN-TTS models. """
import torch
import torch.nn as nn
import torch.nn.functional as F
class Aligner(nn.Module):
""" Aligner module, which interpolates input latent variables to output latent variables.
Args:
gamma: the variance (temperature) of the Gaussian kernel applied to logits before softmax.
References:
> (Donahue et al. 2020) End-to-End Adversarial Text-to-Speech, https://arxiv.org/abs/2006.03575
"""
def __init__(self, gamma: float = 10.0):
super().__init__()
self.gamma = gamma
def forward(self, x_latents, x_lengths, x_mask, y_len, y_offset=None):
"""
x_latents: [b, c, t_x]
x_lengths: [b, t_x]
x_mask: [b, 1, t_x]
y_len: [b]
y_offset: [b]
"""
if y_offset is None:
y_offset = torch.zeros_like(y_len)
x_ends = torch.cumsum(x_lengths, dim=-1) # [b, t_x]
x_centers = x_ends - 0.5 * x_lengths # [b, t_x]
pos = torch.arange(y_len.max(), device=y_len.device, dtype=y_len.dtype).unsqueeze(0) + y_offset.unsqueeze(1) # [b, t_y]
dist = x_centers.unsqueeze(-1) - pos.unsqueeze(1).float() # [b, t_x, t_y]
logits = -(dist ** 2 / self.gamma) - 1e9 * (1. - x_mask.permute(0, 2, 1)) # [b, t_x, t_y]
alignment = F.softmax(logits, dim=1) # [b, t_x, t_y]
y_latents = torch.bmm(x_latents, alignment) # [b, c, t_y]
return y_latents
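A quick shape check for the module above with dummy tensors; all sizes are arbitrary, and each input token is assumed to span four output frames:

import torch

aligner = Aligner(gamma=10.0)
b, c, t_x = 2, 8, 5
x_latents = torch.randn(b, c, t_x)
x_lengths = torch.full((b, t_x), 4.0)   # per-token output lengths
x_mask = torch.ones(b, 1, t_x)          # no padding in this toy batch
y_len = torch.tensor([20, 20])          # t_y = sum of x_lengths per example
y_latents = aligner(x_latents, x_lengths, x_mask, y_len)
print(y_latents.shape)                  # torch.Size([2, 8, 20])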
| 40
| 128
| 0.676953
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class Aligner(nn.Module):
def __init__(self, gamma: float = 10.0):
super().__init__()
self.gamma = gamma
def forward(self, x_latents, x_lengths, x_mask, y_len, y_offset=None):
if y_offset is None:
y_offset = torch.zeros_like(y_len)
x_ends = torch.cumsum(x_lengths, dim=-1)
x_centers = x_ends - 0.5 * x_lengths
pos = torch.arange(y_len.max(), device=y_len.device, dtype=y_len.dtype).unsqueeze(0) + y_offset.unsqueeze(1)
dist = x_centers.unsqueeze(-1) - pos.unsqueeze(1).float()
logits = -(dist ** 2 / self.gamma) - 1e9 * (1. - x_mask.permute(0, 2, 1))
alignment = F.softmax(logits, dim=1)
y_latents = torch.bmm(x_latents, alignment)
return y_latents
| true
| true
|
1c4187e00768efc44391aa55dd3463f0a0e8db54
| 789
|
py
|
Python
|
convert_savedmodel.py
|
anhlnt/age-gender-estimation
|
0a1c3a289a33c96c586ae8219911dbe51724f6d9
|
[
"MIT"
] | null | null | null |
convert_savedmodel.py
|
anhlnt/age-gender-estimation
|
0a1c3a289a33c96c586ae8219911dbe51724f6d9
|
[
"MIT"
] | null | null | null |
convert_savedmodel.py
|
anhlnt/age-gender-estimation
|
0a1c3a289a33c96c586ae8219911dbe51724f6d9
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
from src.factory import get_model
from omegaconf import OmegaConf
from pathlib import Path
def getModel():
weight_file = "pretrained_models/EfficientNetB3_224_weights.26-3.15.hdf5"
model_name, img_size = Path(weight_file).stem.split("_")[:2]
print('model_name: ', model_name, 'img_size: ', img_size)
img_size = int(img_size)
cfg = OmegaConf.from_dotlist([f"model.model_name={model_name}", f"model.img_size={img_size}"])
model = get_model(cfg)
model.load_weights(weight_file)
return model
def saveModel(model, path):
tf.saved_model.save(model, path)
def main():
model = getModel()
savePath = 'pretrained_models/EfficientNetB3_224_weights.26-3.15'
saveModel(model, savePath)
if __name__ == "__main__":
main()
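Once exported, the SavedModel directory can be reloaded without the original Keras factory code; a minimal sanity check, assuming the export path used in main() above:

import tensorflow as tf

loaded = tf.saved_model.load('pretrained_models/EfficientNetB3_224_weights.26-3.15')
print(list(loaded.signatures))  # typically includes 'serving_default'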
| 28.178571
| 98
| 0.723701
|
import tensorflow as tf
from src.factory import get_model
from omegaconf import OmegaConf
from pathlib import Path
def getModel():
weight_file = "pretrained_models/EfficientNetB3_224_weights.26-3.15.hdf5"
model_name, img_size = Path(weight_file).stem.split("_")[:2]
print('model_name: ', model_name, 'img_size: ', img_size)
img_size = int(img_size)
cfg = OmegaConf.from_dotlist([f"model.model_name={model_name}", f"model.img_size={img_size}"])
model = get_model(cfg)
model.load_weights(weight_file)
return model
def saveModel(model, path):
tf.saved_model.save(model, path)
def main():
model = getModel()
savePath = 'pretrained_models/EfficientNetB3_224_weights.26-3.15'
saveModel(model, savePath)
if __name__ == "__main__":
main()
| true
| true
|
1c41884da87b914bbcb7a7f6201224677146687c
| 11,952
|
py
|
Python
|
telegram/EnglishBot.py
|
eprivalov/-k5-kt4ltj3l4rn3k4jrbjr34hbr3jhrv34
|
387cf92a275e5b1fc4f32d1638b62c65bcc8c1c5
|
[
"Apache-2.0"
] | null | null | null |
telegram/EnglishBot.py
|
eprivalov/-k5-kt4ltj3l4rn3k4jrbjr34hbr3jhrv34
|
387cf92a275e5b1fc4f32d1638b62c65bcc8c1c5
|
[
"Apache-2.0"
] | null | null | null |
telegram/EnglishBot.py
|
eprivalov/-k5-kt4ltj3l4rn3k4jrbjr34hbr3jhrv34
|
387cf92a275e5b1fc4f32d1638b62c65bcc8c1c5
|
[
"Apache-2.0"
] | 1
|
2018-07-16T07:55:45.000Z
|
2018-07-16T07:55:45.000Z
|
import telebot
import psycopg2
import re
from telebot import types
import datetime
"""
Команда Insydia приветствует вас.
Здесь вы можете узнать о последних новостях на нашем портале.
Мы будем поддерживать данное направление и обновлять функционал нашего робота.
Спасибо, что начали пользоваться InsydiaAsiaBot.
"""
TOKEN = "196531742:AAGUaoxgMbin0gAAzOfulW58RPtbECrCkK0"
bot = telebot.TeleBot(TOKEN)
#DB_NAME = "insydia_main_content_database"
#USER = "eprivalov_db"
#PASSWORD = "InsydiaDBAdministrator192239"
DB_NAME = "test"
USER = "testuser"
PASSWORD = "test"
CONNECT_DB = "dbname='%s' user='%s' host='' password='%s'" % (DB_NAME, USER, PASSWORD)
@bot.message_handler(commands=["start"])
def send_welcome(message):
markup = types.ReplyKeyboardMarkup(row_width=3,resize_keyboard=True)
markup.add('Latest', 'Interest', 'Categories', 'Help')
welcome_text="""Команда Insydia приветствует вас.
Здесь вы можете узнать о последних новостях на нашем портале.
Мы будем поддерживать данное направление и обновлять функционал нашего робота.
Спасибо, что начали пользоваться InsydiaEnglishBot."""
bot.send_message(chat_id=message.chat.id, text=welcome_text, reply_markup=markup)
@bot.message_handler(regexp='^Categories$')
def categories(message):
markup = types.ReplyKeyboardMarkup(row_width=2, resize_keyboard=True)
markup.add('Technology', 'Entertainment', 'Auto', 'Space', 'Bio', 'Menu')
bot.send_message(chat_id=message.chat.id, text="Choose one of the categories below", reply_markup=markup)
@bot.message_handler(regexp='^(Technology|Entertainment|Auto|Space|Bio)$')
def categories(message):
match = re.findall(r'(Technology|Entertainment|Auto|Space|Bio)', message.text)
category = match[0]
markup = types.ReplyKeyboardMarkup(row_width=2, resize_keyboard=True)
markup.add('Last news(%s)' % category[0], 'Last 5 news(%s)' % category[0], 'Menu')
bot.send_message(chat_id=message.chat.id, text="Choose one of the categories below", reply_markup=markup)
@bot.message_handler(regexp='^Last\s[\d]+\snews\((T|E|A|S|B)\)$')
def categories(message):
"""
Last N articles of current category
:param message:
:return:
"""
markup = types.ReplyKeyboardMarkup(row_width=3, resize_keyboard=True)
markup.add('Latest', 'Interest', 'Categories', 'Help')
cat_dict = {
"T": 1, # Technology
"E": 2, # Entertainment
"A": 3, # Auto
"S": 4, # Space
"B": 5 # Bio
}
match = re.findall(r'\d+', message.text)
match_cat = re.findall(r'\((T|E|A|S|B)\)', message.text)
amount = match[0]
cat_letter = match_cat[0]
if int(amount) > 10:
bot.send_message(chat_id=message.chat.id, text="1-10")
else:
db = psycopg2.connect(CONNECT_DB)
cursor = db.cursor()
query_set = "SELECT id, news_title_english, news_post_date, slug, teaser_english FROM news WHERE news_category_id=%s ORDER BY id DESC LIMIT %s"
cat_id = cat_dict[cat_letter]
data_query_set = (cat_id, amount,)
cursor.execute(query_set, data_query_set)
item = cursor.fetchall()
for i in range(len(item)):
            date = datetime.date.isoformat(item[i][2]).split('-')
article = types.InlineQueryResultArticle(title="*%s*" % item[i][1],
message_text="%s" % item[i][4],
url="https://insydia.com/news/{year}/{month}/{day}/{id}/{slug}/".format(year=int(date[0]),
month=int(date[1]),
day=int(date[2]),
id=item[i][0],
slug=item[i][3]),
id=message.chat.id)
try:
bot.send_message(chat_id=message.chat.id,
reply_markup=markup,
text=article.title+"\n"+article.message_text+"\n"+article.url,
parse_mode="Markdown")
except TypeError:
pass
markup = types.ReplyKeyboardMarkup(row_width=3,resize_keyboard=True)
markup.add('Latest', 'Interest', 'Categories', 'Help')
try:
bot.send_message(chat_id=message.chat.id, reply_markup=markup)
except TypeError:
pass
@bot.message_handler(regexp='^Last\snews\((T|E|A|S|B)\)$')
def categories_last_one(message):
"""
Last one article of the current category
:param message:
:return:
"""
markup = types.ReplyKeyboardMarkup(row_width=3,resize_keyboard=True)
markup.add('Latest', 'Interest', 'Categories', 'Help')
cat_dict = {
"T": 1, # Technology
"E": 2, # Entertainment
"A": 3, # Auto
"S": 4, # Space
"B": 5 # Bio
}
match_cat = re.findall(r'\((T|E|A|S|B)\)', message.text)
cat_letter = match_cat[0]
db = psycopg2.connect(CONNECT_DB)
cursor = db.cursor()
query_set = "SELECT id, news_title_english, news_post_date, slug, teaser_english FROM news WHERE news_category_id=%s ORDER BY id DESC LIMIT 1"
cat_id = cat_dict[cat_letter]
data_query_set = (cat_id,)
cursor.execute(query_set, data_query_set)
item = cursor.fetchall()
for i in range(len(item)):
date = datetime.date.isoformat(item[0][2]).split('-')
article = types.InlineQueryResultArticle(title="*%s*" % item[0][1],
message_text="%s" % item[0][4],
url="https://insydia.com/news/{year}/{month}/{day}/{id}/{slug}/".format(year=int(date[0]),
month=int(date[1]),
day=int(date[2]),
id=item[0][0],
slug=item[0][3]),
id=message.chat.id)
try:
bot.send_message(chat_id=message.chat.id,
reply_markup=markup,
text=article.title+"\n"+article.message_text+"\n"+article.url,
parse_mode="Markdown")
except TypeError:
pass
markup = types.ReplyKeyboardMarkup(row_width=3, resize_keyboard=True)
markup.add('Latest', 'Interest', 'Categories', 'Help')
try:
bot.send_message(chat_id=message.chat.id, reply_markup=markup)
except TypeError:
pass
@bot.message_handler(regexp='^Menu')
def back_to_menu(message):
markup = types.ReplyKeyboardMarkup(row_width=3,resize_keyboard=True)
markup.add('Latest', 'Interest', 'Categories', 'Help')
welcome_text="How can I help you?"
bot.send_message(chat_id=message.chat.id, text=welcome_text, reply_markup=markup)
@bot.message_handler(regexp='^Interest')
def back_to_menu(message):
"""
Last one interesting article of the current category
:param message:
:return:
"""
markup = types.ReplyKeyboardMarkup(row_width=3, resize_keyboard=True)
markup.add('Latest', 'Interest', 'Categories', 'Help')
db = psycopg2.connect(CONNECT_DB)
cursor = db.cursor()
query_set = "SELECT t1.id, t1.news_title_english, t1.news_post_date, t1.slug, t1.teaser_english FROM news t1 INNER JOIN news_watches t2 ON t1.id=t2.news_id ORDER BY t2.watches DESC LIMIT 1"
# data_query_set = [amount]
cursor.execute(query_set)
item = cursor.fetchall()
date = datetime.date.isoformat(item[0][2]).split('-')
article = types.InlineQueryResultArticle(title="*%s*" % item[0][1],
message_text="%s" % item[0][4],
url="https://insydia.com/news/{year}/{month}/{day}/{id}/{slug}/".format(year=int(date[0]),
month=int(date[1]),
day=int(date[2]),
id=item[0][0],
slug=item[0][3]),
id=message.chat.id)
try:
bot.send_message(chat_id=message.chat.id,
reply_markup=markup,
text=article.title+"\n"+article.message_text+"\n"+article.url,
parse_mode="Markdown")
except TypeError:
pass
@bot.message_handler(regexp='^Latest')
def back_to_menu(message):
markup = types.ReplyKeyboardMarkup(row_width=3, resize_keyboard=True)
markup.add('Latest', 'Interest', 'Categories', 'Help')
db = psycopg2.connect(CONNECT_DB)
cursor = db.cursor()
query_set = "SELECT id, news_title_english, news_post_date, slug, teaser_english FROM news ORDER BY news_post_date DESC LIMIT 1"
cursor.execute(query_set)
item = cursor.fetchall()
date = datetime.date.isoformat(item[0][2]).split('-')
article = types.InlineQueryResultArticle(title="*%s*" % item[0][1],
message_text="%s" % item[0][4],
url="https://insydia.com/news/{year}/{month}/{day}/{id}/{slug}/".format(year=int(date[0]),
month=int(date[1]),
day=int(date[2]),
id=item[0][0],
slug=item[0][3]),
id=message.chat.id)
try:
bot.send_message(chat_id=message.chat.id,
reply_markup=markup,
text=article.title+"\n"+article.message_text+"\n"+article.url,
parse_mode="Markdown")
except TypeError:
pass
@bot.message_handler(regexp='^Help$')
def help_menu(message):
"""
Help menu
:param message:
:return:
"""
markup = types.ReplyKeyboardMarkup(row_width=3, resize_keyboard=True)
markup.add('Menu')
string = """If you have any problems, you can write to support@insydia.com. We will be very glad to help you and
maybe tell you something interesting.
Also, you can write to advert@insydia.com for advertising questions. Let's cooperate and deliver all the news from the IT industry all over the world.
Insydia Team
https://insydia.com
"""
bot.send_message(chat_id=message.chat.id, text=string, reply_markup=markup)
@bot.message_handler(regexp='[^Technology|Entertainment|Auto|Space|Bio|Menu|Help|Latest|Interest|(Last\snews\((T|E|A|S|B)\))]$')
def unsupported_symbols(message):
markup = types.ReplyKeyboardMarkup(row_width=3, resize_keyboard=True)
markup.add('Latest', 'Interest', 'Categories', 'Help')
string="Sorry, I don't understand you."
bot.send_message(chat_id=message.chat.id, text=string, reply_markup=markup)
bot.polling()
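For reference, a minimal illustration of how the handlers above parse the 'Last N news(X)' button text:

import re

text = 'Last 5 news(T)'
amount = re.findall(r'\d+', text)[0]                   # '5'
cat_letter = re.findall(r'\((T|E|A|S|B)\)', text)[0]   # 'T'
print(amount, cat_letter)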
| 45.793103
| 194
| 0.536647
|
import telebot
import psycopg2
import re
from telebot import types
import datetime
TOKEN = "196531742:AAGUaoxgMbin0gAAzOfulW58RPtbECrCkK0"
bot = telebot.TeleBot(TOKEN)
DB_NAME = "test"
USER = "testuser"
PASSWORD = "test"
CONNECT_DB = "dbname='%s' user='%s' host='' password='%s'" % (DB_NAME, USER, PASSWORD)
@bot.message_handler(commands=["start"])
def send_welcome(message):
markup = types.ReplyKeyboardMarkup(row_width=3,resize_keyboard=True)
markup.add('Latest', 'Interest', 'Categories', 'Help')
welcome_text="""Команда Insydia приветствует вас.
Здесь вы можете узнать о последних новостях на нашем портале.
Мы будем поддерживать данное направление и обновлять функционал нашего робота.
Спасибо, что начали пользоваться InsydiaEnglishBot."""
bot.send_message(chat_id=message.chat.id, text=welcome_text, reply_markup=markup)
@bot.message_handler(regexp='^Categories$')
def categories(message):
markup = types.ReplyKeyboardMarkup(row_width=2, resize_keyboard=True)
markup.add('Technology', 'Entertainment', 'Auto', 'Space', 'Bio', 'Menu')
bot.send_message(chat_id=message.chat.id, text="Choose one of the categories below", reply_markup=markup)
@bot.message_handler(regexp='^(Technology|Entertainment|Auto|Space|Bio)$')
def categories(message):
match = re.findall(r'(Technology|Entertainment|Auto|Space|Bio)', message.text)
category = match[0]
markup = types.ReplyKeyboardMarkup(row_width=2, resize_keyboard=True)
markup.add('Last news(%s)' % category[0], 'Last 5 news(%s)' % category[0], 'Menu')
bot.send_message(chat_id=message.chat.id, text="Choose one of the categories below", reply_markup=markup)
@bot.message_handler(regexp='^Last\s[\d]+\snews\((T|E|A|S|B)\)$')
def categories(message):
markup = types.ReplyKeyboardMarkup(row_width=3, resize_keyboard=True)
markup.add('Latest', 'Interest', 'Categories', 'Help')
cat_dict = {
"T": 1,
"E": 2,
"A": 3,
"S": 4,
"B": 5
}
match = re.findall(r'\d+', message.text)
match_cat = re.findall(r'\((T|E|A|S|B)\)', message.text)
amount = match[0]
cat_letter = match_cat[0]
if int(amount) > 10:
bot.send_message(chat_id=message.chat.id, text="1-10")
else:
db = psycopg2.connect(CONNECT_DB)
cursor = db.cursor()
query_set = "SELECT id, news_title_english, news_post_date, slug, teaser_english FROM news WHERE news_category_id=%s ORDER BY id DESC LIMIT %s"
cat_id = cat_dict[cat_letter]
data_query_set = (cat_id, amount,)
cursor.execute(query_set, data_query_set)
item = cursor.fetchall()
for i in range(len(item)):
            date = datetime.date.isoformat(item[i][2]).split('-')
article = types.InlineQueryResultArticle(title="*%s*" % item[i][1],
message_text="%s" % item[i][4],
url="https://insydia.com/news/{year}/{month}/{day}/{id}/{slug}/".format(year=int(date[0]),
month=int(date[1]),
day=int(date[2]),
id=item[i][0],
slug=item[i][3]),
id=message.chat.id)
try:
bot.send_message(chat_id=message.chat.id,
reply_markup=markup,
text=article.title+"\n"+article.message_text+"\n"+article.url,
parse_mode="Markdown")
except TypeError:
pass
markup = types.ReplyKeyboardMarkup(row_width=3,resize_keyboard=True)
markup.add('Latest', 'Interest', 'Categories', 'Help')
try:
bot.send_message(chat_id=message.chat.id, reply_markup=markup)
except TypeError:
pass
@bot.message_handler(regexp='^Last\snews\((T|E|A|S|B)\)$')
def categories_last_one(message):
markup = types.ReplyKeyboardMarkup(row_width=3,resize_keyboard=True)
markup.add('Latest', 'Interest', 'Categories', 'Help')
cat_dict = {
"T": 1,
"E": 2,
"A": 3,
"S": 4,
"B": 5
}
match_cat = re.findall(r'\((T|E|A|S|B)\)', message.text)
cat_letter = match_cat[0]
db = psycopg2.connect(CONNECT_DB)
cursor = db.cursor()
query_set = "SELECT id, news_title_english, news_post_date, slug, teaser_english FROM news WHERE news_category_id=%s ORDER BY id DESC LIMIT 1"
cat_id = cat_dict[cat_letter]
data_query_set = (cat_id,)
cursor.execute(query_set, data_query_set)
item = cursor.fetchall()
for i in range(len(item)):
date = datetime.date.isoformat(item[0][2]).split('-')
article = types.InlineQueryResultArticle(title="*%s*" % item[0][1],
message_text="%s" % item[0][4],
url="https://insydia.com/news/{year}/{month}/{day}/{id}/{slug}/".format(year=int(date[0]),
month=int(date[1]),
day=int(date[2]),
id=item[0][0],
slug=item[0][3]),
id=message.chat.id)
try:
bot.send_message(chat_id=message.chat.id,
reply_markup=markup,
text=article.title+"\n"+article.message_text+"\n"+article.url,
parse_mode="Markdown")
except TypeError:
pass
markup = types.ReplyKeyboardMarkup(row_width=3, resize_keyboard=True)
markup.add('Latest', 'Interest', 'Categories', 'Help')
try:
bot.send_message(chat_id=message.chat.id, reply_markup=markup)
except TypeError:
pass
@bot.message_handler(regexp='^Menu')
def back_to_menu(message):
markup = types.ReplyKeyboardMarkup(row_width=3,resize_keyboard=True)
markup.add('Latest', 'Interest', 'Categories', 'Help')
welcome_text="How can I help you?"
bot.send_message(chat_id=message.chat.id, text=welcome_text, reply_markup=markup)
@bot.message_handler(regexp='^Interest')
def back_to_menu(message):
markup = types.ReplyKeyboardMarkup(row_width=3, resize_keyboard=True)
markup.add('Latest', 'Interest', 'Categories', 'Help')
db = psycopg2.connect(CONNECT_DB)
cursor = db.cursor()
query_set = "SELECT t1.id, t1.news_title_english, t1.news_post_date, t1.slug, t1.teaser_english FROM news t1 INNER JOIN news_watches t2 ON t1.id=t2.news_id ORDER BY t2.watches DESC LIMIT 1"
cursor.execute(query_set)
item = cursor.fetchall()
date = datetime.date.isoformat(item[0][2]).split('-')
article = types.InlineQueryResultArticle(title="*%s*" % item[0][1],
message_text="%s" % item[0][4],
url="https://insydia.com/news/{year}/{month}/{day}/{id}/{slug}/".format(year=int(date[0]),
month=int(date[1]),
day=int(date[2]),
id=item[0][0],
slug=item[0][3]),
id=message.chat.id)
try:
bot.send_message(chat_id=message.chat.id,
reply_markup=markup,
text=article.title+"\n"+article.message_text+"\n"+article.url,
parse_mode="Markdown")
except TypeError:
pass
@bot.message_handler(regexp='^Latest')
def back_to_menu(message):
markup = types.ReplyKeyboardMarkup(row_width=3, resize_keyboard=True)
markup.add('Latest', 'Interest', 'Categories', 'Help')
db = psycopg2.connect(CONNECT_DB)
cursor = db.cursor()
query_set = "SELECT id, news_title_english, news_post_date, slug, teaser_english FROM news ORDER BY news_post_date DESC LIMIT 1"
cursor.execute(query_set)
item = cursor.fetchall()
date = datetime.date.isoformat(item[0][2]).split('-')
article = types.InlineQueryResultArticle(title="*%s*" % item[0][1],
message_text="%s" % item[0][4],
url="https://insydia.com/news/{year}/{month}/{day}/{id}/{slug}/".format(year=int(date[0]),
month=int(date[1]),
day=int(date[2]),
id=item[0][0],
slug=item[0][3]),
id=message.chat.id)
try:
bot.send_message(chat_id=message.chat.id,
reply_markup=markup,
text=article.title+"\n"+article.message_text+"\n"+article.url,
parse_mode="Markdown")
except TypeError:
pass
@bot.message_handler(regexp='^Help$')
def help_menu(message):
markup = types.ReplyKeyboardMarkup(row_width=3, resize_keyboard=True)
markup.add('Menu')
string = """If you have any problems, you can write to support@insydia.com. We will be very glad to help you and
maybe tell you something interesting.
Also, you can write to advert@insydia.com for advertising questions. Let's cooperate and deliver all the news from the IT industry all over the world.
Insydia Team
https://insydia.com
"""
bot.send_message(chat_id=message.chat.id, text=string, reply_markup=markup)
@bot.message_handler(regexp='[^Technology|Entertainment|Auto|Space|Bio|Menu|Help|Latest|Interest|(Last\snews\((T|E|A|S|B)\))]$')
def unsupported_symbols(message):
markup = types.ReplyKeyboardMarkup(row_width=3, resize_keyboard=True)
markup.add('Latest', 'Interest', 'Categories', 'Help')
string="Sorry, I don't understand you."
bot.send_message(chat_id=message.chat.id, text=string, reply_markup=markup)
bot.polling()
| true
| true
|
1c418861fb6e75e48a46198115b66dd8dd3e8209
| 137
|
py
|
Python
|
app/main/errors.py
|
geoffrey45/Baseline-news
|
d211a84e087a222cf1720808f4abe31b9315c632
|
[
"MIT"
] | null | null | null |
app/main/errors.py
|
geoffrey45/Baseline-news
|
d211a84e087a222cf1720808f4abe31b9315c632
|
[
"MIT"
] | null | null | null |
app/main/errors.py
|
geoffrey45/Baseline-news
|
d211a84e087a222cf1720808f4abe31b9315c632
|
[
"MIT"
] | null | null | null |
from flask import render_template
from . import main
@main.app_errorhandler(404)
def fof(error):
return render_template('fof.html'),404
| 22.833333
| 39
| 0.79562
|
from flask import render_template
from . import main
@main.app_errorhandler(404)
def fof(error):
return render_template('fof.html'),404
| true
| true
|
1c4188cfd7f0d0d43d11e3f374e78faf058b8467
| 8,710
|
py
|
Python
|
vkwave/bots/utils/keyboards/keyboard.py
|
krasnovmv/vkwave
|
e0db86cc16f97797765aadfb811ec87ff7945b1f
|
[
"MIT"
] | null | null | null |
vkwave/bots/utils/keyboards/keyboard.py
|
krasnovmv/vkwave
|
e0db86cc16f97797765aadfb811ec87ff7945b1f
|
[
"MIT"
] | null | null | null |
vkwave/bots/utils/keyboards/keyboard.py
|
krasnovmv/vkwave
|
e0db86cc16f97797765aadfb811ec87ff7945b1f
|
[
"MIT"
] | null | null | null |
import json
import typing
from enum import Enum
from vkwave.bots.core.types.json_types import JSONEncoder
from vkwave.bots.utils.keyboards._types import Button
from vkwave.bots.utils.keyboards._vkpayaction import (
VKPayAction,
VKPayActionTransferToUser,
VKPayActionTransferToGroup,
VKPayActionPayToUser,
VKPayActionPayToGroup
)
class ButtonColor(Enum):
PRIMARY = "primary" # blue
SECONDARY = "secondary" # white
NEGATIVE = "negative" # red
POSITIVE = "positive" # green
class ButtonType(Enum):
TEXT = "text"
LINK = "open_link"
CALLBACK = "callback"
LOCATION = "location"
VKPAY = "vkpay"
VKAPPS = "open_app"
class Keyboard:
def __init__(self, one_time: bool = False, inline: bool = False):
"""
Create a keyboard object
:param one_time:
"""
self.one_time = one_time
self.buttons: typing.List[typing.List[Button]] = [[]]
self.keyboard = {
"buttons": self.buttons,
"inline": inline,
}
if not inline:
self.keyboard["one_time"] = one_time
@staticmethod
def _generate_payload(
payload: typing.Optional[typing.Dict[str, str]]
) -> typing.Union[str, typing.Dict[str, str]]:
return payload if payload is not None else ""
def add_row(self) -> None:
"""
:return:
"""
self.buttons.append([])
def _add_button(self, action: dict) -> None:
"""
:param action:
:return:
"""
current_row = self.buttons[-1]
current_row.append(action)
def add_text_button(
self,
text: str,
color: typing.Union[str, ButtonColor] = ButtonColor.PRIMARY,
payload: typing.Optional[typing.Dict[str, str]] = None,
) -> None:
"""
:param text:
:param color:
:param payload:
:return:
"""
action = {
"action": {
"type": ButtonType.TEXT.value,
"payload": self._generate_payload(payload),
"label": text,
},
"color": color.value if isinstance(color, ButtonColor) else color,
}
self._add_button(action)
def add_callback_button(
self,
text: str,
color: typing.Union[str, ButtonColor] = ButtonColor.PRIMARY,
payload: typing.Optional[typing.Dict[str, str]] = None,
):
action = {
"action": {
"type": "callback",
"payload": self._generate_payload(payload),
"label": text,
},
"color": color.value if isinstance(color, ButtonColor) else color,
}
self._add_button(action)
def add_location_button(self, payload: typing.Optional[typing.Dict[str, str]] = None) -> None:
"""
:param payload:
:return:
"""
action = {
"action": {
"type": ButtonType.LOCATION.value,
"payload": self._generate_payload(payload),
}
}
self._add_button(action)
def add_link_button(
self, text: str, link: str, payload: typing.Optional[typing.Dict[str, str]] = None
) -> None:
action = {
"action": {
"type": ButtonType.LINK.value,
"label": text,
"link": link,
"payload": self._generate_payload(payload),
}
}
self._add_button(action)
def add_vkpay_button(
self,
hash_action: typing.Union[VKPayAction, str],
aid: int = 10,
payload: typing.Optional[typing.Dict[str, str]] = None
) -> None:
"""
:param hash_action: subclass of VKPayAction or action string like "action=transfer-to-group&group_id=123"
:param aid: application id (currently not supported)
:param payload:
:return:
"""
_hash: str
if isinstance(hash_action, VKPayAction):
_hash = hash_action.generate_hash()
else:
_hash = hash_action
_hash += f'&aid={aid}'
action = {
"action": {
"type": ButtonType.VKPAY.value,
"payload": self._generate_payload(payload),
"hash": _hash,
}
}
self._add_button(action)
def add_vkapps_button(
self,
app_id: int,
owner_id: int,
label: str,
payload: typing.Optional[typing.Dict[str, str]] = None,
) -> None:
"""
:param app_id:
:param owner_id:
:param payload:
:param label:
:return:
"""
action = {
"action": {
"type": ButtonType.VKAPPS.value,
"app_id": app_id,
"owner_id": owner_id,
"payload": self._generate_payload(payload),
"label": label,
}
}
self._add_button(action)
def get_keyboard(self, json_serialize: JSONEncoder = json.dumps) -> str:
"""
Get keyboard json to send.
If keyboard is 'static', you can generate json once and send it every time.
:return:
"""
return json_serialize(self.keyboard)
# vkPay aliases
def add_vkpay_button_pay_to_group(
self,
amount: int,
group_id: int,
description: typing.Optional[str] = None,
data: typing.Optional[typing.Dict[str, str]] = None,
payload: typing.Optional[typing.Dict[str, str]] = None
):
"""
:param amount: the amount of payment in rubles. The minimum value is 1;
:param group_id:
:param description: payment description
:param data: dictionary with custom parameters (from vk api docs)
:param payload:
"""
action = VKPayActionPayToGroup(
amount=amount,
group_id=group_id,
description=description,
data=data
)
return self.add_vkpay_button(hash_action=action.generate_hash(), payload=payload)
def add_vkpay_button_pay_to_user(
self,
amount: int,
user_id: int,
description: typing.Optional[str] = None,
payload: typing.Optional[typing.Dict[str, str]] = None
):
"""
:param amount: the amount of payment in rubles. The minimum value is 1;
:param user_id:
:param description: payment description
:param payload:
"""
action = VKPayActionPayToUser(
amount=amount,
user_id=user_id,
description=description
)
return self.add_vkpay_button(hash_action=action.generate_hash(), payload=payload)
def add_vkpay_button_transfer_to_group(
self,
group_id: int,
description: typing.Optional[str] = None,
payload: typing.Optional[typing.Dict[str, str]] = None
):
"""
:param group_id:
:param description: payment description
:param payload:
"""
action = VKPayActionTransferToGroup(
group_id=group_id,
description=description
)
return self.add_vkpay_button(hash_action=action.generate_hash(), payload=payload)
def add_vkpay_button_transfer_to_user(
self,
user_id: int,
description: typing.Optional[str] = None,
payload: typing.Optional[typing.Dict[str, str]] = None
):
"""
:param user_id:
:param description: payment description
:param payload:
"""
action = VKPayActionTransferToUser(
user_id=user_id,
description=description
)
return self.add_vkpay_button(hash_action=action.generate_hash(), payload=payload)
@classmethod
def get_empty_keyboard(cls) -> str:
"""
:return:
"""
keyboard = Keyboard(one_time=True)
keyboard.keyboard["buttons"] = []
return keyboard.get_keyboard()
class CallbackEventDataType(Enum):
TEXT = "text"
LINK = "open_link"
VKAPPS = "open_app"
class CallbackAnswer:
# custom dumper?
@classmethod
def show_snackbar(cls, text: str):
return json.dumps({"type": "show_snackbar", "text": text})
@classmethod
def open_link(cls, link: str):
return json.dumps({"type": "open_link", "link": link})
@classmethod
def open_app(cls, app_id: int, hash: str, owner_id: typing.Optional[int] = None):
return json.dumps(
{"type": "open_app", "app_id": app_id, "owner_id": owner_id, "hash": hash}
)
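A minimal usage sketch of the Keyboard class above; labels and payloads are arbitrary, and the resulting JSON string is what gets passed to the VK API when sending a message:

kb = Keyboard(one_time=True)
kb.add_text_button('Yes', color=ButtonColor.POSITIVE, payload={'cmd': 'yes'})
kb.add_row()
kb.add_text_button('No', color=ButtonColor.NEGATIVE)
print(kb.get_keyboard())  # JSON ready for the keyboard parameter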
| 27.916667
| 113
| 0.559013

hexsha: 1c41892a9cf0cd7b8e924beef7797f773203bc37 | size: 23526 | ext: py | lang: Python
max_stars_repo_path: ClientGenerator/src/googleapis/codegen/api_test.py
max_stars_repo_name: Ramkarthik/google-api-dotnet-client | head_hexsha: d752f96e8a6de53922c22eedc73ea7077628b106 | licenses: ["Apache-2.0"] | stars: 3 (2017-06-11T10:55:49.000Z .. 2022-01-07T18:49:47.000Z)
max_issues_repo_path: ClientGenerator/src/googleapis/codegen/api_test.py
max_issues_repo_name: Alexisblues/google-api-dotnet-client | head_hexsha: c06374c2ebe79068add7ab445c4aa3308370fb8a | licenses: ["Apache-2.0"] | issues: null
max_forks_repo_path: ClientGenerator/src/googleapis/codegen/api_test.py
max_forks_repo_name: Alexisblues/google-api-dotnet-client | head_hexsha: c06374c2ebe79068add7ab445c4aa3308370fb8a | licenses: ["Apache-2.0"] | forks: 2 (2019-12-30T03:32:56.000Z .. 2022-03-21T10:19:38.000Z)
content:
#!/usr/bin/python2.7
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for api.py."""
__author__ = 'aiuto@google.com (Tony Aiuto)'
import json
import os
import gflags as flags
from google.apputils import basetest
from googleapis.codegen import data_types
from googleapis.codegen import language_model
from googleapis.codegen.api import Api
from googleapis.codegen.api import AuthScope
from googleapis.codegen.api import Method
from googleapis.codegen.api import Resource
from googleapis.codegen.api import Schema
from googleapis.codegen.api_exception import ApiException
FLAGS = flags.FLAGS
class FakeLanguageModel(language_model.LanguageModel):
def GetCodeTypeFromDictionary(self, def_dict):
return def_dict.get('type')
def ArrayOf(self, unused_var, s):
return 'Array[%s]' % s
class ApiTest(basetest.TestCase):
# The base discovery doc for most tests.
_TEST_DISCOVERY_DOC = 'sample_discovery.json'
_TEST_DISCOVERY_RPC_DOC = 'sample_discovery.rpc.json'
_TEST_SHARED_TYPES_DOC = 'sample_shared.json'
def ApiFromDiscoveryDoc(self, path):
"""Load a discovery doc from a file and creates a library Api.
Args:
path: (str) The path to the document.
Returns:
An Api for that document.
"""
f = open(os.path.join(os.path.dirname(__file__), 'testdata', path))
discovery_doc = json.loads(f.read())
f.close()
return Api(discovery_doc)
def testLazySchemaForCreation(self):
"""Check loading schemas which are known to have a forward reference.
In the test data, "Activity" refers to "Commment", and the nature
(sorted) of the loading code causes "Activity" to be processed
before "Commment". We want to make sure that SchemaFor does the right
thing with the lazy creation of activity.
"""
api = self.ApiFromDiscoveryDoc(self._TEST_DISCOVERY_DOC)
for schema in ['Activity', 'Comment', 'Activity.object']:
self.assertTrue(isinstance(api._schemas[schema], Schema))
  def testSchemaRefInProperties(self):  # renamed from SchemaRefInProperties so basetest discovers it
"""Make sure that an object ref works in a schema properties list."""
api = self.ApiFromDiscoveryDoc(self._TEST_DISCOVERY_DOC)
activity_schema = api._schemas['Activity']
for prop in activity_schema.values['properties']:
if prop.values['wireName'] == 'object':
self.assertEquals('ActivityObject',
prop.object_type.values['className'])
def testMakeDefaultSchemaNameFromTheDictTag(self):
"""Use the outer tag as id for schemas which have no id in their dict."""
discovery_doc = json.loads(
"""
{
"name": "fake",
"version": "v1",
"schemas": {
"should_use_id": {
"id": "named",
"type": "object",
"properties": { "dummy": { "type": "string" } }
},
"unnamed": {
"type": "object",
"properties": { "dummy": { "type": "string" } }
}
},
"resources": {}
}
""")
gen = Api(discovery_doc)
self.assertTrue('named' in gen._schemas)
self.assertTrue('unnamed' in gen._schemas)
def testUnknownHttpMethod(self):
"""Make sure we get an exception on unknown HTTP types."""
api = Api({'name': 'dummy', 'version': 'v1', 'resources': {}})
unused_resource = Resource(api, 'temp', {'methods': {}})
self.assertRaises(ApiException,
Method, api, 'bad', {
'rpcMethod': 'rpc',
'httpMethod': 'Not GET/POST/PUT/DELETE',
'parameters': {}
})
def testRequiredParameterList(self):
"""Make sure we are computing required parameters correctly."""
api = self.ApiFromDiscoveryDoc(self._TEST_DISCOVERY_DOC)
tests_executed = 0
for resource in api.values['resources']:
if resource.values['wireName'] == 'activities':
for method in resource.values['methods']:
if method.required_parameters:
required_names = [p.values['wireName']
for p in method.required_parameters]
self.assertEquals(method.values['parameterOrder'], required_names)
tests_executed += 1
method = api.MethodByName('chili.activities.get')
optional_names = set(p.values['wireName']
for p in method.optional_parameters)
self.assertEquals(set(['truncateAtom', 'max-comments', 'hl', 'max-liked']),
optional_names)
tests_executed += 1
self.assertEquals(7, tests_executed)
def testSchemaLoadingAsString(self):
"""Test for the "schema as strings" representation."""
api = self.ApiFromDiscoveryDoc('foo.v1.json')
self.assertEquals(4, len(api._schemas))
def testSubResources(self):
"""Test for the APIs with subresources."""
def CountResourceTree(resource):
ret = 0
for r in resource._resources:
ret += 1 + CountResourceTree(r)
return ret
api = self.ApiFromDiscoveryDoc('moderator.v1.json')
top_level_resources = 0
total_resources = 0
non_method_resources = 0
have_sub_resources = 0
have_sub_resources_and_methods = 0
for r in api._resources:
top_level_resources += 1
total_resources += 1 + CountResourceTree(r)
if not r._methods:
non_method_resources += 1
if r._resources:
have_sub_resources += 1
if r._resources and r._methods:
have_sub_resources_and_methods += 1
# Hand counted 18 resources in the file.
self.assertEquals(18, total_resources)
self.assertEquals(11, top_level_resources)
# 4 of them have no methods, only sub resources
self.assertEquals(4, non_method_resources)
# 6 of them have sub resources.
self.assertEquals(6, have_sub_resources)
# And, of course, 2 should have both sub resources and methods
self.assertEquals(2, have_sub_resources_and_methods)
def testParameters(self):
api = self.ApiFromDiscoveryDoc(self._TEST_DISCOVERY_DOC)
delete = api.MethodByName('chili.activities.delete')
self.assertEquals(1, len(delete.query_parameters))
self.assertEquals(3, len(delete.path_parameters))
required_p = FindByWireName(delete.values['parameters'],
'required_parameter')
self.assertEquals('query', required_p.location)
post_id = FindByWireName(delete.values['parameters'], 'postId')
self.assertEquals('path', post_id.location)
def testEnums(self):
gen = self.ApiFromDiscoveryDoc('enums.json')
# Find the method with the enums
m1 = gen.MethodByName('language.translations.list')
language = FindByWireName(m1.values['parameters'], 'language')
e = language.values['enumType']
self.assertEquals(m1, e.parent)
for name, value, desc in e.values['pairs']:
self.assertTrue(name in ['ENGLISH', 'ITALIAN', 'LANG_ZH_CN',
'LANG_ZH_TW'])
self.assertTrue(value in ['english', 'italian', 'lang_zh-CN',
'lang_zh-TW'])
self.assertTrue(desc in ['English (US)', 'Italian',
'Chinese (Simplified)', 'Chinese (Traditional)'])
accuracy = FindByWireName(m1.values['parameters'], 'accuracy')
e = accuracy.values['enumType']
self.assertEquals(m1, e.parent)
for name, value, desc in e.values['pairs']:
self.assertTrue(name in ['VALUE_1', 'VALUE_2', 'VALUE_3'])
self.assertTrue(value in ['1', '2', '3'])
def testArrayParameter(self):
api = self.ApiFromDiscoveryDoc(self._TEST_DISCOVERY_DOC)
search = api.MethodByName('chili.people.search')
filter_param = FindByWireName(search.values['parameters'], 'filters')
self.assertTrue(isinstance(filter_param.data_type,
data_types.ArrayDataType))
self.assertTrue(isinstance(filter_param.data_type._base_type,
data_types.PrimitiveDataType))
self.assertEquals('string',
filter_param.data_type._base_type.values['type'])
def testRepeatedEnum(self):
api = self.ApiFromDiscoveryDoc(self._TEST_DISCOVERY_DOC)
activities = FindByWireName(api.values['resources'], 'activities')
list_method = FindByWireName(activities.values['methods'], 'list')
options = [p for p in list_method.values['parameters']
if p.values['wireName'] == 'options'][0]
# Should be an array of enums of type string
self.assertTrue(isinstance(options.data_type, data_types.ArrayDataType))
self.assertTrue(isinstance(options.data_type._base_type, data_types.Enum))
self.assertEquals('string', options.data_type._base_type.values['type'])
def testScopes(self):
gen = self.ApiFromDiscoveryDoc(self._TEST_DISCOVERY_DOC)
scopes = gen.GetTemplateValue('authscopes')
self.assertEquals(2, len(scopes))
self.assertEquals('https://www.googleapis.com/auth/buzz',
scopes[0].GetTemplateValue('value'))
self.assertEquals('BUZZ',
scopes[0].GetTemplateValue('name'))
self.assertEquals('https://www.googleapis.com/auth/buzz.read-only',
scopes[1].GetTemplateValue('value'))
self.assertEquals('BUZZ_READ_ONLY',
scopes[1].GetTemplateValue('name'))
def testAuthScope(self):
api = self.ApiFromDiscoveryDoc(self._TEST_DISCOVERY_DOC)
scope = AuthScope(api,
'https://www.googleapis.com/auth/userinfo.email',
{'description': 'A typical scope'})
self.assertEquals('USERINFO_EMAIL', scope.GetTemplateValue('name'))
self.assertEquals('userinfo.email', scope.GetTemplateValue('lastPart'))
self.assertEquals('A typical scope', scope.GetTemplateValue('description'))
scope = AuthScope(api,
'https://www.googleapis.com/auth/no.description', {})
self.assertEquals('NO_DESCRIPTION', scope.GetTemplateValue('name'))
self.assertEquals('https://www.googleapis.com/auth/no.description',
scope.GetTemplateValue('description'))
scope = AuthScope(api, 'https://www.googleapis.com/auth/trim.slashes//', {})
self.assertEquals('TRIM_SLASHES', scope.GetTemplateValue('name'))
self.assertEquals('https://www.googleapis.com/auth/trim.slashes//',
scope.GetTemplateValue('value'))
scope = AuthScope(api,
'https://www.googleapis.com/auth/product',
{'description': 'A product level scope'})
self.assertEquals('PRODUCT', scope.GetTemplateValue('name'))
scope = AuthScope(api,
'https://mail.google.com/',
{'description': 'A non-googleapis.com scope'})
self.assertEquals('MAIL_GOOGLE_COM', scope.GetTemplateValue('name'))
self.assertEquals('mail.google.com', scope.GetTemplateValue('lastPart'))
self.assertEquals('https://mail.google.com/',
scope.GetTemplateValue('value'))
scope = AuthScope(api,
'https://mail.google.com/abc',
{'description': 'A non-googleapis.com scope'})
self.assertEquals('MAIL_GOOGLE_COM_ABC', scope.GetTemplateValue('name'))
scope = AuthScope(api,
'http://mail.google.com/',
{'description': 'A non-https scope'})
self.assertEquals('HTTP___MAIL_GOOGLE_COM', scope.GetTemplateValue('name'))
scope = AuthScope(api, 'tag:google.com,2010:auth/groups2#email', {})
self.assertEquals('TAG_GOOGLE_COM_2010_AUTH_GROUPS2_EMAIL',
scope.GetTemplateValue('name'))
scope = AuthScope(api, 'email', {})
self.assertEquals('EMAIL', scope.GetTemplateValue('name'))
def testPostVariations(self):
gen = self.ApiFromDiscoveryDoc('post_variations.json')
# Check a normal GET method to make sure it has no request and does have
# a response
r1 = FindByWireName(gen.values['resources'], 'r1')
methods = r1.values['methods']
m = FindByWireName(methods, 'get')
self.assertIsNone(m.values['requestType'])
self.assertEquals('Task', m.values['responseType'].class_name)
# A normal POST with both a request and response
m = FindByWireName(methods, 'insert')
self.assertEquals('Task', m.values['requestType'].class_name)
self.assertEquals('Task', m.values['responseType'].class_name)
# A POST with neither request nor response
m = FindByWireName(methods, 'no_request_no_response')
self.assertIsNone(m.values.get('requestType'))
self.assertTrue(isinstance(m.values.get('responseType'), data_types.Void))
# A POST with no request
m = FindByWireName(methods, 'no_request')
self.assertIsNone(m.values.get('requestType'))
self.assertEquals('Task', m.values['responseType'].class_name)
# A PUT with no response
m = FindByWireName(methods, 'no_response')
self.assertEquals('TaskList', m.values['requestType'].class_name)
self.assertTrue(isinstance(m.values.get('responseType'), data_types.Void))
def testSchemaParenting(self):
api = self.ApiFromDiscoveryDoc(self._TEST_DISCOVERY_DOC)
# Check that top level schemas have no parent
for schema in ['Activity', 'Comment']:
self.assertIsNone(api._schemas[schema].parent)
for schema in ['Person.urls', 'Activity.object',
'Activity.object.attachments']:
self.assertTrue(api._schemas[schema].parent)
# verify the values in the name to schema map
for name, schema in api._schemas.items():
if schema.parent and schema.parent != api:
wire_name = schema.values['wireName']
parent_wire_name = schema.parent.values['wireName']
# Our entry key should never match the wirename of our parent
self.assertNotEquals(name, parent_wire_name)
# our key must look like 'p1.p2....parent.me'. We verify that we at
# least end with 'parent.me'
self.assertTrue(name.endswith('.'.join([parent_wire_name, wire_name])))
def testReadingRpcDiscovery(self):
gen = self.ApiFromDiscoveryDoc(self._TEST_DISCOVERY_RPC_DOC)
# no resources in RPC
self.assertEquals(0, len(gen.values['resources']))
# but we do expect a few methods
self.assertLess(5, len(gen.values['methods']))
self.assertGreater(100, len(gen.values['methods']))
# RPC methods all have an id, httpMethod should be POST and have no path
for method in gen.values['methods']:
self.assertIsNotNone(method.values['id'])
self.assertEquals('POST', method.values['httpMethod'])
self.assertIsNone(method.values['restPath'])
def testNormalizeUrlComponents(self):
googleapis_base = 'https://www.googleapis.com/'
def LoadApi(discovery_dict):
d = {'name': 'fake', 'version': 'v1'}
d.update(discovery_dict)
api = Api(d)
return api
api = LoadApi({})
self.assertEquals(googleapis_base, api.values['rootUrl'])
self.assertEquals('fake/v1/', api.values['servicePath'])
custom_path = '/testing/fake/v1/'
api = LoadApi({'basePath': custom_path})
self.assertEquals(googleapis_base, api.values['rootUrl'])
self.assertEquals('testing/fake/v1/', api.values['servicePath'])
custom_url = 'https://foo.com/bar/baz/'
api = LoadApi({'basePath': custom_url})
self.assertEquals('https://foo.com/', api.values['rootUrl'])
self.assertEquals('bar/baz/', api.values['servicePath'])
# Make sure baseUrl wins over basePath
api = LoadApi({
'basePath': '/will/not/be/used/',
'baseUrl': custom_url
})
self.assertEquals('https://foo.com/', api.values['rootUrl'])
self.assertEquals('bar/baz/', api.values['servicePath'])
# Make sure rootUrl wins over all
api = LoadApi({
'basePath': '/will/not/be/used/',
'baseUrl': 'https://bar.com/not/used/',
'rootUrl': 'https://foo.com/',
'servicePath': 'bar/baz/',
})
self.assertEquals('https://foo.com/', api.values['rootUrl'])
self.assertEquals('bar/baz/', api.values['servicePath'])
# Test Swarm APIs
api = LoadApi({
'baseUrl': 'https://localhost.appspot.com/_ah/api/fake/v1/',
'basePath': '/_ah/api/fake/v1/',
'rootUrl': 'https://localhost.appspot.com/_ah/api/',
'servicePath': 'fake/v1/',
})
self.assertEquals('https://localhost.appspot.com/_ah/api/',
api.values['rootUrl'])
self.assertEquals('fake/v1/', api.values['servicePath'])
# .. in path
self.assertRaises(ValueError, LoadApi, {'basePath': '/do/not/../go/up'})
# no servicePath
self.assertRaises(ValueError, LoadApi, {'rootUrl': 'https://foo.com/'})
# batchPath
api = LoadApi({})
self.assertEquals(None, api.values['batchPath'])
api = LoadApi({
'batchPath': 'batch'
})
self.assertEquals("batch", api.values['batchPath'])
api = LoadApi({
'batchPath': '/batch'
})
self.assertEquals("batch", api.values['batchPath'])
api = LoadApi({
'batchPath': '//batch'
})
self.assertEquals("batch", api.values['batchPath'])
def testCanonicalName(self):
d = {'name': 'fake', 'version': 'v1', 'canonicalName': 'My API'}
api = Api(d)
self.assertEquals('fake', api.values['name'])
self.assertEquals('MyAPI', api._class_name)
def testNormalizeOwnerInformation(self):
def LoadApi(**kwargs):
d = {'name': 'fake', 'version': 'v1'}
d.update(kwargs)
return Api(d)
api = LoadApi()
self.assertEquals('Google', api.values['ownerName'])
self.assertEquals('google', api.values['owner'])
self.assertEquals('google.com', api.values['ownerDomain'])
api = LoadApi(ownerName='Google', ownerDomain='youtube.com')
self.assertEquals('Google', api.values['ownerName'])
self.assertEquals('google', api.values['owner'])
self.assertEquals('youtube.com', api.values['ownerDomain'])
api = LoadApi(ownerDomain='youtube.com')
self.assertEquals('youtube_com', api.values['owner'])
self.assertEquals('youtube.com', api.values['ownerDomain'])
# owner is explicitly declared
api = LoadApi(owner='You Tube', ownerDomain='youtube.com')
self.assertEquals('You Tube', api.values['owner'])
self.assertEquals('youtube.com', api.values['ownerDomain'])
api = LoadApi(servicePath='/fake',
rootUrl='https://www.foobar.co.uk:8080/root')
self.assertEquals('www.foobar.co.uk', api['ownerDomain'])
self.assertEquals('www_foobar_co_uk', api['owner'])
api = LoadApi(servicePath='/fake',
rootUrl='https://whathaveyou.googleplex.com')
self.assertEquals('google.com', api['ownerDomain'])
self.assertEquals('Google', api['ownerName'])
self.assertEquals('google', api['owner'])
api = LoadApi(servicePath='/fake',
rootUrl='https://whathaveyou.googleapis.com')
self.assertEquals('google.com', api['ownerDomain'])
self.assertEquals('Google', api['ownerName'])
self.assertEquals('google', api['owner'])
api = LoadApi(servicePath='/fake',
rootUrl='https://whathaveyou.google.com')
self.assertEquals('google.com', api['ownerDomain'])
self.assertEquals('Google', api['ownerName'])
self.assertEquals('google', api['owner'])
def testSharedTypes(self):
api = self.ApiFromDiscoveryDoc(self._TEST_SHARED_TYPES_DOC)
api.VisitAll(lambda o: o.SetLanguageModel(language_model.LanguageModel()))
# class defined by the API
photos_feed_schema = api._schemas['PhotosFeed']
# type defined from a shared type repo
photo_schema = api._schemas[
'http://www.googleapis.com/types/v1/com.google/plus/v2/photo']
self.assertEquals('PhotosFeed', photos_feed_schema.values['wireName'])
self.assertEquals('com.google.myservice', photos_feed_schema.module.name)
self.assertEquals('Photo', photo_schema.values['wireName'])
self.assertEquals('com.google.plus.pictures', photo_schema.module.name)
self.assertEquals('com/google/plus/pictures', photo_schema.module.path)
def testMethods(self):
api = self.ApiFromDiscoveryDoc(self._TEST_DISCOVERY_DOC)
self.assertEquals(api, api.top_level_methods[0].parent)
self.assertLess(25, len(api.all_methods))
self.assertLess(0, len(api.top_level_methods))
def testApiHasTitle(self):
api_def = {'name': 'fake',
'version': 'v1',
'schemas': {},
'resources': {}}
api = Api(api_def)
self.assertEquals('fake', api['title'])
def testExponentialBackoffDefault(self):
# Make sure exponentialBackoffDefault defaults to False.
discovery_doc = json.loads(
"""
{
"name": "fake",
"version": "v1",
"schemas": {},
"resources": {}
}
""")
api = Api(discovery_doc)
    self.assertFalse(api.values['exponentialBackoffDefault'])
    # Make sure exponentialBackoffDefault gets set to True when the
    # discovery document requests it.
discovery_doc2 = json.loads(
"""
{
"name": "fake",
"version": "v1",
"schemas": {},
"resources": {},
"exponentialBackoffDefault": true
}
""")
api2 = Api(discovery_doc2)
self.assertTrue(api2.values['exponentialBackoffDefault'])
class ApiModulesTest(basetest.TestCase):
def setUp(self):
self.discovery_doc = json.loads(
"""
{
"name": "fake",
"version": "v1",
"schemas": {},
"resources": {}
}
""")
self.language_model = FakeLanguageModel()
def testModuleOwnerDomain(self):
self.discovery_doc['ownerDomain'] = 'foo.bar'
api = Api(self.discovery_doc)
api.VisitAll(lambda o: o.SetLanguageModel(self.language_model))
self.assertEquals('bar/foo/fake', api.values['module'].path)
def testModulePackagePath(self):
self.discovery_doc['packagePath'] = 'foo/BAR'
api = Api(self.discovery_doc)
api.VisitAll(lambda o: o.SetLanguageModel(self.language_model))
self.assertEquals('com/google/foo/BAR/fake', api.values['module'].path)
def testModuleOwnerDomainAndPackagePath(self):
self.discovery_doc['ownerDomain'] = 'toasty.com'
self.discovery_doc['packagePath'] = 'foo/BAR'
api = Api(self.discovery_doc)
api.VisitAll(lambda o: o.SetLanguageModel(self.language_model))
self.assertEquals('com/toasty/foo/BAR/fake', api.values['module'].path)
def FindByWireName(list_of_resource_or_method, wire_name):
"""Find an element in a list by its "wireName".
The "wireName" is the name of the method "on the wire", which is the raw name
as it appears in the JSON.
Args:
list_of_resource_or_method: A list of resource or methods as annotated by
the Api.
    wire_name: (str) The name to find.
Returns:
dict or None
"""
for x in list_of_resource_or_method:
if x.values['wireName'] == wire_name:
return x
return None
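# Editor's note (illustration, not part of the original test file): the tests
# above use FindByWireName as their lookup helper, e.g.
# FindByWireName(delete.values['parameters'], 'postId') returns the parameter
# object whose wireName is 'postId', or None when nothing matches.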
if __name__ == '__main__':
basetest.main()
avg_line_length: 39.21 | max_line_length: 80 | alphanum_fraction: 0.655233

hexsha: 1c41896d23db8966b5ca3fc9304f280ab04c2298 | size: 126 | ext: py | lang: Python
max_stars_repo_path: pychatwork/api/model/Account.py
max_stars_repo_name: a-yasui/pyChatWork | head_hexsha: 5a4d60d8927ee288bdaafe86d09c6c5065bebccb | licenses: ["MIT"] | stars: null
max_issues_repo_path: pychatwork/api/model/Account.py
max_issues_repo_name: a-yasui/pyChatWork | head_hexsha: 5a4d60d8927ee288bdaafe86d09c6c5065bebccb | licenses: ["MIT"] | issues: null
max_forks_repo_path: pychatwork/api/model/Account.py
max_forks_repo_name: a-yasui/pyChatWork | head_hexsha: 5a4d60d8927ee288bdaafe86d09c6c5065bebccb | licenses: ["MIT"] | forks: null
content:
# coding: utf-8
from . import Model
class Account(Model):
def __init__(self, data):
Model.__init__(self, data)
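# Editor's illustration (not part of the original file): Account just hands the
# raw API payload to the Model base class; assuming ChatWork-style fields
# (invented for the example), usage might look like:
#
#     acct = Account({"account_id": 123, "name": "Alice"})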
avg_line_length: 15.75 | max_line_length: 34 | alphanum_fraction: 0.650794

hexsha: 1c41898a988ebc460ec7deaedfd5f5390c2f5fb8 | size: 1844 | ext: py | lang: Python
max_stars_repo_path: colour/models/rgb/__init__.py
max_stars_repo_name: gutenzwerg/colour | head_hexsha: 299eceb57483213e2544d532a6d3727887e49426 | licenses: ["BSD-3-Clause"] | stars: 6 (2019-06-18T18:53:29.000Z .. 2021-09-10T21:02:45.000Z)
max_issues_repo_path: colour/models/rgb/__init__.py
max_issues_repo_name: gutenzwerg/colour | head_hexsha: 299eceb57483213e2544d532a6d3727887e49426 | licenses: ["BSD-3-Clause"] | issues: null
max_forks_repo_path: colour/models/rgb/__init__.py
max_forks_repo_name: gutenzwerg/colour | head_hexsha: 299eceb57483213e2544d532a6d3727887e49426 | licenses: ["BSD-3-Clause"] | forks: null
content:
# -*- coding: utf-8 -*-
from .derivation import (normalised_primary_matrix,
chromatically_adapted_primaries, primaries_whitepoint,
RGB_luminance_equation, RGB_luminance)
from .rgb_colourspace import RGB_Colourspace
from .rgb_colourspace import XYZ_to_RGB, RGB_to_XYZ
from .rgb_colourspace import matrix_RGB_to_RGB, RGB_to_RGB
from .transfer_functions import * # noqa
from . import transfer_functions
from .datasets import * # noqa
from . import datasets
from .common import XYZ_to_sRGB, sRGB_to_XYZ
from .cylindrical import RGB_to_HSV, HSV_to_RGB, RGB_to_HSL, HSL_to_RGB
from .cmyk import RGB_to_CMY, CMY_to_RGB, CMY_to_CMYK, CMYK_to_CMY
from .prismatic import RGB_to_Prismatic, Prismatic_to_RGB
from .ycbcr import (WEIGHTS_YCBCR, matrix_YCbCr, offset_YCbCr, RGB_to_YCbCr,
YCbCr_to_RGB, RGB_to_YcCbcCrc, YcCbcCrc_to_RGB)
from .ycocg import RGB_to_YCoCg, YCoCg_to_RGB
from .ictcp import RGB_to_ICtCp, ICtCp_to_RGB, XYZ_to_ICtCp, ICtCp_to_XYZ
__all__ = [
'normalised_primary_matrix', 'chromatically_adapted_primaries',
'primaries_whitepoint', 'RGB_luminance_equation', 'RGB_luminance'
]
__all__ += ['RGB_Colourspace']
__all__ += ['XYZ_to_RGB', 'RGB_to_XYZ']
__all__ += ['matrix_RGB_to_RGB', 'RGB_to_RGB']
__all__ += transfer_functions.__all__
__all__ += datasets.__all__
__all__ += ['XYZ_to_sRGB', 'sRGB_to_XYZ']
__all__ += ['RGB_to_HSV', 'HSV_to_RGB', 'RGB_to_HSL', 'HSL_to_RGB']
__all__ += ['RGB_to_CMY', 'CMY_to_RGB', 'CMY_to_CMYK', 'CMYK_to_CMY']
__all__ += ['RGB_to_Prismatic', 'Prismatic_to_RGB']
__all__ += [
'WEIGHTS_YCBCR', 'matrix_YCbCr', 'offset_YCbCr', 'RGB_to_YCbCr',
'YCbCr_to_RGB', 'RGB_to_YcCbcCrc', 'YcCbcCrc_to_RGB'
]
__all__ += ['RGB_to_YCoCg', 'YCoCg_to_RGB']
__all__ += ['RGB_to_ICtCp', 'ICtCp_to_RGB', 'XYZ_to_ICtCp', 'ICtCp_to_XYZ']
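# Editor's illustration (not part of the original file): this __init__ only
# aggregates re-exports, so downstream code can import every RGB conversion
# from one namespace; a hedged sketch with invented sample values:
#
#     from colour.models.rgb import RGB_to_HSV, HSV_to_RGB
#     hsv = RGB_to_HSV([0.45, 0.31, 0.26])  # array-like RGB in, HSV out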
avg_line_length: 44.97561 | max_line_length: 79 | alphanum_fraction: 0.756508

hexsha: 1c418a5bd251b0ee4e7ca16f98c471e981f7b315 | size: 50101 | ext: py | lang: Python
max_stars_repo_path: src/cclib/parser/adfparser.py
max_stars_repo_name: maxscheurer/cclib | head_hexsha: 722a8b534686465d4e3ae57b8dd285a56f197e4a | licenses: ["BSD-3-Clause"] | stars: null
max_issues_repo_path: src/cclib/parser/adfparser.py
max_issues_repo_name: maxscheurer/cclib | head_hexsha: 722a8b534686465d4e3ae57b8dd285a56f197e4a | licenses: ["BSD-3-Clause"] | issues: null
max_forks_repo_path: src/cclib/parser/adfparser.py
max_forks_repo_name: maxscheurer/cclib | head_hexsha: 722a8b534686465d4e3ae57b8dd285a56f197e4a | licenses: ["BSD-3-Clause"] | forks: null
content:
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017, the cclib development team
#
# This file is part of cclib (http://cclib.github.io) and is distributed under
# the terms of the BSD 3-Clause License.
"""Parser for ADF output files"""
from __future__ import print_function
import itertools
import re
import numpy
from cclib.parser import logfileparser
from cclib.parser import utils
class ADF(logfileparser.Logfile):
"""An ADF log file"""
def __init__(self, *args, **kwargs):
# Call the __init__ method of the superclass
super(ADF, self).__init__(logname="ADF", *args, **kwargs)
def __str__(self):
"""Return a string representation of the object."""
return "ADF log file %s" % (self.filename)
def __repr__(self):
"""Return a representation of the object."""
return 'ADF("%s")' % (self.filename)
def normalisesym(self, label):
"""Use standard symmetry labels instead of ADF labels.
To normalise:
(1) any periods are removed (except in the case of greek letters)
(2) XXX is replaced by X, and a " added.
(3) XX is replaced by X, and a ' added.
(4) The greek letters Sigma, Pi, Delta and Phi are replaced by
their lowercase equivalent.
"""
greeks = ['Sigma', 'Pi', 'Delta', 'Phi']
for greek in greeks:
if label.startswith(greek):
return label.lower()
ans = label.replace(".", "")
if ans[1:3] == "''":
temp = ans[0] + '"'
ans = temp
l = len(ans)
if l > 1 and ans[0] == ans[1]: # Python only tests the second condition if the first is true
if l > 2 and ans[1] == ans[2]:
ans = ans.replace(ans[0]*3, ans[0]) + '"'
else:
ans = ans.replace(ans[0]*2, ans[0]) + "'"
return ans
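    # Editor's illustration (not part of the original parser): worked examples
    # of the rules documented above, assuming an ADF instance `adf`:
    #     adf.normalisesym("AA")    -> "A'"    (rule 3: XX becomes X plus ')
    #     adf.normalisesym("AAA")   -> 'A"'    (rule 2: XXX becomes X plus ")
    #     adf.normalisesym("Sigma") -> "sigma" (rule 4: greek letters lowercased)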
def normalisedegenerates(self, label, num, ndict=None):
"""Generate a string used for matching degenerate orbital labels
To normalise:
(1) if label is E or T, return label:num
(2) if label is P or D, look up in dict, and return answer
"""
if not ndict:
ndict = {
'P': {0: "P:x", 1: "P:y", 2: "P:z"},
'D': {0: "D:z2", 1: "D:x2-y2", 2: "D:xy", 3: "D:xz", 4: "D:yz"}
}
if label in ndict:
if num in ndict[label]:
return ndict[label][num]
else:
return "%s:%i" % (label, num+1)
else:
return "%s:%i" % (label, num+1)
def before_parsing(self):
# Used to avoid extracting the final geometry twice in a GeoOpt
self.NOTFOUND, self.GETLAST, self.NOMORE = list(range(3))
self.finalgeometry = self.NOTFOUND
        # Used for calculating the scftarget (variable names taken from the ADF manual)
self.accint = self.SCFconv = self.sconv2 = None
        # Keep track of the nosym and unrestricted cases when parsing energies, since there is no "all Irreps" section then
self.nosymflag = False
self.unrestrictedflag = False
SCFCNV, SCFCNV2 = list(range(2)) # used to index self.scftargets[]
maxelem, norm = list(range(2)) # used to index scf.values
def extract(self, inputfile, line):
"""Extract information from the file object inputfile."""
# If a file contains multiple calculations, currently we want to print a warning
# and skip to the end of the file, since cclib parses only the main system, which
# is usually the largest. Here we test this by checking if scftargets has already
# been parsed when another INPUT FILE segment is found, although this might
# not always be the best indicator.
if line.strip() == "(INPUT FILE)" and hasattr(self, "scftargets"):
self.logger.warning("Skipping remaining calculations")
inputfile.seek(0, 2)
return
# We also want to check to make sure we aren't parsing "Create" jobs,
# which normally come before the calculation we actually want to parse.
if line.strip() == "(INPUT FILE)":
while True:
self.updateprogress(inputfile, "Unsupported Information", self.fupdate)
line = next(inputfile) if line.strip() == "(INPUT FILE)" else None
if line and not line[:6] in ("Create", "create"):
break
line = next(inputfile)
# In ADF 2014.01, there are (INPUT FILE) messages, so we need to use just
# the lines that start with 'Create' and run until the title or something
        # else we are sure is the calculation proper. It would be good to combine
# this with the previous block, if possible.
if line[:6] == "Create":
while line[:5] != "title" and "NO TITLE" not in line:
                line = next(inputfile)
if line[1:10] == "Symmetry:":
info = line.split()
if info[1] == "NOSYM":
self.nosymflag = True
# Use this to read the subspecies of irreducible representations.
# It will be a list, with each element representing one irrep.
if line.strip() == "Irreducible Representations, including subspecies":
self.skip_line(inputfile, 'dashes')
self.irreps = []
line = next(inputfile)
while line.strip() != "":
self.irreps.append(line.split())
line = next(inputfile)
if line[4:13] == 'Molecule:':
info = line.split()
if info[1] == 'UNrestricted':
self.unrestrictedflag = True
if line[1:6] == "ATOMS":
# Find the number of atoms and their atomic numbers
# Also extract the starting coordinates (for a GeoOpt anyway)
# and the atommasses (previously called vibmasses)
self.updateprogress(inputfile, "Attributes", self.cupdate)
self.atomcoords = []
self.skip_lines(inputfile, ['header1', 'header2', 'header3'])
atomnos = []
atommasses = []
atomcoords = []
coreelectrons = []
line = next(inputfile)
while len(line) > 2: # ensure that we are reading no blank lines
info = line.split()
element = info[1].split('.')[0]
atomnos.append(self.table.number[element])
atomcoords.append(list(map(float, info[2:5])))
coreelectrons.append(int(float(info[5]) - float(info[6])))
atommasses.append(float(info[7]))
line = next(inputfile)
self.atomcoords.append(atomcoords)
self.set_attribute('natom', len(atomnos))
self.set_attribute('atomnos', atomnos)
self.set_attribute('atommasses', atommasses)
self.set_attribute('coreelectrons', coreelectrons)
if line[1:10] == "FRAGMENTS":
header = next(inputfile)
self.frags = []
self.fragnames = []
line = next(inputfile)
while len(line) > 2: # ensure that we are reading no blank lines
info = line.split()
if len(info) == 7: # fragment name is listed here
self.fragnames.append("%s_%s" % (info[1], info[0]))
self.frags.append([])
self.frags[-1].append(int(info[2]) - 1)
elif len(info) == 5: # add atoms into last fragment
self.frags[-1].append(int(info[0]) - 1)
line = next(inputfile)
# Extract charge
if line[1:11] == "Net Charge":
charge = int(line.split()[2])
self.set_attribute('charge', charge)
line = next(inputfile)
if len(line.strip()):
# Spin polar: 1 (Spin_A minus Spin_B electrons)
# (Not sure about this for higher multiplicities)
mult = int(line.split()[2]) + 1
else:
mult = 1
self.set_attribute('mult', mult)
if line[1:22] == "S C F U P D A T E S":
# find targets for SCF convergence
if not hasattr(self, "scftargets"):
self.scftargets = []
self.skip_lines(inputfile, ['e', 'b', 'numbers'])
line = next(inputfile)
self.SCFconv = float(line.split()[-1])
line = next(inputfile)
self.sconv2 = float(line.split()[-1])
# In ADF 2013, the default numerical integration method is fuzzy cells,
# although it used to be Voronoi polyhedra. Both methods apparently set
# the accint parameter, although the latter does so indirectly, based on
# a 'grid quality' setting. This is translated into accint using a
# dictionary with values taken from the documentation.
if "Numerical Integration : Voronoi Polyhedra (Te Velde)" in line:
self.integration_method = "voronoi_polyhedra"
if line[1:27] == 'General Accuracy Parameter':
# Need to know the accuracy of the integration grid to
# calculate the scftarget...note that it changes with time
self.accint = float(line.split()[-1])
if "Numerical Integration : Fuzzy Cells (Becke)" in line:
self.integration_method = 'fuzzy_cells'
if line[1:19] == "Becke grid quality":
self.grid_quality = line.split()[-1]
quality2accint = {
'BASIC': 2.0,
'NORMAL': 4.0,
'GOOD': 6.0,
'VERYGOOD': 8.0,
'EXCELLENT': 10.0,
}
self.accint = quality2accint[self.grid_quality]
# Half of the atomic orbital overlap matrix is printed since it is symmetric,
# but this requires "PRINT Smat" to be in the input. There are extra blank lines
# at the end of the block, which are used to terminate the parsing.
#
# ====== smat
#
# column 1 2 3 4
# row
# 1 1.00000000000000E+00
# 2 2.43370854175315E-01 1.00000000000000E+00
# 3 0.00000000000000E+00 0.00000000000000E+00 1.00000000000000E+00
# ...
#
if "====== smat" in line:
# Initialize the matrix with Nones so we can easily check all has been parsed.
overlaps = [[None] * self.nbasis for i in range(self.nbasis)]
self.skip_line(inputfile, 'blank')
            line = next(inputfile)
while line.strip():
colline = line
assert colline.split()[0] == "column"
columns = [int(i) for i in colline.split()[1:]]
                rowline = next(inputfile)
assert rowline.strip() == "row"
                line = next(inputfile)
while line.strip():
i = int(line.split()[0])
vals = [float(col) for col in line.split()[1:]]
for j, o in enumerate(vals):
k = columns[j]
overlaps[k-1][i-1] = o
overlaps[i-1][k-1] = o
                    line = next(inputfile)
                line = next(inputfile)
# Now all values should be parsed, and so no Nones remaining.
assert all([all([x is not None for x in ao]) for ao in overlaps])
self.set_attribute('aooverlaps', overlaps)
if line[1:11] == "CYCLE 1":
self.updateprogress(inputfile, "QM convergence", self.fupdate)
newlist = []
line = next(inputfile)
if not hasattr(self, "geovalues"):
# This is the first SCF cycle
self.scftargets.append([self.sconv2*10, self.sconv2])
elif self.finalgeometry in [self.GETLAST, self.NOMORE]:
# This is the final SCF cycle
self.scftargets.append([self.SCFconv*10, self.SCFconv])
else:
# This is an intermediate SCF cycle in a geometry optimization,
# in which case the SCF convergence target needs to be derived
# from the accint parameter. For Voronoi polyhedra integration,
# accint is printed and parsed. For fuzzy cells, it can be inferred
# from the grid quality setting, as is done somewhere above.
if self.accint:
oldscftst = self.scftargets[-1][1]
grdmax = self.geovalues[-1][1]
scftst = max(self.SCFconv, min(oldscftst, grdmax/30, 10**(-self.accint)))
self.scftargets.append([scftst*10, scftst])
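                        # Worked example with assumed numbers: for SCFconv = 1e-6,
                        # a previous target of 1e-3, grdmax = 0.03 and accint = 4.0,
                        #     scftst = max(1e-6, min(1e-3, 0.03/30, 10**-4.0)) = 1e-4
                        # so the target tightens as the gradient shrinks, but never
                        # drops below the final SCFconv criterion.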
while line.find("SCF CONVERGED") == -1 and line.find("SCF not fully converged, result acceptable") == -1 and line.find("SCF NOT CONVERGED") == -1:
if line[4:12] == "SCF test":
if not hasattr(self, "scfvalues"):
self.scfvalues = []
info = line.split()
newlist.append([float(info[4]), abs(float(info[6]))])
try:
line = next(inputfile)
except StopIteration: # EOF reached?
self.logger.warning("SCF did not converge, so attributes may be missing")
break
if line.find("SCF not fully converged, result acceptable") > 0:
self.logger.warning("SCF not fully converged, results acceptable")
if line.find("SCF NOT CONVERGED") > 0:
self.logger.warning("SCF did not converge! moenergies and mocoeffs are unreliable")
if hasattr(self, "scfvalues"):
self.scfvalues.append(newlist)
# Parse SCF energy for SP calcs from bonding energy decomposition section.
# It seems ADF does not print it earlier for SP calculations.
# Geometry optimization runs also print this, and we want to parse it
# for them, too, even if it repeats the last "Geometry Convergence Tests"
# section (but it's usually a bit different).
if line[:21] == "Total Bonding Energy:":
if not hasattr(self, "scfenergies"):
self.scfenergies = []
energy = utils.convertor(float(line.split()[3]), "hartree", "eV")
self.scfenergies.append(energy)
if line[51:65] == "Final Geometry":
self.finalgeometry = self.GETLAST
# Get the coordinates from each step of the GeoOpt.
if line[1:24] == "Coordinates (Cartesian)" and self.finalgeometry in [self.NOTFOUND, self.GETLAST]:
self.skip_lines(inputfile, ['e', 'b', 'title', 'title', 'd'])
atomcoords = []
line = next(inputfile)
while list(set(line.strip())) != ['-']:
atomcoords.append(list(map(float, line.split()[5:8])))
line = next(inputfile)
if not hasattr(self, "atomcoords"):
self.atomcoords = []
self.atomcoords.append(atomcoords)
# Don't get any more coordinates in this case.
# KML: I think we could combine this with optdone (see below).
if self.finalgeometry == self.GETLAST:
self.finalgeometry = self.NOMORE
# There have been some changes in the format of the geometry convergence information,
# and this is how it is printed in older versions (2007.01 unit tests).
#
# ==========================
# Geometry Convergence Tests
# ==========================
#
# Energy old : -5.14170647
# new : -5.15951374
#
# Convergence tests:
# (Energies in hartree, Gradients in hartree/angstr or radian, Lengths in angstrom, Angles in degrees)
#
# Item Value Criterion Conv. Ratio
# -------------------------------------------------------------------------
# change in energy -0.01780727 0.00100000 NO 0.00346330
# gradient max 0.03219530 0.01000000 NO 0.30402650
# gradient rms 0.00858685 0.00666667 NO 0.27221261
# cart. step max 0.07674971 0.01000000 NO 0.75559435
# cart. step rms 0.02132310 0.00666667 NO 0.55335378
#
if line[1:27] == 'Geometry Convergence Tests':
if not hasattr(self, "geotargets"):
self.geovalues = []
self.geotargets = numpy.array([0.0, 0.0, 0.0, 0.0, 0.0], "d")
if not hasattr(self, "scfenergies"):
self.scfenergies = []
self.skip_lines(inputfile, ['e', 'b'])
energies_old = next(inputfile)
energies_new = next(inputfile)
self.scfenergies.append(utils.convertor(float(energies_new.split()[-1]), "hartree", "eV"))
self.skip_lines(inputfile, ['b', 'convergence', 'units', 'b', 'header', 'd'])
values = []
for i in range(5):
temp = next(inputfile).split()
self.geotargets[i] = float(temp[-3])
values.append(float(temp[-4]))
self.geovalues.append(values)
# This is to make geometry optimization always have the optdone attribute,
# even if it is to be empty for unconverged runs.
if not hasattr(self, 'optdone'):
self.optdone = []
# After the test, there is a message if the search is converged:
#
# ***************************************************************************************************
# Geometry CONVERGED
# ***************************************************************************************************
#
if line.strip() == "Geometry CONVERGED":
self.skip_line(inputfile, 'stars')
self.optdone.append(len(self.geovalues) - 1)
# Here is the corresponding geometry convergence info from the 2013.01 unit test.
# Note that the step number is given, which it will be prudent to use in an assertion.
#
#----------------------------------------------------------------------
#Geometry Convergence after Step 3 (Hartree/Angstrom,Angstrom)
#----------------------------------------------------------------------
#current energy -5.16274478 Hartree
#energy change -0.00237544 0.00100000 F
#constrained gradient max 0.00884999 0.00100000 F
#constrained gradient rms 0.00249569 0.00066667 F
#gradient max 0.00884999
#gradient rms 0.00249569
#cart. step max 0.03331296 0.01000000 F
#cart. step rms 0.00844037 0.00666667 F
if line[:31] == "Geometry Convergence after Step":
stepno = int(line.split()[4])
# This is to make geometry optimization always have the optdone attribute,
# even if it is to be empty for unconverged runs.
if not hasattr(self, 'optdone'):
self.optdone = []
# The convergence message is inline in this block, not later as it was before.
if "** CONVERGED **" in line:
if not hasattr(self, 'optdone'):
self.optdone = []
self.optdone.append(len(self.geovalues) - 1)
self.skip_line(inputfile, 'dashes')
current_energy = next(inputfile)
energy_change = next(inputfile)
constrained_gradient_max = next(inputfile)
constrained_gradient_rms = next(inputfile)
gradient_max = next(inputfile)
gradient_rms = next(inputfile)
cart_step_max = next(inputfile)
cart_step_rms = next(inputfile)
if not hasattr(self, "scfenergies"):
self.scfenergies = []
energy = utils.convertor(float(current_energy.split()[-2]), "hartree", "eV")
self.scfenergies.append(energy)
if not hasattr(self, "geotargets"):
self.geotargets = numpy.array([0.0, 0.0, 0.0, 0.0, 0.0], "d")
self.geotargets[0] = float(energy_change.split()[-2])
self.geotargets[1] = float(constrained_gradient_max.split()[-2])
self.geotargets[2] = float(constrained_gradient_rms.split()[-2])
self.geotargets[3] = float(cart_step_max.split()[-2])
self.geotargets[4] = float(cart_step_rms.split()[-2])
if not hasattr(self, "geovalues"):
self.geovalues = []
self.geovalues.append([])
self.geovalues[-1].append(float(energy_change.split()[-3]))
self.geovalues[-1].append(float(constrained_gradient_max.split()[-3]))
self.geovalues[-1].append(float(constrained_gradient_rms.split()[-3]))
self.geovalues[-1].append(float(cart_step_max.split()[-3]))
self.geovalues[-1].append(float(cart_step_rms.split()[-3]))
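            # To make the mapping concrete, using the sample block quoted above:
            #     energy change       -0.00237544     0.00100000    F
            # contributes -0.00237544 (third token from the end) to geovalues
            # and 0.00100000 (second token from the end) to geotargets, while
            # the bare "gradient max/rms" lines carry no criterion and are
            # therefore not read here.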
if line.find('Orbital Energies, per Irrep and Spin') > 0 and not hasattr(self, "mosyms") and self.nosymflag and not self.unrestrictedflag:
#Extracting orbital symmetries and energies, homos for nosym case
#Should only be for restricted case because there is a better text block for unrestricted and nosym
self.mosyms = [[]]
self.moenergies = [[]]
self.skip_lines(inputfile, ['e', 'header', 'd', 'label'])
line = next(inputfile)
info = line.split()
            if info[0] != '1':
self.logger.warning("MO info up to #%s is missing" % info[0])
#handle case where MO information up to a certain orbital are missing
while int(info[0]) - 1 != len(self.moenergies[0]):
self.moenergies[0].append(99999)
self.mosyms[0].append('A')
homoA = None
while len(line) > 10:
info = line.split()
self.mosyms[0].append('A')
self.moenergies[0].append(utils.convertor(float(info[2]), 'hartree', 'eV'))
if info[1] == '0.000' and not hasattr(self, 'homos'):
self.set_attribute('homos', [len(self.moenergies[0]) - 2])
line = next(inputfile)
self.moenergies = [numpy.array(self.moenergies[0], "d")]
if line[1:29] == 'Orbital Energies, both Spins' and not hasattr(self, "mosyms") and self.nosymflag and self.unrestrictedflag:
#Extracting orbital symmetries and energies, homos for nosym case
#should only be here if unrestricted and nosym
self.mosyms = [[], []]
moenergies = [[], []]
self.skip_lines(inputfile, ['d', 'b', 'header', 'd'])
homoa = 0
homob = None
line = next(inputfile)
while len(line) > 5:
info = line.split()
if info[2] == 'A':
self.mosyms[0].append('A')
moenergies[0].append(utils.convertor(float(info[4]), 'hartree', 'eV'))
if info[3] != '0.00':
homoa = len(moenergies[0]) - 1
elif info[2] == 'B':
self.mosyms[1].append('A')
moenergies[1].append(utils.convertor(float(info[4]), 'hartree', 'eV'))
if info[3] != '0.00':
homob = len(moenergies[1]) - 1
else:
print(("Error reading line: %s" % line))
line = next(inputfile)
self.moenergies = [numpy.array(x, "d") for x in moenergies]
self.set_attribute('homos', [homoa, homob])
# Extracting orbital symmetries and energies, homos.
if line[1:29] == 'Orbital Energies, all Irreps' and not hasattr(self, "mosyms"):
self.symlist = {}
self.mosyms = [[]]
self.moenergies = [[]]
self.skip_lines(inputfile, ['e', 'b', 'header', 'd'])
homoa = None
homob = None
#multiple = {'E':2, 'T':3, 'P':3, 'D':5}
# The above is set if there are no special irreps
names = [irrep[0].split(':')[0] for irrep in self.irreps]
counts = [len(irrep) for irrep in self.irreps]
multiple = dict(list(zip(names, counts)))
irrepspecies = {}
for n in range(len(names)):
indices = list(range(counts[n]))
subspecies = self.irreps[n]
irrepspecies[names[n]] = dict(list(zip(indices, subspecies)))
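            # A small sketch with hypothetical irreps: self.irreps ==
            # [['A1'], ['E1:x', 'E1:y']] gives names == ['A1', 'E1'],
            # counts == [1, 2], multiple == {'A1': 1, 'E1': 2} and
            # irrepspecies == {'A1': {0: 'A1'}, 'E1': {0: 'E1:x', 1: 'E1:y'}},
            # which is what allows degenerate levels to be expanded into their
            # subspecies in the loop below.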
line = next(inputfile)
while line.strip():
info = line.split()
if len(info) == 5: # this is restricted
#count = multiple.get(info[0][0],1)
count = multiple.get(info[0], 1)
for repeat in range(count): # i.e. add E's twice, T's thrice
self.mosyms[0].append(self.normalisesym(info[0]))
self.moenergies[0].append(utils.convertor(float(info[3]), 'hartree', 'eV'))
sym = info[0]
if count > 1: # add additional sym label
sym = self.normalisedegenerates(info[0], repeat, ndict=irrepspecies)
try:
self.symlist[sym][0].append(len(self.moenergies[0])-1)
except KeyError:
self.symlist[sym] = [[]]
self.symlist[sym][0].append(len(self.moenergies[0])-1)
if info[2] == '0.00' and not hasattr(self, 'homos'):
self.homos = [len(self.moenergies[0]) - (count + 1)] # count, because need to handle degenerate cases
line = next(inputfile)
elif len(info) == 6: # this is unrestricted
if len(self.moenergies) < 2: # if we don't have space, create it
self.moenergies.append([])
self.mosyms.append([])
# count = multiple.get(info[0][0], 1)
count = multiple.get(info[0], 1)
if info[2] == 'A':
for repeat in range(count): # i.e. add E's twice, T's thrice
self.mosyms[0].append(self.normalisesym(info[0]))
self.moenergies[0].append(utils.convertor(float(info[4]), 'hartree', 'eV'))
sym = info[0]
if count > 1: # add additional sym label
sym = self.normalisedegenerates(info[0], repeat)
try:
self.symlist[sym][0].append(len(self.moenergies[0])-1)
except KeyError:
self.symlist[sym] = [[], []]
self.symlist[sym][0].append(len(self.moenergies[0])-1)
if info[3] == '0.00' and homoa is None:
homoa = len(self.moenergies[0]) - (count + 1) # count because degenerate cases need to be handled
if info[2] == 'B':
for repeat in range(count): # i.e. add E's twice, T's thrice
self.mosyms[1].append(self.normalisesym(info[0]))
self.moenergies[1].append(utils.convertor(float(info[4]), 'hartree', 'eV'))
sym = info[0]
if count > 1: # add additional sym label
sym = self.normalisedegenerates(info[0], repeat)
try:
self.symlist[sym][1].append(len(self.moenergies[1])-1)
except KeyError:
self.symlist[sym] = [[], []]
self.symlist[sym][1].append(len(self.moenergies[1])-1)
if info[3] == '0.00' and homob is None:
homob = len(self.moenergies[1]) - (count + 1)
line = next(inputfile)
else: # different number of lines
print(("Error", info))
if len(info) == 6: # still unrestricted, despite being out of loop
self.set_attribute('homos', [homoa, homob])
self.moenergies = [numpy.array(x, "d") for x in self.moenergies]
# Section on extracting vibdisps
# Also contains vibfreqs, but these are extracted in the
# following section (see below)
if line[1:28] == "Vibrations and Normal Modes":
self.vibdisps = []
self.skip_lines(inputfile, ['e', 'b', 'header', 'header', 'b', 'b'])
freqs = next(inputfile)
while freqs.strip() != "":
minus = next(inputfile)
p = [[], [], []]
for i in range(len(self.atomnos)):
broken = list(map(float, next(inputfile).split()[1:]))
for j in range(0, len(broken), 3):
p[j//3].append(broken[j:j+3])
self.vibdisps.extend(p[:(len(broken)//3)])
self.skip_lines(inputfile, ['b', 'b'])
freqs = next(inputfile)
self.vibdisps = numpy.array(self.vibdisps, "d")
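        # Layout sketch for the block above, assuming three modes are printed
        # side by side: each atom line then carries nine floats, e.g.
        #     1.C    0.00 0.00 0.08    0.00 0.07 0.01    0.06 0.00 0.00
        # (numbers invented), which p[j//3] splits into one (x, y, z) triple
        # per mode; a trailing block with fewer than three modes is handled by
        # the len(broken)//3 slice.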
if line[1:24] == "List of All Frequencies":
# Start of the IR/Raman frequency section
self.updateprogress(inputfile, "Frequency information", self.fupdate)
# self.vibsyms = [] # Need to look into this a bit more
self.vibirs = []
self.vibfreqs = []
for i in range(8):
line = next(inputfile)
line = next(inputfile).strip()
while line:
temp = line.split()
self.vibfreqs.append(float(temp[0]))
self.vibirs.append(float(temp[2])) # or is it temp[1]?
line = next(inputfile).strip()
self.vibfreqs = numpy.array(self.vibfreqs, "d")
self.vibirs = numpy.array(self.vibirs, "d")
if hasattr(self, "vibramans"):
self.vibramans = numpy.array(self.vibramans, "d")
        # TODO: delete this after a new implementation using smat, eigvec print, eprint?
# Extract the number of basis sets
if line[1:49] == "Total nr. of (C)SFOs (summation over all irreps)":
nbasis = int(line.split(":")[1].split()[0])
self.set_attribute('nbasis', nbasis)
# now that we're here, let's extract aonames
self.fonames = []
self.start_indeces = {}
self.atombasis = [[] for frag in self.frags] # parse atombasis in the case of trivial SFOs
self.skip_line(inputfile, 'blank')
note = next(inputfile)
symoffset = 0
self.skip_line(inputfile, 'blank')
line = next(inputfile)
if len(line) > 2: # fix for ADF2006.01 as it has another note
self.skip_line(inputfile, 'blank')
line = next(inputfile)
self.skip_line(inputfile, 'blank')
self.nosymreps = []
while len(self.fonames) < self.nbasis:
symline = next(inputfile)
sym = symline.split()[1]
line = next(inputfile)
num = int(line.split(':')[1].split()[0])
self.nosymreps.append(num)
#read until line "--------..." is found
while line.find('-----') < 0:
line = next(inputfile)
line = next(inputfile) # the start of the first SFO
while len(self.fonames) < symoffset + num:
info = line.split()
#index0 index1 occ2 energy3/4 fragname5 coeff6 orbnum7 orbname8 fragname9
                    if sym not in self.start_indeces:
#have we already set the start index for this symmetry?
self.start_indeces[sym] = int(info[1])
orbname = info[8]
orbital = info[7] + orbname.replace(":", "")
fragname = info[5]
frag = fragname + info[9]
coeff = float(info[6])
# parse atombasis only in the case that all coefficients are 1
# and delete it otherwise
if hasattr(self, 'atombasis'):
if coeff == 1.:
ibas = int(info[0]) - 1
ifrag = int(info[9]) - 1
iat = self.frags[ifrag][0]
self.atombasis[iat].append(ibas)
else:
del self.atombasis
line = next(inputfile)
while line.strip() and not line[:7].strip(): # while it's the same SFO
# i.e. while not completely blank, but blank at the start
info = line[43:].split()
if len(info) > 0: # len(info)==0 for the second line of dvb_ir.adfout
frag += "+" + fragname + info[-1]
coeff = float(info[-4])
if coeff < 0:
orbital += '-' + info[-3] + info[-2].replace(":", "")
else:
orbital += '+' + info[-3] + info[-2].replace(":", "")
line = next(inputfile)
# At this point, we are either at the start of the next SFO or at
# a blank line...the end
self.fonames.append("%s_%s" % (frag, orbital))
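                    # For example (hypothetical tokens): fragname 'N' with
                    # info[9] == '1', info[7] == '1' and orbname 'P:x' compose
                    # to the foname 'N1_1Px' appended here.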
symoffset += num
# blankline blankline
next(inputfile)
next(inputfile)
if line[1:32] == "S F O P O P U L A T I O N S ,":
#Extract overlap matrix
# self.fooverlaps = numpy.zeros((self.nbasis, self.nbasis), "d")
symoffset = 0
for nosymrep in self.nosymreps:
line = next(inputfile)
while line.find('===') < 10: # look for the symmetry labels
line = next(inputfile)
self.skip_lines(inputfile, ['b', 'b'])
text = next(inputfile)
if text[13:20] != "Overlap": # verify this has overlap info
break
self.skip_lines(inputfile, ['b', 'col', 'row'])
if not hasattr(self, "fooverlaps"): # make sure there is a matrix to store this
self.fooverlaps = numpy.zeros((self.nbasis, self.nbasis), "d")
base = 0
while base < nosymrep: # have we read all the columns?
for i in range(nosymrep - base):
self.updateprogress(inputfile, "Overlap", self.fupdate)
line = next(inputfile)
parts = line.split()[1:]
for j in range(len(parts)):
k = float(parts[j])
self.fooverlaps[base + symoffset + j, base + symoffset + i] = k
self.fooverlaps[base + symoffset + i, base + symoffset + j] = k
#blank, blank, column
for i in range(3):
next(inputfile)
base += 4
symoffset += nosymrep
base = 0
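            # Reading pattern sketch (hypothetical dimensions): for a
            # 6-dimensional irrep the first pass reads the triangular rows under
            # columns 1-4, then base += 4 starts a second pass for columns 5-6,
            # until base reaches nosymrep and the symmetric block is filled.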
# The commented code below makes the atombasis attribute based on the BAS function in ADF,
# but this is probably not so useful, since SFOs are used to build MOs in ADF.
# if line[1:54] == "BAS: List of all Elementary Cartesian Basis Functions":
#
# self.atombasis = []
#
# # There will be some text, followed by a line:
# # (power of) X Y Z R Alpha on Atom
# while not line[1:11] == "(power of)":
# line = inputfile.next()
# dashes = inputfile.next()
# blank = inputfile.next()
# line = inputfile.next()
# # There will be two blank lines when there are no more atom types.
# while line.strip() != "":
# atoms = [int(i)-1 for i in line.split()[1:]]
# for n in range(len(atoms)):
# self.atombasis.append([])
# dashes = inputfile.next()
# line = inputfile.next()
# while line.strip() != "":
# indices = [int(i)-1 for i in line.split()[5:]]
# for i in range(len(indices)):
# self.atombasis[atoms[i]].append(indices[i])
# line = inputfile.next()
# line = inputfile.next()
if line[48:67] == "SFO MO coefficients":
self.mocoeffs = [numpy.zeros((self.nbasis, self.nbasis), "d")]
spin = 0
symoffset = 0
lastrow = 0
            # Section ends with "1" at the beginning of a line.
while line[0] != "1":
line = next(inputfile)
# If spin is specified, then there will be two coefficient matrices.
if line.strip() == "***** SPIN 1 *****":
self.mocoeffs = [numpy.zeros((self.nbasis, self.nbasis), "d"),
numpy.zeros((self.nbasis, self.nbasis), "d")]
# Bump up the spin.
if line.strip() == "***** SPIN 2 *****":
spin = 1
symoffset = 0
lastrow = 0
# Next symmetry.
if line.strip()[:4] == "=== ":
sym = line.split()[1]
if self.nosymflag:
aolist = list(range(self.nbasis))
else:
aolist = self.symlist[sym][spin]
# Add to the symmetry offset of AO ordering.
symoffset += lastrow
# Blocks with coefficient always start with "MOs :".
if line[1:6] == "MOs :":
# Next line has the MO index contributed to.
monumbers = [int(n) for n in line[6:].split()]
self.skip_lines(inputfile, ['occup', 'label'])
# The table can end with a blank line or "1".
row = 0
line = next(inputfile)
while not line.strip() in ["", "1"]:
info = line.split()
if int(info[0]) < self.start_indeces[sym]:
#check to make sure we aren't parsing CFs
line = next(inputfile)
continue
self.updateprogress(inputfile, "Coefficients", self.fupdate)
row += 1
coeffs = [float(x) for x in info[1:]]
moindices = [aolist[n-1] for n in monumbers]
# The AO index is 1 less than the row.
aoindex = symoffset + row - 1
for i in range(len(monumbers)):
self.mocoeffs[spin][moindices[i], aoindex] = coeffs[i]
line = next(inputfile)
lastrow = row
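                # Running example of the offset bookkeeping (hypothetical): if
                # this symmetry block printed 5 AO rows, lastrow becomes 5, so
                # the next "=== " header advances symoffset by 5 and aoindex
                # continues exactly where the previous block stopped.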
# **************************************************************************
# * *
# * Final excitation energies from Davidson algorithm *
# * *
# **************************************************************************
#
# Number of loops in Davidson routine = 20
# Number of matrix-vector multiplications = 24
# Type of excitations = SINGLET-SINGLET
#
# Symmetry B.u
#
# ... several blocks ...
#
# Normal termination of EXCITATION program part
if line[4:53] == "Final excitation energies from Davidson algorithm":
while line[1:9] != "Symmetry" and "Normal termination" not in line:
line = next(inputfile)
symm = self.normalisesym(line.split()[1])
# Excitation energies E in a.u. and eV, dE wrt prev. cycle,
# oscillator strengths f in a.u.
#
# no. E/a.u. E/eV f dE/a.u.
# -----------------------------------------------------
# 1 0.17084 4.6488 0.16526E-01 0.28E-08
# ...
while line.split() != ['no.', 'E/a.u.', 'E/eV', 'f', 'dE/a.u.'] and "Normal termination" not in line:
line = next(inputfile)
self.skip_line(inputfile, 'dashes')
etenergies = []
etoscs = []
etsyms = []
line = next(inputfile)
while len(line) > 2:
info = line.split()
etenergies.append(utils.convertor(float(info[2]), "eV", "cm-1"))
etoscs.append(float(info[3]))
etsyms.append(symm)
line = next(inputfile)
# There is another section before this, with transition dipole moments,
# but this should just skip past it.
while line[1:53] != "Major MO -> MO transitions for the above excitations":
line = next(inputfile)
# Note that here, and later, the number of blank lines can vary between
# version of ADF (extra lines are seen in 2013.01 unit tests, for example).
self.skip_line(inputfile, 'blank')
excitation_occupied = next(inputfile)
header = next(inputfile)
while not header.strip():
header = next(inputfile)
header2 = next(inputfile)
x_y_z = next(inputfile)
line = next(inputfile)
while not line.strip():
line = next(inputfile)
            # Before we start handling transitions, we need to create mosyms
            # with indices; only restricted calcs are possible in ADF.
counts = {}
syms = []
for mosym in self.mosyms[0]:
                if mosym not in counts:
counts[mosym] = 1
else:
counts[mosym] += 1
syms.append(str(counts[mosym]) + mosym)
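            # For instance (hypothetical), mosyms[0] == ['A1', 'A1', 'B2']
            # yields syms == ['1A1', '2A1', '1B2'], matching the index+symmetry
            # labels (such as '2A1') used in the transition table parsed below.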
etsecs = []
printed_warning = False
for i in range(len(etenergies)):
etsec = []
info = line.split()
while len(info) > 0:
match = re.search('[^0-9]', info[1])
index1 = int(info[1][:match.start(0)])
text = info[1][match.start(0):]
symtext = text[0].upper() + text[1:]
sym1 = str(index1) + self.normalisesym(symtext)
match = re.search('[^0-9]', info[3])
index2 = int(info[3][:match.start(0)])
text = info[3][match.start(0):]
symtext = text[0].upper() + text[1:]
sym2 = str(index2) + self.normalisesym(symtext)
try:
index1 = syms.index(sym1)
except ValueError:
if not printed_warning:
self.logger.warning("Etsecs are not accurate!")
printed_warning = True
try:
index2 = syms.index(sym2)
except ValueError:
if not printed_warning:
self.logger.warning("Etsecs are not accurate!")
printed_warning = True
etsec.append([(index1, 0), (index2, 0), float(info[4])])
line = next(inputfile)
info = line.split()
etsecs.append(etsec)
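                # Sketch of one parsed transition, with invented tokens:
                #     info == ['1:', '21Aa', '->', '22Aa', '0.9500', ...]
                # gives sym1 == '21Aa' and sym2 == '22Aa', so etsec gains
                # [(syms.index('21Aa'), 0), (syms.index('22Aa'), 0), 0.95].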
# Again, the number of blank lines between transition can vary.
line = next(inputfile)
while not line.strip():
line = next(inputfile)
if not hasattr(self, "etenergies"):
self.etenergies = etenergies
else:
self.etenergies += etenergies
if not hasattr(self, "etoscs"):
self.etoscs = etoscs
else:
self.etoscs += etoscs
if not hasattr(self, "etsyms"):
self.etsyms = etsyms
else:
self.etsyms += etsyms
if not hasattr(self, "etsecs"):
self.etsecs = etsecs
else:
self.etsecs += etsecs
if "M U L L I K E N P O P U L A T I O N S" in line:
if not hasattr(self, "atomcharges"):
self.atomcharges = {}
while line[1:5] != "Atom":
line = next(inputfile)
self.skip_line(inputfile, 'dashes')
mulliken = []
line = next(inputfile)
while line.strip():
mulliken.append(float(line.split()[2]))
line = next(inputfile)
self.atomcharges["mulliken"] = mulliken
# Dipole moment is always printed after a point calculation,
# and the reference point for this is always the origin (0,0,0)
# and not necessarily the center of mass, as explained on the
# ADF user mailing list (see cclib/cclib#113 for details).
#
# =============
# Dipole Moment *** (Debye) ***
# =============
#
# Vector : 0.00000000 0.00000000 0.00000000
# Magnitude: 0.00000000
#
if line.strip()[:13] == "Dipole Moment":
self.skip_line(inputfile, 'equals')
# There is not always a blank line here, for example when the dipole and quadrupole
# moments are printed after the multipole derived atomic charges. Still, to the best
# of my knowledge (KML) the values are still in Debye.
line = next(inputfile)
if not line.strip():
line = next(inputfile)
assert line.split()[0] == "Vector"
dipole = [float(d) for d in line.split()[-3:]]
reference = [0.0, 0.0, 0.0]
if not hasattr(self, 'moments'):
self.moments = [reference, dipole]
else:
try:
assert self.moments[1] == dipole
except AssertionError:
self.logger.warning('Overwriting previous multipole moments with new values')
self.moments = [reference, dipole]
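        # The resulting attribute is a minimal sketch of the moments layout:
        #     self.moments == [[0.0, 0.0, 0.0], [dx, dy, dz]]
        # i.e. the reference point followed by the dipole vector in Debye.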
# Molecular response properties.
if line.strip()[1:-1].strip() == "RESPONSE program part":
while line.strip() != "Normal termination of RESPONSE program part":
if "THE DIPOLE-DIPOLE POLARIZABILITY TENSOR:" in line:
if not hasattr(self, 'polarizabilities'):
self.polarizabilities = []
polarizability = numpy.empty(shape=(3, 3))
self.skip_lines(inputfile, ['b', 'FREQUENCY', 'coordinates'])
# Ordering of rows/columns is Y, Z, X.
ordering = [1, 2, 0]
indices = list(itertools.product(ordering, ordering))
for i in range(3):
tokens = next(inputfile).split()
for j in range(3):
polarizability[indices[(i*3)+j]] = tokens[j]
self.polarizabilities.append(polarizability)
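                    # Concretely: itertools.product([1, 2, 0], [1, 2, 0]) begins
                    # with (1, 1), so the first printed value (the YY component)
                    # lands at polarizability[1, 1]; the printed Y, Z, X layout
                    # is thereby rearranged into a standard x, y, z tensor.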
line = next(inputfile)
avg_line_length: 43.415078 | max_line_length: 159 | alphanum_fraction: 0.479352
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 1c418ae6d868b44c271c4cbc30d61ab7f197e815 | size: 11954 | ext: py | lang: Python
max_stars_repo_path: tests/integration/cqlengine/test_lwt_conditional.py
max_stars_repo_name: clohfink/python-driver
max_stars_repo_head_hexsha: 30a0e27cd1b8999267c146f0a93adf962a50790b
max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: 1163
max_stars_repo_stars_event: 2015-01-01T03:02:05.000Z to 2022-03-22T13:04:00.000Z
max_issues_repo_path: tests/integration/cqlengine/test_lwt_conditional.py
max_issues_repo_name: clohfink/python-driver
max_issues_repo_head_hexsha: 30a0e27cd1b8999267c146f0a93adf962a50790b
max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: 556
max_issues_repo_issues_event: 2015-01-05T16:39:29.000Z to 2022-03-26T20:51:36.000Z
max_forks_repo_path: tests/integration/cqlengine/test_lwt_conditional.py
max_forks_repo_name: clohfink/python-driver
max_forks_repo_head_hexsha: 30a0e27cd1b8999267c146f0a93adf962a50790b
max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: 449
max_forks_repo_forks_event: 2015-01-05T10:28:59.000Z to 2022-03-14T23:15:32.000Z
# Copyright DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import unittest2 as unittest
except ImportError:
import unittest # noqa
import mock
import six
from uuid import uuid4
from cassandra.cqlengine import columns
from cassandra.cqlengine.management import sync_table, drop_table
from cassandra.cqlengine.models import Model
from cassandra.cqlengine.query import BatchQuery, LWTException
from cassandra.cqlengine.statements import ConditionalClause
from tests.integration.cqlengine.base import BaseCassEngTestCase
from tests.integration import greaterthancass20
class TestConditionalModel(Model):
id = columns.UUID(primary_key=True, default=uuid4)
count = columns.Integer()
text = columns.Text(required=False)
class TestUpdateModel(Model):
partition = columns.Integer(primary_key=True)
cluster = columns.Integer(primary_key=True)
value = columns.Integer(required=False)
text = columns.Text(required=False, index=True)
@greaterthancass20
class TestConditional(BaseCassEngTestCase):
@classmethod
def setUpClass(cls):
super(TestConditional, cls).setUpClass()
sync_table(TestConditionalModel)
@classmethod
def tearDownClass(cls):
super(TestConditional, cls).tearDownClass()
drop_table(TestConditionalModel)
def test_update_using_conditional(self):
t = TestConditionalModel.if_not_exists().create(text='blah blah')
t.text = 'new blah'
with mock.patch.object(self.session, 'execute') as m:
t.iff(text='blah blah').save()
args = m.call_args
self.assertIn('IF "text" = %(0)s', args[0][0].query_string)
def test_update_conditional_success(self):
t = TestConditionalModel.if_not_exists().create(text='blah blah', count=5)
id = t.id
t.text = 'new blah'
t.iff(text='blah blah').save()
updated = TestConditionalModel.objects(id=id).first()
self.assertEqual(updated.count, 5)
self.assertEqual(updated.text, 'new blah')
def test_update_failure(self):
t = TestConditionalModel.if_not_exists().create(text='blah blah')
t.text = 'new blah'
t = t.iff(text='something wrong')
with self.assertRaises(LWTException) as assertion:
t.save()
self.assertEqual(assertion.exception.existing, {
'text': 'blah blah',
'[applied]': False,
})
def test_blind_update(self):
t = TestConditionalModel.if_not_exists().create(text='blah blah')
t.text = 'something else'
uid = t.id
with mock.patch.object(self.session, 'execute') as m:
TestConditionalModel.objects(id=uid).iff(text='blah blah').update(text='oh hey der')
args = m.call_args
self.assertIn('IF "text" = %(1)s', args[0][0].query_string)
def test_blind_update_fail(self):
t = TestConditionalModel.if_not_exists().create(text='blah blah')
t.text = 'something else'
uid = t.id
qs = TestConditionalModel.objects(id=uid).iff(text='Not dis!')
with self.assertRaises(LWTException) as assertion:
qs.update(text='this will never work')
self.assertEqual(assertion.exception.existing, {
'text': 'blah blah',
'[applied]': False,
})
def test_conditional_clause(self):
tc = ConditionalClause('some_value', 23)
tc.set_context_id(3)
self.assertEqual('"some_value" = %(3)s', six.text_type(tc))
self.assertEqual('"some_value" = %(3)s', str(tc))
def test_batch_update_conditional(self):
t = TestConditionalModel.if_not_exists().create(text='something', count=5)
id = t.id
with BatchQuery() as b:
t.batch(b).iff(count=5).update(text='something else')
updated = TestConditionalModel.objects(id=id).first()
self.assertEqual(updated.text, 'something else')
b = BatchQuery()
updated.batch(b).iff(count=6).update(text='and another thing')
with self.assertRaises(LWTException) as assertion:
b.execute()
self.assertEqual(assertion.exception.existing, {
'id': id,
'count': 5,
'[applied]': False,
})
updated = TestConditionalModel.objects(id=id).first()
self.assertEqual(updated.text, 'something else')
@unittest.skip("Skipping until PYTHON-943 is resolved")
def test_batch_update_conditional_several_rows(self):
sync_table(TestUpdateModel)
self.addCleanup(drop_table, TestUpdateModel)
first_row = TestUpdateModel.create(partition=1, cluster=1, value=5, text="something")
second_row = TestUpdateModel.create(partition=1, cluster=2, value=5, text="something")
b = BatchQuery()
TestUpdateModel.batch(b).if_not_exists().create(partition=1, cluster=1, value=5, text='something else')
TestUpdateModel.batch(b).if_not_exists().create(partition=1, cluster=2, value=5, text='something else')
TestUpdateModel.batch(b).if_not_exists().create(partition=1, cluster=3, value=5, text='something else')
# The response will be more than two rows because two of the inserts will fail
with self.assertRaises(LWTException):
b.execute()
first_row.delete()
second_row.delete()
b.execute()
def test_delete_conditional(self):
# DML path
t = TestConditionalModel.if_not_exists().create(text='something', count=5)
self.assertEqual(TestConditionalModel.objects(id=t.id).count(), 1)
with self.assertRaises(LWTException):
t.iff(count=9999).delete()
self.assertEqual(TestConditionalModel.objects(id=t.id).count(), 1)
t.iff(count=5).delete()
self.assertEqual(TestConditionalModel.objects(id=t.id).count(), 0)
# QuerySet path
t = TestConditionalModel.if_not_exists().create(text='something', count=5)
self.assertEqual(TestConditionalModel.objects(id=t.id).count(), 1)
with self.assertRaises(LWTException):
TestConditionalModel.objects(id=t.id).iff(count=9999).delete()
self.assertEqual(TestConditionalModel.objects(id=t.id).count(), 1)
TestConditionalModel.objects(id=t.id).iff(count=5).delete()
self.assertEqual(TestConditionalModel.objects(id=t.id).count(), 0)
def test_delete_lwt_ne(self):
"""
        Test to ensure that deletes using IF with a not-equals condition are honored correctly
@since 3.2
@jira_ticket PYTHON-328
@expected_result Delete conditional with NE should be honored
@test_category object_mapper
"""
# DML path
t = TestConditionalModel.if_not_exists().create(text='something', count=5)
self.assertEqual(TestConditionalModel.objects(id=t.id).count(), 1)
with self.assertRaises(LWTException):
t.iff(count__ne=5).delete()
t.iff(count__ne=2).delete()
self.assertEqual(TestConditionalModel.objects(id=t.id).count(), 0)
# QuerySet path
t = TestConditionalModel.if_not_exists().create(text='something', count=5)
self.assertEqual(TestConditionalModel.objects(id=t.id).count(), 1)
with self.assertRaises(LWTException):
TestConditionalModel.objects(id=t.id).iff(count__ne=5).delete()
TestConditionalModel.objects(id=t.id).iff(count__ne=2).delete()
self.assertEqual(TestConditionalModel.objects(id=t.id).count(), 0)
def test_update_lwt_ne(self):
"""
        Test to ensure that updates using IF with a not-equals condition are honored correctly
@since 3.2
@jira_ticket PYTHON-328
@expected_result update conditional with NE should be honored
@test_category object_mapper
"""
# DML path
t = TestConditionalModel.if_not_exists().create(text='something', count=5)
self.assertEqual(TestConditionalModel.objects(id=t.id).count(), 1)
with self.assertRaises(LWTException):
t.iff(count__ne=5).update(text='nothing')
t.iff(count__ne=2).update(text='nothing')
self.assertEqual(TestConditionalModel.objects(id=t.id).first().text, 'nothing')
t.delete()
# QuerySet path
t = TestConditionalModel.if_not_exists().create(text='something', count=5)
self.assertEqual(TestConditionalModel.objects(id=t.id).count(), 1)
with self.assertRaises(LWTException):
TestConditionalModel.objects(id=t.id).iff(count__ne=5).update(text='nothing')
TestConditionalModel.objects(id=t.id).iff(count__ne=2).update(text='nothing')
self.assertEqual(TestConditionalModel.objects(id=t.id).first().text, 'nothing')
t.delete()
def test_update_to_none(self):
        # This test exists because updates to None are split into deletes
        # on old versions of Cassandra. It can be removed when that code is dropped:
        # https://github.com/datastax/python-driver/blob/3.1.1/cassandra/cqlengine/query.py#L1197-L1200
# DML path
t = TestConditionalModel.if_not_exists().create(text='something', count=5)
self.assertEqual(TestConditionalModel.objects(id=t.id).count(), 1)
with self.assertRaises(LWTException):
t.iff(count=9999).update(text=None)
self.assertIsNotNone(TestConditionalModel.objects(id=t.id).first().text)
t.iff(count=5).update(text=None)
self.assertIsNone(TestConditionalModel.objects(id=t.id).first().text)
# QuerySet path
t = TestConditionalModel.if_not_exists().create(text='something', count=5)
self.assertEqual(TestConditionalModel.objects(id=t.id).count(), 1)
with self.assertRaises(LWTException):
TestConditionalModel.objects(id=t.id).iff(count=9999).update(text=None)
self.assertIsNotNone(TestConditionalModel.objects(id=t.id).first().text)
TestConditionalModel.objects(id=t.id).iff(count=5).update(text=None)
self.assertIsNone(TestConditionalModel.objects(id=t.id).first().text)
def test_column_delete_after_update(self):
# DML path
t = TestConditionalModel.if_not_exists().create(text='something', count=5)
t.iff(count=5).update(text=None, count=6)
self.assertIsNone(t.text)
self.assertEqual(t.count, 6)
# QuerySet path
t = TestConditionalModel.if_not_exists().create(text='something', count=5)
TestConditionalModel.objects(id=t.id).iff(count=5).update(text=None, count=6)
self.assertIsNone(TestConditionalModel.objects(id=t.id).first().text)
self.assertEqual(TestConditionalModel.objects(id=t.id).first().count, 6)
def test_conditional_without_instance(self):
"""
Test to ensure that the iff method is honored if it's called
directly from the Model class
@jira_ticket PYTHON-505
@expected_result the value is updated
@test_category object_mapper
"""
uuid = uuid4()
TestConditionalModel.if_not_exists().create(id=uuid, text='test_for_cassandra', count=5)
# This uses the iff method directly from the model class without
# an instance having been created
TestConditionalModel.iff(count=5).filter(id=uuid).update(text=None, count=6)
t = TestConditionalModel.filter(id=uuid).first()
self.assertIsNone(t.text)
self.assertEqual(t.count, 6)
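# Usage sketch (illustrative addition, not part of the upstream test suite),
# showing the LWT pattern exercised above in application form; the names are
# the ones defined in this module:
#
#     row = TestConditionalModel.if_not_exists().create(text='initial')
#     try:
#         row.iff(text='initial').update(text='changed')
#     except LWTException as exc:
#         print(exc.existing)  # e.g. {'text': ..., '[applied]': False}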
| 39.846667
| 111
| 0.672829
|
try:
import unittest2 as unittest
except ImportError:
import unittest
import mock
import six
from uuid import uuid4
from cassandra.cqlengine import columns
from cassandra.cqlengine.management import sync_table, drop_table
from cassandra.cqlengine.models import Model
from cassandra.cqlengine.query import BatchQuery, LWTException
from cassandra.cqlengine.statements import ConditionalClause
from tests.integration.cqlengine.base import BaseCassEngTestCase
from tests.integration import greaterthancass20
class TestConditionalModel(Model):
id = columns.UUID(primary_key=True, default=uuid4)
count = columns.Integer()
text = columns.Text(required=False)
class TestUpdateModel(Model):
partition = columns.Integer(primary_key=True)
cluster = columns.Integer(primary_key=True)
value = columns.Integer(required=False)
text = columns.Text(required=False, index=True)
@greaterthancass20
class TestConditional(BaseCassEngTestCase):
@classmethod
def setUpClass(cls):
super(TestConditional, cls).setUpClass()
sync_table(TestConditionalModel)
@classmethod
def tearDownClass(cls):
super(TestConditional, cls).tearDownClass()
drop_table(TestConditionalModel)
def test_update_using_conditional(self):
t = TestConditionalModel.if_not_exists().create(text='blah blah')
t.text = 'new blah'
with mock.patch.object(self.session, 'execute') as m:
t.iff(text='blah blah').save()
args = m.call_args
self.assertIn('IF "text" = %(0)s', args[0][0].query_string)
def test_update_conditional_success(self):
t = TestConditionalModel.if_not_exists().create(text='blah blah', count=5)
id = t.id
t.text = 'new blah'
t.iff(text='blah blah').save()
updated = TestConditionalModel.objects(id=id).first()
self.assertEqual(updated.count, 5)
self.assertEqual(updated.text, 'new blah')
def test_update_failure(self):
t = TestConditionalModel.if_not_exists().create(text='blah blah')
t.text = 'new blah'
t = t.iff(text='something wrong')
with self.assertRaises(LWTException) as assertion:
t.save()
self.assertEqual(assertion.exception.existing, {
'text': 'blah blah',
'[applied]': False,
})
def test_blind_update(self):
t = TestConditionalModel.if_not_exists().create(text='blah blah')
t.text = 'something else'
uid = t.id
with mock.patch.object(self.session, 'execute') as m:
TestConditionalModel.objects(id=uid).iff(text='blah blah').update(text='oh hey der')
args = m.call_args
self.assertIn('IF "text" = %(1)s', args[0][0].query_string)
def test_blind_update_fail(self):
t = TestConditionalModel.if_not_exists().create(text='blah blah')
t.text = 'something else'
uid = t.id
qs = TestConditionalModel.objects(id=uid).iff(text='Not dis!')
with self.assertRaises(LWTException) as assertion:
qs.update(text='this will never work')
self.assertEqual(assertion.exception.existing, {
'text': 'blah blah',
'[applied]': False,
})
def test_conditional_clause(self):
tc = ConditionalClause('some_value', 23)
tc.set_context_id(3)
self.assertEqual('"some_value" = %(3)s', six.text_type(tc))
self.assertEqual('"some_value" = %(3)s', str(tc))
def test_batch_update_conditional(self):
t = TestConditionalModel.if_not_exists().create(text='something', count=5)
id = t.id
with BatchQuery() as b:
t.batch(b).iff(count=5).update(text='something else')
updated = TestConditionalModel.objects(id=id).first()
self.assertEqual(updated.text, 'something else')
b = BatchQuery()
updated.batch(b).iff(count=6).update(text='and another thing')
with self.assertRaises(LWTException) as assertion:
b.execute()
self.assertEqual(assertion.exception.existing, {
'id': id,
'count': 5,
'[applied]': False,
})
updated = TestConditionalModel.objects(id=id).first()
self.assertEqual(updated.text, 'something else')
@unittest.skip("Skipping until PYTHON-943 is resolved")
def test_batch_update_conditional_several_rows(self):
sync_table(TestUpdateModel)
self.addCleanup(drop_table, TestUpdateModel)
first_row = TestUpdateModel.create(partition=1, cluster=1, value=5, text="something")
second_row = TestUpdateModel.create(partition=1, cluster=2, value=5, text="something")
b = BatchQuery()
TestUpdateModel.batch(b).if_not_exists().create(partition=1, cluster=1, value=5, text='something else')
TestUpdateModel.batch(b).if_not_exists().create(partition=1, cluster=2, value=5, text='something else')
TestUpdateModel.batch(b).if_not_exists().create(partition=1, cluster=3, value=5, text='something else')
with self.assertRaises(LWTException):
b.execute()
first_row.delete()
second_row.delete()
b.execute()
def test_delete_conditional(self):
t = TestConditionalModel.if_not_exists().create(text='something', count=5)
self.assertEqual(TestConditionalModel.objects(id=t.id).count(), 1)
with self.assertRaises(LWTException):
t.iff(count=9999).delete()
self.assertEqual(TestConditionalModel.objects(id=t.id).count(), 1)
t.iff(count=5).delete()
self.assertEqual(TestConditionalModel.objects(id=t.id).count(), 0)
t = TestConditionalModel.if_not_exists().create(text='something', count=5)
self.assertEqual(TestConditionalModel.objects(id=t.id).count(), 1)
with self.assertRaises(LWTException):
TestConditionalModel.objects(id=t.id).iff(count=9999).delete()
self.assertEqual(TestConditionalModel.objects(id=t.id).count(), 1)
TestConditionalModel.objects(id=t.id).iff(count=5).delete()
self.assertEqual(TestConditionalModel.objects(id=t.id).count(), 0)
def test_delete_lwt_ne(self):
t = TestConditionalModel.if_not_exists().create(text='something', count=5)
self.assertEqual(TestConditionalModel.objects(id=t.id).count(), 1)
with self.assertRaises(LWTException):
t.iff(count__ne=5).delete()
t.iff(count__ne=2).delete()
self.assertEqual(TestConditionalModel.objects(id=t.id).count(), 0)
t = TestConditionalModel.if_not_exists().create(text='something', count=5)
self.assertEqual(TestConditionalModel.objects(id=t.id).count(), 1)
with self.assertRaises(LWTException):
TestConditionalModel.objects(id=t.id).iff(count__ne=5).delete()
TestConditionalModel.objects(id=t.id).iff(count__ne=2).delete()
self.assertEqual(TestConditionalModel.objects(id=t.id).count(), 0)
def test_update_lwt_ne(self):
t = TestConditionalModel.if_not_exists().create(text='something', count=5)
self.assertEqual(TestConditionalModel.objects(id=t.id).count(), 1)
with self.assertRaises(LWTException):
t.iff(count__ne=5).update(text='nothing')
t.iff(count__ne=2).update(text='nothing')
self.assertEqual(TestConditionalModel.objects(id=t.id).first().text, 'nothing')
t.delete()
t = TestConditionalModel.if_not_exists().create(text='something', count=5)
self.assertEqual(TestConditionalModel.objects(id=t.id).count(), 1)
with self.assertRaises(LWTException):
TestConditionalModel.objects(id=t.id).iff(count__ne=5).update(text='nothing')
TestConditionalModel.objects(id=t.id).iff(count__ne=2).update(text='nothing')
self.assertEqual(TestConditionalModel.objects(id=t.id).first().text, 'nothing')
t.delete()
def test_update_to_none(self):
t = TestConditionalModel.if_not_exists().create(text='something', count=5)
self.assertEqual(TestConditionalModel.objects(id=t.id).count(), 1)
with self.assertRaises(LWTException):
t.iff(count=9999).update(text=None)
self.assertIsNotNone(TestConditionalModel.objects(id=t.id).first().text)
t.iff(count=5).update(text=None)
self.assertIsNone(TestConditionalModel.objects(id=t.id).first().text)
t = TestConditionalModel.if_not_exists().create(text='something', count=5)
self.assertEqual(TestConditionalModel.objects(id=t.id).count(), 1)
with self.assertRaises(LWTException):
TestConditionalModel.objects(id=t.id).iff(count=9999).update(text=None)
self.assertIsNotNone(TestConditionalModel.objects(id=t.id).first().text)
TestConditionalModel.objects(id=t.id).iff(count=5).update(text=None)
self.assertIsNone(TestConditionalModel.objects(id=t.id).first().text)
def test_column_delete_after_update(self):
t = TestConditionalModel.if_not_exists().create(text='something', count=5)
t.iff(count=5).update(text=None, count=6)
self.assertIsNone(t.text)
self.assertEqual(t.count, 6)
t = TestConditionalModel.if_not_exists().create(text='something', count=5)
TestConditionalModel.objects(id=t.id).iff(count=5).update(text=None, count=6)
self.assertIsNone(TestConditionalModel.objects(id=t.id).first().text)
self.assertEqual(TestConditionalModel.objects(id=t.id).first().count, 6)
def test_conditional_without_instance(self):
uuid = uuid4()
TestConditionalModel.if_not_exists().create(id=uuid, text='test_for_cassandra', count=5)
TestConditionalModel.iff(count=5).filter(id=uuid).update(text=None, count=6)
t = TestConditionalModel.filter(id=uuid).first()
self.assertIsNone(t.text)
self.assertEqual(t.count, 6)
| true
| true
|
1c418c2e5c01c544657d9dfe132516af41943430
| 3,027
|
py
|
Python
|
survey_app_repo/survey/tests/factory.py
|
devbkhadka/survey_app
|
51c4a4844e57f771d1157be8a8307552d390df67
|
[
"Apache-1.1"
] | 1
|
2020-01-12T06:48:28.000Z
|
2020-01-12T06:48:28.000Z
|
survey_app_repo/survey/tests/factory.py
|
devbkhadka/survey_app
|
51c4a4844e57f771d1157be8a8307552d390df67
|
[
"Apache-1.1"
] | null | null | null |
survey_app_repo/survey/tests/factory.py
|
devbkhadka/survey_app
|
51c4a4844e57f771d1157be8a8307552d390df67
|
[
"Apache-1.1"
] | null | null | null |
'''Utility functions to populate data needed for tests'''
from ..models import Survey, Question, QuestionTypes, SurveyResponse, ResponseText
RAW_SURVEYS = [
{
'title': 'Your favourite candidate',
'summary': 'Answer questions like who is your favourite candidate and why',
'published_date': '2019-4-20 00:00+0545',
},
{
'title': 'Your view on inflation',
'summary': 'What do you feel about value of money, do you have some examples?',
'published_date': '2019-4-20 00:00+0545',
},
{
'title': 'Top movie of 2019',
'summary': 'Which movie do you like most in the year 2019',
'published_date': '2019-4-20 00:00+0545',
}
]
RAW_QUESTIONS = [
{
'question': 'Read Description Below',
        'description': 'This is a description for the quotation',
'question_type': QuestionTypes.DESC.name
},
{
'question': 'Please enter your text response',
'description': 'You can enter free text below',
'question_type': QuestionTypes.TEXT.name
},
{
'question': 'Check out',
        'description': 'This is a description for the quotation',
'question_type': QuestionTypes.DESC.name
}
]
def create_surveys():
'''create dummy surveys for test'''
surveys = []
for raw in RAW_SURVEYS:
surveys.append(Survey.objects.create(**raw))
return surveys
def create_survey_with_questions():
'''create survey example with some questions'''
survey = Survey.objects.create(**RAW_SURVEYS[0])
for raw in RAW_QUESTIONS:
question = Question.objects.create(survey=survey, **raw)
survey.questions.add(question)
return survey
def add_responses_to_surveys(surveys):
'''Add some responses in each survey'''
completed_dates = [
['2019-4-20 00:00+0545', None, '2019-5-13 00:00+0545'],
[None, '2019-4-8 00:00+0545', '2019-5-13 00:00+0545', '2019-6-02 00:00+0545', '2019-4-7 00:00+0545'],
['2019-4-8 00:00+0545'],
]
for survey, dates in zip(surveys, completed_dates):
for date in dates:
SurveyResponse.objects.create(survey=survey, completed_date=date)
def create_survey_with_text_question_and_answer():
'''create survey example with only one question of type text'''
survey = Survey.objects.create(**RAW_SURVEYS[0])
for raw in RAW_QUESTIONS:
if raw['question_type'] == QuestionTypes.TEXT.name:
question = Question.objects.create(survey=survey, **raw)
survey.questions.add(question)
break
survey_response = SurveyResponse.objects.create(survey=survey)
ResponseText.objects.create(survey_response=survey_response, question=question)
return survey, survey_response
def get_question_and_index_of_type(survey, qtype):
    '''Return the first question of the given type and its 1-based index, or None'''
    questions = Question.objects.filter(survey=survey)
for i, question in enumerate(questions):
if question.question_type == str(qtype):
return question, i + 1
return None
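# Usage sketch (illustrative addition, not part of the upstream module),
# showing a typical test setup built from these helpers:
#
#     surveys = create_surveys()
#     add_responses_to_surveys(surveys)
#     survey, response = create_survey_with_text_question_and_answer()
#     question, index = get_question_and_index_of_type(survey, QuestionTypes.TEXT.name)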
| 31.863158
| 109
| 0.648827
|
from ..models import Survey, Question, QuestionTypes, SurveyResponse, ResponseText
RAW_SURVEYS = [
{
'title': 'Your favourite candidate',
'summary': 'Answer questions like who is your favourite candidate and why',
'published_date': '2019-4-20 00:00+0545',
},
{
'title': 'Your view on inflation',
'summary': 'What do you feel about value of money, do you have some examples?',
'published_date': '2019-4-20 00:00+0545',
},
{
'title': 'Top movie of 2019',
'summary': 'Which movie do you like most in the year 2019',
'published_date': '2019-4-20 00:00+0545',
}
]
RAW_QUESTIONS = [
{
'question': 'Read Description Below',
        'description': 'This is a description for the quotation',
'question_type': QuestionTypes.DESC.name
},
{
'question': 'Please enter your text response',
'description': 'You can enter free text below',
'question_type': QuestionTypes.TEXT.name
},
{
'question': 'Check out',
        'description': 'This is a description for the quotation',
'question_type': QuestionTypes.DESC.name
}
]
def create_surveys():
surveys = []
for raw in RAW_SURVEYS:
surveys.append(Survey.objects.create(**raw))
return surveys
def create_survey_with_questions():
survey = Survey.objects.create(**RAW_SURVEYS[0])
for raw in RAW_QUESTIONS:
question = Question.objects.create(survey=survey, **raw)
survey.questions.add(question)
return survey
def add_responses_to_surveys(surveys):
completed_dates = [
['2019-4-20 00:00+0545', None, '2019-5-13 00:00+0545'],
[None, '2019-4-8 00:00+0545', '2019-5-13 00:00+0545', '2019-6-02 00:00+0545', '2019-4-7 00:00+0545'],
['2019-4-8 00:00+0545'],
]
for survey, dates in zip(surveys, completed_dates):
for date in dates:
SurveyResponse.objects.create(survey=survey, completed_date=date)
def create_survey_with_text_question_and_answer():
survey = Survey.objects.create(**RAW_SURVEYS[0])
for raw in RAW_QUESTIONS:
if raw['question_type'] == QuestionTypes.TEXT.name:
question = Question.objects.create(survey=survey, **raw)
survey.questions.add(question)
break
survey_response = SurveyResponse.objects.create(survey=survey)
ResponseText.objects.create(survey_response=survey_response, question=question)
return survey, survey_response
def get_question_and_index_of_type(survey, qtype):
questions = Question.objects.filter(survey=survey)
for i, question in enumerate(questions):
if question.question_type == str(qtype):
return question, i + 1
return None
| true
| true
|
1c418cfcc6d1ee460a06ff697a614c2418050041
| 393
|
py
|
Python
|
yatube/yatube/urls.py
|
themasterid/hw02_community
|
8e9980df3f10bb00ee521d92079313dafa9af066
|
[
"MIT"
] | null | null | null |
yatube/yatube/urls.py
|
themasterid/hw02_community
|
8e9980df3f10bb00ee521d92079313dafa9af066
|
[
"MIT"
] | null | null | null |
yatube/yatube/urls.py
|
themasterid/hw02_community
|
8e9980df3f10bb00ee521d92079313dafa9af066
|
[
"MIT"
] | null | null | null |
# yatube/urls.py
from django.contrib import admin
from django.urls import include, path
urlpatterns = [
path('auth/', include('users.urls', namespace='users')),
path('auth/', include('django.contrib.auth.urls')),
path('admin/', admin.site.urls),
path('', include('posts.urls', namespace='index')),
# path('group/<slug:slug>/', include('posts.urls', namespace='posts')),
]
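# Illustrative note (added, not in the upstream file): with this configuration,
# names resolve as e.g. reverse('admin:index') for the admin site and
# reverse('login') for the auth views included without a namespace.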
| 32.75
| 75
| 0.659033
|
from django.contrib import admin
from django.urls import include, path
urlpatterns = [
path('auth/', include('users.urls', namespace='users')),
path('auth/', include('django.contrib.auth.urls')),
path('admin/', admin.site.urls),
path('', include('posts.urls', namespace='index')),
]
| true
| true
|
1c418d5da30c5843481756efb37736bb9b1e5529
| 621
|
py
|
Python
|
problems/general_abbr.py
|
stachenov/PyLeetCode
|
cb13700d428854eff46a762542a63d691578d5b6
|
[
"Unlicense"
] | null | null | null |
problems/general_abbr.py
|
stachenov/PyLeetCode
|
cb13700d428854eff46a762542a63d691578d5b6
|
[
"Unlicense"
] | null | null | null |
problems/general_abbr.py
|
stachenov/PyLeetCode
|
cb13700d428854eff46a762542a63d691578d5b6
|
[
"Unlicense"
] | null | null | null |
class Solution(object):
def generateAbbreviations(self, word):
"""
:type word: str
:rtype: List[str]
"""
def generate(abbr, pos):
if pos == len(word):
yield abbr
else:
                if not abbr or not abbr[-1].isdigit():
                    # Two digit runs may not be adjacent, so only start a new
                    # abbreviated span when the previous token was a letter.
                    for i in range(pos + 1, len(word) + 1):
for res in generate(abbr + str(i - pos), i):
yield res
for res in generate(abbr + word[pos:pos + 1], pos + 1):
yield res
return [w for w in generate("", 0)]
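# Usage sketch (illustrative addition, not part of the original solution):
# each letter run may be replaced by its length, so a word of length n has
# 2**n generalized abbreviations.
#
#     Solution().generateAbbreviations("word")
#     # -> 16 strings, e.g. 'word', '1ord', 'w1r1', '4', ...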
| 34.5
| 71
| 0.431562
|
class Solution(object):
def generateAbbreviations(self, word):
def generate(abbr, pos):
if pos == len(word):
yield abbr
else:
if not abbr or not abbr[-1].isdigit():
                    for i in range(pos + 1, len(word) + 1):
for res in generate(abbr + str(i - pos), i):
yield res
for res in generate(abbr + word[pos:pos + 1], pos + 1):
yield res
return [w for w in generate("", 0)]
| true
| true
|
1c418d6be54f413656434d3aadc217088071f2b0
| 535
|
py
|
Python
|
main_app/migrations/0002_alter_golfgroup_members.py
|
makmizi15/golfhub
|
9073a990e2ddebf1bc346d3d49ccd6c4dd8d79ce
|
[
"MIT"
] | null | null | null |
main_app/migrations/0002_alter_golfgroup_members.py
|
makmizi15/golfhub
|
9073a990e2ddebf1bc346d3d49ccd6c4dd8d79ce
|
[
"MIT"
] | null | null | null |
main_app/migrations/0002_alter_golfgroup_members.py
|
makmizi15/golfhub
|
9073a990e2ddebf1bc346d3d49ccd6c4dd8d79ce
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.9 on 2021-11-19 08:47
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('main_app', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='golfgroup',
name='members',
field=models.ManyToManyField(null=True, related_name='members', to=settings.AUTH_USER_MODEL),
),
]
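# Note (added for clarity, not part of the generated file): Django ignores
# null=True on ManyToManyField, so the flag above is harmless but has no effect.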
| 25.47619
| 105
| 0.657944
|
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('main_app', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='golfgroup',
name='members',
field=models.ManyToManyField(null=True, related_name='members', to=settings.AUTH_USER_MODEL),
),
]
| true
| true
|
1c418e20e64c922867e06acee53a789cc13d54f7
| 2,416
|
py
|
Python
|
alien_fullfunction/scoreboard.py
|
yiyidhuang/PythonCrashCrouse2nd
|
3512f9ab8fcf32c6145604a37e2a62feddf174d1
|
[
"MIT"
] | null | null | null |
alien_fullfunction/scoreboard.py
|
yiyidhuang/PythonCrashCrouse2nd
|
3512f9ab8fcf32c6145604a37e2a62feddf174d1
|
[
"MIT"
] | null | null | null |
alien_fullfunction/scoreboard.py
|
yiyidhuang/PythonCrashCrouse2nd
|
3512f9ab8fcf32c6145604a37e2a62feddf174d1
|
[
"MIT"
] | null | null | null |
import pygame.font
from pygame.sprite import Group
from ship import Ship
class Scoreboard:
def __init__(self, ai_game):
self.ai_game = ai_game
self.screen = ai_game.screen
self.screen_rect = self.screen.get_rect()
self.settings = ai_game.settings
self.stats = ai_game.stats
self.text_color = (30, 30, 30)
self.font = pygame.font.SysFont(None, 48)
self.prep_score()
self.prep_high_score()
self.prep_level()
self.prep_ships()
    def prep_score(self):
        # Round to the nearest 10 and insert comma separators for display.
        rounded_score = round(self.stats.score, -1)
        score_str = "{:,}".format(rounded_score)
self.score_image = self.font.render(score_str, True,
self.text_color, self.settings.bg_color)
self.score_rect = self.score_image.get_rect()
self.score_rect.right = self.screen_rect.right - 20
self.score_rect.top = 20
def prep_high_score(self):
high_score = round(self.stats.high_score, -1)
high_score_str = "{:,}".format(high_score)
self.high_score_image = self.font.render(high_score_str, True, self.text_color, self.settings.bg_color)
self.high_score_rect = self.high_score_image.get_rect()
self.high_score_rect.centerx = self.screen_rect.centerx
self.high_score_rect.top = self.score_rect.top
def prep_level(self):
level_str = str(self.stats.level)
self.level_image = self.font.render(level_str, True, self.text_color, self.settings.bg_color)
self.level_rect = self.level_image.get_rect()
self.level_rect.right = self.score_rect.right
self.level_rect.top = self.score_rect.bottom + 10
def prep_ships(self):
self.ships = Group()
for ship_number in range(self.stats.ships_left):
ship = Ship(self.ai_game)
ship.rect.x = 10 + ship_number * ship.rect.width
ship.rect.y = 10
self.ships.add(ship)
def check_high_score(self):
if self.stats.score > self.stats.high_score:
self.stats.high_score = self.stats.score
self.prep_high_score()
def show_score(self):
self.screen.blit(self.score_image, self.score_rect)
self.screen.blit(self.high_score_image, self.high_score_rect)
self.screen.blit(self.level_image, self.level_rect)
self.ships.draw(self.screen)
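# Usage sketch (illustrative addition), assuming an AlienInvasion-style game
# object that exposes the screen, settings and stats attributes used above:
#
#     sb = Scoreboard(ai_game)
#     sb.check_high_score()  # after the score changes
#     sb.show_score()        # each frame, before pygame.display.flip()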
| 35.529412
| 111
| 0.647351
|
import pygame.font
from pygame.sprite import Group
from ship import Ship
class Scoreboard:
def __init__(self, ai_game):
self.ai_game = ai_game
self.screen = ai_game.screen
self.screen_rect = self.screen.get_rect()
self.settings = ai_game.settings
self.stats = ai_game.stats
self.text_color = (30, 30, 30)
self.font = pygame.font.SysFont(None, 48)
self.prep_score()
self.prep_high_score()
self.prep_level()
self.prep_ships()
def prep_score(self):
rounded_score = round(self.stats.score, -1)
score_str = "{:,}".format(rounded_score)
self.score_image = self.font.render(score_str, True,
self.text_color, self.settings.bg_color)
self.score_rect = self.score_image.get_rect()
self.score_rect.right = self.screen_rect.right - 20
self.score_rect.top = 20
def prep_high_score(self):
high_score = round(self.stats.high_score, -1)
high_score_str = "{:,}".format(high_score)
self.high_score_image = self.font.render(high_score_str, True, self.text_color, self.settings.bg_color)
self.high_score_rect = self.high_score_image.get_rect()
self.high_score_rect.centerx = self.screen_rect.centerx
self.high_score_rect.top = self.score_rect.top
def prep_level(self):
level_str = str(self.stats.level)
self.level_image = self.font.render(level_str, True, self.text_color, self.settings.bg_color)
self.level_rect = self.level_image.get_rect()
self.level_rect.right = self.score_rect.right
self.level_rect.top = self.score_rect.bottom + 10
def prep_ships(self):
self.ships = Group()
for ship_number in range(self.stats.ships_left):
ship = Ship(self.ai_game)
ship.rect.x = 10 + ship_number * ship.rect.width
ship.rect.y = 10
self.ships.add(ship)
def check_high_score(self):
if self.stats.score > self.stats.high_score:
self.stats.high_score = self.stats.score
self.prep_high_score()
def show_score(self):
self.screen.blit(self.score_image, self.score_rect)
self.screen.blit(self.high_score_image, self.high_score_rect)
self.screen.blit(self.level_image, self.level_rect)
self.ships.draw(self.screen)
| true
| true
|
1c418e5cf2168c58612ee32837893a2edc13243f
| 23,804
|
py
|
Python
|
python/tvm/target/target.py
|
embodyme/tvm
|
3c05eb6a4bc026b7b194e6708d96b2dc9eea070a
|
[
"Apache-2.0"
] | 8
|
2021-08-02T14:17:39.000Z
|
2021-11-16T12:37:51.000Z
|
python/tvm/target/target.py
|
redbopo/tvm
|
b54beed37ca2baad6002990b014a2119223e0900
|
[
"Apache-2.0"
] | null | null | null |
python/tvm/target/target.py
|
redbopo/tvm
|
b54beed37ca2baad6002990b014a2119223e0900
|
[
"Apache-2.0"
] | 7
|
2021-08-03T14:24:00.000Z
|
2021-11-11T04:34:37.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Target data structure."""
import json
import os
import re
import warnings
import tvm._ffi
from tvm._ffi import register_func as _register_func
from tvm.runtime import Object
from . import _ffi_api
@tvm._ffi.register_object
class TargetKind(Object):
"""Kind of a compilation target"""
@property
def options(self):
"""Returns the dict of available option names and types"""
return dict(_ffi_api.ListTargetKindOptions(self))
@staticmethod
def options_from_name(kind_name: str):
"""Returns the dict of available option names and types from a name of TargetKind"""
return dict(_ffi_api.ListTargetKindOptionsFromName(kind_name))
@tvm._ffi.register_object
class Target(Object):
"""Target device information, use through TVM API.
Note
----
You can create target using the constructor or the following functions
- :py:func:`tvm.target.arm_cpu` create arm_cpu target
- :py:func:`tvm.target.cuda` create CUDA target
- :py:func:`tvm.target.rocm` create ROCM target
- :py:func:`tvm.target.mali` create Mali target
- :py:func:`tvm.target.intel_graphics` create Intel Graphics target
"""
def __init__(self, target, host=None):
"""Construct a TVM target object from
1) Raw target string
2) Target config dict
3) Target tag
Parameters
----------
target : Union[str, Dict[str, Any]]
Can be one of a literal target string, a json string describing
a configuration, or a dictionary of configuration options.
When using a dictionary or json string to configure target, the
possible values are:
kind : str (required)
Which codegen path to use, for example 'llvm' or 'cuda'.
keys : List of str (optional)
A set of strategies that can be dispatched to. When using
"kind=opencl" for example, one could set keys to ["mali", "opencl", "gpu"].
device : str (optional)
A single key that corresponds to the actual device being run on.
This will be effectively appended to the keys.
libs : List of str (optional)
The set of external libraries to use. For example ['cblas', 'mkl'].
system-lib : bool (optional)
If True, build a module that contains self registered functions.
Useful for environments where dynamic loading like dlopen is banned.
mcpu : str (optional)
The specific cpu being run on. Serves only as an annotation.
model : str (optional)
An annotation indicating what model a workload came from.
runtime : str (optional)
An annotation indicating which runtime to use with a workload.
mtriple : str (optional)
                The LLVM target triple describing the target, for example "arm64-linux-android".
mattr : List of str (optional)
The llvm features to compile with, for example ["+avx512f", "+mmx"].
mfloat-abi : str (optional)
An llvm setting that is one of 'hard' or 'soft' indicating whether to use
hardware or software floating-point operations.
mabi : str (optional)
An llvm setting. Generate code for the specified ABI, for example "lp64d".
host : Union[str, Dict[str, Any]] (optional)
Description for target host. Can be recursive. Similar to target.
host : Optional[Union[str, Dict[str, Any]]]
Similar to target but for target host. Can be one of a literal target host string,
a json string describing a configuration, or a dictionary of configuration options.
When using a dictionary or json string to configure target, the possible values are
same as target.
"""
if target is None or not isinstance(target, (dict, str, Target)):
raise ValueError("target has to be a string or dictionary.")
if host is not None:
if not isinstance(host, (dict, str, Target)):
raise ValueError("target host has to be a string or dictionary.")
self.__init_handle_by_constructor__(_ffi_api.Target, Target(target), Target(host))
else:
self.__init_handle_by_constructor__(_ffi_api.Target, target)
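    # Construction sketch (illustrative addition, not part of the upstream
    # module), mirroring the accepted input forms described above:
    #
    #     Target("llvm -mcpu=skylake")               # raw target string
    #     Target({"kind": "cuda", "arch": "sm_61"})  # config dict
    #     Target("cuda", host="llvm")                # with an explicit host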
def __enter__(self):
_ffi_api.TargetEnterScope(self)
return self
def __exit__(self, ptype, value, trace):
_ffi_api.TargetExitScope(self)
def export(self):
return _ffi_api.TargetExport(self)
def with_host(self, host=None):
return _ffi_api.WithHost(self, Target(host))
@staticmethod
def current(allow_none=True):
"""Returns the current target.
Parameters
----------
allow_none : bool
Whether allow the current target to be none
Raises
------
ValueError if current target is not set.
"""
return _ffi_api.TargetCurrent(allow_none)
@property
def arch(self):
"""Returns the cuda arch from the target if it exists."""
return str(self.attrs.get("arch", ""))
@property
def max_num_threads(self):
"""Returns the max_num_threads from the target if it exists."""
return int(self.attrs["max_num_threads"])
@property
def thread_warp_size(self):
"""Returns the thread_warp_size from the target if it exists."""
return int(self.attrs["thread_warp_size"])
@property
    def max_function_args(self):
        """Returns the max_function_args from the target if it exists."""
        return int(self.attrs.get("max_function_args", -1))
@property
    def device_name(self):
        """Returns the device name from the target if it exists."""
        return str(self.attrs.get("device", ""))
@property
def model(self):
"""Returns model from the target if it exists."""
return str(self.attrs.get("model", "unknown"))
@property
def mcpu(self):
"""Returns the mcpu from the target if it exists."""
return str(self.attrs.get("mcpu", ""))
@property
def mattr(self):
"""Returns the mattr from the target if it exists."""
return list(self.attrs.get("mattr", []))
@property
    def libs(self):
        """Returns the libs from the target if it exists."""
        return list(self.attrs.get("libs", []))
@staticmethod
def list_kinds():
"""Returns the list of available target names."""
return list(_ffi_api.ListTargetKinds())
@staticmethod
def check_and_update_host_consist(target, host=None, target_is_dict_key=True):
"""A helper function that merges a legacy "target, target_host" pair, then returns
the merged target and its host field. The function is for legacy target and target
host pair only, and should not be used in the new target system.
Parameters
----------
target : Union[str, Dict[str, Any], Target]
The target or heterogeneous target
host : Union[str, Dict[str, Any], Target, None]
The target host
target_is_dict_key : Bool
When the type of target is dict, whether Target is the key (Otherwise the value)
"""
if target is None:
assert host is None, "Target host is not empty when target is empty."
return target, host
if isinstance(target, dict) and "kind" not in target:
new_target = {}
for tgt, mod in target.items():
if not target_is_dict_key:
tgt, mod = mod, tgt
if isinstance(tgt, (dict, str, Target)):
tgt, host = Target.check_and_update_host_consist(tgt, host)
if not target_is_dict_key:
tgt, mod = mod, tgt
new_target[tgt] = mod
target = new_target
else:
target = Target(target, host)
host = target.host
return target, host
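# Usage sketch (illustrative addition, not in the upstream file) for the
# legacy helper above:
#
#     tgt, host = Target.check_and_update_host_consist("cuda", "llvm")
#     # tgt now carries its host in tgt.host; host is that same host Target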
# TODO(@tvm-team): Deprecate the helper functions below. Encourage the usage of config dict instead.
def _merge_opts(opts, new_opts):
"""Helper function to merge options"""
if isinstance(new_opts, str):
new_opts = new_opts.split()
if new_opts:
opt_set = set(opts)
new_opts = [opt for opt in new_opts if opt not in opt_set]
return opts + new_opts
return opts
def cuda(model="unknown", arch=None, options=None):
"""Returns a cuda target.
Parameters
----------
model: str
The model of cuda device (e.g. 1080ti)
arch: str
The cuda architecture (e.g. sm_61)
options : str or list of str
Additional options
"""
opts = _merge_opts(["-model=%s" % model], options)
if arch:
opts = _merge_opts(["-arch=%s" % arch], opts)
if not any(["-arch" in opt for opt in opts]):
warnings.warn("Try specifying cuda arch by adding 'arch=sm_xx' to your target.")
return Target(" ".join(["cuda"] + opts))
def rocm(model="unknown", options=None):
"""Returns a ROCM target.
Parameters
----------
model: str
The model of this device
options : str or list of str
Additional options
"""
opts = _merge_opts(["-model=%s" % model], options)
return Target(" ".join(["rocm"] + opts))
def mali(model="unknown", options=None):
"""Returns a ARM Mali GPU target.
Parameters
----------
model: str
The model of this device
options : str or list of str
Additional options
"""
opts = ["-device=mali", "-model=%s" % model]
opts = _merge_opts(opts, options)
return Target(" ".join(["opencl"] + opts))
def intel_graphics(model="unknown", options=None):
"""Returns an Intel Graphics target.
Parameters
----------
model: str
The model of this device
options : str or list of str
Additional options
"""
opts = ["-device=intel_graphics", "-model=%s" % model, "-thread_warp_size=16"]
opts = _merge_opts(opts, options)
return Target(" ".join(["opencl"] + opts))
MICRO_SUPPORTED_MODELS = {
"host": [],
"atsamd51": ["-mcpu=cortex-m4"],
"cxd5602gg": ["-mcpu=cortex-m4"],
"esp32": [],
"imxrt10xx": ["-mcpu=cortex-m7"],
"mps2_an521": ["-mcpu=cortex-m33"],
"nrf52840": ["-mcpu=cortex-m4"],
"nrf5340dk": ["-mcpu=cortex-m33"],
"sam3x8e": ["-mcpu=cortex-m3"],
"stm32f746xx": ["-mcpu=cortex-m7", "-march=armv7e-m"],
"stm32l4r5zi": ["-mcpu=cortex-m4"],
"zynq_mp_r5": ["-mcpu=cortex-r5"],
}
def micro(model="unknown", options=None):
"""Returns a microTVM target.
Parameters
----------
model : str
Canonically identifies the target device. This is typically a device board level name.
The allowed values are MICRO_SUPPORTED_MODELS.keys().
options : str or list of str
Additional options
"""
if model not in MICRO_SUPPORTED_MODELS:
raise ValueError(f"Model {model} not supported by tvm.target.micro.")
opts = _merge_opts(
MICRO_SUPPORTED_MODELS[model] + [f"-model={model}"],
options,
)
# NOTE: in the future, the default micro target will be LLVM except when
# external dependencies are present.
return Target(" ".join(["c"] + opts))
def arm_cpu(model="unknown", options=None):
"""Returns a ARM CPU target.
This function will also download pre-tuned op parameters when there is none.
Parameters
----------
model: str
SoC name or phone name of the arm board.
options : str or list of str
Additional options
"""
trans_table = {
"pixel2": ["-model=snapdragon835", "-mtriple=arm64-linux-android", "-mattr=+neon"],
"mate10": ["-model=kirin970", "-mtriple=arm64-linux-android", "-mattr=+neon"],
"mate10pro": ["-model=kirin970", "-mtriple=arm64-linux-android", "-mattr=+neon"],
"p20": ["-model=kirin970", "-mtriple=arm64-linux-android", "-mattr=+neon"],
"p20pro": ["-model=kirin970", "-mtriple=arm64-linux-android", "-mattr=+neon"],
"rasp3b": ["-model=bcm2837", "-mtriple=armv7l-linux-gnueabihf", "-mattr=+neon"],
"rasp4b": [
"-model=bcm2711",
"-mtriple=armv8l-linux-gnueabihf",
"-mattr=+neon",
"-mcpu=cortex-a72",
],
"rasp4b64": [
"-model=bcm2711",
"-mtriple=aarch64-linux-gnu",
"-mattr=+neon",
"-mcpu=cortex-a72",
],
"rk3399": ["-model=rk3399", "-mtriple=aarch64-linux-gnu", "-mattr=+neon"],
"pynq": ["-model=pynq", "-mtriple=armv7a-linux-eabi", "-mattr=+neon"],
"ultra96": ["-model=ultra96", "-mtriple=aarch64-linux-gnu", "-mattr=+neon"],
"beagleai": [
"-model=beagleai",
"-mtriple=armv7a-linux-gnueabihf",
"-mattr=+neon,+vfp4,+thumb2",
"-mcpu=cortex-a15",
],
"stm32mp1": [
"-model=stm32mp1",
"-mtriple=armv7a-linux-gnueabihf",
"-mattr=+neon,+vfp4,+thumb2",
"-mcpu=cortex-a7",
],
"thunderx": [
"-model=thunderx",
"-mtriple=aarch64-linux-gnu",
"-mattr=+neon,+crc,+lse",
"-mcpu=thunderxt88",
],
}
pre_defined_opt = trans_table.get(model, ["-model=%s" % model])
opts = ["-device=arm_cpu"] + pre_defined_opt
opts = _merge_opts(opts, options)
return Target(" ".join(["llvm"] + opts))
def rasp(options=None):
"""Return a Raspberry 3b target.
Parameters
----------
options : str or list of str
Additional options
"""
warnings.warn(
"tvm.target.rasp() is going to be deprecated. " 'Please use tvm.target.arm_cpu("rasp3b")'
)
return arm_cpu("rasp3b", options)
def vta(model="unknown", options=None):
opts = ["-device=vta", "-keys=vta,cpu", "-model=%s" % model]
opts = _merge_opts(opts, options)
return Target(" ".join(["ext_dev"] + opts))
def bifrost(model="unknown", options=None):
"""Return an ARM Mali GPU target (Bifrost architecture).
Parameters
----------
    model: str
        The model of this device
    options : str or list of str
Additional options
"""
opts = ["-device=bifrost", "-model=%s" % model]
opts = _merge_opts(opts, options)
return Target(" ".join(["opencl"] + opts))
def riscv_cpu(model="sifive-u54", options=None):
"""Returns a RISC-V CPU target.
Default: sifive-u54 rv64gc
Parameters
----------
model: str
CPU name.
options : str or list of str
Additional options
"""
trans_table = {
"sifive-e31": [
"-model=sifive-e31",
"-mtriple=riscv32-unknown-linux-gnu",
"-mcpu=sifive-e31",
"-mabi=ilp32",
# cc: riscv64-unknown-linux-gnu-g++ -march=rv32imac -mabi=ilp32 -mcpu=sifive-e31
],
"sifive-e76": [
"-model=sifive-e76",
"-mtriple=riscv32-unknown-linux-gnu",
"-mcpu=sifive-e76",
"-mabi=ilp32",
# cc: riscv64-unknown-linux-gnu-g++ -march=rv32imafc -mabi=ilp32 -mcpu=sifive-e76
],
"sifive-u54": [
"-model=sifive-u54",
"-mtriple=riscv64-unknown-linux-gnu",
"-mcpu=sifive-u54",
"-mabi=lp64d",
# cc: riscv64-unknown-linux-gnu-g++ -march=rv64gc -mabi=lp64d -mcpu=sifive-u54
],
"sifive-u74": [
"-model=sifive-u74",
"-mtriple=riscv64-unknown-linux-gnu",
"-mcpu=sifive-u74",
"-mabi=lp64d",
# cc: riscv64-unknown-linux-gnu-g++ -march=rv64gc -mabi=lp64d -mcpu=sifive-u74
],
}
pre_defined_opt = trans_table.get(model, ["-model=%s" % model])
opts = ["-device=arm_cpu"] + pre_defined_opt
opts = _merge_opts(opts, options)
return Target(" ".join(["llvm"] + opts))
def hexagon(cpu_ver="v66", **kwargs):
"""Returns a Hexagon target.
Parameters
----------
cpu_ver : str (default: "v66")
        CPU version used for code generation. Not every allowed CPU string
        is valid; LLVM will raise an error for unsupported versions.
Recognized keyword parameters
-----------------------------
hvx : int (default: 128)
Size of HVX vector in bytes. Value of 0 disables HVX codegen.
sim_options : str or list of str (default: None)
        User defined simulator arguments. The CPU version defaults to cpu_ver;
        otherwise separate versions are used for codegen and simulation. Not
        every allowed CPU string is valid; the simulator raises an error for
        invalid ones. Does not affect codegen.
llvm_options : str or list of str (default: None)
User defined compiler arguments.
link_params : bool (default: False)
Whether to link graph parameters into the LLVM module.
"""
# Some of the target parameters correspond to target kind attributes
# listed in src/target/target_kind.cc. For those parameters, their
# names follow the attribute names with the exception of '_' being used
# in place of '-'.
# Example compiler arguments
# llvm -mtriple=hexagon -mcpu=hexagonv66 -mattr=+hvxv66,+hvx-length128b
# Check for valid codegen cpu
valid_hex = ["v60", "v62", "v65", "v66", "v67", "v67t", "v68"]
try:
cpu_ver = cpu_ver[cpu_ver.index("v") :].lower()
assert cpu_ver in valid_hex
except:
msg = "{} is not a valid Hexagon version\nvalid versions include {}"
raise ValueError(msg.format(cpu_ver, valid_hex)) from None
# Target configuration:
config = {
"hvx": 128,
"sim_options": None,
"llvm_options": None,
"link_params": False,
}
config.update(kwargs)
# Warn about obsolete parameter names.
if config.get("sim_args"):
msg = "The keyword parameter 'sim_args' is deprecated, use 'sim_options' instead"
warnings.warn(msg, stacklevel=2)
config.update({"sim_options": config["sim_args"]})
if config.get("llvm_args"):
msg = "The keyword parameter 'llvm_args' is deprecated, use 'llvm_options' instead"
warnings.warn(msg, stacklevel=2)
config.update({"llvm_options": config["llvm_args"]})
# LLVM target string
def create_llvm_target(cpu_ver, config):
"""Create LLVM target string."""
target = " -mtriple=hexagon"
mcpu = " -mcpu=hexagon" + cpu_ver
# Process the options that affect target features and return the
# target feature string.
def create_target_features(config):
tfs = []
if config["hvx"] > 0:
valid_hvx = [0, 64, 128]
if not config["hvx"] in valid_hvx:
raise ValueError("Invalid hvx value, should be one of " + str(valid_hvx))
tfs += ["+hvx" + cpu_ver, "+hvx-length" + str(config["hvx"]) + "b"]
else:
tfs += ["-hvx"]
return "-mattr=" + ",".join(tfs) if tfs else ""
return target + mcpu + " " + create_target_features(config)
# Simulator options string
def create_sim_options(cpu_ver, config):
"""Create simulator option string."""
def validate_hvx_length(codegen_hvx, sim_options):
if sim_options and "--hvx_length" in sim_options:
# If --hvx_length was specified, check HVX length of sim
# vs codegen
i = sim_options.index("hvx_length") + len("hvx_length") + 1
sim_hvx = sim_options[i : i + 3]
if sim_hvx != str(codegen_hvx):
msg = "sim hvx {} and codegen hvx {} mismatch!".format(sim_hvx, codegen_hvx)
# Set the stacklevel to the tvm.target.hexagon() call.
warnings.warn(msg, stacklevel=4)
elif codegen_hvx != 0:
# If --hvx_length was not given, add it if HVX is enabled
sim_options = sim_options + " " if isinstance(sim_options, str) else ""
sim_options += "--hvx_length " + str(codegen_hvx)
return sim_options or ""
hvx = config["hvx"]
sim_options = config["sim_options"]
if not sim_options:
return cpu_ver + " " + validate_hvx_length(hvx, sim_options)
sim_cpu = cpu_ver + " "
# Add user defined args
if isinstance(sim_options, list):
sim_options = " ".join(sim_options)
# Check for supplied sim cpu version
if "v6" in sim_options:
sim_cpu = ""
# Regex match for allowed cpus
valid_cpu_str_regex = (
r"(?P<pre>--.*\s)?(--m)?"
+ r"(?P<base_version>v6[25678])(?P<sub_version>[a-z])?"
+ r"(?P<l2_size>_[0-9]+)?(?P<rev>_rev[0-9])?\s?(?P<post>--.*)?"
)
m = re.match(valid_cpu_str_regex, sim_options.lower())
if not m:
raise ValueError('Invalid simulator argument string "{}"'.format(sim_options))
# Parse options into correct order
cpu_attr = {x: str(m.groupdict()[x] or "") for x in m.groupdict()}
sim_options = (
cpu_attr["base_version"]
+ cpu_attr["sub_version"]
+ cpu_attr["l2_size"]
+ cpu_attr["rev"]
+ " "
+ cpu_attr["pre"]
+ cpu_attr["post"]
)
return sim_cpu + " " + validate_hvx_length(hvx, sim_options)
# LLVM options string
def create_llvm_options(cpu_ver, config): # pylint: disable=unused-argument
"""Create LLVM options string."""
llvm_options = config["llvm_options"]
# TVM's option parser doesn't allow '=' in values, but '=' can
# appear in LLVM flags. Replace it with '@', since it's unlikely
# that '@' will be used in another context.
        if llvm_options is None:
            return ""
        # Accept a single string or a list of strings, per the docstring.
        args = llvm_options.split() if isinstance(llvm_options, str) else list(llvm_options)
        if not args:
            return ""
        return "--llvm-options=" + ",".join(s.replace("=", "@") for s in args)
# Sim args
os.environ["HEXAGON_SIM_ARGS"] = create_sim_options(cpu_ver, config)
target_str = create_llvm_target(cpu_ver, config)
llvm_str = create_llvm_options(cpu_ver, config)
args_list = target_str.split() + llvm_str.split()
return Target(" ".join(["hexagon"] + args_list))
def create(target):
"""Deprecated. Use the constructor of :py:mod:`tvm.target.Target` directly."""
warnings.warn("tvm.target.create() is being deprecated. Please use tvm.target.Target() instead")
return Target(target)
@_register_func("target._load_config_dict")
def _load_config_dict(config_dict_str):
try:
config = json.loads(config_dict_str)
except json.decoder.JSONDecodeError:
return None
if not isinstance(config, dict):
return None
for key in config.keys():
if not isinstance(key, str):
return None
return config
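# Usage sketch (illustrative addition): the loader above accepts only JSON
# objects whose keys are strings.
#
#     _load_config_dict('{"kind": "llvm", "mcpu": "skylake"}')  # -> dict
#     _load_config_dict('[1, 2, 3]')                            # -> None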
| 35.528358
| 100
| 0.596286
|
import json
import os
import re
import warnings
import tvm._ffi
from tvm._ffi import register_func as _register_func
from tvm.runtime import Object
from . import _ffi_api
@tvm._ffi.register_object
class TargetKind(Object):
@property
def options(self):
return dict(_ffi_api.ListTargetKindOptions(self))
@staticmethod
def options_from_name(kind_name: str):
return dict(_ffi_api.ListTargetKindOptionsFromName(kind_name))
@tvm._ffi.register_object
class Target(Object):
def __init__(self, target, host=None):
if target is None or not isinstance(target, (dict, str, Target)):
raise ValueError("target has to be a string or dictionary.")
if host is not None:
if not isinstance(host, (dict, str, Target)):
raise ValueError("target host has to be a string or dictionary.")
self.__init_handle_by_constructor__(_ffi_api.Target, Target(target), Target(host))
else:
self.__init_handle_by_constructor__(_ffi_api.Target, target)
def __enter__(self):
_ffi_api.TargetEnterScope(self)
return self
def __exit__(self, ptype, value, trace):
_ffi_api.TargetExitScope(self)
def export(self):
return _ffi_api.TargetExport(self)
def with_host(self, host=None):
return _ffi_api.WithHost(self, Target(host))
@staticmethod
def current(allow_none=True):
return _ffi_api.TargetCurrent(allow_none)
@property
def arch(self):
return str(self.attrs.get("arch", ""))
@property
def max_num_threads(self):
return int(self.attrs["max_num_threads"])
@property
def thread_warp_size(self):
return int(self.attrs["thread_warp_size"])
@property
def max_function_args(self):
return int(self.attrs.get("max_function_args", -1))
@property
def device_name(self):
return str(self.attrs.get("device", ""))
@property
def model(self):
return str(self.attrs.get("model", "unknown"))
@property
def mcpu(self):
return str(self.attrs.get("mcpu", ""))
@property
def mattr(self):
return list(self.attrs.get("mattr", []))
@property
def libs(self):
return list(self.attrs.get("libs", []))
@staticmethod
def list_kinds():
return list(_ffi_api.ListTargetKinds())
@staticmethod
def check_and_update_host_consist(target, host=None, target_is_dict_key=True):
if target is None:
assert host is None, "Target host is not empty when target is empty."
return target, host
if isinstance(target, dict) and "kind" not in target:
new_target = {}
for tgt, mod in target.items():
if not target_is_dict_key:
tgt, mod = mod, tgt
if isinstance(tgt, (dict, str, Target)):
tgt, host = Target.check_and_update_host_consist(tgt, host)
if not target_is_dict_key:
tgt, mod = mod, tgt
new_target[tgt] = mod
target = new_target
else:
target = Target(target, host)
host = target.host
return target, host
def _merge_opts(opts, new_opts):
if isinstance(new_opts, str):
new_opts = new_opts.split()
if new_opts:
opt_set = set(opts)
new_opts = [opt for opt in new_opts if opt not in opt_set]
return opts + new_opts
return opts
def cuda(model="unknown", arch=None, options=None):
opts = _merge_opts(["-model=%s" % model], options)
if arch:
opts = _merge_opts(["-arch=%s" % arch], opts)
if not any(["-arch" in opt for opt in opts]):
warnings.warn("Try specifying cuda arch by adding 'arch=sm_xx' to your target.")
return Target(" ".join(["cuda"] + opts))
def rocm(model="unknown", options=None):
opts = _merge_opts(["-model=%s" % model], options)
return Target(" ".join(["rocm"] + opts))
def mali(model="unknown", options=None):
opts = ["-device=mali", "-model=%s" % model]
opts = _merge_opts(opts, options)
return Target(" ".join(["opencl"] + opts))
def intel_graphics(model="unknown", options=None):
opts = ["-device=intel_graphics", "-model=%s" % model, "-thread_warp_size=16"]
opts = _merge_opts(opts, options)
return Target(" ".join(["opencl"] + opts))
MICRO_SUPPORTED_MODELS = {
"host": [],
"atsamd51": ["-mcpu=cortex-m4"],
"cxd5602gg": ["-mcpu=cortex-m4"],
"esp32": [],
"imxrt10xx": ["-mcpu=cortex-m7"],
"mps2_an521": ["-mcpu=cortex-m33"],
"nrf52840": ["-mcpu=cortex-m4"],
"nrf5340dk": ["-mcpu=cortex-m33"],
"sam3x8e": ["-mcpu=cortex-m3"],
"stm32f746xx": ["-mcpu=cortex-m7", "-march=armv7e-m"],
"stm32l4r5zi": ["-mcpu=cortex-m4"],
"zynq_mp_r5": ["-mcpu=cortex-r5"],
}
def micro(model="unknown", options=None):
if model not in MICRO_SUPPORTED_MODELS:
raise ValueError(f"Model {model} not supported by tvm.target.micro.")
opts = _merge_opts(
MICRO_SUPPORTED_MODELS[model] + [f"-model={model}"],
options,
)
return Target(" ".join(["c"] + opts))
def arm_cpu(model="unknown", options=None):
trans_table = {
"pixel2": ["-model=snapdragon835", "-mtriple=arm64-linux-android", "-mattr=+neon"],
"mate10": ["-model=kirin970", "-mtriple=arm64-linux-android", "-mattr=+neon"],
"mate10pro": ["-model=kirin970", "-mtriple=arm64-linux-android", "-mattr=+neon"],
"p20": ["-model=kirin970", "-mtriple=arm64-linux-android", "-mattr=+neon"],
"p20pro": ["-model=kirin970", "-mtriple=arm64-linux-android", "-mattr=+neon"],
"rasp3b": ["-model=bcm2837", "-mtriple=armv7l-linux-gnueabihf", "-mattr=+neon"],
"rasp4b": [
"-model=bcm2711",
"-mtriple=armv8l-linux-gnueabihf",
"-mattr=+neon",
"-mcpu=cortex-a72",
],
"rasp4b64": [
"-model=bcm2711",
"-mtriple=aarch64-linux-gnu",
"-mattr=+neon",
"-mcpu=cortex-a72",
],
"rk3399": ["-model=rk3399", "-mtriple=aarch64-linux-gnu", "-mattr=+neon"],
"pynq": ["-model=pynq", "-mtriple=armv7a-linux-eabi", "-mattr=+neon"],
"ultra96": ["-model=ultra96", "-mtriple=aarch64-linux-gnu", "-mattr=+neon"],
"beagleai": [
"-model=beagleai",
"-mtriple=armv7a-linux-gnueabihf",
"-mattr=+neon,+vfp4,+thumb2",
"-mcpu=cortex-a15",
],
"stm32mp1": [
"-model=stm32mp1",
"-mtriple=armv7a-linux-gnueabihf",
"-mattr=+neon,+vfp4,+thumb2",
"-mcpu=cortex-a7",
],
"thunderx": [
"-model=thunderx",
"-mtriple=aarch64-linux-gnu",
"-mattr=+neon,+crc,+lse",
"-mcpu=thunderxt88",
],
}
pre_defined_opt = trans_table.get(model, ["-model=%s" % model])
opts = ["-device=arm_cpu"] + pre_defined_opt
opts = _merge_opts(opts, options)
return Target(" ".join(["llvm"] + opts))
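# Illustration (added, not part of the original source): a known board name
# pulls in the full preset, e.g. arm_cpu("rasp4b64") gives
#     Target("llvm -device=arm_cpu -model=bcm2711 -mtriple=aarch64-linux-gnu"
#            " -mattr=+neon -mcpu=cortex-a72")
# whereas an unlisted name only falls back to -model=<name>.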
def rasp(options=None):
warnings.warn(
"tvm.target.rasp() is going to be deprecated. " 'Please use tvm.target.arm_cpu("rasp3b")'
)
return arm_cpu("rasp3b", options)
def vta(model="unknown", options=None):
opts = ["-device=vta", "-keys=vta,cpu", "-model=%s" % model]
opts = _merge_opts(opts, options)
return Target(" ".join(["ext_dev"] + opts))
def bifrost(model="unknown", options=None):
opts = ["-device=bifrost", "-model=%s" % model]
opts = _merge_opts(opts, options)
return Target(" ".join(["opencl"] + opts))
def riscv_cpu(model="sifive-u54", options=None):
trans_table = {
"sifive-e31": [
"-model=sifive-e31",
"-mtriple=riscv32-unknown-linux-gnu",
"-mcpu=sifive-e31",
"-mabi=ilp32",
],
"sifive-e76": [
"-model=sifive-e76",
"-mtriple=riscv32-unknown-linux-gnu",
"-mcpu=sifive-e76",
"-mabi=ilp32",
],
"sifive-u54": [
"-model=sifive-u54",
"-mtriple=riscv64-unknown-linux-gnu",
"-mcpu=sifive-u54",
"-mabi=lp64d",
],
"sifive-u74": [
"-model=sifive-u74",
"-mtriple=riscv64-unknown-linux-gnu",
"-mcpu=sifive-u74",
"-mabi=lp64d",
],
}
pre_defined_opt = trans_table.get(model, ["-model=%s" % model])
opts = ["-device=arm_cpu"] + pre_defined_opt
opts = _merge_opts(opts, options)
return Target(" ".join(["llvm"] + opts))
def hexagon(cpu_ver="v66", **kwargs):
valid_hex = ["v60", "v62", "v65", "v66", "v67", "v67t", "v68"]
try:
cpu_ver = cpu_ver[cpu_ver.index("v") :].lower()
assert cpu_ver in valid_hex
    except (AttributeError, TypeError, ValueError, AssertionError):  # avoid a bare except
msg = "{} is not a valid Hexagon version\nvalid versions include {}"
raise ValueError(msg.format(cpu_ver, valid_hex)) from None
config = {
"hvx": 128,
"sim_options": None,
"llvm_options": None,
"link_params": False,
}
config.update(kwargs)
if config.get("sim_args"):
msg = "The keyword parameter 'sim_args' is deprecated, use 'sim_options' instead"
warnings.warn(msg, stacklevel=2)
config.update({"sim_options": config["sim_args"]})
if config.get("llvm_args"):
msg = "The keyword parameter 'llvm_args' is deprecated, use 'llvm_options' instead"
warnings.warn(msg, stacklevel=2)
config.update({"llvm_options": config["llvm_args"]})
def create_llvm_target(cpu_ver, config):
target = " -mtriple=hexagon"
mcpu = " -mcpu=hexagon" + cpu_ver
def create_target_features(config):
tfs = []
if config["hvx"] > 0:
valid_hvx = [0, 64, 128]
                if config["hvx"] not in valid_hvx:
raise ValueError("Invalid hvx value, should be one of " + str(valid_hvx))
tfs += ["+hvx" + cpu_ver, "+hvx-length" + str(config["hvx"]) + "b"]
else:
tfs += ["-hvx"]
return "-mattr=" + ",".join(tfs) if tfs else ""
return target + mcpu + " " + create_target_features(config)
def create_sim_options(cpu_ver, config):
def validate_hvx_length(codegen_hvx, sim_options):
if sim_options and "--hvx_length" in sim_options:
i = sim_options.index("hvx_length") + len("hvx_length") + 1
sim_hvx = sim_options[i : i + 3]
if sim_hvx != str(codegen_hvx):
msg = "sim hvx {} and codegen hvx {} mismatch!".format(sim_hvx, codegen_hvx)
warnings.warn(msg, stacklevel=4)
elif codegen_hvx != 0:
sim_options = sim_options + " " if isinstance(sim_options, str) else ""
sim_options += "--hvx_length " + str(codegen_hvx)
return sim_options or ""
hvx = config["hvx"]
sim_options = config["sim_options"]
if not sim_options:
return cpu_ver + " " + validate_hvx_length(hvx, sim_options)
sim_cpu = cpu_ver + " "
if isinstance(sim_options, list):
sim_options = " ".join(sim_options)
if "v6" in sim_options:
sim_cpu = ""
valid_cpu_str_regex = (
r"(?P<pre>--.*\s)?(--m)?"
+ r"(?P<base_version>v6[25678])(?P<sub_version>[a-z])?"
+ r"(?P<l2_size>_[0-9]+)?(?P<rev>_rev[0-9])?\s?(?P<post>--.*)?"
)
m = re.match(valid_cpu_str_regex, sim_options.lower())
if not m:
raise ValueError('Invalid simulator argument string "{}"'.format(sim_options))
cpu_attr = {x: str(m.groupdict()[x] or "") for x in m.groupdict()}
sim_options = (
cpu_attr["base_version"]
+ cpu_attr["sub_version"]
+ cpu_attr["l2_size"]
+ cpu_attr["rev"]
+ " "
+ cpu_attr["pre"]
+ cpu_attr["post"]
)
return sim_cpu + " " + validate_hvx_length(hvx, sim_options)
def create_llvm_options(cpu_ver, config):
llvm_options = config["llvm_options"]
        # TVM's option parser doesn't allow '=' in an option string, so encode
        # '=' as '@' here to survive parsing.
if llvm_options is None or len(llvm_options.strip()) == 0:
return ""
args = [s.replace("=", "@") for s in llvm_options.split()]
return "--llvm-options=" + ",".join(args)
# Sim args
os.environ["HEXAGON_SIM_ARGS"] = create_sim_options(cpu_ver, config)
target_str = create_llvm_target(cpu_ver, config)
llvm_str = create_llvm_options(cpu_ver, config)
args_list = target_str.split() + llvm_str.split()
return Target(" ".join(["hexagon"] + args_list))
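# Illustration (added, not part of the original source): hexagon("v66", hvx=128)
# produces a target of the form
#     hexagon -mtriple=hexagon -mcpu=hexagonv66 -mattr=+hvxv66,+hvx-length128b
# hvx=0 disables the vector units (-mattr=-hvx), and a cpu_ver outside
# valid_hex raises ValueError.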
def create(target):
warnings.warn("tvm.target.create() is being deprecated. Please use tvm.target.Target() instead")
return Target(target)
@_register_func("target._load_config_dict")
def _load_config_dict(config_dict_str):
try:
config = json.loads(config_dict_str)
except json.decoder.JSONDecodeError:
return None
if not isinstance(config, dict):
return None
for key in config.keys():
if not isinstance(key, str):
return None
return config
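# Illustration (added, not part of the original source; the config key is just
# an example): the loader accepts only a JSON object with string keys.
#     _load_config_dict('{"relay.backend.use_auto_scheduler": true}')  # -> dict
#     _load_config_dict('[1, 2, 3]')   # -> None (not an object)
#     _load_config_dict('not json')    # -> None (parse error)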
| true
| true
|
1c418edd52415a476c4b7126f0a4eb8c8d888556
| 3,345
|
py
|
Python
|
source/position/position.py
|
homeoffice-ys/EliteQuant_Python
|
4384494b9abe2b2622752bd59532efcdc034bc1c
|
[
"Apache-2.0"
] | 51
|
2019-02-01T19:43:37.000Z
|
2022-03-16T09:07:03.000Z
|
source/position/position.py
|
ajmal017/EliteQuant_Python
|
28ed64d742d9f010836d4070cd26bab78d9623d0
|
[
"Apache-2.0"
] | 2
|
2019-02-23T18:54:22.000Z
|
2019-11-09T01:30:32.000Z
|
source/position/position.py
|
ajmal017/EliteQuant_Python
|
28ed64d742d9f010836d4070cd26bab78d9623d0
|
[
"Apache-2.0"
] | 35
|
2019-02-08T02:00:31.000Z
|
2022-03-01T23:17:00.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from ..util.util_func import retrieve_multiplier_from_full_symbol
class Position(object):
def __init__(self, full_symbol, average_price, size, realized_pnl=0):
"""
Position includes zero/closed security
"""
## TODO: add cumulative_commission, long_trades, short_trades, round_trip etc
self.full_symbol = full_symbol
# average price includes commission
self.average_price = average_price
self.size = size
        self.realized_pnl = realized_pnl  # keep the constructor argument instead of silently zeroing it
self.unrealized_pnl = 0
self.api = ''
self.account = ''
def mark_to_market(self, last_price, multiplier):
"""
given new market price, update the position
"""
# if long or size > 0, pnl is positive if last_price > average_price
# else if short or size < 0, pnl is positive if last_price < average_price
self.unrealized_pnl = (last_price - self.average_price) * self.size * multiplier
def on_fill(self, fill_event, multiplier):
"""
adjust average_price and size according to new fill/trade/transaction
"""
if self.full_symbol != fill_event.full_symbol:
print(
"Position symbol %s and fill event symbol %s do not match. "
% (self.full_symbol, fill_event.full_symbol)
)
if self.size > 0: # existing long
if fill_event.fill_size > 0: # long more
                self.average_price = (self.average_price * self.size + fill_event.fill_price * fill_event.fill_size
                                      + fill_event.commission / multiplier) \
                    / (self.size + fill_event.fill_size)  # true division; floor division would truncate the price
else: # flat long
if abs(self.size) >= abs(fill_event.fill_size): # stay long
self.realized_pnl += (self.average_price - fill_event.fill_price) * fill_event.fill_size \
* multiplier - fill_event.commission
else: # flip to short
                    self.realized_pnl += (fill_event.fill_price - self.average_price) * self.size \
                        * multiplier - fill_event.commission  # pnl uses the fill price, not the fill size
self.average_price = fill_event.fill_price
else: # existing short
if fill_event.fill_size < 0: # short more
                self.average_price = (self.average_price * self.size + fill_event.fill_price * fill_event.fill_size
                                      + fill_event.commission / multiplier) \
                    / (self.size + fill_event.fill_size)  # true division; floor division would truncate the price
else: # flat short
if abs(self.size) >= abs(fill_event.fill_size): # stay short
self.realized_pnl += (self.average_price - fill_event.fill_price) * fill_event.fill_size \
* multiplier - fill_event.commission
else: # flip to long
                    self.realized_pnl += (fill_event.fill_price - self.average_price) * self.size \
                        * multiplier - fill_event.commission  # pnl uses the fill price, not the fill size
self.average_price = fill_event.fill_price
self.size += fill_event.fill_size
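# Worked example (added for illustration, not part of the original file), with
# made-up numbers and multiplier=1 (a plain stock):
#
#   long 100 @ 10.0, then sell 40 @ 12.0 paying 1.0 commission
#   stay-long branch: realized_pnl += (10.0 - 12.0) * (-40) * 1 - 1.0 = +79.0
#   size becomes 100 + (-40) = 60 and average_price stays at 10.0
#
# Selling more than 100 takes the flip branch instead: it realizes
# (fill_price - average_price) * size on the whole old position and resets
# average_price to the fill price.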
| 51.461538
| 115
| 0.560837
|
| true
| true
|
1c418f004546888f8db9ad78d063e787719b2295
| 798
|
py
|
Python
|
hypervector/scripts/get.py
|
ploomber/posts
|
5f739cf04ff77932c34d5d3ad8d6d94dfe97f051
|
[
"Apache-2.0"
] | 15
|
2020-11-30T19:31:30.000Z
|
2022-01-16T15:09:16.000Z
|
hypervector/scripts/get.py
|
ploomber/posts
|
5f739cf04ff77932c34d5d3ad8d6d94dfe97f051
|
[
"Apache-2.0"
] | 3
|
2022-01-13T03:51:14.000Z
|
2022-03-12T01:01:41.000Z
|
hypervector/scripts/get.py
|
ploomber/posts
|
5f739cf04ff77932c34d5d3ad8d6d94dfe97f051
|
[
"Apache-2.0"
] | 8
|
2021-07-28T02:19:00.000Z
|
2022-02-06T16:03:24.000Z
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.11.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
import pandas as pd
from sklearn.datasets import load_iris
# + tags=["parameters"]
# If extract_upstream=True in your pipeline.yaml file and this task has
# dependencies, list them here (e.g. upstream = ['some_task']); otherwise
# leave as None
upstream = None
# If extract_product=False in your pipeline.yaml file, leave this as None; the
# value in the YAML spec will be added here during task execution
product = None
# -
df = load_iris(as_frame=True)['frame']
df.head()
df.to_csv(product['data'], index=False)
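# Illustration (added, not part of the original script; the path is an
# assumption): at run time Ploomber injects the placeholders above, roughly
#     upstream = None
#     product = {'data': 'output/get.csv'}   # taken from pipeline.yaml
# so df.to_csv(product['data'], index=False) writes the declared product.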
| 22.8
| 78
| 0.681704
|
| true
| true
|
1c418f3ddc2e63804d591924c6ec31465718f551
| 19,596
|
py
|
Python
|
python/ccxt/bitforex.py
|
lottaeouss/ccxt
|
694fd3dc30154057f6c2310920552329df973031
|
[
"MIT"
] | null | null | null |
python/ccxt/bitforex.py
|
lottaeouss/ccxt
|
694fd3dc30154057f6c2310920552329df973031
|
[
"MIT"
] | 2
|
2019-06-24T02:24:54.000Z
|
2019-07-03T01:43:45.000Z
|
python/ccxt/bitforex.py
|
lottaeouss/ccxt
|
694fd3dc30154057f6c2310920552329df973031
|
[
"MIT"
] | 1
|
2019-08-07T15:44:10.000Z
|
2019-08-07T15:44:10.000Z
|
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
# -----------------------------------------------------------------------------
try:
    basestring  # exists on Python 2
except NameError:
    basestring = str  # Python 3 fallback
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import DDoSProtection
class bitforex (Exchange):
def describe(self):
return self.deep_extend(super(bitforex, self).describe(), {
'id': 'bitforex',
'name': 'Bitforex',
'countries': ['CN'],
'version': 'v1',
'has': {
'fetchBalance': True,
'fetchMarkets': True,
'createOrder': True,
'cancelOrder': True,
'fetchTicker': True,
'fetchTickers': False,
'fetchMyTrades': False,
'fetchTrades': True,
'fetchOrder': True,
'fetchOrders': False,
'fetchOpenOrders': True,
'fetchClosedOrders': True,
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/44310033-69e9e600-a3d8-11e8-873d-54d74d1bc4e4.jpg',
'api': 'https://api.bitforex.com',
'www': 'https://www.bitforex.com',
'doc': 'https://github.com/bitforexapi/API_Docs/wiki',
'fees': 'https://help.bitforex.com/en_us/?cat=13',
'referral': 'https://www.bitforex.com/registered?inviterId=1867438',
},
'api': {
'public': {
'get': [
'api/v1/market/symbols',
'api/v1/market/ticker',
'api/v1/market/depth',
'api/v1/market/trades',
'api/v1/market/kline',
],
},
'private': {
'post': [
'api/v1/fund/mainAccount',
'api/v1/fund/allAccount',
'api/v1/trade/placeOrder',
'api/v1/trade/cancelOrder',
'api/v1/trade/orderInfo',
'api/v1/trade/orderInfos',
],
},
},
'fees': {
'trading': {
'tierBased': False,
'percentage': True,
'maker': 0.1 / 100,
'taker': 0.1 / 100,
},
'funding': {
'tierBased': False,
'percentage': True,
'deposit': {},
'withdraw': {
'BTC': 0.0005,
'ETH': 0.01,
'BCH': 0.0001,
'LTC': 0.001,
'ETC': 0.005,
'USDT': 5,
'CMCT': 30,
'AION': 3,
'LVT': 0,
'DATA': 40,
'RHP': 50,
'NEO': 0,
'AIDOC': 10,
'BQT': 2,
'R': 2,
'DPY': 0.8,
'GTC': 40,
'AGI': 30,
'DENT': 100,
'SAN': 1,
'SPANK': 8,
'AID': 5,
'OMG': 0.1,
'BFT': 5,
'SHOW': 150,
'TRX': 20,
'ABYSS': 10,
'THM': 25,
'ZIL': 20,
'PPT': 0.2,
'WTC': 0.4,
'LRC': 7,
'BNT': 1,
'CTXC': 1,
'MITH': 20,
'TRUE': 4,
'LYM': 10,
'VEE': 100,
'AUTO': 200,
'REN': 50,
'TIO': 2.5,
'NGC': 1.5,
'PST': 10,
'CRE': 200,
'IPC': 5,
'PTT': 1000,
'XMCT': 20,
'ATMI': 40,
'TERN': 40,
'XLM': 0.01,
'ODE': 15,
'FTM': 100,
'RTE': 100,
'DCC': 100,
'IMT': 500,
'GOT': 3,
'EGT': 500,
'DACC': 1000,
'UBEX': 500,
'ABL': 100,
'OLT': 100,
'DAV': 40,
'THRT': 10,
'RMESH': 3,
'UPP': 20,
'SDT': 0,
'SHR': 10,
'MTV': 3,
'ESS': 100,
'MET': 3,
'TTC': 20,
'LXT': 10,
'XCLP': 100,
'LUK': 100,
'UBC': 100,
'DTX': 10,
'BEAT': 20,
'DEED': 2,
'BGX': 3000,
'PRL': 20,
'ELY': 50,
'CARD': 300,
'SQR': 15,
'VRA': 400,
'BWX': 3500,
'MAS': 75,
'FLP': 0.6,
'UNC': 300,
'CRNC': 15,
'MFG': 70,
'ZXC': 70,
'TRT': 30,
'ZIX': 35,
'XRA': 10,
'AMO': 1600,
'IPG': 3,
'uDoo': 50,
'URB': 30,
'ARCONA': 3,
'CRAD': 5,
'NOBS': 1000,
'ADF': 2,
'ELF': 5,
'LX': 20,
'PATH': 15,
'SILK': 120,
'SKYFT': 50,
'EDN': 50,
'ADE': 50,
'EDR': 10,
'TIME': 0.25,
'SPRK': 20,
'QTUM': 0.01,
'BF': 5,
'ZPR': 100,
'HYB': 10,
'CAN': 30,
'CEL': 10,
'ATS': 50,
'KCASH': 1,
'ACT': 0.01,
'MT': 300,
'DXT': 30,
'WAB': 4000,
'HYDRO': 400,
'LQD': 5,
'OPTC': 200,
'EQUAD': 80,
'LATX': 50,
'LEDU': 100,
'RIT': 70,
'ACDC': 500,
'FSN': 2,
},
},
},
'exceptions': {
'4004': OrderNotFound,
'1013': AuthenticationError,
'1016': AuthenticationError,
'3002': InsufficientFunds,
'10204': DDoSProtection,
},
})
def fetch_markets(self, params={}):
response = self.publicGetApiV1MarketSymbols()
data = response['data']
result = []
for i in range(0, len(data)):
market = data[i]
id = market['symbol']
            symbolParts = id.split('-')  # bitforex market ids look like 'coin-<quote>-<base>'
baseId = symbolParts[2]
quoteId = symbolParts[1]
base = baseId.upper()
quote = quoteId.upper()
base = self.common_currency_code(base)
quote = self.common_currency_code(quote)
symbol = base + '/' + quote
active = True
precision = {
'amount': market['amountPrecision'],
'price': market['pricePrecision'],
}
limits = {
'amount': {
'min': market['minOrderAmount'],
'max': None,
},
'price': {
'min': None,
'max': None,
},
'cost': {
'min': None,
'max': None,
},
}
result.append({
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'baseId': baseId,
'quoteId': quoteId,
'active': active,
'precision': precision,
'limits': limits,
'info': market,
})
return result
def parse_trade(self, trade, market=None):
symbol = None
if market is not None:
symbol = market['symbol']
timestamp = self.safe_integer(trade, 'time')
id = self.safe_string(trade, 'tid')
orderId = None
amount = self.safe_float(trade, 'amount')
price = self.safe_float(trade, 'price')
cost = None
if price is not None:
if amount is not None:
cost = amount * price
sideId = self.safe_integer(trade, 'direction')
side = self.parse_side(sideId)
return {
'info': trade,
'id': id,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'type': None,
'side': side,
'price': price,
'amount': amount,
'cost': cost,
'order': orderId,
'fee': None,
}
def fetch_trades(self, symbol, since=None, limit=None, params={}):
self.load_markets()
request = {
'symbol': self.market_id(symbol),
}
if limit is not None:
request['size'] = limit
market = self.market(symbol)
response = self.publicGetApiV1MarketTrades(self.extend(request, params))
return self.parse_trades(response['data'], market, since, limit)
def fetch_balance(self, params={}):
self.load_markets()
response = self.privatePostApiV1FundAllAccount(params)
data = response['data']
result = {'info': response}
for i in range(0, len(data)):
current = data[i]
currencyId = current['currency']
code = currencyId.upper()
if currencyId in self.currencies_by_id:
code = self.currencies_by_id[currencyId]['code']
else:
code = self.common_currency_code(code)
account = self.account()
result[code] = account
result[code]['used'] = self.safe_float(current, 'frozen')
result[code]['free'] = self.safe_float(current, 'active')
            result[code]['total'] = self.safe_float(current, 'fix')  # 'fix' is bitforex's total-balance field
return self.parse_balance(result)
def fetch_ticker(self, symbol, params={}):
self.load_markets()
market = self.markets[symbol]
request = {
'symbol': market['id'],
}
response = self.publicGetApiV1MarketTicker(self.extend(request, params))
data = response['data']
timestamp = self.safe_integer(data, 'date')
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_float(data, 'high'),
'low': self.safe_float(data, 'low'),
'bid': self.safe_float(data, 'buy'),
'bidVolume': None,
'ask': self.safe_float(data, 'sell'),
'askVolume': None,
'vwap': None,
'open': None,
'close': self.safe_float(data, 'last'),
'last': self.safe_float(data, 'last'),
'previousClose': None,
'change': None,
'percentage': None,
'average': None,
'baseVolume': self.safe_float(data, 'vol'),
'quoteVolume': None,
'info': response,
}
def fetch_order_book(self, symbol, limit=None, params={}):
self.load_markets()
marketId = self.market_id(symbol)
request = {
'symbol': marketId,
}
if limit is not None:
request['size'] = limit
response = self.publicGetApiV1MarketDepth(self.extend(request, params))
data = response['data']
timestamp = response['time']
bidsKey = 'bids'
asksKey = 'asks'
priceKey = 'price'
amountKey = 'amount'
orderbook = self.parse_order_book(data, timestamp, bidsKey, asksKey, priceKey, amountKey)
return orderbook
def parse_order_status(self, status):
statuses = {
'0': 'open',
'1': 'open',
'2': 'closed',
'3': 'canceled',
'4': 'canceled',
}
return statuses[status] if (status in list(statuses.keys())) else status
def parse_side(self, sideId):
if sideId == 1:
return 'buy'
elif sideId == 2:
return 'sell'
else:
return None
def parse_order(self, order, market=None):
id = self.safe_string(order, 'orderId')
timestamp = self.safe_float(order, 'createTime')
lastTradeTimestamp = self.safe_float(order, 'lastTime')
symbol = market['symbol']
sideId = self.safe_integer(order, 'tradeType')
side = self.parse_side(sideId)
type = None
price = self.safe_float(order, 'orderPrice')
average = self.safe_float(order, 'avgPrice')
amount = self.safe_float(order, 'orderAmount')
filled = self.safe_float(order, 'dealAmount')
remaining = amount - filled
status = self.parse_order_status(self.safe_string(order, 'orderState'))
cost = filled * price
feeSide = 'base' if (side == 'buy') else 'quote'
feeCurrency = market[feeSide]
fee = {
'cost': self.safe_float(order, 'tradeFee'),
'currency': feeCurrency,
}
result = {
'info': order,
'id': id,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': lastTradeTimestamp,
'symbol': symbol,
'type': type,
'side': side,
'price': price,
'cost': cost,
'average': average,
'amount': amount,
'filled': filled,
'remaining': remaining,
'status': status,
'fee': fee,
}
return result
def fetch_order(self, id, symbol=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': self.market_id(symbol),
'orderId': id,
}
response = self.privatePostApiV1TradeOrderInfo(self.extend(request, params))
order = self.parse_order(response['data'], market)
return order
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': self.market_id(symbol),
'state': 0,
}
response = self.privatePostApiV1TradeOrderInfos(self.extend(request, params))
return self.parse_orders(response['data'], market, since, limit)
def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': self.market_id(symbol),
'state': 1,
}
response = self.privatePostApiV1TradeOrderInfos(self.extend(request, params))
return self.parse_orders(response['data'], market, since, limit)
def create_order(self, symbol, type, side, amount, price=None, params={}):
self.load_markets()
sideId = None
if side == 'buy':
sideId = 1
elif side == 'sell':
sideId = 2
request = {
'symbol': self.market_id(symbol),
'price': price,
'amount': amount,
'tradeType': sideId,
}
response = self.privatePostApiV1TradePlaceOrder(self.extend(request, params))
data = response['data']
return {
'info': response,
'id': self.safe_string(data, 'orderId'),
}
def cancel_order(self, id, symbol=None, params={}):
self.load_markets()
request = {
'orderId': id,
}
if symbol is not None:
request['symbol'] = self.market_id(symbol)
results = self.privatePostApiV1TradeCancelOrder(self.extend(request, params))
success = results['success']
returnVal = {'info': results, 'success': success}
return returnVal
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
url = self.urls['api'] + '/' + self.implode_params(path, params)
query = self.omit(params, self.extract_params(path))
if api == 'public':
if query:
url += '?' + self.urlencode(query)
else:
self.check_required_credentials()
payload = self.urlencode({'accessKey': self.apiKey})
query['nonce'] = self.milliseconds()
if query:
payload += '&' + self.urlencode(self.keysort(query))
# message = '/' + 'api/' + self.version + '/' + path + '?' + payload
message = '/' + path + '?' + payload
signature = self.hmac(self.encode(message), self.encode(self.secret))
body = payload + '&signData=' + signature
headers = {
'Content-Type': 'application/x-www-form-urlencoded',
}
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def handle_errors(self, code, reason, url, method, headers, body, response):
if not isinstance(body, basestring):
return # fallback to default error handler
        if body and ((body[0] == '{') or (body[0] == '[')):  # guard against an empty body
feedback = self.id + ' ' + body
success = self.safe_value(response, 'success')
if success is not None:
if not success:
code = self.safe_string(response, 'code')
if code in self.exceptions:
raise self.exceptions[code](feedback)
else:
raise ExchangeError(feedback)
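# Standalone sketch (added for illustration, not part of ccxt): the request
# signing done in sign() above, rebuilt with the standard library. It assumes
# ccxt's default HMAC-SHA256 hex digest; key, secret and nonce are dummies.
import hashlib
import hmac as _hmac_mod
from urllib.parse import urlencode as _urlencode

def _sign_bitforex(path, query, api_key, secret, nonce):
    payload = _urlencode({'accessKey': api_key})
    query = dict(sorted({**query, 'nonce': nonce}.items()))  # keysort + nonce
    payload += '&' + _urlencode(query)
    message = '/' + path + '?' + payload  # the string that gets signed
    sign = _hmac_mod.new(secret.encode(), message.encode(), hashlib.sha256).hexdigest()
    return payload + '&signData=' + sign  # request body sent by the client

_example_body = _sign_bitforex('api/v1/fund/allAccount', {}, 'key', 'secret', 1567000000000)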
| 36.022059
| 126
| 0.414676
|
| true
| true
|
1c41907c50737308182a896121ca20a6a7ad4dda
| 28,565
|
py
|
Python
|
iasi/evaluation.py
|
Peter42/iasi
|
fc799d542c2bb80c3f559bc2f9e833ac330a5506
|
[
"MIT"
] | null | null | null |
iasi/evaluation.py
|
Peter42/iasi
|
fc799d542c2bb80c3f559bc2f9e833ac330a5506
|
[
"MIT"
] | 3
|
2019-05-02T12:49:21.000Z
|
2019-06-12T09:11:00.000Z
|
iasi/evaluation.py
|
Peter42/iasi
|
fc799d542c2bb80c3f559bc2f9e833ac330a5506
|
[
"MIT"
] | 1
|
2019-10-18T21:33:33.000Z
|
2019-10-18T21:33:33.000Z
|
from functools import partial
import math
import os
import luigi
import numpy as np
import pandas as pd
from netCDF4 import Dataset, Group, Variable
from sklearn.model_selection import ParameterGrid
from iasi.composition import Composition
from iasi.compression import CompressDataset, SelectSingleVariable, DecompressDataset
from iasi.file import MoveVariables, FileTask
from iasi.metrics import Covariance
from iasi.quadrant import Quadrant, AssembleFourQuadrants
from iasi.util import CustomTask
import logging
logger = logging.getLogger(__name__)
class EvaluationTask(FileTask):
gases = luigi.ListParameter()
variables = luigi.ListParameter()
threshold_values = luigi.ListParameter(default=[1e-2, 1e-3, 1e-4, 1e-5])
ancestor = None
def requires(self):
compression_parameter = {
'ancestor': [self.ancestor],
'file': [self.file],
'dst': [self.dst],
'threshold': self.threshold_values,
'gas': self.gases,
'variable': self.variables
}
compressed_param_grid = list(ParameterGrid(compression_parameter))
tasks = [SelectSingleVariable(**params)
for params in compressed_param_grid]
# for uncompressed dataset we do not need multiple threshold values
uncompressed_parameter = {
'ancestor': ['MoveVariables'],
'file': [self.file],
'dst': [self.dst],
'threshold': [0],
'gas': self.gases,
'variable': self.variables
}
uncompressed_param_grid = list(ParameterGrid(uncompressed_parameter))
single_variables = tasks + \
[SelectSingleVariable(**params)
for params in uncompressed_param_grid]
# exclude cross average kernel from atmospheric temperature.
# atmospheric temperature has only avk and noise matrix
filtered = filter(lambda task: not(task.gas == 'Tatm') or not(
task.variable == 'Tatmxavk'), single_variables)
return {
'single': filtered,
'original': MoveVariables(dst=self.dst, file=self.file)
}
class EvaluationCompressionSize(EvaluationTask):
ancestor = 'CompressDataset'
def output_directory(self):
return 'compression-summary'
def output_extension(self):
return '.csv'
def size_in_kb(self, file):
return int(os.path.getsize(file) / (1000))
def run(self):
# get size for all parameters
df = pd.DataFrame()
for task, input in zip(self.requires()['single'], self.input()['single']):
df = df.append({
'gas': task.gas,
'variable': task.variable,
'ancestor': task.ancestor,
'size': self.size_in_kb(input.path),
'threshold': task.threshold
}, ignore_index=True)
with self.output().temporary_path() as target:
df.to_csv(target, index=False)
class EvaluationErrorEstimation(FileTask):
file = luigi.Parameter()
    gases = luigi.ListParameter()  # list-valued: used as grid axes in ParameterGrid below
    variables = luigi.ListParameter()
thresholds = luigi.ListParameter(default=[1e-3])
def output_directory(self):
return 'error-estimation'
def output_extension(self):
return '.csv'
def requires(self):
parameter = {
'file': [self.file],
'dst': [self.dst],
'thresholds': [self.thresholds],
'gas': self.gases,
'variable': self.variables,
'log_file': [self.log_file]
}
parameter_grid = ParameterGrid(parameter)
# exclude cross average kernel from atmospheric temperature.
# atmospheric temperature has only avk and noise matrix
parameter_grid = filter(lambda params: not(params['gas'] == 'Tatm') or not(
params['variable'] == 'Tatmxavk'), parameter_grid)
return [VariableErrorEstimation(**params) for params in parameter_grid]
def run(self):
report = pd.DataFrame()
for task in self.input():
with task.open() as file:
task_report = pd.read_csv(file)
report = report.append(task_report)
with self.output().temporary_path() as target:
report.to_csv(target, index=False)
class VariableErrorEstimation(FileTask):
gas = luigi.Parameter()
variable = luigi.Parameter()
thresholds = luigi.ListParameter(default=[1e-3])
def output_extension(self):
return '.csv'
def requires(self):
compressed = [DecompressDataset(
dst=self.dst,
file=self.file,
threshold=threshold,
log_file=self.log_file,
compress_upstream=True
) for threshold in self.thresholds]
original = MoveVariables(
dst=self.dst, file=self.file, log_file=self.log_file)
return {
'compressed': compressed,
'original': original
}
def run(self):
path = f'/state/{self.gas}/{self.variable}'
logger.info('Starting error estimation for %s', path)
tasks_and_input = list(zip(
self.requires()['compressed'], self.input()['compressed']))
original = Dataset(self.input()['original'].path)
nol = original['atm_nol'][...]
alt = original['atm_altitude'][...]
avk = original['/state/WV/avk'][...]
alt_trop = original['tropopause_altitude'][...]
counter = 0
message = f'Calculate original error for {path}: {counter}/{len(tasks_and_input)}'
logger.info(message)
self.set_status_message(message)
self.set_progress_percentage(int(counter / len(tasks_and_input) * 100))
error_estimation: ErrorEstimation = ErrorEstimation.factory(
self.gas, nol, alt, avk, alt_trop=alt_trop)
# calculation of original error
variable_report = error_estimation.report_for(
original[path], original[path][...], None, rc_error=False)
variable_report['threshold'] = 0
# calculation of reconstruction error
for task, input in tasks_and_input:
counter += 1
nc = Dataset(input.path)
message = f'Calculating error estimation {counter} of {len(tasks_and_input)} for {path} with threshold {task.threshold}'
logger.info(message)
self.set_status_message(message)
self.set_progress_percentage(
int(counter / len(tasks_and_input) * 100))
reconstructed_values = nc[path][...]
original_values = original[path][...]
report = error_estimation.report_for(
original[path], original_values, reconstructed_values, rc_error=True)
report['threshold'] = task.threshold
variable_report = variable_report.append(report, ignore_index=True)
nc.close()
variable_report['var'] = self.variable
variable_report['gas'] = self.gas
with self.output().temporary_path() as target:
variable_report.to_csv(target, index=False)
original.close()
def output_directory(self):
return os.path.join('error-estimation', self.gas, self.variable)
class ErrorEstimation:
levels_of_interest = []
    # assume the stratosphere starts at 25 km
alt_strat = 25000
@staticmethod
def factory(gas: str, nol, alt, avk, alt_trop=None):
if gas == 'WV':
return WaterVapour(gas, nol, alt, avk, alt_trop, type_two=True)
if gas == 'GHG':
return GreenhouseGas(gas, nol, alt, alt_trop)
if gas == 'HNO3':
return NitridAcid(gas, nol, alt, alt_trop)
if gas == 'Tatm':
return AtmosphericTemperature(gas, nol, alt, alt_trop)
raise ValueError(f'No error estimation implementation for gas {gas}')
def __init__(self, gas, nol, alt, alt_trop, type_two=False):
# each gas may have multiple levels of interest
self.type_two = type_two
self.nol = nol
self.alt = alt
self.gas = gas
self.alt_trop = alt_trop
def matrix_ok(self, event, path, matrix):
ok = True
if np.ma.is_masked(matrix):
logger.warning(
'event %d contains masked values in %s. skipping...', event, path)
ok = False
if np.isnan(matrix).any():
logger.warning(
'event %d contains nan values in %s. skipping...', event, path)
ok = False
if np.isinf(matrix).any():
logger.warning(
'event %d contains inf values in %s. skipping...', event, path)
ok = False
if np.allclose(matrix, 0, atol=1e-14):
logger.warning(
'event %d contains zero or close to zero values in %s. skipping...', event, path)
ok = False
return ok
def report_for(self, variable: Variable, original, reconstructed, rc_error) -> pd.DataFrame:
# if not original.shape == reconstructed.shape:
# message = f'Different shape for {type(self).__name__} {variable.name}: original {original.shape}, reconstructed {reconstructed.shape}'
# logger.error(message)
# raise ValueError(message)
result = {
'event': [],
'level_of_interest': [],
'err': [],
'rc_error': [],
'type': []
}
error_estimation_methods = {
'avk': self.averaging_kernel,
'n': self.noise_matrix,
'Tatmxavk': self.cross_averaging_kernel
}
estimation_method = error_estimation_methods.get(variable.name)
if estimation_method is None:
raise ValueError(
f'No error estimation method for variable {variable.name}')
reshaper = Quadrant.for_assembly(self.gas, variable.name, variable)
path = f'/state/{self.gas}/{variable.name}'
for event in range(original.shape[0]):
if np.ma.is_masked(self.nol[event]) or self.nol.data[event] > 29:
continue
nol_event = self.nol.data[event]
if not self.matrix_ok(event, path, self.alt[event, :nol_event]):
continue
covariance = Covariance(nol_event, self.alt[event])
original_event = reshaper.transform(original[event], nol_event)
if not self.matrix_ok(event, path, original_event):
continue
            # use reconstructed values iff the rc_error flag is set
if rc_error:
rc_event = reshaper.transform(reconstructed[event], nol_event)
if not self.matrix_ok(event, path, rc_event):
continue
rc_event = rc_event.data
else:
rc_event = None
if isinstance(self, WaterVapour):
avk_event = AssembleFourQuadrants(
nol_event).transform(self.avk[event], nol_event)
if not self.matrix_ok(event, 'wv_avk', avk_event):
continue
avk_event = avk_event.data
else:
avk_event = None
# type two error only exists for water vapour
# if gas does not require type 2 error estimation, break loop after first iteration
calc_type_two = self.type_two
while True:
error = estimation_method(event,
original_event.data, rc_event, covariance, type2=calc_type_two, avk=avk_event)
for loi in self.levels_of_interest:
# zero == surface (special value)
if loi == 0:
level = 0
                    # for other levels subtract from the highest level
else:
level = nol_event + loi
if level < 2:
continue
result['event'].append(event)
result['level_of_interest'].append(loi)
result['err'].append(error[level, level])
result['rc_error'].append(rc_error)
result['type'].append(2 if calc_type_two else 1)
if self.gas == 'GHG':
# for greenhouse gases export also CH4 (lower right quadrant)
# nol as index offset for error level
result['event'].append(event)
result['level_of_interest'].append(loi - 29)
result['err'].append(
error[level + nol_event, level + nol_event])
result['rc_error'].append(rc_error)
result['type'].append(2 if calc_type_two else 1)
# stop if type 1 is calculated
if not calc_type_two:
break
# just finished type 2 in first iteration -> repeat with type 1
calc_type_two = False
return pd.DataFrame(result)
def averaging_kernel(self, event: int, original: np.ndarray, reconstructed: np.ndarray, covariance: Covariance, type2=False, avk=None):
raise NotImplementedError
def noise_matrix(self, event: int, original: np.ndarray, reconstructed: np.ndarray, covariance: Covariance, type2=False, avk=None):
raise NotImplementedError
def cross_averaging_kernel(self, event: int, original: np.ndarray, reconstructed: np.ndarray, covariance: Covariance, type2=False, avk=None):
raise NotImplementedError
def smoothing_error(self, actual_matrix, to_compare, assumed_covariance) -> np.ndarray:
"""Calulate smooting error with two matrices and assumed covariance"""
return (actual_matrix - to_compare) @ assumed_covariance @ (actual_matrix - to_compare).T
def assumed_covariance_temperature(self, event: int) -> np.ndarray:
"""Return assumed covariance for temperature cross averaging kernel"""
sig = self.sigma(event)
amp = self.amplitude_temperature(event)
return self.construct_covariance_matrix(event, amp, sig)
def construct_covariance_matrix(self, event, amp: np.ndarray, sig: np.ndarray) -> np.ndarray:
"""create a covariance matrix by amplitude and deviation
:param amp: Amplitude for levels
:param sig: Standard deviation for levels
"""
nol = self.nol.data[event]
alt = self.alt.data[event]
sa = np.ndarray((nol, nol))
for i in range(nol):
for j in range(nol):
sa[i, j] = amp[i] * amp[j] * \
np.exp(-((alt[i] - alt[j])*(alt[i] - alt[j])) /
(2 * sig[i] * sig[j]))
return sa
def sigma(self, event, f_sigma: float = 0.6) -> np.ndarray:
"""Assumed correlation length for all gases and temperature.
:param self.alt_strat: altitude of stratosphere in meters
:param f_sigma: scaling factor
:return: correlation length for each level
"""
nol = self.nol.data[event]
alt = self.alt.data[event]
alt_trop = self.alt_trop[event]
sig = np.ndarray(nol)
for i in range(nol):
# below tropopause
if alt[i] < alt_trop:
sig[i] = 2500 + (alt[i] - alt[0]) * \
((5000-2500)/(alt_trop-alt[0]))
            # inside the stratosphere
if alt[i] >= alt_trop and alt[i] < self.alt_strat:
sig[i] = 5000+(alt[i]-alt_trop) * \
((10000-5000)/(self.alt_strat-alt_trop))
# above stratosphere
if alt[i] > self.alt_strat:
sig[i] = 10000
return sig * f_sigma
def amplitude(self, event):
raise NotImplementedError
def amplitude_temperature(self, event) -> np.ndarray:
"""Get amplitude and deviation for atmospheric temperature
:return: amp
"""
nol = self.nol.data[event]
alt = self.alt.data[event, :nol]
alt_trop = self.alt_trop.data[event]
amp = np.ndarray(nol)
for i in range(nol):
if alt[0]+4000 < alt_trop:
# setting amp_T
if alt[i] <= alt[0]+4000:
amp[i] = 2.0 - 1.0 * (alt[i] - alt[0]) / 4000
elif alt[i] >= alt[0]+4000 and alt[i] <= alt_trop:
amp[i] = 1.
elif alt[i] > alt_trop and alt[i] <= alt_trop+5000:
amp[i] = 1.0 + 0.5 * (alt[i] - alt_trop) / 5000
elif alt[i] > alt_trop+5000:
amp[i] = 1.5
else:
# setting amp[i]
if alt[i] < alt_trop:
amp[i] = 2.0 - 1.0 * (alt[i] - alt[0]) / \
(alt_trop - alt[0])
elif alt[i] == alt_trop:
amp[i] = 1.
elif alt[i] > alt_trop and alt[i] <= alt_trop+5000:
amp[i] = 1.0 + 0.5 * (alt[i] - alt_trop) / 5000
elif alt[i] > alt_trop+5000:
amp[i] = 1.5
return amp
class WaterVapour(ErrorEstimation):
levels_of_interest = [-6, -16, -19]
def __init__(self, gas, nol, alt, avk, alt_trop, type_two=True):
super().__init__(gas, nol, alt, alt_trop, type_two=type_two)
self.avk = avk
# for each method type one and type two
def averaging_kernel(self, event: int, original: np.ndarray, reconstructed: np.ndarray, covariance: Covariance, type2=False, avk=None) -> np.ndarray:
# in this method, avk should be same like original
if not np.allclose(original, avk):
            logger.warning('There are differences in original parameter and avk')  # logger.warn is deprecated
s_cov = self.assumed_covariance(event)
nol = self.nol.data[event]
if type2:
# type 2 error
original_type2 = covariance.type2_of(original)
if reconstructed is None:
# type 2 original error
return self.smoothing_error(original_type2, np.identity(2 * nol), s_cov)
else:
# type 2 reconstruction error
rc_type2 = covariance.type2_of(reconstructed)
return self.smoothing_error(original_type2, rc_type2, s_cov)
else:
# type 1 error
original_type1 = covariance.type1_of(original)
if reconstructed is None:
# type 1 original error
return self.smoothing_error(
original_type1, np.identity(2 * nol), s_cov)
else:
# type 1 reconstruction error
rc_type1 = covariance.type1_of(reconstructed)
return self.smoothing_error(original_type1, rc_type1, s_cov)
    def noise_matrix(self, event: int, original: np.ndarray, reconstructed: np.ndarray, covariance: Covariance, type2=False, avk=None) -> np.ndarray:
        # original/approx event is already a covariance matrix -> only type1/2 transformation
        assert avk is not None
        P = covariance.traf()
        if type2:
            # type 2 error
            C = covariance.c_by_avk(avk)
            original_type2 = C @ P @ original @ P.T @ C.T
            if reconstructed is None:
                # original error
                return original_type2
            else:
                # reconstruction error
                rc_type2 = C @ P @ reconstructed @ P.T @ C.T
                return np.absolute(original_type2 - rc_type2)
        else:
            # type 1 error
            original_type1 = P @ original @ P.T
            if reconstructed is None:
                return original_type1
            else:
                rc_type1 = P @ reconstructed @ P.T
                return np.absolute(original_type1 - rc_type1)
def cross_averaging_kernel(self, event: int, original: np.ndarray, reconstructed: np.ndarray, covariance: Covariance, type2=False, avk=None) -> np.ndarray:
assert avk is not None
P = covariance.traf()
s_cov = self.assumed_covariance_temperature(event)
if type2:
# type 2 error
C = covariance.c_by_avk(avk)
original_type2 = C @ P @ original
if reconstructed is None:
# original error
return original_type2 @ s_cov @ original_type2.T
# reconstruction error
rc_type2 = C @ P @ reconstructed
return self.smoothing_error(original_type2, rc_type2, s_cov)
else:
# type 1 error
original_type1 = P @ original
if reconstructed is None:
# original error
return original_type1 @ s_cov @ original_type1.T
else:
# reconstruction error
rc_type1 = P @ reconstructed
return self.smoothing_error(original_type1, rc_type1, s_cov)
def assumed_covariance(self, event: int) -> np.ndarray:
"""Assumed covariance for both H2O and HDO"""
nol = self.nol.data[event]
amp_H2O, amp_dD = self.amplitude(event)
sig = self.sigma(event)
Sa_ = np.zeros([2*nol, 2*nol])
# Sa H2O
Sa_[:nol, :nol] = self.construct_covariance_matrix(event, amp_H2O, sig)
# Sa delD
Sa_[nol:, nol:] = self.construct_covariance_matrix(event, amp_dD, sig)
return Sa_
def amplitude(self, event):
"""Calculate amplitude for H2O and HDO
:return: (amp_H2O, amp_dD)
"""
nol = self.nol.data[event]
alt = self.alt.data[event, :nol]
alt_trop = self.alt_trop.data[event]
amp_H2O = np.ndarray(nol)
amp_dD = np.ndarray(nol)
for i in range(nol):
if alt[i] < 5000.:
amp_H2O[i] = 0.75 * (1 + alt[i] / 5000)
amp_dD[i] = 0.09 * (1 + alt[i] / 5000)
elif 5000. <= alt[i] < alt_trop:
amp_H2O[i] = 1.5
amp_dD[i] = 0.18
elif alt_trop <= alt[i] < self.alt_strat:
amp_H2O[i] = 1.5 - 1.2 * \
(alt[i] - alt_trop) / (self.alt_strat - alt_trop)
amp_dD[i] = 0.18 - 0.12 * \
(alt[i] - alt_trop) / (self.alt_strat - alt_trop)
elif alt[i] >= self.alt_strat:
amp_H2O[i] = 0.3
amp_dD[i] = 0.06
else:
raise ValueError(f'Invalid altitude at {event}')
return amp_H2O, amp_dD
class GreenhouseGas(ErrorEstimation):
levels_of_interest = [-6, -10, -19]
def averaging_kernel(self, event: int, original: np.ndarray, reconstructed: np.ndarray, covariance: Covariance, type2=False, avk=None) -> np.ndarray:
assert not type2
if reconstructed is None:
# original error
reconstructed = np.identity(covariance.nol * 2)
s_cov = self.assumed_covariance(event)
return self.smoothing_error(original, reconstructed, s_cov)
def cross_averaging_kernel(self, event: int, original: np.ndarray, reconstructed: np.ndarray, covariance: Covariance, type2=False, avk=None) -> np.ndarray:
assert not type2
s_cov = self.assumed_covariance_temperature(event)
if reconstructed is None:
# original error
return original @ s_cov @ original.T
return self.smoothing_error(original, reconstructed, s_cov)
def noise_matrix(self, event: int, original: np.ndarray, reconstructed: np.ndarray, covariance: Covariance, type2=False, avk=None) -> np.ndarray:
if reconstructed is None:
return original
else:
return np.absolute(original - reconstructed)
def assumed_covariance(self, event) -> np.ndarray:
amp = self.amplitude(event)
sig = self.sigma(event)
s_cov = self.construct_covariance_matrix(event, amp, sig)
nol = self.nol.data[event]
s_cov_ghg = np.zeros((2 * nol, 2 * nol))
s_cov_ghg[:nol, :nol] = s_cov
s_cov_ghg[nol:, nol:] = s_cov
return s_cov_ghg
def amplitude(self, event) -> np.ndarray:
"""Amplitude for GHG"""
nol = self.nol.data[event]
alt = self.alt.data[event, :nol]
alt_trop = self.alt_trop.data[event]
amp = np.ndarray((nol))
for i in range(nol):
if alt[i] < alt_trop:
amp[i] = 0.1
elif alt_trop <= alt[i] < self.alt_strat:
amp[i] = 0.1 + (alt[i] - alt_trop) * \
((0.25 - 0.1)/(self.alt_strat - alt_trop))
elif alt[i] >= self.alt_strat:
amp[i] = 0.25
else:
raise ValueError('Invalid altitude')
return amp
class NitridAcid(ErrorEstimation):  # nitric acid (HNO3); class name kept as in the original API
levels_of_interest = [-6]
def averaging_kernel(self, event: int, original: np.ndarray, reconstructed: np.ndarray, covariance: Covariance, type2=False, avk=None) -> np.ndarray:
assert not type2
if reconstructed is None:
# original error
reconstructed = np.identity(covariance.nol)
s_cov = self.assumed_covariance(event)
return self.smoothing_error(original, reconstructed, s_cov)
def cross_averaging_kernel(self, event: int, original: np.ndarray, reconstructed: np.ndarray, covariance: Covariance, type2=False, avk=None) -> np.ndarray:
s_cov = self.assumed_covariance_temperature(event)
if reconstructed is None:
# original error
return original @ s_cov @ original.T
return self.smoothing_error(original, reconstructed, s_cov)
def noise_matrix(self, event: int, original: np.ndarray, reconstructed: np.ndarray, covariance: Covariance, type2=False, avk=None) -> np.ndarray:
if reconstructed is None:
return original
else:
return np.absolute(original - reconstructed)
def assumed_covariance(self, event) -> np.ndarray:
amp = self.amplitude(event)
sig = self.sigma(event)
return self.construct_covariance_matrix(event, amp, sig)
def amplitude(self, event: int):
"""Amplitude of HNO3"""
nol = self.nol.data[event]
alt = self.alt.data[event, :nol]
alt_trop = self.alt_trop.data[event]
amp = np.ndarray((nol))
for i in range(nol):
# surface is more than 4km below tropopause
if alt[0] < alt_trop - 4000:
            # higher variance in valleys due to human-made emissions
if alt[i] < alt_trop - 4000:
amp[i] = 2.4 + (alt[i] - alt[0]) * \
((1.2 - 2.4)/(alt_trop - 4000 - alt[0]))
elif alt_trop - 4000 <= alt[i] < alt_trop + 8000:
amp[i] = 1.2
elif alt_trop + 8000 <= alt[i] < 50000:
amp[i] = 1.2 + (alt[i] - (alt_trop + 8000)) * \
((0.3-1.2) / (50000 - (alt_trop + 8000)))
elif alt[i] >= 50000:
amp[i] = 0.3
else:
raise ValueError('Invalid altitude')
else:
# at higher altitudes covariance is lower
if alt_trop - 4000 <= alt[i] < alt_trop + 8000:
amp[i] = 1.2
                elif alt_trop + 8000 <= alt[i] < 50000:
amp[i] = 1.2 + (alt[i] - (alt_trop + 8000)) * \
((0.3 - 1.2)/(50000 - (alt_trop + 8000)))
elif alt[i] >= 50000:
amp[i] = 0.3
else:
raise ValueError('Invalid altitude')
return amp
class AtmosphericTemperature(ErrorEstimation):
# zero means surface
levels_of_interest = [0, -10, -19]
def averaging_kernel(self, event: int, original: np.ndarray, reconstructed: np.ndarray, covariance: Covariance, type2=False, avk=None):
assert not type2
if reconstructed is None:
reconstructed = np.identity(covariance.nol)
s_cov = self.assumed_covariance_temperature(event)
return self.smoothing_error(original, reconstructed, s_cov)
def noise_matrix(self, event: int, original: np.ndarray, reconstructed: np.ndarray, covariance: Covariance, type2=False, avk=None):
assert not type2
if reconstructed is None:
return original
else:
return np.absolute(original - reconstructed)
| 41.278902
| 160
| 0.572729
|
from functools import partial
import math
import os
import luigi
import numpy as np
import pandas as pd
from netCDF4 import Dataset, Group, Variable
from sklearn.model_selection import ParameterGrid
from iasi.composition import Composition
from iasi.compression import CompressDataset, SelectSingleVariable, DecompressDataset
from iasi.file import MoveVariables, FileTask
from iasi.metrics import Covariance
from iasi.quadrant import Quadrant, AssembleFourQuadrants
from iasi.util import CustomTask
import logging
logger = logging.getLogger(__name__)
class EvaluationTask(FileTask):
gases = luigi.ListParameter()
variables = luigi.ListParameter()
threshold_values = luigi.ListParameter(default=[1e-2, 1e-3, 1e-4, 1e-5])
ancestor = None
def requires(self):
compression_parameter = {
'ancestor': [self.ancestor],
'file': [self.file],
'dst': [self.dst],
'threshold': self.threshold_values,
'gas': self.gases,
'variable': self.variables
}
compressed_param_grid = list(ParameterGrid(compression_parameter))
tasks = [SelectSingleVariable(**params)
for params in compressed_param_grid]
uncompressed_parameter = {
'ancestor': ['MoveVariables'],
'file': [self.file],
'dst': [self.dst],
'threshold': [0],
'gas': self.gases,
'variable': self.variables
}
uncompressed_param_grid = list(ParameterGrid(uncompressed_parameter))
single_variables = tasks + \
[SelectSingleVariable(**params)
for params in uncompressed_param_grid]
        filtered = filter(
            lambda task: not (task.gas == 'Tatm' and task.variable == 'Tatmxavk'),
            single_variables)
return {
'single': filtered,
'original': MoveVariables(dst=self.dst, file=self.file)
}
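    # Note: sklearn's ParameterGrid expands a dict of lists into the cross
    # product of all values. A minimal sketch with hypothetical values (not
    # the actual pipeline parameters):
    #
    #   >>> from sklearn.model_selection import ParameterGrid
    #   >>> list(ParameterGrid({'gas': ['WV'], 'threshold': [1e-2, 1e-3]}))
    #   [{'gas': 'WV', 'threshold': 0.01}, {'gas': 'WV', 'threshold': 0.001}]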
class EvaluationCompressionSize(EvaluationTask):
ancestor = 'CompressDataset'
def output_directory(self):
return 'compression-summary'
def output_extension(self):
return '.csv'
def size_in_kb(self, file):
return int(os.path.getsize(file) / (1000))
def run(self):
df = pd.DataFrame()
for task, input in zip(self.requires()['single'], self.input()['single']):
df = df.append({
'gas': task.gas,
'variable': task.variable,
'ancestor': task.ancestor,
'size': self.size_in_kb(input.path),
'threshold': task.threshold
}, ignore_index=True)
with self.output().temporary_path() as target:
df.to_csv(target, index=False)
class EvaluationErrorEstimation(FileTask):
file = luigi.Parameter()
    gases = luigi.ListParameter()
    variables = luigi.ListParameter()
thresholds = luigi.ListParameter(default=[1e-3])
def output_directory(self):
return 'error-estimation'
def output_extension(self):
return '.csv'
def requires(self):
parameter = {
'file': [self.file],
'dst': [self.dst],
'thresholds': [self.thresholds],
'gas': self.gases,
'variable': self.variables,
'log_file': [self.log_file]
}
parameter_grid = ParameterGrid(parameter)
        parameter_grid = filter(
            lambda params: not (params['gas'] == 'Tatm' and params['variable'] == 'Tatmxavk'),
            parameter_grid)
return [VariableErrorEstimation(**params) for params in parameter_grid]
def run(self):
report = pd.DataFrame()
for task in self.input():
with task.open() as file:
task_report = pd.read_csv(file)
report = report.append(task_report)
with self.output().temporary_path() as target:
report.to_csv(target, index=False)
class VariableErrorEstimation(FileTask):
gas = luigi.Parameter()
variable = luigi.Parameter()
thresholds = luigi.ListParameter(default=[1e-3])
def output_extension(self):
return '.csv'
def requires(self):
compressed = [DecompressDataset(
dst=self.dst,
file=self.file,
threshold=threshold,
log_file=self.log_file,
compress_upstream=True
) for threshold in self.thresholds]
original = MoveVariables(
dst=self.dst, file=self.file, log_file=self.log_file)
return {
'compressed': compressed,
'original': original
}
def run(self):
path = f'/state/{self.gas}/{self.variable}'
logger.info('Starting error estimation for %s', path)
tasks_and_input = list(zip(
self.requires()['compressed'], self.input()['compressed']))
original = Dataset(self.input()['original'].path)
nol = original['atm_nol'][...]
alt = original['atm_altitude'][...]
avk = original['/state/WV/avk'][...]
alt_trop = original['tropopause_altitude'][...]
counter = 0
message = f'Calculate original error for {path}: {counter}/{len(tasks_and_input)}'
logger.info(message)
self.set_status_message(message)
self.set_progress_percentage(int(counter / len(tasks_and_input) * 100))
error_estimation: ErrorEstimation = ErrorEstimation.factory(
self.gas, nol, alt, avk, alt_trop=alt_trop)
variable_report = error_estimation.report_for(
original[path], original[path][...], None, rc_error=False)
variable_report['threshold'] = 0
for task, input in tasks_and_input:
counter += 1
nc = Dataset(input.path)
message = f'Calculating error estimation {counter} of {len(tasks_and_input)} for {path} with threshold {task.threshold}'
logger.info(message)
self.set_status_message(message)
self.set_progress_percentage(
int(counter / len(tasks_and_input) * 100))
reconstructed_values = nc[path][...]
original_values = original[path][...]
report = error_estimation.report_for(
original[path], original_values, reconstructed_values, rc_error=True)
report['threshold'] = task.threshold
variable_report = variable_report.append(report, ignore_index=True)
nc.close()
variable_report['var'] = self.variable
variable_report['gas'] = self.gas
with self.output().temporary_path() as target:
variable_report.to_csv(target, index=False)
original.close()
def output_directory(self):
return os.path.join('error-estimation', self.gas, self.variable)
class ErrorEstimation:
levels_of_interest = []
alt_strat = 25000
@staticmethod
def factory(gas: str, nol, alt, avk, alt_trop=None):
if gas == 'WV':
return WaterVapour(gas, nol, alt, avk, alt_trop, type_two=True)
if gas == 'GHG':
return GreenhouseGas(gas, nol, alt, alt_trop)
if gas == 'HNO3':
return NitridAcid(gas, nol, alt, alt_trop)
if gas == 'Tatm':
return AtmosphericTemperature(gas, nol, alt, alt_trop)
raise ValueError(f'No error estimation implementation for gas {gas}')
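    # A minimal usage sketch (hypothetical netCDF arrays; shapes follow the
    # variables read in VariableErrorEstimation.run):
    #
    #   estimator = ErrorEstimation.factory('GHG', nol, alt, None, alt_trop=alt_trop)
    #   report = estimator.report_for(nc[path], nc[path][...], None, rc_error=False)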
def __init__(self, gas, nol, alt, alt_trop, type_two=False):
self.type_two = type_two
self.nol = nol
self.alt = alt
self.gas = gas
self.alt_trop = alt_trop
def matrix_ok(self, event, path, matrix):
ok = True
if np.ma.is_masked(matrix):
logger.warning(
'event %d contains masked values in %s. skipping...', event, path)
ok = False
if np.isnan(matrix).any():
logger.warning(
'event %d contains nan values in %s. skipping...', event, path)
ok = False
if np.isinf(matrix).any():
logger.warning(
'event %d contains inf values in %s. skipping...', event, path)
ok = False
if np.allclose(matrix, 0, atol=1e-14):
logger.warning(
'event %d contains zero or close to zero values in %s. skipping...', event, path)
ok = False
return ok
def report_for(self, variable: Variable, original, reconstructed, rc_error) -> pd.DataFrame:
result = {
'event': [],
'level_of_interest': [],
'err': [],
'rc_error': [],
'type': []
}
error_estimation_methods = {
'avk': self.averaging_kernel,
'n': self.noise_matrix,
'Tatmxavk': self.cross_averaging_kernel
}
estimation_method = error_estimation_methods.get(variable.name)
if estimation_method is None:
raise ValueError(
f'No error estimation method for variable {variable.name}')
reshaper = Quadrant.for_assembly(self.gas, variable.name, variable)
path = f'/state/{self.gas}/{variable.name}'
for event in range(original.shape[0]):
if np.ma.is_masked(self.nol[event]) or self.nol.data[event] > 29:
continue
nol_event = self.nol.data[event]
if not self.matrix_ok(event, path, self.alt[event, :nol_event]):
continue
covariance = Covariance(nol_event, self.alt[event])
original_event = reshaper.transform(original[event], nol_event)
if not self.matrix_ok(event, path, original_event):
continue
if rc_error:
rc_event = reshaper.transform(reconstructed[event], nol_event)
if not self.matrix_ok(event, path, rc_event):
continue
rc_event = rc_event.data
else:
rc_event = None
if isinstance(self, WaterVapour):
avk_event = AssembleFourQuadrants(
nol_event).transform(self.avk[event], nol_event)
if not self.matrix_ok(event, 'wv_avk', avk_event):
continue
avk_event = avk_event.data
else:
avk_event = None
calc_type_two = self.type_two
while True:
error = estimation_method(event,
original_event.data, rc_event, covariance, type2=calc_type_two, avk=avk_event)
for loi in self.levels_of_interest:
if loi == 0:
level = 0
else:
level = nol_event + loi
if level < 2:
continue
result['event'].append(event)
result['level_of_interest'].append(loi)
result['err'].append(error[level, level])
result['rc_error'].append(rc_error)
result['type'].append(2 if calc_type_two else 1)
if self.gas == 'GHG':
result['event'].append(event)
result['level_of_interest'].append(loi - 29)
result['err'].append(
error[level + nol_event, level + nol_event])
result['rc_error'].append(rc_error)
result['type'].append(2 if calc_type_two else 1)
if not calc_type_two:
break
calc_type_two = False
return pd.DataFrame(result)
def averaging_kernel(self, event: int, original: np.ndarray, reconstructed: np.ndarray, covariance: Covariance, type2=False, avk=None):
raise NotImplementedError
def noise_matrix(self, event: int, original: np.ndarray, reconstructed: np.ndarray, covariance: Covariance, type2=False, avk=None):
raise NotImplementedError
def cross_averaging_kernel(self, event: int, original: np.ndarray, reconstructed: np.ndarray, covariance: Covariance, type2=False, avk=None):
raise NotImplementedError
def smoothing_error(self, actual_matrix, to_compare, assumed_covariance) -> np.ndarray:
return (actual_matrix - to_compare) @ assumed_covariance @ (actual_matrix - to_compare).T
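    # i.e. the smoothing error S = (A - A') S_a (A - A')^T, where A is the
    # actual kernel, A' the comparison kernel and S_a the assumed a-priori
    # covariance.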
def assumed_covariance_temperature(self, event: int) -> np.ndarray:
sig = self.sigma(event)
amp = self.amplitude_temperature(event)
return self.construct_covariance_matrix(event, amp, sig)
def construct_covariance_matrix(self, event, amp: np.ndarray, sig: np.ndarray) -> np.ndarray:
nol = self.nol.data[event]
alt = self.alt.data[event]
sa = np.ndarray((nol, nol))
for i in range(nol):
for j in range(nol):
sa[i, j] = amp[i] * amp[j] * \
np.exp(-((alt[i] - alt[j])*(alt[i] - alt[j])) /
(2 * sig[i] * sig[j]))
return sa
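    # The loop fills sa[i, j] = amp[i] * amp[j] * exp(-(alt[i] - alt[j])^2 /
    # (2 * sig[i] * sig[j])), i.e. Gaussian inter-level correlations scaled by
    # per-level amplitudes. An equivalent vectorized sketch (same inputs):
    #
    #   dz = alt[:nol, None] - alt[None, :nol]
    #   sa = np.outer(amp, amp) * np.exp(-dz ** 2 / (2 * np.outer(sig, sig)))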
def sigma(self, event, f_sigma: float = 0.6) -> np.ndarray:
nol = self.nol.data[event]
alt = self.alt.data[event]
alt_trop = self.alt_trop[event]
sig = np.ndarray(nol)
for i in range(nol):
if alt[i] < alt_trop:
sig[i] = 2500 + (alt[i] - alt[0]) * \
((5000-2500)/(alt_trop-alt[0]))
            elif alt_trop <= alt[i] < self.alt_strat:
                sig[i] = 5000 + (alt[i] - alt_trop) * \
                    ((10000 - 5000) / (self.alt_strat - alt_trop))
            else:
                sig[i] = 10000
return sig * f_sigma
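    # Correlation lengths are in metres: 2.5 km at the surface, growing
    # linearly to 5 km at the tropopause and to 10 km at alt_strat, all
    # scaled by f_sigma.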
def amplitude(self, event):
raise NotImplementedError
def amplitude_temperature(self, event) -> np.ndarray:
nol = self.nol.data[event]
alt = self.alt.data[event, :nol]
alt_trop = self.alt_trop.data[event]
amp = np.ndarray(nol)
for i in range(nol):
if alt[0]+4000 < alt_trop:
if alt[i] <= alt[0]+4000:
amp[i] = 2.0 - 1.0 * (alt[i] - alt[0]) / 4000
elif alt[i] >= alt[0]+4000 and alt[i] <= alt_trop:
amp[i] = 1.
elif alt[i] > alt_trop and alt[i] <= alt_trop+5000:
amp[i] = 1.0 + 0.5 * (alt[i] - alt_trop) / 5000
elif alt[i] > alt_trop+5000:
amp[i] = 1.5
else:
if alt[i] < alt_trop:
amp[i] = 2.0 - 1.0 * (alt[i] - alt[0]) / \
(alt_trop - alt[0])
elif alt[i] == alt_trop:
amp[i] = 1.
elif alt[i] > alt_trop and alt[i] <= alt_trop+5000:
amp[i] = 1.0 + 0.5 * (alt[i] - alt_trop) / 5000
elif alt[i] > alt_trop+5000:
amp[i] = 1.5
return amp
class WaterVapour(ErrorEstimation):
levels_of_interest = [-6, -16, -19]
def __init__(self, gas, nol, alt, avk, alt_trop, type_two=True):
super().__init__(gas, nol, alt, alt_trop, type_two=type_two)
self.avk = avk
def averaging_kernel(self, event: int, original: np.ndarray, reconstructed: np.ndarray, covariance: Covariance, type2=False, avk=None) -> np.ndarray:
if not np.allclose(original, avk):
            logger.warning('There are differences in original parameter and avk')
s_cov = self.assumed_covariance(event)
nol = self.nol.data[event]
if type2:
original_type2 = covariance.type2_of(original)
if reconstructed is None:
return self.smoothing_error(original_type2, np.identity(2 * nol), s_cov)
else:
rc_type2 = covariance.type2_of(reconstructed)
return self.smoothing_error(original_type2, rc_type2, s_cov)
else:
original_type1 = covariance.type1_of(original)
if reconstructed is None:
return self.smoothing_error(
original_type1, np.identity(2 * nol), s_cov)
else:
rc_type1 = covariance.type1_of(reconstructed)
return self.smoothing_error(original_type1, rc_type1, s_cov)
    def noise_matrix(self, event: int, original: np.ndarray, reconstructed: np.ndarray, covariance: Covariance, type2=False, avk=None) -> np.ndarray:
assert avk is not None
P = covariance.traf()
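        # P and (for type 2) C are transformation matrices supplied by
        # iasi.metrics.Covariance; the noise covariance below is propagated as
        # S' = (C P) S (C P)^T (type 2) or S' = P S P^T (type 1).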
if type2:
C = covariance.c_by_avk(avk)
original_type2 = C @ P @ original @ P.T @ C.T
            if reconstructed is None:
                return original_type2
            else:
                rc_type2 = C @ P @ reconstructed @ P.T @ C.T
return np.absolute(original_type2 - rc_type2)
else:
original_type1 = P @ original @ P.T
            if reconstructed is None:
                return original_type1
            else:
                rc_type1 = P @ reconstructed @ P.T
return np.absolute(original_type1 - rc_type1)
def cross_averaging_kernel(self, event: int, original: np.ndarray, reconstructed: np.ndarray, covariance: Covariance, type2=False, avk=None) -> np.ndarray:
assert avk is not None
P = covariance.traf()
s_cov = self.assumed_covariance_temperature(event)
if type2:
C = covariance.c_by_avk(avk)
original_type2 = C @ P @ original
if reconstructed is None:
return original_type2 @ s_cov @ original_type2.T
rc_type2 = C @ P @ reconstructed
return self.smoothing_error(original_type2, rc_type2, s_cov)
else:
original_type1 = P @ original
if reconstructed is None:
return original_type1 @ s_cov @ original_type1.T
else:
rc_type1 = P @ reconstructed
return self.smoothing_error(original_type1, rc_type1, s_cov)
def assumed_covariance(self, event: int) -> np.ndarray:
nol = self.nol.data[event]
amp_H2O, amp_dD = self.amplitude(event)
sig = self.sigma(event)
Sa_ = np.zeros([2*nol, 2*nol])
Sa_[:nol, :nol] = self.construct_covariance_matrix(event, amp_H2O, sig)
Sa_[nol:, nol:] = self.construct_covariance_matrix(event, amp_dD, sig)
return Sa_
def amplitude(self, event):
nol = self.nol.data[event]
alt = self.alt.data[event, :nol]
alt_trop = self.alt_trop.data[event]
amp_H2O = np.ndarray(nol)
amp_dD = np.ndarray(nol)
for i in range(nol):
if alt[i] < 5000.:
amp_H2O[i] = 0.75 * (1 + alt[i] / 5000)
amp_dD[i] = 0.09 * (1 + alt[i] / 5000)
elif 5000. <= alt[i] < alt_trop:
amp_H2O[i] = 1.5
amp_dD[i] = 0.18
elif alt_trop <= alt[i] < self.alt_strat:
amp_H2O[i] = 1.5 - 1.2 * \
(alt[i] - alt_trop) / (self.alt_strat - alt_trop)
amp_dD[i] = 0.18 - 0.12 * \
(alt[i] - alt_trop) / (self.alt_strat - alt_trop)
elif alt[i] >= self.alt_strat:
amp_H2O[i] = 0.3
amp_dD[i] = 0.06
else:
raise ValueError(f'Invalid altitude at {event}')
return amp_H2O, amp_dD
class GreenhouseGas(ErrorEstimation):
levels_of_interest = [-6, -10, -19]
def averaging_kernel(self, event: int, original: np.ndarray, reconstructed: np.ndarray, covariance: Covariance, type2=False, avk=None) -> np.ndarray:
assert not type2
if reconstructed is None:
reconstructed = np.identity(covariance.nol * 2)
s_cov = self.assumed_covariance(event)
return self.smoothing_error(original, reconstructed, s_cov)
def cross_averaging_kernel(self, event: int, original: np.ndarray, reconstructed: np.ndarray, covariance: Covariance, type2=False, avk=None) -> np.ndarray:
assert not type2
s_cov = self.assumed_covariance_temperature(event)
if reconstructed is None:
return original @ s_cov @ original.T
return self.smoothing_error(original, reconstructed, s_cov)
def noise_matrix(self, event: int, original: np.ndarray, reconstructed: np.ndarray, covariance: Covariance, type2=False, avk=None) -> np.ndarray:
if reconstructed is None:
return original
else:
return np.absolute(original - reconstructed)
def assumed_covariance(self, event) -> np.ndarray:
amp = self.amplitude(event)
sig = self.sigma(event)
s_cov = self.construct_covariance_matrix(event, amp, sig)
nol = self.nol.data[event]
s_cov_ghg = np.zeros((2 * nol, 2 * nol))
s_cov_ghg[:nol, :nol] = s_cov
s_cov_ghg[nol:, nol:] = s_cov
return s_cov_ghg
def amplitude(self, event) -> np.ndarray:
nol = self.nol.data[event]
alt = self.alt.data[event, :nol]
alt_trop = self.alt_trop.data[event]
amp = np.ndarray((nol))
for i in range(nol):
if alt[i] < alt_trop:
amp[i] = 0.1
elif alt_trop <= alt[i] < self.alt_strat:
amp[i] = 0.1 + (alt[i] - alt_trop) * \
((0.25 - 0.1)/(self.alt_strat - alt_trop))
elif alt[i] >= self.alt_strat:
amp[i] = 0.25
else:
raise ValueError('Invalid altitude')
return amp
class NitridAcid(ErrorEstimation):
levels_of_interest = [-6]
def averaging_kernel(self, event: int, original: np.ndarray, reconstructed: np.ndarray, covariance: Covariance, type2=False, avk=None) -> np.ndarray:
assert not type2
if reconstructed is None:
reconstructed = np.identity(covariance.nol)
s_cov = self.assumed_covariance(event)
return self.smoothing_error(original, reconstructed, s_cov)
def cross_averaging_kernel(self, event: int, original: np.ndarray, reconstructed: np.ndarray, covariance: Covariance, type2=False, avk=None) -> np.ndarray:
s_cov = self.assumed_covariance_temperature(event)
if reconstructed is None:
return original @ s_cov @ original.T
return self.smoothing_error(original, reconstructed, s_cov)
def noise_matrix(self, event: int, original: np.ndarray, reconstructed: np.ndarray, covariance: Covariance, type2=False, avk=None) -> np.ndarray:
if reconstructed is None:
return original
else:
return np.absolute(original - reconstructed)
def assumed_covariance(self, event) -> np.ndarray:
amp = self.amplitude(event)
sig = self.sigma(event)
return self.construct_covariance_matrix(event, amp, sig)
def amplitude(self, event: int):
nol = self.nol.data[event]
alt = self.alt.data[event, :nol]
alt_trop = self.alt_trop.data[event]
amp = np.ndarray((nol))
for i in range(nol):
if alt[0] < alt_trop - 4000:
if alt[i] < alt_trop - 4000:
amp[i] = 2.4 + (alt[i] - alt[0]) * \
((1.2 - 2.4)/(alt_trop - 4000 - alt[0]))
elif alt_trop - 4000 <= alt[i] < alt_trop + 8000:
amp[i] = 1.2
elif alt_trop + 8000 <= alt[i] < 50000:
amp[i] = 1.2 + (alt[i] - (alt_trop + 8000)) * \
((0.3-1.2) / (50000 - (alt_trop + 8000)))
elif alt[i] >= 50000:
amp[i] = 0.3
else:
raise ValueError('Invalid altitude')
else:
# at higher altitudes covariance is lower
if alt_trop - 4000 <= alt[i] < alt_trop + 8000:
amp[i] = 1.2
                elif alt_trop + 8000 <= alt[i] < 50000:
amp[i] = 1.2 + (alt[i] - (alt_trop + 8000)) * \
((0.3 - 1.2)/(50000 - (alt_trop + 8000)))
elif alt[i] >= 50000:
amp[i] = 0.3
else:
raise ValueError('Invalid altitude')
return amp
class AtmosphericTemperature(ErrorEstimation):
# zero means surface
levels_of_interest = [0, -10, -19]
def averaging_kernel(self, event: int, original: np.ndarray, reconstructed: np.ndarray, covariance: Covariance, type2=False, avk=None):
assert not type2
if reconstructed is None:
reconstructed = np.identity(covariance.nol)
s_cov = self.assumed_covariance_temperature(event)
return self.smoothing_error(original, reconstructed, s_cov)
def noise_matrix(self, event: int, original: np.ndarray, reconstructed: np.ndarray, covariance: Covariance, type2=False, avk=None):
assert not type2
if reconstructed is None:
return original
else:
return np.absolute(original - reconstructed)
| true
| true
|
1c4190c63cbc40bfd1c9d373ca0d6076c0b40b36
| 2,082
|
py
|
Python
|
migrations/0002_pymbawallpage_pymbawallpagelayers.py
|
andywar65/pymba
|
119cbab973638c127e6e736b08fdb8235a7537a6
|
[
"BSD-2-Clause"
] | 3
|
2020-04-20T05:34:30.000Z
|
2020-11-04T07:25:26.000Z
|
migrations/0002_pymbawallpage_pymbawallpagelayers.py
|
andywar65/pymba
|
119cbab973638c127e6e736b08fdb8235a7537a6
|
[
"BSD-2-Clause"
] | null | null | null |
migrations/0002_pymbawallpage_pymbawallpagelayers.py
|
andywar65/pymba
|
119cbab973638c127e6e736b08fdb8235a7537a6
|
[
"BSD-2-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2018-02-04 19:28
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import modelcluster.fields
class Migration(migrations.Migration):
dependencies = [
('wagtailimages', '0019_delete_filter'),
('wagtailcore', '0040_page_draft_title'),
('pymba', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='PymbaWallPage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('intro', models.CharField(blank=True, max_length=250, null=True)),
('pattern', models.BooleanField(default=False)),
('color', models.CharField(default='white', max_length=250)),
('image', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image')),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='PymbaWallPageLayers',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sort_order', models.IntegerField(blank=True, editable=False, null=True)),
('material', models.CharField(default='brick', max_length=250)),
('thickness', models.CharField(default='0', max_length=250)),
('weight', models.CharField(default='0', max_length=250)),
('page', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='wall_layers', to='pymba.PymbaWallPage')),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
),
]
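# Note: the ParentalKey together with the sort_order field and the
# ordering=['sort_order'] option follows the wagtail/modelcluster Orderable
# convention, letting PymbaWallPageLayers rows be edited inline on
# PymbaWallPage and kept in a user-defined order.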
| 42.489796
| 191
| 0.595581
|
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import modelcluster.fields
class Migration(migrations.Migration):
dependencies = [
('wagtailimages', '0019_delete_filter'),
('wagtailcore', '0040_page_draft_title'),
('pymba', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='PymbaWallPage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('intro', models.CharField(blank=True, max_length=250, null=True)),
('pattern', models.BooleanField(default=False)),
('color', models.CharField(default='white', max_length=250)),
('image', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image')),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='PymbaWallPageLayers',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sort_order', models.IntegerField(blank=True, editable=False, null=True)),
('material', models.CharField(default='brick', max_length=250)),
('thickness', models.CharField(default='0', max_length=250)),
('weight', models.CharField(default='0', max_length=250)),
('page', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='wall_layers', to='pymba.PymbaWallPage')),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
),
]
| true
| true
|
1c4191007d0a6c4b0419d38678ca62d430bd0f52
| 184
|
py
|
Python
|
Scripts/django-admin.py
|
dragonrathony/zed_market
|
c73f17501608c8fe86692c3c4f6e03fc8ba03286
|
[
"bzip2-1.0.6"
] | 1
|
2020-06-17T13:45:54.000Z
|
2020-06-17T13:45:54.000Z
|
Scripts/django-admin.py
|
Honey4251996/zed_market
|
c73f17501608c8fe86692c3c4f6e03fc8ba03286
|
[
"bzip2-1.0.6"
] | 11
|
2021-03-19T07:55:39.000Z
|
2022-03-12T00:34:55.000Z
|
Scripts/django-admin.py
|
Honey4251996/zed_market
|
c73f17501608c8fe86692c3c4f6e03fc8ba03286
|
[
"bzip2-1.0.6"
] | null | null | null |
#!d:\projects\eric\python\django\zed-market\dev\zed-market\scripts\python.exe
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
| 30.666667
| 77
| 0.788043
|
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
| true
| true
|
1c41911f837ca62f3884f8c0ec85b9f5430f4763
| 298
|
py
|
Python
|
tests/enums/test_service_module_name.py
|
jbpratt78/mypy_boto3_builder
|
be4020782369b34e35f3b6a2117f00d947f3ae24
|
[
"MIT"
] | null | null | null |
tests/enums/test_service_module_name.py
|
jbpratt78/mypy_boto3_builder
|
be4020782369b34e35f3b6a2117f00d947f3ae24
|
[
"MIT"
] | null | null | null |
tests/enums/test_service_module_name.py
|
jbpratt78/mypy_boto3_builder
|
be4020782369b34e35f3b6a2117f00d947f3ae24
|
[
"MIT"
] | null | null | null |
from mypy_boto3_builder.enums.service_module_name import ServiceModuleName
class TestServiceModuleName:
def test_properties(self) -> None:
assert ServiceModuleName.paginator.file_name == "paginator.py"
assert ServiceModuleName.paginator.template_name == "paginator.py.jinja2"
| 37.25
| 81
| 0.788591
|
from mypy_boto3_builder.enums.service_module_name import ServiceModuleName
class TestServiceModuleName:
def test_properties(self) -> None:
assert ServiceModuleName.paginator.file_name == "paginator.py"
assert ServiceModuleName.paginator.template_name == "paginator.py.jinja2"
| true
| true
|
1c419190ff52c84309e46af3e38f91362aba2c9c
| 2,072
|
py
|
Python
|
clients/gui/gui.py
|
adlerweb/pyprologix
|
818ec301aee5ff051c415f03308acee96d3c5cd8
|
[
"MIT"
] | 1
|
2021-01-22T14:35:29.000Z
|
2021-01-22T14:35:29.000Z
|
clients/gui/gui.py
|
adlerweb/pyprologix
|
818ec301aee5ff051c415f03308acee96d3c5cd8
|
[
"MIT"
] | null | null | null |
clients/gui/gui.py
|
adlerweb/pyprologix
|
818ec301aee5ff051c415f03308acee96d3c5cd8
|
[
"MIT"
] | null | null | null |
# gui.py - adapted from the pygubu "helloworld" example
import tkinter as tk
import pygubu
# Folder with hp3478a.py/prologix.py must be in PYTHONPATH
# Alternatively copy them to this folder
from hp3478a import hp3478a
from time import sleep
port = "/dev/ttyACM0"
test = hp3478a(23, port, debug=True)
test.getStatus()
class HelloWorldApp:
def __init__(self):
#1: Create a builder
self.builder = builder = pygubu.Builder()
#2: Load an ui file
builder.add_from_file('../gui.ui')
#3: Create the mainwindow
self.mainwindow = builder.get_object('Frame_1')
self.builder.get_object('Label_1').after(1000,self.update)
def run(self):
self.mainwindow.mainloop()
def update(self):
global test
labelM = self.builder.get_object('Label_1')
labelF = self.builder.get_object('Label_2')
labelR = self.builder.get_object('Label_3')
measure = float(test.getMeasure())
suffix = ""
if measure < 1:
measure = measure * 1000
suffix = "m"
print("<1000 - " + suffix + " - now: " + str(measure))
if measure < 1:
measure = measure * 1000
suffix = "µ"
print("<1000 - " + suffix + " - now: " + str(measure))
if measure < 1:
measure = measure * 1000
suffix = "n"
print("<1000 - " + suffix + " - now: " + str(measure))
elif measure > 1000:
measure = measure / 1000
suffix = "k"
if measure > 1000:
measure = measure / 1000
suffix = "M"
if measure > 1000:
measure = measure / 1000
suffix = "G"
measure = str(measure) + suffix
labelM.configure(text = measure)
labelF.configure(text = test.getFunction())
labelR.configure(text = test.getRange())
labelM.after(1000,self.update)
if __name__ == '__main__':
app = HelloWorldApp()
app.run()
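# A more compact SI-prefix helper could replace the cascaded scaling in
# update(); a sketch (hypothetical, not part of the original GUI):
#
#   import math
#   def si_format(value: float) -> str:
#       prefixes = {-3: 'n', -2: 'µ', -1: 'm', 0: '', 1: 'k', 2: 'M', 3: 'G'}
#       exp = 0 if value == 0 else max(-3, min(3, math.floor(math.log10(abs(value)) / 3)))
#       return f"{value / 1000 ** exp:g}{prefixes[exp]}"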
| 28.383562
| 74
| 0.53668
|
import tkinter as tk
import pygubu
from hp3478a import hp3478a
from time import sleep
port = "/dev/ttyACM0"
test = hp3478a(23, port, debug=True)
test.getStatus()
class HelloWorldApp:
def __init__(self):
self.builder = builder = pygubu.Builder()
builder.add_from_file('../gui.ui')
self.mainwindow = builder.get_object('Frame_1')
self.builder.get_object('Label_1').after(1000,self.update)
def run(self):
self.mainwindow.mainloop()
def update(self):
global test
labelM = self.builder.get_object('Label_1')
labelF = self.builder.get_object('Label_2')
labelR = self.builder.get_object('Label_3')
measure = float(test.getMeasure())
suffix = ""
if measure < 1:
measure = measure * 1000
suffix = "m"
print("<1000 - " + suffix + " - now: " + str(measure))
if measure < 1:
measure = measure * 1000
suffix = "µ"
print("<1000 - " + suffix + " - now: " + str(measure))
if measure < 1:
measure = measure * 1000
suffix = "n"
print("<1000 - " + suffix + " - now: " + str(measure))
elif measure > 1000:
measure = measure / 1000
suffix = "k"
if measure > 1000:
measure = measure / 1000
suffix = "M"
if measure > 1000:
measure = measure / 1000
suffix = "G"
measure = str(measure) + suffix
labelM.configure(text = measure)
labelF.configure(text = test.getFunction())
labelR.configure(text = test.getRange())
labelM.after(1000,self.update)
if __name__ == '__main__':
app = HelloWorldApp()
app.run()
| true
| true
|
1c4191c1d2a32493fac86efbe6f1aa2a6fee1ce3
| 203
|
py
|
Python
|
cassiopeia/datastores/__init__.py
|
mertkutay/cassiopeia
|
1c4005f78f216322d179f3465303d105261beab2
|
[
"MIT"
] | null | null | null |
cassiopeia/datastores/__init__.py
|
mertkutay/cassiopeia
|
1c4005f78f216322d179f3465303d105261beab2
|
[
"MIT"
] | null | null | null |
cassiopeia/datastores/__init__.py
|
mertkutay/cassiopeia
|
1c4005f78f216322d179f3465303d105261beab2
|
[
"MIT"
] | null | null | null |
from .cache import Cache
from .riotapi import RiotAPI
from .ddragon import DDragon
from .ghost import UnloadedGhostStore
from .merakianalyticscdn import MerakiAnalyticsCDN
from .lolwikia import LolWikia
| 29
| 50
| 0.852217
|
from .cache import Cache
from .riotapi import RiotAPI
from .ddragon import DDragon
from .ghost import UnloadedGhostStore
from .merakianalyticscdn import MerakiAnalyticsCDN
from .lolwikia import LolWikia
| true
| true
|
1c41931e27ca720f8e8be0a8a09a2bbb093b36d0
| 11,055
|
py
|
Python
|
cert_tools.py
|
fizyr/ca-scripts
|
2b558a04f51392f0884c4c9abf3df9dc0dcf4a7a
|
[
"BSD-3-Clause"
] | null | null | null |
cert_tools.py
|
fizyr/ca-scripts
|
2b558a04f51392f0884c4c9abf3df9dc0dcf4a7a
|
[
"BSD-3-Clause"
] | null | null | null |
cert_tools.py
|
fizyr/ca-scripts
|
2b558a04f51392f0884c4c9abf3df9dc0dcf4a7a
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright 2017-2019 Fizyr B.V. - https://fizyr.com
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
from datetime import datetime, timedelta
from pathlib import Path
from typing import Iterable, List, Tuple, Optional, Generator
from cryptography import x509
from cryptography.x509.oid import NameOID, ExtendedKeyUsageOID
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.primitives import serialization
import util
crypto_backend = default_backend()
def name_key_to_oid(key: str) -> Optional[NameOID]:
key = key.upper()
if key == 'C':
return NameOID.COUNTRY_NAME
elif key == 'ST':
return NameOID.STATE_OR_PROVINCE_NAME
elif key == 'L':
return NameOID.LOCALITY_NAME
elif key == 'DC':
return NameOID.DOMAIN_COMPONENT
elif key == 'O':
return NameOID.ORGANIZATION_NAME
elif key == 'OU':
return NameOID.ORGANIZATIONAL_UNIT_NAME
elif key == 'CN':
return NameOID.COMMON_NAME
else:
return None
def name_oid_to_key(oid: NameOID) -> Optional[str]:
if oid == NameOID.COUNTRY_NAME:
return 'C'
elif oid == NameOID.STATE_OR_PROVINCE_NAME:
return 'ST'
elif oid == NameOID.LOCALITY_NAME:
return 'L'
elif oid == NameOID.DOMAIN_COMPONENT:
return 'DC'
elif oid == NameOID.ORGANIZATION_NAME:
return 'O'
elif oid == NameOID.ORGANIZATIONAL_UNIT_NAME:
return 'OU'
elif oid == NameOID.COMMON_NAME:
return 'CN'
else:
return None
def parse_name_attribute(string: str) -> x509.NameAttribute:
string = string.strip()
key, sep, value = string.partition('=')
if key:
key = key.strip()
if value:
value = value.strip()
if not key or not sep or not value:
        raise ValueError('invalid RDN syntax: should be key=value, got {}'.format(string))
oid = name_key_to_oid(key)
if not oid:
        raise ValueError('failed to parse RDN: unknown component: {}'.format(key))
return x509.NameAttribute(oid, value)
def parse_dn(value: str) -> x509.Name:
return x509.Name(map(lambda x: x509.RelativeDistinguishedName([parse_name_attribute(x)]), value.split(',')))
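# Usage sketch: parse_dn('CN=host.example.com, O=Example') yields an
# x509.Name with one single-attribute RDN per comma-separated component;
# format_name() below is its inverse for such names.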
def format_attribute(attrib: x509.NameAttribute):
key = name_oid_to_key(attrib.oid)
if not key:
raise ValueError('unknown name attribute: {}'.format(attrib))
return '{}={}'.format(key, attrib.value)
def format_rdn(rdn: x509.RelativeDistinguishedName):
rdn = list(rdn)
if len(rdn) == 1:
return format_attribute(rdn[0])
return '{{{}}}'.format(', '.join(map(format_attribute, rdn)))
def format_name(name: x509.Name) -> str:
return ', '.join(map(format_rdn, name.rdns))
def generate_serial() -> int:
return int.from_bytes(os.urandom(20), byteorder='little') % (1 << 59)
def get_subject_alt_names(object) -> List[x509.GeneralName]:
try:
extension = object.extensions.get_extension_for_class(x509.SubjectAlternativeName)
except x509.ExtensionNotFound:
return []
return list(extension.value)
def get_basic_ca_constraint(object) -> Optional[bool]:
try:
extension = object.extensions.get_extension_for_class(x509.BasicConstraints)
except x509.ExtensionNotFound:
return None
return extension.value.ca
def get_dns_names(names: List[x509.GeneralName]) -> List[x509.DNSName]:
return list(filter(lambda x: isinstance(x, x509.DNSName), names))
def get_first_dns_name(names: List[x509.GeneralName]) -> Optional[x509.DNSName]:
try:
return get_dns_names(names)[0]
except IndexError:
return None
def read_serial(file: Path) -> int:
with open(file, 'r') as file:
return int(file.read(), 16)
def write_serial(file: Path, value: int):
with open(file, 'w') as file:
file.write(hex(value))
def bump_serial(file: Path) -> int:
try:
serial = read_serial(file)
except FileNotFoundError:
serial = generate_serial()
serial = (serial + 1) % (1 << 59)
write_serial(file, serial)
return serial
def ca_extensions(dns_name: str, max_path_length: Optional[int]) -> List[Tuple[x509.Extension, bool]]:
return [
(x509.BasicConstraints(True, max_path_length), True),
(x509.KeyUsage(False, False, False, False, False, True, True, False, False), True),
(x509.SubjectAlternativeName([x509.DNSName(dns_name)]), True),
(x509.NameConstraints([x509.DNSName('.' + dns_name)], []), True),
]
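# The positional booleans of x509.KeyUsage are, in order: digital_signature,
# content_commitment, key_encipherment, data_encipherment, key_agreement,
# key_cert_sign, crl_sign, encipher_only, decipher_only. ca_extensions()
# therefore sets key_cert_sign and crl_sign; client_extensions() below sets
# only digital_signature.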
def client_extensions(dns_name: str) -> List[Tuple[x509.Extension, bool]]:
return [
(x509.BasicConstraints(False, None), True),
(x509.KeyUsage(True, False, False, False, False, False, False, False, False), True),
(x509.ExtendedKeyUsage([ExtendedKeyUsageOID.CLIENT_AUTH]), True),
(x509.SubjectAlternativeName([x509.DNSName(dns_name)]), True),
]
def add_extensions(object, extensions: Iterable[Tuple[x509.Extension, bool]]):
for extension, critical in extensions:
object = object.add_extension(extension, critical)
    return object
def replace_name_attribute(name: x509.Name, oid: NameOID, new_value: str) -> x509.Name:
    new_attribs = []
    replaced = False
    for attrib in name:
        if not replaced and attrib.oid == oid:
            new_attribs.append(x509.NameAttribute(oid, new_value))
            replaced = True
        else:
            new_attribs.append(attrib)
    return x509.Name(new_attribs)
def prefix_name(name: x509.Name, oid: NameOID, value: str) -> x509.Name:
new_rdn = x509.RelativeDistinguishedName([x509.NameAttribute(oid, value)])
return x509.Name([new_rdn] + name.rdns)
def replace_common_name(name: x509.Name, new_value: str) -> x509.Name:
    return replace_name_attribute(name, NameOID.COMMON_NAME, new_value)
def generate_rsa_key(file: Path, bits: int = 4096) -> rsa.RSAPrivateKey:
umask = util.or_umask(0o277)
key = rsa.generate_private_key(public_exponent = 65537, key_size = bits, backend = crypto_backend)
util.write_file(file, key.private_bytes(
encoding = serialization.Encoding.PEM,
format = serialization.PrivateFormat.PKCS8,
encryption_algorithm = serialization.NoEncryption(),
))
os.umask(umask)
return key
def load_key(file: Path):
with open(file, 'rb') as file:
return serialization.load_pem_private_key(file.read(), password=None, backend=crypto_backend)
def make_csr(file: Path, key: rsa.RSAPrivateKey, name: x509.Name, extensions: List[Tuple[x509.Extension, bool]]) -> x509.CertificateSigningRequest:
csr = x509.CertificateSigningRequestBuilder()
csr = csr.subject_name(name)
for extension, critical in extensions:
csr = csr.add_extension(extension, critical)
csr = csr.sign(key, hashes.SHA512(), crypto_backend)
util.write_file(file, csr.public_bytes(serialization.Encoding.PEM))
return csr
def load_csr(file: Path) -> x509.CertificateSigningRequest:
return x509.load_pem_x509_csr(util.read_file(file), crypto_backend)
def sign_csr(
file : Path,
chain : bytes,
csr : x509.CertificateSigningRequest,
ca_key : rsa.RSAPrivateKey,
ca_cert : x509.Certificate,
name : x509.Name,
serial : int,
extensions : List[Tuple[x509.Extension, bool]],
days : int,
now : datetime,
) -> x509.Certificate:
cert = x509.CertificateBuilder()
cert = cert.issuer_name(ca_cert.subject)
cert = cert.subject_name(name)
cert = cert.serial_number(serial)
cert = cert.public_key(csr.public_key())
cert = cert.not_valid_before(now)
cert = cert.not_valid_after(now + timedelta(days=days))
for extension, critical in extensions:
cert = cert.add_extension(extension, critical)
cert = cert.sign(ca_key, hashes.SHA512(), crypto_backend)
with open(file, 'wb') as file:
file.write(cert.public_bytes(serialization.Encoding.PEM))
file.write(chain)
return cert
def make_self_signed_cert(
file : Path,
key : rsa.RSAPrivateKey,
name : x509.Name,
serial : int,
extensions : List[Tuple[x509.Extension, bool]],
days : int,
now : datetime,
) -> x509.Certificate:
cert = x509.CertificateBuilder()
cert = cert.issuer_name(name)
cert = cert.subject_name(name)
cert = cert.serial_number(serial)
cert = cert.public_key(key.public_key())
cert = cert.not_valid_before(now)
cert = cert.not_valid_after(now + timedelta(days=days))
for extension, critical in extensions:
cert = cert.add_extension(extension, critical)
cert = cert.sign(key, hashes.SHA512(), crypto_backend)
util.write_file(file, cert.public_bytes(serialization.Encoding.PEM))
return cert
def load_certificate(file: Path) -> x509.Certificate:
return x509.load_pem_x509_certificate(util.read_file(file), crypto_backend)
class PemBlob:
def __init__(self, name: str, data: bytes):
self.name = name
self.data = data
PEM_BEGIN_PREFIX = b'-----BEGIN '
PEM_END_PREFIX = b'-----END '
PEM_SUFFIX = b'-----\n'
def read_pem_blobs(data: bytes) -> Generator[PemBlob, None, None]:
current = None
body = b''
for line in data.splitlines(keepends=True):
if current is None and line.startswith(PEM_BEGIN_PREFIX) and line.endswith(PEM_SUFFIX):
current = line[len(PEM_BEGIN_PREFIX):-len(PEM_SUFFIX)]
body = line
elif current is not None:
body += line
if line == PEM_END_PREFIX + current + PEM_SUFFIX:
yield PemBlob(current.decode('utf8'), body)
current = None
def read_first_pem_blob(data: bytes, name: str) -> Optional[bytes]:
for blob in read_pem_blobs(data):
if blob.name == name:
return blob.data
return None
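# Usage sketch for the PEM helpers (hypothetical bundle path):
#
#   bundle = util.read_file(Path('chain.pem'))
#   names = [blob.name for blob in read_pem_blobs(bundle)]  # e.g. ['CERTIFICATE', 'CERTIFICATE']
#   leaf = read_first_pem_blob(bundle, 'CERTIFICATE')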
def read_last_pem_blob(data: bytes, name: str) -> Optional[bytes]:
    result = None
    for blob in read_pem_blobs(data):
        if blob.name == name:
            result = blob
    if result is None:
        return None
    return result.data
def load_first_certificate(data: bytes) -> Optional[x509.Certificate]:
blob = read_first_pem_blob(data, 'CERTIFICATE')
if not blob:
return None
return x509.load_pem_x509_certificate(blob, crypto_backend)
| 30.793872
| 147
| 0.739213
|
import os
from datetime import datetime, timedelta
from pathlib import Path
from typing import Iterable, List, Tuple, Optional, Generator
from cryptography import x509
from cryptography.x509.oid import NameOID, ExtendedKeyUsageOID
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.primitives import serialization
import util
crypto_backend = default_backend()
def name_key_to_oid(key: str) -> Optional[NameOID]:
key = key.upper()
if key == 'C':
return NameOID.COUNTRY_NAME
elif key == 'ST':
return NameOID.STATE_OR_PROVINCE_NAME
elif key == 'L':
return NameOID.LOCALITY_NAME
elif key == 'DC':
return NameOID.DOMAIN_COMPONENT
elif key == 'O':
return NameOID.ORGANIZATION_NAME
elif key == 'OU':
return NameOID.ORGANIZATIONAL_UNIT_NAME
elif key == 'CN':
return NameOID.COMMON_NAME
else:
return None
def name_oid_to_key(oid: NameOID) -> Optional[str]:
if oid == NameOID.COUNTRY_NAME:
return 'C'
elif oid == NameOID.STATE_OR_PROVINCE_NAME:
return 'ST'
elif oid == NameOID.LOCALITY_NAME:
return 'L'
elif oid == NameOID.DOMAIN_COMPONENT:
return 'DC'
elif oid == NameOID.ORGANIZATION_NAME:
return 'O'
elif oid == NameOID.ORGANIZATIONAL_UNIT_NAME:
return 'OU'
elif oid == NameOID.COMMON_NAME:
return 'CN'
else:
return None
def parse_name_attribute(string: str) -> x509.NameAttribute:
string = string.strip()
key, sep, value = string.partition('=')
if key:
key = key.strip()
if value:
value = value.strip()
if not key or not sep or not value:
        raise ValueError('invalid RDN syntax: should be key=value, got {}'.format(string))
oid = name_key_to_oid(key)
if not oid:
        raise ValueError('failed to parse RDN: unknown component: {}'.format(key))
return x509.NameAttribute(oid, value)
def parse_dn(value: str) -> x509.Name:
return x509.Name(map(lambda x: x509.RelativeDistinguishedName([parse_name_attribute(x)]), value.split(',')))
def format_attribute(attrib: x509.NameAttribute):
key = name_oid_to_key(attrib.oid)
if not key:
raise ValueError('unknown name attribute: {}'.format(attrib))
return '{}={}'.format(key, attrib.value)
def format_rdn(rdn: x509.RelativeDistinguishedName):
rdn = list(rdn)
if len(rdn) == 1:
return format_attribute(rdn[0])
return '{{{}}}'.format(', '.join(map(format_attribute, rdn)))
def format_name(name: x509.Name) -> str:
return ', '.join(map(format_rdn, name.rdns))
def generate_serial() -> int:
return int.from_bytes(os.urandom(20), byteorder='little') % (1 << 59)
def get_subject_alt_names(object) -> List[x509.GeneralName]:
try:
extension = object.extensions.get_extension_for_class(x509.SubjectAlternativeName)
except x509.ExtensionNotFound:
return []
return list(extension.value)
def get_basic_ca_constraint(object) -> Optional[bool]:
try:
extension = object.extensions.get_extension_for_class(x509.BasicConstraints)
except x509.ExtensionNotFound:
return None
return extension.value.ca
def get_dns_names(names: List[x509.GeneralName]) -> List[x509.DNSName]:
return list(filter(lambda x: isinstance(x, x509.DNSName), names))
def get_first_dns_name(names: List[x509.GeneralName]) -> Optional[x509.DNSName]:
try:
return get_dns_names(names)[0]
except IndexError:
return None
def read_serial(file: Path) -> int:
with open(file, 'r') as file:
return int(file.read(), 16)
def write_serial(file: Path, value: int):
with open(file, 'w') as file:
file.write(hex(value))
def bump_serial(file: Path) -> int:
try:
serial = read_serial(file)
except FileNotFoundError:
serial = generate_serial()
serial = (serial + 1) % (1 << 59)
write_serial(file, serial)
return serial
def ca_extensions(dns_name: str, max_path_length: Optional[int]) -> List[Tuple[x509.Extension, bool]]:
return [
(x509.BasicConstraints(True, max_path_length), True),
(x509.KeyUsage(False, False, False, False, False, True, True, False, False), True),
(x509.SubjectAlternativeName([x509.DNSName(dns_name)]), True),
(x509.NameConstraints([x509.DNSName('.' + dns_name)], []), True),
]
def client_extensions(dns_name: str) -> List[Tuple[x509.Extension, bool]]:
return [
(x509.BasicConstraints(False, None), True),
(x509.KeyUsage(True, False, False, False, False, False, False, False, False), True),
(x509.ExtendedKeyUsage([ExtendedKeyUsageOID.CLIENT_AUTH]), True),
(x509.SubjectAlternativeName([x509.DNSName(dns_name)]), True),
]
def add_extensions(object, extensions: Iterable[Tuple[x509.Extension, bool]]):
for extension, critical in extensions:
object = object.add_extension(extension, critical)
    return object
def replace_name_attribute(name: x509.Name, oid: NameOID, new_value: str) -> x509.Name:
    new_attribs = []
    replaced = False
    for attrib in name:
        if not replaced and attrib.oid == oid:
            new_attribs.append(x509.NameAttribute(oid, new_value))
            replaced = True
        else:
            new_attribs.append(attrib)
    return x509.Name(new_attribs)
def prefix_name(name: x509.Name, oid: NameOID, value: str) -> x509.Name:
new_rdn = x509.RelativeDistinguishedName([x509.NameAttribute(oid, value)])
return x509.Name([new_rdn] + name.rdns)
def replace_common_name(name: x509.Name, new_value: str) -> x509.Name:
    return replace_name_attribute(name, NameOID.COMMON_NAME, new_value)
def generate_rsa_key(file: Path, bits: int = 4096) -> rsa.RSAPrivateKey:
umask = util.or_umask(0o277)
key = rsa.generate_private_key(public_exponent = 65537, key_size = bits, backend = crypto_backend)
util.write_file(file, key.private_bytes(
encoding = serialization.Encoding.PEM,
format = serialization.PrivateFormat.PKCS8,
encryption_algorithm = serialization.NoEncryption(),
))
os.umask(umask)
return key
def load_key(file: Path):
with open(file, 'rb') as file:
return serialization.load_pem_private_key(file.read(), password=None, backend=crypto_backend)
def make_csr(file: Path, key: rsa.RSAPrivateKey, name: x509.Name, extensions: List[Tuple[x509.Extension, bool]]) -> x509.CertificateSigningRequest:
csr = x509.CertificateSigningRequestBuilder()
csr = csr.subject_name(name)
for extension, critical in extensions:
csr = csr.add_extension(extension, critical)
csr = csr.sign(key, hashes.SHA512(), crypto_backend)
util.write_file(file, csr.public_bytes(serialization.Encoding.PEM))
return csr
def load_csr(file: Path) -> x509.CertificateSigningRequest:
return x509.load_pem_x509_csr(util.read_file(file), crypto_backend)
def sign_csr(
file : Path,
chain : bytes,
csr : x509.CertificateSigningRequest,
ca_key : rsa.RSAPrivateKey,
ca_cert : x509.Certificate,
name : x509.Name,
serial : int,
extensions : List[Tuple[x509.Extension, bool]],
days : int,
now : datetime,
) -> x509.Certificate:
cert = x509.CertificateBuilder()
cert = cert.issuer_name(ca_cert.subject)
cert = cert.subject_name(name)
cert = cert.serial_number(serial)
cert = cert.public_key(csr.public_key())
cert = cert.not_valid_before(now)
cert = cert.not_valid_after(now + timedelta(days=days))
for extension, critical in extensions:
cert = cert.add_extension(extension, critical)
cert = cert.sign(ca_key, hashes.SHA512(), crypto_backend)
with open(file, 'wb') as file:
file.write(cert.public_bytes(serialization.Encoding.PEM))
file.write(chain)
return cert
def make_self_signed_cert(
file : Path,
key : rsa.RSAPrivateKey,
name : x509.Name,
serial : int,
extensions : List[Tuple[x509.Extension, bool]],
days : int,
now : datetime,
) -> x509.Certificate:
cert = x509.CertificateBuilder()
cert = cert.issuer_name(name)
cert = cert.subject_name(name)
cert = cert.serial_number(serial)
cert = cert.public_key(key.public_key())
cert = cert.not_valid_before(now)
cert = cert.not_valid_after(now + timedelta(days=days))
for extension, critical in extensions:
cert = cert.add_extension(extension, critical)
cert = cert.sign(key, hashes.SHA512(), crypto_backend)
util.write_file(file, cert.public_bytes(serialization.Encoding.PEM))
return cert
def load_certificate(file: Path) -> x509.Certificate:
return x509.load_pem_x509_certificate(util.read_file(file), crypto_backend)
class PemBlob:
def __init__(self, name: str, data: bytes):
self.name = name
self.data = data
PEM_BEGIN_PREFIX = b'-----BEGIN '
PEM_END_PREFIX = b'-----END '
PEM_SUFFIX = b'-----\n'
def read_pem_blobs(data: bytes) -> Generator[PemBlob, None, None]:
current = None
body = b''
for line in data.splitlines(keepends=True):
if current is None and line.startswith(PEM_BEGIN_PREFIX) and line.endswith(PEM_SUFFIX):
current = line[len(PEM_BEGIN_PREFIX):-len(PEM_SUFFIX)]
body = line
elif current is not None:
body += line
if line == PEM_END_PREFIX + current + PEM_SUFFIX:
yield PemBlob(current.decode('utf8'), body)
current = None
def read_first_pem_blob(data: bytes, name: str) -> Optional[bytes]:
for blob in read_pem_blobs(data):
if blob.name == name:
return blob.data
return None
def read_last_pem_blob(data: bytes, name: str) -> Optional[bytes]:
    result = None
    for blob in read_pem_blobs(data):
        if blob.name == name:
            result = blob
    if result is None:
        return None
    return result.data
def load_first_certificate(data: bytes) -> Optional[x509.Certificate]:
blob = read_first_pem_blob(data, 'CERTIFICATE')
if not blob:
return None
return x509.load_pem_x509_certificate(blob, crypto_backend)
| true
| true
|
1c4193c598a6936a19a00092d44a1b489fef5b33
| 13,515
|
py
|
Python
|
tests/test_seerpy.py
|
matias-seer/seer-py
|
fbb018e683817d108f2e1ee3162680de06ce110c
|
[
"MIT"
] | null | null | null |
tests/test_seerpy.py
|
matias-seer/seer-py
|
fbb018e683817d108f2e1ee3162680de06ce110c
|
[
"MIT"
] | null | null | null |
tests/test_seerpy.py
|
matias-seer/seer-py
|
fbb018e683817d108f2e1ee3162680de06ce110c
|
[
"MIT"
] | null | null | null |
# Copyright 2017,2018 Seer Medical Pty Ltd, Inc. or its affiliates. All Rights Reserved.
import json
import pathlib
from unittest import mock
import pytest
import pandas as pd
from seerpy.seerpy import SeerConnect
# having a class is useful to allow patches to be shared across multiple test functions, but then
# pylint complains that the methods could be a function. this disables that warning.
# pylint:disable=no-self-use
# not really a problem for these test classes
# pylint:disable=too-few-public-methods
TEST_DATA_DIR = pathlib.Path(__file__).parent / "test_data"
@mock.patch('seerpy.seerpy.SeerAuth', autospec=True)
class TestSeerConnect:
def test_success(self, seer_auth):
seer_auth.return_value.cookie = {'seer.sid': "cookie"}
result = SeerConnect()
assert result.graphql_client
def test_login_unauthorized(self, seer_auth):
seer_auth.return_value.cookie = None
# not really desired behaviour, just documenting current behaviour
with pytest.raises(AttributeError):
SeerConnect()
def test_login_error(self, seer_auth):
seer_auth.side_effect = InterruptedError('Authentication Failed')
with pytest.raises(InterruptedError):
SeerConnect()
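# Note on the stacked mock.patch decorators below: decorators are applied
# bottom-up, so the mocks are passed to each test method in bottom-to-top
# order (e.g. seer_auth first, then gql_client, then the patched time.sleep).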
@mock.patch.object(SeerConnect, "get_all_study_metadata_by_ids", autospec=True)
@mock.patch.object(SeerConnect, "__init__", autospec=True, return_value=None)
class TestGetAllStudyMetaDataDataframeByIds:
    # as we don't rely on anything in __init__() I have mocked it for simplicity
def test_single_study(self, unused_seer_connect_init, get_all_metadata):
# setup
with open(TEST_DATA_DIR / "study1_metadata.json", "r") as f:
test_input = json.load(f)
get_all_metadata.return_value = {'studies': [test_input['study']]}
expected_result = pd.read_csv(TEST_DATA_DIR / "study1_metadata.csv", index_col=0)
# run test
result = SeerConnect().get_all_study_metadata_dataframe_by_ids()
# check result
pd.testing.assert_frame_equal(result, expected_result)
def test_four_studies(self, unused_seer_connect_init, get_all_metadata):
# setup
studies = []
for i in range(1, 5):
filename = "study" + str(i) + "_metadata.json"
with open(TEST_DATA_DIR / filename, "r") as f:
studies.append(json.load(f)['study'])
get_all_metadata.return_value = {'studies': studies}
expected_result = pd.read_csv(TEST_DATA_DIR / "studies1-4_metadata.csv", index_col=0)
# run test
result = SeerConnect().get_all_study_metadata_dataframe_by_ids()
# check result
pd.testing.assert_frame_equal(result, expected_result)
@mock.patch('time.sleep', return_value=None)
@mock.patch('seerpy.seerpy.GQLClient', autospec=True)
@mock.patch('seerpy.seerpy.SeerAuth', autospec=True)
class TestGetAllStudyMetaDataByNames:
def test_no_study_param(self, seer_auth, gql_client, unused_time_sleep):
# setup
seer_auth.return_value.cookie = {'seer.sid': "cookie"}
side_effects = []
# this is the call in get_studies()
with open(TEST_DATA_DIR / "studies.json", "r") as f:
side_effects.append({'studies': json.load(f)})
# this is the "no more data" response for get_studies()
side_effects.append({'studies': []})
# these are the calls from the loop in get_all_study_metadata_by_ids()
expected_results = []
for i in range(1, 5):
filename = "study" + str(i) + "_metadata.json"
with open(TEST_DATA_DIR / filename, "r") as f:
study = json.load(f)
side_effects.append(study)
expected_results.append(study['study'])
gql_client.return_value.execute.side_effect = side_effects
# run test
result = SeerConnect().get_all_study_metadata_by_names()
# check result
assert result == {'studies' : expected_results}
def test_existing_study_param(self, seer_auth, gql_client, unused_time_sleep):
# setup
seer_auth.return_value.cookie = {'seer.sid': "cookie"}
side_effects = []
# this is the call in get_studies()
with open(TEST_DATA_DIR / "studies.json", "r") as f:
side_effects.append({'studies': json.load(f)})
# this is the "no more data" response for get_studies()
side_effects.append({'studies': []})
# these are the calls from the loop in get_all_study_metadata_by_ids()
expected_results = []
with open(TEST_DATA_DIR / "study1_metadata.json", "r") as f:
study = json.load(f)
side_effects.append(study)
expected_results = [study['study']]
gql_client.return_value.execute.side_effect = side_effects
# run test
result = SeerConnect().get_all_study_metadata_by_names("Study 1")
# check result
assert result == {'studies' : expected_results}
def test_nonexistent_study_param(self, seer_auth, gql_client, unused_time_sleep):
# setup
seer_auth.return_value.cookie = {'seer.sid': "cookie"}
side_effects = []
# this is the call in get_studies()
with open(TEST_DATA_DIR / "studies.json", "r") as f:
side_effects.append({'studies': json.load(f)})
# this is the "no more data" response for get_studies()
side_effects.append({'studies': []})
gql_client.return_value.execute.side_effect = side_effects
# run test
result = SeerConnect().get_all_study_metadata_by_names("Study 12")
# check result
assert result == {'studies' : []}
# the only call will be in getStudies()
assert gql_client.return_value.execute.call_count == 2
@mock.patch('time.sleep', return_value=None)
@mock.patch('seerpy.seerpy.GQLClient', autospec=True)
@mock.patch('seerpy.seerpy.SeerAuth', autospec=True)
class TestGetSegmentUrls:
def test_success(self, seer_auth, gql_client, unused_time_sleep):
# setup
seer_auth.return_value.cookie = {'seer.sid': "cookie"}
with open(TEST_DATA_DIR / "segment_urls_1.json", "r") as f:
gql_client.return_value.execute.return_value = json.load(f)
expected_result = pd.read_csv(TEST_DATA_DIR / "segment_urls_1.csv", index_col=0)
# run test
result = SeerConnect().get_segment_urls(["segment-1-id", "segment-2-id"])
# check result
pd.testing.assert_frame_equal(result, expected_result)
def test_multiple_batches(self, seer_auth, gql_client, unused_time_sleep):
# setup
seer_auth.return_value.cookie = {'seer.sid': "cookie"}
side_effects = []
for file_name in ["segment_urls_1.json", "segment_urls_2.json"]:
with open(TEST_DATA_DIR / file_name, "r") as f:
side_effects.append(json.load(f))
gql_client.return_value.execute.side_effect = side_effects
expected_result = pd.read_csv(TEST_DATA_DIR / "segment_urls_2.csv", index_col=0)
# run test
result = SeerConnect().get_segment_urls(["segment-1-id", "segment-2-id", "segment-3-id",
"segment-4-id"], 2)
# check result
pd.testing.assert_frame_equal(result, expected_result)
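# Editor's note (illustrative): with four segment ids and a batch size of 2,
# get_segment_urls issues two execute() calls, one per batch, which is why two
# responses are queued on side_effect above.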
def test_none_segment_ids(self, seer_auth, unused_gql_client, unused_time_sleep):
# setup
seer_auth.return_value.cookie = {'seer.sid': "cookie"}
expected_result = pd.read_csv(TEST_DATA_DIR / "segment_urls_empty.csv", index_col=0)
# run test
result = SeerConnect().get_segment_urls(None)
# check result
pd.testing.assert_frame_equal(result, expected_result)
def test_empty_segment_ids(self, seer_auth, unused_gql_client, unused_time_sleep):
# setup
seer_auth.return_value.cookie = {'seer.sid': "cookie"}
# gql_client is never called as we don't enter the loop
# run test
result = SeerConnect().get_segment_urls([])
# check result
assert result.empty
def test_unmatched_segment_ids(self, seer_auth, gql_client, unused_time_sleep):
# setup
seer_auth.return_value.cookie = {'seer.sid': "cookie"}
with open(TEST_DATA_DIR / "segment_urls_no_match.json", "r") as f:
gql_client.return_value.execute.return_value = json.load(f)
# run test
result = SeerConnect().get_segment_urls(["blah", "blah1"])
# check result
assert result.empty
@mock.patch('time.sleep', return_value=None)
@mock.patch('seerpy.seerpy.GQLClient', autospec=True)
@mock.patch('seerpy.seerpy.SeerAuth', autospec=True)
class TestGetLabels:
def test_success(self, seer_auth, gql_client, unused_time_sleep):
# setup
seer_auth.return_value.cookie = {'seer.sid': "cookie"}
side_effects = []
with open(TEST_DATA_DIR / "labels_1.json", "r") as f:
side_effects.append(json.load(f))
with open(TEST_DATA_DIR / "labels_2.json", "r") as f:
side_effects.append(json.load(f))
# this is the "no more data" response for get_labels()
with open(TEST_DATA_DIR / "labels_1_empty.json", "r") as f:
side_effects.append(json.load(f))
gql_client.return_value.execute.side_effect = side_effects
with open(TEST_DATA_DIR / "labels_result.json", "r") as f:
expected_result = json.load(f)
# run test
result = SeerConnect().get_labels("study-1-id", "label-group-1-id")
# check result
assert result == expected_result
@mock.patch('time.sleep', return_value=None)
@mock.patch('seerpy.seerpy.GQLClient', autospec=True)
@mock.patch('seerpy.seerpy.SeerAuth', autospec=True)
class TestGetLabelsDataframe:
def test_success(self, seer_auth, gql_client, unused_time_sleep):
# setup
seer_auth.return_value.cookie = {'seer.sid': "cookie"}
side_effects = []
with open(TEST_DATA_DIR / "labels_1.json", "r") as f:
side_effects.append(json.load(f))
with open(TEST_DATA_DIR / "labels_2.json", "r") as f:
side_effects.append(json.load(f))
# this is the "no more data" response for get_labels()
with open(TEST_DATA_DIR / "labels_1_empty.json", "r") as f:
side_effects.append(json.load(f))
gql_client.return_value.execute.side_effect = side_effects
expected_result = pd.read_csv(TEST_DATA_DIR / "labels_1.csv", index_col=0)
# run test
result = SeerConnect().get_labels_dataframe("study-1-id", "label-group-1-id")
# check result
pd.testing.assert_frame_equal(result, expected_result)
@mock.patch('time.sleep', return_value=None)
@mock.patch('seerpy.seerpy.GQLClient', autospec=True)
@mock.patch('seerpy.seerpy.SeerAuth', autospec=True)
class TestGetViewedTimesDataframe:
def test_success(self, seer_auth, gql_client, unused_time_sleep):
# setup
seer_auth.return_value.cookie = {'seer.sid': "cookie"}
side_effects = []
with open(TEST_DATA_DIR / "view_groups.json", "r") as f:
side_effects.append(json.load(f))
# this is the "no more data" response for get_viewed_times_dataframe()
with open(TEST_DATA_DIR / "view_groups_empty.json", "r") as f:
side_effects.append(json.load(f))
gql_client.return_value.execute.side_effect = side_effects
# need to set parse_dates and float_precision='round_trip' to make the comparison work
expected_result = pd.read_csv(TEST_DATA_DIR / "views.csv", index_col=0,
parse_dates=['createdAt', 'updatedAt'],
float_precision='round_trip')
# run test
result = SeerConnect().get_viewed_times_dataframe("study-1-id")
# check result
pd.testing.assert_frame_equal(result, expected_result)
@mock.patch('time.sleep', return_value=None)
@mock.patch('seerpy.seerpy.GQLClient', autospec=True)
@mock.patch('seerpy.seerpy.SeerAuth', autospec=True)
class TestGetDocumentsForStudiesDataframe:
def test_success(self, seer_auth, gql_client, unused_time_sleep):
# setup
seer_auth.return_value.cookie = {'seer.sid': "cookie"}
side_effects = []
with open(TEST_DATA_DIR / "study_documents.json", "r") as f:
side_effects.append(json.load(f))
# # this is the "no more data" response for get_documents_for_studies_dataframe()
with open(TEST_DATA_DIR / "study_documents_empty.json", "r") as f:
side_effects.append(json.load(f))
side_effects.append({'studies': []}) # this is the "no more data" response for get_studies()
gql_client.return_value.execute.side_effect = side_effects
# need to set parse_dates and float_precision='round_trip' to make the comparison work
expected_result = pd.read_csv(TEST_DATA_DIR / "study_documents.csv", index_col=0,
parse_dates=['uploaded'],
float_precision='round_trip')
expected_result['uploaded'] = expected_result['uploaded'].astype(int)
# run test
result = SeerConnect().get_documents_for_studies_dataframe("study-1-id")
# check result
pd.testing.assert_frame_equal(result, expected_result, check_like=True)
| 36.527027
| 100
| 0.658454
|
import json
import pathlib
from unittest import mock
import pytest
import pandas as pd
from seerpy.seerpy import SeerConnect
TEST_DATA_DIR = pathlib.Path(__file__).parent / "test_data"
@mock.patch('seerpy.seerpy.SeerAuth', autospec=True)
class TestSeerConnect:
def test_success(self, seer_auth):
seer_auth.return_value.cookie = {'seer.sid': "cookie"}
result = SeerConnect()
assert result.graphql_client
def test_login_unauthorized(self, seer_auth):
seer_auth.return_value.cookie = None
with pytest.raises(AttributeError):
SeerConnect()
def test_login_error(self, seer_auth):
seer_auth.side_effect = InterruptedError('Authentication Failed')
with pytest.raises(InterruptedError):
SeerConnect()
@mock.patch.object(SeerConnect, "get_all_study_metadata_by_ids", autospec=True)
@mock.patch.object(SeerConnect, "__init__", autospec=True, return_value=None)
class TestGetAllStudyMetaDataDataframeByIds:
def test_single_study(self, unused_seer_connect_init, get_all_metadata):
# setup
with open(TEST_DATA_DIR / "study1_metadata.json", "r") as f:
test_input = json.load(f)
get_all_metadata.return_value = {'studies': [test_input['study']]}
expected_result = pd.read_csv(TEST_DATA_DIR / "study1_metadata.csv", index_col=0)
# run test
result = SeerConnect().get_all_study_metadata_dataframe_by_ids()
# check result
pd.testing.assert_frame_equal(result, expected_result)
def test_four_studies(self, unused_seer_connect_init, get_all_metadata):
# setup
studies = []
for i in range(1, 5):
filename = "study" + str(i) + "_metadata.json"
with open(TEST_DATA_DIR / filename, "r") as f:
studies.append(json.load(f)['study'])
get_all_metadata.return_value = {'studies': studies}
expected_result = pd.read_csv(TEST_DATA_DIR / "studies1-4_metadata.csv", index_col=0)
# run test
result = SeerConnect().get_all_study_metadata_dataframe_by_ids()
# check result
pd.testing.assert_frame_equal(result, expected_result)
@mock.patch('time.sleep', return_value=None)
@mock.patch('seerpy.seerpy.GQLClient', autospec=True)
@mock.patch('seerpy.seerpy.SeerAuth', autospec=True)
class TestGetAllStudyMetaDataByNames:
def test_no_study_param(self, seer_auth, gql_client, unused_time_sleep):
# setup
seer_auth.return_value.cookie = {'seer.sid': "cookie"}
side_effects = []
# this is the call in get_studies()
with open(TEST_DATA_DIR / "studies.json", "r") as f:
side_effects.append({'studies': json.load(f)})
# this is the "no more data" response for get_studies()
side_effects.append({'studies': []})
# these are the calls from the loop in get_all_study_metadata_by_ids()
expected_results = []
for i in range(1, 5):
filename = "study" + str(i) + "_metadata.json"
with open(TEST_DATA_DIR / filename, "r") as f:
study = json.load(f)
side_effects.append(study)
expected_results.append(study['study'])
gql_client.return_value.execute.side_effect = side_effects
# run test
result = SeerConnect().get_all_study_metadata_by_names()
# check result
assert result == {'studies' : expected_results}
def test_existing_study_param(self, seer_auth, gql_client, unused_time_sleep):
# setup
seer_auth.return_value.cookie = {'seer.sid': "cookie"}
side_effects = []
# this is the call in get_studies()
with open(TEST_DATA_DIR / "studies.json", "r") as f:
side_effects.append({'studies': json.load(f)})
# this is the "no more data" response for get_studies()
side_effects.append({'studies': []})
# these are the calls from the loop in get_all_study_metadata_by_ids()
expected_results = []
with open(TEST_DATA_DIR / "study1_metadata.json", "r") as f:
study = json.load(f)
side_effects.append(study)
expected_results = [study['study']]
gql_client.return_value.execute.side_effect = side_effects
# run test
result = SeerConnect().get_all_study_metadata_by_names("Study 1")
# check result
assert result == {'studies' : expected_results}
def test_nonexistent_study_param(self, seer_auth, gql_client, unused_time_sleep):
# setup
seer_auth.return_value.cookie = {'seer.sid': "cookie"}
side_effects = []
# this is the call in get_studies()
with open(TEST_DATA_DIR / "studies.json", "r") as f:
side_effects.append({'studies': json.load(f)})
# this is the "no more data" response for get_studies()
side_effects.append({'studies': []})
gql_client.return_value.execute.side_effect = side_effects
# run test
result = SeerConnect().get_all_study_metadata_by_names("Study 12")
# check result
assert result == {'studies' : []}
# the only calls are the two inside get_studies() (data page + empty page)
assert gql_client.return_value.execute.call_count == 2
@mock.patch('time.sleep', return_value=None)
@mock.patch('seerpy.seerpy.GQLClient', autospec=True)
@mock.patch('seerpy.seerpy.SeerAuth', autospec=True)
class TestGetSegmentUrls:
def test_success(self, seer_auth, gql_client, unused_time_sleep):
# setup
seer_auth.return_value.cookie = {'seer.sid': "cookie"}
with open(TEST_DATA_DIR / "segment_urls_1.json", "r") as f:
gql_client.return_value.execute.return_value = json.load(f)
expected_result = pd.read_csv(TEST_DATA_DIR / "segment_urls_1.csv", index_col=0)
# run test
result = SeerConnect().get_segment_urls(["segment-1-id", "segment-2-id"])
# check result
pd.testing.assert_frame_equal(result, expected_result)
def test_multiple_batches(self, seer_auth, gql_client, unused_time_sleep):
# setup
seer_auth.return_value.cookie = {'seer.sid': "cookie"}
side_effects = []
for file_name in ["segment_urls_1.json", "segment_urls_2.json"]:
with open(TEST_DATA_DIR / file_name, "r") as f:
side_effects.append(json.load(f))
gql_client.return_value.execute.side_effect = side_effects
expected_result = pd.read_csv(TEST_DATA_DIR / "segment_urls_2.csv", index_col=0)
# run test
result = SeerConnect().get_segment_urls(["segment-1-id", "segment-2-id", "segment-3-id",
"segment-4-id"], 2)
# check result
pd.testing.assert_frame_equal(result, expected_result)
def test_none_segment_ids(self, seer_auth, unused_gql_client, unused_time_sleep):
# setup
seer_auth.return_value.cookie = {'seer.sid': "cookie"}
expected_result = pd.read_csv(TEST_DATA_DIR / "segment_urls_empty.csv", index_col=0)
# run test
result = SeerConnect().get_segment_urls(None)
# check result
pd.testing.assert_frame_equal(result, expected_result)
def test_empty_segment_ids(self, seer_auth, unused_gql_client, unused_time_sleep):
# setup
seer_auth.return_value.cookie = {'seer.sid': "cookie"}
# gql_client is never called as we don't enter the loop
result = SeerConnect().get_segment_urls([])
assert result.empty
def test_unmatched_segment_ids(self, seer_auth, gql_client, unused_time_sleep):
seer_auth.return_value.cookie = {'seer.sid': "cookie"}
with open(TEST_DATA_DIR / "segment_urls_no_match.json", "r") as f:
gql_client.return_value.execute.return_value = json.load(f)
result = SeerConnect().get_segment_urls(["blah", "blah1"])
assert result.empty
@mock.patch('time.sleep', return_value=None)
@mock.patch('seerpy.seerpy.GQLClient', autospec=True)
@mock.patch('seerpy.seerpy.SeerAuth', autospec=True)
class TestGetLabels:
def test_success(self, seer_auth, gql_client, unused_time_sleep):
seer_auth.return_value.cookie = {'seer.sid': "cookie"}
side_effects = []
with open(TEST_DATA_DIR / "labels_1.json", "r") as f:
side_effects.append(json.load(f))
with open(TEST_DATA_DIR / "labels_2.json", "r") as f:
side_effects.append(json.load(f))
with open(TEST_DATA_DIR / "labels_1_empty.json", "r") as f:
side_effects.append(json.load(f))
gql_client.return_value.execute.side_effect = side_effects
with open(TEST_DATA_DIR / "labels_result.json", "r") as f:
expected_result = json.load(f)
result = SeerConnect().get_labels("study-1-id", "label-group-1-id")
assert result == expected_result
@mock.patch('time.sleep', return_value=None)
@mock.patch('seerpy.seerpy.GQLClient', autospec=True)
@mock.patch('seerpy.seerpy.SeerAuth', autospec=True)
class TestGetLabelsDataframe:
def test_success(self, seer_auth, gql_client, unused_time_sleep):
seer_auth.return_value.cookie = {'seer.sid': "cookie"}
side_effects = []
with open(TEST_DATA_DIR / "labels_1.json", "r") as f:
side_effects.append(json.load(f))
with open(TEST_DATA_DIR / "labels_2.json", "r") as f:
side_effects.append(json.load(f))
with open(TEST_DATA_DIR / "labels_1_empty.json", "r") as f:
side_effects.append(json.load(f))
gql_client.return_value.execute.side_effect = side_effects
expected_result = pd.read_csv(TEST_DATA_DIR / "labels_1.csv", index_col=0)
result = SeerConnect().get_labels_dataframe("study-1-id", "label-group-1-id")
pd.testing.assert_frame_equal(result, expected_result)
@mock.patch('time.sleep', return_value=None)
@mock.patch('seerpy.seerpy.GQLClient', autospec=True)
@mock.patch('seerpy.seerpy.SeerAuth', autospec=True)
class TestGetViewedTimesDataframe:
def test_success(self, seer_auth, gql_client, unused_time_sleep):
seer_auth.return_value.cookie = {'seer.sid': "cookie"}
side_effects = []
with open(TEST_DATA_DIR / "view_groups.json", "r") as f:
side_effects.append(json.load(f))
with open(TEST_DATA_DIR / "view_groups_empty.json", "r") as f:
side_effects.append(json.load(f))
gql_client.return_value.execute.side_effect = side_effects
expected_result = pd.read_csv(TEST_DATA_DIR / "views.csv", index_col=0,
parse_dates=['createdAt', 'updatedAt'],
float_precision='round_trip')
result = SeerConnect().get_viewed_times_dataframe("study-1-id")
pd.testing.assert_frame_equal(result, expected_result)
@mock.patch('time.sleep', return_value=None)
@mock.patch('seerpy.seerpy.GQLClient', autospec=True)
@mock.patch('seerpy.seerpy.SeerAuth', autospec=True)
class TestGetDocumentsForStudiesDataframe:
def test_success(self, seer_auth, gql_client, unused_time_sleep):
seer_auth.return_value.cookie = {'seer.sid': "cookie"}
side_effects = []
with open(TEST_DATA_DIR / "study_documents.json", "r") as f:
side_effects.append(json.load(f))
with open(TEST_DATA_DIR / "study_documents_empty.json", "r") as f:
side_effects.append(json.load(f))
side_effects.append({'studies': []})
gql_client.return_value.execute.side_effect = side_effects
expected_result = pd.read_csv(TEST_DATA_DIR / "study_documents.csv", index_col=0,
parse_dates=['uploaded'],
float_precision='round_trip')
expected_result['uploaded'] = expected_result['uploaded'].astype(int)
result = SeerConnect().get_documents_for_studies_dataframe("study-1-id")
pd.testing.assert_frame_equal(result, expected_result, check_like=True)
| true
| true
|
1c4193dd25e08f5ede3d53f5443722355b2807e2
| 2,972
|
py
|
Python
|
CalibTracker/SiStripESProducers/test/python/templateCheckAllIOVs_cfg.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 852
|
2015-01-11T21:03:51.000Z
|
2022-03-25T21:14:00.000Z
|
CalibTracker/SiStripESProducers/test/python/templateCheckAllIOVs_cfg.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 30,371
|
2015-01-02T00:14:40.000Z
|
2022-03-31T23:26:05.000Z
|
CalibTracker/SiStripESProducers/test/python/templateCheckAllIOVs_cfg.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 3,240
|
2015-01-02T05:53:18.000Z
|
2022-03-31T17:24:21.000Z
|
from __future__ import print_function
def pack(high,low):
"""pack high,low 32bit unsigned int to one unsigned 64bit long long
Note: the printed value of the result may appear signed if the sign bit is used.
"""
h=high<<32
return (h|low)
def secondsFromString(i):
"""convert from a string in the format output from timeStamptoDate to a 32bit seconds from the epoch.
The format accepted is \"DD/MM/YYYY HH:MM:SS\". The year must be the full number.
"""
import time
return int(time.mktime(time.strptime(i, "%d/%m/%Y %H:%M:%S")))
def packFromString(i):
"""pack from a string in the format output from timeStamptoUTC to a 64bit timestamp
the format accepted is \"DD/MM/YYYY HH:MM:SS\" . The year must be the full number.
"""
return pack(secondsFromString(i), 0)
def intervalSinceEpoch(i):
""" compute the interval of time is seconds since the Epoch and return the packed 64bit value.
"""
return( packFromString(i) - packFromString("01/01/1970 00:00:00") )
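# Editor's note -- illustrative check, not part of the original template: the
# packed 64-bit value carries whole seconds in its high 32 bits, so a
# one-second interval packs to 1 << 32 regardless of the local timezone.
assert pack(1, 0) == 1 << 32
assert intervalSinceEpoch("01/01/1970 00:00:01") == 1 << 32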
import FWCore.ParameterSet.Config as cms
process = cms.Process("Reader")
process.MessageLogger = cms.Service("MessageLogger",
debugModules = cms.untracked.vstring("*"),
DetVOffReaderSummary_DATE = cms.untracked.PSet(
threshold = cms.untracked.string('INFO')
),
DetVOffReaderDebug_DATE = cms.untracked.PSet(
threshold = cms.untracked.string('DEBUG')
),
destinations = cms.untracked.vstring('DetVOffReaderSummary_DATE', 'DetVOffReaderDebug_DATE')
)
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(1)
)
# Check
# print "converting start date = 28/07/2009 08:53:53 to ",
# print packFromString("28/07/2009 08:53:53")
# print "converting end date = 28/07/2009 14:13:31 to ",
# print packFromString("28/07/2009 14:13:31")
print("using an interval of 1 second = ", end=' ')
print(intervalSinceEpoch("01/01/1970 00:00:01"))
process.source = cms.Source("EmptyIOVSource",
timetype = cms.string('timestamp'),
# firstValue = cms.uint64(packFromString("28/07/2009 10:53:53")),
# lastValue = cms.uint64(packFromString("28/07/2009 16:13:31")),
firstValue = cms.uint64(STARTTIME),
lastValue = cms.uint64(ENDTIME),
# One-second interval
interval = cms.uint64(intervalSinceEpoch("01/01/1970 00:00:01"))
)
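# Editor's note (illustrative): EmptyIOVSource steps the timestamp from
# firstValue to lastValue in `interval`-sized increments, so a packed
# one-second interval visits every second in the window and lets the reader
# module below print the payload at each IOV boundary.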
process.poolDBESSource = cms.ESSource("PoolDBESSource",
BlobStreamerName = cms.untracked.string('TBufferBlobStreamingService'),
DBParameters = cms.PSet(
messageLevel = cms.untracked.int32(2),
authenticationPath = cms.untracked.string('/afs/cern.ch/cms/DB/conddb')
),
connect = cms.string('DATABASE'),
toGet = cms.VPSet(cms.PSet(
timetype = cms.untracked.string('timestamp'),
record = cms.string('SiStripDetVOffRcd'),
tag = cms.string('SiStripDetVOff_Fake_31X')
))
)
process.reader = cms.EDFilter("SiStripDetVOffDummyPrinter")
process.p1 = cms.Path(process.reader)
| 35.380952
| 105
| 0.688762
|
from __future__ import print_function
def pack(high,low):
h=high<<32
return (h|low)
def secondsFromString(i):
import time
return int(time.mktime(time.strptime(i, "%d/%m/%Y %H:%M:%S")))
def packFromString(i):
return pack(secondsFromString(i), 0)
def intervalSinceEpoch(i):
return( packFromString(i) - packFromString("01/01/1970 00:00:00") )
import FWCore.ParameterSet.Config as cms
process = cms.Process("Reader")
process.MessageLogger = cms.Service("MessageLogger",
debugModules = cms.untracked.vstring("*"),
DetVOffReaderSummary_DATE = cms.untracked.PSet(
threshold = cms.untracked.string('INFO')
),
DetVOffReaderDebug_DATE = cms.untracked.PSet(
threshold = cms.untracked.string('DEBUG')
),
destinations = cms.untracked.vstring('DetVOffReaderSummary_DATE', 'DetVOffReaderDebug_DATE')
)
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(1)
)
print("using an interval of 1 second = ", end=' ')
print(intervalSinceEpoch("01/01/1970 00:00:01"))
process.source = cms.Source("EmptyIOVSource",
timetype = cms.string('timestamp'),
firstValue = cms.uint64(STARTTIME),
lastValue = cms.uint64(ENDTIME),
interval = cms.uint64(intervalSinceEpoch("01/01/1970 00:00:01"))
)
process.poolDBESSource = cms.ESSource("PoolDBESSource",
BlobStreamerName = cms.untracked.string('TBufferBlobStreamingService'),
DBParameters = cms.PSet(
messageLevel = cms.untracked.int32(2),
authenticationPath = cms.untracked.string('/afs/cern.ch/cms/DB/conddb')
),
connect = cms.string('DATABASE'),
toGet = cms.VPSet(cms.PSet(
timetype = cms.untracked.string('timestamp'),
record = cms.string('SiStripDetVOffRcd'),
tag = cms.string('SiStripDetVOff_Fake_31X')
))
)
process.reader = cms.EDFilter("SiStripDetVOffDummyPrinter")
process.p1 = cms.Path(process.reader)
| true
| true
|
1c419467f0489fbf76fb84c4e1abbc3e19d99fa2
| 4,519
|
py
|
Python
|
test/test_planner.py
|
liespace/pyRRTs
|
11bfefad99218bc9eccd97040355c61d34a1181d
|
[
"MIT"
] | 2
|
2021-01-22T09:12:49.000Z
|
2021-05-06T14:22:05.000Z
|
test/test_planner.py
|
liespace/pyRRTs
|
11bfefad99218bc9eccd97040355c61d34a1181d
|
[
"MIT"
] | null | null | null |
test/test_planner.py
|
liespace/pyRRTs
|
11bfefad99218bc9eccd97040355c61d34a1181d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
from copy import deepcopy
import numpy as np
import matplotlib.pyplot as plt
import cv2
from matplotlib.patches import Polygon
from rrts import BiRRTStar, RRTStar
from rrts.debugger import Debugger
def center2rear(node, wheelbase=2.96):
"""calculate the coordinate of rear track center according to mass center"""
if not isinstance(node, RRTStar.StateNode):
theta, r = node[2] + np.pi, wheelbase / 2.
node[0] += r * np.cos(theta)
node[1] += r * np.sin(theta)
return node
theta, r = node.state[2] + np.pi, wheelbase / 2.
node.state[0] += r * np.cos(theta)
node.state[1] += r * np.sin(theta)
return node
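# Editor's note -- illustrative check, not from the original source: with zero
# heading the rear-axle point lies half a wheelbase behind the mass centre
# along -x, so (0, 0, 0) maps to roughly (-1.48, 0, 0) for the default
# wheelbase of 2.96.
assert np.allclose(center2rear([0.0, 0.0, 0.0])[:2], [-1.48, 0.0])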
def contour(wheelbase=2.96, width=2.0, length=5.0): # 2.96, 2.2, 5.0
return np.array([
[-(length/2. - wheelbase / 2.), width/2. - 1.0], [-(length/2. - wheelbase / 2. - 0.4), width/2.],
[length/2. + wheelbase / 2. - 0.6, width/2.], [length/2. + wheelbase / 2., width/2. - 0.8],
[length/2. + wheelbase / 2., -(width/2. - 0.8)], [length/2. + wheelbase / 2. - 0.6, -width/2.],
[-(length/2. - wheelbase / 2. - 0.4), -width/2.], [-(length/2. - wheelbase / 2.), -(width/2. - 1.0)]])
def read_task(filepath, seq=0):
"""
read source(start) and target(goal), and transform to right-hand and local coordinate system centered in source
LCS: local coordinate system, or said vehicle-frame.
GCS: global coordinate system
"""
# read task and transform coordinate system to right-hand
task = np.loadtxt('{}/{}_task.txt'.format(filepath, seq), delimiter=',')
org, aim = task[0], task[1]
# coordinate of the center of mass on source(start) state, in GCS
source = RRTStar.StateNode(state=(org[0], -org[1], -np.radians(org[3])))
# coordinate of center of mass on target(goal) state, in GCS
target = RRTStar.StateNode(state=(aim[0], -aim[1], -np.radians(aim[3])))
return source, target
def read_grid(filepath, seq):
# type: (str, int) -> np.ndarray
"""read occupancy grid map"""
return cv2.imread(filename='{}/{}_gridmap.png'.format(filepath, seq), flags=-1)
def read_ose(filepath, seq):
"""read heuristic ose"""
oseh = np.loadtxt('{}/{}_ose.txt'.format(filepath, seq), delimiter=',')
oseh = [((x[0], x[1], x[2]), ((0., x[3]/3.), (0., x[3]/3.), (0., x[3]/3. * np.pi/3./3.))) for x in oseh]
return oseh
def read_yips(filepath, seq, discrimination=0.7):
yips = np.loadtxt('{}/{}_pred.txt'.format(filepath, seq), delimiter=',')
yips = filter(lambda x: x[-1] > discrimination, yips)
yips = map(center2rear, yips)
yips = [((yip[0], yip[1], yip[2]), ((0.621, 2.146), (0.015, 1.951 * 1.0), (0.005, 0.401 * 1.0))) for yip in yips]
return yips
def set_plot(switch=True):
if switch:
plt.ion()
plt.figure()
plt.gca().set_xticks([])
plt.gca().set_yticks([])
plt.gca().set_aspect('equal')
plt.gca().set_facecolor((0.2, 0.2, 0.2))
plt.gca().set_xlim((-30, 30))
plt.gca().set_ylim((-30, 30))
plt.draw()
def transform(pts, pto):
xyo = np.array([[pto[0]], [pto[1]]])
rot = np.array([[np.cos(pto[2]), -np.sin(pto[2])], [np.sin(pto[2]), np.cos(pto[2])]])
return np.dot(rot, pts) + xyo
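# Editor's note -- illustrative check, not from the original source: a pose of
# (0, 0, pi/2) rotates the unit x-vector onto the y-axis.
assert np.allclose(transform(np.array([[1.0], [0.0]]), (0.0, 0.0, np.pi / 2)),
                   np.array([[0.0], [1.0]]))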
def main():
filepath, seq, debug = './test_scenes', 2062, True
rrt_star = BiRRTStar().set_vehicle(contour(), 0.3, 0.25)
# heuristic = read_ose(filepath, seq)
# heuristic = read_yips(filepath, seq)
heuristic = None
source, target = read_task(filepath, seq)
start = center2rear(deepcopy(source)).gcs2lcs(source.state)
goal = center2rear(deepcopy(target)).gcs2lcs(source.state)
grid_ori = deepcopy(source).gcs2lcs(source.state)
grid_map = read_grid(filepath, seq)
grid_res = 0.1
if debug:
set_plot(debug)
Debugger.plot_grid(grid_map, grid_res)
Debugger().plot_nodes([start, goal])
plt.gca().add_patch(Polygon(
transform(contour().transpose(), start.state).transpose(), True, color='b', fill=False, lw=2.0))
plt.gca().add_patch(Polygon(
transform(contour().transpose(), goal.state).transpose(), True, color='g', fill=False, lw=2.0))
if heuristic:
Debugger.plot_heuristic(heuristic)
plt.draw()
rrt_star.debug = debug
rrt_star.preset(start, goal, grid_map, grid_res, grid_ori, 255, heuristic).planning(100, debug=debug)
Debugger.breaker('Plotting', switch=debug)
if __name__ == '__main__':
main()
| 37.658333
| 117
| 0.610976
|
from copy import deepcopy
import numpy as np
import matplotlib.pyplot as plt
import cv2
from matplotlib.patches import Polygon
from rrts import BiRRTStar, RRTStar
from rrts.debugger import Debugger
def center2rear(node, wheelbase=2.96):
if not isinstance(node, RRTStar.StateNode):
theta, r = node[2] + np.pi, wheelbase / 2.
node[0] += r * np.cos(theta)
node[1] += r * np.sin(theta)
return node
theta, r = node.state[2] + np.pi, wheelbase / 2.
node.state[0] += r * np.cos(theta)
node.state[1] += r * np.sin(theta)
return node
def contour(wheelbase=2.96, width=2.0, length=5.0):
return np.array([
[-(length/2. - wheelbase / 2.), width/2. - 1.0], [-(length/2. - wheelbase / 2. - 0.4), width/2.],
[length/2. + wheelbase / 2. - 0.6, width/2.], [length/2. + wheelbase / 2., width/2. - 0.8],
[length/2. + wheelbase / 2., -(width/2. - 0.8)], [length/2. + wheelbase / 2. - 0.6, -width/2.],
[-(length/2. - wheelbase / 2. - 0.4), -width/2.], [-(length/2. - wheelbase / 2.), -(width/2. - 1.0)]])
def read_task(filepath, seq=0):
task = np.loadtxt('{}/{}_task.txt'.format(filepath, seq), delimiter=',')
org, aim = task[0], task[1]
source = RRTStar.StateNode(state=(org[0], -org[1], -np.radians(org[3])))
target = RRTStar.StateNode(state=(aim[0], -aim[1], -np.radians(aim[3])))
return source, target
def read_grid(filepath, seq):
return cv2.imread(filename='{}/{}_gridmap.png'.format(filepath, seq), flags=-1)
def read_ose(filepath, seq):
oseh = np.loadtxt('{}/{}_ose.txt'.format(filepath, seq), delimiter=',')
oseh = [((x[0], x[1], x[2]), ((0., x[3]/3.), (0., x[3]/3.), (0., x[3]/3. * np.pi/3./3.))) for x in oseh]
return oseh
def read_yips(filepath, seq, discrimination=0.7):
yips = np.loadtxt('{}/{}_pred.txt'.format(filepath, seq), delimiter=',')
yips = filter(lambda x: x[-1] > discrimination, yips)
yips = map(center2rear, yips)
yips = [((yip[0], yip[1], yip[2]), ((0.621, 2.146), (0.015, 1.951 * 1.0), (0.005, 0.401 * 1.0))) for yip in yips]
return yips
def set_plot(switch=True):
if switch:
plt.ion()
plt.figure()
plt.gca().set_xticks([])
plt.gca().set_yticks([])
plt.gca().set_aspect('equal')
plt.gca().set_facecolor((0.2, 0.2, 0.2))
plt.gca().set_xlim((-30, 30))
plt.gca().set_ylim((-30, 30))
plt.draw()
def transform(pts, pto):
xyo = np.array([[pto[0]], [pto[1]]])
rot = np.array([[np.cos(pto[2]), -np.sin(pto[2])], [np.sin(pto[2]), np.cos(pto[2])]])
return np.dot(rot, pts) + xyo
def main():
filepath, seq, debug = './test_scenes', 2062, True
rrt_star = BiRRTStar().set_vehicle(contour(), 0.3, 0.25)
heuristic = None
source, target = read_task(filepath, seq)
start = center2rear(deepcopy(source)).gcs2lcs(source.state)
goal = center2rear(deepcopy(target)).gcs2lcs(source.state)
grid_ori = deepcopy(source).gcs2lcs(source.state)
grid_map = read_grid(filepath, seq)
grid_res = 0.1
if debug:
set_plot(debug)
Debugger.plot_grid(grid_map, grid_res)
Debugger().plot_nodes([start, goal])
plt.gca().add_patch(Polygon(
transform(contour().transpose(), start.state).transpose(), True, color='b', fill=False, lw=2.0))
plt.gca().add_patch(Polygon(
transform(contour().transpose(), goal.state).transpose(), True, color='g', fill=False, lw=2.0))
if heuristic:
Debugger.plot_heuristic(heuristic)
plt.draw()
rrt_star.debug = debug
rrt_star.preset(start, goal, grid_map, grid_res, grid_ori, 255, heuristic).planning(100, debug=debug)
Debugger.breaker('Plotting', switch=debug)
if __name__ == '__main__':
main()
| true
| true
|
1c4194c7656fce73d5fad96f635e541689d9d119
| 5,576
|
py
|
Python
|
pp/components/mzi.py
|
flaport/gdsfactory
|
1f2e844c1fe27b9c6340e2d51500fd3358fa16e5
|
[
"MIT"
] | 8
|
2020-08-25T11:25:18.000Z
|
2022-03-27T11:32:11.000Z
|
pp/components/mzi.py
|
flaport/gdsfactory
|
1f2e844c1fe27b9c6340e2d51500fd3358fa16e5
|
[
"MIT"
] | null | null | null |
pp/components/mzi.py
|
flaport/gdsfactory
|
1f2e844c1fe27b9c6340e2d51500fd3358fa16e5
|
[
"MIT"
] | 1
|
2022-03-04T07:03:29.000Z
|
2022-03-04T07:03:29.000Z
|
from typing import Callable, Optional
import pp
from pp.component import Component
from pp.components.bend_circular import bend_circular as bend_circular_function
from pp.components.mmi1x2 import mmi1x2 as mmi1x2_function
from pp.components.waveguide import waveguide as waveguide_function
from pp.port import deco_rename_ports, rename_ports_by_orientation
@deco_rename_ports
@pp.cell
def mzi(
delta_length: float = 10.0,
length_y: float = 4.0,
length_x: float = 0.1,
bend_radius: float = 10.0,
bend90: Callable = bend_circular_function,
waveguide: Callable = waveguide_function,
waveguide_vertical: Optional[Callable] = None,
waveguide_horizontal: Optional[Callable] = None,
splitter: Callable = mmi1x2_function,
combiner: Optional[Callable] = None,
with_splitter: bool = True,
pins: bool = False,
splitter_settings=None,
combiner_settings=None,
) -> Component:
"""Mzi.
Args:
delta_length: bottom arm vertical extra length
length_y: vertical length for both bottom and top arms
length_x: horizontal length
bend_radius: 10.0
bend90: bend_circular
waveguide: waveguide function
waveguide_vertical: waveguide
splitter: splitter function
combiner: combiner function
with_splitter: if False removes splitter
pins: add pins cell and child cells
combiner_settings: settings dict for combiner function
splitter_settings: settings dict for splitter function
.. code::
__Lx__
| |
Ly Lyr
| |
splitter==| |==combiner
| |
Ly Lyr
| |
DL/2 DL/2
| |
|__Lx__|
.. plot::
:include-source:
import pp
c = pp.c.mzi(delta_length=10.)
pp.plotgds(c)
"""
L2 = length_x
L0 = length_y
DL = delta_length
splitter_settings = splitter_settings or {}
combiner_settings = combiner_settings or {}
c = pp.Component()
cp1 = splitter(**splitter_settings)
if combiner:
cp2 = combiner(**combiner_settings)
else:
cp2 = cp1
waveguide_vertical = waveguide_vertical or waveguide
waveguide_horizontal = waveguide_horizontal or waveguide
b90 = bend90(radius=bend_radius)
l0 = waveguide_vertical(length=L0)
cp1 = rename_ports_by_orientation(cp1)
cp2 = rename_ports_by_orientation(cp2)
y1l = cp1.ports["E0"].y
y1r = cp2.ports["E0"].y
y2l = cp1.ports["E1"].y
y2r = cp2.ports["E1"].y
dl = abs(y2l - y1l) # splitter ports distance
dr = abs(y2r - y1r) # cp2 ports distance
delta_length_combiner = dl - dr
assert delta_length_combiner + L0 > 0, (
f"cp1 and cp2 port height offset delta_length ({delta_length_combiner}) +"
f" length_y ({length_y}) >0"
)
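# Editor's note (illustrative arithmetic, not from the original source): if the
# splitter ports sit 4 um apart (dl) and the combiner ports 3 um apart (dr),
# delta_length_combiner is 1 um, and each right-hand vertical waveguide below
# is lengthened by 0.5 um so both arms still land on the combiner ports.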
l0r = waveguide_vertical(length=L0 + delta_length_combiner / 2)
l1 = waveguide_vertical(length=DL / 2)
l2 = waveguide_horizontal(length=L2)
cin = cp1.ref()
cout = c << cp2
# top arm
blt = c << b90
bltl = c << b90
bltr = c << b90
blmr = c << b90 # bend left medium right
l0tl = c << l0
l2t = c << l2
l0tr = c << l0r
blt.connect(port="W0", destination=cin.ports["E1"])
l0tl.connect(port="W0", destination=blt.ports["N0"])
bltl.connect(port="N0", destination=l0tl.ports["E0"])
l2t.connect(port="W0", destination=bltl.ports["W0"])
bltr.connect(port="N0", destination=l2t.ports["E0"])
l0tr.connect(port="W0", destination=bltr.ports["W0"])
blmr.connect(port="W0", destination=l0tr.ports["E0"])
cout.connect(port="E0", destination=blmr.ports["N0"])
# bot arm
blb = c << b90
l0bl = c << l0
l1l = c << l1
blbl = c << b90
l2t = c << l2
brbr = c << b90
l1r = c << l1
l0br = c << l0r
blbmrb = c << b90 # bend left medium right bottom
blb.connect(port="N0", destination=cin.ports["E0"])
l0bl.connect(port="W0", destination=blb.ports["W0"])
l1l.connect(port="W0", destination=l0bl.ports["E0"])
blbl.connect(port="W0", destination=l1l.ports["E0"])
l2t.connect(port="W0", destination=blbl.ports["N0"])
brbr.connect(port="W0", destination=l2t.ports["E0"])
l1r.connect(port="W0", destination=brbr.ports["N0"])
l0br.connect(port="W0", destination=l1r.ports["E0"])
blbmrb.connect(port="N0", destination=l0br.ports["E0"])
blbmrb.connect(port="W0", destination=cout.ports["E1"]) # just for netlist
# west ports
if with_splitter:
c.add(cin)
for port_name, port in cin.ports.items():
if port.angle == 180:
c.add_port(name=port_name, port=port)
else:
c.add_port(name="W1", port=blt.ports["W0"])
c.add_port(name="W0", port=blb.ports["N0"])
# east ports
i = 0
for port_name, port in cout.ports.items():
if port.angle == 0:
c.add_port(name=f"E{i}", port=port)
i += 1
if pins:
pp.add_pins_to_references(c)
return c
if __name__ == "__main__":
delta_length = 116.8 / 2
# print(delta_length)
# c = mzi(delta_length=delta_length, with_splitter=False)
c = mzi(delta_length=10)
print(c.name)
# add_markers(c)
# print(c.ports["E0"].midpoint[1])
# c.plot_netlist()
# print(c.ports.keys())
# print(c.ports["E0"].midpoint)
pp.show(c)
# pp.qp(c)
# print(c.get_settings())
| 28.44898
| 82
| 0.610294
|
from typing import Callable, Optional
import pp
from pp.component import Component
from pp.components.bend_circular import bend_circular as bend_circular_function
from pp.components.mmi1x2 import mmi1x2 as mmi1x2_function
from pp.components.waveguide import waveguide as waveguide_function
from pp.port import deco_rename_ports, rename_ports_by_orientation
@deco_rename_ports
@pp.cell
def mzi(
delta_length: float = 10.0,
length_y: float = 4.0,
length_x: float = 0.1,
bend_radius: float = 10.0,
bend90: Callable = bend_circular_function,
waveguide: Callable = waveguide_function,
waveguide_vertical: Optional[Callable] = None,
waveguide_horizontal: Optional[Callable] = None,
splitter: Callable = mmi1x2_function,
combiner: Optional[Callable] = None,
with_splitter: bool = True,
pins: bool = False,
splitter_settings=None,
combiner_settings=None,
) -> Component:
L2 = length_x
L0 = length_y
DL = delta_length
splitter_settings = splitter_settings or {}
combiner_settings = combiner_settings or {}
c = pp.Component()
cp1 = splitter(**splitter_settings)
if combiner:
cp2 = combiner(**combiner_settings)
else:
cp2 = cp1
waveguide_vertical = waveguide_vertical or waveguide
waveguide_horizontal = waveguide_horizontal or waveguide
b90 = bend90(radius=bend_radius)
l0 = waveguide_vertical(length=L0)
cp1 = rename_ports_by_orientation(cp1)
cp2 = rename_ports_by_orientation(cp2)
y1l = cp1.ports["E0"].y
y1r = cp2.ports["E0"].y
y2l = cp1.ports["E1"].y
y2r = cp2.ports["E1"].y
dl = abs(y2l - y1l)
dr = abs(y2r - y1r)
delta_length_combiner = dl - dr
assert delta_length_combiner + L0 > 0, (
f"cp1 and cp2 port height offset delta_length ({delta_length_combiner}) +"
f" length_y ({length_y}) >0"
)
l0r = waveguide_vertical(length=L0 + delta_length_combiner / 2)
l1 = waveguide_vertical(length=DL / 2)
l2 = waveguide_horizontal(length=L2)
cin = cp1.ref()
cout = c << cp2
blt = c << b90
bltl = c << b90
bltr = c << b90
blmr = c << b90
l0tl = c << l0
l2t = c << l2
l0tr = c << l0r
blt.connect(port="W0", destination=cin.ports["E1"])
l0tl.connect(port="W0", destination=blt.ports["N0"])
bltl.connect(port="N0", destination=l0tl.ports["E0"])
l2t.connect(port="W0", destination=bltl.ports["W0"])
bltr.connect(port="N0", destination=l2t.ports["E0"])
l0tr.connect(port="W0", destination=bltr.ports["W0"])
blmr.connect(port="W0", destination=l0tr.ports["E0"])
cout.connect(port="E0", destination=blmr.ports["N0"])
blb = c << b90
l0bl = c << l0
l1l = c << l1
blbl = c << b90
l2t = c << l2
brbr = c << b90
l1r = c << l1
l0br = c << l0r
blbmrb = c << b90
blb.connect(port="N0", destination=cin.ports["E0"])
l0bl.connect(port="W0", destination=blb.ports["W0"])
l1l.connect(port="W0", destination=l0bl.ports["E0"])
blbl.connect(port="W0", destination=l1l.ports["E0"])
l2t.connect(port="W0", destination=blbl.ports["N0"])
brbr.connect(port="W0", destination=l2t.ports["E0"])
l1r.connect(port="W0", destination=brbr.ports["N0"])
l0br.connect(port="W0", destination=l1r.ports["E0"])
blbmrb.connect(port="N0", destination=l0br.ports["E0"])
blbmrb.connect(port="W0", destination=cout.ports["E1"])
if with_splitter:
c.add(cin)
for port_name, port in cin.ports.items():
if port.angle == 180:
c.add_port(name=port_name, port=port)
else:
c.add_port(name="W1", port=blt.ports["W0"])
c.add_port(name="W0", port=blb.ports["N0"])
i = 0
for port_name, port in cout.ports.items():
if port.angle == 0:
c.add_port(name=f"E{i}", port=port)
i += 1
if pins:
pp.add_pins_to_references(c)
return c
if __name__ == "__main__":
delta_length = 116.8 / 2
c = mzi(delta_length=10)
print(c.name)
pp.show(c)
| true
| true
|
1c419683f0766e2e4f3d5773217d2f84c05adb42
| 19,486
|
py
|
Python
|
code/apps/Managed Software Center/Managed Software Center/AlertController.py
|
erikng/munki
|
24dc96512f41fa3fa7a5cf064fbbedc9f2d71e14
|
[
"Apache-2.0"
] | 1
|
2018-07-25T21:29:43.000Z
|
2018-07-25T21:29:43.000Z
|
code/apps/Managed Software Center/Managed Software Center/AlertController.py
|
bruienne/munki
|
55936d96ed2f45ede1469873836d61596486020a
|
[
"Apache-2.0"
] | null | null | null |
code/apps/Managed Software Center/Managed Software Center/AlertController.py
|
bruienne/munki
|
55936d96ed2f45ede1469873836d61596486020a
|
[
"Apache-2.0"
] | null | null | null |
# encoding: utf-8
#
# AlertController.py
# Managed Software Center
#
# Created by Greg Neagle on 2/25/14.
#
import os
#import sys
import munki
import msclog
import MunkiItems
from objc import nil
from AppKit import *
from Foundation import *
from PyObjCTools import AppHelper
# Disable PyLint complaining about 'invalid' camelCase names
# pylint: disable=C0103
class AlertController(NSObject):
'''An object that handles some of our alerts, if for no other reason
than to move a giant bunch of ugly code out of the WindowController'''
def setWindow_(self, the_window):
'''Store our parent window'''
self.window = the_window
def forcedLogoutWarning(self, notification_obj):
'''Display a forced logout warning'''
NSApp.activateIgnoringOtherApps_(True)
info = notification_obj.userInfo()
moreText = NSLocalizedString(
u"All pending updates will be installed. Unsaved work will be lost."
"\nYou may avoid the forced logout by logging out now.",
u"Forced Logout warning detail")
logout_time = None
if info:
logout_time = info.get('logout_time')
elif munki.thereAreUpdatesToBeForcedSoon():
logout_time = munki.earliestForceInstallDate()
if not logout_time:
return
time_til_logout = int(logout_time.timeIntervalSinceNow() / 60)
if time_til_logout > 55:
deadline_str = munki.stringFromDate(logout_time)
msclog.log("user", "forced_logout_warning_initial")
formatString = NSLocalizedString(
u"A logout will be forced at approximately %s.",
u"Logout warning string when logout is an hour or more away")
infoText = formatString % deadline_str + u"\n" + moreText
elif time_til_logout > 0:
msclog.log("user", "forced_logout_warning_%s" % time_til_logout)
formatString = NSLocalizedString(
u"A logout will be forced in less than %s minutes.",
u"Logout warning string when logout is in < 60 minutes")
infoText = formatString % time_til_logout + u"\n" + moreText
else:
msclog.log("user", "forced_logout_warning_final")
infoText = NSLocalizedString(
u"A logout will be forced in less than a minute.\nAll pending "
"updates will be installed. Unsaved work will be lost.",
u"Logout warning string when logout is in less than a minute")
# Set the OK button to default, unless less than 5 minutes to logout
# in which case only the Logout button should be displayed.
self._force_warning_logout_btn = NSLocalizedString(
u"Log out and update now", u"Logout and Update Now button text")
self._force_warning_ok_btn = NSLocalizedString(u"OK",
u"OK button title")
if time_til_logout > 5:
self._force_warning_btns = {
NSAlertDefaultReturn: self._force_warning_ok_btn,
NSAlertAlternateReturn: self._force_warning_logout_btn,
}
else:
self._force_warning_btns = {
NSAlertDefaultReturn: self._force_warning_logout_btn,
NSAlertAlternateReturn: nil,
}
if self.window.attachedSheet():
# there's an existing sheet open
NSApp.endSheet_(self.window.attachedSheet())
alert = NSAlert.alertWithMessageText_defaultButton_alternateButton_otherButton_informativeTextWithFormat_(
NSLocalizedString(
u"Forced Logout for Mandatory Install",
u"Forced Logout title text"),
self._force_warning_btns[NSAlertDefaultReturn],
self._force_warning_btns[NSAlertAlternateReturn],
nil,
u"%@", infoText)
alert.beginSheetModalForWindow_modalDelegate_didEndSelector_contextInfo_(
self.window, self,
self.forceLogoutWarningDidEnd_returnCode_contextInfo_, nil)
@AppHelper.endSheetMethod
def forceLogoutWarningDidEnd_returnCode_contextInfo_(
self, alert, returncode, contextinfo):
'''Called when the forced logout warning alert ends'''
btn_pressed = self._force_warning_btns.get(returncode)
if btn_pressed == self._force_warning_logout_btn:
msclog.log("user", "install_with_logout")
result = munki.logoutAndUpdate()
elif btn_pressed == self._force_warning_ok_btn:
msclog.log("user", "dismissed_forced_logout_warning")
def alertToExtraUpdates(self):
'''Notify user of additional pending updates'''
msclog.log("user", "extra_updates_pending")
alert = NSAlert.alertWithMessageText_defaultButton_alternateButton_otherButton_informativeTextWithFormat_(
NSLocalizedString(
u"Additional Pending Updates",
u"Additional Pending Updates title"),
NSLocalizedString(u"OK", u"OK button title"),
nil,
nil,
u"%@", NSLocalizedString(
u"There are additional pending updates to install or remove.",
u"Additional Pending Updates detail")
)
alert.beginSheetModalForWindow_modalDelegate_didEndSelector_contextInfo_(
self.window, self,
self.extraUpdatesAlertDidEnd_returnCode_contextInfo_, nil)
@AppHelper.endSheetMethod
def extraUpdatesAlertDidEnd_returnCode_contextInfo_(
self, alert, returncode, contextinfo):
'''Called when the extra updates alert ends'''
pass
def confirmUpdatesAndInstall(self):
'''Make sure it's OK to proceed with installing if logout or restart is
required'''
if self.alertedToMultipleUsers():
return
elif MunkiItems.updatesRequireRestart():
alert = NSAlert.alertWithMessageText_defaultButton_alternateButton_otherButton_informativeTextWithFormat_(
NSLocalizedString(u"Restart Required",
u"Restart Required title"),
NSLocalizedString(u"Log out and update",
u"Log out and Update button text"),
NSLocalizedString(u"Cancel",
u"Cancel button title/short action text"),
nil,
u"%@", NSLocalizedString(
u"A restart is required after updating. Please be patient "
"as there may be a short delay at the login window. Log "
"out and update now?", u"Restart Required detail")
)
alert.beginSheetModalForWindow_modalDelegate_didEndSelector_contextInfo_(
self.window, self,
self.logoutAlertDidEnd_returnCode_contextInfo_, nil)
elif MunkiItems.updatesRequireLogout() or munki.installRequiresLogout():
alert = NSAlert.alertWithMessageText_defaultButton_alternateButton_otherButton_informativeTextWithFormat_(
NSLocalizedString(u"Logout Required", u"Logout Required title"),
NSLocalizedString(u"Log out and update",
u"Log out and Update button text"),
NSLocalizedString(u"Cancel",
u"Cancel button title/short action text"),
nil,
u"%@", NSLocalizedString(
u"A logout is required before updating. Please be patient "
"as there may be a short delay at the login window. Log "
"out and update now?", u"Logout Required detail")
)
alert.beginSheetModalForWindow_modalDelegate_didEndSelector_contextInfo_(
self.window, self,
self.logoutAlertDidEnd_returnCode_contextInfo_, nil)
else:
# we shouldn't have been invoked if neither a restart nor a logout was
# required
msclog.debug_log(
'confirmUpdatesAndInstall was called but no restart or logout '
'was needed')
@AppHelper.endSheetMethod
def logoutAlertDidEnd_returnCode_contextInfo_(
self, alert, returncode, contextinfo):
'''Called when logout alert ends'''
if returncode == NSAlertDefaultReturn:
# make sure this alert panel is gone before we proceed, which
# might involve opening another alert sheet
alert.window().orderOut_(self)
if self.alertedToFirmwareUpdatesAndCancelled():
msclog.log("user", "alerted_to_firmware_updates_and_cancelled")
return
elif self.alertedToRunningOnBatteryAndCancelled():
msclog.log("user", "alerted_on_battery_power_and_cancelled")
return
msclog.log("user", "install_with_logout")
result = munki.logoutAndUpdate()
if result:
self.installSessionErrorAlert()
elif returncode == NSAlertAlternateReturn:
msclog.log("user", "cancelled")
def installSessionErrorAlert(self):
'''Something has gone wrong and we can't trigger an install at logout'''
msclog.log("user", "install_session_failed")
alertMessageText = NSLocalizedString(
u"Install session failed", u"Install Session Failed title")
detailText = NSLocalizedString(
u"There is a configuration problem with the managed software "
"installer. Could not start the process. Contact your systems "
"administrator.", u"Could Not Start Session message")
alert = NSAlert.alertWithMessageText_defaultButton_alternateButton_otherButton_informativeTextWithFormat_(
alertMessageText, NSLocalizedString(u"OK", u"OK button title"), nil, nil, u"%@", detailText)
alert.beginSheetModalForWindow_modalDelegate_didEndSelector_contextInfo_(
self.window, self,
self.installSessionErrorAlertDidEnd_returnCode_contextInfo_, nil)
@AppHelper.endSheetMethod
def installSessionErrorAlertDidEnd_returnCode_contextInfo_(
self, alert, returncode, contextinfo):
'''Called when installSessionErrorAlert ends'''
pass
def alertedToMultipleUsers(self):
'''Returns True if there are multiple GUI logins; alerts as a side
effect'''
if len(munki.currentGUIusers()) > 1:
msclog.log("MSC", "multiple_gui_users_update_cancelled")
alert = NSAlert.alertWithMessageText_defaultButton_alternateButton_otherButton_informativeTextWithFormat_(
NSLocalizedString(u"Other users logged in",
u"Other Users Logged In title"),
NSLocalizedString(u"Cancel",
u"Cancel button title/short action text"),
nil,
nil,
u"%@", NSLocalizedString(
u"There are other users logged into this computer.\n"
"Updating now could cause other users to lose their "
"work.\n\nPlease try again later after the other users "
"have logged out.", u"Other Users Logged In detail")
)
alert.beginSheetModalForWindow_modalDelegate_didEndSelector_contextInfo_(
self.window, self,
self.multipleUserAlertDidEnd_returnCode_contextInfo_, nil)
return True
else:
return False
@AppHelper.endSheetMethod
def multipleUserAlertDidEnd_returnCode_contextInfo_(
self, alert, returncode, contextinfo):
'''Called when multiple users alert ends'''
pass
def alertedToBlockingAppsRunning(self):
'''Returns True if blocking_apps are running; alerts as a side-effect'''
apps_to_check = []
for update_item in MunkiItems.getUpdateList():
if 'blocking_applications' in update_item:
apps_to_check.extend(update_item['blocking_applications'])
else:
apps_to_check.extend(
[os.path.basename(item.get('path'))
for item in update_item.get('installs', [])
if item['type'] == 'application']
)
running_apps = munki.getRunningBlockingApps(apps_to_check)
if running_apps:
current_user = munki.getconsoleuser()
other_users_apps = [item['display_name'] for item in running_apps
if item['user'] != current_user]
my_apps = [item['display_name'] for item in running_apps
if item['user'] == current_user]
msclog.log(
"MSC", "conflicting_apps", ','.join(other_users_apps + my_apps))
if other_users_apps:
detailText = NSLocalizedString(
u"Other logged in users are using the following "
"applications. Try updating later when they are no longer "
"in use:\n\n%s",
u"Other Users Blocking Apps Running detail")
alert = NSAlert.alertWithMessageText_defaultButton_alternateButton_otherButton_informativeTextWithFormat_(
NSLocalizedString(
u"Applications in use by others",
u"Other Users Blocking Apps Running title"),
NSLocalizedString(u"OK", u'OKButtonText'),
nil,
nil,
u"%@", detailText % u'\n'.join(set(other_users_apps))
)
else:
detailText = NSLocalizedString(
u"You must quit the following applications before "
"proceeding with installation or removal:\n\n%s",
u"Blocking Apps Running detail")
alert = NSAlert.alertWithMessageText_defaultButton_alternateButton_otherButton_informativeTextWithFormat_(
NSLocalizedString(
u"Conflicting applications running",
u"Blocking Apps Running title"),
NSLocalizedString(u"OK", u"OK button title"),
nil,
nil,
u"%@", detailText % u'\n'.join(set(my_apps))
)
alert.beginSheetModalForWindow_modalDelegate_didEndSelector_contextInfo_(
self.window, self,
self.blockingAppsRunningAlertDidEnd_returnCode_contextInfo_,
nil)
return True
else:
return False
@AppHelper.endSheetMethod
def blockingAppsRunningAlertDidEnd_returnCode_contextInfo_(
self, alert, returncode, contextinfo):
'''Called when blocking apps alert ends'''
pass
def getFirmwareAlertInfo(self):
'''Get detail about a firmware update'''
info = []
for update_item in MunkiItems.getUpdateList():
if 'firmware_alert_text' in update_item:
info_item = {}
info_item['name'] = update_item.get('display_name', 'name')
alert_text = update_item['firmware_alert_text']
if alert_text == u'_DEFAULT_FIRMWARE_ALERT_TEXT_':
# substitute localized default alert text
alert_text = NSLocalizedString(
(u"Firmware will be updated on your computer. "
"Your computer's power cord must be connected "
"and plugged into a working power source. "
"It may take several minutes for the update to "
"complete. Do not disturb or shut off the power "
"on your computer during this update."),
u"Firmware Alert Default detail")
info_item['alert_text'] = alert_text
info.append(info_item)
return info
def alertedToFirmwareUpdatesAndCancelled(self):
'''Returns True if we have one or more firmware updates and
the user clicks the Cancel button'''
firmware_alert_info = self.getFirmwareAlertInfo()
if not firmware_alert_info:
return False
power_info = munki.getPowerInfo()
on_battery_power = (power_info.get('PowerSource') == 'Battery Power')
for item in firmware_alert_info:
alert = NSAlert.alertWithMessageText_defaultButton_alternateButton_otherButton_informativeTextWithFormat_(
item['name'],
NSLocalizedString(u"Continue", u"Continue button text"),
NSLocalizedString(u"Cancel", u"Cancel button title/short action text"),
nil,
u"")
if on_battery_power:
alert_text = NSLocalizedString(
u"Your computer is not connected to a power source.",
u"No Power Source Warning text")
alert_text += "\n\n" + item['alert_text']
else:
alert_text = item['alert_text']
alert.setInformativeText_(alert_text)
alert.setAlertStyle_(NSCriticalAlertStyle)
if on_battery_power:
# set Cancel button to be activated by return key
alert.buttons()[1].setKeyEquivalent_('\r')
# set Continue button to be activated by Escape key
alert.buttons()[0].setKeyEquivalent_(chr(27))
buttonPressed = alert.runModal()
if buttonPressed == NSAlertAlternateReturn:
return True
return False
def alertedToRunningOnBatteryAndCancelled(self):
'''Returns True if we are running on battery and user clicks
the Cancel button'''
power_info = munki.getPowerInfo()
if (power_info.get('PowerSource') == 'Battery Power'
and power_info.get('BatteryCharge', 0) < 50):
alert = NSAlert.alertWithMessageText_defaultButton_alternateButton_otherButton_informativeTextWithFormat_(
NSLocalizedString(
u"Your computer is not connected to a power source.",
u"No Power Source Warning text"),
NSLocalizedString(u"Continue", u"Continue button text"),
NSLocalizedString(u"Cancel",
u"Cancel button title/short action text"),
nil,
u"%@", NSLocalizedString(
u"For best results, you should connect your computer to a "
"power source before updating. Are you sure you want to "
"continue the update?", u"No Power Source Warning detail")
)
msclog.log("MSU", "alert_on_battery_power")
# making UI consistent with Apple Software Update...
# set Cancel button to be activated by return key
alert.buttons()[1].setKeyEquivalent_('\r')
# set Continue button to be activated by Escape key
alert.buttons()[0].setKeyEquivalent_(chr(27))
buttonPressed = alert.runModal()
if buttonPressed == NSAlertAlternateReturn:
return True
return False
| 48.232673
| 122
| 0.605563
|
import os
import munki
import msclog
import MunkiItems
from objc import nil
from AppKit import *
from Foundation import *
from PyObjCTools import AppHelper
class AlertController(NSObject):
def setWindow_(self, the_window):
self.window = the_window
def forcedLogoutWarning(self, notification_obj):
NSApp.activateIgnoringOtherApps_(True)
info = notification_obj.userInfo()
moreText = NSLocalizedString(
u"All pending updates will be installed. Unsaved work will be lost."
"\nYou may avoid the forced logout by logging out now.",
u"Forced Logout warning detail")
logout_time = None
if info:
logout_time = info.get('logout_time')
elif munki.thereAreUpdatesToBeForcedSoon():
logout_time = munki.earliestForceInstallDate()
if not logout_time:
return
time_til_logout = int(logout_time.timeIntervalSinceNow() / 60)
if time_til_logout > 55:
deadline_str = munki.stringFromDate(logout_time)
msclog.log("user", "forced_logout_warning_initial")
formatString = NSLocalizedString(
u"A logout will be forced at approximately %s.",
u"Logout warning string when logout is an hour or more away")
infoText = formatString % deadline_str + u"\n" + moreText
elif time_til_logout > 0:
msclog.log("user", "forced_logout_warning_%s" % time_til_logout)
formatString = NSLocalizedString(
u"A logout will be forced in less than %s minutes.",
u"Logout warning string when logout is in < 60 minutes")
infoText = formatString % time_til_logout + u"\n" + moreText
else:
msclog.log("user", "forced_logout_warning_final")
infoText = NSLocalizedString(
u"A logout will be forced in less than a minute.\nAll pending "
"updates will be installed. Unsaved work will be lost.",
u"Logout warning string when logout is in less than a minute")
self._force_warning_logout_btn = NSLocalizedString(
u"Log out and update now", u"Logout and Update Now button text")
self._force_warning_ok_btn = NSLocalizedString(u"OK",
u"OK button title")
if time_til_logout > 5:
self._force_warning_btns = {
NSAlertDefaultReturn: self._force_warning_ok_btn,
NSAlertAlternateReturn: self._force_warning_logout_btn,
}
else:
self._force_warning_btns = {
NSAlertDefaultReturn: self._force_warning_logout_btn,
NSAlertAlternateReturn: nil,
}
if self.window.attachedSheet():
NSApp.endSheet_(self.window.attachedSheet())
alert = NSAlert.alertWithMessageText_defaultButton_alternateButton_otherButton_informativeTextWithFormat_(
NSLocalizedString(
u"Forced Logout for Mandatory Install",
u"Forced Logout title text"),
self._force_warning_btns[NSAlertDefaultReturn],
self._force_warning_btns[NSAlertAlternateReturn],
nil,
u"%@", infoText)
alert.beginSheetModalForWindow_modalDelegate_didEndSelector_contextInfo_(
self.window, self,
self.forceLogoutWarningDidEnd_returnCode_contextInfo_, nil)
@AppHelper.endSheetMethod
def forceLogoutWarningDidEnd_returnCode_contextInfo_(
self, alert, returncode, contextinfo):
btn_pressed = self._force_warning_btns.get(returncode)
if btn_pressed == self._force_warning_logout_btn:
msclog.log("user", "install_with_logout")
result = munki.logoutAndUpdate()
elif btn_pressed == self._force_warning_ok_btn:
msclog.log("user", "dismissed_forced_logout_warning")
def alertToExtraUpdates(self):
msclog.log("user", "extra_updates_pending")
alert = NSAlert.alertWithMessageText_defaultButton_alternateButton_otherButton_informativeTextWithFormat_(
NSLocalizedString(
u"Additional Pending Updates",
u"Additional Pending Updates title"),
NSLocalizedString(u"OK", u"OK button title"),
nil,
nil,
u"%@", NSLocalizedString(
u"There are additional pending updates to install or remove.",
u"Additional Pending Updates detail")
)
alert.beginSheetModalForWindow_modalDelegate_didEndSelector_contextInfo_(
self.window, self,
self.extraUpdatesAlertDidEnd_returnCode_contextInfo_, nil)
@AppHelper.endSheetMethod
def extraUpdatesAlertDidEnd_returnCode_contextInfo_(
self, alert, returncode, contextinfo):
pass
def confirmUpdatesAndInstall(self):
if self.alertedToMultipleUsers():
return
elif MunkiItems.updatesRequireRestart():
alert = NSAlert.alertWithMessageText_defaultButton_alternateButton_otherButton_informativeTextWithFormat_(
NSLocalizedString(u"Restart Required",
u"Restart Required title"),
NSLocalizedString(u"Log out and update",
u"Log out and Update button text"),
NSLocalizedString(u"Cancel",
u"Cancel button title/short action text"),
nil,
u"%@", NSLocalizedString(
u"A restart is required after updating. Please be patient "
"as there may be a short delay at the login window. Log "
"out and update now?", u"Restart Required detail")
)
alert.beginSheetModalForWindow_modalDelegate_didEndSelector_contextInfo_(
self.window, self,
self.logoutAlertDidEnd_returnCode_contextInfo_, nil)
elif MunkiItems.updatesRequireLogout() or munki.installRequiresLogout():
alert = NSAlert.alertWithMessageText_defaultButton_alternateButton_otherButton_informativeTextWithFormat_(
NSLocalizedString(u"Logout Required", u"Logout Required title"),
NSLocalizedString(u"Log out and update",
u"Log out and Update button text"),
NSLocalizedString(u"Cancel",
u"Cancel button title/short action text"),
nil,
u"%@", NSLocalizedString(
u"A logout is required before updating. Please be patient "
"as there may be a short delay at the login window. Log "
"out and update now?", u"Logout Required detail")
)
alert.beginSheetModalForWindow_modalDelegate_didEndSelector_contextInfo_(
self.window, self,
self.logoutAlertDidEnd_returnCode_contextInfo_, nil)
else:
# we shouldn't have been invoked if neither a restart nor a logout was needed
msclog.debug_log(
'confirmUpdatesAndInstall was called but no restart or logout '
'was needed')
@AppHelper.endSheetMethod
def logoutAlertDidEnd_returnCode_contextInfo_(
self, alert, returncode, contextinfo):
if returncode == NSAlertDefaultReturn:
alert.window().orderOut_(self)
if self.alertedToFirmwareUpdatesAndCancelled():
msclog.log("user", "alerted_to_firmware_updates_and_cancelled")
return
elif self.alertedToRunningOnBatteryAndCancelled():
msclog.log("user", "alerted_on_battery_power_and_cancelled")
return
msclog.log("user", "install_with_logout")
result = munki.logoutAndUpdate()
if result:
self.installSessionErrorAlert()
elif returncode == NSAlertAlternateReturn:
msclog.log("user", "cancelled")
def installSessionErrorAlert(self):
msclog.log("user", "install_session_failed")
alertMessageText = NSLocalizedString(
u"Install session failed", u"Install Session Failed title")
detailText = NSLocalizedString(
u"There is a configuration problem with the managed software "
"installer. Could not start the process. Contact your systems "
"administrator.", u"Could Not Start Session message")
alert = NSAlert.alertWithMessageText_defaultButton_alternateButton_otherButton_informativeTextWithFormat_(
alertMessageText, OKButtonTitle, nil, nil, u"%@", detailText)
alert.beginSheetModalForWindow_modalDelegate_didEndSelector_contextInfo_(
self.window, self,
self.installSessionErrorAlertDidEnd_returnCode_contextInfo_, nil)
@AppHelper.endSheetMethod
def installSessionErrorAlertDidEnd_returnCode_contextInfo_(
self, alert, returncode, contextinfo):
pass
def alertedToMultipleUsers(self):
if len(munki.currentGUIusers()) > 1:
msclog.log("MSC", "multiple_gui_users_update_cancelled")
alert = NSAlert.alertWithMessageText_defaultButton_alternateButton_otherButton_informativeTextWithFormat_(
NSLocalizedString(u"Other users logged in",
u"Other Users Logged In title"),
NSLocalizedString(u"Cancel",
u"Cancel button title/short action text"),
nil,
nil,
u"%@", NSLocalizedString(
u"There are other users logged into this computer.\n"
"Updating now could cause other users to lose their "
"work.\n\nPlease try again later after the other users "
"have logged out.", u"Other Users Logged In detail")
)
alert.beginSheetModalForWindow_modalDelegate_didEndSelector_contextInfo_(
self.window, self,
self.multipleUserAlertDidEnd_returnCode_contextInfo_, nil)
return True
else:
return False
@AppHelper.endSheetMethod
def multipleUserAlertDidEnd_returnCode_contextInfo_(
self, alert, returncode, contextinfo):
pass
def alertedToBlockingAppsRunning(self):
apps_to_check = []
for update_item in MunkiItems.getUpdateList():
if 'blocking_applications' in update_item:
apps_to_check.extend(update_item['blocking_applications'])
else:
apps_to_check.extend(
[os.path.basename(item.get('path'))
for item in update_item.get('installs', [])
if item['type'] == 'application']
)
running_apps = munki.getRunningBlockingApps(apps_to_check)
if running_apps:
current_user = munki.getconsoleuser()
other_users_apps = [item['display_name'] for item in running_apps
if item['user'] != current_user]
my_apps = [item['display_name'] for item in running_apps
if item['user'] == current_user]
msclog.log(
"MSC", "conflicting_apps", ','.join(other_users_apps + my_apps))
if other_users_apps:
detailText = NSLocalizedString(
u"Other logged in users are using the following "
"applications. Try updating later when they are no longer "
"in use:\n\n%s",
u"Other Users Blocking Apps Running detail")
alert = NSAlert.alertWithMessageText_defaultButton_alternateButton_otherButton_informativeTextWithFormat_(
NSLocalizedString(
u"Applications in use by others",
u"Other Users Blocking Apps Running title"),
NSLocalizedString(u"OK", u'OKButtonText'),
nil,
nil,
u"%@", detailText % u'\n'.join(set(other_users_apps))
)
else:
detailText = NSLocalizedString(
u"You must quit the following applications before "
"proceeding with installation or removal:\n\n%s",
u"Blocking Apps Running detail")
alert = NSAlert.alertWithMessageText_defaultButton_alternateButton_otherButton_informativeTextWithFormat_(
NSLocalizedString(
u"Conflicting applications running",
u"Blocking Apps Running title"),
NSLocalizedString(u"OK", u"OK button title"),
nil,
nil,
u"%@", detailText % u'\n'.join(set(my_apps))
)
alert.beginSheetModalForWindow_modalDelegate_didEndSelector_contextInfo_(
self.window, self,
self.blockingAppsRunningAlertDidEnd_returnCode_contextInfo_,
nil)
return True
else:
return False
@AppHelper.endSheetMethod
def blockingAppsRunningAlertDidEnd_returnCode_contextInfo_(
self, alert, returncode, contextinfo):
pass
def getFirmwareAlertInfo(self):
info = []
for update_item in MunkiItems.getUpdateList():
if 'firmware_alert_text' in update_item:
info_item = {}
info_item['name'] = update_item.get('display_name', 'name')
alert_text = update_item['firmware_alert_text']
if alert_text == u'_DEFAULT_FIRMWARE_ALERT_TEXT_':
alert_text = NSLocalizedString(
(u"Firmware will be updated on your computer. "
"Your computer's power cord must be connected "
"and plugged into a working power source. "
"It may take several minutes for the update to "
"complete. Do not disturb or shut off the power "
"on your computer during this update."),
u"Firmware Alert Default detail")
info_item['alert_text'] = alert_text
info.append(info_item)
return info
def alertedToFirmwareUpdatesAndCancelled(self):
firmware_alert_info = self.getFirmwareAlertInfo()
if not firmware_alert_info:
return False
power_info = munki.getPowerInfo()
on_battery_power = (power_info.get('PowerSource') == 'Battery Power')
for item in firmware_alert_info:
alert = NSAlert.alertWithMessageText_defaultButton_alternateButton_otherButton_informativeTextWithFormat_(
item['name'],
NSLocalizedString(u"Continue", u"Continue button text"),
NSLocalizedString(u"Cancel", u"Cancel button title/short action text"),
nil,
u"")
if on_battery_power:
alert_text = NSLocalizedString(
u"Your computer is not connected to a power source.",
u"No Power Source Warning text")
alert_text += "\n\n" + item['alert_text']
else:
alert_text = item['alert_text']
alert.setInformativeText_(alert_text)
alert.setAlertStyle_(NSCriticalAlertStyle)
if on_battery_power:
# set Cancel button to be activated by return key
alert.buttons()[1].setKeyEquivalent_('\r')
# set Continue button to be activated by Escape key
alert.buttons()[0].setKeyEquivalent_(chr(27))
buttonPressed = alert.runModal()
if buttonPressed == NSAlertAlternateReturn:
return True
return False
def alertedToRunningOnBatteryAndCancelled(self):
power_info = munki.getPowerInfo()
if (power_info.get('PowerSource') == 'Battery Power'
and power_info.get('BatteryCharge', 0) < 50):
alert = NSAlert.alertWithMessageText_defaultButton_alternateButton_otherButton_informativeTextWithFormat_(
NSLocalizedString(
u"Your computer is not connected to a power source.",
u"No Power Source Warning text"),
NSLocalizedString(u"Continue", u"Continue button text"),
NSLocalizedString(u"Cancel",
u"Cancel button title/short action text"),
nil,
u"%@", NSLocalizedString(
u"For best results, you should connect your computer to a "
"power source before updating. Are you sure you want to "
"continue the update?", u"No Power Source Warning detail")
)
msclog.log("MSU", "alert_on_battery_power")
# making UI consistent with Apple Software Update...
# set Cancel button to be activated by return key
alert.buttons()[1].setKeyEquivalent_('\r')
# set Continue button to be activated by Escape key
alert.buttons()[0].setKeyEquivalent_(chr(27))
buttonPressed = alert.runModal()
if buttonPressed == NSAlertAlternateReturn:
return True
return False
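# Hedged illustration (not part of the original source): the gate in
# alertedToRunningOnBatteryAndCancelled above only warns when the machine is
# on battery power *and* the charge is below 50%. A standalone sketch of that
# predicate, with hypothetical sample inputs:
def _should_warn_on_battery(power_info):
    return (power_info.get('PowerSource') == 'Battery Power'
            and power_info.get('BatteryCharge', 0) < 50)

assert _should_warn_on_battery({'PowerSource': 'Battery Power', 'BatteryCharge': 30})
assert not _should_warn_on_battery({'PowerSource': 'AC Power', 'BatteryCharge': 100})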
| true
| true
|
1c4196b797c7b2af1a73d65d76478c267982039c
| 2,093
|
py
|
Python
|
dpkt/aoe.py
|
sergedroz/dpkt
|
90928c5baaf36e76d8b62a973924af3c96b9e160
|
[
"BSD-3-Clause"
] | null | null | null |
dpkt/aoe.py
|
sergedroz/dpkt
|
90928c5baaf36e76d8b62a973924af3c96b9e160
|
[
"BSD-3-Clause"
] | null | null | null |
dpkt/aoe.py
|
sergedroz/dpkt
|
90928c5baaf36e76d8b62a973924af3c96b9e160
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""ATA over Ethernet Protocol."""
from __future__ import absolute_import
import struct
from . import dpkt
from .compat import iteritems
class AOE(dpkt.Packet):
"""ATA over Ethernet Protocol.
See more about AOE at
https://en.wikipedia.org/wiki/ATA_over_Ethernet
Attributes:
__hdr__: Header fields of AOE.
data: Message data.
"""
__hdr__ = (
('ver_fl', 'B', 0x10),
('err', 'B', 0),
('maj', 'H', 0),
('min', 'B', 0),
('cmd', 'B', 0),
('tag', 'I', 0),
)
_cmdsw = {}
@property
def ver(self):
return self.ver_fl >> 4
@ver.setter
def ver(self, ver):
self.ver_fl = (ver << 4) | (self.ver_fl & 0xf)
@property
def fl(self):
return self.ver_fl & 0xf
@fl.setter
def fl(self, fl):
self.ver_fl = (self.ver_fl & 0xf0) | fl
@classmethod
def set_cmd(cls, cmd, pktclass):
cls._cmdsw[cmd] = pktclass
@classmethod
def get_cmd(cls, cmd):
return cls._cmdsw[cmd]
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
try:
self.data = self._cmdsw[self.cmd](self.data)
setattr(self, self.data.__class__.__name__.lower(), self.data)
except (KeyError, struct.error, dpkt.UnpackError):
pass
def pack_hdr(self):
try:
return dpkt.Packet.pack_hdr(self)
except struct.error as e:
raise dpkt.PackError(str(e))
AOE_CMD_ATA = 0
AOE_CMD_CFG = 1
AOE_FLAG_RSP = 1 << 3
def _load_cmds():
prefix = 'AOE_CMD_'
g = globals()
for k, v in iteritems(g):
if k.startswith(prefix):
name = 'aoe' + k[len(prefix):].lower()
try:
mod = __import__(name, g, level=1)
AOE.set_cmd(v, getattr(mod, name.upper()))
except (ImportError, AttributeError):
continue
def _mod_init():
"""Post-initialization called when all dpkt modules are fully loaded"""
if not AOE._cmdsw:
_load_cmds()
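# Hedged usage sketch (illustration only, not part of the original module):
# round-trip a header through the field layout declared in __hdr__ above.
# dpkt packs headers in network byte order, so the raw layout is '>BBHBBI'.
_raw = struct.pack('>BBHBBI', 0x10, 0, 7, 2, AOE_CMD_CFG, 0xdeadbeef)
_aoe = AOE(_raw)
assert _aoe.ver == 1 and _aoe.fl == 0
assert (_aoe.maj, _aoe.min, _aoe.cmd, _aoe.tag) == (7, 2, AOE_CMD_CFG, 0xdeadbeef)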
| 22.265957
| 75
| 0.553273
|
from __future__ import absolute_import
import struct
from . import dpkt
from .compat import iteritems
class AOE(dpkt.Packet):
__hdr__ = (
('ver_fl', 'B', 0x10),
('err', 'B', 0),
('maj', 'H', 0),
('min', 'B', 0),
('cmd', 'B', 0),
('tag', 'I', 0),
)
_cmdsw = {}
@property
def ver(self):
return self.ver_fl >> 4
@ver.setter
def ver(self, ver):
self.ver_fl = (ver << 4) | (self.ver_fl & 0xf)
@property
def fl(self):
return self.ver_fl & 0xf
@fl.setter
def fl(self, fl):
self.ver_fl = (self.ver_fl & 0xf0) | fl
@classmethod
def set_cmd(cls, cmd, pktclass):
cls._cmdsw[cmd] = pktclass
@classmethod
def get_cmd(cls, cmd):
return cls._cmdsw[cmd]
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
try:
self.data = self._cmdsw[self.cmd](self.data)
setattr(self, self.data.__class__.__name__.lower(), self.data)
except (KeyError, struct.error, dpkt.UnpackError):
pass
def pack_hdr(self):
try:
return dpkt.Packet.pack_hdr(self)
except struct.error as e:
raise dpkt.PackError(str(e))
AOE_CMD_ATA = 0
AOE_CMD_CFG = 1
AOE_FLAG_RSP = 1 << 3
def _load_cmds():
prefix = 'AOE_CMD_'
g = globals()
for k, v in iteritems(g):
if k.startswith(prefix):
name = 'aoe' + k[len(prefix):].lower()
try:
mod = __import__(name, g, level=1)
AOE.set_cmd(v, getattr(mod, name.upper()))
except (ImportError, AttributeError):
continue
def _mod_init():
if not AOE._cmdsw:
_load_cmds()
| true
| true
|
1c419735e1b86b5592261ddb7ee7d85c8a498907
| 8,576
|
py
|
Python
|
python/smqtk/representation/data_set/memory_set.py
|
joshanderson-kw/SMQTK
|
594e7c733fe7f4e514a1a08a7343293a883a41fc
|
[
"BSD-3-Clause"
] | 82
|
2015-01-07T15:33:29.000Z
|
2021-08-11T18:34:05.000Z
|
python/smqtk/representation/data_set/memory_set.py
|
joshanderson-kw/SMQTK
|
594e7c733fe7f4e514a1a08a7343293a883a41fc
|
[
"BSD-3-Clause"
] | 230
|
2015-04-08T14:36:51.000Z
|
2022-03-14T17:55:30.000Z
|
python/smqtk/representation/data_set/memory_set.py
|
joshanderson-kw/SMQTK
|
594e7c733fe7f4e514a1a08a7343293a883a41fc
|
[
"BSD-3-Clause"
] | 65
|
2015-01-04T15:00:16.000Z
|
2021-11-19T18:09:11.000Z
|
import threading
from six.moves import cPickle as pickle
from smqtk.exceptions import ReadOnlyError
from smqtk.representation import DataElement, DataSet
from smqtk.utils import SimpleTimer
from smqtk.utils.configuration import (
from_config_dict,
make_default_config,
to_config_dict
)
from smqtk.utils.dict import merge_dict
class DataMemorySet (DataSet):
"""
In-memory DataSet implementation.
This implementation maintains an in-memory mapping of stored DataElement
original UUID to the DataElement instance.
An optional writable DataElement may be provided to which the current set's
map state is cached. This cache is updated every time new data elements are
added to this set.
"""
@classmethod
def is_usable(cls):
"""
Check whether this data set implementation is available for use.
This is always true for this implementation as there are no required 3rd
party dependencies.
:return: Boolean determination of whether this implementation is usable.
:rtype: bool
"""
return True
@classmethod
def get_default_config(cls):
"""
Generate and return a default configuration dictionary for this class.
This will be primarily used for generating what the configuration
dictionary would look like for this class without instantiating it.
By default, we observe what this class's constructor takes as arguments,
turning those argument names into configuration dictionary keys. If any
of those arguments have defaults, we will add those values into the
configuration dictionary appropriately. The dictionary returned should
only contain JSON compliant value types.
It is not guaranteed that the configuration dictionary returned
from this method is valid for construction of an instance of this class.
:return: Default configuration dictionary for the class.
:rtype: dict
"""
c = super(DataMemorySet, cls).get_default_config()
c['cache_element'] = make_default_config(DataElement.get_impls())
return c
@classmethod
def from_config(cls, c, merge_default=True):
"""
Instantiate a new instance of this class given the configuration
JSON-compliant dictionary encapsulating initialization arguments.
This method should not be called via super unless an instance of the
class is desired.
:param c: JSON compliant dictionary encapsulating
a configuration.
:type c: dict
:param merge_default: Merge the given configuration on top of the
default provided by ``get_default_config``.
:type merge_default: bool
:return: Constructed instance from the provided config.
:rtype: DataMemorySet
"""
if merge_default:
c = merge_dict(cls.get_default_config(), c)
cache_element = None
if c['cache_element'] and c['cache_element']['type']:
cache_element = from_config_dict(c['cache_element'],
DataElement.get_impls())
c['cache_element'] = cache_element
return super(DataMemorySet, cls).from_config(c, False)
def __init__(self, cache_element=None, pickle_protocol=-1):
"""
Initialize a new in-memory data set instance.
:param cache_element: Optional data element to store/load a cache of
this data set's contents into. Cache loading, if the element has
bytes, will occur in this constructor. Cache writing will only occur
after adding one or more elements.
This can be optionally turned on after creating/using this data set
for a while by setting a valid element to the ``cache_element``
attribute and calling the ``.cache()`` method. When
``cache_element`` is not set, the ``cache()`` method does nothing.
:type cache_element: None | smqtk.representation.DataElement
:param pickle_protocol: Pickling protocol to use. We will use -1 by
default (latest version, probably binary).
:type pickle_protocol: int
"""
super(DataMemorySet, self).__init__()
# Mapping of UUIDs to DataElement instances
#: :type: dict[collections.abc.Hashable, DataElement]
self._element_map = {}
self._element_map_lock = threading.RLock()
# Optional path to a file that will act as a cache of our internal
# table
self.cache_element = cache_element
if cache_element and not cache_element.is_empty():
#: :type: dict[collections.abc.Hashable, DataElement]
self._element_map = pickle.loads(cache_element.get_bytes())
self.pickle_protocol = pickle_protocol
def __iter__(self):
"""
:return: Generator over the DataElements contained in this set in no
particular order.
"""
# make a copy of the UUIDs so we don't block between yields, and so we
# aren't walking a possibly modified map
uuids = self.uuids()
with self._element_map_lock:
for k in uuids:
yield self._element_map[k]
def cache(self):
"""
Cache the current table if a cache has been configured.
"""
if self.cache_element:
if self.cache_element.is_read_only():
raise ReadOnlyError("Cache element (%s) is read-only."
% self.cache_element)
with self._element_map_lock:
with SimpleTimer("Caching memory data-set table",
self._log.debug):
self.cache_element.set_bytes(
pickle.dumps(self._element_map, self.pickle_protocol)
)
def get_config(self):
"""
Return the configuration dictionary describing this instance.
:return: JSON type compliant configuration dictionary.
:rtype: dict
"""
c = merge_dict(self.get_default_config(), {
"pickle_protocol": self.pickle_protocol,
})
if self.cache_element:
c['cache_element'] = merge_dict(
c['cache_element'],
to_config_dict(self.cache_element)
)
return c
def count(self):
"""
:return: The number of data elements in this set.
:rtype: int
"""
with self._element_map_lock:
return len(self._element_map)
def uuids(self):
"""
:return: A new set of uuids represented in this data set.
:rtype: set
"""
with self._element_map_lock:
return set(self._element_map)
def has_uuid(self, uuid):
"""
Test if the given uuid refers to an element in this data set.
:param uuid: Unique ID to test for inclusion. This should match the
type that the set implementation expects or cares about.
:return: True if the given uuid matches an element in this set, or
False if it does not.
:rtype: bool
"""
with self._element_map_lock:
return uuid in self._element_map
def add_data(self, *elems):
"""
Add the given data element(s) instance to this data set.
:param elems: Data element(s) to add
:type elems: smqtk.representation.DataElement
"""
with self._element_map_lock:
added_elements = False
for e in elems:
assert isinstance(e, DataElement), \
"Expected DataElement instance, got '%s' instance instead" \
% type(e)
self._element_map[e.uuid()] = e
added_elements = True
if added_elements:
self.cache()
def get_data(self, uuid):
"""
Get the data element the given uuid references, or raise an
exception if the uuid does not reference any element in this set.
:raises KeyError: If the given uuid does not refer to an element in
this data set.
:param uuid: The uuid of the element to retrieve.
:return: The data element instance for the given uuid.
:rtype: smqtk.representation.DataElement
"""
with self._element_map_lock:
return self._element_map[uuid]
DATA_SET_CLASS = DataMemorySet
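# Hedged usage sketch (illustration only, not part of SMQTK): persist the set
# through a writable in-memory cache element and restore it in a new instance.
# Assumes SMQTK's DataMemoryElement; the element contents are hypothetical.
from smqtk.representation.data_element.memory_element import DataMemoryElement

_cache = DataMemoryElement(readonly=False)
_ds = DataMemorySet(cache_element=_cache)
_ds.add_data(DataMemoryElement(b'hello'))        # add_data() triggers cache()
_restored = DataMemorySet(cache_element=_cache)  # reloads the pickled map
assert _restored.uuids() == _ds.uuids()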
| 34.304
| 80
| 0.626049
|
import threading
from six.moves import cPickle as pickle
from smqtk.exceptions import ReadOnlyError
from smqtk.representation import DataElement, DataSet
from smqtk.utils import SimpleTimer
from smqtk.utils.configuration import (
from_config_dict,
make_default_config,
to_config_dict
)
from smqtk.utils.dict import merge_dict
class DataMemorySet (DataSet):
@classmethod
def is_usable(cls):
return True
@classmethod
def get_default_config(cls):
c = super(DataMemorySet, cls).get_default_config()
c['cache_element'] = make_default_config(DataElement.get_impls())
return c
@classmethod
def from_config(cls, c, merge_default=True):
if merge_default:
c = merge_dict(cls.get_default_config(), c)
cache_element = None
if c['cache_element'] and c['cache_element']['type']:
cache_element = from_config_dict(c['cache_element'],
DataElement.get_impls())
c['cache_element'] = cache_element
return super(DataMemorySet, cls).from_config(c, False)
def __init__(self, cache_element=None, pickle_protocol=-1):
super(DataMemorySet, self).__init__()
self._element_map = {}
self._element_map_lock = threading.RLock()
self.cache_element = cache_element
if cache_element and not cache_element.is_empty():
self._element_map = pickle.loads(cache_element.get_bytes())
self.pickle_protocol = pickle_protocol
def __iter__(self):
# copy UUIDs so we aren't walking a possibly modified map
uuids = self.uuids()
with self._element_map_lock:
for k in uuids:
yield self._element_map[k]
def cache(self):
if self.cache_element:
if self.cache_element.is_read_only():
raise ReadOnlyError("Cache element (%s) is read-only."
% self.cache_element)
with self._element_map_lock:
with SimpleTimer("Caching memory data-set table",
self._log.debug):
self.cache_element.set_bytes(
pickle.dumps(self._element_map, self.pickle_protocol)
)
def get_config(self):
c = merge_dict(self.get_default_config(), {
"pickle_protocol": self.pickle_protocol,
})
if self.cache_element:
c['cache_element'] = merge_dict(
c['cache_element'],
to_config_dict(self.cache_element)
)
return c
def count(self):
with self._element_map_lock:
return len(self._element_map)
def uuids(self):
with self._element_map_lock:
return set(self._element_map)
def has_uuid(self, uuid):
with self._element_map_lock:
return uuid in self._element_map
def add_data(self, *elems):
with self._element_map_lock:
added_elements = False
for e in elems:
assert isinstance(e, DataElement), \
"Expected DataElement instance, got '%s' instance instead" \
% type(e)
self._element_map[e.uuid()] = e
added_elements = True
if added_elements:
self.cache()
def get_data(self, uuid):
with self._element_map_lock:
return self._element_map[uuid]
DATA_SET_CLASS = DataMemorySet
| true
| true
|
1c419981242a0c02bdf3b073d38a4aae22d392bb
| 11,458
|
py
|
Python
|
openfl/component/director/director.py
|
katerina-merkulova/openfl
|
acee1877f7dfc0bf22db60a4eda51040b5b46f47
|
[
"Apache-2.0"
] | 1
|
2022-03-29T17:17:05.000Z
|
2022-03-29T17:17:05.000Z
|
openfl/component/director/director.py
|
eceisik/openfl
|
050b8354b698a34b5ef01f0f55f968f52f63f84d
|
[
"Apache-2.0"
] | null | null | null |
openfl/component/director/director.py
|
eceisik/openfl
|
050b8354b698a34b5ef01f0f55f968f52f63f84d
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (C) 2020-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
"""Director module."""
import asyncio
import logging
import time
from collections import defaultdict
from pathlib import Path
from typing import Iterable
from typing import List
from typing import Union
from .experiment import Experiment
from .experiment import ExperimentsRegistry
from .experiment import Status
logger = logging.getLogger(__name__)
ENVOY_HEALTH_CHECK_PERIOD = 60 # in seconds
class Director:
"""Director class."""
def __init__(
self, *,
tls: bool = True,
root_certificate: Union[Path, str] = None,
private_key: Union[Path, str] = None,
certificate: Union[Path, str] = None,
sample_shape: list = None,
target_shape: list = None,
settings: dict = None
) -> None:
"""Initialize a director object."""
self.sample_shape, self.target_shape = sample_shape, target_shape
self._shard_registry = {}
self.tls = tls
self.root_certificate = root_certificate
self.private_key = private_key
self.certificate = certificate
self.experiments_registry = ExperimentsRegistry()
self.settings = settings or {}
self.col_exp_queues = defaultdict(asyncio.Queue)
self.col_exp = {}
def acknowledge_shard(self, shard_info: dict) -> bool:
"""Save shard info to shard registry if it's acceptable."""
is_accepted = False
if (self.sample_shape != shard_info['sample_shape']
or self.target_shape != shard_info['target_shape']):
logger.info('Request was not accepted')
return is_accepted
logger.info('Request was accepted')
self._shard_registry[shard_info['node_info']['name']] = {
'shard_info': shard_info,
'is_online': True,
'is_experiment_running': False
}
is_accepted = True
return is_accepted
async def set_new_experiment(
self, *,
experiment_name: str,
sender_name: str,
tensor_dict: dict,
collaborator_names: Iterable[str],
experiment_archive_path: Path,
) -> bool:
"""Set new experiment."""
experiment = Experiment(
name=experiment_name,
archive_path=experiment_archive_path,
collaborators=list(collaborator_names),
users=[sender_name],
sender=sender_name,
init_tensor_dict=tensor_dict,
)
self.experiments_registry.add(experiment)
return True
def get_trained_model(self, experiment_name: str, caller: str, model_type: str):
"""Get trained model."""
if (experiment_name not in self.experiments_registry
or caller not in self.experiments_registry[experiment_name].users):
logger.error('No experiment data in the stash')
return None
aggregator = self.experiments_registry[experiment_name].aggregator
if aggregator.last_tensor_dict is None:
logger.error('Aggregator has no aggregated model to return')
return None
if model_type == 'best':
return aggregator.best_tensor_dict
elif model_type == 'last':
return aggregator.last_tensor_dict
else:
logger.error('Unknown model type requested.')
return None
def get_experiment_data(self, experiment_name: str) -> Path:
"""Get experiment data."""
return self.experiments_registry[experiment_name].archive_path
async def wait_experiment(self, envoy_name: str) -> str:
"""Wait an experiment."""
self.col_exp[envoy_name] = None
queue = self.col_exp_queues[envoy_name]
experiment_name = await queue.get()
self.col_exp[envoy_name] = experiment_name
return experiment_name
def get_dataset_info(self):
"""Get dataset info."""
return self.sample_shape, self.target_shape
def get_registered_shards(self) -> list: # Why is it here?
"""Get registered shard infos."""
return [shard_status['shard_info'] for shard_status in self._shard_registry.values()]
async def stream_metrics(self, experiment_name: str, caller: str):
"""
Stream metrics from the aggregator.
This method takes the next metric dictionary from the aggregator's queue
and returns it to the caller.
Inputs:
experiment_name - string id for experiment
caller - string id for experiment owner
Returns:
metric_dict - {'metric_origin','task_name','metric_name','metric_value','round'}
if the queue is not empty
None - if the queue is empty but the experiment is still running
Raises:
StopIteration - if the experiment is finished and there are no more metrics to report
"""
if (experiment_name not in self.experiments_registry
or caller not in self.experiments_registry[experiment_name].users):
raise Exception(
f'No experiment name "{experiment_name}" in experiments list, or caller "{caller}"'
f' does not have access to this experiment'
)
while not self.experiments_registry[experiment_name].aggregator:
await asyncio.sleep(1)
aggregator = self.experiments_registry[experiment_name].aggregator
while True:
if not aggregator.metric_queue.empty():
yield aggregator.metric_queue.get()
continue
if aggregator.all_quit_jobs_sent() and aggregator.metric_queue.empty():
return
yield None
def remove_experiment_data(self, experiment_name: str, caller: str):
"""Remove experiment data from stash."""
if (experiment_name in self.experiments_registry
and caller in self.experiments_registry[experiment_name].users):
self.experiments_registry.remove(experiment_name)
def envoy_health_check(
self, *,
envoy_name: str,
is_experiment_running: bool,
cuda_devices_status: list = None,
) -> int:
"""Accept health check from envoy."""
shard_info = self._shard_registry.get(envoy_name)
if not shard_info:
raise Exception(f'Unknown shard {envoy_name}')
hc_period = self.settings.get('envoy_health_check_period', ENVOY_HEALTH_CHECK_PERIOD)
shard_info['is_online'] = True
shard_info['is_experiment_running'] = is_experiment_running
shard_info['valid_duration'] = 2 * hc_period
shard_info['last_updated'] = time.time()
if cuda_devices_status is not None:
for i in range(len(cuda_devices_status)):
shard_info['shard_info']['node_info']['cuda_devices'][i] = cuda_devices_status[i]
return hc_period
def get_envoys(self) -> list:
"""Get a status information about envoys."""
logger.info(f'Shard registry: {self._shard_registry}')
for envoy_info in self._shard_registry.values():
envoy_info['is_online'] = (
time.time() < envoy_info['last_updated'] + envoy_info['valid_duration']
)
envoy_name = envoy_info['shard_info']['node_info']['name']
envoy_info['experiment_name'] = self.col_exp[envoy_name]
return self._shard_registry.values()
def get_experiments_list(self, caller: str) -> list:
"""Get experiments list for specific user."""
experiments = self.experiments_registry.get_user_experiments(caller)
result = []
for exp in experiments:
exp_data = {
'name': exp.name,
'status': exp.status,
'collaborators_amount': len(exp.collaborators),
}
progress = _get_experiment_progress(exp)
if progress is not None:
exp_data['progress'] = progress
if exp.aggregator:
tasks_amount = len({
task['function']
for task in exp.aggregator.assigner.tasks.values()
})
exp_data['tasks_amount'] = tasks_amount
result.append(exp_data)
return result
def get_experiment_description(self, caller: str, name: str) -> dict:
"""Get a experiment information by name for specific user."""
exp = self.experiments_registry.get(name)
if not exp or caller not in exp.users:
return {}
progress = _get_experiment_progress(exp)
model_statuses = _get_model_download_statuses(exp)
tasks = _get_experiment_tasks(exp)
collaborators = _get_experiment_collaborators(exp)
result = {
'name': name,
'status': exp.status,
'current_round': exp.aggregator.round_number,
'total_rounds': exp.aggregator.rounds_to_train,
'download_statuses': {
'models': model_statuses,
'logs': [{
'name': 'aggregator',
'status': 'ready'
}],
},
'collaborators': collaborators,
'tasks': tasks,
'progress': progress
}
return result
async def start_experiment_execution_loop(self):
"""Run task to monitor and run experiments."""
while True:
async with self.experiments_registry.get_next_experiment() as experiment:
loop = asyncio.get_event_loop()
run_aggregator_future = loop.create_task(experiment.start(
root_certificate=self.root_certificate,
certificate=self.certificate,
private_key=self.private_key,
tls=self.tls,
))
for col_name in experiment.collaborators:
queue = self.col_exp_queues[col_name]
await queue.put(experiment.name)
await run_aggregator_future
def _get_model_download_statuses(experiment) -> List[dict]:
best_model_status = 'ready' if experiment.aggregator.best_tensor_dict else 'pending'
last_model_status = 'ready' if experiment.aggregator.last_tensor_dict else 'pending'
model_statuses = [{
'name': 'best',
'status': best_model_status,
}, {
'name': 'last',
'status': last_model_status,
}, {
'name': 'init',
'status': 'ready'
}]
return model_statuses
def _get_experiment_progress(experiment) -> Union[float, None]:
if experiment.status == Status.IN_PROGRESS:
return experiment.aggregator.round_number / experiment.aggregator.rounds_to_train
def _get_experiment_tasks(experiment) -> List[dict]:
return [{
'name': task['function'],
'description': 'Task description Mock',
} for task in experiment.aggregator.assigner.tasks.values()]
def _get_experiment_collaborators(experiment) -> List[dict]:
return [{
'name': name,
'status': 'pending_mock',
'progress': 0.0,
'round': 0,
'current_task': 'Current Task Mock',
'next_task': 'Next Task Mock'
} for name in experiment.aggregator.authorized_cols]
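# Hedged usage sketch (illustration only, not part of the original module): a
# shard is acknowledged only when its sample/target shapes match the
# director's; the envoy names below are hypothetical.
_director = Director(tls=False, sample_shape=['300', '400', '3'],
                     target_shape=['300', '400'])
assert _director.acknowledge_shard({
    'sample_shape': ['300', '400', '3'],
    'target_shape': ['300', '400'],
    'node_info': {'name': 'envoy-1'},
})
assert not _director.acknowledge_shard({
    'sample_shape': [], 'target_shape': [], 'node_info': {'name': 'envoy-2'},
})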
| 36.724359
| 99
| 0.615902
|
import asyncio
import logging
import time
from collections import defaultdict
from pathlib import Path
from typing import Iterable
from typing import List
from typing import Union
from .experiment import Experiment
from .experiment import ExperimentsRegistry
from .experiment import Status
logger = logging.getLogger(__name__)
ENVOY_HEALTH_CHECK_PERIOD = 60
class Director:
def __init__(
self, *,
tls: bool = True,
root_certificate: Union[Path, str] = None,
private_key: Union[Path, str] = None,
certificate: Union[Path, str] = None,
sample_shape: list = None,
target_shape: list = None,
settings: dict = None
) -> None:
self.sample_shape, self.target_shape = sample_shape, target_shape
self._shard_registry = {}
self.tls = tls
self.root_certificate = root_certificate
self.private_key = private_key
self.certificate = certificate
self.experiments_registry = ExperimentsRegistry()
self.settings = settings or {}
self.col_exp_queues = defaultdict(asyncio.Queue)
self.col_exp = {}
def acknowledge_shard(self, shard_info: dict) -> bool:
is_accepted = False
if (self.sample_shape != shard_info['sample_shape']
or self.target_shape != shard_info['target_shape']):
logger.info('Request was not accepted')
return is_accepted
logger.info('Request was accepted')
self._shard_registry[shard_info['node_info']['name']] = {
'shard_info': shard_info,
'is_online': True,
'is_experiment_running': False
}
is_accepted = True
return is_accepted
async def set_new_experiment(
self, *,
experiment_name: str,
sender_name: str,
tensor_dict: dict,
collaborator_names: Iterable[str],
experiment_archive_path: Path,
) -> bool:
experiment = Experiment(
name=experiment_name,
archive_path=experiment_archive_path,
collaborators=list(collaborator_names),
users=[sender_name],
sender=sender_name,
init_tensor_dict=tensor_dict,
)
self.experiments_registry.add(experiment)
return True
def get_trained_model(self, experiment_name: str, caller: str, model_type: str):
if (experiment_name not in self.experiments_registry
or caller not in self.experiments_registry[experiment_name].users):
logger.error('No experiment data in the stash')
return None
aggregator = self.experiments_registry[experiment_name].aggregator
if aggregator.last_tensor_dict is None:
logger.error('Aggregator has no aggregated model to return')
return None
if model_type == 'best':
return aggregator.best_tensor_dict
elif model_type == 'last':
return aggregator.last_tensor_dict
else:
logger.error('Unknown model type requested.')
return None
def get_experiment_data(self, experiment_name: str) -> Path:
return self.experiments_registry[experiment_name].archive_path
async def wait_experiment(self, envoy_name: str) -> str:
self.col_exp[envoy_name] = None
queue = self.col_exp_queues[envoy_name]
experiment_name = await queue.get()
self.col_exp[envoy_name] = experiment_name
return experiment_name
def get_dataset_info(self):
return self.sample_shape, self.target_shape
def get_registered_shards(self) -> list:
return [shard_status['shard_info'] for shard_status in self._shard_registry.values()]
async def stream_metrics(self, experiment_name: str, caller: str):
if (experiment_name not in self.experiments_registry
or caller not in self.experiments_registry[experiment_name].users):
raise Exception(
f'No experiment name "{experiment_name}" in experiments list, or caller "{caller}"'
f' does not have access to this experiment'
)
while not self.experiments_registry[experiment_name].aggregator:
await asyncio.sleep(1)
aggregator = self.experiments_registry[experiment_name].aggregator
while True:
if not aggregator.metric_queue.empty():
yield aggregator.metric_queue.get()
continue
if aggregator.all_quit_jobs_sent() and aggregator.metric_queue.empty():
return
yield None
def remove_experiment_data(self, experiment_name: str, caller: str):
if (experiment_name in self.experiments_registry
and caller in self.experiments_registry[experiment_name].users):
self.experiments_registry.remove(experiment_name)
def envoy_health_check(
self, *,
envoy_name: str,
is_experiment_running: bool,
cuda_devices_status: list = None,
) -> int:
shard_info = self._shard_registry.get(envoy_name)
if not shard_info:
raise Exception(f'Unknown shard {envoy_name}')
hc_period = self.settings.get('envoy_health_check_period', ENVOY_HEALTH_CHECK_PERIOD)
shard_info['is_online'] = True
shard_info['is_experiment_running'] = is_experiment_running
shard_info['valid_duration'] = 2 * hc_period
shard_info['last_updated'] = time.time()
if cuda_devices_status is not None:
for i in range(len(cuda_devices_status)):
shard_info['shard_info']['node_info']['cuda_devices'][i] = cuda_devices_status[i]
return hc_period
def get_envoys(self) -> list:
logger.info(f'Shard registry: {self._shard_registry}')
for envoy_info in self._shard_registry.values():
envoy_info['is_online'] = (
time.time() < envoy_info['last_updated'] + envoy_info['valid_duration']
)
envoy_name = envoy_info['shard_info']['node_info']['name']
envoy_info['experiment_name'] = self.col_exp[envoy_name]
return self._shard_registry.values()
def get_experiments_list(self, caller: str) -> list:
experiments = self.experiments_registry.get_user_experiments(caller)
result = []
for exp in experiments:
exp_data = {
'name': exp.name,
'status': exp.status,
'collaborators_amount': len(exp.collaborators),
}
progress = _get_experiment_progress(exp)
if progress is not None:
exp_data['progress'] = progress
if exp.aggregator:
tasks_amount = len({
task['function']
for task in exp.aggregator.assigner.tasks.values()
})
exp_data['tasks_amount'] = tasks_amount
result.append(exp_data)
return result
def get_experiment_description(self, caller: str, name: str) -> dict:
exp = self.experiments_registry.get(name)
if not exp or caller not in exp.users:
return {}
progress = _get_experiment_progress(exp)
model_statuses = _get_model_download_statuses(exp)
tasks = _get_experiment_tasks(exp)
collaborators = _get_experiment_collaborators(exp)
result = {
'name': name,
'status': exp.status,
'current_round': exp.aggregator.round_number,
'total_rounds': exp.aggregator.rounds_to_train,
'download_statuses': {
'models': model_statuses,
'logs': [{
'name': 'aggregator',
'status': 'ready'
}],
},
'collaborators': collaborators,
'tasks': tasks,
'progress': progress
}
return result
async def start_experiment_execution_loop(self):
while True:
async with self.experiments_registry.get_next_experiment() as experiment:
loop = asyncio.get_event_loop()
run_aggregator_future = loop.create_task(experiment.start(
root_certificate=self.root_certificate,
certificate=self.certificate,
private_key=self.private_key,
tls=self.tls,
))
for col_name in experiment.collaborators:
queue = self.col_exp_queues[col_name]
await queue.put(experiment.name)
await run_aggregator_future
def _get_model_download_statuses(experiment) -> List[dict]:
best_model_status = 'ready' if experiment.aggregator.best_tensor_dict else 'pending'
last_model_status = 'ready' if experiment.aggregator.last_tensor_dict else 'pending'
model_statuses = [{
'name': 'best',
'status': best_model_status,
}, {
'name': 'last',
'status': last_model_status,
}, {
'name': 'init',
'status': 'ready'
}]
return model_statuses
def _get_experiment_progress(experiment) -> Union[float, None]:
if experiment.status == Status.IN_PROGRESS:
return experiment.aggregator.round_number / experiment.aggregator.rounds_to_train
def _get_experiment_tasks(experiment) -> List[dict]:
return [{
'name': task['function'],
'description': 'Task description Mock',
} for task in experiment.aggregator.assigner.tasks.values()]
def _get_experiment_collaborators(experiment) -> List[dict]:
return [{
'name': name,
'status': 'pending_mock',
'progress': 0.0,
'round': 0,
'current_task': 'Current Task Mock',
'next_task': 'Next Task Mock'
} for name in experiment.aggregator.authorized_cols]
| true
| true
|
1c419a04555d4fdaff326fd6472ff1513b1436f9
| 893
|
py
|
Python
|
bukber/admin.py
|
ppabcd/django-bukber
|
8a5d272e988a63082977deb5ba026876d4c70ee4
|
[
"BSD-3-Clause"
] | null | null | null |
bukber/admin.py
|
ppabcd/django-bukber
|
8a5d272e988a63082977deb5ba026876d4c70ee4
|
[
"BSD-3-Clause"
] | null | null | null |
bukber/admin.py
|
ppabcd/django-bukber
|
8a5d272e988a63082977deb5ba026876d4c70ee4
|
[
"BSD-3-Clause"
] | null | null | null |
from admin_totals.admin import ModelAdminTotals
from django.contrib import admin
from django.db.models import Sum
from django.db.models.functions import Coalesce
from .models import Kelas, Peserta
# Register your models here.
class PesertaAdmin(ModelAdminTotals):
exclude = ['created_at', 'updated_at']
list_display = [
'nama',
'kelas',
'nominal',
'created_at',
'updated_at'
]
list_totals = [('nominal', lambda field: Coalesce(Sum(field), 0))]
list_filter = ['created_at']
search_fields = ['nama']
class KelasAdmin(admin.ModelAdmin):
exclude = ['created_at', 'updated_at', 'user']
def get_model_perms(self, request):
"""
Return empty perms dict thus hiding the model from admin index.
"""
return {}
admin.site.register(Peserta, PesertaAdmin)
admin.site.register(Kelas, KelasAdmin)
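# Hedged note (illustration, not part of the original file): returning {} from
# get_model_perms hides Kelas from the admin index while keeping it registered,
# so it stays reachable through related-object widgets. On modern Django a
# related approach is to override has_module_permission instead:
class HiddenKelasAdmin(admin.ModelAdmin):
    def has_module_permission(self, request):
        return False  # removes the model from the index and app list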
| 24.805556
| 71
| 0.668533
|
from admin_totals.admin import ModelAdminTotals
from django.contrib import admin
from django.db.models import Sum
from django.db.models.functions import Coalesce
from .models import Kelas, Peserta
class PesertaAdmin(ModelAdminTotals):
exclude = ['created_at', 'updated_at']
list_display = [
'nama',
'kelas',
'nominal',
'created_at',
'updated_at'
]
list_totals = [('nominal', lambda field: Coalesce(Sum(field), 0))]
list_filter = ['created_at']
search_fields = ['nama']
class KelasAdmin(admin.ModelAdmin):
exclude = ['created_at', 'updated_at', 'user']
def get_model_perms(self, request):
return {}
admin.site.register(Peserta, PesertaAdmin)
admin.site.register(Kelas, KelasAdmin)
| true
| true
|
1c419a0af3fb5953370a1109197f3b4f8405af08
| 5,357
|
py
|
Python
|
simpleppt/SimplePPT.py
|
LouisFaure/simpleppt
|
466c73fc64b9c4e3bf14b2c46c11d69de31c8a9b
|
[
"BSD-3-Clause"
] | null | null | null |
simpleppt/SimplePPT.py
|
LouisFaure/simpleppt
|
466c73fc64b9c4e3bf14b2c46c11d69de31c8a9b
|
[
"BSD-3-Clause"
] | null | null | null |
simpleppt/SimplePPT.py
|
LouisFaure/simpleppt
|
466c73fc64b9c4e3bf14b2c46c11d69de31c8a9b
|
[
"BSD-3-Clause"
] | null | null | null |
from typing import Any, Union, Optional, Mapping, Iterable # Meta
from typing import Mapping
import numpy as np
import igraph
from sklearn.metrics import pairwise_distances
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import shortest_path
import pandas as pd
import itertools
class SimplePPT:
"""A python object containing the data used for dynamical tracks analysis.
Parameters
----------
F
coordinates of principal points in the learned space.
R
soft assignment of datapoints to principal points.
B
adjacency matrix of the principal points.
L
Laplacian matrix.
d
Pairwise distance matrix of principal points.
score
Score minimized during the tree learning.
tips
Node IDs of the tree that have degree 1.
forks
Node IDs of the tree that have a degree of more than 1.
root
Selected node ID as the root of the tree for distance calculations.
pp_info
Per node ID info of distance from the root, and segment assignment.
pp_seg
Per segment info with node ID extremities and distance."""
def __init__(
self,
F: np.array,
R: np.array,
B: np.array,
L: np.array,
d: np.array,
score: float,
lam: float,
sigma: float,
nsteps: int,
metric: str,
tips: Optional[Union[Iterable, None]] = None,
forks: Optional[Union[Iterable, None]] = None,
root: Optional[Union[int, None]] = None,
pp_info: Optional[pd.DataFrame] = None,
pp_seg: Optional[pd.DataFrame] = None,
):
self.F = F
self.R = R
self.B = B
self.L = L
self.d = d
self.score = score
self.lam = lam
self.sigma = sigma
self.nsteps = nsteps
self.metric = metric
self.tips = tips
self.forks = forks
def __repr__(self):
dt, nd = self.R.shape
descr = f"SimplePPT object of {nd} nodes approximating {dt} datapoints"
return descr
def set_tips_forks(self):
"""Obtains the tips and forks of the tree.
Returns
-------
adds to SimplePPT object the following fields: :class:`simpleppt.SimplePPT`
`.tips`
Node IDs of the tree that have degree 1.
`.forks`
Node IDs of the tree that have a degree of more than 1.
"""
g = igraph.Graph.Adjacency((self.B > 0).tolist(), mode="undirected")
self.tips = np.argwhere(np.array(g.degree()) == 1).flatten()
self.forks = np.argwhere(np.array(g.degree()) > 2).flatten()
def set_branches(self, root=None):
"""Assign branches/segments to nodes.
Returns
-------
adds to SimplePPT object the following fields: :class:`simpleppt.SimplePPT`
`.pp_info`
Per node ID info of distance from the root, and segment assignment.
`.pp_seg`
Per segment info with node ID extremities and distance.
"""
root = self.tips[0] if root is None else root
d = 1e-6 + pairwise_distances(self.F.T, self.F.T, metric=self.metric)
to_g = self.B * d
csr = csr_matrix(to_g)
g = igraph.Graph.Adjacency((to_g > 0).tolist(), mode="undirected")
g.es["weight"] = to_g[to_g.nonzero()]
root_dist_matrix = shortest_path(csr, directed=False, indices=root)
pp_info = pd.DataFrame(
{
"PP": g.vs.indices,
"dist": root_dist_matrix,
"seg": np.zeros(csr.shape[0]),
}
)
nodes = np.argwhere(
np.apply_along_axis(arr=(csr > 0).todense(), axis=0, func1d=np.sum) != 2
).flatten()
nodes = np.unique(np.append(nodes, root))
pp_seg = pd.DataFrame(columns=["n", "from", "to", "d"])
for node1, node2 in itertools.combinations(nodes, 2):
paths12 = g.get_shortest_paths(node1, node2)
paths12 = np.array([val for sublist in paths12 for val in sublist])
if np.sum(np.isin(nodes, paths12)) == 2:
fromto = np.array([node1, node2])
path_root = root_dist_matrix[[node1, node2]]
fro = fromto[np.argmin(path_root)]
to = fromto[np.argmax(path_root)]
pp_info.loc[paths12, "seg"] = pp_seg.shape[0] + 1
pp_seg = pp_seg.append(
pd.DataFrame(
{
"n": pp_seg.shape[0] + 1,
"from": fro,
"to": to,
"d": shortest_path(csr, directed=False, indices=fro)[to],
},
index=[pp_seg.shape[0] + 1],
)
)
pp_seg["n"] = pp_seg["n"].astype(int).astype(str)
pp_seg["n"] = pp_seg["n"].astype(int).astype(str)
pp_seg["from"] = pp_seg["from"].astype(int)
pp_seg["to"] = pp_seg["to"].astype(int)
pp_info["seg"] = pp_info["seg"].astype(int).astype(str)
pp_info["seg"] = pp_info["seg"].astype(int).astype(str)
self.pp_info = pp_info
self.pp_seg = pp_seg
self.root = root
| 32.271084
| 85
| 0.550308
|
from typing import Any, Union, Optional, Mapping, Iterable
from typing import Mapping
import numpy as np
import igraph
from sklearn.metrics import pairwise_distances
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import shortest_path
import pandas as pd
import itertools
class SimplePPT:
def __init__(
self,
F: np.array,
R: np.array,
B: np.array,
L: np.array,
d: np.array,
score: float,
lam: float,
sigma: float,
nsteps: int,
metric: str,
tips: Optional[Union[Iterable, None]] = None,
forks: Optional[Union[Iterable, None]] = None,
root: Optional[Union[int, None]] = None,
pp_info: Optional[pd.DataFrame] = None,
pp_seg: Optional[pd.DataFrame] = None,
):
self.F = F
self.R = R
self.B = B
self.L = L
self.d = d
self.score = score
self.lam = lam
self.sigma = sigma
self.nsteps = nsteps
self.metric = metric
self.tips = tips
self.forks = forks
def __repr__(self):
dt, nd = self.R.shape
descr = f"SimplePPT object of {nd} nodes approximating {dt} datapoints"
return descr
def set_tips_forks(self):
g = igraph.Graph.Adjacency((self.B > 0).tolist(), mode="undirected")
self.tips = np.argwhere(np.array(g.degree()) == 1).flatten()
self.forks = np.argwhere(np.array(g.degree()) > 2).flatten()
def set_branches(self, root=None):
root = self.tips[0] if root is None else root
d = 1e-6 + pairwise_distances(self.F.T, self.F.T, metric=self.metric)
to_g = self.B * d
csr = csr_matrix(to_g)
g = igraph.Graph.Adjacency((to_g > 0).tolist(), mode="undirected")
g.es["weight"] = to_g[to_g.nonzero()]
root_dist_matrix = shortest_path(csr, directed=False, indices=root)
pp_info = pd.DataFrame(
{
"PP": g.vs.indices,
"dist": root_dist_matrix,
"seg": np.zeros(csr.shape[0]),
}
)
nodes = np.argwhere(
np.apply_along_axis(arr=(csr > 0).todense(), axis=0, func1d=np.sum) != 2
).flatten()
nodes = np.unique(np.append(nodes, root))
pp_seg = pd.DataFrame(columns=["n", "from", "to", "d"])
for node1, node2 in itertools.combinations(nodes, 2):
paths12 = g.get_shortest_paths(node1, node2)
paths12 = np.array([val for sublist in paths12 for val in sublist])
if np.sum(np.isin(nodes, paths12)) == 2:
fromto = np.array([node1, node2])
path_root = root_dist_matrix[[node1, node2]]
fro = fromto[np.argmin(path_root)]
to = fromto[np.argmax(path_root)]
pp_info.loc[paths12, "seg"] = pp_seg.shape[0] + 1
pp_seg = pp_seg.append(
pd.DataFrame(
{
"n": pp_seg.shape[0] + 1,
"from": fro,
"to": to,
"d": shortest_path(csr, directed=False, indices=fro)[to],
},
index=[pp_seg.shape[0] + 1],
)
)
pp_seg["n"] = pp_seg["n"].astype(int).astype(str)
pp_seg["n"] = pp_seg["n"].astype(int).astype(str)
pp_seg["from"] = pp_seg["from"].astype(int)
pp_seg["to"] = pp_seg["to"].astype(int)
pp_info["seg"] = pp_info["seg"].astype(int).astype(str)
pp_info["seg"] = pp_info["seg"].astype(int).astype(str)
self.pp_info = pp_info
self.pp_seg = pp_seg
self.root = root
| true
| true
|
1c419a9890ffa661f4b93e3f7cb17869f69aa93e
| 5,814
|
py
|
Python
|
py/parseMidi.py
|
Lazersmoke/idawator-hacking
|
12db250afa6f0192041a233339db535edbc72f86
|
[
"MIT"
] | null | null | null |
py/parseMidi.py
|
Lazersmoke/idawator-hacking
|
12db250afa6f0192041a233339db535edbc72f86
|
[
"MIT"
] | null | null | null |
py/parseMidi.py
|
Lazersmoke/idawator-hacking
|
12db250afa6f0192041a233339db535edbc72f86
|
[
"MIT"
] | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import minimize
from scipy.io import wavfile
from scipy.signal import hilbert
from scipy.special import binom
from mido import MidiFile
import itertools
import nonlin
fockSize = 10
# Pitch class, octave
sizeOfNoteSpec = 12 + 1
sizeOfFockNoteSpec = 0
fockOffsets = []
for k in range(fockSize + 1):
if k < fockSize:
fockOffsets.append(sizeOfFockNoteSpec + sizeOfNoteSpec * k)
sizeOfFockNoteSpec += sizeOfNoteSpec * k
print("Fock size total:",sizeOfFockNoteSpec)
print("Fock offsets:",fockOffsets)
# Include time density!
sizeEpoch = 1 + sizeOfFockNoteSpec
ohbMatrix = np.eye(12)
# Build a NoteSpec out of the current midi situation during this particular epoch
def mkNoteSpec(heldNotes,decayingNotes,timeDensity):
allNotes = heldNotes + decayingNotes
noteCount = len(allNotes)
if noteCount > fockSize:
print("!!! Warning, fock size of {} exceeded by {} simultaneous notes !!!".format(fockSize,noteCount))
allNotes = allNotes[:fockSize]
noteCount = fockSize
fOff = fockOffsets[noteCount - 1]
fockVec = np.zeros(sizeOfFockNoteSpec)
contribs = []
for k in range(noteCount):
(octave,pc) = midiNoteToRepr(allNotes[k])
pcVec = np.zeros(12)
pcVec[pc] = 1
nOff = fOff + k * sizeOfNoteSpec
fockVec[nOff : nOff + sizeOfNoteSpec] = np.append(np.matmul(ohbMatrix,pcVec),octave)
#print(pc)
epoch = np.insert(fockVec,0,timeDensity)
return epoch
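# Hedged worked note (illustration, not in the original): with fockSize = 10
# and sizeOfNoteSpec = 13, the fock space above holds 13 * (0+1+...+9) = 585
# slots, so each epoch vector returned here has sizeEpoch = 586 entries
# (one time-density entry plus the flattened fock vector).
assert sizeOfFockNoteSpec == 13 * sum(range(10)) == 585
assert sizeEpoch == 586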
def traceNoteSpec(ns):
mess = "Time Density: {}, note probabilites:".format(ns[0])
for k in range(fockSize):
s = 1 + fockOffsets[k]
fock = ns[s : s + (k + 1) * sizeOfNoteSpec]
prob = np.linalg.norm(fock)
if prob > 0:
mess += "\n{:.2f} for {} notes (".format(prob,k + 1)
for l in range(k + 1):
# minus one to forget octave
noteStart = l * sizeOfNoteSpec
thisNote = fock[noteStart : noteStart + sizeOfNoteSpec - 1]
thisOctave = fock[noteStart + sizeOfNoteSpec - 1]
mess += "{}^{}, ".format(np.argwhere(thisNote).flatten(),thisOctave)
mess = mess[:-2] + ")"
return mess
# Midi should have octave in integers [0,10] (so eleven octaves)
# Returns (octave,pitchClass)
def midiNoteToRepr(midiNote):
return divmod(midiNote,12)
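# Hedged worked example (illustration, not in the original): middle C is MIDI
# note 60, so divmod(60, 12) gives octave 5 and pitch class 0 under this
# convention; A440 (MIDI 69) lands in pitch class 9.
assert midiNoteToRepr(60) == (5, 0)
assert midiNoteToRepr(69) == (5, 9)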
mid = MidiFile('stayorgo.mid')
tracks = []
for i, track in enumerate(mid.tracks):
print('Track {}: {}'.format(i, track.name))
heldNotes = []
toUnHold = []
lastTime = 0
noteSpecs = []
for msg in track:
if msg.time != 0:
#print()
#print("Held",heldNotes,"with these ones decaying:",toUnHold,"for time:",lastTime)
#print()
noteSpecs.append(mkNoteSpec(heldNotes,toUnHold,min(lastTime,300)))
#print(traceNoteSpec(noteSpecs[-1]))
for n in toUnHold:
heldNotes.remove(n)
toUnHold = []
lastTime = msg.time
if msg.type == 'note_on':
#print("Note",msg.note,"on with time=",msg.time)
if msg.note not in heldNotes:
heldNotes.append(msg.note)
elif msg.type == 'note_off':
#print("Note",msg.note,"off with time=",msg.time)
if msg.note in heldNotes:
toUnHold.append(msg.note)
else:
#print(msg)
pass  # ignore other message types
tracks.append(np.stack(noteSpecs,axis=0))
print("Found {} Note Specs\n".format(len(noteSpecs)))
# Memoryless predictor network
def stepPredictLoss(predictor,track):
print("Finding step loss...")
totalLoss = 0
lastNoteSpec = None
for noteSpec in track:
if lastNoteSpec is not None:
predicted = nonlin.applyNonlinear(predictor,lastNoteSpec)
totalLoss += np.linalg.norm(predicted - noteSpec)
lastNoteSpec = noteSpec
return totalLoss
def timePredictLoss(predictor,track):
totalLoss = 0
for k in range(track.shape[0] - predictorSideLength):
i = k + predictorSideLength
predicted = nonlin.applyNonlinear(predictor,track[k:i,0])[0]
totalLoss += np.abs(predicted - track[i,0]) ** 2
return np.log(totalLoss)
def plotTimePredictLoss(predictor,track):
totalLoss = 0
losses = []
ks = range(track.shape[0] - predictorSideLength)
for k in ks:
i = k + predictorSideLength
predicted = nonlin.applyNonlinear(predictor,track[k:i,0])[0]
totalLoss += np.abs(predicted - track[i,0]) ** 2
losses.append(totalLoss)
return losses
predictorDepth = 2
predictorSideLength = 3
identityPredictor = nonlin.identityNonLin(sizeEpoch,predictorDepth)
identityTimePredictor = nonlin.identityNonLin(predictorSideLength,predictorDepth)
print("Computing identity time loss on Track 1...")
print("Total Loss",timePredictLoss(identityTimePredictor,tracks[1]))
def minCB(params):
predictor = nonlin.deserializeNonLin(params,predictorSideLength,predictorDepth)
print(predictor)
for l in range(10):
k = l + 200
i = k + predictorSideLength
predicted = nonlin.applyNonlinear(predictor,tracks[1][k:i,0])[0]
print("Predicted: ",predicted)
print("Actual: ",tracks[1][i,0])
print("Loss: ",np.abs(predicted - tracks[1][i,0]) ** 2)
print()
print("Step Loss: ",timePredictLoss(predictor,tracks[1]))
allLosses.append(plotTimePredictLoss(predictor,tracks[1]))
def toMinimize(params):
predictor = nonlin.deserializeNonLin(params,predictorSideLength,predictorDepth)
return timePredictLoss(predictor,tracks[1])
input("...")
allLosses = []
minResult = minimize(toMinimize,nonlin.serializeNonLin(identityTimePredictor),callback=minCB,options={'disp':True,'maxiter':10})
print(minResult)
for i in range(len(allLosses)):
losses = allLosses[i]
plt.plot(range(len(losses)),losses, color = str(1-(i/len(allLosses))))
plt.title('Cumulative loss during track, progressive iterations')
plt.xlabel('Note in Track')
plt.ylabel('Cumulative Loss')
plt.show(block=True)
plt.hist(np.diff(allLosses[-1]))
plt.title('Distribution of losses')
plt.show(block=True)
| 31.770492
| 128
| 0.697282
|
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import minimize
from scipy.io import wavfile
from scipy.signal import hilbert
from scipy.special import binom
from mido import MidiFile
import itertools
import nonlin
fockSize = 10
sizeOfNoteSpec = 12 + 1
sizeOfFockNoteSpec = 0
fockOffsets = []
for k in range(fockSize + 1):
if k < fockSize:
fockOffsets.append(sizeOfFockNoteSpec + sizeOfNoteSpec * k)
sizeOfFockNoteSpec += sizeOfNoteSpec * k
print("Fock size total:",sizeOfFockNoteSpec)
print("Fock offsets:",fockOffsets)
sizeEpoch = 1 + sizeOfFockNoteSpec
ohbMatrix = np.eye(12)
def mkNoteSpec(heldNotes,decayingNotes,timeDensity):
allNotes = heldNotes + decayingNotes
noteCount = len(allNotes)
if noteCount > fockSize:
print("!!! Warning, fock size of {} exceeded by {} simultaneous notes !!!".format(fockSize,noteCount))
allNotes = allNotes[:fockSize]
noteCount = fockSize
fOff = fockOffsets[noteCount - 1]
fockVec = np.zeros(sizeOfFockNoteSpec)
contribs = []
for k in range(noteCount):
(octave,pc) = midiNoteToRepr(allNotes[k])
pcVec = np.zeros(12)
pcVec[pc] = 1
nOff = fOff + k * sizeOfNoteSpec
fockVec[nOff : nOff + sizeOfNoteSpec] = np.append(np.matmul(ohbMatrix,pcVec),octave)
epoch = np.insert(fockVec,0,timeDensity)
return epoch
def traceNoteSpec(ns):
mess = "Time Density: {}, note probabilites:".format(ns[0])
for k in range(fockSize):
s = 1 + fockOffsets[k]
fock = ns[s : s + (k + 1) * sizeOfNoteSpec]
prob = np.linalg.norm(fock)
if prob > 0:
mess += "\n{:.2f} for {} notes (".format(prob,k + 1)
for l in range(k + 1):
noteStart = l * sizeOfNoteSpec
thisNote = fock[noteStart : noteStart + sizeOfNoteSpec - 1]
thisOctave = fock[noteStart + sizeOfNoteSpec - 1]
mess += "{}^{}, ".format(np.argwhere(thisNote).flatten(),thisOctave)
mess = mess[:-2] + ")"
return mess
def midiNoteToRepr(midiNote):
return divmod(midiNote,12)
mid = MidiFile('stayorgo.mid')
tracks = []
for i, track in enumerate(mid.tracks):
print('Track {}: {}'.format(i, track.name))
heldNotes = []
toUnHold = []
lastTime = 0
noteSpecs = []
for msg in track:
if msg.time != 0:
noteSpecs.append(mkNoteSpec(heldNotes,toUnHold,min(lastTime,300)))
for n in toUnHold:
heldNotes.remove(n)
toUnHold = []
lastTime = msg.time
if msg.type == 'note_on':
if msg.note not in heldNotes:
heldNotes.append(msg.note)
elif msg.type == 'note_off':
if msg.note in heldNotes:
toUnHold.append(msg.note)
else:
                pass
tracks.append(np.stack(noteSpecs,axis=0))
print("Found {} Note Specs\n".format(len(noteSpecs)))
def stepPredictLoss(predictor,track):
print("Finding step loss...")
totalLoss = 0
lastNoteSpec = None
for noteSpec in track:
if lastNoteSpec is not None:
predicted = nonlin.applyNonlinear(predictor,lastNoteSpec)
totalLoss += np.linalg.norm(predicted - noteSpec)
lastNoteSpec = noteSpec
return totalLoss
def timePredictLoss(predictor,track):
totalLoss = 0
for k in range(track.shape[0] - predictorSideLength):
i = k + predictorSideLength
predicted = nonlin.applyNonlinear(predictor,track[k:i,0])[0]
totalLoss += np.abs(predicted - track[i,0]) ** 2
return np.log(totalLoss)
def plotTimePredictLoss(predictor,track):
totalLoss = 0
losses = []
ks = range(track.shape[0] - predictorSideLength)
for k in ks:
i = k + predictorSideLength
predicted = nonlin.applyNonlinear(predictor,track[k:i,0])[0]
totalLoss += np.abs(predicted - track[i,0]) ** 2
losses.append(totalLoss)
return losses
predictorDepth = 2
predictorSideLength = 3
identityPredictor = nonlin.identityNonLin(sizeEpoch,predictorDepth)
identityTimePredictor = nonlin.identityNonLin(predictorSideLength,predictorDepth)
print("Computing identity time loss on Track 1...")
print("Total Loss",timePredictLoss(identityTimePredictor,tracks[1]))
def minCB(params):
predictor = nonlin.deserializeNonLin(params,predictorSideLength,predictorDepth)
print(predictor)
for l in range(10):
k = l + 200
i = k + predictorSideLength
predicted = nonlin.applyNonlinear(predictor,tracks[1][k:i,0])[0]
print("Predicted: ",predicted)
print("Actual: ",tracks[1][i,0])
print("Loss: ",np.abs(predicted - tracks[1][i,0]) ** 2)
print()
print("Step Loss: ",timePredictLoss(predictor,tracks[1]))
allLosses.append(plotTimePredictLoss(predictor,tracks[1]))
def toMinimize(params):
predictor = nonlin.deserializeNonLin(params,predictorSideLength,predictorDepth)
return timePredictLoss(predictor,tracks[1])
input("...")
allLosses = []
minResult = minimize(toMinimize,nonlin.serializeNonLin(identityTimePredictor),callback=minCB,options={'disp':True,'maxiter':10})
print(minResult)
for i in range(len(allLosses)):
losses = allLosses[i]
plt.plot(range(len(losses)),losses, color = str(1-(i/len(allLosses))))
plt.title('Cumulative loss during track, progressive iterations')
plt.xlabel('Note in Track')
plt.ylabel('Cumulative Loss')
plt.show(block=True)
plt.hist(np.diff(allLosses[-1]))
plt.title('Distribution of losses')
plt.show(block=True)
| true
| true
|
1c419b03436ca7714cc145a58d60b72d08249e0b
| 9,376
|
py
|
Python
|
src/software/parse/testall.py
|
intel/RAAD
|
9cca9e72ff61658191e30756bb260173d5600102
|
[
"Intel",
"Apache-2.0"
] | null | null | null |
src/software/parse/testall.py
|
intel/RAAD
|
9cca9e72ff61658191e30756bb260173d5600102
|
[
"Intel",
"Apache-2.0"
] | null | null | null |
src/software/parse/testall.py
|
intel/RAAD
|
9cca9e72ff61658191e30756bb260173d5600102
|
[
"Intel",
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# *****************************************************************************/
# * Authors: Joseph Tarango, Randal Eike
# *****************************************************************************/
# @file: testAll.py
# This file is based on testSetup.py from Phuong Tran. It runs
# the bench telemetry compliance test suite.
from __future__ import absolute_import, division, print_function, unicode_literals # , nested_scopes, generators, generator_stop, with_statement, annotations
import re, os, sys
from optparse import OptionParser
##### .exe extension patch for the compiled version of this script
if not re.search('\.PY$|\.PYC$|\.EXE$', os.path.split(sys.argv[0])[1].upper()):
sys.argv[0] = os.path.join( os.path.split(sys.argv[0])[0] , os.path.split(sys.argv[0])[1]+'.exe' )
#### extend the Python search path to include TWIDL_tools directory
if __name__ == '__main__':
twidlcore = os.path.dirname(os.path.dirname(os.path.abspath(sys.argv[0])))
sys.path.insert(0,twidlcore)
#### import test utilities
from src.software.parse.output_log import OutputLog
from src.software.parse.internal.drive_utility import testDrive
from src.software.parse.internal.drive_utility import SetUlink
from src.software.parse.internal.drive_utility import ScanDrives
from src.software.parse.internal.drive_utility import driveList
#### import telemetry modules
from src.software.parse.telemetry_drive import logDevice
from src.software.parse.telemetry_util import openReadFile
from src.software.parse.telemetry_util import openWriteFile
from src.software.parse.telemetry_util import cleanDir
from src.software.parse.internal.getTelemetry import readTelemetryLog
from src.software.parse.parseTelemetryBin import parseInputBin
from src.software.parse.testTelemetryPull import pullTest
from src.software.parse.internal.testCIAER import CIAER_Test
from src.software.parse.internal.testCIAER import clearOldLogs
TOOL_VERSION = 1.0
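# Example invocations (illustrative; the drive number, debug level and output file
# name below are placeholder assumptions):
#   python testall.py -d 0 --debug 2 benchResults.txt
#   python testall.py -q               # query the system for the drive list
#   python testall.py --ulink pc -d 0  # ULINK power cycle (OFF+ON) before testing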
def makeDirName(dirName):
folder = os.path.join(os.getcwd(), dirName)
return folder
def makeFileName(fileName, dirName):
folder = makeDirName(dirName)
outName = os.path.join(folder, fileName)
return outName
def main():
drvIndex = None
core = None
#### Command-line arguments ####
parser = OptionParser(usage="usage: %prog [options] outputFile", version="%prog Version: "+str(TOOL_VERSION))
parser.add_option('--debug',type="int", dest='debug', action="store", default=0, help='Enable debug level')
parser.add_option('-d',type='int', dest='drvnum', metavar='<DRVNUM>', default=None, help='Drive number to analyze')
parser.add_option('-q',action='callback', callback=ScanDrives, help='Query system for the drive list')
parser.add_option('--ulink',metavar='on|off|pc', default='', help='ULINK Control: ON, OFF, or Power Cycle (OFF+ON)')
(options, args) = parser.parse_args()
if (len(args) >= 1):
outFile = args[0]
else:
outFile = "testAll.txt"
# Initialize setup
if (options.debug > 0):
OutputLog.setDebugLevel(options.debug)
else:
OutputLog.enableQuiet()
OutputLog.setWarnIsError(True)
# check for ulink power cycle
if (options.ulink):
if(False == SetUlink(options.ulink)):
OutputLog.Error("INVALID --ulink argument")
sys.exit(1)
    # if no drive option is specified, use the default drive index
driveNumber = driveList.checkDriveIndex(options.drvnum)
if(driveNumber is None): sys.exit(1)
### Select drive to analyze
drive = testDrive(driveNumber)
if (drive is None): sys.exit(1)
### Determine what to do
drive.globalDriveSpecificParams()
OutputLog.Information("Get Id information")
OutputLog.Information(drive.toStr())
if(False == drive.unlockDrive()): sys.exit(1)
### Verify drive is not asserted
if (drive.isDriveAsserted()):
OutputLog.Information( "\nDrive is asserted!!!" )
AssertedDrive = True
else:
OutputLog.Information( "\nDrive is NOT asserted!!!" )
AssertedDrive = False
### Perform Test ###
testNumber = 0
runTest = True
exitStatus = True
nvmeMaxBlockSize = 8 # Limit to 4K until I get the big block transfer fixed
dut = logDevice(drive.getTestDrive())
simplePullDir = "simple_pull"
simpleHiLogName = makeFileName("v2hiLog.bin", makeDirName(simplePullDir))
simpleCiLogName = makeFileName("v2ciLog.bin", makeDirName(simplePullDir))
simplehiParse = "hi_parse"
simpleciParse = "ci_parse"
hiPullDir = "host_log_pull"
ciPullDir = "ctrl_log_pull"
ciEmptyPullDir = "ctrl_log_empty_pull"
ciaerPullDir = "ciaer_pull"
while ((True == exitStatus) and (True == runTest)):
if (0 == testNumber):
# Clean the output directories
cleanDir(simplePullDir)
cleanDir(simplehiParse)
cleanDir(simpleciParse)
cleanDir(hiPullDir)
cleanDir(ciPullDir)
cleanDir(ciEmptyPullDir)
cleanDir(ciaerPullDir)
dut.setCiLog()
clearOldLogs(dut)
elif (1 == testNumber):
# Perform basic HI pull test
OutputLog.Print("Basic HI Log Pull...")
telemetryData = openWriteFile(simpleHiLogName)
dut.setHiLog()
if(telemetryData is not None):
exitStatus, ciGeneration = readTelemetryLog(dut, telemetryData, blockSize = 4096, block0Size = 512, createLog = True, doubleTOCRead = False)
telemetryData.close()
else:
exitStatus = False
elif (2 == testNumber):
# Perform basic parse test on the HI file generated during the basic pull
OutputLog.Print("Basic HI Log Check...")
telemetryInputBin = openReadFile(simpleHiLogName)
if(telemetryInputBin is not None):
parseStatus, fileValidity = parseInputBin(telemetryInputBin, True, simplehiParse, None, False)
if((False == parseStatus) or (False == fileValidity)):
OutputLog.Error(format("File \"%s\" failed validity check\n" % (simpleHiLogName)))
exitStatus = False
telemetryInputBin.close()
else:
exitStatus = False
elif (3 == testNumber):
# Perform basic CI pull test
OutputLog.Print("Basic CI Log Pull...")
telemetryData = openWriteFile(simpleCiLogName)
dut.setCiLog()
if(telemetryData is not None):
exitStatus, ciGeneration = readTelemetryLog(dut, telemetryData, blockSize = 4096, block0Size = 512, createLog = True, doubleTOCRead = False)
telemetryData.close()
else:
exitStatus = False
elif (4 == testNumber):
# Perform basic parse test on the CI file generated during the basic pull
OutputLog.Print("Basic CI Log Check...")
telemetryInputBin = openReadFile(simpleCiLogName)
if(telemetryInputBin is not None):
parseStatus, fileValidity = parseInputBin(telemetryInputBin, False, simpleciParse, None, False)
if((False == parseStatus) or (False == fileValidity)):
OutputLog.Error(format("File \"%s\" failed validity check\n" % (simpleCiLogName)))
exitStatus = False
telemetryInputBin.close()
else:
exitStatus = False
elif (5 == testNumber):
# Test the log pull function
OutputLog.Print("Multiple Block Size HI Log Pull...")
dut.setHiLog()
exitStatus = pullTest(dut, AssertedDrive, makeDirName(hiPullDir), nvmeMaxBlockSize)
elif (6 == testNumber):
# Test the log pull function
OutputLog.Print("Multiple Block Size CI Log Pull (no eventdump)...")
dut.setCiLog()
exitStatus = pullTest(dut, AssertedDrive, makeDirName(ciEmptyPullDir), nvmeMaxBlockSize)
elif (7 == testNumber):
# Test the AER function
OutputLog.Print("Multiple Block Size CI Log Pull (eventdump)...")
dut.setCiLog()
dut.generateEvent()
exitStatus = pullTest(dut, AssertedDrive, makeDirName(ciPullDir), nvmeMaxBlockSize)
clearOldLogs(dut)
elif (8 == testNumber):
# Test the AER function
OutputLog.Print("Test Async Event Request...")
dut.setCiLog()
exitStatus = CIAER_Test(dut, "telemetryCIAER", makeDirName(ciaerPullDir))
clearOldLogs(dut)
else:
runTest = False
testNumber += 1
if(True == exitStatus): OutputLog.Print ("All bench tests passed!!!!")
else: OutputLog.Print ("Bench test suite FAILED!!!")
return exitStatus
######## Test it #######
if __name__ == '__main__':
from datetime import datetime
p = datetime.now()
exitStatus = main()
q = datetime.now()
OutputLog.Print("\nExecution time: "+str(q-p))
sys.exit(exitStatus)
| 40.943231
| 159
| 0.620307
|
from __future__ import absolute_import, division, print_function, unicode_literals
import re, os, sys
from optparse import OptionParser
if not re.search('\.PY$|\.PYC$|\.EXE$', os.path.split(sys.argv[0])[1].upper()):
    sys.argv[0] = os.path.join( os.path.split(sys.argv[0])[0] , os.path.split(sys.argv[0])[1]+'.exe' )
if __name__ == '__main__':
    twidlcore = os.path.dirname(os.path.dirname(os.path.abspath(sys.argv[0])))
    sys.path.insert(0,twidlcore)
from src.software.parse.output_log import OutputLog
from src.software.parse.internal.drive_utility import testDrive
from src.software.parse.internal.drive_utility import SetUlink
from src.software.parse.internal.drive_utility import ScanDrives
from src.software.parse.internal.drive_utility import driveList
from src.software.parse.telemetry_drive import logDevice
from src.software.parse.telemetry_util import openReadFile
from src.software.parse.telemetry_util import openWriteFile
from src.software.parse.telemetry_util import cleanDir
from src.software.parse.internal.getTelemetry import readTelemetryLog
from src.software.parse.parseTelemetryBin import parseInputBin
from src.software.parse.testTelemetryPull import pullTest
from src.software.parse.internal.testCIAER import CIAER_Test
from src.software.parse.internal.testCIAER import clearOldLogs
TOOL_VERSION = 1.0
def makeDirName(dirName):
folder = os.path.join(os.getcwd(), dirName)
return folder
def makeFileName(fileName, dirName):
folder = makeDirName(dirName)
outName = os.path.join(folder, fileName)
return outName
def main():
drvIndex = None
core = None
    parser = OptionParser(usage="usage: %prog [options] outputFile", version="%prog Version: "+str(TOOL_VERSION))
parser.add_option('--debug',type="int", dest='debug', action="store", default=0, help='Enable debug level')
parser.add_option('-d',type='int', dest='drvnum', metavar='<DRVNUM>', default=None, help='Drive number to analyze')
parser.add_option('-q',action='callback', callback=ScanDrives, help='Query system for the drive list')
parser.add_option('--ulink',metavar='on|off|pc', default='', help='ULINK Control: ON, OFF, or Power Cycle (OFF+ON)')
(options, args) = parser.parse_args()
if (len(args) >= 1):
outFile = args[0]
else:
outFile = "testAll.txt"
if (options.debug > 0):
OutputLog.setDebugLevel(options.debug)
else:
OutputLog.enableQuiet()
OutputLog.setWarnIsError(True)
if (options.ulink):
if(False == SetUlink(options.ulink)):
OutputLog.Error("INVALID --ulink argument")
sys.exit(1)
driveNumber = driveList.checkDriveIndex(options.drvnum)
if(driveNumber is None): sys.exit(1)
    drive = testDrive(driveNumber)
    if (drive is None): sys.exit(1)
    drive.globalDriveSpecificParams()
    OutputLog.Information("Get Id information")
OutputLog.Information(drive.toStr())
if(False == drive.unlockDrive()): sys.exit(1)
on( "\nDrive is asserted!!!" )
AssertedDrive = True
else:
OutputLog.Information( "\nDrive is NOT asserted!!!" )
AssertedDrive = False
    testNumber = 0
    runTest = True
    exitStatus = True
nvmeMaxBlockSize = 8
dut = logDevice(drive.getTestDrive())
simplePullDir = "simple_pull"
simpleHiLogName = makeFileName("v2hiLog.bin", makeDirName(simplePullDir))
simpleCiLogName = makeFileName("v2ciLog.bin", makeDirName(simplePullDir))
simplehiParse = "hi_parse"
simpleciParse = "ci_parse"
hiPullDir = "host_log_pull"
ciPullDir = "ctrl_log_pull"
ciEmptyPullDir = "ctrl_log_empty_pull"
ciaerPullDir = "ciaer_pull"
while ((True == exitStatus) and (True == runTest)):
if (0 == testNumber):
cleanDir(simplePullDir)
cleanDir(simplehiParse)
cleanDir(simpleciParse)
cleanDir(hiPullDir)
cleanDir(ciPullDir)
cleanDir(ciEmptyPullDir)
cleanDir(ciaerPullDir)
dut.setCiLog()
clearOldLogs(dut)
elif (1 == testNumber):
OutputLog.Print("Basic HI Log Pull...")
telemetryData = openWriteFile(simpleHiLogName)
dut.setHiLog()
if(telemetryData is not None):
exitStatus, ciGeneration = readTelemetryLog(dut, telemetryData, blockSize = 4096, block0Size = 512, createLog = True, doubleTOCRead = False)
telemetryData.close()
else:
exitStatus = False
elif (2 == testNumber):
OutputLog.Print("Basic HI Log Check...")
telemetryInputBin = openReadFile(simpleHiLogName)
if(telemetryInputBin is not None):
parseStatus, fileValidity = parseInputBin(telemetryInputBin, True, simplehiParse, None, False)
if((False == parseStatus) or (False == fileValidity)):
OutputLog.Error(format("File \"%s\" failed validity check\n" % (simpleHiLogName)))
exitStatus = False
telemetryInputBin.close()
else:
exitStatus = False
elif (3 == testNumber):
OutputLog.Print("Basic CI Log Pull...")
telemetryData = openWriteFile(simpleCiLogName)
dut.setCiLog()
if(telemetryData is not None):
exitStatus, ciGeneration = readTelemetryLog(dut, telemetryData, blockSize = 4096, block0Size = 512, createLog = True, doubleTOCRead = False)
telemetryData.close()
else:
exitStatus = False
elif (4 == testNumber):
OutputLog.Print("Basic CI Log Check...")
telemetryInputBin = openReadFile(simpleCiLogName)
if(telemetryInputBin is not None):
parseStatus, fileValidity = parseInputBin(telemetryInputBin, False, simpleciParse, None, False)
if((False == parseStatus) or (False == fileValidity)):
OutputLog.Error(format("File \"%s\" failed validity check\n" % (simpleCiLogName)))
exitStatus = False
telemetryInputBin.close()
else:
exitStatus = False
elif (5 == testNumber):
OutputLog.Print("Multiple Block Size HI Log Pull...")
dut.setHiLog()
exitStatus = pullTest(dut, AssertedDrive, makeDirName(hiPullDir), nvmeMaxBlockSize)
elif (6 == testNumber):
OutputLog.Print("Multiple Block Size CI Log Pull (no eventdump)...")
dut.setCiLog()
exitStatus = pullTest(dut, AssertedDrive, makeDirName(ciEmptyPullDir), nvmeMaxBlockSize)
elif (7 == testNumber):
OutputLog.Print("Multiple Block Size CI Log Pull (eventdump)...")
dut.setCiLog()
dut.generateEvent()
exitStatus = pullTest(dut, AssertedDrive, makeDirName(ciPullDir), nvmeMaxBlockSize)
clearOldLogs(dut)
elif (8 == testNumber):
OutputLog.Print("Test Async Event Request...")
dut.setCiLog()
exitStatus = CIAER_Test(dut, "telemetryCIAER", makeDirName(ciaerPullDir))
clearOldLogs(dut)
else:
runTest = False
testNumber += 1
if(True == exitStatus): OutputLog.Print ("All bench tests passed!!!!")
else: OutputLog.Print ("Bench test suite FAILED!!!")
return exitStatus
if __name__ == '__main__':
    from datetime import datetime
    p = datetime.now()
    exitStatus = main()
    q = datetime.now()
    OutputLog.Print("\nExecution time: "+str(q-p))
sys.exit(exitStatus)
| true
| true
|
1c419b871f3fff1ebd2e35764f5e998c4986bced
| 3,951
|
py
|
Python
|
importio2/extractor_util.py
|
import-io/import-io-api-python
|
5c838a357742233e714b2ccfd19d25c18531cfa3
|
[
"Apache-2.0"
] | 1
|
2021-08-18T03:27:40.000Z
|
2021-08-18T03:27:40.000Z
|
importio2/extractor_util.py
|
import-io/import-io-api-python
|
5c838a357742233e714b2ccfd19d25c18531cfa3
|
[
"Apache-2.0"
] | null | null | null |
importio2/extractor_util.py
|
import-io/import-io-api-python
|
5c838a357742233e714b2ccfd19d25c18531cfa3
|
[
"Apache-2.0"
] | 2
|
2021-09-13T14:28:50.000Z
|
2021-09-27T17:56:21.000Z
|
#
# Copyright 2017 Import.io
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
from datetime import datetime
from time import sleep
from importio2 import CrawlRunAPI
from importio2 import ExtractorAPI
logger = logging.getLogger(__name__)
class ExtractorUtilities(object):
def __init__(self):
self.api = ExtractorAPI()
def crawl_run_active(self, extractor_id, crawl_run_id):
"""
Determine if a crawl run is in progress for the given extractor id and crawl run id
:param extractor_id:
:param crawl_run_id:
        :return: True if the crawl run state is STARTED or PENDING; False otherwise,
                 including when the crawl run cannot be fetched or when the state is
                 FINISHED, CANCELLED, or FAILED
"""
active = False
extractor = self.api.get(extractor_id)
name = extractor['name']
api = CrawlRunAPI()
state = None
for i in range(0, 11):
crawl_run = api.get(crawl_run_id)
if crawl_run is not None:
state = crawl_run['state']
break
else:
sleep(1.0)
logger.info("Extractor: {0} has a state of {1}".format(name, state))
if state == 'STARTED' or state == 'PENDING':
active = True
else:
active = False
logger.info("{0} => name: {1}, id: {2}, crawl_run_id: {3}".format(state, name, extractor_id, crawl_run_id))
return active
def report_crawl_run_stats(self, extractor_id, crawl_run_id):
"""
        Outputs some of the metrics of a crawl run
        :param extractor_id: specifies the extractor
        :param crawl_run_id: specifies the crawl run
:return: None
"""
try:
api = ExtractorAPI()
extractor = self.api.get(extractor_id)
name = extractor['name']
api = CrawlRunAPI()
for i in range(0, 11):
run = api.get(crawl_run_id)
if run is not None:
started_at = datetime.fromtimestamp(int(run['startedAt'] / 1000))
total = int(run['totalUrlCount'])
failed = int(run['failedUrlCount'])
success = int(run['successUrlCount'])
rows = int(run['rowCount'])
logger.info("name: {0}, started: {1}, total: {2}, success: {3}, failed: {4}, rows: {5}".format(
name, started_at, total, success, failed, rows))
break
else:
sleep(1.0)
except Exception as e:
logger.exception(e)
def extractor_run_and_wait(self, extractor_id, report=5):
"""
Executes a Crawl Run and waits for it to complete
:param extractor_id:
:param report: How often to report on crawl run
:return: None
"""
extractor = self.api.get(extractor_id)
api = CrawlRunAPI()
name = extractor['name']
crawl_run_id = self.api.start(extractor_id)
logger.info("{0} => name: {1}, id: {2}, crawl_run_id: {3}".format(api.state(crawl_run_id), name, extractor_id,
crawl_run_id))
count = 1
while self.crawl_run_active(extractor_id, crawl_run_id):
sleep(report)
self.report_crawl_run_stats(extractor_id, crawl_run_id)
return crawl_run_id
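if __name__ == '__main__':
    # Illustrative sketch only (not part of the original module): the extractor GUID
    # below is a placeholder, and the importio2 client is assumed to be configured
    # with valid API credentials.
    logging.basicConfig(level=logging.INFO)
    util = ExtractorUtilities()
    run_id = util.extractor_run_and_wait('00000000-0000-0000-0000-000000000000', report=10)
    util.report_crawl_run_stats('00000000-0000-0000-0000-000000000000', run_id)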
| 37.273585
| 118
| 0.586434
|
import logging
from datetime import datetime
from time import sleep
from importio2 import CrawlRunAPI
from importio2 import ExtractorAPI
logger = logging.getLogger(__name__)
class ExtractorUtilities(object):
def __init__(self):
self.api = ExtractorAPI()
def crawl_run_active(self, extractor_id, crawl_run_id):
active = False
extractor = self.api.get(extractor_id)
name = extractor['name']
api = CrawlRunAPI()
state = None
for i in range(0, 11):
crawl_run = api.get(crawl_run_id)
if crawl_run is not None:
state = crawl_run['state']
break
else:
sleep(1.0)
logger.info("Extractor: {0} has a state of {1}".format(name, state))
if state == 'STARTED' or state == 'PENDING':
active = True
else:
active = False
logger.info("{0} => name: {1}, id: {2}, crawl_run_id: {3}".format(state, name, extractor_id, crawl_run_id))
return active
def report_crawl_run_stats(self, extractor_id, crawl_run_id):
try:
api = ExtractorAPI()
extractor = self.api.get(extractor_id)
name = extractor['name']
api = CrawlRunAPI()
for i in range(0, 11):
run = api.get(crawl_run_id)
if run is not None:
started_at = datetime.fromtimestamp(int(run['startedAt'] / 1000))
total = int(run['totalUrlCount'])
failed = int(run['failedUrlCount'])
success = int(run['successUrlCount'])
rows = int(run['rowCount'])
logger.info("name: {0}, started: {1}, total: {2}, success: {3}, failed: {4}, rows: {5}".format(
name, started_at, total, success, failed, rows))
break
else:
sleep(1.0)
except Exception as e:
logger.exception(e)
def extractor_run_and_wait(self, extractor_id, report=5):
extractor = self.api.get(extractor_id)
api = CrawlRunAPI()
name = extractor['name']
crawl_run_id = self.api.start(extractor_id)
logger.info("{0} => name: {1}, id: {2}, crawl_run_id: {3}".format(api.state(crawl_run_id), name, extractor_id,
crawl_run_id))
count = 1
while self.crawl_run_active(extractor_id, crawl_run_id):
sleep(report)
self.report_crawl_run_stats(extractor_id, crawl_run_id)
return crawl_run_id
| true
| true
|
1c419baebb28d70b2d3ce8be1c6f2f65b0b07eef
| 821
|
py
|
Python
|
runtests.py
|
funkybob/django-reformation
|
8c6ae3f6091c48ba4079e017663b2c9a15a91b9f
|
[
"BSD-3-Clause"
] | 1
|
2019-06-27T13:24:08.000Z
|
2019-06-27T13:24:08.000Z
|
runtests.py
|
funkybob/django-reformation
|
8c6ae3f6091c48ba4079e017663b2c9a15a91b9f
|
[
"BSD-3-Clause"
] | null | null | null |
runtests.py
|
funkybob/django-reformation
|
8c6ae3f6091c48ba4079e017663b2c9a15a91b9f
|
[
"BSD-3-Clause"
] | null | null | null |
import os, sys
from django.conf import settings
DIRNAME = os.path.dirname(__file__)
settings.configure(
DEBUG = True,
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(DIRNAME, 'reformation-test.db'),
}
},
INSTALLED_APPS = ('django.contrib.contenttypes',
'django.contrib.sessions',
'reformation',
),
TEMPLATE_DIRS = (
os.path.join(DIRNAME, 'reformation', 'tests', 'templates'),
),
)
from django.test.utils import setup_test_environment, get_runner, teardown_test_environment
setup_test_environment()
runner = get_runner(settings)()
failures = runner.run_tests(['reformation',], verbosity=1)
# teardown_test_environment()
if failures:
sys.exit(failures)
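# Illustrative invocation (assumes Django is installed in the active environment):
#   python runtests.py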
| 24.878788
| 91
| 0.638246
|
import os, sys
from django.conf import settings
DIRNAME = os.path.dirname(__file__)
settings.configure(
DEBUG = True,
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(DIRNAME, 'reformation-test.db'),
}
},
INSTALLED_APPS = ('django.contrib.contenttypes',
'django.contrib.sessions',
'reformation',
),
TEMPLATE_DIRS = (
os.path.join(DIRNAME, 'reformation', 'tests', 'templates'),
),
)
from django.test.utils import setup_test_environment, get_runner, teardown_test_environment
setup_test_environment()
runner = get_runner(settings)()
failures = runner.run_tests(['reformation',], verbosity=1)
if failures:
sys.exit(failures)
| true
| true
|
1c419db96e522d6532947c03991186fa54405f97
| 7,079
|
py
|
Python
|
rest_api/simple_supply_rest_api/database.py
|
elyssa12/education-sawtooth-simple-supply
|
52a669db2b30a6a506ceac278378a8161a1c6718
|
[
"Apache-2.0"
] | null | null | null |
rest_api/simple_supply_rest_api/database.py
|
elyssa12/education-sawtooth-simple-supply
|
52a669db2b30a6a506ceac278378a8161a1c6718
|
[
"Apache-2.0"
] | null | null | null |
rest_api/simple_supply_rest_api/database.py
|
elyssa12/education-sawtooth-simple-supply
|
52a669db2b30a6a506ceac278378a8161a1c6718
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
import asyncio
import logging
import aiopg
import psycopg2
from psycopg2.extras import RealDictCursor
LATEST_BLOCK_NUM = """
SELECT max(block_num) FROM blocks
"""
LOGGER = logging.getLogger(__name__)
class Database(object):
"""Manages connection to the postgres database and makes async queries
"""
def __init__(self, host, port, name, user, password, loop):
self._dsn = 'dbname={} user={} password={} host={} port={}'.format(
name, user, password, host, port)
self._loop = loop
self._conn = None
async def connect(self, retries=5, initial_delay=1, backoff=2):
"""Initializes a connection to the database
Args:
retries (int): Number of times to retry the connection
            initial_delay (int): Number of seconds to wait between reconnection attempts
backoff (int): Multiplies the delay after each retry
"""
LOGGER.info('Connecting to database')
delay = initial_delay
for attempt in range(retries):
try:
self._conn = await aiopg.connect(
dsn=self._dsn, loop=self._loop, echo=True)
LOGGER.info('Successfully connected to database')
return
except psycopg2.OperationalError:
LOGGER.debug(
'Connection failed.'
' Retrying connection (%s retries remaining)',
retries - attempt)
await asyncio.sleep(delay)
delay *= backoff
self._conn = await aiopg.connect(
dsn=self._dsn, loop=self._loop, echo=True)
LOGGER.info('Successfully connected to database')
def disconnect(self):
"""Closes connection to the database
"""
if self._conn is not None:
self._conn.close()
async def create_auth_entry(self,
public_key,
encrypted_private_key,
hashed_password):
insert = """
INSERT INTO auth (
public_key,
encrypted_private_key,
hashed_password
)
VALUES ('{}', '{}', '{}');
""".format(
public_key,
encrypted_private_key.hex(),
hashed_password.hex())
async with self._conn.cursor() as cursor:
await cursor.execute(insert)
self._conn.commit()
async def fetch_agent_resource(self, public_key):
fetch = """
SELECT public_key, name, timestamp FROM agents
WHERE public_key='{0}'
AND ({1}) >= start_block_num
AND ({1}) < end_block_num;
""".format(public_key, LATEST_BLOCK_NUM)
async with self._conn.cursor(cursor_factory=RealDictCursor) as cursor:
await cursor.execute(fetch)
return await cursor.fetchone()
async def fetch_all_agent_resources(self):
fetch = """
SELECT public_key, name, timestamp FROM agents
WHERE ({0}) >= start_block_num
AND ({0}) < end_block_num;
""".format(LATEST_BLOCK_NUM)
async with self._conn.cursor(cursor_factory=RealDictCursor) as cursor:
await cursor.execute(fetch)
return await cursor.fetchall()
async def fetch_auth_resource(self, public_key):
fetch = """
SELECT * FROM auth WHERE public_key='{}'
""".format(public_key)
async with self._conn.cursor(cursor_factory=RealDictCursor) as cursor:
await cursor.execute(fetch)
return await cursor.fetchone()
async def fetch_record_resource(self, record_id):
fetch_record = """
SELECT record_id FROM records
WHERE record_id='{0}'
AND ({1}) >= start_block_num
AND ({1}) < end_block_num;
""".format(record_id, LATEST_BLOCK_NUM)
fetch_record_locations = """
SELECT latitude, longitude, timestamp FROM record_locations
WHERE record_id='{0}'
AND ({1}) >= start_block_num
AND ({1}) < end_block_num;
""".format(record_id, LATEST_BLOCK_NUM)
fetch_record_owners = """
SELECT agent_id, timestamp FROM record_owners
WHERE record_id='{0}'
AND ({1}) >= start_block_num
AND ({1}) < end_block_num;
""".format(record_id, LATEST_BLOCK_NUM)
async with self._conn.cursor(cursor_factory=RealDictCursor) as cursor:
try:
await cursor.execute(fetch_record)
record = await cursor.fetchone()
await cursor.execute(fetch_record_locations)
record['locations'] = await cursor.fetchall()
await cursor.execute(fetch_record_owners)
record['owners'] = await cursor.fetchall()
return record
except TypeError:
return None
async def fetch_all_record_resources(self):
fetch_records = """
SELECT record_id FROM records
WHERE ({0}) >= start_block_num
AND ({0}) < end_block_num;
""".format(LATEST_BLOCK_NUM)
async with self._conn.cursor(cursor_factory=RealDictCursor) as cursor:
try:
await cursor.execute(fetch_records)
records = await cursor.fetchall()
for record in records:
fetch_record_locations = """
SELECT latitude, longitude, timestamp
FROM record_locations
WHERE record_id='{0}'
AND ({1}) >= start_block_num
AND ({1}) < end_block_num;
""".format(record['record_id'], LATEST_BLOCK_NUM)
fetch_record_owners = """
SELECT agent_id, timestamp
FROM record_owners
WHERE record_id='{0}'
AND ({1}) >= start_block_num
AND ({1}) < end_block_num;
""".format(record['record_id'], LATEST_BLOCK_NUM)
await cursor.execute(fetch_record_locations)
record['locations'] = await cursor.fetchall()
await cursor.execute(fetch_record_owners)
record['owners'] = await cursor.fetchall()
return records
except TypeError:
return []
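if __name__ == '__main__':
    # Illustrative sketch only (not part of the original module); the host, port and
    # credentials below are placeholder assumptions. Exercises the retry/backoff
    # behaviour documented in connect(): 3 attempts with delays of 1 s, 2 s, 4 s.
    loop = asyncio.get_event_loop()
    database = Database('localhost', 5432, 'simple_supply', 'sawtooth', 'sawtooth', loop)
    loop.run_until_complete(database.connect(retries=3, initial_delay=1, backoff=2))
    database.disconnect()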
| 34.871921
| 80
| 0.573527
|
import asyncio
import logging
import aiopg
import psycopg2
from psycopg2.extras import RealDictCursor
LATEST_BLOCK_NUM = """
SELECT max(block_num) FROM blocks
"""
LOGGER = logging.getLogger(__name__)
class Database(object):
def __init__(self, host, port, name, user, password, loop):
self._dsn = 'dbname={} user={} password={} host={} port={}'.format(
name, user, password, host, port)
self._loop = loop
self._conn = None
async def connect(self, retries=5, initial_delay=1, backoff=2):
LOGGER.info('Connecting to database')
delay = initial_delay
for attempt in range(retries):
try:
self._conn = await aiopg.connect(
dsn=self._dsn, loop=self._loop, echo=True)
LOGGER.info('Successfully connected to database')
return
except psycopg2.OperationalError:
LOGGER.debug(
'Connection failed.'
' Retrying connection (%s retries remaining)',
retries - attempt)
await asyncio.sleep(delay)
delay *= backoff
self._conn = await aiopg.connect(
dsn=self._dsn, loop=self._loop, echo=True)
LOGGER.info('Successfully connected to database')
def disconnect(self):
if self._conn is not None:
self._conn.close()
async def create_auth_entry(self,
public_key,
encrypted_private_key,
hashed_password):
insert = """
INSERT INTO auth (
public_key,
encrypted_private_key,
hashed_password
)
VALUES ('{}', '{}', '{}');
""".format(
public_key,
encrypted_private_key.hex(),
hashed_password.hex())
async with self._conn.cursor() as cursor:
await cursor.execute(insert)
self._conn.commit()
async def fetch_agent_resource(self, public_key):
fetch = """
SELECT public_key, name, timestamp FROM agents
WHERE public_key='{0}'
AND ({1}) >= start_block_num
AND ({1}) < end_block_num;
""".format(public_key, LATEST_BLOCK_NUM)
async with self._conn.cursor(cursor_factory=RealDictCursor) as cursor:
await cursor.execute(fetch)
return await cursor.fetchone()
async def fetch_all_agent_resources(self):
fetch = """
SELECT public_key, name, timestamp FROM agents
WHERE ({0}) >= start_block_num
AND ({0}) < end_block_num;
""".format(LATEST_BLOCK_NUM)
async with self._conn.cursor(cursor_factory=RealDictCursor) as cursor:
await cursor.execute(fetch)
return await cursor.fetchall()
async def fetch_auth_resource(self, public_key):
fetch = """
SELECT * FROM auth WHERE public_key='{}'
""".format(public_key)
async with self._conn.cursor(cursor_factory=RealDictCursor) as cursor:
await cursor.execute(fetch)
return await cursor.fetchone()
async def fetch_record_resource(self, record_id):
fetch_record = """
SELECT record_id FROM records
WHERE record_id='{0}'
AND ({1}) >= start_block_num
AND ({1}) < end_block_num;
""".format(record_id, LATEST_BLOCK_NUM)
fetch_record_locations = """
SELECT latitude, longitude, timestamp FROM record_locations
WHERE record_id='{0}'
AND ({1}) >= start_block_num
AND ({1}) < end_block_num;
""".format(record_id, LATEST_BLOCK_NUM)
fetch_record_owners = """
SELECT agent_id, timestamp FROM record_owners
WHERE record_id='{0}'
AND ({1}) >= start_block_num
AND ({1}) < end_block_num;
""".format(record_id, LATEST_BLOCK_NUM)
async with self._conn.cursor(cursor_factory=RealDictCursor) as cursor:
try:
await cursor.execute(fetch_record)
record = await cursor.fetchone()
await cursor.execute(fetch_record_locations)
record['locations'] = await cursor.fetchall()
await cursor.execute(fetch_record_owners)
record['owners'] = await cursor.fetchall()
return record
except TypeError:
return None
async def fetch_all_record_resources(self):
fetch_records = """
SELECT record_id FROM records
WHERE ({0}) >= start_block_num
AND ({0}) < end_block_num;
""".format(LATEST_BLOCK_NUM)
async with self._conn.cursor(cursor_factory=RealDictCursor) as cursor:
try:
await cursor.execute(fetch_records)
records = await cursor.fetchall()
for record in records:
fetch_record_locations = """
SELECT latitude, longitude, timestamp
FROM record_locations
WHERE record_id='{0}'
AND ({1}) >= start_block_num
AND ({1}) < end_block_num;
""".format(record['record_id'], LATEST_BLOCK_NUM)
fetch_record_owners = """
SELECT agent_id, timestamp
FROM record_owners
WHERE record_id='{0}'
AND ({1}) >= start_block_num
AND ({1}) < end_block_num;
""".format(record['record_id'], LATEST_BLOCK_NUM)
await cursor.execute(fetch_record_locations)
record['locations'] = await cursor.fetchall()
await cursor.execute(fetch_record_owners)
record['owners'] = await cursor.fetchall()
return records
except TypeError:
return []
| true
| true
|
1c419de856ffae73049f83b1158780a2791cd626
| 1,129
|
py
|
Python
|
sherlockpipe/objectinfo/MissionFfiCoordsObjectInfo.py
|
martindevora/SHERLOCK
|
5e7492552cbce29e960684a44fd6ad875c8cf60e
|
[
"MIT"
] | 1
|
2021-01-14T16:44:48.000Z
|
2021-01-14T16:44:48.000Z
|
sherlockpipe/objectinfo/MissionFfiCoordsObjectInfo.py
|
martindevora/SHERLOCK
|
5e7492552cbce29e960684a44fd6ad875c8cf60e
|
[
"MIT"
] | null | null | null |
sherlockpipe/objectinfo/MissionFfiCoordsObjectInfo.py
|
martindevora/SHERLOCK
|
5e7492552cbce29e960684a44fd6ad875c8cf60e
|
[
"MIT"
] | null | null | null |
from sherlockpipe.objectinfo.ObjectInfo import ObjectInfo
class MissionFfiCoordsObjectInfo(ObjectInfo):
"""
Implementation of ObjectInfo to be used to characterize long-cadence objects from TESS by providing the RA and Dec.
"""
def __init__(self, ra, dec, sectors, initial_mask=None, initial_detrend_period=None):
"""
        @param ra: the object's right ascension.
        @param dec: the object's declination.
@param sectors: an array of integers specifying which sectors will be analysed for the object
@param initial_mask: an array of time ranges provided to mask them into the initial object light curve.
@param initial_detrend_period: integer value specifying a fixed value for an initial period to be detrended
from the initial light curve before processing.
"""
super().__init__(initial_mask, initial_detrend_period)
self.ra = ra
self.dec = dec
self.sectors = sectors
def sherlock_id(self):
return str(self.ra) + "_" + str(self.dec) + "_FFI_" + str(self.sectors)
def mission_id(self):
return None
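if __name__ == '__main__':
    # Illustrative sketch only: the coordinates and sector list below are placeholders.
    info = MissionFfiCoordsObjectInfo(ra=300.47, dec=-71.96, sectors=[1, 2])
    print(info.sherlock_id())  # -> '300.47_-71.96_FFI_[1, 2]'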
| 40.321429
| 119
| 0.693534
|
from sherlockpipe.objectinfo.ObjectInfo import ObjectInfo
class MissionFfiCoordsObjectInfo(ObjectInfo):
def __init__(self, ra, dec, sectors, initial_mask=None, initial_detrend_period=None):
super().__init__(initial_mask, initial_detrend_period)
self.ra = ra
self.dec = dec
self.sectors = sectors
def sherlock_id(self):
return str(self.ra) + "_" + str(self.dec) + "_FFI_" + str(self.sectors)
def mission_id(self):
return None
| true
| true
|
1c419f1406e615e605e48a658688ee756341eb09
| 2,316
|
py
|
Python
|
domintell/messages/dio_status.py
|
yaccri/python-domintell
|
e8a17c9f25ef071a58dd0656746bde9105ba5f01
|
[
"MIT"
] | 1
|
2021-12-03T04:29:21.000Z
|
2021-12-03T04:29:21.000Z
|
domintell/messages/dio_status.py
|
yaccri/python-domintell
|
e8a17c9f25ef071a58dd0656746bde9105ba5f01
|
[
"MIT"
] | 3
|
2020-09-20T11:50:28.000Z
|
2021-08-13T10:16:14.000Z
|
domintell/messages/dio_status.py
|
yaccri/python-domintell
|
e8a17c9f25ef071a58dd0656746bde9105ba5f01
|
[
"MIT"
] | 6
|
2020-10-05T20:23:06.000Z
|
2021-09-14T07:18:31.000Z
|
"""
DIO (Input / Output) status (to be inherited)
:author: Zilvinas Binisevicius <zilvinas@binis.me>
"""
import json
import domintell
DIO_COMMAND_CODE = "DIO"
class GenericDIOStatusMessage(domintell.Message):
"""
Generic Digital input & output hybrid module status
"""
def __init__(self, pinCount=1, address=None):
domintell.Message.__init__(self)
self.moduleType = DIO_COMMAND_CODE
self.pinCount = pinCount
self.serialNumber = None
self.dataType = None
self._inputs = {}
self._outputs = {}
for i in range(0, self.pinCount):
self._inputs[i] = 0
for i in range(0, self.pinCount):
self._outputs[i] = 0
def get_inputs(self):
return self._inputs
def get_outputs(self):
return self._outputs
def get_input(self, channel):
if channel < self.pinCount:
return self._inputs[channel]
return 0
def get_output(self, channel):
if channel < self.pinCount:
return self._outputs[channel]
return 0
def is_input(self):
if self.dataType == 'I':
return True
return False
def is_output(self):
if self.dataType == 'O':
return True
return False
def populate(self, serialNumber, dataType, dataString):
"""
:return: None
"""
assert isinstance(dataString, str)
self.serialNumber = serialNumber
self.dataType = dataType
mask = int(dataString[0:2].strip(), 16)
        if dataType == 'I':
            for input in range(0, self.pinCount):
                # bit N of the status mask carries the state of channel N
                self._inputs[input] = 1 if (mask & (1 << input)) else 0
        if dataType == 'O':
            for output in range(0, self.pinCount):
                self._outputs[output] = 1 if (mask & (1 << output)) else 0
def to_json(self):
"""
:return: str
"""
json_dict = self.to_json_basic()
for input in range(0, self.pinCount):
if input < len(self._inputs):
json_dict['input{}'.format(input + 1)] = self._inputs[input]
if input < len(self._outputs):
json_dict['output{}'.format(input + 1)] = self._outputs[input]
return json.dumps(json_dict)
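if __name__ == '__main__':
    # Illustrative sketch only (not part of the original module). '05' parses to the
    # bit mask 0b101, so channels 0 and 2 read as active and channels 1 and 3 as idle.
    status = GenericDIOStatusMessage(pinCount=4)
    status.populate(serialNumber='XX123', dataType='I', dataString='05')
    print(status.get_inputs())  # expected: {0: 1, 1: 0, 2: 1, 3: 0}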
| 26.930233
| 78
| 0.567789
|
import json
import domintell
DIO_COMMAND_CODE = "DIO"
class GenericDIOStatusMessage(domintell.Message):
def __init__(self, pinCount=1, address=None):
domintell.Message.__init__(self)
self.moduleType = DIO_COMMAND_CODE
self.pinCount = pinCount
self.serialNumber = None
self.dataType = None
self._inputs = {}
self._outputs = {}
for i in range(0, self.pinCount):
self._inputs[i] = 0
for i in range(0, self.pinCount):
self._outputs[i] = 0
def get_inputs(self):
return self._inputs
def get_outputs(self):
return self._outputs
def get_input(self, channel):
if channel < self.pinCount:
return self._inputs[channel]
return 0
def get_output(self, channel):
if channel < self.pinCount:
return self._outputs[channel]
return 0
def is_input(self):
if self.dataType == 'I':
return True
return False
def is_output(self):
if self.dataType == 'O':
return True
return False
def populate(self, serialNumber, dataType, dataString):
assert isinstance(dataString, str)
self.serialNumber = serialNumber
self.dataType = dataType
mask = int(dataString[0:2].strip(), 16)
        if dataType == 'I':
            for input in range(0, self.pinCount):
                self._inputs[input] = 1 if (mask & (1 << input)) else 0
        if dataType == 'O':
            for output in range(0, self.pinCount):
                self._outputs[output] = 1 if (mask & (1 << output)) else 0
def to_json(self):
json_dict = self.to_json_basic()
for input in range(0, self.pinCount):
if input < len(self._inputs):
json_dict['input{}'.format(input + 1)] = self._inputs[input]
if input < len(self._outputs):
json_dict['output{}'.format(input + 1)] = self._outputs[input]
return json.dumps(json_dict)
| true
| true
|
1c419f420ff290d6c2c95b8df409a0fbd916bfc3
| 221
|
py
|
Python
|
docs/components_page/components/table/kwargs_source.py
|
benpgreen/dash-bootstrap-components
|
7853b1db5ea39b1eec52ea42fe90db851b509b02
|
[
"Apache-2.0"
] | null | null | null |
docs/components_page/components/table/kwargs_source.py
|
benpgreen/dash-bootstrap-components
|
7853b1db5ea39b1eec52ea42fe90db851b509b02
|
[
"Apache-2.0"
] | null | null | null |
docs/components_page/components/table/kwargs_source.py
|
benpgreen/dash-bootstrap-components
|
7853b1db5ea39b1eec52ea42fe90db851b509b02
|
[
"Apache-2.0"
] | null | null | null |
import dash_bootstrap_components as dbc
from .simple import table as simple_table
table = dbc.Table(
simple_table.children,
bordered=True,
dark=True,
hover=True,
responsive=True,
striped=True,
)
| 17
| 41
| 0.714932
|
import dash_bootstrap_components as dbc
from .simple import table as simple_table
table = dbc.Table(
simple_table.children,
bordered=True,
dark=True,
hover=True,
responsive=True,
striped=True,
)
| true
| true
|
1c41a26f61032f6198f2787aacb6012cc8cd56f6
| 1,800
|
py
|
Python
|
setup.py
|
jonathaneunice/combomethod
|
554a3ae1c45f156f4bde9b71365ff8fb21c50a13
|
[
"Apache-2.0"
] | 1
|
2015-10-12T01:42:11.000Z
|
2015-10-12T01:42:11.000Z
|
setup.py
|
jonathaneunice/combomethod
|
554a3ae1c45f156f4bde9b71365ff8fb21c50a13
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
jonathaneunice/combomethod
|
554a3ae1c45f156f4bde9b71365ff8fb21c50a13
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
from setuptools import setup
from codecs import open
def lines(text):
"""
Returns each non-blank line in text enclosed in a list.
    See https://pypi.org/project/textdata for a more sophisticated version.
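
    A minimal added illustration (doctest-style, not in the original):

    >>> lines('''
    ... alpha
    ...
    ... beta
    ... ''')
    ['alpha', 'beta']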
"""
return [l.strip() for l in text.strip().splitlines() if l.strip()]
setup(
name='combomethod',
version='1.0.12',
author='Jonathan Eunice',
author_email='jonathan.eunice@gmail.com',
description="Decorator indicating a method is both a class and an instance method",
long_description=open('README.rst', encoding='utf-8').read(),
url='https://bitbucket.org/jeunice/combomethod',
license='Apache License 2.0',
py_modules=['combomethod'],
setup_requires=[],
install_requires=[],
tests_require=['tox', 'pytest', 'pytest-cov'],
test_suite="test",
zip_safe=False,
keywords='method classmethod instance combomethod',
classifiers=lines("""
Development Status :: 5 - Production/Stable
Operating System :: OS Independent
License :: OSI Approved :: Apache Software License
Intended Audience :: Developers
Programming Language :: Python
Programming Language :: Python :: 2
Programming Language :: Python :: 2.6
Programming Language :: Python :: 2.7
Programming Language :: Python :: 3
Programming Language :: Python :: 3.3
Programming Language :: Python :: 3.4
Programming Language :: Python :: 3.5
Programming Language :: Python :: 3.6
Programming Language :: Python :: 3.7
Programming Language :: Python :: Implementation :: CPython
Programming Language :: Python :: Implementation :: PyPy
Topic :: Software Development :: Libraries :: Python Modules
""")
)
| 35.294118
| 87
| 0.651667
|
from setuptools import setup
from codecs import open
def lines(text):
return [l.strip() for l in text.strip().splitlines() if l.strip()]
setup(
name='combomethod',
version='1.0.12',
author='Jonathan Eunice',
author_email='jonathan.eunice@gmail.com',
description="Decorator indicating a method is both a class and an instance method",
long_description=open('README.rst', encoding='utf-8').read(),
url='https://bitbucket.org/jeunice/combomethod',
license='Apache License 2.0',
py_modules=['combomethod'],
setup_requires=[],
install_requires=[],
tests_require=['tox', 'pytest', 'pytest-cov'],
test_suite="test",
zip_safe=False,
keywords='method classmethod instance combomethod',
classifiers=lines("""
Development Status :: 5 - Production/Stable
Operating System :: OS Independent
License :: OSI Approved :: Apache Software License
Intended Audience :: Developers
Programming Language :: Python
Programming Language :: Python :: 2
Programming Language :: Python :: 2.6
Programming Language :: Python :: 2.7
Programming Language :: Python :: 3
Programming Language :: Python :: 3.3
Programming Language :: Python :: 3.4
Programming Language :: Python :: 3.5
Programming Language :: Python :: 3.6
Programming Language :: Python :: 3.7
Programming Language :: Python :: Implementation :: CPython
Programming Language :: Python :: Implementation :: PyPy
Topic :: Software Development :: Libraries :: Python Modules
""")
)
| true
| true
|
1c41a402ce1ea5d220dd14bb8c5656b8c61a0fd9
| 27,943
|
py
|
Python
|
reactiondataextractor/extractors/conditions.py
|
dmw51/reactiondataextractor
|
f7d2ee9a2a7df17ffcf9b33efee2bcb49dfdcbae
|
[
"MIT"
] | 3
|
2021-09-29T01:33:35.000Z
|
2022-03-19T09:04:23.000Z
|
reactiondataextractor/extractors/conditions.py
|
dmw51/reactiondataextractor
|
f7d2ee9a2a7df17ffcf9b33efee2bcb49dfdcbae
|
[
"MIT"
] | 4
|
2021-10-05T06:11:28.000Z
|
2022-02-23T21:18:32.000Z
|
reactiondataextractor/extractors/conditions.py
|
dmw51/reactiondataextractor
|
f7d2ee9a2a7df17ffcf9b33efee2bcb49dfdcbae
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Conditions
=======
This module contains classes and methods for extracting conditions, as well as directly related functions.
author: Damian Wilary
email: dmw51@cam.ac.uk
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from collections import Counter
from itertools import chain
import logging
from matplotlib.patches import Rectangle
import numpy as np
import os
import re
from chemdataextractor.doc import Span
from chemdataextractor.nlp.tokenize import ChemWordTokenizer
from scipy.signal import find_peaks
from sklearn.neighbors import KernelDensity
from sklearn.model_selection import GridSearchCV
from ..actions import find_nearby_ccs, extend_line
from ..models import Conditions, SolidArrow, BaseExtractor, Figure, TextLine, Crop, FigureRoleEnum, ReactionRoleEnum, Panel
from ..models.utils import Point, Line, DisabledNegativeIndices
from ..ocr import read_conditions
from ..utils.processing import find_minima_between_peaks, erase_elements
from .. import settings
log = logging.getLogger('extract.conditions')
SPECIES_FILE = os.path.join(settings.ROOT_DIR, 'dict', 'species.txt')
class ConditionsExtractor(BaseExtractor):
"""Main class for extracting reaction conditions from images
:param arrows: All arrows in a figure
:type arrows: list[SolidArrow]
:param fig: main figure
:type fig: Figure"""
def __init__(self, arrows, fig=None):
self.fig = fig if fig is not None else settings.main_figure[0]
self.arrows = arrows
self._extracted = None
def extract(self):
"""Main extraction method"""
conditions, conditions_structures = [], []
for arrow in self.arrows:
step_conditions, step_structures = self.get_conditions(arrow)
conditions += [step_conditions]
conditions_structures.extend(step_structures)
self._extracted = conditions, conditions_structures
return self.extracted
@property
def extracted(self):
"""Returns extracted objects"""
return self._extracted
def plot_extracted(self, ax):
"""Adds extracted panels onto a canvas of ``ax``"""
conditions, conditions_structures = self._extracted
params = {'facecolor': 'g', 'edgecolor': None, 'alpha': 0.3}
for panel in conditions_structures:
rect_bbox = Rectangle((panel.left - 1, panel.top - 1), panel.right - panel.left,
panel.bottom - panel.top, **params)
ax.add_patch(rect_bbox)
for step_conditions in conditions:
for t in step_conditions.text_lines:
panel = t.panel
rect_bbox = Rectangle((panel.left - 1, panel.top - 1), panel.right - panel.left,
panel.bottom - panel.top, **params)
ax.add_patch(rect_bbox)
def get_conditions(self, arrow):
"""
Recovers conditions of a single reaction step.
Marks text lines and chemical structures in the conditions region. Passes text through an OCR engine, and parses
the output. Forms a Conditions object containing all the collected information.
:param SolidArrow arrow: Reaction arrow around which the search for conditions is performed
:return Conditions: Conditions object containing found information.
"""
textlines, condition_structures = self.find_step_conditions(arrow)
[setattr(panel, 'role', ReactionRoleEnum.CONDITIONS) for panel in condition_structures]
if textlines:
recognised = [read_conditions(self.fig, line, conf_threshold=40) for line in textlines]
recognised = [sentence for sentence in recognised if sentence]
parser = ConditionParser(recognised)
conditions_dct = parser.parse_conditions()
else:
conditions_dct = {}
return Conditions(textlines, conditions_dct, arrow, condition_structures), condition_structures
def find_step_conditions(self, arrow):
"""
Finds conditions of a step. Selects a region around an arrow. If the region contains text, scans the text.
        Otherwise, empty collections are returned (no conditions found).
        :param Arrow arrow: Arrow around which the conditions are to be looked for
        :return: tuple ([TextLine,...] text lines, [Panel,...] conditions structure panels)
"""
structure_panels = [cc.parent_panel for cc in self.fig.connected_components if
cc.role == FigureRoleEnum.STRUCTUREBACKBONE
and cc.parent_panel]
conditions_panels = [panel for panel in structure_panels if ConditionsExtractor.belongs_to_conditions(panel,
arrow)]
text_lines = self.mark_text_lines(arrow, conditions_panels)
for text_line in text_lines:
self.collect_characters(text_line)
text_lines = [text_line for text_line in text_lines if text_line.connected_components]
return text_lines, conditions_panels
def mark_text_lines(self, arrow, conditions_panels):
"""
        Isolates conditions around ``arrow`` in ``fig``.
Marks text lines first by finding obvious conditions' text characters around an arrow.
This scan is also performed around `conditions_panels` if any. Using the found ccs, text lines are fitted
with kernel density estimates.
:param SolidArrow arrow: arrow around which the region of interest is centered
:param [Panel,...] conditions_panels: iterable of panels containing connected components representing conditions
        :return: [TextLine,...] text lines found around the arrow (in main-figure coordinates)
"""
fig = self.fig
average_height = np.median([cc.height for cc in fig.connected_components])
areas = [cc.area for cc in fig.connected_components]
areas.sort()
def condition1(cc): return cc.role != FigureRoleEnum.STRUCTUREAUXILIARY
if arrow.is_vertical:
def condition2(cc): return cc.top > arrow.top and cc.bottom < arrow.bottom
else:
def condition2(cc): return cc.left > arrow.left and cc.right < arrow.right
        def condition(cc): return condition1(cc) and condition2(cc)  # apply both predicates
middle_pixel = arrow.center_px
def distance_fn(cc): return 2.2 * cc.height
core_ccs = find_nearby_ccs(middle_pixel, fig.connected_components, (3 * average_height, distance_fn),
condition=condition)
if not core_ccs:
for pixel in arrow.pixels[::10]:
core_ccs = find_nearby_ccs(pixel, fig.connected_components, (2 * average_height, distance_fn),
condition=condition)
if len(core_ccs) > 1:
break
else:
log.warning('No conditions were found in the initial scan. Aborting conditions search...')
return []
if conditions_panels:
for panel in conditions_panels:
core_ccs += find_nearby_ccs(panel, fig.connected_components, (3 * average_height, distance_fn),
condition=condition)
conditions_region = Panel.create_megarect(core_ccs)
cropped_region = Crop(erase_elements(fig, conditions_panels), conditions_region) # Do not look at structures
text_lines = [TextLine(None, None, top, bottom, crop=cropped_region, anchor=anchor) for (top, bottom, anchor) in
self.identify_text_lines(cropped_region)]
text_lines = [text_line.in_main_figure for text_line in text_lines]
return text_lines
def identify_text_lines(self, crop):
"""Fits text lines of conditions text using kernel density estimation.
Fits kernel density estimate to bottom boundaries of the relevant panels. Bottom text lines are found as the
maxima of the estimate subject to a condition that the text lines must be separated by appropriate distance.
The estimate is then chopped into region based on the deepest minima between peaks and characters assigned to
these regions. Groups of characters are then used to estimate the top boundary of each text line. Each text line
is finally associated with an anchor - one of its characters - to situate it in the main image.
:param Crop crop: cropped region of interest containing the reaction conditions
:return: iterable of tuples (top boundary, bottom boundary, anchor)
:rtype: list
"""
ccs = [cc for cc in crop.connected_components if cc.role != FigureRoleEnum.ARROW] # filter out arrows
if len(ccs) == 1: # Special case
only_cc = ccs[0]
anchor = Point(only_cc.center[1], only_cc.center[0])
return [(only_cc.top, only_cc.bottom, anchor)]
if len(ccs) > 10:
ccs = [cc for cc in ccs if
cc.area > np.percentile([cc.area for cc in ccs], 0.2)] # filter out all small ccs (e.g. dots)
img = crop.img
bottom_boundaries = [cc.bottom for cc in ccs]
bottom_boundaries.sort()
bottom_count = Counter(bottom_boundaries)
bottom_boundaries = np.array([item for item in bottom_count.elements()]).reshape(-1, 1)
little_data = len(ccs) < 10
grid = GridSearchCV(KernelDensity(),
{'bandwidth': np.linspace(0.005, 2.0, 100)},
cv=(len(bottom_boundaries) if little_data else 10)) # 10-fold cross-validation
grid.fit(bottom_boundaries)
best_bw = grid.best_params_['bandwidth']
kde = KernelDensity(bandwidth=best_bw, kernel='exponential')
kde.fit(bottom_boundaries)
# print(f'params: {kde.get_params()}')
rows = np.linspace(0, img.shape[0] + 20, img.shape[0] + 21)
logp_bottom = kde.score_samples(rows.reshape(-1, 1))
heights = [cc.bottom - cc.top for cc in ccs]
mean_height = np.mean(heights, dtype=np.uint32)
bottom_lines, _ = find_peaks(logp_bottom, distance=mean_height * 1.2)
data = np.array([rows, logp_bottom])
bottom_lines.sort()
bucket_limits = find_minima_between_peaks(data, bottom_lines)
buckets = np.split(rows, bucket_limits)
bucketed_chars = [[cc for cc in ccs if cc.bottom in bucket] for bucket in buckets]
top_lines = [np.mean([cc.top for cc in bucket], dtype=int) for bucket in bucketed_chars]
anchors = [sorted([cc for cc in bucket], key=lambda cc: cc.area)[-1].center for bucket in bucketed_chars]
anchors = [Point(row=anchor[1], col=anchor[0]) for anchor in anchors]
return [line for line in zip(top_lines, bottom_lines, anchors)]
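    # Illustrative toy sketch of the approach above (added; not part of the pipeline):
    #   import numpy as np
    #   from sklearn.neighbors import KernelDensity
    #   from scipy.signal import find_peaks
    #   bottoms = np.array([20, 21, 19, 52, 50, 51], dtype=float).reshape(-1, 1)
    #   kde = KernelDensity(bandwidth=1.5, kernel='exponential').fit(bottoms)
    #   rows = np.linspace(0, 70, 71).reshape(-1, 1)
    #   peaks, _ = find_peaks(kde.score_samples(rows), distance=10)
    #   # peaks lands near rows 20 and 51: one bottom boundary per text line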
def collect_characters(self, text_line):
"""
Accurately assigns relevant characters in ``fig`` to ``text_line``
Uses a proximity search algorithm to carefully assign characters to each text line. Characters are assigned
based on mutual distance as well as horizontal displacements from the middle of text line and from the
bottom of the line and panel height.
:param TextLine text_line: found text line object
:return: None (mutates connected components assigned to a text line)
:rtype: None
"""
relevant_ccs = [cc for cc in self.fig.connected_components if cc.role != FigureRoleEnum.ARROW]
initial_distance = np.sqrt(np.mean([cc.area for cc in relevant_ccs]))
distance_fn = settings.DISTANCE_FN_CHARS
def proximity_coeff(cc): return .75 if cc.area < np.percentile([cc.area for cc in relevant_ccs], 65) else .4
def condition1(cc): return (
abs(text_line.panel.center[1] - cc.center[1]) < proximity_coeff(cc) * text_line.panel.height)
def condition2(cc): return cc.height < text_line.panel.height * 1.7
def condition3(cc): return abs(text_line.panel.bottom - cc.bottom) < 0.65 * text_line.panel.height
def condition(cc): return condition1(cc) and condition2(cc) and condition3(cc)
# First condition is proximity of panel center to center of text line measured vertically.
# Second is that height is comparable to text_line.
# Third is that the base of each letter is close to the bottom text line
found_ccs = find_nearby_ccs(text_line.anchor, relevant_ccs, (initial_distance, distance_fn),
FigureRoleEnum.CONDITIONSCHAR, condition)
if found_ccs:
text_line.connected_components = found_ccs
def add_diags_to_dicts(self, diags):
"""Adds SMILES representations of diagrams that had been assigned to conditions regions
:param [Diagram,...] diags: iterable of extracted diagrams
:return: None (mutates the conditions dictionary)
:rtype: None"""
conditions, _ = self.extracted
for step_conditions in conditions:
if step_conditions.structure_panels:
cond_diags = [diag for diag in diags if diag.panel in step_conditions.structure_panels]
step_conditions.diags = cond_diags
try:
step_conditions.conditions_dct['other species'].extend(
[diag.smiles for diag in cond_diags if diag.smiles])
except KeyError:
step_conditions.conditions_dct['other species'] = [diag.smiles for diag in cond_diags if
diag.smiles]
@staticmethod
def belongs_to_conditions(structure_panel, arrow):
"""
Checks if a structure is part of the conditions
Checks whether the ``structure_panel`` centre lies close to the line through the arrow's midpoint
perpendicular to the arrow.
Two points equidistant to the arrow are chosen and the distance from these is compared to two extreme
points of an arrow. If the centre is closer to either of the two points
(subject to a maximum threshold distance) than to either of the extremes, the structure is deemed to be
part of the conditions region.
:param Panel structure_panel: Panel object marking a structure (superatoms included)
:param Arrow arrow: Arrow defining the conditions region
:return: bool True if within the conditions region else False
"""
pixels = arrow.pixels
react_endpoint = pixels[0]
prod_endpoint = pixels[-1]
midpoint = pixels[len(pixels) // 2]
parallel_line_dummy = Line([midpoint])
slope = arrow.line.slope
parallel_line_dummy.slope = -1 / slope if abs(slope) > 0.05 else np.inf
parallel_1, parallel_2 = extend_line(parallel_line_dummy,
extension=react_endpoint.separation(prod_endpoint) // 2)
closest = min([parallel_1, parallel_2, react_endpoint, prod_endpoint],
key=lambda point: structure_panel.separation(point))
if closest in [parallel_1, parallel_2] and structure_panel.separation(arrow.panel) < 1.0 * np.sqrt(
structure_panel.area):
return True
else:
return False
class ConditionParser:
"""
This class is used to parse conditions text. It is composed of several methods to facilitate parsing recognised text
using formal grammars.
The following strings define formal grammars to detect catalysts (cat) and coreactants (co) based on their units.
Species which fulfill neither criterion can be parsed as `other_chemicals`. `default_values` is also defined to help
parse both integers and floating-point values.
:param sentences: iterable of Sentence objects retrieved from an OCR engine.
:type sentences: list[chemdataextractor.doc.Sentence]
"""
default_values = r'((?:\d\.)?\d{1,3})'
cat_units = r'(mol\s?%|M|wt\s?%)'
# co_units = r'(eq\.?(?:uiv(?:alents?)?\.?)?|m?L)'
co_units = r'(equivalents?|equiv\.?|eq\.?|m?L)'
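# Illustrative matches (hand-picked examples): default_values + cat_units matches
# '5 mol%', '0.1 M' or '20 wt%'; default_values + co_units matches '2 equiv',
# '1.5 eq.' or '10 mL'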
def __init__(self, sentences):
self.sentences = sentences # sentences are ChemDataExtractor Sentence objects
def parse_conditions(self):
parse_fns = [ConditionParser._parse_coreactants, ConditionParser._parse_catalysis,
ConditionParser._parse_other_species, ConditionParser._parse_other_conditions]
conditions_dct = {'catalysts': None, 'coreactants': None, 'other species': None, 'temperature': None,
'pressure': None, 'time': None, 'yield': None}
coreactants_lst = []
catalysis_lst = []
other_species_lst = []
for sentence in self.sentences:
parsed = [parse(sentence) for parse in parse_fns]
coreactants_lst.extend(parsed[0])
catalysis_lst.extend(parsed[1])
other_species_lst.extend(ConditionParser._filter_species(parsed))
conditions_dct.update(parsed[3])
conditions_dct['coreactants'] = coreactants_lst
conditions_dct['catalysts'] = catalysis_lst
conditions_dct['other species'] = other_species_lst
return conditions_dct
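# Illustrative output for a sentence like 'Pd(OAc)2 (5 mol%), Et3N (2 equiv), 80 °C, 12 h',
# assuming OCR and tokenisation behave as expected:
#   {'catalysts': [{'Species': 'Pd(OAc)2', 'Value': 5.0, 'Units': 'mol%'}],
#    'coreactants': [{'Species': 'Et3N', 'Value': 2.0, 'Units': 'equiv'}],
#    'other species': [...], 'temperature': {'Value': 80.0, 'Units': '°C'},
#    'pressure': None, 'time': {'Value': 12.0, 'Units': 'h'}, 'yield': None}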
@staticmethod
def _identify_species(sentence):
with open(SPECIES_FILE, 'r') as file:
species_list = file.read().strip().split('\n')
# letters between which some lowercase letters and digits are allowed, optional brackets
formulae_brackets = r'((?:[A-Z]*\d?[a-z]\d?)\((?:[A-Z]*\d?[a-z]?\d?)*\)?\d?[A-Z]*[a-z]*\d?)*'
formulae_bracketless = r'(?<!°)\b(?<!\)|\()((?:[A-Z]+\d?[a-z]?\d?)+)(?!\(|\))\b'
letter_upper_identifiers = r'((?<!°)\b[A-Z]{1,4}\b)(?!\)|\.)'  # up to four capital letters used as an identifier
letter_lower_identifiers = r'(\b[a-z]\b)(?!\)|\.)' # Accept single lowercase letter subject to restrictions
number_identifiers = r'(?:^| )(?<!\w)([1-9])(?!\w)(?!\))(?:$|[, ])(?![A-Za-z])'
# number_identifiers matches, for example:
#   '1, 2, 3'               -> all three numbers as chemical identifiers
#   'CH3OH, 5, 6 (5 equiv)' -> 5 and 6 in the middle only
#   '5 5 equiv'             -> first 5 only
#   'A 5 equiv'             -> no matches
entity_mentions_brackets = re.finditer(formulae_brackets, sentence.text)
entity_mentions_bracketless = re.finditer(formulae_bracketless, sentence.text)
entity_mentions_letters_upper = re.finditer(letter_upper_identifiers, sentence.text)
entity_mentions_letters_lower = re.finditer(letter_lower_identifiers, sentence.text)
entity_mentions_numbers = re.finditer(number_identifiers, sentence.text)
spans = [Span(e.group(1), e.start(), e.end()) for e in
chain(entity_mentions_brackets, entity_mentions_bracketless,
entity_mentions_numbers, entity_mentions_letters_upper,
entity_mentions_letters_lower) if e.group(1)]
slashed_names = []
for token in sentence.tokens:
if '/' in token.text:
slashed_names.append(token)
all_mentions = ConditionParser._resolve_spans(spans+slashed_names)
# Add species from the list, treat them as seeds - allow more complex names
# eg. based on 'pentanol' on the list, allow '1-pentanol'
species_from_list = [token for token in sentence.tokens
if any(species in token.text.lower() for species in species_list if species)] # except ''
all_mentions += species_from_list
return list(set(all_mentions))
@staticmethod
def _parse_coreactants(sentence):
co_values = ConditionParser.default_values
co_str = co_values + r'\s?' + ConditionParser.co_units
return ConditionParser._find_closest_cem(sentence, co_str)
@staticmethod
def _parse_catalysis(sentence):
cat_values = ConditionParser.default_values
cat_str = cat_values + r'\s?' + ConditionParser.cat_units
return ConditionParser._find_closest_cem(sentence, cat_str)
@staticmethod
def _parse_other_species(sentence):
cems = ConditionParser._identify_species(sentence)
return [cem.text for cem in cems]
@staticmethod
def _parse_other_conditions(sentence):
other_dct = {}
parsed = [ConditionParser._parse_temperature(sentence), ConditionParser._parse_time(sentence),
ConditionParser._parse_pressure(sentence), ConditionParser._parse_yield(sentence)]
temperature, time, pressure, yield_ = parsed
if temperature:
other_dct['temperature'] = temperature # Create the key only if temperature was parsed
if time:
other_dct['time'] = time
if pressure:
other_dct['pressure'] = pressure
if yield_:
other_dct['yield'] = yield_
return other_dct
@staticmethod
def _find_closest_cem(sentence, parse_str):
"""Assign closest chemical species to found units (e.g. 'mol%' or 'eq')"""
phrase = sentence.text
matches = []
cwt = ChemWordTokenizer()
bracketed_units_pat = re.compile(r'\(\s*'+parse_str+r'\s*\)')
bracketed_units = re.findall(bracketed_units_pat, sentence.text)
if bracketed_units: # remove brackets
phrase = re.sub(bracketed_units_pat, ' '.join(bracketed_units[0]), phrase)
for match in re.finditer(parse_str, phrase):
match_tokens = cwt.tokenize(match.group(0))
phrase_tokens = cwt.tokenize(phrase)
match_start_idx = [idx for idx, token in enumerate(phrase_tokens) if match_tokens[0] in token][0]
match_end_idx = [idx for idx, token in enumerate(phrase_tokens) if match_tokens[-1] in token][0]
# To simplify the syntax above, a tokeniser that splits full stops more consistently could be introduced
# Accept the two tokens preceding the match and strip surrounding brackets, commas and full stops
species = DisabledNegativeIndices(phrase_tokens)[match_start_idx-2:match_start_idx]
species = ' '.join(token for token in species).strip('()., ')
if not species:
try:
species = DisabledNegativeIndices(phrase_tokens)[match_end_idx+1:match_start_idx+4]
# filter special signs and digits
species = map(lambda s: s.strip('., '), species)
species = filter(lambda token: token.isalpha(), species)
species = ' '.join(token for token in species)
except IndexError:
log.debug('Closest CEM not found for a catalyst/coreactant key phrase')
species = ''
if species:
matches.append({'Species': species, 'Value': float(match.group(1)), 'Units': match.group(2)})
return matches
@staticmethod
def _filter_species(parsed):
""" If a chemical species has been assigned as both catalyst or coreactant, and `other species`, remove if from
the latter. Also remove special cases"""
coreactants, catalysts, other_species, _ = parsed
combined = [d['Species'] for d in coreactants] + [d['Species'] for d in catalysts]
# if no coreactants or catalysts were found, return unchanged
if not combined:
return other_species
else:
unaccounted = []
combined = ' '.join(combined)
for species in other_species:
found = re.search(re.escape(species), combined) # include individual tokens for multi-token names
if not found and species != 'M':
unaccounted.append(species)
return list(set(unaccounted))
@staticmethod
def _resolve_spans(spans):
span_copy = spans.copy()
# spans is ~10-15 elements long at most
for span1 in spans:
for span2 in spans:
if span1.text != span2.text:
if span1.text in span2.text:
try:
span_copy.remove(span1)
except ValueError:
pass
elif span2.text in span1.text:
try:
span_copy.remove(span2)
except ValueError:
pass
return span_copy
@staticmethod
def _parse_time(sentence):  # TODO: add conditions for adding the parsed data
t_values = ConditionParser.default_values
t_units = r'(h(?:ours?)?|m(?:in)?|s(?:econds)?|days?)'
time_str = re.compile(r'(?<!\w)' + t_values + r'\s?' + t_units + r'(?=$|\s?,)')
time = re.search(time_str, sentence.text)
if time:
return {'Value': float(time.group(1)), 'Units': time.group(2)}
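# e.g. 'stirred for 4 h' -> {'Value': 4.0, 'Units': 'h'}; the lookahead requires the match
# to sit at the end of the sentence or before a comma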
@staticmethod
def _parse_temperature(sentence):
# The following formal grammars for temperature and pressure are quite complex, but allow parsing of additional
# generic descriptors like 'heat' or 'UHV' in `.group(1)`
t_units = r'\s?(?:o|O|0|°)C|K' # match 0C, oC and similar, as well as K
t_value1 = r'-?\d{1,4}' + r'\s?(?=' + t_units + ')' # capture numbers only if followed by units
t_value2 = r'r\.?\s?t\.?'
t_value3 = r'heat|reflux|room\s?temp'
# Add greek delta?
t_or = re.compile('(' + '|'.join((t_value1, t_value2, t_value3)) + ')' + '(' + t_units + ')' + '?', re.I)
temperature = re.search(t_or, sentence.text)
return ConditionParser._form_dict_entry(temperature)
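# e.g. '80 °C' -> {'Value': 80.0, 'Units': '°C'} and 'rt' -> {'Value': 'rt', 'Units': 'N/A'}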
@staticmethod
def _form_dict_entry(match):
if match:
units = match.group(2) if match.group(2) else 'N/A'
try:
return {'Value': float(match.group(1)), 'Units': units}
except ValueError:
return {'Value': match.group(1), 'Units': units} # if value is rt or heat, gram scale etc
@staticmethod
def _parse_pressure(sentence):
p_units = r'(?:m|h|k|M)?Pa|m?bar|atm'  # match Pa, mPa, hPa, kPa, MPa, bar, mbar and atm
p_values1 = r'\d{1,4}' + r'\s?(?=' + p_units + ')' # match numbers only if followed by units
p_values2 = r'(?:U?HV)|vacuum'
p_or = re.compile('(' + '|'.join((p_values1, p_values2)) + ')' + '(' + p_units + ')' + '?')
pressure = re.search(p_or, sentence.text)
if pressure:
units = pressure.group(2) if pressure.group(2) else 'N/A'
return {'Value': float(pressure.group(1)), 'Units': units}
@staticmethod
def _parse_yield(sentence):
y_units = r'%'  # match percentage yields
y_value1 = r'\d{1,2}' + r'\s?(?=' + y_units + ')' # capture numbers only if followed by units
y_value2 = r'gram scale'
y_or = re.compile('(' + '|'.join((y_value1, y_value2)) + ')' + '(' + y_units + ')' + '?')
y = re.search(y_or, sentence.text)
return ConditionParser._form_dict_entry(y)
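# e.g. '95%' -> {'Value': 95.0, 'Units': '%'} and 'gram scale' -> {'Value': 'gram scale', 'Units': 'N/A'}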
def clear_conditions_region(fig):
"""Removes connected components belonging to conditions and denoises the figure afterwards
:param Figure fig: Analysed figure
:return: new Figure object with conditions regions erased"""
fig_no_cond = erase_elements(fig, [cc for cc in fig.connected_components
if cc.role == FigureRoleEnum.ARROW or cc.role == FigureRoleEnum.CONDITIONSCHAR])
area_threshold = fig.get_bounding_box().area / 30000
# width_threshold = fig.get_bounding_box().width / 200
noise = [panel for panel in fig_no_cond.connected_components if panel.area < area_threshold]
return erase_elements(fig_no_cond, noise)
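# Typical usage (hypothetical driver code; `fig` and `arrows` come from earlier pipeline stages):
#   extractor = ConditionsExtractor(arrows, fig=fig)
#   conditions, structures = extractor.extract()
#   fig_without_conditions = clear_conditions_region(fig)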
| 46.963025
| 123
| 0.637655
|
1c41a46a988da3d4e057e2e594df382646f14b9c
| 24,352
|
py
|
Python
|
nsl/stac/utils.py
|
nearspacelabs/stac-client-python
|
d23eb3d991b97f23ea835bf5f9834a7e86886048
|
[
"Apache-2.0"
] | 19
|
2019-12-09T15:04:40.000Z
|
2021-12-09T21:46:21.000Z
|
nsl/stac/utils.py
|
nearspacelabs/stac-client-python
|
d23eb3d991b97f23ea835bf5f9834a7e86886048
|
[
"Apache-2.0"
] | 16
|
2019-11-25T16:54:11.000Z
|
2021-12-16T15:35:40.000Z
|
nsl/stac/utils.py
|
nearspacelabs/stac-client-python
|
d23eb3d991b97f23ea835bf5f9834a7e86886048
|
[
"Apache-2.0"
] | 3
|
2019-12-12T08:34:12.000Z
|
2021-04-13T22:49:59.000Z
|
# Copyright 2019-20 Near Space Labs
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# for additional information, contact:
# info@nearspacelabs.com
import os
import datetime
import http.client
import re
from urllib.parse import urlparse
from typing import List, Iterator, IO, Union, Dict, Any
import boto3
import botocore
import botocore.exceptions
import botocore.client
from google.cloud import storage
from google.protobuf import timestamp_pb2, duration_pb2
from nsl.stac import gcs_storage_client, bearer_auth, \
StacItem, Asset, TimestampFilter, Eo, DatetimeRange, enum
from nsl.stac.enum import Band, CloudPlatform, FilterRelationship, SortDirection, AssetType
DEFAULT_RGB = [Band.RED, Band.GREEN, Band.BLUE, Band.NIR]
RASTER_TYPES = [AssetType.CO_GEOTIFF, AssetType.GEOTIFF, AssetType.MRF]
UNSUPPORTED_TIME_FILTERS = [FilterRelationship.IN,
FilterRelationship.NOT_IN,
FilterRelationship.LIKE,
FilterRelationship.NOT_LIKE]
def get_blob_metadata(bucket: str, blob_name: str) -> storage.Blob:
"""
get metadata/interface for one asset in google cloud storage
:param bucket: bucket name
:param blob_name: complete blob name of item (doesn't include bucket name)
:return: Blob interface item
"""
if gcs_storage_client.client is None:
raise ValueError("GOOGLE_APPLICATION_CREDENTIALS environment variable not set")
bucket = gcs_storage_client.client.get_bucket(bucket)
return bucket.get_blob(blob_name=blob_name.strip('/'))
def download_gcs_object(bucket: str,
blob_name: str,
file_obj: IO[bytes] = None,
save_filename: str = "",
make_dir=True) -> str:
"""
download a specific blob from Google Cloud Storage (GCS) to a file object handle
:param make_dir: create the directory for save_filename if it doesn't exist
:param bucket: bucket name
:param blob_name: the full prefix to a specific asset in GCS. Does not include bucket name
:param file_obj: file object (or BytesIO string_buffer) where data should be written
:param save_filename: the filename to save the file to
:return: returns path to downloaded file if applicable
"""
if make_dir and save_filename != "":
path_to_create = os.path.split(save_filename)[0]
if not os.path.exists(path_to_create):
os.makedirs(path_to_create, exist_ok=True)
blob = get_blob_metadata(bucket=bucket, blob_name=blob_name)
if file_obj is not None:
blob.download_to_file(file_obj=file_obj, client=gcs_storage_client.client)
if "name" in file_obj.__dict__:
save_filename = file_obj.name
else:
save_filename = ""
file_obj.seek(0)
return save_filename
elif len(save_filename) > 0:
with open(save_filename, "w+b") as file_obj:
download_gcs_object(bucket, blob_name, file_obj=file_obj)
return save_filename
else:
raise ValueError("must provide filename or file_obj")
def download_s3_object(bucket: str,
blob_name: str,
file_obj: IO = None,
save_filename: str = "",
requester_pays: bool = False):
extra_args = None
if requester_pays:
extra_args = {'RequestPayer': 'requester'}
s3 = boto3.client('s3')
try:
if file_obj is not None:
s3.download_fileobj(Bucket=bucket, Key=blob_name, Fileobj=file_obj, ExtraArgs=extra_args)
if "name" in file_obj.__dict__:
save_filename = file_obj.name
else:
save_filename = ""
file_obj.seek(0)
return save_filename
elif len(save_filename) > 0:
s3.download_file(Bucket=bucket, Key=blob_name, Filename=save_filename, ExtraArgs=extra_args)
return save_filename
else:
raise ValueError("must provide filename or file_obj")
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] == "404":
print("The object does not exist.")
else:
raise
def download_href_object(asset: Asset, file_obj: IO = None, save_filename: str = "", nsl_id: str = None):
"""
download the href of an asset
:param asset: The asset to download
:param file_obj: BinaryIO file object to download data into. if both file_obj and save_filename are set,
only file_obj is used
:param save_filename: absolute or relative path filename to save asset to (must have write permissions)
:param nsl_id: ADVANCED ONLY. Only necessary if more than one nsl_id and nsl_secret have been defined with
set_credentials method. Specify nsl_id to use. if NSL_ID and NSL_SECRET environment variables not set must use
NSLClient object's set_credentials to set credentials
:return: returns the save_filename. if BinaryIO is not a FileIO object type, save_filename returned is an
empty string
"""
if not asset.href:
raise ValueError("no href on asset")
host = urlparse(asset.href)
conn = http.client.HTTPConnection(host.netloc)
headers = {}
asset_url = host.path
if asset.bucket_manager == "Near Space Labs":
headers = {"authorization": bearer_auth.auth_header(nsl_id=nsl_id)}
asset_url = "/download/{object}".format(object=asset.object_path)
if len(asset.type) > 0:
headers["content-type"] = asset.type
conn.request(method="GET", url=asset_url, headers=headers)
res = conn.getresponse()
if res.status == 404:
raise ValueError("not found error for {path}".format(path=asset.href))
elif res.status == 403:
raise ValueError("auth error for asset {asset}".format(asset=asset.href))
elif res.status == 402:
raise ValueError("not enough credits for downloading asset {asset}".format(asset=asset.href))
elif res.status != 200:
raise ValueError("error code {code} for asset: {asset}".format(code=res.status, asset=asset.href))
if len(save_filename) > 0:
with open(save_filename, mode='wb') as f:
f.write(res.read())
elif file_obj is not None:
file_obj.write(res.read())
if "name" in file_obj.__dict__:
save_filename = file_obj.name
else:
save_filename = ""
file_obj.seek(0)
else:
raise ValueError("must provide filename or file_obj")
return save_filename
def download_asset(asset: Asset,
from_bucket: bool = False,
file_obj: IO[Union[Union[str, bytes], Any]] = None,
save_filename: str = "",
save_directory: str = "",
requester_pays: bool = False,
nsl_id: str = None):
"""
download an asset. Defaults to downloading from the href endpoint; set from_bucket to download from cloud
storage instead. save the data to a BinaryIO file object, a filename
on your filesystem, or to a directory on your filesystem (the filename will be chosen from the basename of the
object).
:param requester_pays: authorize a requester pays download. this can be costly,
so only enable it if you understand the implications.
:param asset: The asset to download
:param from_bucket: force the download to occur from cloud storage instead of href endpoint
:param file_obj: BinaryIO file object to download data into. If file_obj and save_filename and/or save_directory are
set, then only file_obj is used
:param save_filename: absolute or relative path filename to save asset to (must have write permissions)
:param save_directory: absolute or relative directory path to save asset in (must have write permissions). Filename
is derived from the basename of the object_path or the href
:param nsl_id: ADVANCED ONLY. Only necessary if more than one nsl_id and nsl_secret have been defined with
set_credentials method. Specify nsl_id to use. if NSL_ID and NSL_SECRET environment variables not set must use
NSLClient object's set_credentials to set credentials
:return:
"""
if len(save_directory) > 0 and file_obj is None and len(save_filename) == 0:
if os.path.exists(save_directory):
save_filename = os.path.join(save_directory, os.path.basename(asset.object_path))
else:
raise ValueError("directory 'save_directory' doesn't exist")
if from_bucket and asset.cloud_platform == CloudPlatform.GCP:
return download_gcs_object(bucket=asset.bucket,
blob_name=asset.object_path,
file_obj=file_obj,
save_filename=save_filename)
elif from_bucket and asset.cloud_platform == CloudPlatform.AWS:
return download_s3_object(bucket=asset.bucket,
blob_name=asset.object_path,
file_obj=file_obj,
save_filename=save_filename,
requester_pays=requester_pays)
else:
return download_href_object(asset=asset,
file_obj=file_obj,
save_filename=save_filename,
nsl_id=nsl_id)
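# Illustrative call (asset obtained from a StacItem; directory is hypothetical):
#   local_path = download_asset(asset, save_directory='./data')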
def download_assets(stac_item: StacItem,
save_directory: str,
from_bucket: bool = False,
nsl_id: str = None) -> List[str]:
"""
Download all the assets for a StacItem into a directory
:param nsl_id: ADVANCED ONLY. Only necessary if more than one nsl_id and nsl_secret have been defined with
set_credentials method. Specify nsl_id to use. if NSL_ID and NSL_SECRET environment variables not set must use
NSLClient object's set_credentials to set credentials
:param stac_item: StacItem containing assets to download
:param save_directory: the directory where the files should be downloaded
:param from_bucket: force download from bucket. if set to false downloads happen from href. defaults to False
:return:
"""
filenames = []
for asset_key in stac_item.assets:
asset = stac_item.assets[asset_key]
filenames.append(download_asset(asset=asset,
from_bucket=from_bucket,
save_directory=save_directory,
nsl_id=nsl_id))
return filenames
def get_asset(stac_item: StacItem,
asset_type: AssetType = None,
cloud_platform: CloudPlatform = CloudPlatform.UNKNOWN_CLOUD_PLATFORM,
eo_bands: Eo.Band = Eo.UNKNOWN_BAND,
asset_regex: Dict = None,
asset_key: str = None,
b_relaxed_types: bool = False) -> Asset:
"""
get a protobuf object(pb) asset from a stac item pb. If more than one asset matches the parameters a
ValueError is raised; use
:func:`get_assets <nsl.stac.utils.get_assets>` to return more than one asset from a request.
:param stac_item: stac item whose assets we want to search by parameters
:param asset_type: an asset_type enum to return. if not defined then it is assumed to search all asset types
:param cloud_platform: only return assets that are hosted on the cloud platform described in the cloud_platform
field of the item. defaults to any cloud platform.
:param eo_bands: if the data has electro-optical spectrum data, define the band to retrieve. if the data is
not electro-optical then don't define this parameter (defaults to UNKNOWN_BAND)
:param asset_regex: dictionary mapping asset field names (or 'asset_key') to regular expressions the field must match
:param asset_key: the exact key of the asset to return, if known
:param b_relaxed_types: if True, treat TIFF, GEOTIFF and CO_GEOTIFF asset types as compatible matches
:return: asset pb object, or None if no asset matches
"""
results = get_assets(stac_item, asset_type, cloud_platform, eo_bands, asset_regex, asset_key, b_relaxed_types)
if len(results) > 1:
raise ValueError("must be more specific in selecting your asset. if all enums are used, try using "
"asset_key_regex")
elif len(results) == 1:
return results[0]
return None
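# Illustrative call (stac_item is a hypothetical StacItem); with b_relaxed_types=True a
# GEOTIFF request also accepts CO_GEOTIFF assets:
#   asset = get_asset(stac_item, asset_type=AssetType.GEOTIFF, b_relaxed_types=True)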
def _asset_types_match(desired_type: enum.AssetType, asset_type: enum.AssetType, b_relaxed_types: bool = False) -> bool:
if not b_relaxed_types:
return desired_type == asset_type
elif desired_type == enum.AssetType.TIFF:
return asset_type == desired_type or \
asset_type == enum.AssetType.GEOTIFF or \
asset_type == enum.AssetType.CO_GEOTIFF
elif desired_type == enum.AssetType.GEOTIFF:
return asset_type == desired_type or asset_type == enum.AssetType.CO_GEOTIFF
return asset_type == desired_type
def equals_pb(left: Asset, right: Asset):
"""
check whether two protobuf Assets are identical
:param left: first Asset to compare
:param right: second Asset to compare
:return: True if the serialized assets are byte-identical
"""
return left.SerializeToString() == right.SerializeToString()
def get_assets(stac_item: StacItem,
asset_type: enum.AssetType = None,
cloud_platform: CloudPlatform = CloudPlatform.UNKNOWN_CLOUD_PLATFORM,
eo_bands: Eo.Band = Eo.UNKNOWN_BAND,
asset_regex: Dict = None,
asset_key: str = None,
b_relaxed_types: bool = False) -> List[Asset]:
"""
get a list of assets from a stac item, filtered by the parameters.
:param stac_item: stac item whose assets we want to search by parameters
:param asset_type: an asset_type enum to search for. if not defined then it is assumed to search all asset types
:param cloud_platform: only return assets that are hosted on the cloud platform described in the cloud_platform
field of the item. defaults to any cloud platform.
:param eo_bands: if the data has electro-optical spectrum data, define the band to retrieve. if the data is not
electro-optical then don't define this parameter (defaults to UNKNOWN_BAND)
:param asset_regex: dictionary mapping asset field names (or 'asset_key') to regular expressions the field must match
:param asset_key: the exact key of the asset to return, if known
:param b_relaxed_types: if True, treat TIFF, GEOTIFF and CO_GEOTIFF asset types as compatible matches
:return: list of asset pb objects
"""
if asset_key is not None and asset_key in stac_item.assets:
return [stac_item.assets[asset_key]]
elif asset_key is not None and asset_key and asset_key not in stac_item.assets:
raise ValueError("asset_key {} not found".format(asset_key))
results = []
for asset_key in stac_item.assets:
current = stac_item.assets[asset_key]
b_asset_type_match = _asset_types_match(desired_type=asset_type,
asset_type=current.asset_type,
b_relaxed_types=b_relaxed_types)
if (eo_bands is not None and eo_bands != enum.Band.UNKNOWN_BAND) and current.eo_bands != eo_bands:
continue
if (cloud_platform is not None and cloud_platform != enum.CloudPlatform.UNKNOWN_CLOUD_PLATFORM) and \
current.cloud_platform != cloud_platform:
continue
if (asset_type is not None and asset_type != enum.AssetType.UNKNOWN_ASSET) and not b_asset_type_match:
continue
if asset_regex is not None and len(asset_regex) > 0:
b_continue = False
for key, regex_value in asset_regex.items():
if key == 'asset_key':
if not re.match(regex_value, asset_key):
b_continue = True
break
else:
if not hasattr(current, key):
raise AttributeError("no key {0} in asset {1}".format(key, current))
elif not re.match(regex_value, getattr(current, key)):
b_continue = True
break
if b_continue:
continue
# check that asset hasn't changed between protobuf and asset_map
pb_asset = stac_item.assets[asset_key]
if not equals_pb(current, pb_asset):
raise ValueError("corrupted protobuf. Asset and AssetWrap have differing underlying protobuf")
results.append(current)
return results
def _asset_has_filename(asset: Asset, asset_basename):
if os.path.basename(asset.object_path).lower() == os.path.basename(asset_basename).lower():
return True
return False
def has_asset_type(stac_item: StacItem,
asset_type: AssetType):
"""
does the stac item contain an asset of the given asset_type
:param stac_item:
:param asset_type:
:return:
"""
for asset in stac_item.assets.values():
if asset.asset_type == asset_type:
return True
return False
def has_asset(stac_item: StacItem,
asset: Asset):
"""
check whether a stac_item has a perfect match to the provided asset
:param stac_item: stac item whose assets we're checking against asset
:param asset: asset we're looking for in stac_item
:return:
"""
for test_asset in stac_item.assets.values():
b_matches = True
for field in test_asset.DESCRIPTOR.fields:
if getattr(test_asset, field.name) != getattr(asset, field.name):
b_matches = False
break
if b_matches:
return b_matches
return False
def get_uri(asset: Asset, b_vsi_uri=True, prefix: str = "") -> str:
"""
construct the uri for the resource in the asset.
:param asset: the Asset whose uri should be constructed
:param b_vsi_uri: if True (default), return a GDAL /vsi*_streaming path instead of a scheme uri
:param prefix: optional custom prefix; overrides the default gs/s3 prefix
:return: uri string for the asset's resource
"""
if not asset.bucket or not asset.object_path:
if not b_vsi_uri:
raise FileNotFoundError("The bucket ref is not AWS or Google:\nhref : {0}".format(asset.href))
return '/vsicurl_streaming/{}'.format(asset.href)
elif not prefix:
prefix = "{0}://"
if b_vsi_uri:
prefix = "/vsi{0}_streaming"
if asset.cloud_platform == CloudPlatform.GCP:
prefix = prefix.format("gs")
elif asset.cloud_platform == CloudPlatform.AWS:
prefix = prefix.format("s3")
else:
raise ValueError("The only current cloud platforms are GCP and AWS. This asset doesn't have the "
"'cloud_platform' field defined")
return "{0}/{1}/{2}".format(prefix, asset.bucket, asset.object_path)
def pb_timestampfield(rel_type: FilterRelationship,
value: Union[datetime.datetime, datetime.date] = None,
start: Union[datetime.datetime, datetime.date] = None,
end: Union[datetime.datetime, datetime.date] = None,
sort_direction: SortDirection = SortDirection.NOT_SORTED,
tzinfo: datetime.timezone = datetime.timezone.utc) -> TimestampFilter:
"""
Create a protobuf query filter for a timestamp or a range of timestamps. If you use a datetime.date as
the value combined with a rel_type of EQ then you will be creating a query filter for the
24-hour period of that date.
:param rel_type: the relationship type to query; more details
[here](https://geo-grpc.github.io/api/#epl.protobuf.FieldRelationship)
:param value: time to search by using >, >=, <, <=, etc. cannot be used with start or end
:param start: start time for between/not between query. cannot be used with value
:param end: end time for between/not between query. cannot be used with value
:param sort_direction: sort direction for results. Defaults to not sorting by this field
:param tzinfo: timezone info, defaults to UTC
:return: TimestampFilter
"""
if rel_type in UNSUPPORTED_TIME_FILTERS:
raise ValueError("unsupported relationship type: {}".format(rel_type.name))
if value is not None and rel_type != FilterRelationship.EQ and rel_type != FilterRelationship.NEQ:
if not isinstance(value, datetime.datetime):
if rel_type == FilterRelationship.GTE or rel_type == FilterRelationship.LT:
return TimestampFilter(value=pb_timestamp(value, tzinfo, b_force_min=True),
rel_type=rel_type,
sort_direction=sort_direction)
elif rel_type == FilterRelationship.LTE or rel_type == FilterRelationship.GT:
return TimestampFilter(value=pb_timestamp(value, tzinfo, b_force_min=False),
rel_type=rel_type,
sort_direction=sort_direction)
return TimestampFilter(value=pb_timestamp(value, tzinfo), rel_type=rel_type, sort_direction=sort_direction)
elif value is not None and not isinstance(value, datetime.datetime) and \
(rel_type == FilterRelationship.EQ or rel_type == FilterRelationship.NEQ):
start = datetime.datetime.combine(value, datetime.datetime.min.time(), tzinfo=tzinfo)
end = datetime.datetime.combine(value, datetime.datetime.max.time(), tzinfo=tzinfo)
if rel_type == FilterRelationship.EQ:
rel_type = FilterRelationship.BETWEEN
else:
rel_type = FilterRelationship.NOT_BETWEEN
return TimestampFilter(start=pb_timestamp(start, tzinfo),
end=pb_timestamp(end, tzinfo),
rel_type=rel_type,
sort_direction=sort_direction)
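# e.g. an EQ filter on a plain date expands to a BETWEEN filter covering that whole day:
#   pb_timestampfield(FilterRelationship.EQ, value=datetime.date(2020, 1, 1))
#   -> TimestampFilter(start=2020-01-01T00:00:00Z, end=2020-01-01T23:59:59.999999Z,
#                      rel_type=FilterRelationship.BETWEEN)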
def pb_timestamp(d_utc: Union[datetime.datetime, datetime.date],
tzinfo: datetime.timezone = datetime.timezone.utc,
b_force_min=True) -> timestamp_pb2.Timestamp:
"""
create a google.protobuf.Timestamp from a python datetime
:param d_utc: python datetime or date
:param tzinfo: timezone to assume when d_utc is naive (defaults to UTC)
:param b_force_min: when d_utc is a date, use the minimum (True) or maximum (False) time of that day
:return: google.protobuf Timestamp
"""
ts = timestamp_pb2.Timestamp()
ts.FromDatetime(timezoned(d_utc, tzinfo, b_force_min))
return ts
def timezoned(d_utc: Union[datetime.datetime, datetime.date],
tzinfo: datetime.timezone = datetime.timezone.utc,
b_force_min=True):
# datetime is a subclass of datetime.date, so this isinstance check must test datetime first; reversing the order would fail
if isinstance(d_utc, datetime.datetime) and d_utc.tzinfo is None:
# TODO add warning here:
# print("warning, no timezone provided with datetime, so UTC is assumed")
d_utc = datetime.datetime(d_utc.year,
d_utc.month,
d_utc.day,
d_utc.hour,
d_utc.minute,
d_utc.second,
d_utc.microsecond,
tzinfo=tzinfo)
elif not isinstance(d_utc, datetime.datetime):
# print("warning, no timezone provided with date, so UTC is assumed")
if b_force_min:
d_utc = datetime.datetime.combine(d_utc, datetime.datetime.min.time(), tzinfo=tzinfo)
else:
d_utc = datetime.datetime.combine(d_utc, datetime.datetime.max.time(), tzinfo=tzinfo)
return d_utc
def duration(d_start: Union[datetime.datetime, datetime.date], d_end: Union[datetime.datetime, datetime.date]):
d = duration_pb2.Duration()
d.FromTimedelta(timezoned(d_end) - timezoned(d_start))
return d
def datetime_range(d_start: Union[datetime.datetime, datetime.date],
d_end: Union[datetime.datetime, datetime.date]) -> DatetimeRange:
"""
create a DatetimeRange, e.g. for datetime range definitions in Mosaic objects.
:param d_start: start datetime or date
:param d_end: end datetime or date
:return: DatetimeRange object
"""
return DatetimeRange(start=pb_timestamp(d_start), end=pb_timestamp(d_end))
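# e.g. datetime_range(datetime.date(2020, 1, 1), datetime.date(2020, 2, 1)) produces a
# DatetimeRange whose start and end are protobuf Timestamps at the start of each date (UTC)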
| 45.348231
| 120
| 0.650583
|
import os
import datetime
import http.client
import re
from urllib.parse import urlparse
from typing import List, Iterator, IO, Union, Dict, Any
import boto3
import botocore
import botocore.exceptions
import botocore.client
from google.cloud import storage
from google.protobuf import timestamp_pb2, duration_pb2
from nsl.stac import gcs_storage_client, bearer_auth, \
StacItem, Asset, TimestampFilter, Eo, DatetimeRange, enum
from nsl.stac.enum import Band, CloudPlatform, FilterRelationship, SortDirection, AssetType
DEFAULT_RGB = [Band.RED, Band.GREEN, Band.BLUE, Band.NIR]
RASTER_TYPES = [AssetType.CO_GEOTIFF, AssetType.GEOTIFF, AssetType.MRF]
UNSUPPORTED_TIME_FILTERS = [FilterRelationship.IN,
FilterRelationship.NOT_IN,
FilterRelationship.LIKE,
FilterRelationship.NOT_LIKE]
def get_blob_metadata(bucket: str, blob_name: str) -> storage.Blob:
if gcs_storage_client.client is None:
raise ValueError("GOOGLE_APPLICATION_CREDENTIALS environment variable not set")
bucket = gcs_storage_client.client.get_bucket(bucket)
return bucket.get_blob(blob_name=blob_name.strip('/'))
def download_gcs_object(bucket: str,
blob_name: str,
file_obj: IO[bytes] = None,
save_filename: str = "",
make_dir=True) -> str:
if make_dir and save_filename != "":
path_to_create = os.path.split(save_filename)[0]
if not os.path.exists(path_to_create):
os.makedirs(path_to_create, exist_ok=True)
blob = get_blob_metadata(bucket=bucket, blob_name=blob_name)
if file_obj is not None:
blob.download_to_file(file_obj=file_obj, client=gcs_storage_client.client)
if "name" in file_obj.__dict__:
save_filename = file_obj.name
else:
save_filename = ""
file_obj.seek(0)
return save_filename
elif len(save_filename) > 0:
with open(save_filename, "w+b") as file_obj:
download_gcs_object(bucket, blob_name, file_obj=file_obj)
return save_filename
else:
raise ValueError("must provide filename or file_obj")
def download_s3_object(bucket: str,
blob_name: str,
file_obj: IO = None,
save_filename: str = "",
requester_pays: bool = False):
extra_args = None
if requester_pays:
extra_args = {'RequestPayer': 'requester'}
s3 = boto3.client('s3')
try:
if file_obj is not None:
s3.download_fileobj(Bucket=bucket, Key=blob_name, Fileobj=file_obj, ExtraArgs=extra_args)
if "name" in file_obj.__dict__:
save_filename = file_obj.name
else:
save_filename = ""
file_obj.seek(0)
return save_filename
elif len(save_filename) > 0:
s3.download_file(Bucket=bucket, Key=blob_name, Filename=save_filename, ExtraArgs=extra_args)
return save_filename
else:
raise ValueError("must provide filename or file_obj")
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] == "404":
print("The object does not exist.")
else:
raise
def download_href_object(asset: Asset, file_obj: IO = None, save_filename: str = "", nsl_id: str = None):
if not asset.href:
raise ValueError("no href on asset")
host = urlparse(asset.href)
conn = http.client.HTTPConnection(host.netloc)
headers = {}
asset_url = host.path
if asset.bucket_manager == "Near Space Labs":
headers = {"authorization": bearer_auth.auth_header(nsl_id=nsl_id)}
asset_url = "/download/{object}".format(object=asset.object_path)
if len(asset.type) > 0:
headers["content-type"] = asset.type
conn.request(method="GET", url=asset_url, headers=headers)
res = conn.getresponse()
if res.status == 404:
raise ValueError("not found error for {path}".format(path=asset.href))
elif res.status == 403:
raise ValueError("auth error for asset {asset}".format(asset=asset.href))
elif res.status == 402:
raise ValueError("not enough credits for downloading asset {asset}".format(asset=asset.href))
elif res.status != 200:
raise ValueError("error code {code} for asset: {asset}".format(code=res.status, asset=asset.href))
if len(save_filename) > 0:
with open(save_filename, mode='wb') as f:
f.write(res.read())
elif file_obj is not None:
file_obj.write(res.read())
if "name" in file_obj.__dict__:
save_filename = file_obj.name
else:
save_filename = ""
file_obj.seek(0)
else:
raise ValueError("must provide filename or file_obj")
return save_filename
def download_asset(asset: Asset,
from_bucket: bool = False,
file_obj: IO[Union[Union[str, bytes], Any]] = None,
save_filename: str = "",
save_directory: str = "",
requester_pays: bool = False,
nsl_id: str = None):
if len(save_directory) > 0 and file_obj is None and len(save_filename) == 0:
if os.path.exists(save_directory):
save_filename = os.path.join(save_directory, os.path.basename(asset.object_path))
else:
raise ValueError("directory 'save_directory' doesn't exist")
if from_bucket and asset.cloud_platform == CloudPlatform.GCP:
return download_gcs_object(bucket=asset.bucket,
blob_name=asset.object_path,
file_obj=file_obj,
save_filename=save_filename)
elif from_bucket and asset.cloud_platform == CloudPlatform.AWS:
return download_s3_object(bucket=asset.bucket,
blob_name=asset.object_path,
file_obj=file_obj,
save_filename=save_filename,
requester_pays=requester_pays)
else:
return download_href_object(asset=asset,
file_obj=file_obj,
save_filename=save_filename,
nsl_id=nsl_id)
def download_assets(stac_item: StacItem,
save_directory: str,
from_bucket: bool = False,
nsl_id: str = None) -> List[str]:
filenames = []
for asset_key in stac_item.assets:
asset = stac_item.assets[asset_key]
filenames.append(download_asset(asset=asset,
from_bucket=from_bucket,
save_directory=save_directory,
nsl_id=nsl_id))
return filenames
def get_asset(stac_item: StacItem,
asset_type: AssetType = None,
cloud_platform: CloudPlatform = CloudPlatform.UNKNOWN_CLOUD_PLATFORM,
eo_bands: Eo.Band = Eo.UNKNOWN_BAND,
asset_regex: Dict = None,
asset_key: str = None,
b_relaxed_types: bool = False) -> Asset:
results = get_assets(stac_item, asset_type, cloud_platform, eo_bands, asset_regex, asset_key, b_relaxed_types)
if len(results) > 1:
raise ValueError("must be more specific in selecting your asset. if all enums are used, try using "
"asset_key_regex")
elif len(results) == 1:
return results[0]
return None
def _asset_types_match(desired_type: enum.AssetType, asset_type: enum.AssetType, b_relaxed_types: bool = False) -> bool:
if not b_relaxed_types:
return desired_type == asset_type
elif desired_type == enum.AssetType.TIFF:
return asset_type == desired_type or \
asset_type == enum.AssetType.GEOTIFF or \
asset_type == enum.AssetType.CO_GEOTIFF
elif desired_type == enum.AssetType.GEOTIFF:
return asset_type == desired_type or asset_type == enum.AssetType.CO_GEOTIFF
return asset_type == desired_type
def equals_pb(left: Asset, right: Asset):
return left.SerializeToString() == right.SerializeToString()
def get_assets(stac_item: StacItem,
asset_type: enum.AssetType = None,
cloud_platform: CloudPlatform = CloudPlatform.UNKNOWN_CLOUD_PLATFORM,
eo_bands: Eo.Band = Eo.UNKNOWN_BAND,
asset_regex: Dict = None,
asset_key: str = None,
b_relaxed_types: bool = False) -> List[Asset]:
if asset_key is not None and asset_key in stac_item.assets:
return [stac_item.assets[asset_key]]
elif asset_key is not None and asset_key and asset_key not in stac_item.assets:
raise ValueError("asset_key {} not found".format(asset_key))
results = []
for asset_key in stac_item.assets:
current = stac_item.assets[asset_key]
b_asset_type_match = _asset_types_match(desired_type=asset_type,
asset_type=current.asset_type,
b_relaxed_types=b_relaxed_types)
if (eo_bands is not None and eo_bands != enum.Band.UNKNOWN_BAND) and current.eo_bands != eo_bands:
continue
if (cloud_platform is not None and cloud_platform != enum.CloudPlatform.UNKNOWN_CLOUD_PLATFORM) and \
current.cloud_platform != cloud_platform:
continue
if (asset_type is not None and asset_type != enum.AssetType.UNKNOWN_ASSET) and not b_asset_type_match:
continue
if asset_regex is not None and len(asset_regex) > 0:
b_continue = False
for key, regex_value in asset_regex.items():
if key == 'asset_key':
if not re.match(regex_value, asset_key):
b_continue = True
break
else:
if not hasattr(current, key):
raise AttributeError("no key {0} in asset {1}".format(key, current))
elif not re.match(regex_value, getattr(current, key)):
b_continue = True
break
if b_continue:
continue
# check that asset hasn't changed between protobuf and asset_map
pb_asset = stac_item.assets[asset_key]
if not equals_pb(current, pb_asset):
raise ValueError("corrupted protobuf. Asset and AssetWrap have differing underlying protobuf")
results.append(current)
return results
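# Selection sketch for get_assets, assuming a hypothetical item carrying
# GeoTIFF assets on GCP. The 'asset_key' regex entry is the one key handled
# specially above; with b_relaxed_types=True a GEOTIFF request also accepts
# cloud-optimized GeoTIFFs (see _asset_types_match).
def _example_get_assets(stac_item: StacItem) -> List[Asset]:
    return get_assets(stac_item,
                      asset_type=enum.AssetType.GEOTIFF,
                      cloud_platform=CloudPlatform.GCP,
                      asset_regex={'asset_key': r'.*_thumbnail$'},
                      b_relaxed_types=True)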
def _asset_has_filename(asset: Asset, asset_basename):
    return os.path.basename(asset.object_path).lower() == os.path.basename(asset_basename).lower()
def has_asset_type(stac_item: StacItem,
asset_type: AssetType):
for asset in stac_item.assets.values():
if asset.asset_type == asset_type:
return True
return False
def has_asset(stac_item: StacItem,
asset: Asset):
for test_asset in stac_item.assets.values():
b_matches = True
for field in test_asset.DESCRIPTOR.fields:
if getattr(test_asset, field.name) != getattr(asset, field.name):
b_matches = False
break
if b_matches:
return b_matches
return False
def get_uri(asset: Asset, b_vsi_uri=True, prefix: str = "") -> str:
if not asset.bucket or not asset.object_path:
if not b_vsi_uri:
raise FileNotFoundError("The bucket ref is not AWS or Google:\nhref : {0}".format(asset.href))
return '/vsicurl_streaming/{}'.format(asset.href)
elif not prefix:
prefix = "{0}://"
if b_vsi_uri:
prefix = "/vsi{0}_streaming"
if asset.cloud_platform == CloudPlatform.GCP:
prefix = prefix.format("gs")
elif asset.cloud_platform == CloudPlatform.AWS:
prefix = prefix.format("s3")
else:
raise ValueError("The only current cloud platforms are GCP and AWS. This asset doesn't have the "
"'cloud_platform' field defined")
return "{0}/{1}/{2}".format(prefix, asset.bucket, asset.object_path)
def pb_timestampfield(rel_type: FilterRelationship,
value: Union[datetime.datetime, datetime.date] = None,
start: Union[datetime.datetime, datetime.date] = None,
end: Union[datetime.datetime, datetime.date] = None,
sort_direction: SortDirection = SortDirection.NOT_SORTED,
tzinfo: datetime.timezone = datetime.timezone.utc) -> TimestampFilter:
if rel_type in UNSUPPORTED_TIME_FILTERS:
raise ValueError("unsupported relationship type: {}".format(rel_type.name))
if value is not None and rel_type != FilterRelationship.EQ and rel_type != FilterRelationship.NEQ:
if not isinstance(value, datetime.datetime):
if rel_type == FilterRelationship.GTE or rel_type == FilterRelationship.LT:
return TimestampFilter(value=pb_timestamp(value, tzinfo, b_force_min=True),
rel_type=rel_type,
sort_direction=sort_direction)
elif rel_type == FilterRelationship.LTE or rel_type == FilterRelationship.GT:
return TimestampFilter(value=pb_timestamp(value, tzinfo, b_force_min=False),
rel_type=rel_type,
sort_direction=sort_direction)
return TimestampFilter(value=pb_timestamp(value, tzinfo), rel_type=rel_type, sort_direction=sort_direction)
elif value is not None and not isinstance(value, datetime.datetime) and \
(rel_type == FilterRelationship.EQ or rel_type == FilterRelationship.NEQ):
start = datetime.datetime.combine(value, datetime.datetime.min.time(), tzinfo=tzinfo)
end = datetime.datetime.combine(value, datetime.datetime.max.time(), tzinfo=tzinfo)
if rel_type == FilterRelationship.EQ:
rel_type = FilterRelationship.BETWEEN
else:
rel_type = FilterRelationship.NOT_BETWEEN
return TimestampFilter(start=pb_timestamp(start, tzinfo),
end=pb_timestamp(end, tzinfo),
rel_type=rel_type,
sort_direction=sort_direction)
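# Example (sketch): an equality filter on a calendar date cannot target a
# single instant, so pb_timestampfield expands it to a BETWEEN filter that
# spans the whole UTC day.
def _example_date_filter() -> TimestampFilter:
    d = datetime.date(2020, 1, 15)
    # Returned filter has rel_type BETWEEN, start 2020-01-15T00:00:00Z and
    # end 2020-01-15T23:59:59.999999Z.
    return pb_timestampfield(rel_type=FilterRelationship.EQ, value=d)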
def pb_timestamp(d_utc: Union[datetime.datetime, datetime.date],
tzinfo: datetime.timezone = datetime.timezone.utc,
b_force_min=True) -> timestamp_pb2.Timestamp:
ts = timestamp_pb2.Timestamp()
ts.FromDatetime(timezoned(d_utc, tzinfo, b_force_min))
return ts
def timezoned(d_utc: Union[datetime.datetime, datetime.date],
tzinfo: datetime.timezone = datetime.timezone.utc,
b_force_min=True):
    # datetime.datetime is a subclass of datetime.date, so the isinstance check for datetime.datetime must come first
if isinstance(d_utc, datetime.datetime) and d_utc.tzinfo is None:
# TODO add warning here:
# print("warning, no timezone provided with datetime, so UTC is assumed")
d_utc = datetime.datetime(d_utc.year,
d_utc.month,
d_utc.day,
d_utc.hour,
d_utc.minute,
d_utc.second,
d_utc.microsecond,
tzinfo=tzinfo)
elif not isinstance(d_utc, datetime.datetime):
# print("warning, no timezone provided with date, so UTC is assumed")
if b_force_min:
d_utc = datetime.datetime.combine(d_utc, datetime.datetime.min.time(), tzinfo=tzinfo)
else:
d_utc = datetime.datetime.combine(d_utc, datetime.datetime.max.time(), tzinfo=tzinfo)
return d_utc
def duration(d_start: Union[datetime.datetime, datetime.date], d_end: Union[datetime.datetime, datetime.date]):
d = duration_pb2.Duration()
d.FromTimedelta(timezoned(d_end) - timezoned(d_start))
return d
def datetime_range(d_start: Union[datetime.datetime, datetime.date],
d_end: Union[datetime.datetime, datetime.date]) -> DatetimeRange:
return DatetimeRange(start=pb_timestamp(d_start), end=pb_timestamp(d_end))
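# Small end-to-end sketch of the timestamp helpers above: plain dates are
# promoted to UTC datetimes (midnight by default), so one calendar day of
# separation yields a Duration of exactly 86400 seconds.
def _example_duration_and_range():
    start = datetime.date(2020, 1, 1)
    end = datetime.date(2020, 1, 2)
    d = duration(start, end)          # duration_pb2.Duration of 86400 seconds
    rng = datetime_range(start, end)  # DatetimeRange of protobuf Timestamps
    return d, rng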
| true
| true
|
1c41a62dff4399c2ae042990b71a295d06bd7539
| 604
|
py
|
Python
|
setup.py
|
kablekompany/kable-kogs
|
3fa0937281a9610aa4c10d389d1ae30d61d1fd15
|
[
"MIT"
] | 18
|
2020-08-25T07:30:22.000Z
|
2021-12-19T18:46:41.000Z
|
setup.py
|
KingPanda0/kable
|
3679029bc8698033d6bc853a64f8470e3a4d9c54
|
[
"MIT"
] | 24
|
2020-08-27T06:07:32.000Z
|
2021-06-20T18:00:38.000Z
|
setup.py
|
KingPanda0/kable
|
3679029bc8698033d6bc853a64f8470e3a4d9c54
|
[
"MIT"
] | 21
|
2020-08-27T04:33:37.000Z
|
2021-12-31T12:33:50.000Z
|
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="Kable-Kogs",
version="1.0.1",
author="Trent Kable",
author_email="trent@kablekompany.com",
description="Cogs for Kr0nos",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/kableko/Kable-Kogs",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
)
| 27.454545
| 50
| 0.660596
|
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="Kable-Kogs",
version="1.0.1",
author="Trent Kable",
author_email="trent@kablekompany.com",
description="Cogs for Kr0nos",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/kableko/Kable-Kogs",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
)
| true
| true
|
1c41a9a44ac033df15f98746e9d31323224a73a3
| 2,461
|
py
|
Python
|
setup.py
|
miigotu/pymediainfo
|
0ec662b0e2f5d34123e65e01c4c3af92acb91825
|
[
"MIT"
] | null | null | null |
setup.py
|
miigotu/pymediainfo
|
0ec662b0e2f5d34123e65e01c4c3af92acb91825
|
[
"MIT"
] | null | null | null |
setup.py
|
miigotu/pymediainfo
|
0ec662b0e2f5d34123e65e01c4c3af92acb91825
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import os
from setuptools import find_packages, setup
with open("README.rst") as f:
long_description = f.read()
data_files = []
bin_files = []
cmdclass = {}
bin_license = 'docs/License.html'
if os.path.exists(bin_license):
data_files.append(('docs', [bin_license]))
bin_files.extend(['MediaInfo.dll', 'libmediainfo.*'])
try:
from wheel.bdist_wheel import bdist_wheel
class platform_bdist_wheel(bdist_wheel):
def finalize_options(self):
bdist_wheel.finalize_options(self)
# Force the wheel to be marked as platform-specific
self.root_is_pure = False
def get_tag(self):
python, abi, plat = bdist_wheel.get_tag(self)
# The python code works for any Python version,
# not just the one we are running to build the wheel
return 'py3', 'none', plat
cmdclass['bdist_wheel'] = platform_bdist_wheel
except ImportError:
pass
setup(
name='pymediainfo',
author='Louis Sautier',
author_email='sautier.louis@gmail.com',
url='https://github.com/sbraz/pymediainfo',
project_urls={
"Documentation": "https://pymediainfo.readthedocs.io/",
"Bugs": "https://github.com/sbraz/pymediainfo/issues",
},
description="""A Python wrapper for the mediainfo library.""",
long_description=long_description,
packages=find_packages(),
namespace_packages=[],
include_package_data=True,
zip_safe=False,
license='MIT',
data_files=data_files,
use_scm_version=True,
python_requires=">=3.6",
setup_requires=["setuptools_scm"],
install_requires=["setuptools"],
package_data={'pymediainfo': bin_files},
cmdclass=cmdclass,
classifiers=[
"Development Status :: 5 - Production/Stable",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
"Operating System :: POSIX :: Linux",
"Operating System :: MacOS :: MacOS X",
"Operating System :: Microsoft :: Windows",
"License :: OSI Approved :: MIT License",
]
)
| 33.712329
| 70
| 0.628606
|
import os
from setuptools import find_packages, setup
with open("README.rst") as f:
long_description = f.read()
data_files = []
bin_files = []
cmdclass = {}
bin_license = 'docs/License.html'
if os.path.exists(bin_license):
data_files.append(('docs', [bin_license]))
bin_files.extend(['MediaInfo.dll', 'libmediainfo.*'])
try:
from wheel.bdist_wheel import bdist_wheel
class platform_bdist_wheel(bdist_wheel):
def finalize_options(self):
bdist_wheel.finalize_options(self)
self.root_is_pure = False
def get_tag(self):
python, abi, plat = bdist_wheel.get_tag(self)
return 'py3', 'none', plat
cmdclass['bdist_wheel'] = platform_bdist_wheel
except ImportError:
pass
setup(
name='pymediainfo',
author='Louis Sautier',
author_email='sautier.louis@gmail.com',
url='https://github.com/sbraz/pymediainfo',
project_urls={
"Documentation": "https://pymediainfo.readthedocs.io/",
"Bugs": "https://github.com/sbraz/pymediainfo/issues",
},
description="""A Python wrapper for the mediainfo library.""",
long_description=long_description,
packages=find_packages(),
namespace_packages=[],
include_package_data=True,
zip_safe=False,
license='MIT',
data_files=data_files,
use_scm_version=True,
python_requires=">=3.6",
setup_requires=["setuptools_scm"],
install_requires=["setuptools"],
package_data={'pymediainfo': bin_files},
cmdclass=cmdclass,
classifiers=[
"Development Status :: 5 - Production/Stable",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
"Operating System :: POSIX :: Linux",
"Operating System :: MacOS :: MacOS X",
"Operating System :: Microsoft :: Windows",
"License :: OSI Approved :: MIT License",
]
)
| true
| true
|
1c41a9d2514c2ce9d2e5fb9a3e6c44b1be4b246f
| 1,407
|
py
|
Python
|
applied_python/applied_python/lib/python2.7/site-packages/pylint/test/functional/redefined_variable_type.py
|
mith1979/ansible_automation
|
013dfa67c6d91720b787fadb21de574b6e023a26
|
[
"Apache-2.0"
] | null | null | null |
applied_python/applied_python/lib/python2.7/site-packages/pylint/test/functional/redefined_variable_type.py
|
mith1979/ansible_automation
|
013dfa67c6d91720b787fadb21de574b6e023a26
|
[
"Apache-2.0"
] | 1
|
2016-03-22T13:36:30.000Z
|
2016-03-22T13:36:30.000Z
|
applied_python/applied_python/lib/python2.7/site-packages/pylint/test/functional/redefined_variable_type.py
|
mith1979/ansible_automation
|
013dfa67c6d91720b787fadb21de574b6e023a26
|
[
"Apache-2.0"
] | null | null | null |
"""Checks variable types aren't redefined within a method or a function"""
# pylint: disable=too-few-public-methods, missing-docstring, unused-variable
_OK = True
class MyClass(object):
class Klass(object):
def __init__(self):
self.var2 = 'var'
def __init__(self):
self.var = True
self.var1 = 2
self.var2 = 1.
self.var1 = 2. # [redefined-variable-type]
self.a_str = "hello"
a_str = False
(a_str, b_str) = (1, 2) # no support for inference on tuple assignment
a_str = 2.0 if self.var else 1.0 # no support for inference on ifexpr
def _getter(self):
return self.a_str
def _setter(self, val):
self.a_str = val
var2 = property(_getter, _setter)
def some_method(self):
def func():
var = 1
test = 'bar'
var = 'baz' # [redefined-variable-type]
self.var = 1 # the rule checks for redefinitions in the scope of a function or method
test = 'foo'
myint = 2
myint = False # [redefined-variable-type]
_OK = "This is OK" # [redefined-variable-type]
if _OK:
SOME_FLOAT = 1.
def dummy_function():
return 2
def other_function():
instance = MyClass()
instance = True # [redefined-variable-type]
SOME_FLOAT = dummy_function() # [redefined-variable-type]
A_GLOB = None
A_GLOB = [1, 2, 3]
| 25.125
| 94
| 0.599147
|
_OK = True
class MyClass(object):
class Klass(object):
def __init__(self):
self.var2 = 'var'
def __init__(self):
self.var = True
self.var1 = 2
self.var2 = 1.
self.var1 = 2.
self.a_str = "hello"
a_str = False
(a_str, b_str) = (1, 2)
a_str = 2.0 if self.var else 1.0
def _getter(self):
return self.a_str
def _setter(self, val):
self.a_str = val
var2 = property(_getter, _setter)
def some_method(self):
def func():
var = 1
test = 'bar'
var = 'baz'
self.var = 1
test = 'foo'
myint = 2
myint = False
_OK = "This is OK"
if _OK:
SOME_FLOAT = 1.
def dummy_function():
return 2
def other_function():
instance = MyClass()
instance = True
SOME_FLOAT = dummy_function()
A_GLOB = None
A_GLOB = [1, 2, 3]
| true
| true
|
1c41aa776db58a7a73cab29edf0dae007400da3d
| 14,366
|
py
|
Python
|
Workflow_Scripts/hidata4gxp.py
|
USGS-Astrogeology/APPL-Tools
|
ee050355251377c512578f15c541929cd52b0acb
|
[
"Unlicense"
] | null | null | null |
Workflow_Scripts/hidata4gxp.py
|
USGS-Astrogeology/APPL-Tools
|
ee050355251377c512578f15c541929cd52b0acb
|
[
"Unlicense"
] | 12
|
2021-05-19T20:59:23.000Z
|
2021-07-29T14:45:17.000Z
|
Workflow_Scripts/hidata4gxp.py
|
USGS-Astrogeology/APPL-Tools
|
ee050355251377c512578f15c541929cd52b0acb
|
[
"Unlicense"
] | 2
|
2022-02-15T17:00:03.000Z
|
2022-02-23T00:50:22.000Z
|
#!/usr/bin/env python
import sys
import os
from os import path
import argparse
import subprocess
from pysis import isis
from pysis.exceptions import ProcessError
import pandas as pd
import numpy as np
from osgeo import gdal, ogr, osr
import xml.etree.ElementTree as ET
from xml.etree.ElementTree import ElementTree
from appl_tools.config import mola_delta_radius_iau, pedr_list
from appl_tools.pedr import pedr2tab_prm, run_pedr2tab, pedrtab2df, pedrcsv2vrt
def parse_args():
parser = argparse.ArgumentParser(
description="""Naive port of hidata4socet.pl to Python
""")
parser.add_argument("project_name",
help = "Name of the project in Socet Set or Socet GXP.")
parser.add_argument("noproj_cube",
nargs=2,
help = """Path to noproj'd cube that belongs to the stereopair. Script accepts exactly 2 cubes.""")
args = parser.parse_args()
return args
def dd2dms(dd):
"""
Convert a decimal to degrees, minutes, seconds.
Parameters
----------
dd : numeric
The decimal to convert
Returns
-------
d,m,s : list
List of the integer degrees, integer minutes, and float seconds
"""
d = int(dd)
dm = (dd - d)*60
m = int(dm)
s = float((dm - m)*60)
return d,m,s
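# Quick worked example of the conversion above:
# dd2dms(45.2625) -> (45, 15, 45.0), i.e. 45 deg 15 min 45.0 sec.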
def write_hidata_stats(rlat,rlon,minZ,maxZ,outfile):
"""
Write the geographic reference point and min/max elevation to a file.
Formatted for legacy compatibility with Socet Set workflow.
Parameters
----------
rlat : list
List containing degrees, minutes, and seconds of the reference latitude
Seconds in the list are ignored and set to 00.0 in the output for legacy compatibility
rlon : list
List containing degrees, minutes, and seconds of the reference longitude
Seconds in the list are ignored and set to 00.0 in the output for legacy compatibility
minZ : numeric
The minimum elevation of the cropped MOLA DEM
maxZ : numeric
The maximum elevation of the cropped MOLA DEM
outfile : path
Path to the file to write output to
Returns
-------
None
"""
s1 = """Geographic reference point: Latitude = {}:{}:{}\n""".format(str(rlat[0]), str(rlat[1]).zfill(2), str("00.0"))
s2 = """ Longitude = {}:{}:{}\n\n""".format(str(rlon[0]), str(rlon[1]).zfill(2), str("00.0"))
s3 = """Minimum Elevation: {}\n""".format(str(minZ))
s4 = """Maximum Elevation: {}\n""".format(str(maxZ))
    try:
        with open(outfile, 'w') as rpt:
            rpt.writelines([s1, s2, s3, s4])
    except OSError:
        print("Error writing statistics file " + outfile, file=sys.stderr)
return
def camrange_mbr(camrange_pvl):
"""
Parse ISIS camrange results by running ISIS getkey, and return a list of the latitude/longitude extents.
Planetocentric latitudes, +/-180, positive east longitude domain
Parameters
----------
camrange_pvl : str
path to a pvl file containing output from the ISIS camrange program
Returns
-------
minlon,
minlat,
maxlon,
maxlat : list
Coordinates of the MBR in "Lower Left, Upper Right" (LLUR) order
"""
minlat = float(isis.getkey(from_=camrange_pvl, grpname="UniversalGroundRange", keyword="MinimumLatitude").decode().replace('\n', ''))
maxlat = float(isis.getkey(from_=camrange_pvl, grpname="UniversalGroundRange", keyword="MaximumLatitude").decode().replace('\n', ''))
minlon = float(isis.getkey(from_=camrange_pvl, grpname="PositiveEast180", keyword="MinimumLongitude").decode().replace('\n', ''))
maxlon = float(isis.getkey(from_=camrange_pvl, grpname="PositiveEast180", keyword="MaximumLongitude").decode().replace('\n', ''))
return minlon,minlat,maxlon,maxlat
def stereo_mbr(minlongs,minlats,maxlongs,maxlats,buff=0.0):
"""
Compute a minimum bounding rectangle of the intersection of multiple overlapping rectangles.
This is useful for taking the MBRs of images in a stereopair and determining the MBR of the stereo coverage.
Input coordinates are assumed to be latitudes and longitudes in degrees.
Parameters
----------
minlongs : list
Minimum longitudes to consider
minlats : list
Minimum latitudes to consider
maxlongs : list
Maximum latitudes to consider
maxlats : list
Maximum latitudes to consider
buff : float
Optional keyword to apply a buffer to each of the input coordinates.
Can be positive or negative.
Defaults to 0.0.
Returns
-------
minlon,
minlat,
maxlon,
maxlat : list
Coordinates of the intersection MBR in "Lower Left, Upper Right" (LLUR) order
"""
# Compute min/max latitude of stereo coverage
# Buffer stereo MBR by 0.5 degrees (clamping within known lat/long bounds of MOLA grid)
minlon = max( (max(minlongs)-buff ), -180 )
minlat = max( (max(minlats)-buff ), -88 )
maxlon = min( (min(maxlongs)+buff ), 180 )
maxlat = min( (min(maxlats)+buff ), 88 )
# Return values in "LLUR" order for easy use in gdal.Warp()
return minlon,minlat,maxlon,maxlat
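# Worked example of the intersection MBR: images spanning (-10, 20, 5, 30) and
# (-5, 25, 10, 35) in LLUR order overlap in (-5, 25, 5, 30); with buff=0.5
# stereo_mbr returns (-5.5, 24.5, 5.5, 30.5).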
def main(user_args):
project_name = user_args.project_name
noproj_cubes = user_args.noproj_cube
print(user_args)
gdal.UseExceptions()
ogr.UseExceptions()
# Run campt
for i in noproj_cubes:
# Note output file name is based on first 15 characters of infile, which *should* capture full HiRISE ID
# Why? "Because that's the way we've always done it!"
# Write campt output to same directory as input images for legacy compatibility
campt_out = os.path.join(os.path.dirname(i), 'campt_' + os.path.basename(os.path.splitext(i)[0])[0:15] + '.prt' )
try:
isis.campt(from_=i, to=campt_out)
except ProcessError as e:
print(e, file=sys.stderr)
sys.exit(1)
# Run camrange on each input cube
camrange_pvl = [os.path.splitext(x)[0] + '_camrange.txt' for x in noproj_cubes]
camrange_dict = dict(zip(noproj_cubes,camrange_pvl))
for k,v in camrange_dict.items():
try:
isis.camrange(from_=k, to=v)
except ProcessError as e:
print(e, file=sys.stderr)
sys.exit(1)
img_minlats = []
img_maxlats = []
img_minlongs = []
img_maxlongs = []
# Figure out MBR of stereo coverage
for i in camrange_pvl:
minlon,minlat,maxlon,maxlat = camrange_mbr(i)
if (minlon == -180) and (maxlon == 180):
print("\nWARNING: " + i + " crosses the 180 degree longitude line. \n")
img_minlongs.append(minlon)
img_minlats.append(minlat)
img_maxlongs.append(maxlon)
img_maxlats.append(maxlat)
# Delete the camrange file
os.remove(i)
# Compute min/max latitude of stereo coverage
# Buffer stereo MBR by 0.5 degrees (clamping within known lat/long bounds of MOLA grid)
stereo_minlon, stereo_minlat, stereo_maxlon, stereo_maxlat = stereo_mbr(img_minlongs,
img_minlats,
img_maxlongs,
img_maxlats,
buff=0.5)
# Done with ISIS, rename print.prt for legacy compatibility
os.replace('print.prt', 'hidata4gxp.prt')
print(stereo_minlat,stereo_maxlat,stereo_minlon,stereo_maxlon)
# Stereo coverage that straddles +/-180 degrees longitude is currently unsupported
    if (stereo_minlon == -180) and (stereo_maxlon == 180):
print("\nERROR: Unable to compute longitude bounds of stereo coverage. \nAll images cross the 180 degree longitude line. \n")
sys.exit(1)
# Create directory to hold MOLA grid
if not os.path.exists('MOLA_DEM'):
os.mkdir('MOLA_DEM')
# Get spatial reference object defining a geographic SRS for Mars based on mola_delta_radius_iau
mola_ds = gdal.Open(mola_delta_radius_iau)
tsrs = mola_ds.GetSpatialRef()
mola_ds = None
# Set width and height (in pixels) of output at 256ppd, rounding to nearest 0.125 degrees (=32 pixels)
w = 32 * round( (abs(stereo_maxlon - stereo_minlon) * 256) / 32)
h = 32 * round( (abs(stereo_maxlat - stereo_minlat) * 256) / 32)
wopts = gdal.WarpOptions(format="GTiff", \
dstSRS=tsrs, \
outputBounds=(stereo_minlon, stereo_minlat, stereo_maxlon, stereo_maxlat), \
width=w, \
height=h)
# Run gdal.Warp()
mola_output = ('MOLA_DEM/' + project_name + '_mola.tif')
gdal.Warp(mola_output, mola_delta_radius_iau, options=wopts)
    # Extract the elevation range within the stereo coverage MBR, shrunk to a net 0.1 degree buffer;
    # that is, apply a *negative* 0.4 degree buffer to the (previously +0.5 buffered) stereo coverage MBR
topts = gdal.TranslateOptions(format="VRT", \
projWin=[stereo_minlon+0.4, stereo_maxlat-0.4, stereo_maxlon-0.4, stereo_minlat-0.4])
mem_subset = ('/vsimem/' + project_name + '_stats.vrt')
gdal.Translate(mem_subset, mola_output, options=topts)
iopts = gdal.InfoOptions(format='json', computeMinMax=True, showRAT=False)
info_out = gdal.Info(mem_subset, options=iopts)
# Round min and max elevation to nearest 100 meters
minZ = 100 * round(info_out['bands'][0]['computedMin'] / 100)
maxZ = 100 * round(info_out['bands'][0]['computedMax'] / 100)
gdal.Unlink(mem_subset)
# If this particular area of the MOLA grid is flat add 100 m to maxZ
# Why? "Because that's the way we've always done it!"
if minZ == maxZ :
maxZ = maxZ + 100.0
print("minZ: ", minZ)
print("maxZ: ", maxZ)
# Calculate reference point, rounded to nearest 0.1 degrees and converted to DMS
rlat = round( (((stereo_maxlat - stereo_minlat)/2) + stereo_minlat )* 10)/10
rlon = round( (((stereo_maxlon - stereo_minlon)/2) + stereo_minlon )* 10)/10
rlat = dd2dms(rlat)
rlon = dd2dms(rlon)
project_stats = project_name + '_GXP_statistics.lis'
write_hidata_stats(rlat,rlon,minZ,maxZ,project_stats)
### MOLA PEDR extraction ###
# 1. Build PEDR2TAB.PRM file with stereo MBR from above
# Create directory to hold MOLA shot data
if not os.path.exists('MOLA_TRACKS'):
os.mkdir('MOLA_TRACKS')
pedr2tab_prm(stereo_minlon,stereo_minlat,stereo_maxlon,stereo_maxlat,
flags=['T','T','F','T','T','F','F','F','F','F','T','T','T'],
out=(project_name + ".tab"),
f=169.8944472236118)
# 2. Run pedr2tab
run_pedr2tab(pedr_list)
# 3. Read PEDR output into pandas DataFrame and do some conditioning
pedr = pedrtab2df(project_name + '.tab')
# Convert longitudes to +/-180 domain
pedr['long_East'] = pedr['long_East'].apply(lambda x: ((x + 180) % 360) - 180 )
# Convert values in the "planet_rad" column to delta radius, IAU sphere by subtracting 3396190 meters
pedr['planet_rad'] = pedr['planet_rad'].apply(lambda x: x - 3396190)
pedr.rename(columns={'planet_rad':'DeltaR_IAU'}, inplace = True)
# Force original precision of EphemerisTime
pedr['EphemerisTime'] = pedr['EphemerisTime'].map(lambda x: '{0:.5f}'.format(x))
# Set up paths of output files to go to MOLA_TRACKS directory
pedr_tab = os.path.join('MOLA_TRACKS', project_name + '.tab')
pedr_csv = os.path.join('MOLA_TRACKS', project_name + '.csv')
pedr_prj = os.path.join('MOLA_TRACKS', project_name + '.prj')
pedr_shp = os.path.join('MOLA_TRACKS', project_name + '_Z.shp')
# Move the PEDR table and PRM file into MOLA_TRACKS directory
os.replace(project_name + '.tab',pedr_tab)
os.replace('PEDR2TAB.PRM',os.path.join('MOLA_TRACKS','PEDR2TAB.PRM'))
# 4. Write PEDR DataFrame to CSV
pedr.to_csv(path_or_buf=(pedr_csv), header=True, index=False)
# 5. Create VRT to go with the CSV, use hard-coded SRS (with vertical datum)
# This is not OGC-compliant WKT, but it's what GXP requires
wkt = """GEOGCS["GCS_Mars_Sphere_2000",
DATUM["D_Mars_Sphere_2000",
SPHEROID["Mars_Sphere_2000_IAU",3396190,0.0]],
PRIMEM["Reference_Meridian",0.0],
UNIT["Degree",0.0174532925199433]],
VERTCS["Mars_2000",
DATUM["D_Mars_Sphere_2000",
SPHEROID["Mars_Sphere_2000_IAU",3396190,0.0]],
PARAMETER["Vertical_Shift",0.0],
PARAMETER["Direction",1.0],
UNIT["Meter",1.0]]"""
    try:
        with open(pedr_prj, 'w') as prj:
            prj.write(wkt)
    except OSError:
print("Error writing WKT file " + pedr_prj, file=sys.stderr)
sys.exit(1)
# Create dict of field names and their types for VRT file
# Force EphemerisTime to type String as lazy way of avoiding nonsense warning from OGR later on
fields = ['long_East','lat_North','topography','MOLArange','DeltaR_IAU','c','A','offndr',
'EphemerisTime','areod_lat','areoid_rad','shot','pkt','orbit','gm']
types = ['Real','Real','Real','Real','Real','Integer','Integer','Real',
'String','Real','Real','Integer','Integer','Integer','Integer']
field_dict = dict(zip(fields,types))
pedr_vrt = pedrcsv2vrt(pedr_csv, pedr_prj, field_dict, x='long_East', y='lat_North', z='DeltaR_IAU')
# 5. Convert VRT to shapefile using OGR
in_ds = ogr.Open(pedr_vrt)
ogr.GetDriverByName("ESRI Shapefile").CopyDataSource(in_ds, pedr_shp)
in_ds = None
# ogr silently drops the VERTCS part of the WKT,
# so replace the .prj file associated with the shapefile with exact WKT we want
os.replace(pedr_prj, os.path.splitext(pedr_shp)[0] + '.prj')
os.remove(pedr_csv)
os.remove(pedr_vrt)
if __name__ == "__main__":
sys.exit(main(parse_args()))
| 37.905013
| 137
| 0.626062
|
import sys
import os
from os import path
import argparse
import subprocess
from pysis import isis
from pysis.exceptions import ProcessError
import pandas as pd
import numpy as np
from osgeo import gdal, ogr, osr
import xml.etree.ElementTree as ET
from xml.etree.ElementTree import ElementTree
from appl_tools.config import mola_delta_radius_iau, pedr_list
from appl_tools.pedr import pedr2tab_prm, run_pedr2tab, pedrtab2df, pedrcsv2vrt
def parse_args():
parser = argparse.ArgumentParser(
description="""Naive port of hidata4socet.pl to Python
""")
parser.add_argument("project_name",
help = "Name of the project in Socet Set or Socet GXP.")
parser.add_argument("noproj_cube",
nargs=2,
help = """Path to noproj'd cube that belongs to the stereopair. Script accepts exactly 2 cubes.""")
args = parser.parse_args()
return args
def dd2dms(dd):
d = int(dd)
dm = (dd - d)*60
m = int(dm)
s = float((dm - m)*60)
return d,m,s
def write_hidata_stats(rlat,rlon,minZ,maxZ,outfile):
s1 = """Geographic reference point: Latitude = {}:{}:{}\n""".format(str(rlat[0]), str(rlat[1]).zfill(2), str("00.0"))
s2 = """ Longitude = {}:{}:{}\n\n""".format(str(rlon[0]), str(rlon[1]).zfill(2), str("00.0"))
s3 = """Minimum Elevation: {}\n""".format(str(minZ))
s4 = """Maximum Elevation: {}\n""".format(str(maxZ))
    try:
        with open(outfile, 'w') as rpt:
            rpt.writelines([s1, s2, s3, s4])
    except OSError:
        print("Error writing statistics file " + outfile, file=sys.stderr)
return
def camrange_mbr(camrange_pvl):
minlat = float(isis.getkey(from_=camrange_pvl, grpname="UniversalGroundRange", keyword="MinimumLatitude").decode().replace('\n', ''))
maxlat = float(isis.getkey(from_=camrange_pvl, grpname="UniversalGroundRange", keyword="MaximumLatitude").decode().replace('\n', ''))
minlon = float(isis.getkey(from_=camrange_pvl, grpname="PositiveEast180", keyword="MinimumLongitude").decode().replace('\n', ''))
maxlon = float(isis.getkey(from_=camrange_pvl, grpname="PositiveEast180", keyword="MaximumLongitude").decode().replace('\n', ''))
return minlon,minlat,maxlon,maxlat
def stereo_mbr(minlongs,minlats,maxlongs,maxlats,buff=0.0):
# Compute min/max latitude of stereo coverage
# Buffer stereo MBR by 0.5 degrees (clamping within known lat/long bounds of MOLA grid)
minlon = max( (max(minlongs)-buff ), -180 )
minlat = max( (max(minlats)-buff ), -88 )
maxlon = min( (min(maxlongs)+buff ), 180 )
maxlat = min( (min(maxlats)+buff ), 88 )
# Return values in "LLUR" order for easy use in gdal.Warp()
return minlon,minlat,maxlon,maxlat
def main(user_args):
project_name = user_args.project_name
noproj_cubes = user_args.noproj_cube
print(user_args)
gdal.UseExceptions()
ogr.UseExceptions()
# Run campt
for i in noproj_cubes:
# Note output file name is based on first 15 characters of infile, which *should* capture full HiRISE ID
# Why? "Because that's the way we've always done it!"
# Write campt output to same directory as input images for legacy compatibility
campt_out = os.path.join(os.path.dirname(i), 'campt_' + os.path.basename(os.path.splitext(i)[0])[0:15] + '.prt' )
try:
isis.campt(from_=i, to=campt_out)
except ProcessError as e:
print(e, file=sys.stderr)
sys.exit(1)
# Run camrange on each input cube
camrange_pvl = [os.path.splitext(x)[0] + '_camrange.txt' for x in noproj_cubes]
camrange_dict = dict(zip(noproj_cubes,camrange_pvl))
for k,v in camrange_dict.items():
try:
isis.camrange(from_=k, to=v)
except ProcessError as e:
print(e, file=sys.stderr)
sys.exit(1)
img_minlats = []
img_maxlats = []
img_minlongs = []
img_maxlongs = []
# Figure out MBR of stereo coverage
for i in camrange_pvl:
minlon,minlat,maxlon,maxlat = camrange_mbr(i)
if (minlon == -180) and (maxlon == 180):
print("\nWARNING: " + i + " crosses the 180 degree longitude line. \n")
img_minlongs.append(minlon)
img_minlats.append(minlat)
img_maxlongs.append(maxlon)
img_maxlats.append(maxlat)
# Delete the camrange file
os.remove(i)
# Compute min/max latitude of stereo coverage
# Buffer stereo MBR by 0.5 degrees (clamping within known lat/long bounds of MOLA grid)
stereo_minlon, stereo_minlat, stereo_maxlon, stereo_maxlat = stereo_mbr(img_minlongs,
img_minlats,
img_maxlongs,
img_maxlats,
buff=0.5)
# Done with ISIS, rename print.prt for legacy compatibility
os.replace('print.prt', 'hidata4gxp.prt')
print(stereo_minlat,stereo_maxlat,stereo_minlon,stereo_maxlon)
# Stereo coverage that straddles +/-180 degrees longitude is currently unsupported
    if (stereo_minlon == -180) and (stereo_maxlon == 180):
print("\nERROR: Unable to compute longitude bounds of stereo coverage. \nAll images cross the 180 degree longitude line. \n")
sys.exit(1)
# Create directory to hold MOLA grid
if not os.path.exists('MOLA_DEM'):
os.mkdir('MOLA_DEM')
# Get spatial reference object defining a geographic SRS for Mars based on mola_delta_radius_iau
mola_ds = gdal.Open(mola_delta_radius_iau)
tsrs = mola_ds.GetSpatialRef()
mola_ds = None
# Set width and height (in pixels) of output at 256ppd, rounding to nearest 0.125 degrees (=32 pixels)
w = 32 * round( (abs(stereo_maxlon - stereo_minlon) * 256) / 32)
h = 32 * round( (abs(stereo_maxlat - stereo_minlat) * 256) / 32)
wopts = gdal.WarpOptions(format="GTiff", \
dstSRS=tsrs, \
outputBounds=(stereo_minlon, stereo_minlat, stereo_maxlon, stereo_maxlat), \
width=w, \
height=h)
# Run gdal.Warp()
mola_output = ('MOLA_DEM/' + project_name + '_mola.tif')
gdal.Warp(mola_output, mola_delta_radius_iau, options=wopts)
    # Extract the elevation range within the stereo coverage MBR, shrunk to a net 0.1 degree buffer;
    # that is, apply a *negative* 0.4 degree buffer to the (previously +0.5 buffered) stereo coverage MBR
topts = gdal.TranslateOptions(format="VRT", \
projWin=[stereo_minlon+0.4, stereo_maxlat-0.4, stereo_maxlon-0.4, stereo_minlat-0.4])
mem_subset = ('/vsimem/' + project_name + '_stats.vrt')
gdal.Translate(mem_subset, mola_output, options=topts)
iopts = gdal.InfoOptions(format='json', computeMinMax=True, showRAT=False)
info_out = gdal.Info(mem_subset, options=iopts)
# Round min and max elevation to nearest 100 meters
minZ = 100 * round(info_out['bands'][0]['computedMin'] / 100)
maxZ = 100 * round(info_out['bands'][0]['computedMax'] / 100)
gdal.Unlink(mem_subset)
# If this particular area of the MOLA grid is flat add 100 m to maxZ
# Why? "Because that's the way we've always done it!"
if minZ == maxZ :
maxZ = maxZ + 100.0
print("minZ: ", minZ)
print("maxZ: ", maxZ)
# Calculate reference point, rounded to nearest 0.1 degrees and converted to DMS
rlat = round( (((stereo_maxlat - stereo_minlat)/2) + stereo_minlat )* 10)/10
rlon = round( (((stereo_maxlon - stereo_minlon)/2) + stereo_minlon )* 10)/10
rlat = dd2dms(rlat)
rlon = dd2dms(rlon)
project_stats = project_name + '_GXP_statistics.lis'
write_hidata_stats(rlat,rlon,minZ,maxZ,project_stats)
### MOLA PEDR extraction ###
# 1. Build PEDR2TAB.PRM file with stereo MBR from above
# Create directory to hold MOLA shot data
if not os.path.exists('MOLA_TRACKS'):
os.mkdir('MOLA_TRACKS')
pedr2tab_prm(stereo_minlon,stereo_minlat,stereo_maxlon,stereo_maxlat,
flags=['T','T','F','T','T','F','F','F','F','F','T','T','T'],
out=(project_name + ".tab"),
f=169.8944472236118)
# 2. Run pedr2tab
run_pedr2tab(pedr_list)
# 3. Read PEDR output into pandas DataFrame and do some conditioning
pedr = pedrtab2df(project_name + '.tab')
# Convert longitudes to +/-180 domain
pedr['long_East'] = pedr['long_East'].apply(lambda x: ((x + 180) % 360) - 180 )
# Convert values in the "planet_rad" column to delta radius, IAU sphere by subtracting 3396190 meters
pedr['planet_rad'] = pedr['planet_rad'].apply(lambda x: x - 3396190)
pedr.rename(columns={'planet_rad':'DeltaR_IAU'}, inplace = True)
# Force original precision of EphemerisTime
pedr['EphemerisTime'] = pedr['EphemerisTime'].map(lambda x: '{0:.5f}'.format(x))
# Set up paths of output files to go to MOLA_TRACKS directory
pedr_tab = os.path.join('MOLA_TRACKS', project_name + '.tab')
pedr_csv = os.path.join('MOLA_TRACKS', project_name + '.csv')
pedr_prj = os.path.join('MOLA_TRACKS', project_name + '.prj')
pedr_shp = os.path.join('MOLA_TRACKS', project_name + '_Z.shp')
# Move the PEDR table and PRM file into MOLA_TRACKS directory
os.replace(project_name + '.tab',pedr_tab)
os.replace('PEDR2TAB.PRM',os.path.join('MOLA_TRACKS','PEDR2TAB.PRM'))
# 4. Write PEDR DataFrame to CSV
pedr.to_csv(path_or_buf=(pedr_csv), header=True, index=False)
# 5. Create VRT to go with the CSV, use hard-coded SRS (with vertical datum)
# This is not OGC-compliant WKT, but it's what GXP requires
wkt = """GEOGCS["GCS_Mars_Sphere_2000",
DATUM["D_Mars_Sphere_2000",
SPHEROID["Mars_Sphere_2000_IAU",3396190,0.0]],
PRIMEM["Reference_Meridian",0.0],
UNIT["Degree",0.0174532925199433]],
VERTCS["Mars_2000",
DATUM["D_Mars_Sphere_2000",
SPHEROID["Mars_Sphere_2000_IAU",3396190,0.0]],
PARAMETER["Vertical_Shift",0.0],
PARAMETER["Direction",1.0],
UNIT["Meter",1.0]]"""
    try:
        with open(pedr_prj, 'w') as prj:
            prj.write(wkt)
    except OSError:
print("Error writing WKT file " + pedr_prj, file=sys.stderr)
sys.exit(1)
fields = ['long_East','lat_North','topography','MOLArange','DeltaR_IAU','c','A','offndr',
'EphemerisTime','areod_lat','areoid_rad','shot','pkt','orbit','gm']
types = ['Real','Real','Real','Real','Real','Integer','Integer','Real',
'String','Real','Real','Integer','Integer','Integer','Integer']
field_dict = dict(zip(fields,types))
pedr_vrt = pedrcsv2vrt(pedr_csv, pedr_prj, field_dict, x='long_East', y='lat_North', z='DeltaR_IAU')
in_ds = ogr.Open(pedr_vrt)
ogr.GetDriverByName("ESRI Shapefile").CopyDataSource(in_ds, pedr_shp)
in_ds = None
os.replace(pedr_prj, os.path.splitext(pedr_shp)[0] + '.prj')
os.remove(pedr_csv)
os.remove(pedr_vrt)
if __name__ == "__main__":
sys.exit(main(parse_args()))
| true
| true
|
1c41ab16d8d7931058760b239bdd280c786fa723
| 1,707
|
py
|
Python
|
nerodia/locators/text_field/selector_builder.py
|
harsh183/nerodia
|
69c5e4408432e85b5af0b2da03015f729809dac4
|
[
"MIT"
] | 83
|
2017-11-20T08:41:09.000Z
|
2022-02-09T21:01:47.000Z
|
nerodia/locators/text_field/selector_builder.py
|
harsh183/nerodia
|
69c5e4408432e85b5af0b2da03015f729809dac4
|
[
"MIT"
] | 28
|
2017-11-21T02:25:03.000Z
|
2021-04-15T15:26:30.000Z
|
nerodia/locators/text_field/selector_builder.py
|
harsh183/nerodia
|
69c5e4408432e85b5af0b2da03015f729809dac4
|
[
"MIT"
] | 14
|
2017-11-29T06:44:12.000Z
|
2021-09-06T04:53:44.000Z
|
from nerodia.exception import LocatorException
from ..element.selector_builder import SelectorBuilder as ElementSelectorBuilder, \
XPath as ElementXPath
from ..element.xpath_support import XpathSupport
from ...elements.text_field import TextField
class SelectorBuilder(ElementSelectorBuilder):
pass
class XPath(ElementXPath):
# private
@property
def _text_string(self):
if self.adjacent is not None:
return super(XPath, self)._text_string
if 'text' in self.selector:
self.built['text'] = self.selector.pop('text')
return ''
@property
def _additional_string(self):
if self.adjacent is not None:
return ''
return self._type_string(self.selector.pop('type', None))
@property
def _tag_string(self):
if self.adjacent is None:
self.selector['tag_name'] = 'input'
return super(XPath, self)._tag_string
def _type_string(self, typ):
if typ is True:
return '[{}]'.format(self._negative_type_text)
elif typ in TextField.NON_TEXT_TYPES:
raise LocatorException('TextField Elements can not be located by type: {}'.format(typ))
elif typ is None:
return '[not(@type) or ({})]'.format(self._negative_type_text)
else:
return '[{}]'.format(self._process_attribute('type', typ))
@property
def _negative_type_text(self):
types = []
for typ in TextField.NON_TEXT_TYPES:
lhs = self._lhs_for('type', lower=True)
rhs = XpathSupport.lower(XpathSupport.escape(typ))
types.append('{}!={}'.format(lhs, rhs))
return ' and '.join(types)
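# Behaviour sketch for _type_string above: typ=None yields
# "[not(@type) or (<negative type tests>)]", typ=True yields just the
# negative-type predicate, any type listed in TextField.NON_TEXT_TYPES raises
# LocatorException, and any other value becomes a plain attribute predicate.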
| 32.207547
| 99
| 0.636204
|
from nerodia.exception import LocatorException
from ..element.selector_builder import SelectorBuilder as ElementSelectorBuilder, \
XPath as ElementXPath
from ..element.xpath_support import XpathSupport
from ...elements.text_field import TextField
class SelectorBuilder(ElementSelectorBuilder):
pass
class XPath(ElementXPath):
@property
def _text_string(self):
if self.adjacent is not None:
return super(XPath, self)._text_string
if 'text' in self.selector:
self.built['text'] = self.selector.pop('text')
return ''
@property
def _additional_string(self):
if self.adjacent is not None:
return ''
return self._type_string(self.selector.pop('type', None))
@property
def _tag_string(self):
if self.adjacent is None:
self.selector['tag_name'] = 'input'
return super(XPath, self)._tag_string
def _type_string(self, typ):
if typ is True:
return '[{}]'.format(self._negative_type_text)
elif typ in TextField.NON_TEXT_TYPES:
raise LocatorException('TextField Elements can not be located by type: {}'.format(typ))
elif typ is None:
return '[not(@type) or ({})]'.format(self._negative_type_text)
else:
return '[{}]'.format(self._process_attribute('type', typ))
@property
def _negative_type_text(self):
types = []
for typ in TextField.NON_TEXT_TYPES:
lhs = self._lhs_for('type', lower=True)
rhs = XpathSupport.lower(XpathSupport.escape(typ))
types.append('{}!={}'.format(lhs, rhs))
return ' and '.join(types)
| true
| true
|
1c41ab714ae677b4582248ecbcdc9ff139b239e2
| 37
|
py
|
Python
|
src/py2_em/__init__.py
|
chrisBrookes93/Py2Em
|
92f659a88609a26bcfe02c203d9b8d961e3c24fd
|
[
"MIT"
] | null | null | null |
src/py2_em/__init__.py
|
chrisBrookes93/Py2Em
|
92f659a88609a26bcfe02c203d9b8d961e3c24fd
|
[
"MIT"
] | null | null | null |
src/py2_em/__init__.py
|
chrisBrookes93/Py2Em
|
92f659a88609a26bcfe02c203d9b8d961e3c24fd
|
[
"MIT"
] | null | null | null |
from .py2emulator import Py2Emulator
| 18.5
| 36
| 0.864865
|
from .py2emulator import Py2Emulator
| true
| true
|
1c41abcbd4cac69e273cbc8ae777627a6034afae
| 2,521
|
py
|
Python
|
ee_mapper/map/ee_analysis.py
|
dgketchum/IrrMapper
|
37b692e91e20bc0f34b1fb0116402990510a1e21
|
[
"Apache-2.0"
] | 6
|
2018-03-20T21:32:00.000Z
|
2020-07-20T16:07:04.000Z
|
ee_mapper/map/ee_analysis.py
|
dgketchum/IrrMapper
|
37b692e91e20bc0f34b1fb0116402990510a1e21
|
[
"Apache-2.0"
] | 9
|
2018-12-22T19:15:28.000Z
|
2021-08-25T14:40:15.000Z
|
ee_mapper/map/ee_analysis.py
|
dgketchum/IrrMapper
|
37b692e91e20bc0f34b1fb0116402990510a1e21
|
[
"Apache-2.0"
] | 7
|
2018-03-15T06:08:39.000Z
|
2021-04-13T07:52:01.000Z
|
# ===============================================================================
# Copyright 2018 dgketchum
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
import os
from pprint import pprint
import ee
from map.assets import list_assets
from map.call_ee import is_authorized
STATES = ['AZ', 'CA', 'CO', 'ID', 'MT', 'NM', 'NV', 'OR', 'UT', 'WA', 'WY']
TARGET_STATES = ['OR']
BOUNDARIES = 'users/dgketchum/boundaries'
ASSET_ROOT = 'users/dgketchum/first_detected'
def first_detection():
    # NOTE: this export does not currently work through the Python API, although the equivalent code runs in the Earth Engine Code Editor
for state in TARGET_STATES:
bounds = os.path.join(BOUNDARIES, state)
roi = ee.FeatureCollection(bounds)
mask = roi.geometry().bounds().getInfo()['coordinates']
image_list = list_assets('users/dgketchum/classy')
out_images = []
for yr in range(1986, 2017):
yr_img = [x for x in image_list if x.endswith(str(yr))]
coll = ee.ImageCollection(yr_img)
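            # remap assigns the year to pixels in class 0 and zero to every
            # other class, so each yearly image carries the year only where
            # that class occurs (what class 0 represents is not stated here).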
classed = coll.mosaic().select('classification').remap([0, 1, 2, 3],
[yr, 0, 0, 0]).rename('{}_min'.format(yr))
out_images.append(classed)
coll = ee.ImageCollection(out_images)
img = coll.reduce(ee.Reducer.minMax()).rename('min', 'max')
pprint(img.getInfo())
task = ee.batch.Export.image.toAsset(
image=img,
description='{}'.format(state),
assetId=os.path.join(ASSET_ROOT, '{}'.format(state)),
fileNamePrefix='{}'.format(state),
region=mask,
scale=30,
maxPixels=1e10)
print(state)
task.start()
break
if __name__ == '__main__':
home = os.path.expanduser('~')
is_authorized()
first_detection()
# ========================= EOF ====================================================================
| 36.536232
| 109
| 0.563269
|
import os
from pprint import pprint
import ee
from map.assets import list_assets
from map.call_ee import is_authorized
STATES = ['AZ', 'CA', 'CO', 'ID', 'MT', 'NM', 'NV', 'OR', 'UT', 'WA', 'WY']
TARGET_STATES = ['OR']
BOUNDARIES = 'users/dgketchum/boundaries'
ASSET_ROOT = 'users/dgketchum/first_detected'
def first_detection():
for state in TARGET_STATES:
bounds = os.path.join(BOUNDARIES, state)
roi = ee.FeatureCollection(bounds)
mask = roi.geometry().bounds().getInfo()['coordinates']
image_list = list_assets('users/dgketchum/classy')
out_images = []
for yr in range(1986, 2017):
yr_img = [x for x in image_list if x.endswith(str(yr))]
coll = ee.ImageCollection(yr_img)
classed = coll.mosaic().select('classification').remap([0, 1, 2, 3],
[yr, 0, 0, 0]).rename('{}_min'.format(yr))
out_images.append(classed)
coll = ee.ImageCollection(out_images)
img = coll.reduce(ee.Reducer.minMax()).rename('min', 'max')
pprint(img.getInfo())
task = ee.batch.Export.image.toAsset(
image=img,
description='{}'.format(state),
assetId=os.path.join(ASSET_ROOT, '{}'.format(state)),
fileNamePrefix='{}'.format(state),
region=mask,
scale=30,
maxPixels=1e10)
print(state)
task.start()
break
if __name__ == '__main__':
home = os.path.expanduser('~')
is_authorized()
first_detection()
# ========================= EOF ====================================================================
| true
| true
|
1c41ac8c28e95e2fd967b7b3abf9f6e5e46581e5
| 360
|
py
|
Python
|
Python/B7-Klatschschalter/02 Klatsch-Schalter.py
|
frankyhub/Calliope
|
335f0ef5ca9bcf57e14166319501ec9086bc09bf
|
[
"MIT"
] | null | null | null |
Python/B7-Klatschschalter/02 Klatsch-Schalter.py
|
frankyhub/Calliope
|
335f0ef5ca9bcf57e14166319501ec9086bc09bf
|
[
"MIT"
] | null | null | null |
Python/B7-Klatschschalter/02 Klatsch-Schalter.py
|
frankyhub/Calliope
|
335f0ef5ca9bcf57e14166319501ec9086bc09bf
|
[
"MIT"
] | null | null | null |
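# Clap-switch sketch (Calliope MakeCode Python): a sound level above 20
# toggles the LED state between off and red, with a 500 ms pause as a
# crude debounce.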
LED = 0
def on_forever():
global LED
if input.sound_level() > 20:
if LED == 1:
LED = 0
if LED == 0:
basic.turn_rgb_led_off()
elif LED == 0:
LED = 1
if LED == 1:
basic.set_led_color(0xff0000)
basic.pause(500)
basic.forever(on_forever)
| 24
| 46
| 0.45
|
LED = 0
def on_forever():
global LED
if input.sound_level() > 20:
if LED == 1:
LED = 0
if LED == 0:
basic.turn_rgb_led_off()
elif LED == 0:
LED = 1
if LED == 1:
basic.set_led_color(0xff0000)
basic.pause(500)
basic.forever(on_forever)
| true
| true
|
1c41ad4f30724a7a81f6d60c2522b7bdc7a72915
| 1,511
|
py
|
Python
|
qtrio/_tests/examples/test_emissions.py
|
nodeselector/qtrio
|
4bc25ef97d7e6e01a9751de9c84a4214e637e9d4
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
qtrio/_tests/examples/test_emissions.py
|
nodeselector/qtrio
|
4bc25ef97d7e6e01a9751de9c84a4214e637e9d4
|
[
"Apache-2.0",
"MIT"
] | 1
|
2021-03-30T21:14:20.000Z
|
2021-03-30T21:14:20.000Z
|
qtrio/_tests/examples/test_emissions.py
|
nodeselector/qtrio
|
4bc25ef97d7e6e01a9751de9c84a4214e637e9d4
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
import functools
import typing
import pytestqt.qtbot
import trio
import trio.testing
import qtrio
import qtrio.examples.emissions
async def test_main(
qtbot: pytestqt.qtbot.QtBot,
optional_hold_event: typing.Optional[trio.Event],
) -> None:
async with trio.open_nursery() as nursery:
start = functools.partial(
qtrio.examples.emissions.start_widget,
hold_event=optional_hold_event,
)
widget: qtrio.examples.emissions.Widget = await nursery.start(start)
qtbot.addWidget(widget.widget)
if optional_hold_event is not None:
optional_hold_event.set()
else:
await trio.testing.wait_all_tasks_blocked(cushion=0.1)
await widget.serving_event.wait()
buttons = [
widget.increment,
widget.increment,
widget.increment,
widget.decrement,
widget.decrement,
widget.decrement,
widget.decrement,
]
results: typing.List[str] = []
for button in buttons:
# TODO: Doesn't work reliably on macOS in GitHub Actions. Seems to
# sometimes just miss the click entirely.
# qtbot.mouseClick(button, QtCore.Qt.LeftButton)
button.click()
await trio.testing.wait_all_tasks_blocked(cushion=0.01)
results.append(widget.label.text())
widget.widget.close()
assert results == ["1", "2", "3", "2", "1", "0", "-1"]
| 27.981481
| 79
| 0.612839
|
import functools
import typing
import pytestqt.qtbot
import trio
import trio.testing
import qtrio
import qtrio.examples.emissions
async def test_main(
qtbot: pytestqt.qtbot.QtBot,
optional_hold_event: typing.Optional[trio.Event],
) -> None:
async with trio.open_nursery() as nursery:
start = functools.partial(
qtrio.examples.emissions.start_widget,
hold_event=optional_hold_event,
)
widget: qtrio.examples.emissions.Widget = await nursery.start(start)
qtbot.addWidget(widget.widget)
if optional_hold_event is not None:
optional_hold_event.set()
else:
await trio.testing.wait_all_tasks_blocked(cushion=0.1)
await widget.serving_event.wait()
buttons = [
widget.increment,
widget.increment,
widget.increment,
widget.decrement,
widget.decrement,
widget.decrement,
widget.decrement,
]
results: typing.List[str] = []
for button in buttons:
# sometimes just miss the click entirely.
# qtbot.mouseClick(button, QtCore.Qt.LeftButton)
button.click()
await trio.testing.wait_all_tasks_blocked(cushion=0.01)
results.append(widget.label.text())
widget.widget.close()
assert results == ["1", "2", "3", "2", "1", "0", "-1"]
| true
| true
|
1c41ad8a0e57bc585224e96a4a5ca9f78f10405e
| 657
|
py
|
Python
|
manage.py
|
dongtianyi/xiaomubiao
|
0768273515e117dfcbe9c311fa91079599bc40ac
|
[
"MIT"
] | null | null | null |
manage.py
|
dongtianyi/xiaomubiao
|
0768273515e117dfcbe9c311fa91079599bc40ac
|
[
"MIT"
] | null | null | null |
manage.py
|
dongtianyi/xiaomubiao
|
0768273515e117dfcbe9c311fa91079599bc40ac
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'xiaomubiao.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
        ) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| 28.565217
| 74
| 0.678843
|
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'xiaomubiao.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| true
| true
|
1c41aed5304b3f0610dbabc70a4fb8f987fe843a
| 3,081
|
py
|
Python
|
pyembed/markdown/test/pattern_test.py
|
wamonite/pyembed-markdown
|
64f9766ff705ee9c0402958cb33aa1e3561e9315
|
[
"MIT"
] | 11
|
2015-06-09T20:59:15.000Z
|
2021-01-04T09:32:54.000Z
|
pyembed/markdown/test/pattern_test.py
|
wamonite/pyembed-markdown
|
64f9766ff705ee9c0402958cb33aa1e3561e9315
|
[
"MIT"
] | 4
|
2015-12-14T05:10:23.000Z
|
2020-02-18T02:24:34.000Z
|
pyembed/markdown/test/pattern_test.py
|
wamonite/pyembed-markdown
|
64f9766ff705ee9c0402958cb33aa1e3561e9315
|
[
"MIT"
] | 3
|
2015-12-21T19:10:55.000Z
|
2018-01-02T03:25:15.000Z
|
# The MIT License(MIT)
# Copyright (c) 2013-2014 Matt Thomson
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from pyembed.markdown.pattern import PyEmbedPattern
from mock import patch, Mock
def test_should_match_pyembed_link():
md = Mock()
re = PyEmbedPattern(None, md).getCompiledRegExp()
match = re.match('[!embed](http://www.example.com)')
assert match
def test_should_match_pyembed_link_with_params():
md = Mock()
re = PyEmbedPattern(None, md).getCompiledRegExp()
match = re.match('[!embed?param=value](http://www.example.com)')
assert match
def test_should_not_match_non_pyembed_link():
md = Mock()
re = PyEmbedPattern(None, md).getCompiledRegExp()
match = re.match('[example](http://www.example.com)')
assert not match
def test_should_substitute_link_with_embedding():
source = '[!embed](http://www.example.com)'
generic_embed_test(source, 'http://www.example.com', None, None)
def test_should_apply_max_height():
source = '[!embed?max_height=200](http://www.example.com)'
generic_embed_test(source, 'http://www.example.com', None, 200)
def test_should_apply_max_width():
source = '[!embed?max_width=100](http://www.example.com)'
generic_embed_test(source, 'http://www.example.com', 100, None)
def test_should_apply_max_height_and_width():
source = '[!embed?max_width=100&max_height=200](http://www.example.com)'
generic_embed_test(source, 'http://www.example.com', 100, 200)
def test_should_ignore_extra_params():
source = '[!embed?max_height=200&extra=value](http://www.example.com)'
generic_embed_test(source, 'http://www.example.com', None, 200)
def generic_embed_test(source, *embed_params):
md = Mock()
pyembed = Mock()
pyembed.embed.return_value = '<h1>Bees!</h1>'
pattern = PyEmbedPattern(pyembed, md)
match = pattern.getCompiledRegExp().match(source)
result = pattern.handleMatch(match)
assert result
pyembed.embed.assert_called_with(*embed_params)
md.htmlStash.store.assert_called_with('<h1>Bees!</h1>')
| 33.129032
| 79
| 0.733853
|
from pyembed.markdown.pattern import PyEmbedPattern
from mock import patch, Mock
def test_should_match_pyembed_link():
md = Mock()
re = PyEmbedPattern(None, md).getCompiledRegExp()
match = re.match('[!embed](http://www.example.com)')
assert match
def test_should_match_pyembed_link_with_params():
md = Mock()
re = PyEmbedPattern(None, md).getCompiledRegExp()
match = re.match('[!embed?param=value](http://www.example.com)')
assert match
def test_should_not_match_non_pyembed_link():
md = Mock()
re = PyEmbedPattern(None, md).getCompiledRegExp()
match = re.match('[example](http://www.example.com)')
assert not match
def test_should_substitute_link_with_embedding():
source = '[!embed](http://www.example.com)'
generic_embed_test(source, 'http://www.example.com', None, None)
def test_should_apply_max_height():
source = '[!embed?max_height=200](http://www.example.com)'
generic_embed_test(source, 'http://www.example.com', None, 200)
def test_should_apply_max_width():
source = '[!embed?max_width=100](http://www.example.com)'
generic_embed_test(source, 'http://www.example.com', 100, None)
def test_should_apply_max_height_and_width():
source = '[!embed?max_width=100&max_height=200](http://www.example.com)'
generic_embed_test(source, 'http://www.example.com', 100, 200)
def test_should_ignore_extra_params():
source = '[!embed?max_height=200&extra=value](http://www.example.com)'
generic_embed_test(source, 'http://www.example.com', None, 200)
def generic_embed_test(source, *embed_params):
md = Mock()
pyembed = Mock()
pyembed.embed.return_value = '<h1>Bees!</h1>'
pattern = PyEmbedPattern(pyembed, md)
match = pattern.getCompiledRegExp().match(source)
result = pattern.handleMatch(match)
assert result
pyembed.embed.assert_called_with(*embed_params)
md.htmlStash.store.assert_called_with('<h1>Bees!</h1>')
| true
| true
|
1c41af2c668e04c2ab96aaa7cf8b39d914a30ec4
| 12,962
|
py
|
Python
|
espnet/nets/pytorch_backend/lm/default.py
|
Hertin/espnet
|
a0f2175df08b4750a9f0305c20b8c11f6e941867
|
[
"Apache-2.0"
] | 3
|
2021-05-27T13:33:37.000Z
|
2021-10-06T05:52:20.000Z
|
espnet/nets/pytorch_backend/lm/default.py
|
Hertin/espnet
|
a0f2175df08b4750a9f0305c20b8c11f6e941867
|
[
"Apache-2.0"
] | 2
|
2020-10-26T15:22:48.000Z
|
2021-01-15T10:17:57.000Z
|
espnet/nets/pytorch_backend/lm/default.py
|
Hertin/espnet
|
a0f2175df08b4750a9f0305c20b8c11f6e941867
|
[
"Apache-2.0"
] | 2
|
2021-11-30T07:42:44.000Z
|
2021-12-01T07:10:01.000Z
|
"""Default Recurrent Neural Network Languge Model in `lm_train.py`."""
from typing import Any
from typing import List
from typing import Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from espnet.nets.lm_interface import LMInterface
from espnet.nets.pytorch_backend.e2e_asr import to_device
from espnet.nets.scorer_interface import BatchScorerInterface
class DefaultRNNLM(BatchScorerInterface, LMInterface, nn.Module):
"""Default RNNLM for `LMInterface` Implementation.
Note:
PyTorch seems to have memory leak when one GPU compute this after data parallel.
If parallel GPUs compute this, it seems to be fine.
See also https://github.com/espnet/espnet/issues/1075
"""
@staticmethod
def add_arguments(parser):
"""Add arguments to command line argument parser."""
parser.add_argument(
"--type",
type=str,
default="lstm",
nargs="?",
choices=["lstm", "gru"],
help="Which type of RNN to use",
)
parser.add_argument(
"--layer", "-l", type=int, default=2, help="Number of hidden layers"
)
parser.add_argument(
"--unit", "-u", type=int, default=650, help="Number of hidden units"
)
parser.add_argument(
"--embed-unit",
default=None,
help="Number of hidden units in embedding layer, "
"if it is not specified, it keeps the same number with hidden units.",
)
parser.add_argument(
"--dropout-rate", type=float, default=0.5, help="dropout probability"
)
return parser
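    # Usage sketch (illustrative; assumes ``import argparse`` and a known
    # vocabulary size ``n_vocab``):
    #   parser = DefaultRNNLM.add_arguments(argparse.ArgumentParser())
    #   args = parser.parse_args(["--layer", "2", "--unit", "650"])
    #   lm = DefaultRNNLM(n_vocab, args)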
def __init__(self, n_vocab, args):
"""Initialize class.
Args:
n_vocab (int): The size of the vocabulary
            args (argparse.Namespace): configurations. See :py:meth:`add_arguments`.
"""
nn.Module.__init__(self)
        # NOTE: for compatibility with models older than version 0.5.0
dropout_rate = getattr(args, "dropout_rate", 0.0)
        # NOTE: for compatibility with models older than version 0.6.1
embed_unit = getattr(args, "embed_unit", None)
self.model = ClassifierWithState(
RNNLM(n_vocab, args.layer, args.unit, embed_unit, args.type, dropout_rate)
)
def state_dict(self):
"""Dump state dict."""
return self.model.state_dict()
def load_state_dict(self, d):
"""Load state dict."""
self.model.load_state_dict(d)
def forward(self, x, t):
"""Compute LM loss value from buffer sequences.
Args:
x (torch.Tensor): Input ids. (batch, len)
t (torch.Tensor): Target ids. (batch, len)
Returns:
tuple[torch.Tensor, torch.Tensor, torch.Tensor]: Tuple of
loss to backward (scalar),
negative log-likelihood of t: -log p(t) (scalar) and
the number of elements in x (scalar)
Notes:
The last two return values are used
            in perplexity: p(t)^{-1/n} = exp(-log p(t) / n)
"""
loss = 0
logp = 0
count = torch.tensor(0).long()
state = None
batch_size, sequence_length = x.shape
for i in range(sequence_length):
# Compute the loss at this time step and accumulate it
state, loss_batch = self.model(state, x[:, i], t[:, i])
non_zeros = torch.sum(x[:, i] != 0, dtype=loss_batch.dtype)
loss += loss_batch.mean() * non_zeros
logp += torch.sum(loss_batch * non_zeros)
count += int(non_zeros)
return loss / batch_size, loss, count.to(loss.device)
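    # Perplexity sketch (illustrative): given the three returned values,
    #   to_backward, nll, count = lm(x, t)
    #   ppl = torch.exp(nll / count.float())   # equals p(t)^{-1/n}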
def score(self, y, state, x):
"""Score new token.
Args:
y (torch.Tensor): 1D torch.int64 prefix tokens.
state: Scorer state for prefix tokens
x (torch.Tensor): 2D encoder feature that generates ys.
Returns:
tuple[torch.Tensor, Any]: Tuple of
torch.float32 scores for next token (n_vocab)
and next state for ys
"""
new_state, scores = self.model.predict(state, y[-1].unsqueeze(0))
return scores.squeeze(0), new_state
def final_score(self, state):
"""Score eos.
Args:
state: Scorer state for prefix tokens
Returns:
float: final score
"""
return self.model.final(state)
# batch beam search API (see BatchScorerInterface)
def batch_score(
self, ys: torch.Tensor, states: List[Any], xs: torch.Tensor
) -> Tuple[torch.Tensor, List[Any]]:
"""Score new token batch.
Args:
ys (torch.Tensor): torch.int64 prefix tokens (n_batch, ylen).
states (List[Any]): Scorer states for prefix tokens.
xs (torch.Tensor):
The encoder feature that generates ys (n_batch, xlen, n_feat).
Returns:
tuple[torch.Tensor, List[Any]]: Tuple of
                batchified scores for next token with shape of `(n_batch, n_vocab)`
and next state list for ys.
"""
# merge states
n_batch = len(ys)
n_layers = self.model.predictor.n_layers
if self.model.predictor.typ == "lstm":
keys = ("c", "h")
else:
keys = ("h",)
if states[0] is None:
states = None
else:
# transpose state of [batch, key, layer] into [key, layer, batch]
states = {
k: [
torch.stack([states[b][k][i] for b in range(n_batch)])
for i in range(n_layers)
]
for k in keys
}
states, logp = self.model.predict(states, ys[:, -1])
# transpose state of [key, layer, batch] into [batch, key, layer]
return (
logp,
[
{k: [states[k][i][b] for i in range(n_layers)] for k in keys}
for b in range(n_batch)
],
)
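    # State layout sketch (illustrative): each per-hypothesis state is a dict
    # such as {"h": [h_0, ..., h_{L-1}], "c": [...]} holding one tensor per
    # layer; batch_score stacks these into (n_batch, n_units) tensors for a
    # single batched predictor call, then splits the result back per hypothesis.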
class ClassifierWithState(nn.Module):
"""A wrapper for pytorch RNNLM."""
def __init__(
self, predictor, lossfun=nn.CrossEntropyLoss(reduction="none"), label_key=-1
):
"""Initialize class.
:param torch.nn.Module predictor : The RNNLM
:param function lossfun : The loss function to use
:param int/str label_key :
"""
if not (isinstance(label_key, (int, str))):
raise TypeError("label_key must be int or str, but is %s" % type(label_key))
super(ClassifierWithState, self).__init__()
self.lossfun = lossfun
self.y = None
self.loss = None
self.label_key = label_key
self.predictor = predictor
def forward(self, state, *args, **kwargs):
"""Compute the loss value for an input and label pair.
Notes:
It also computes accuracy and stores it to the attribute.
When ``label_key`` is ``int``, the corresponding element in ``args``
is treated as ground truth labels. And when it is ``str``, the
element in ``kwargs`` is used.
            All elements of ``args`` and ``kwargs`` except the ground-truth
            labels are features.
            It feeds the features to the predictor and compares the result
            with the ground-truth labels.
:param torch.Tensor state : the LM state
:param list[torch.Tensor] args : Input minibatch
:param dict[torch.Tensor] kwargs : Input minibatch
:return loss value
:rtype torch.Tensor
"""
if isinstance(self.label_key, int):
if not (-len(args) <= self.label_key < len(args)):
msg = "Label key %d is out of bounds" % self.label_key
raise ValueError(msg)
t = args[self.label_key]
if self.label_key == -1:
args = args[:-1]
else:
args = args[: self.label_key] + args[self.label_key + 1 :]
elif isinstance(self.label_key, str):
if self.label_key not in kwargs:
msg = 'Label key "%s" is not found' % self.label_key
raise ValueError(msg)
t = kwargs[self.label_key]
del kwargs[self.label_key]
self.y = None
self.loss = None
state, self.y = self.predictor(state, *args, **kwargs)
self.loss = self.lossfun(self.y, t)
return state, self.loss
def predict(self, state, x):
"""Predict log probabilities for given state and input x using the predictor.
:param torch.Tensor state : The current state
:param torch.Tensor x : The input
:return a tuple (new state, log prob vector)
:rtype (torch.Tensor, torch.Tensor)
"""
if hasattr(self.predictor, "normalized") and self.predictor.normalized:
return self.predictor(state, x)
else:
state, z = self.predictor(state, x)
return state, F.log_softmax(z, dim=1)
def buff_predict(self, state, x, n):
"""Predict new tokens from buffered inputs."""
if self.predictor.__class__.__name__ == "RNNLM":
return self.predict(state, x)
new_state = []
new_log_y = []
for i in range(n):
state_i = None if state is None else state[i]
state_i, log_y = self.predict(state_i, x[i].unsqueeze(0))
new_state.append(state_i)
new_log_y.append(log_y)
return new_state, torch.cat(new_log_y)
def final(self, state, index=None):
"""Predict final log probabilities for given state using the predictor.
:param state: The state
:return The final log probabilities
:rtype torch.Tensor
"""
if hasattr(self.predictor, "final"):
if index is not None:
return self.predictor.final(state[index])
else:
return self.predictor.final(state)
else:
return 0.0
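# Usage sketch for ClassifierWithState (illustrative; the toy sizes are
# assumptions):
#   clf = ClassifierWithState(RNNLM(n_vocab=10, n_layers=1, n_units=8))
#   state, logp = clf.predict(None, torch.tensor([3]))  # logp: (1, 10) log-probs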
# Definition of a recurrent net for language modeling
class RNNLM(nn.Module):
"""A pytorch RNNLM."""
def __init__(
self, n_vocab, n_layers, n_units, n_embed=None, typ="lstm", dropout_rate=0.5
):
"""Initialize class.
:param int n_vocab: The size of the vocabulary
:param int n_layers: The number of layers to create
:param int n_units: The number of units per layer
:param str typ: The RNN type
"""
super(RNNLM, self).__init__()
if n_embed is None:
n_embed = n_units
self.embed = nn.Embedding(n_vocab, n_embed)
if typ == "lstm":
self.rnn = nn.ModuleList(
[nn.LSTMCell(n_embed, n_units)]
+ [nn.LSTMCell(n_units, n_units) for _ in range(n_layers - 1)]
)
else:
self.rnn = nn.ModuleList(
[nn.GRUCell(n_embed, n_units)]
+ [nn.GRUCell(n_units, n_units) for _ in range(n_layers - 1)]
)
self.dropout = nn.ModuleList(
[nn.Dropout(dropout_rate) for _ in range(n_layers + 1)]
)
self.lo = nn.Linear(n_units, n_vocab)
self.n_layers = n_layers
self.n_units = n_units
self.typ = typ
# initialize parameters from uniform distribution
for param in self.parameters():
param.data.uniform_(-0.1, 0.1)
def zero_state(self, batchsize):
"""Initialize state."""
p = next(self.parameters())
return torch.zeros(batchsize, self.n_units).to(device=p.device, dtype=p.dtype)
def forward(self, state, x):
"""Forward neural networks."""
if state is None:
h = [to_device(x, self.zero_state(x.size(0))) for n in range(self.n_layers)]
state = {"h": h}
if self.typ == "lstm":
c = [
to_device(x, self.zero_state(x.size(0)))
for n in range(self.n_layers)
]
state = {"c": c, "h": h}
h = [None] * self.n_layers
emb = self.embed(x)
if self.typ == "lstm":
c = [None] * self.n_layers
h[0], c[0] = self.rnn[0](
self.dropout[0](emb), (state["h"][0], state["c"][0])
)
for n in range(1, self.n_layers):
h[n], c[n] = self.rnn[n](
self.dropout[n](h[n - 1]), (state["h"][n], state["c"][n])
)
state = {"c": c, "h": h}
else:
h[0] = self.rnn[0](self.dropout[0](emb), state["h"][0])
for n in range(1, self.n_layers):
h[n] = self.rnn[n](self.dropout[n](h[n - 1]), state["h"][n])
state = {"h": h}
y = self.lo(self.dropout[-1](h[-1]))
return state, y
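# Stepwise usage sketch for RNNLM (illustrative; shapes and sizes are
# assumptions):
#   lm = RNNLM(n_vocab=100, n_layers=2, n_units=16)
#   x = torch.randint(0, 100, (4,))  # one token id per batch element
#   state, y = lm(None, x)           # y: (4, 100) unnormalized logits
#   state, y = lm(state, x)          # later steps reuse the recurrent state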
| 34.565333
| 88
| 0.554004
|
from typing import Any
from typing import List
from typing import Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from espnet.nets.lm_interface import LMInterface
from espnet.nets.pytorch_backend.e2e_asr import to_device
from espnet.nets.scorer_interface import BatchScorerInterface
class DefaultRNNLM(BatchScorerInterface, LMInterface, nn.Module):
@staticmethod
def add_arguments(parser):
parser.add_argument(
"--type",
type=str,
default="lstm",
nargs="?",
choices=["lstm", "gru"],
help="Which type of RNN to use",
)
parser.add_argument(
"--layer", "-l", type=int, default=2, help="Number of hidden layers"
)
parser.add_argument(
"--unit", "-u", type=int, default=650, help="Number of hidden units"
)
parser.add_argument(
"--embed-unit",
default=None,
help="Number of hidden units in embedding layer, "
"if it is not specified, it keeps the same number with hidden units.",
)
parser.add_argument(
"--dropout-rate", type=float, default=0.5, help="dropout probability"
)
return parser
def __init__(self, n_vocab, args):
nn.Module.__init__(self)
dropout_rate = getattr(args, "dropout_rate", 0.0)
embed_unit = getattr(args, "embed_unit", None)
self.model = ClassifierWithState(
RNNLM(n_vocab, args.layer, args.unit, embed_unit, args.type, dropout_rate)
)
def state_dict(self):
return self.model.state_dict()
def load_state_dict(self, d):
self.model.load_state_dict(d)
def forward(self, x, t):
loss = 0
logp = 0
count = torch.tensor(0).long()
state = None
batch_size, sequence_length = x.shape
for i in range(sequence_length):
state, loss_batch = self.model(state, x[:, i], t[:, i])
non_zeros = torch.sum(x[:, i] != 0, dtype=loss_batch.dtype)
loss += loss_batch.mean() * non_zeros
logp += torch.sum(loss_batch * non_zeros)
count += int(non_zeros)
return loss / batch_size, loss, count.to(loss.device)
def score(self, y, state, x):
new_state, scores = self.model.predict(state, y[-1].unsqueeze(0))
return scores.squeeze(0), new_state
def final_score(self, state):
return self.model.final(state)
def batch_score(
self, ys: torch.Tensor, states: List[Any], xs: torch.Tensor
) -> Tuple[torch.Tensor, List[Any]]:
n_batch = len(ys)
n_layers = self.model.predictor.n_layers
if self.model.predictor.typ == "lstm":
keys = ("c", "h")
else:
keys = ("h",)
if states[0] is None:
states = None
else:
states = {
k: [
torch.stack([states[b][k][i] for b in range(n_batch)])
for i in range(n_layers)
]
for k in keys
}
states, logp = self.model.predict(states, ys[:, -1])
return (
logp,
[
{k: [states[k][i][b] for i in range(n_layers)] for k in keys}
for b in range(n_batch)
],
)
class ClassifierWithState(nn.Module):
def __init__(
self, predictor, lossfun=nn.CrossEntropyLoss(reduction="none"), label_key=-1
):
if not (isinstance(label_key, (int, str))):
raise TypeError("label_key must be int or str, but is %s" % type(label_key))
super(ClassifierWithState, self).__init__()
self.lossfun = lossfun
self.y = None
self.loss = None
self.label_key = label_key
self.predictor = predictor
def forward(self, state, *args, **kwargs):
if isinstance(self.label_key, int):
if not (-len(args) <= self.label_key < len(args)):
msg = "Label key %d is out of bounds" % self.label_key
raise ValueError(msg)
t = args[self.label_key]
if self.label_key == -1:
args = args[:-1]
else:
args = args[: self.label_key] + args[self.label_key + 1 :]
elif isinstance(self.label_key, str):
if self.label_key not in kwargs:
msg = 'Label key "%s" is not found' % self.label_key
raise ValueError(msg)
t = kwargs[self.label_key]
del kwargs[self.label_key]
self.y = None
self.loss = None
state, self.y = self.predictor(state, *args, **kwargs)
self.loss = self.lossfun(self.y, t)
return state, self.loss
def predict(self, state, x):
if hasattr(self.predictor, "normalized") and self.predictor.normalized:
return self.predictor(state, x)
else:
state, z = self.predictor(state, x)
return state, F.log_softmax(z, dim=1)
def buff_predict(self, state, x, n):
if self.predictor.__class__.__name__ == "RNNLM":
return self.predict(state, x)
new_state = []
new_log_y = []
for i in range(n):
state_i = None if state is None else state[i]
state_i, log_y = self.predict(state_i, x[i].unsqueeze(0))
new_state.append(state_i)
new_log_y.append(log_y)
return new_state, torch.cat(new_log_y)
def final(self, state, index=None):
if hasattr(self.predictor, "final"):
if index is not None:
return self.predictor.final(state[index])
else:
return self.predictor.final(state)
else:
return 0.0
class RNNLM(nn.Module):
def __init__(
self, n_vocab, n_layers, n_units, n_embed=None, typ="lstm", dropout_rate=0.5
):
super(RNNLM, self).__init__()
if n_embed is None:
n_embed = n_units
self.embed = nn.Embedding(n_vocab, n_embed)
if typ == "lstm":
self.rnn = nn.ModuleList(
[nn.LSTMCell(n_embed, n_units)]
+ [nn.LSTMCell(n_units, n_units) for _ in range(n_layers - 1)]
)
else:
self.rnn = nn.ModuleList(
[nn.GRUCell(n_embed, n_units)]
+ [nn.GRUCell(n_units, n_units) for _ in range(n_layers - 1)]
)
self.dropout = nn.ModuleList(
[nn.Dropout(dropout_rate) for _ in range(n_layers + 1)]
)
self.lo = nn.Linear(n_units, n_vocab)
self.n_layers = n_layers
self.n_units = n_units
self.typ = typ
for param in self.parameters():
param.data.uniform_(-0.1, 0.1)
def zero_state(self, batchsize):
p = next(self.parameters())
return torch.zeros(batchsize, self.n_units).to(device=p.device, dtype=p.dtype)
def forward(self, state, x):
if state is None:
h = [to_device(x, self.zero_state(x.size(0))) for n in range(self.n_layers)]
state = {"h": h}
if self.typ == "lstm":
c = [
to_device(x, self.zero_state(x.size(0)))
for n in range(self.n_layers)
]
state = {"c": c, "h": h}
h = [None] * self.n_layers
emb = self.embed(x)
if self.typ == "lstm":
c = [None] * self.n_layers
h[0], c[0] = self.rnn[0](
self.dropout[0](emb), (state["h"][0], state["c"][0])
)
for n in range(1, self.n_layers):
h[n], c[n] = self.rnn[n](
self.dropout[n](h[n - 1]), (state["h"][n], state["c"][n])
)
state = {"c": c, "h": h}
else:
h[0] = self.rnn[0](self.dropout[0](emb), state["h"][0])
for n in range(1, self.n_layers):
h[n] = self.rnn[n](self.dropout[n](h[n - 1]), state["h"][n])
state = {"h": h}
y = self.lo(self.dropout[-1](h[-1]))
return state, y
| true
| true
|
1c41b07b12f06d5178ee919761cfff1e7f50f924
| 171
|
py
|
Python
|
Exercícios/ex030.py
|
JefterV/Cursoemvideo.py
|
e65ac53a4e38793be3039d360e7127e1c5d51030
|
[
"MIT"
] | 3
|
2020-11-24T17:20:34.000Z
|
2020-12-03T01:19:31.000Z
|
Exercícios/ex030.py
|
JefterV/Cursoemvideo.py
|
e65ac53a4e38793be3039d360e7127e1c5d51030
|
[
"MIT"
] | null | null | null |
Exercícios/ex030.py
|
JefterV/Cursoemvideo.py
|
e65ac53a4e38793be3039d360e7127e1c5d51030
|
[
"MIT"
] | 1
|
2021-01-03T00:48:48.000Z
|
2021-01-03T00:48:48.000Z
|
num = int(input('Digite um numero: '))
resultado = num % 2
if resultado == 0:
print('O numero {} é PAR'.format(num))
else:
print('O numero {} é IMPAR'.format(num))
| 28.5
| 44
| 0.619883
|
num = int(input('Digite um numero: '))
resultado = num % 2
if resultado == 0:
print('O numero {} é PAR'.format(num))
else:
print('O numero {} é IMPAR'.format(num))
| true
| true
|
1c41b0dd46bde5deecd1784f1c58b70fbada87c2
| 30,602
|
py
|
Python
|
qiskit/providers/aer/backends/qasm_simulator.py
|
paulineollitrault/qiskit-aer
|
7f9ad2ea93698813901b345f7ee6eee8e02ebce9
|
[
"Apache-2.0"
] | null | null | null |
qiskit/providers/aer/backends/qasm_simulator.py
|
paulineollitrault/qiskit-aer
|
7f9ad2ea93698813901b345f7ee6eee8e02ebce9
|
[
"Apache-2.0"
] | null | null | null |
qiskit/providers/aer/backends/qasm_simulator.py
|
paulineollitrault/qiskit-aer
|
7f9ad2ea93698813901b345f7ee6eee8e02ebce9
|
[
"Apache-2.0"
] | null | null | null |
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Qiskit Aer qasm simulator backend.
"""
import copy
import logging
from warnings import warn
from qiskit.providers.options import Options
from qiskit.providers.models import QasmBackendConfiguration
from ..version import __version__
from ..aererror import AerError
from .aerbackend import AerBackend
from .backend_utils import (cpp_execute, available_methods,
MAX_QUBITS_STATEVECTOR,
LEGACY_METHOD_MAP,
map_legacy_method_options)
# pylint: disable=import-error, no-name-in-module
from .controller_wrappers import aer_controller_execute
logger = logging.getLogger(__name__)
class QasmSimulator(AerBackend):
"""
Noisy quantum circuit simulator backend.
**Configurable Options**
The `QasmSimulator` supports multiple simulation methods and
configurable options for each simulation method. These may be set using the
    appropriate kwargs during initialization. They can also be set or updated
using the :meth:`set_options` method.
Run-time options may also be specified as kwargs using the :meth:`run` method.
These will not be stored in the backend and will only apply to that execution.
They will also override any previously set options.
For example, to configure a density matrix simulator with a custom noise
model to use for every execution
.. code-block:: python
noise_model = NoiseModel.from_backend(backend)
backend = QasmSimulator(method='density_matrix',
noise_model=noise_model)
**Simulating an IBMQ Backend**
The simulator can be automatically configured to mimic an IBMQ backend using
the :meth:`from_backend` method. This will configure the simulator to use the
basic device :class:`NoiseModel` for that backend, and the same basis gates
and coupling map.
.. code-block:: python
backend = QasmSimulator.from_backend(backend)
**Simulation Method Option**
The simulation method is set using the ``method`` kwarg.
Supported simulation methods are
* ``"statevector"``: A dense statevector simulation that can sample
measurement outcomes from *ideal* circuits with all measurements at
end of the circuit. For noisy simulations each shot samples a
randomly sampled noisy circuit from the noise model.
``"statevector_cpu"`` is an alias of ``"statevector"``.
* ``"statevector_gpu"``: A dense statevector simulation that provides
the same functionalities with ``"statevector"``. GPU performs the computation
to calculate probability amplitudes as CPU does. If no GPU is available,
a runtime error is raised.
* ``"density_matrix"``: A dense density matrix simulation that may
sample measurement outcomes from *noisy* circuits with all
measurements at end of the circuit. It can only simulate half the
number of qubits as the statevector method.
* ``"density_matrix_gpu"``: A dense density matrix simulation that provides
the same functionalities with ``"density_matrix"``. GPU performs the computation
to calculate probability amplitudes as CPU does. If no GPU is available,
a runtime error is raised.
* ``"stabilizer"``: An efficient Clifford stabilizer state simulator
that can simulate noisy Clifford circuits if all errors in the noise model are also
Clifford errors.
* ``"extended_stabilizer"``: An approximate simulated based on a
ranked-stabilizer decomposition that decomposes circuits into stabilizer
state terms. The number of terms grows with the number of
non-Clifford gates.
* ``"matrix_product_state"``: A tensor-network statevector simulator that
uses a Matrix Product State (MPS) representation for the state.
* ``"automatic"``: The default behavior where the method is chosen
automatically for each circuit based on the circuit instructions,
number of qubits, and noise model.
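    For example, a minimal sketch that selects a method explicitly
    (here ``circuit`` stands for any pre-built ``QuantumCircuit``):
    .. code-block:: python
        backend = QasmSimulator(method='matrix_product_state')
        result = backend.run(circuit, shots=1024).result()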
**Additional Backend Options**
The following simulator specific backend options are supported
* ``method`` (str): Set the simulation method (Default: ``"automatic"``).
      Use :meth:`available_methods` to return a list of all available methods.
* ``device`` (str): Set the simulation device (Default: ``"CPU"``).
Use :meth:`available_devices` to return a list of devices supported
on the current system.
* ``precision`` (str): Set the floating point precision for
certain simulation methods to either ``"single"`` or ``"double"``
precision (default: ``"double"``).
* ``executor`` (futures.Executor): Set a custom executor for
asynchronous running of simulation jobs (Default: None).
* ``max_job_size`` (int or None): If the number of run circuits
      exceeds this value, the simulation will be run as a set of sub-jobs
      on the executor. If ``None``, simulation of all circuits is submitted
to the executor as a single job (Default: None).
* ``enable_truncation`` (bool): If set to True this removes unnecessary
qubits which do not affect the simulation outcome from the simulated
circuits (Default: True).
* ``zero_threshold`` (double): Sets the threshold for truncating
small values to zero in the result data (Default: 1e-10).
* ``validation_threshold`` (double): Sets the threshold for checking
if initial states are valid (Default: 1e-8).
* ``max_parallel_threads`` (int): Sets the maximum number of CPU
cores used by OpenMP for parallelization. If set to 0 the
maximum will be set to the number of CPU cores (Default: 0).
* ``max_parallel_experiments`` (int): Sets the maximum number of
qobj experiments that may be executed in parallel up to the
max_parallel_threads value. If set to 1 parallel circuit
execution will be disabled. If set to 0 the maximum will be
automatically set to max_parallel_threads (Default: 1).
* ``max_parallel_shots`` (int): Sets the maximum number of
shots that may be executed in parallel during each experiment
execution, up to the max_parallel_threads value. If set to 1
parallel shot execution will be disabled. If set to 0 the
maximum will be automatically set to max_parallel_threads.
Note that this cannot be enabled at the same time as parallel
experiment execution (Default: 0).
* ``max_memory_mb`` (int): Sets the maximum size of memory
to store a state vector. If a state vector needs more, an error
is thrown. In general, a state vector of n-qubits uses 2^n complex
values (16 Bytes). If set to 0, the maximum will be automatically
set to the system memory size (Default: 0).
* ``optimize_ideal_threshold`` (int): Sets the qubit threshold for
applying circuit optimization passes on ideal circuits.
Passes include gate fusion and truncation of unused qubits
(Default: 5).
* ``optimize_noise_threshold`` (int): Sets the qubit threshold for
applying circuit optimization passes on ideal circuits.
Passes include gate fusion and truncation of unused qubits
(Default: 12).
These backend options only apply when using the ``"statevector"``
simulation method:
* ``statevector_parallel_threshold`` (int): Sets the threshold that
the number of qubits must be greater than to enable OpenMP
parallelization for matrix multiplication during execution of
an experiment. If parallel circuit or shot execution is enabled
this will only use unallocated CPU cores up to
max_parallel_threads. Note that setting this too low can reduce
performance (Default: 14).
* ``statevector_sample_measure_opt`` (int): Sets the threshold that
the number of qubits must be greater than to enable a large
qubit optimized implementation of measurement sampling. Note
      that setting this too low can reduce performance (Default: 10).
These backend options only apply when using the ``"stabilizer"``
simulation method:
* ``stabilizer_max_snapshot_probabilities`` (int): set the maximum
qubit number for the
`~qiskit.providers.aer.extensions.SnapshotProbabilities`
instruction (Default: 32).
These backend options only apply when using the ``"extended_stabilizer"``
simulation method:
* ``extended_stabilizer_sampling_method`` (string): Choose how to simulate
measurements on qubits. The performance of the simulator depends
significantly on this choice. In the following, let n be the number of
qubits in the circuit, m the number of qubits measured, and S be the
number of shots (Default: resampled_metropolis).
- ``"metropolis"``: Use a Monte-Carlo method to sample many output
strings from the simulator at once. To be accurate, this method
requires that all the possible output strings have a non-zero
probability. It will give inaccurate results on cases where
the circuit has many zero-probability outcomes.
This method has an overall runtime that scales as n^{2} + (S-1)n.
- ``"resampled_metropolis"``: A variant of the metropolis method,
where the Monte-Carlo method is reinitialised for every shot. This
gives better results for circuits where some outcomes have zero
probability, but will still fail if the output distribution
is sparse. The overall runtime scales as Sn^{2}.
- ``"norm_estimation"``: An alternative sampling method using
        random state inner products to estimate outcome probabilities. This
method requires twice as much memory, and significantly longer
runtimes, but gives accurate results on circuits with sparse
output distributions. The overall runtime scales as Sn^{3}m^{3}.
* ``extended_stabilizer_metropolis_mixing_time`` (int): Set how long the
      Monte-Carlo method runs before performing measurements. If the
output distribution is strongly peaked, this can be decreased
alongside setting extended_stabilizer_disable_measurement_opt
to True (Default: 5000).
* ``"extended_stabilizer_approximation_error"`` (double): Set the error
in the approximation for the extended_stabilizer method. A
smaller error needs more memory and computational time
(Default: 0.05).
* ``extended_stabilizer_norm_estimation_samples`` (int): The default number
of samples for the norm estimation sampler. The method will use the
default, or 4m^{2} samples where m is the number of qubits to be
measured, whichever is larger (Default: 100).
* ``extended_stabilizer_norm_estimation_repetitions`` (int): The number
      of times to repeat the norm estimation. The median of these repetitions
is used to estimate and sample output strings (Default: 3).
* ``extended_stabilizer_parallel_threshold`` (int): Set the minimum
size of the extended stabilizer decomposition before we enable
OpenMP parallelization. If parallel circuit or shot execution
is enabled this will only use unallocated CPU cores up to
max_parallel_threads (Default: 100).
* ``extended_stabilizer_probabilities_snapshot_samples`` (int): If using
the metropolis or resampled_metropolis sampling method, set the number of
samples used to estimate probabilities in a probabilities snapshot
(Default: 3000).
These backend options only apply when using the ``"matrix_product_state"``
simulation method:
* ``matrix_product_state_max_bond_dimension`` (int): Sets a limit
on the number of Schmidt coefficients retained at the end of
the svd algorithm. Coefficients beyond this limit will be discarded.
(Default: None, i.e., no limit on the bond dimension).
* ``matrix_product_state_truncation_threshold`` (double):
Discard the smallest coefficients for which the sum of
their squares is smaller than this threshold.
(Default: 1e-16).
* ``mps_sample_measure_algorithm`` (str): Choose which algorithm to use for
``"sample_measure"`` (Default: "mps_apply_measure").
- ``"mps_probabilities"``: This method first constructs the probability
vector and then generates a sample per shot. It is more efficient for
a large number of shots and a small number of qubits, with complexity
O(2^n * n * D^2) to create the vector and O(1) per shot, where n is
the number of qubits and D is the bond dimension.
- ``"mps_apply_measure"``: This method creates a copy of the mps structure
and measures directly on it. It is more efficient for a small number of
shots, and a large number of qubits, with complexity around
O(n * D^2) per shot.
* ``mps_log_data`` (str): if True, output logging data of the MPS
structure: bond dimensions and values discarded during approximation.
(Default: False)
These backend options apply in circuit optimization passes:
* ``fusion_enable`` (bool): Enable fusion optimization in circuit
optimization passes [Default: True]
* ``fusion_verbose`` (bool): Output gates generated in fusion optimization
into metadata [Default: False]
    * ``fusion_max_qubit`` (int): Maximum number of qubits for an operation generated
in a fusion optimization [Default: 5]
    * ``fusion_threshold`` (int): Threshold that the number of qubits must be
      greater than or equal to in order to enable fusion optimization
      [Default: 14]
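    For example, a run-time sketch that overrides fusion options for a single
    execution without storing them on the backend (``circuit`` again stands
    for any pre-built ``QuantumCircuit``):
    .. code-block:: python
        result = backend.run(circuit,
                             fusion_enable=True,
                             fusion_threshold=20).result()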
"""
_DEFAULT_BASIS_GATES = sorted([
'u1', 'u2', 'u3', 'u', 'p', 'r', 'rx', 'ry', 'rz', 'id', 'x',
'y', 'z', 'h', 's', 'sdg', 'sx', 'sxdg', 't', 'tdg', 'swap', 'cx',
'cy', 'cz', 'csx', 'cp', 'cu', 'cu1', 'cu2', 'cu3', 'rxx', 'ryy',
'rzz', 'rzx', 'ccx', 'cswap', 'mcx', 'mcy', 'mcz', 'mcsx',
'mcp', 'mcphase', 'mcu', 'mcu1', 'mcu2', 'mcu3', 'mcrx', 'mcry', 'mcrz',
'mcr', 'mcswap', 'unitary', 'diagonal', 'multiplexer',
'initialize', 'delay', 'pauli', 'mcx_gray'
])
_DEFAULT_CUSTOM_INSTR = sorted([
'roerror', 'kraus', 'snapshot', 'save_expval', 'save_expval_var',
'save_probabilities', 'save_probabilities_dict',
'save_amplitudes', 'save_amplitudes_sq', 'save_state',
'save_density_matrix', 'save_statevector', 'save_statevector_dict',
'save_stabilizer', 'set_statevector', 'set_density_matrix',
'set_stabilizer'
])
_DEFAULT_CONFIGURATION = {
'backend_name': 'qasm_simulator',
'backend_version': __version__,
'n_qubits': MAX_QUBITS_STATEVECTOR,
'url': 'https://github.com/Qiskit/qiskit-aer',
'simulator': True,
'local': True,
'conditional': True,
'open_pulse': False,
'memory': True,
'max_shots': int(1e6),
'description': 'A C++ QasmQobj simulator with noise',
'coupling_map': None,
'basis_gates': _DEFAULT_BASIS_GATES,
'custom_instructions': _DEFAULT_CUSTOM_INSTR,
'gates': []
}
_SIMULATION_METHODS = [
'automatic', 'statevector', 'statevector_gpu',
'statevector_thrust', 'density_matrix',
'density_matrix_gpu', 'density_matrix_thrust',
'stabilizer', 'matrix_product_state', 'extended_stabilizer'
]
_AVAILABLE_METHODS = None
_SIMULATION_DEVICES = ('CPU', 'GPU', 'Thrust')
_AVAILABLE_DEVICES = None
def __init__(self,
configuration=None,
properties=None,
provider=None,
**backend_options):
warn('The `QasmSimulator` backend will be deprecated in the'
' future. It has been superseded by the `AerSimulator`'
' backend.', PendingDeprecationWarning)
self._controller = aer_controller_execute()
# Update available methods for class
if QasmSimulator._AVAILABLE_METHODS is None:
QasmSimulator._AVAILABLE_METHODS = available_methods(
self._controller, QasmSimulator._SIMULATION_METHODS)
# Default configuration
if configuration is None:
configuration = QasmBackendConfiguration.from_dict(
QasmSimulator._DEFAULT_CONFIGURATION)
else:
configuration.open_pulse = False
# Cache basis gates since computing the intersection
# of noise model, method, and config gates is expensive.
self._cached_basis_gates = self._DEFAULT_BASIS_GATES
super().__init__(configuration,
properties=properties,
provider=provider,
backend_options=backend_options)
def __repr__(self):
"""String representation of an AerBackend."""
display = super().__repr__()[:-1]
pad = ' ' * (len(self.__class__.__name__) + 1)
method = getattr(self.options, 'method', None)
if method not in [None, 'automatic']:
display += ",\n{}method='{}'".format(pad, method)
noise_model = getattr(self.options, 'noise_model', None)
if noise_model is not None and not noise_model.is_ideal():
display += ',\n{}noise_model={})'.format(pad, repr(noise_model))
display += ")"
return display
@classmethod
def _default_options(cls):
return Options(
# Global options
shots=1024,
method=None,
device="CPU",
precision="double",
executor=None,
max_job_size=None,
enable_truncation=True,
zero_threshold=1e-10,
validation_threshold=None,
max_parallel_threads=None,
max_parallel_experiments=None,
max_parallel_shots=None,
max_memory_mb=None,
optimize_ideal_threshold=5,
optimize_noise_threshold=12,
fusion_enable=True,
fusion_verbose=False,
fusion_max_qubit=5,
fusion_threshold=14,
accept_distributed_results=None,
blocking_qubits=None,
blocking_enable=False,
memory=None,
noise_model=None,
seed_simulator=None,
# statevector options
statevector_parallel_threshold=14,
statevector_sample_measure_opt=10,
# stabilizer options
stabilizer_max_snapshot_probabilities=32,
# extended stabilizer options
extended_stabilizer_sampling_method='resampled_metropolis',
extended_stabilizer_metropolis_mixing_time=5000,
extended_stabilizer_approximation_error=0.05,
extended_stabilizer_norm_estimation_samples=100,
extended_stabilizer_norm_estimation_repetitions=3,
extended_stabilizer_parallel_threshold=100,
extended_stabilizer_probabilities_snapshot_samples=3000,
# MPS options
matrix_product_state_truncation_threshold=1e-16,
matrix_product_state_max_bond_dimension=None,
mps_sample_measure_algorithm='mps_heuristic',
mps_log_data=False,
chop_threshold=1e-8,
mps_parallel_threshold=14,
mps_omp_threads=1)
@classmethod
def from_backend(cls, backend, **options):
"""Initialize simulator from backend."""
# pylint: disable=import-outside-toplevel
# Avoid cyclic import
from ..noise.noise_model import NoiseModel
# Get configuration and properties from backend
configuration = copy.copy(backend.configuration())
properties = copy.copy(backend.properties())
# Customize configuration name
name = configuration.backend_name
configuration.backend_name = 'qasm_simulator({})'.format(name)
# Use automatic noise model if none is provided
if 'noise_model' not in options:
noise_model = NoiseModel.from_backend(backend)
if not noise_model.is_ideal():
options['noise_model'] = noise_model
# Initialize simulator
sim = cls(configuration=configuration,
properties=properties,
**options)
return sim
def configuration(self):
"""Return the simulator backend configuration.
Returns:
BackendConfiguration: the configuration for the backend.
"""
config = copy.copy(self._configuration)
for key, val in self._options_configuration.items():
setattr(config, key, val)
# Update basis gates based on custom options, config, method,
# and noise model
config.custom_instructions = self._custom_instructions()
config.basis_gates = self._cached_basis_gates + config.custom_instructions
return config
def available_methods(self):
"""Return the available simulation methods."""
return copy.copy(self._AVAILABLE_METHODS)
def available_devices(self):
"""Return the available simulation methods."""
return copy.copy(self._AVAILABLE_DEVICES)
def _execute(self, qobj):
"""Execute a qobj on the backend.
Args:
qobj (QasmQobj): simulator input.
Returns:
dict: return a dictionary of results.
"""
qobj = map_legacy_method_options(qobj)
return cpp_execute(self._controller, qobj)
def set_options(self, **fields):
out_options = {}
update_basis_gates = False
for key, value in fields.items():
if key == 'method':
if value in LEGACY_METHOD_MAP:
value, device = LEGACY_METHOD_MAP[value]
out_options["device"] = device
self._set_method_config(value)
update_basis_gates = True
out_options[key] = value
if (value is not None and value not in self.available_methods()):
raise AerError(
"Invalid simulation method {}. Available methods"
" are: {}".format(value, self.available_methods()))
elif key in ['noise_model', 'basis_gates']:
update_basis_gates = True
out_options[key] = value
elif key == 'custom_instructions':
self._set_configuration_option(key, value)
else:
out_options[key] = value
super().set_options(**out_options)
if update_basis_gates:
self._cached_basis_gates = self._basis_gates()
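    # Example (illustrative): set_options(method="statevector_gpu") is remapped
    # via LEGACY_METHOD_MAP to method="statevector" with device="GPU" before
    # being validated against available_methods().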
def _validate(self, qobj):
"""Semantic validations of the qobj which cannot be done via schemas.
Warn if no measurements in circuit with classical registers.
"""
for experiment in qobj.experiments:
# If circuit contains classical registers but not
# measurements raise a warning
if experiment.config.memory_slots > 0:
# Check if measure opts missing
no_measure = True
for op in experiment.instructions:
if not no_measure:
break # we don't need to check any more ops
if no_measure and op.name == "measure":
no_measure = False
# Print warning if clbits but no measure
if no_measure:
logger.warning(
'No measurements in circuit "%s": '
'count data will return all zeros.',
experiment.header.name)
def _basis_gates(self):
"""Return simualtor basis gates.
This will be the option value of basis gates if it was set,
otherwise it will be the intersection of the configuration, noise model
and method supported basis gates.
"""
# Use option value for basis gates if set
if 'basis_gates' in self._options_configuration:
return self._options_configuration['basis_gates']
# Compute intersection with method basis gates
method_gates = self._method_basis_gates()
config_gates = self._configuration.basis_gates
if config_gates:
basis_gates = set(config_gates).intersection(
method_gates)
else:
basis_gates = method_gates
# Compute intersection with noise model basis gates
noise_model = getattr(self.options, 'noise_model', None)
if noise_model:
noise_gates = noise_model.basis_gates
basis_gates = basis_gates.intersection(noise_gates)
else:
noise_gates = None
if not basis_gates:
logger.warning(
"The intersection of configuration basis gates (%s), "
"simulation method basis gates (%s), and "
"noise model basis gates (%s) is empty",
config_gates, method_gates, noise_gates)
return sorted(basis_gates)
def _method_basis_gates(self):
"""Return method basis gates and custom instructions"""
method = self._options.get('method', None)
if method in ['density_matrix', 'density_matrix_gpu', 'density_matrix_thrust']:
return sorted([
'u1', 'u2', 'u3', 'u', 'p', 'r', 'rx', 'ry', 'rz', 'id', 'x',
'y', 'z', 'h', 's', 'sdg', 'sx', 'sxdg', 't', 'tdg', 'swap', 'cx',
'cy', 'cz', 'cp', 'cu1', 'rxx', 'ryy', 'rzz', 'rzx', 'ccx',
'unitary', 'diagonal', 'delay', 'pauli'
])
if method == 'matrix_product_state':
return sorted([
'u1', 'u2', 'u3', 'u', 'p', 'cp', 'cx', 'cy', 'cz', 'id', 'x',
'y', 'z', 'h', 's', 'sdg', 'sx', 'sxdg', 't', 'tdg', 'swap', 'ccx',
'unitary', 'roerror', 'delay', 'pauli', 'r', 'rx', 'ry', 'rz', 'rxx',
'ryy', 'rzz', 'rzx', 'csx', 'cswap', 'diagonal', 'initialize'
])
if method == 'stabilizer':
return sorted([
'id', 'x', 'y', 'z', 'h', 's', 'sdg', 'sx', 'sxdg', 'cx', 'cy', 'cz',
'swap', 'delay', 'pauli'
])
if method == 'extended_stabilizer':
return sorted([
'cx', 'cz', 'id', 'x', 'y', 'z', 'h', 's', 'sdg', 'sx', 'sxdg',
'swap', 'u0', 't', 'tdg', 'u1', 'p', 'ccx', 'ccz', 'delay', 'pauli'
])
return QasmSimulator._DEFAULT_BASIS_GATES
def _custom_instructions(self):
"""Return method basis gates and custom instructions"""
# pylint: disable = too-many-return-statements
if 'custom_instructions' in self._options_configuration:
return self._options_configuration['custom_instructions']
method = self._options.get('method', None)
if method in ['statevector', 'statevector_gpu', 'statevector_thrust']:
return sorted([
'roerror', 'kraus', 'snapshot', 'save_expval', 'save_expval_var',
'save_probabilities', 'save_probabilities_dict',
'save_amplitudes', 'save_amplitudes_sq', 'save_state',
'save_density_matrix', 'save_statevector', 'save_statevector_dict',
'set_statevector'
])
if method in ['density_matrix', 'density_matrix_gpu', 'density_matrix_thrust']:
return sorted([
'roerror', 'kraus', 'superop', 'snapshot', 'save_expval', 'save_expval_var',
'save_probabilities', 'save_probabilities_dict',
'save_state', 'save_density_matrix', 'save_amplitudes_sq',
'set_statevector', 'set_density_matrix'
])
if method == 'matrix_product_state':
return sorted([
'roerror', 'snapshot', 'kraus', 'save_expval', 'save_expval_var',
'save_probabilities', 'save_probabilities_dict',
'save_density_matrix', 'save_state', 'save_statevector',
'save_amplitudes', 'save_amplitudes_sq', 'save_matrix_product_state',
'set_matrix_product_state'])
if method == 'stabilizer':
return sorted([
'roerror', 'snapshot', 'save_expval', 'save_expval_var',
'save_probabilities', 'save_probabilities_dict',
'save_amplitudes_sq', 'save_state', 'save_stabilizer',
'set_stabilizer'
])
if method == 'extended_stabilizer':
return sorted(['roerror', 'snapshot', 'save_statevector'])
return QasmSimulator._DEFAULT_CUSTOM_INSTR
def _set_method_config(self, method=None):
"""Set non-basis gate options when setting method"""
super().set_options(method=method)
# Update configuration description and number of qubits
if method in ['statevector', 'statevector_gpu', 'statevector_thrust']:
description = 'A C++ statevector simulator with noise'
n_qubits = MAX_QUBITS_STATEVECTOR
elif method in ['density_matrix', 'density_matrix_gpu', 'density_matrix_thrust']:
description = 'A C++ density matrix simulator with noise'
n_qubits = MAX_QUBITS_STATEVECTOR // 2
elif method == 'matrix_product_state':
description = 'A C++ matrix product state simulator with noise'
n_qubits = 63 # TODO: not sure what to put here?
elif method == 'stabilizer':
description = 'A C++ Clifford stabilizer simulator with noise'
n_qubits = 10000 # TODO: estimate from memory
elif method == 'extended_stabilizer':
description = 'A C++ Clifford+T extended stabilizer simulator with noise'
n_qubits = 63 # TODO: estimate from memory
else:
# Clear options to default
description = None
n_qubits = None
self._set_configuration_option('description', description)
self._set_configuration_option('n_qubits', n_qubits)
| 43.968391
| 92
| 0.644664
|
import copy
import logging
from warnings import warn
from qiskit.providers.options import Options
from qiskit.providers.models import QasmBackendConfiguration
from ..version import __version__
from ..aererror import AerError
from .aerbackend import AerBackend
from .backend_utils import (cpp_execute, available_methods,
MAX_QUBITS_STATEVECTOR,
LEGACY_METHOD_MAP,
map_legacy_method_options)
from .controller_wrappers import aer_controller_execute
logger = logging.getLogger(__name__)
class QasmSimulator(AerBackend):
_DEFAULT_BASIS_GATES = sorted([
'u1', 'u2', 'u3', 'u', 'p', 'r', 'rx', 'ry', 'rz', 'id', 'x',
'y', 'z', 'h', 's', 'sdg', 'sx', 'sxdg', 't', 'tdg', 'swap', 'cx',
'cy', 'cz', 'csx', 'cp', 'cu', 'cu1', 'cu2', 'cu3', 'rxx', 'ryy',
'rzz', 'rzx', 'ccx', 'cswap', 'mcx', 'mcy', 'mcz', 'mcsx',
'mcp', 'mcphase', 'mcu', 'mcu1', 'mcu2', 'mcu3', 'mcrx', 'mcry', 'mcrz',
'mcr', 'mcswap', 'unitary', 'diagonal', 'multiplexer',
'initialize', 'delay', 'pauli', 'mcx_gray'
])
_DEFAULT_CUSTOM_INSTR = sorted([
'roerror', 'kraus', 'snapshot', 'save_expval', 'save_expval_var',
'save_probabilities', 'save_probabilities_dict',
'save_amplitudes', 'save_amplitudes_sq', 'save_state',
'save_density_matrix', 'save_statevector', 'save_statevector_dict',
'save_stabilizer', 'set_statevector', 'set_density_matrix',
'set_stabilizer'
])
_DEFAULT_CONFIGURATION = {
'backend_name': 'qasm_simulator',
'backend_version': __version__,
'n_qubits': MAX_QUBITS_STATEVECTOR,
'url': 'https://github.com/Qiskit/qiskit-aer',
'simulator': True,
'local': True,
'conditional': True,
'open_pulse': False,
'memory': True,
'max_shots': int(1e6),
'description': 'A C++ QasmQobj simulator with noise',
'coupling_map': None,
'basis_gates': _DEFAULT_BASIS_GATES,
'custom_instructions': _DEFAULT_CUSTOM_INSTR,
'gates': []
}
_SIMULATION_METHODS = [
'automatic', 'statevector', 'statevector_gpu',
'statevector_thrust', 'density_matrix',
'density_matrix_gpu', 'density_matrix_thrust',
'stabilizer', 'matrix_product_state', 'extended_stabilizer'
]
_AVAILABLE_METHODS = None
_SIMULATION_DEVICES = ('CPU', 'GPU', 'Thrust')
_AVAILABLE_DEVICES = None
def __init__(self,
configuration=None,
properties=None,
provider=None,
**backend_options):
warn('The `QasmSimulator` backend will be deprecated in the'
' future. It has been superseded by the `AerSimulator`'
' backend.', PendingDeprecationWarning)
self._controller = aer_controller_execute()
if QasmSimulator._AVAILABLE_METHODS is None:
QasmSimulator._AVAILABLE_METHODS = available_methods(
self._controller, QasmSimulator._SIMULATION_METHODS)
if configuration is None:
configuration = QasmBackendConfiguration.from_dict(
QasmSimulator._DEFAULT_CONFIGURATION)
else:
configuration.open_pulse = False
self._cached_basis_gates = self._DEFAULT_BASIS_GATES
super().__init__(configuration,
properties=properties,
provider=provider,
backend_options=backend_options)
def __repr__(self):
display = super().__repr__()[:-1]
pad = ' ' * (len(self.__class__.__name__) + 1)
method = getattr(self.options, 'method', None)
if method not in [None, 'automatic']:
display += ",\n{}method='{}'".format(pad, method)
noise_model = getattr(self.options, 'noise_model', None)
if noise_model is not None and not noise_model.is_ideal():
display += ',\n{}noise_model={})'.format(pad, repr(noise_model))
display += ")"
return display
@classmethod
def _default_options(cls):
return Options(
shots=1024,
method=None,
device="CPU",
precision="double",
executor=None,
max_job_size=None,
enable_truncation=True,
zero_threshold=1e-10,
validation_threshold=None,
max_parallel_threads=None,
max_parallel_experiments=None,
max_parallel_shots=None,
max_memory_mb=None,
optimize_ideal_threshold=5,
optimize_noise_threshold=12,
fusion_enable=True,
fusion_verbose=False,
fusion_max_qubit=5,
fusion_threshold=14,
accept_distributed_results=None,
blocking_qubits=None,
blocking_enable=False,
memory=None,
noise_model=None,
seed_simulator=None,
statevector_parallel_threshold=14,
statevector_sample_measure_opt=10,
stabilizer_max_snapshot_probabilities=32,
extended_stabilizer_sampling_method='resampled_metropolis',
extended_stabilizer_metropolis_mixing_time=5000,
extended_stabilizer_approximation_error=0.05,
extended_stabilizer_norm_estimation_samples=100,
extended_stabilizer_norm_estimation_repetitions=3,
extended_stabilizer_parallel_threshold=100,
extended_stabilizer_probabilities_snapshot_samples=3000,
matrix_product_state_truncation_threshold=1e-16,
matrix_product_state_max_bond_dimension=None,
mps_sample_measure_algorithm='mps_heuristic',
mps_log_data=False,
chop_threshold=1e-8,
mps_parallel_threshold=14,
mps_omp_threads=1)
@classmethod
def from_backend(cls, backend, **options):
from ..noise.noise_model import NoiseModel
configuration = copy.copy(backend.configuration())
properties = copy.copy(backend.properties())
name = configuration.backend_name
configuration.backend_name = 'qasm_simulator({})'.format(name)
if 'noise_model' not in options:
noise_model = NoiseModel.from_backend(backend)
if not noise_model.is_ideal():
options['noise_model'] = noise_model
sim = cls(configuration=configuration,
properties=properties,
**options)
return sim
def configuration(self):
config = copy.copy(self._configuration)
for key, val in self._options_configuration.items():
setattr(config, key, val)
config.custom_instructions = self._custom_instructions()
config.basis_gates = self._cached_basis_gates + config.custom_instructions
return config
def available_methods(self):
return copy.copy(self._AVAILABLE_METHODS)
def available_devices(self):
return copy.copy(self._AVAILABLE_DEVICES)
def _execute(self, qobj):
qobj = map_legacy_method_options(qobj)
return cpp_execute(self._controller, qobj)
def set_options(self, **fields):
out_options = {}
update_basis_gates = False
for key, value in fields.items():
if key == 'method':
if value in LEGACY_METHOD_MAP:
value, device = LEGACY_METHOD_MAP[value]
out_options["device"] = device
self._set_method_config(value)
update_basis_gates = True
out_options[key] = value
if (value is not None and value not in self.available_methods()):
raise AerError(
"Invalid simulation method {}. Available methods"
" are: {}".format(value, self.available_methods()))
elif key in ['noise_model', 'basis_gates']:
update_basis_gates = True
out_options[key] = value
elif key == 'custom_instructions':
self._set_configuration_option(key, value)
else:
out_options[key] = value
super().set_options(**out_options)
if update_basis_gates:
self._cached_basis_gates = self._basis_gates()
def _validate(self, qobj):
for experiment in qobj.experiments:
if experiment.config.memory_slots > 0:
no_measure = True
for op in experiment.instructions:
if not no_measure:
break
if no_measure and op.name == "measure":
no_measure = False
# Print warning if clbits but no measure
if no_measure:
logger.warning(
'No measurements in circuit "%s": '
'count data will return all zeros.',
experiment.header.name)
def _basis_gates(self):
# Use option value for basis gates if set
if 'basis_gates' in self._options_configuration:
return self._options_configuration['basis_gates']
# Compute intersection with method basis gates
method_gates = self._method_basis_gates()
config_gates = self._configuration.basis_gates
if config_gates:
basis_gates = set(config_gates).intersection(
method_gates)
else:
basis_gates = method_gates
# Compute intersection with noise model basis gates
noise_model = getattr(self.options, 'noise_model', None)
if noise_model:
noise_gates = noise_model.basis_gates
basis_gates = basis_gates.intersection(noise_gates)
else:
noise_gates = None
if not basis_gates:
logger.warning(
"The intersection of configuration basis gates (%s), "
"simulation method basis gates (%s), and "
"noise model basis gates (%s) is empty",
config_gates, method_gates, noise_gates)
return sorted(basis_gates)
def _method_basis_gates(self):
method = self._options.get('method', None)
if method in ['density_matrix', 'density_matrix_gpu', 'density_matrix_thrust']:
return sorted([
'u1', 'u2', 'u3', 'u', 'p', 'r', 'rx', 'ry', 'rz', 'id', 'x',
'y', 'z', 'h', 's', 'sdg', 'sx', 'sxdg', 't', 'tdg', 'swap', 'cx',
'cy', 'cz', 'cp', 'cu1', 'rxx', 'ryy', 'rzz', 'rzx', 'ccx',
'unitary', 'diagonal', 'delay', 'pauli'
])
if method == 'matrix_product_state':
return sorted([
'u1', 'u2', 'u3', 'u', 'p', 'cp', 'cx', 'cy', 'cz', 'id', 'x',
'y', 'z', 'h', 's', 'sdg', 'sx', 'sxdg', 't', 'tdg', 'swap', 'ccx',
'unitary', 'roerror', 'delay', 'pauli', 'r', 'rx', 'ry', 'rz', 'rxx',
'ryy', 'rzz', 'rzx', 'csx', 'cswap', 'diagonal', 'initialize'
])
if method == 'stabilizer':
return sorted([
'id', 'x', 'y', 'z', 'h', 's', 'sdg', 'sx', 'sxdg', 'cx', 'cy', 'cz',
'swap', 'delay', 'pauli'
])
if method == 'extended_stabilizer':
return sorted([
'cx', 'cz', 'id', 'x', 'y', 'z', 'h', 's', 'sdg', 'sx', 'sxdg',
'swap', 'u0', 't', 'tdg', 'u1', 'p', 'ccx', 'ccz', 'delay', 'pauli'
])
return QasmSimulator._DEFAULT_BASIS_GATES
def _custom_instructions(self):
# pylint: disable = too-many-return-statements
if 'custom_instructions' in self._options_configuration:
return self._options_configuration['custom_instructions']
method = self._options.get('method', None)
if method in ['statevector', 'statevector_gpu', 'statevector_thrust']:
return sorted([
'roerror', 'kraus', 'snapshot', 'save_expval', 'save_expval_var',
'save_probabilities', 'save_probabilities_dict',
'save_amplitudes', 'save_amplitudes_sq', 'save_state',
'save_density_matrix', 'save_statevector', 'save_statevector_dict',
'set_statevector'
])
if method in ['density_matrix', 'density_matrix_gpu', 'density_matrix_thrust']:
return sorted([
'roerror', 'kraus', 'superop', 'snapshot', 'save_expval', 'save_expval_var',
'save_probabilities', 'save_probabilities_dict',
'save_state', 'save_density_matrix', 'save_amplitudes_sq',
'set_statevector', 'set_density_matrix'
])
if method == 'matrix_product_state':
return sorted([
'roerror', 'snapshot', 'kraus', 'save_expval', 'save_expval_var',
'save_probabilities', 'save_probabilities_dict',
'save_density_matrix', 'save_state', 'save_statevector',
'save_amplitudes', 'save_amplitudes_sq', 'save_matrix_product_state',
'set_matrix_product_state'])
if method == 'stabilizer':
return sorted([
'roerror', 'snapshot', 'save_expval', 'save_expval_var',
'save_probabilities', 'save_probabilities_dict',
'save_amplitudes_sq', 'save_state', 'save_stabilizer',
'set_stabilizer'
])
if method == 'extended_stabilizer':
return sorted(['roerror', 'snapshot', 'save_statevector'])
return QasmSimulator._DEFAULT_CUSTOM_INSTR
def _set_method_config(self, method=None):
super().set_options(method=method)
# Update configuration description and number of qubits
if method in ['statevector', 'statevector_gpu', 'statevector_thrust']:
description = 'A C++ statevector simulator with noise'
n_qubits = MAX_QUBITS_STATEVECTOR
elif method in ['density_matrix', 'density_matrix_gpu', 'density_matrix_thrust']:
description = 'A C++ density matrix simulator with noise'
n_qubits = MAX_QUBITS_STATEVECTOR // 2
elif method == 'matrix_product_state':
description = 'A C++ matrix product state simulator with noise'
n_qubits = 63 # TODO: not sure what to put here?
elif method == 'stabilizer':
description = 'A C++ Clifford stabilizer simulator with noise'
n_qubits = 10000 # TODO: estimate from memory
elif method == 'extended_stabilizer':
description = 'A C++ Clifford+T extended stabilizer simulator with noise'
n_qubits = 63 # TODO: estimate from memory
else:
# Clear options to default
description = None
n_qubits = None
self._set_configuration_option('description', description)
self._set_configuration_option('n_qubits', n_qubits)
| true
| true
|
1c41b17f7bb836d14623cc4a397356afafd65cf3
| 1,512
|
py
|
Python
|
haystack/nodes/summarizer/base.py
|
ArzelaAscoIi/haystack
|
be8f50c9e3de4e264b3f345f5f4b9c9ec518ed08
|
[
"Apache-2.0"
] | 1
|
2022-03-06T02:13:15.000Z
|
2022-03-06T02:13:15.000Z
|
haystack/nodes/summarizer/base.py
|
ArzelaAscoIi/haystack
|
be8f50c9e3de4e264b3f345f5f4b9c9ec518ed08
|
[
"Apache-2.0"
] | null | null | null |
haystack/nodes/summarizer/base.py
|
ArzelaAscoIi/haystack
|
be8f50c9e3de4e264b3f345f5f4b9c9ec518ed08
|
[
"Apache-2.0"
] | 1
|
2022-03-23T18:17:02.000Z
|
2022-03-23T18:17:02.000Z
|
from typing import List, Dict, Optional
from abc import abstractmethod
from haystack.schema import Document
from haystack.nodes.base import BaseComponent
class BaseSummarizer(BaseComponent):
"""
Abstract class for Summarizer
"""
outgoing_edges = 1
@abstractmethod
def predict(self, documents: List[Document], generate_single_summary: Optional[bool] = None) -> List[Document]:
"""
Abstract method for creating a summary.
:param documents: Related documents (e.g. coming from a retriever) that the answer shall be conditioned on.
:param generate_single_summary: Whether to generate a single summary for all documents or one summary per document.
If set to "True", all docs will be joined to a single string that will then
be summarized.
Important: The summary will depend on the order of the supplied documents!
:return: List of Documents, where Document.text contains the summarization and Document.meta["context"]
                 the original, non-summarized text
"""
pass
def run(self, documents: List[Document], generate_single_summary: Optional[bool] = None): # type: ignore
results: Dict = {"documents": []}
if documents:
results["documents"] = self.predict(documents=documents, generate_single_summary=generate_single_summary)
return results, "output_1"
| 38.769231
| 123
| 0.653439
|
from typing import List, Dict, Optional
from abc import abstractmethod
from haystack.schema import Document
from haystack.nodes.base import BaseComponent
class BaseSummarizer(BaseComponent):
outgoing_edges = 1
@abstractmethod
def predict(self, documents: List[Document], generate_single_summary: Optional[bool] = None) -> List[Document]:
pass
def run(self, documents: List[Document], generate_single_summary: Optional[bool] = None):
results: Dict = {"documents": []}
if documents:
results["documents"] = self.predict(documents=documents, generate_single_summary=generate_single_summary)
return results, "output_1"
| true
| true
|
1c41b19962ca40e88a19220ebf2fcd6921d38be5
| 12,224
|
py
|
Python
|
chainer/functions/activation/lstm.py
|
pyotr777/chainer
|
8532edbd921ab0ea98c9447957565777e4601662
|
[
"MIT"
] | null | null | null |
chainer/functions/activation/lstm.py
|
pyotr777/chainer
|
8532edbd921ab0ea98c9447957565777e4601662
|
[
"MIT"
] | null | null | null |
chainer/functions/activation/lstm.py
|
pyotr777/chainer
|
8532edbd921ab0ea98c9447957565777e4601662
|
[
"MIT"
] | null | null | null |
import numpy
import six
import chainer
from chainer.backends import cuda
from chainer.backends import intel64
from chainer import function
from chainer import function_node
from chainer.utils import type_check
def _extract_gates(x):
r = x.reshape((len(x), x.shape[1] // 4, 4) + x.shape[2:])
return [r[:, :, i] for i in six.moves.range(4)]
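# Illustrative note (added): for x of shape (batch, 4 * units) the reshape
# above produces (batch, units, 4), so r[:, :, i] is the i-th gate of shape
# (batch, units). E.g. batch=2, units=3: x.shape == (2, 12) yields four
# (2, 3) views in the order a, i, f, o. Note the gates are interleaved along
# the channel axis (every fourth channel), not contiguous blocks.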
def _sigmoid(x, xp=numpy):
half = x.dtype.type(0.5)
return xp.tanh(x * half) * half + half
def _grad_sigmoid(x):
return x * (1 - x)
def _grad_grad_sigmoid(x):
return x * (1 - x) * (1 - 2 * x)
def _grad_tanh(x):
return 1 - x * x
def _grad_grad_tanh(x, gx):
return -2 * x * gx
_preamble = '''
template <typename T> __device__ T sigmoid(T x) {
const T half = 0.5;
return tanh(x * half) * half + half;
}
template <typename T> __device__ T grad_sigmoid(T y) { return y * (1 - y); }
template <typename T> __device__ T grad_tanh(T y) { return 1 - y * y; }
#define COMMON_ROUTINE \
T aa = tanh(a); \
T ai = sigmoid(i_); \
T af = sigmoid(f); \
T ao = sigmoid(o);
'''
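# Illustrative note (added): COMMON_ROUTINE is textually expanded inside each
# elementwise kernel below; it turns the raw gate inputs (a, i_, f, o) into
# the activated gates aa = tanh(a) and ai/af/ao = sigmoid of the respective
# input, mirroring the NumPy branch of forward().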
class LSTM(function_node.FunctionNode):
"""Long short-term memory unit with forget gate.
    It has two inputs (c, x) and two outputs (c, h), where c indicates the cell
    state. x must have four times as many channels as the number of units.
"""
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 2)
c_type, x_type = in_types
type_check.expect(
c_type.dtype.kind == 'f',
x_type.dtype == c_type.dtype,
c_type.ndim >= 2,
x_type.ndim >= 2,
c_type.ndim == x_type.ndim,
x_type.shape[0] <= c_type.shape[0],
x_type.shape[1] == 4 * c_type.shape[1],
)
for i in six.moves.range(2, type_check.eval(c_type.ndim)):
type_check.expect(x_type.shape[i] == c_type.shape[i])
def forward(self, inputs):
self.retain_inputs((0, 1))
c_prev, x = inputs
a, i, f, o = _extract_gates(x)
batch = len(x)
if isinstance(x, chainer.get_cpu_array_types()):
if intel64.should_use_ideep('>=auto'):
xp = intel64.ideep.get_array_module(x)
else:
xp = numpy
a = xp.tanh(a)
i = _sigmoid(i, xp)
f = _sigmoid(f, xp)
o = _sigmoid(o, xp)
c_next = numpy.empty_like(c_prev)
c_next[:batch] = a * i + f * c_prev[:batch]
h = o * xp.tanh(c_next[:batch])
else:
c_next = cuda.cupy.empty_like(c_prev)
h = cuda.cupy.empty_like(c_next[:batch])
cuda.elementwise(
'T c_prev, T a, T i_, T f, T o', 'T c, T h',
'''
COMMON_ROUTINE;
c = aa * ai + af * c_prev;
h = ao * tanh(c);
''',
'lstm_fwd', preamble=_preamble)(
c_prev[:batch], a, i, f, o, c_next[:batch], h)
c_next[batch:] = c_prev[batch:]
self.retain_outputs((0,))
return c_next, h
def backward(self, indexes, grads):
grad_inputs = (
self.get_retained_inputs() + self.get_retained_outputs() + grads)
return LSTMGrad()(*grad_inputs)
class LSTMGrad(function.Function):
def forward(self, inputs):
xp = cuda.get_array_module(*inputs)
c_prev, x, c_next, gc, gh = inputs
batch = len(x)
gx = xp.empty_like(x)
ga, gi, gf, go = _extract_gates(gx)
# Consider the case that either gradient is not given
if gc is None:
gc_update = 0
gc_rest = 0
else:
gc_update = gc[:batch]
gc_rest = gc[batch:]
if gh is None:
gh = 0
a, i, f, o = _extract_gates(x)
if xp is numpy:
if intel64.should_use_ideep('>=auto'):
xp = intel64.ideep.get_array_module(x)
tanh_a = xp.tanh(a)
sig_i = _sigmoid(i, xp)
sig_f = _sigmoid(f, xp)
sig_o = _sigmoid(o, xp)
co = xp.tanh(c_next[:batch])
gc_prev = numpy.empty_like(c_prev)
# multiply f later
gc_prev[:batch] = gh * sig_o * _grad_tanh(co) + gc_update
gc = gc_prev[:batch]
ga[:] = gc * sig_i * _grad_tanh(tanh_a)
gi[:] = gc * tanh_a * _grad_sigmoid(sig_i)
gf[:] = gc * c_prev[:batch] * _grad_sigmoid(sig_f)
go[:] = gh * co * _grad_sigmoid(sig_o)
gc_prev[:batch] *= sig_f # multiply f here
gc_prev[batch:] = gc_rest
else:
gc_prev = xp.empty_like(c_prev)
cuda.elementwise(
'T c_prev, T c, T gc, T gh, T a, T i_, T f, T o',
'T gc_prev, T ga, T gi, T gf, T go',
'''
COMMON_ROUTINE;
T co = tanh(c);
T temp = gh * ao * grad_tanh(co) + gc;
ga = temp * ai * grad_tanh(aa);
gi = temp * aa * grad_sigmoid(ai);
gf = temp * c_prev * grad_sigmoid(af);
go = gh * co * grad_sigmoid(ao);
gc_prev = temp * af;
''',
'lstm_bwd', preamble=_preamble)(
c_prev[:batch], c_next[:batch], gc_update, gh, a, i, f, o,
gc_prev[:batch], ga, gi, gf, go)
gc_prev[batch:] = gc_rest
return gc_prev, gx
def backward(self, inputs, grads):
xp = cuda.get_array_module(*inputs)
c_prev, x, c, gc, gh = inputs
ggc_prev, ggx = grads
gc_prev = xp.empty_like(c_prev)
gx = xp.empty_like(x)
gc_next = xp.empty_like(c)
ggc = xp.empty_like(ggc_prev)
ggh = xp.empty_like(gh)
batch = len(x)
gc_prev[batch:] = 0
gc_next[batch:] = 0
ggc[batch:] = ggc_prev[batch:]
ggh[batch:] = 0
c_prev = c_prev[:batch]
c = c[:batch]
gc = gc[:batch]
ggc_prev = ggc_prev[:batch]
ggx = ggx[:batch]
a, i, f, o = _extract_gates(x)
gga, ggi, ggf, ggo = _extract_gates(ggx)
ga, gi, gf, go = _extract_gates(gx)
gc_prev[:batch], ga[:], gi[:], gf[:], go[:], gc_next[:batch], \
ggc[:batch], ggh[:batch] \
= lstm_grad_grad(
c_prev, a, i, f, o, c, gc, gh, ggc_prev, gga, ggi, ggf, ggo)
return gc_prev, gx, gc_next, ggc, ggh
def _cupy_sigmoid(x):
half = x.dtype.type(0.5)
return cuda.fusion.tanh(x * half) * half + half
@cuda.fuse()
def lstm_grad_grad(
c_prev, a, i, f, o, c, gc, gh, ggc_prev, gga, ggi, ggf, ggo):
sig_o = _cupy_sigmoid(o)
gsig_o = _grad_sigmoid(sig_o)
ggsig_o = _grad_grad_sigmoid(sig_o)
sig_i = _cupy_sigmoid(i)
gsig_i = _grad_sigmoid(sig_i)
ggsig_i = _grad_grad_sigmoid(sig_i)
sig_f = _cupy_sigmoid(f)
gsig_f = _grad_sigmoid(sig_f)
ggsig_f = _grad_grad_sigmoid(sig_f)
tanh_a = cuda.fusion.tanh(a)
gtanh_a = _grad_tanh(tanh_a)
ggtanh_a = _grad_grad_tanh(tanh_a, gtanh_a)
tanh_c = cuda.fusion.tanh(c)
gtanh_c = _grad_tanh(tanh_c)
ggtanh_c = _grad_grad_tanh(tanh_c, gtanh_c)
gc_bar = gh * sig_o * gtanh_c + gc
gc_prev = ggf * gc_bar * gsig_f
ga = (gga * sig_i * ggtanh_a +
ggi * gtanh_a * gsig_i) * gc_bar
gi = (gga * gtanh_a * gsig_i +
ggi * tanh_a * ggsig_i) * gc_bar
gf = (ggc_prev * (gh * sig_o * gtanh_c + gc) * gsig_f +
ggf * gc_bar * c_prev * ggsig_f)
ggc = (
ggc_prev * sig_f +
gga * sig_i * gtanh_a +
ggi * tanh_a * gsig_i +
ggf * c_prev * gsig_f)
dgc_do = gh * gsig_o * gtanh_c
go = ggc * dgc_do + ggo * gh * tanh_c * ggsig_o
dgc_dc = gh * sig_o * ggtanh_c
gc_next = ggc * dgc_dc + ggo * gh * gtanh_c * gsig_o
ggh = ggc * sig_o * gtanh_c + ggo * tanh_c * gsig_o
return gc_prev, ga, gi, gf, go, gc_next, ggc, ggh
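# Illustrative note (added): @cuda.fuse() compiles the elementwise expressions
# above into a single fused GPU kernel on CuPy inputs; with NumPy inputs it is
# expected to fall back to executing the plain Python arithmetic.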
def lstm(c_prev, x):
"""Long Short-Term Memory units as an activation function.
    This function implements LSTM units with forget gates. Let ``c_prev`` be
    the previous cell state and ``x`` the input array.
    First, the input array ``x`` is split into four arrays
    :math:`a, i, f, o` of the same shapes along the second axis. This means
    that the second axis of ``x`` must be four times as large as that of
    ``c_prev``.
    The split input arrays correspond to:
- :math:`a` : sources of cell input
- :math:`i` : sources of input gate
- :math:`f` : sources of forget gate
- :math:`o` : sources of output gate
Second, it computes the updated cell state ``c`` and the outgoing signal
``h`` as:
.. math::
c &= \\tanh(a) \\sigma(i)
+ c_{\\text{prev}} \\sigma(f), \\\\
h &= \\tanh(c) \\sigma(o),
where :math:`\\sigma` is the elementwise sigmoid function.
These are returned as a tuple of two variables.
This function supports variable length inputs. The mini-batch size of
the current input must be equal to or smaller than that of the previous
    one. When the mini-batch size of ``x`` is smaller than that of ``c``, this
    function only updates ``c[0:len(x)]`` and leaves the rest of ``c``,
    ``c[len(x):]``, unchanged.
    So, please sort input sequences in descending order of length before
    applying the function.
Args:
c_prev (:class:`~chainer.Variable` or :class:`numpy.ndarray` or \
:class:`cupy.ndarray`):
Variable that holds the previous cell state. The cell state
should be a zero array or the output of the previous call of LSTM.
x (:class:`~chainer.Variable` or :class:`numpy.ndarray` or \
:class:`cupy.ndarray`):
Variable that holds the sources of cell input, input gate, forget
gate and output gate. It must have the second dimension whose size
is four times of that of the cell state.
Returns:
tuple: Two :class:`~chainer.Variable` objects ``c`` and ``h``.
``c`` is the updated cell state. ``h`` indicates the outgoing signal.
See the original paper proposing LSTM with forget gates:
`Long Short-Term Memory in Recurrent Neural Networks \
<http://www.felixgers.de/papers/phd.pdf>`_.
.. seealso::
:class:`~chainer.links.LSTM`
.. admonition:: Example
Assuming ``y`` is the current incoming signal, ``c`` is the previous
cell state, and ``h`` is the previous outgoing signal from an ``lstm``
function. Each of ``y``, ``c`` and ``h`` has ``n_units`` channels.
Most typical preparation of ``x`` is:
>>> n_units = 100
>>> y = chainer.Variable(np.zeros((1, n_units), np.float32))
>>> h = chainer.Variable(np.zeros((1, n_units), np.float32))
>>> c = chainer.Variable(np.zeros((1, n_units), np.float32))
>>> model = chainer.Chain()
>>> with model.init_scope():
... model.w = L.Linear(n_units, 4 * n_units)
... model.v = L.Linear(n_units, 4 * n_units)
>>> x = model.w(y) + model.v(h)
>>> c, h = F.lstm(c, x)
        This corresponds to computing the input array ``x``, i.e. the input
        sources :math:`a, i, f, o`, from the current incoming signal ``y`` and
        the previous outgoing signal ``h``. Different parameters are used for
        different kinds of input sources.
.. note::
We use the naming rule below.
- incoming signal
            The formal input in the LSTM formulation (e.g. in NLP, a word
            vector or the output of a lower RNN layer). The input of
            :class:`chainer.links.LSTM` is the *incoming signal*.
        - input array
            The array obtained by linearly transforming the *incoming signal*
            and the previous outgoing signal. The *input array* contains four
sources, the sources of cell input, input gate, forget gate and
output gate. The input of :class:`chainer.functions.LSTM` is the
*input array*.
"""
return LSTM().apply((c_prev, x))
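# Illustrative sketch (added, not part of the original module): feeding
# variable-length mini-batches, per the docstring note above. Names like
# ``model``, ``y_t`` and ``n_units`` are placeholders, not chainer API.
#
# >>> c = chainer.Variable(np.zeros((3, n_units), np.float32))  # 3 sequences
# >>> x_t = model.w(y_t)              # y_t holds only the 2 still-active ones
# >>> c, h = F.lstm(c, x_t)           # updates c[:2]; c[2:] is carried over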
| 33.307902
| 79
| 0.558082
|
import numpy
import six
import chainer
from chainer.backends import cuda
from chainer.backends import intel64
from chainer import function
from chainer import function_node
from chainer.utils import type_check
def _extract_gates(x):
r = x.reshape((len(x), x.shape[1] // 4, 4) + x.shape[2:])
return [r[:, :, i] for i in six.moves.range(4)]
def _sigmoid(x, xp=numpy):
half = x.dtype.type(0.5)
return xp.tanh(x * half) * half + half
def _grad_sigmoid(x):
return x * (1 - x)
def _grad_grad_sigmoid(x):
return x * (1 - x) * (1 - 2 * x)
def _grad_tanh(x):
return 1 - x * x
def _grad_grad_tanh(x, gx):
return -2 * x * gx
_preamble = '''
template <typename T> __device__ T sigmoid(T x) {
const T half = 0.5;
return tanh(x * half) * half + half;
}
template <typename T> __device__ T grad_sigmoid(T y) { return y * (1 - y); }
template <typename T> __device__ T grad_tanh(T y) { return 1 - y * y; }
#define COMMON_ROUTINE \
T aa = tanh(a); \
T ai = sigmoid(i_); \
T af = sigmoid(f); \
T ao = sigmoid(o);
'''
class LSTM(function_node.FunctionNode):
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 2)
c_type, x_type = in_types
type_check.expect(
c_type.dtype.kind == 'f',
x_type.dtype == c_type.dtype,
c_type.ndim >= 2,
x_type.ndim >= 2,
c_type.ndim == x_type.ndim,
x_type.shape[0] <= c_type.shape[0],
x_type.shape[1] == 4 * c_type.shape[1],
)
for i in six.moves.range(2, type_check.eval(c_type.ndim)):
type_check.expect(x_type.shape[i] == c_type.shape[i])
def forward(self, inputs):
self.retain_inputs((0, 1))
c_prev, x = inputs
a, i, f, o = _extract_gates(x)
batch = len(x)
if isinstance(x, chainer.get_cpu_array_types()):
if intel64.should_use_ideep('>=auto'):
xp = intel64.ideep.get_array_module(x)
else:
xp = numpy
a = xp.tanh(a)
i = _sigmoid(i, xp)
f = _sigmoid(f, xp)
o = _sigmoid(o, xp)
c_next = numpy.empty_like(c_prev)
c_next[:batch] = a * i + f * c_prev[:batch]
h = o * xp.tanh(c_next[:batch])
else:
c_next = cuda.cupy.empty_like(c_prev)
h = cuda.cupy.empty_like(c_next[:batch])
cuda.elementwise(
'T c_prev, T a, T i_, T f, T o', 'T c, T h',
'''
COMMON_ROUTINE;
c = aa * ai + af * c_prev;
h = ao * tanh(c);
''',
'lstm_fwd', preamble=_preamble)(
c_prev[:batch], a, i, f, o, c_next[:batch], h)
c_next[batch:] = c_prev[batch:]
self.retain_outputs((0,))
return c_next, h
def backward(self, indexes, grads):
grad_inputs = (
self.get_retained_inputs() + self.get_retained_outputs() + grads)
return LSTMGrad()(*grad_inputs)
class LSTMGrad(function.Function):
def forward(self, inputs):
xp = cuda.get_array_module(*inputs)
c_prev, x, c_next, gc, gh = inputs
batch = len(x)
gx = xp.empty_like(x)
ga, gi, gf, go = _extract_gates(gx)
if gc is None:
gc_update = 0
gc_rest = 0
else:
gc_update = gc[:batch]
gc_rest = gc[batch:]
if gh is None:
gh = 0
a, i, f, o = _extract_gates(x)
if xp is numpy:
if intel64.should_use_ideep('>=auto'):
xp = intel64.ideep.get_array_module(x)
tanh_a = xp.tanh(a)
sig_i = _sigmoid(i, xp)
sig_f = _sigmoid(f, xp)
sig_o = _sigmoid(o, xp)
co = xp.tanh(c_next[:batch])
gc_prev = numpy.empty_like(c_prev)
gc_prev[:batch] = gh * sig_o * _grad_tanh(co) + gc_update
gc = gc_prev[:batch]
ga[:] = gc * sig_i * _grad_tanh(tanh_a)
gi[:] = gc * tanh_a * _grad_sigmoid(sig_i)
gf[:] = gc * c_prev[:batch] * _grad_sigmoid(sig_f)
go[:] = gh * co * _grad_sigmoid(sig_o)
gc_prev[:batch] *= sig_f
gc_prev[batch:] = gc_rest
else:
gc_prev = xp.empty_like(c_prev)
cuda.elementwise(
'T c_prev, T c, T gc, T gh, T a, T i_, T f, T o',
'T gc_prev, T ga, T gi, T gf, T go',
'''
COMMON_ROUTINE;
T co = tanh(c);
T temp = gh * ao * grad_tanh(co) + gc;
ga = temp * ai * grad_tanh(aa);
gi = temp * aa * grad_sigmoid(ai);
gf = temp * c_prev * grad_sigmoid(af);
go = gh * co * grad_sigmoid(ao);
gc_prev = temp * af;
''',
'lstm_bwd', preamble=_preamble)(
c_prev[:batch], c_next[:batch], gc_update, gh, a, i, f, o,
gc_prev[:batch], ga, gi, gf, go)
gc_prev[batch:] = gc_rest
return gc_prev, gx
def backward(self, inputs, grads):
xp = cuda.get_array_module(*inputs)
c_prev, x, c, gc, gh = inputs
ggc_prev, ggx = grads
gc_prev = xp.empty_like(c_prev)
gx = xp.empty_like(x)
gc_next = xp.empty_like(c)
ggc = xp.empty_like(ggc_prev)
ggh = xp.empty_like(gh)
batch = len(x)
gc_prev[batch:] = 0
gc_next[batch:] = 0
ggc[batch:] = ggc_prev[batch:]
ggh[batch:] = 0
c_prev = c_prev[:batch]
c = c[:batch]
gc = gc[:batch]
ggc_prev = ggc_prev[:batch]
ggx = ggx[:batch]
a, i, f, o = _extract_gates(x)
gga, ggi, ggf, ggo = _extract_gates(ggx)
ga, gi, gf, go = _extract_gates(gx)
gc_prev[:batch], ga[:], gi[:], gf[:], go[:], gc_next[:batch], \
ggc[:batch], ggh[:batch] \
= lstm_grad_grad(
c_prev, a, i, f, o, c, gc, gh, ggc_prev, gga, ggi, ggf, ggo)
return gc_prev, gx, gc_next, ggc, ggh
def _cupy_sigmoid(x):
half = x.dtype.type(0.5)
return cuda.fusion.tanh(x * half) * half + half
@cuda.fuse()
def lstm_grad_grad(
c_prev, a, i, f, o, c, gc, gh, ggc_prev, gga, ggi, ggf, ggo):
sig_o = _cupy_sigmoid(o)
gsig_o = _grad_sigmoid(sig_o)
ggsig_o = _grad_grad_sigmoid(sig_o)
sig_i = _cupy_sigmoid(i)
gsig_i = _grad_sigmoid(sig_i)
ggsig_i = _grad_grad_sigmoid(sig_i)
sig_f = _cupy_sigmoid(f)
gsig_f = _grad_sigmoid(sig_f)
ggsig_f = _grad_grad_sigmoid(sig_f)
tanh_a = cuda.fusion.tanh(a)
gtanh_a = _grad_tanh(tanh_a)
ggtanh_a = _grad_grad_tanh(tanh_a, gtanh_a)
tanh_c = cuda.fusion.tanh(c)
gtanh_c = _grad_tanh(tanh_c)
ggtanh_c = _grad_grad_tanh(tanh_c, gtanh_c)
gc_bar = gh * sig_o * gtanh_c + gc
gc_prev = ggf * gc_bar * gsig_f
ga = (gga * sig_i * ggtanh_a +
ggi * gtanh_a * gsig_i) * gc_bar
gi = (gga * gtanh_a * gsig_i +
ggi * tanh_a * ggsig_i) * gc_bar
gf = (ggc_prev * (gh * sig_o * gtanh_c + gc) * gsig_f +
ggf * gc_bar * c_prev * ggsig_f)
ggc = (
ggc_prev * sig_f +
gga * sig_i * gtanh_a +
ggi * tanh_a * gsig_i +
ggf * c_prev * gsig_f)
dgc_do = gh * gsig_o * gtanh_c
go = ggc * dgc_do + ggo * gh * tanh_c * ggsig_o
dgc_dc = gh * sig_o * ggtanh_c
gc_next = ggc * dgc_dc + ggo * gh * gtanh_c * gsig_o
ggh = ggc * sig_o * gtanh_c + ggo * tanh_c * gsig_o
return gc_prev, ga, gi, gf, go, gc_next, ggc, ggh
def lstm(c_prev, x):
return LSTM().apply((c_prev, x))
| true
| true
|
1c41b2b1ff17ba4ae909b0fcd1716087670f269c
| 32,333
|
py
|
Python
|
Integrations/Active_Directory_Query/Active_Directory_Query.py
|
danikdanik/content
|
6749affdb6d3567440ab4d7b60180fdde1486cb3
|
[
"MIT"
] | 1
|
2020-08-02T18:00:00.000Z
|
2020-08-02T18:00:00.000Z
|
Integrations/Active_Directory_Query/Active_Directory_Query.py
|
danikdanik/content
|
6749affdb6d3567440ab4d7b60180fdde1486cb3
|
[
"MIT"
] | 4
|
2021-03-26T00:33:20.000Z
|
2021-12-13T20:48:36.000Z
|
Integrations/Active_Directory_Query/Active_Directory_Query.py
|
danikdanik/content
|
6749affdb6d3567440ab4d7b60180fdde1486cb3
|
[
"MIT"
] | 1
|
2020-07-22T09:09:26.000Z
|
2020-07-22T09:09:26.000Z
|
import demistomock as demisto
from CommonServerPython import *
from ldap3 import Server, Connection, NTLM, SUBTREE, ALL_ATTRIBUTES, Tls
from ldap3.core.exceptions import LDAPSocketOpenError
from ldap3.extend import microsoft
import ssl
from datetime import datetime
# global connection
conn = None
''' GLOBAL VARS '''
# userAccountControl is a bitmask used to store a number of settings.
# find more at:
# https://support.microsoft.com/en-gb/help/305144/how-to-use-the-useraccountcontrol-flags-to-manipulate-user-account-pro
COMMON_ACCOUNT_CONTROL_FLAGS = {
    512: "Enabled Account",
    514: "Disabled account",
    544: "Account Enabled - Require user to change password at first logon",
    4096: "Workstation/server",
    66048: "Enabled, password never expires",
    66050: "Disabled, password never expires",
    66080: "Enabled, password never expires, password not required.",
    532480: "Domain controller"
}
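# Illustrative note (added): userAccountControl comes back as a plain integer,
# so COMMON_ACCOUNT_CONTROL_FLAGS.get(514) -> "Disabled account" turns the
# bitmask into a readable label; unmapped values fall back to the raw number
# (see search_users below).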
NORMAL_ACCOUNT = 512
DISABLED_ACCOUNT = 514
# common attributes for specific AD objects
DEFAULT_PERSON_ATTRIBUTES = [
'name',
'displayName',
'memberOf',
'mail',
'samAccountName',
'manager',
'userAccountControl'
]
DEFAULT_COMPUTER_ATTRIBUTES = [
'name',
'memberOf'
]
''' HELPER FUNCTIONS '''
def initialize_server(host, port, secure_connection, unsecure):
"""
uses the instance configuration to initialize the LDAP server
:param host: host or ip
:type host: string
:param port: port or None
:type port: number
:param secure_connection: SSL or None
:type secure_connection: string
:param unsecure: trust any cert
:type unsecure: boolean
:return: ldap3 Server
:rtype: Server
"""
if secure_connection == "SSL":
# intialize server with ssl
# port is configured by default as 389 or as 636 for LDAPS if not specified in configuration
demisto.debug("initializing sever with ssl (unsecure: {}). port: {}". format(unsecure, port or 'default(636)'))
if not unsecure:
demisto.debug("will require server certificate.")
tls = Tls(validate=ssl.CERT_REQUIRED)
if port:
return Server(host, port=port, use_ssl=True, tls=tls)
return Server(host, use_ssl=True, tls=tls)
if port:
return Server(host, port=port, use_ssl=True)
return Server(host, use_ssl=True)
demisto.debug("initializing server without secure connection. port: {}". format(port or 'default(389)'))
if port:
return Server(host, port=port)
return Server(host)
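# Illustrative usage sketch (added): the three configuration shapes this helper
# supports. The host and ports below are made-up examples.
#
# initialize_server('ad.example.com', 636, 'SSL', unsecure=False)  # LDAPS, verify certificate
# initialize_server('ad.example.com', 636, 'SSL', unsecure=True)   # LDAPS, trust any certificate
# initialize_server('ad.example.com', 389, None, unsecure=False)   # plain LDAP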
def account_entry(person_object, custome_attributes):
# create an account entry from a person objects
account = {
'Type': 'AD',
'ID': person_object.get('dn'),
'Email': person_object.get('email'),
'Username': person_object.get('samAccountName'),
'DisplayName': person_object.get('displayName'),
        'Manager': person_object.get('manager'),
'Groups': person_object.get('memberOf')
}
for attr in custome_attributes:
account[attr] = person_object[attr]
return account
def endpoint_entry(computer_object, custome_attributes):
# create an endpoint entry from a computer object
endpoint = {
'Type': 'AD',
'ID': computer_object.get('dn'),
'Hostname': computer_object.get('name'),
'Groups': computer_object.get('memberOf')
}
for attr in custome_attributes:
endpoint[attr] = computer_object[attr]
return endpoint
def base_dn_verified(base_dn):
    # search AD with a simple query to test that the base DN is configured correctly
try:
search(
"(objectClass=user)",
base_dn,
size_limit=1
)
except Exception as e:
demisto.info(str(e))
return False
return True
''' COMMANDS '''
''' SEARCH '''
def search(search_filter, search_base, attributes=None, size_limit=0, time_limit=0):
"""
find entries in the DIT
Args:
search_base: the location in the DIT where the search will start
        search_filter: LDAP query string
attributes: the attributes to specify for each entry found in the DIT
"""
success = conn.search(
search_base=search_base,
search_filter=search_filter,
attributes=attributes,
size_limit=size_limit,
time_limit=time_limit
)
if not success:
raise("Search failed")
return conn.entries
def search_with_paging(search_filter, search_base, attributes=None, page_size=100, size_limit=0, time_limit=0):
"""
find entries in the DIT
Args:
search_base: the location in the DIT where the search will start
        search_filter: LDAP query string
        attributes: the attributes to specify for each entry found in the DIT
"""
total_entries = 0
cookie = None
start = datetime.now()
entries = []
entries_left_to_fetch = size_limit
while True:
if 0 < entries_left_to_fetch < page_size:
page_size = entries_left_to_fetch
conn.search(
search_base,
search_filter,
search_scope=SUBTREE,
attributes=attributes,
paged_size=page_size,
paged_cookie=cookie
)
entries_left_to_fetch -= len(conn.entries)
total_entries += len(conn.entries)
cookie = conn.result['controls']['1.2.840.113556.1.4.319']['value']['cookie']
        time_diff = (datetime.now() - start).seconds
entries.extend(conn.entries)
# stop when: 1.reached size limit 2.reached time limit 3. no cookie
if (size_limit and size_limit <= total_entries) or (time_limit and time_diff >= time_limit) or (not cookie):
break
    # keep the raw entry for raw content (backward compatibility)
    raw = []
    # flatten the entries
flat = []
for entry in entries:
entry = json.loads(entry.entry_to_json())
flat_entry = {
'dn': entry['dn']
}
for attr in entry.get('attributes', {}):
flat_entry[attr] = entry['attributes'][attr]
raw.append(entry)
flat.append(flat_entry)
return {
"raw": raw,
"flat": flat
}
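# Illustrative usage sketch (added): fetch up to 500 user entries in pages of
# 100; paging relies on the 1.2.840.113556.1.4.319 paged-results cookie handled
# above. The base DN is a made-up example.
#
# results = search_with_paging(
#     '(objectClass=user)', 'DC=example,DC=com',
#     attributes=['sAMAccountName'], page_size=100, size_limit=500)
# for entry in results['flat']:
#     demisto.debug(entry['dn'])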
def user_dn(sam_account_name, search_base):
search_filter = '(&(objectClass=user)(sAMAccountName={}))'.format(sam_account_name)
entries = search(
search_filter,
search_base
)
if not entries:
raise Exception("Could not get full DN for user with sAMAccountName '{}'".format(sam_account_name))
entry = json.loads(entries[0].entry_to_json())
return entry['dn']
def computer_dn(computer_name, search_base):
    search_filter = '(&(objectClass=user)(objectCategory=computer)(name={}))'.format(computer_name)
    entries = search(
        search_filter,
        search_base
    )
    if not entries:
        raise Exception("Could not get full DN for computer with name '{}'".format(computer_name))
    entry = json.loads(entries[0].entry_to_json())
    return entry['dn']
def group_dn(group_name, search_base):
search_filter = '(&(objectClass=group)(cn={}))'.format(group_name)
entries = search(
search_filter,
search_base
)
if not entries:
raise Exception("Could not get full DN for group with name '{}'".format(group_name))
entry = json.loads(entries[0].entry_to_json())
return entry['dn']
def free_search(default_base_dn, page_size):
args = demisto.args()
search_filter = args.get('filter')
size_limit = int(args.get('size-limit', '0'))
time_limit = int(args.get('time-limit', '0'))
search_base = args.get('base-dn') or default_base_dn
attributes = args.get('attributes')
context_output = args.get('context-output')
# if ALL was specified - get all the object's attributes, else expect a string of comma separated values
if attributes:
attributes = ALL_ATTRIBUTES if attributes == 'ALL' else attributes.split(',')
entries = search_with_paging(
search_filter,
search_base,
attributes=attributes,
size_limit=size_limit,
time_limit=time_limit,
page_size=page_size
)
ec = {} if context_output == 'no' else {'ActiveDirectory.Search(obj.dn == val.dn)': entries['flat']}
demisto_entry = {
'ContentsFormat': formats['json'],
'Type': entryTypes['note'],
'Contents': entries['raw'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown("Active Directory Search", entries['flat']),
'EntryContext': ec
}
demisto.results(demisto_entry)
def search_users(default_base_dn, page_size):
    # this command is equivalent to the ADGetUser script
    # will perform a custom search to find users by a specific (one) attribute specified by the user
args = demisto.args()
attributes = []
custome_attributes = []
# zero is actually no limitation
limit = int(args.get('limit', '0'))
# default query - list all users
query = "(objectClass=User)(objectCategory=person)"
# query by user DN
if args.get('dn'):
query = "(&(objectClass=User)(objectCategory=person)(dn={}))".format(args['dn'])
# query by name
if args.get('name'):
query = "(&(objectClass=User)(objectCategory=person)(cn={}))".format(args['name'])
# query by email
if args.get('email'):
query = "(&(objectClass=User)(objectCategory=person)(mail={}))".format(args['email'])
# query by sAMAccountName
if args.get('username'):
query = "(&(objectClass=User)(objectCategory=person)(sAMAccountName={}))".format(args['username'])
# query by custom object attribute
if args.get('custom-field-type'):
if not args.get('custom-field-data'):
raise Exception('Please specify "custom-field-data" as well when quering by "custom-field-type"')
query = "(&(objectClass=User)(objectCategory=person)({}={}))".format(
args['custom-field-type'], args['ustom-field-data'])
if args.get('attributes'):
custome_attributes = args['attributes'].split(",")
attributes = set(custome_attributes + DEFAULT_PERSON_ATTRIBUTES)
entries = search_with_paging(
query,
default_base_dn,
attributes=attributes,
size_limit=limit,
page_size=page_size
)
accounts = [account_entry(entry, custome_attributes) for entry in entries['flat']]
if args.get('user-account-control-out', '') == 'true':
# display a literal translation of the numeric account control flag
for i, user in enumerate(entries['flat']):
flag_no = user.get('userAccountControl')[0]
            entries['flat'][i]['userAccountControl'] = COMMON_ACCOUNT_CONTROL_FLAGS.get(flag_no) or flag_no
demisto_entry = {
'ContentsFormat': formats['json'],
'Type': entryTypes['note'],
'Contents': entries['raw'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown("Active Directory - Get Users", entries['flat']),
'EntryContext': {
'ActiveDirectory.Users(obj.dn == val.dn)': entries['flat'],
            # 'backward compatibility' with ADGetUser script
'Account(obj.ID == val.ID)': accounts
}
}
demisto.results(demisto_entry)
def search_computers(default_base_dn, page_size):
# this command is equivalent to ADGetComputer script
args = demisto.args()
attributes = []
custome_attributes = []
# default query - list all users (computer category)
query = "(&(objectClass=user)(objectCategory=computer))"
# query by user DN
if args.get('dn'):
query = "(&(objectClass=user)(objectCategory=computer)(dn={}))".format(args['dn'])
# query by name
if args.get('name'):
query = "(&(objectClass=user)(objectCategory=computer)(name={}))".format(args['name'])
# query by custom object attribute
if args.get('custom-field-type'):
if not args.get('custom-field-data'):
            raise Exception('Please specify "custom-field-data" as well when querying by "custom-field-type"')
        query = "(&(objectClass=user)(objectCategory=computer)({}={}))".format(
            args['custom-field-type'], args['custom-field-data'])
if args.get('attributes'):
custome_attributes = args['attributes'].split(",")
attributes = set(custome_attributes + DEFAULT_COMPUTER_ATTRIBUTES)
entries = search_with_paging(
query,
default_base_dn,
attributes=attributes,
page_size=page_size
)
endpoints = [endpoint_entry(entry, custome_attributes) for entry in entries['flat']]
demisto_entry = {
'ContentsFormat': formats['json'],
'Type': entryTypes['note'],
'Contents': entries['raw'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown("Active Directory - Get Computers", entries['flat']),
'EntryContext': {
'ActiveDirectory.Computers(obj.dn == val.dn)': entries['flat'],
            # 'backward compatibility' with ADGetComputer script
'Endpoint(obj.ID == val.ID)': endpoints
}
}
demisto.results(demisto_entry)
def search_group_members(default_base_dn, page_size):
# this command is equivalent to ADGetGroupMembers script
args = demisto.args()
member_type = args.get('member-type')
group_dn = args.get('group-dn')
custome_attributes = []
default_attributes = DEFAULT_PERSON_ATTRIBUTES if member_type == 'person' else DEFAULT_COMPUTER_ATTRIBUTES
if args.get('attributes'):
custome_attributes = args['attributes'].split(",")
attributes = set(custome_attributes + default_attributes)
    # nested search: the 1.2.840.113556.1.4.1941 matching rule resolves group membership transitively
query = "(&(objectCategory={})(objectClass=user)(memberOf:1.2.840.113556.1.4.1941:={}))".format(member_type,
group_dn)
entries = search_with_paging(
query,
default_base_dn,
attributes=attributes,
page_size=page_size
)
members = [{'dn': entry['dn'], 'category': member_type} for entry in entries['flat']]
demisto_entry = {
'ContentsFormat': formats['json'],
'Type': entryTypes['note'],
'Contents': entries['raw'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown("Active Directory - Get Group Members", entries['flat']),
'EntryContext': {
'ActiveDirectory.Groups(obj.dn ==' + group_dn + ')': {
'dn': group_dn,
'members': members
}
}
}
if member_type == 'person':
demisto_entry['EntryContext']['ActiveDirectory.Users(obj.dn == val.dn)'] = entries['flat']
demisto_entry['EntryContext']['Account'] = [account_entry(
entry, custome_attributes) for entry in entries['flat']]
else:
demisto_entry['EntryContext']['ActiveDirectory.Computers(obj.dn == val.dn)'] = entries['flat']
demisto_entry['EntryContext']['Endpoint'] = [endpoint_entry(
entry, custome_attributes) for entry in entries['flat']]
demisto.results(demisto_entry)
''' DATABASE OPERATIONS '''
''' CREATE OBJECT'''
def create_user():
args = demisto.args()
object_classes = ["top", "person", "organizationalPerson", "user"]
user_dn = args.get('user-dn')
username = args.get("username")
password = args.get("password")
custome_attributes = args.get('custom-attributes')
attributes = {
"samAccountName": username
}
# set common user attributes
if args.get('display-name'):
attributes['displayName'] = args['display-name']
if args.get('description'):
attributes['description'] = args['description']
if args.get('email'):
attributes['mail'] = args['email']
if args.get('telephone-number'):
attributes['telephoneNumber'] = args['telephone-number']
if args.get('title'):
attributes['title'] = args['title']
# set user custome attributes
if custome_attributes:
try:
custome_attributes = json.loads(custome_attributes)
except Exception as e:
demisto.info(str(e))
raise Exception(
"Failed to parse custom attributes argument. Please see an example of this argument in the description."
)
for attribute_name, attribute_value in custome_attributes.items():
            # may override the default attribute settings
attributes[attribute_name] = attribute_value
# add user
success = conn.add(user_dn, object_classes, attributes)
if not success:
raise Exception("Failed to create user")
# set user password
success = conn.extend.microsoft.modify_password(user_dn, password)
if not success:
raise Exception("Failed to reset user password")
# enable user and expire password
modification = {
# enable user
'userAccountControl': [('MODIFY_REPLACE', NORMAL_ACCOUNT)],
# set to 0, to force password change on next login
"pwdLastSet": [('MODIFY_REPLACE', "0")]
}
modify_object(user_dn, modification)
demisto_entry = {
'ContentsFormat': formats['text'],
'Type': entryTypes['note'],
'Contents': "Created user with DN: {}".format(user_dn)
}
demisto.results(demisto_entry)
def create_contact():
args = demisto.args()
object_classes = ["top", "person", "organizationalPerson", "contact"]
contact_dn = args.get('contact-dn')
# set contact attributes
attributes = {}
if args.get('custom-attributes'):
try:
attributes = json.loads(args['custom-attributes'])
except Exception as e:
demisto.info(str(e))
raise Exception(
                'Failed to parse custom attributes argument. Please see an example of this argument in the argument description.'
)
# set common user attributes
if args.get('diaply-name'):
attributes['displayName'] = args['diaply-name']
if args.get('description'):
attributes['description'] = args['description']
if args.get('email'):
attributes['mail'] = args['email']
if args.get('telephone-number'):
attributes['telephoneNumber'] = args['telephone-number']
if args.get('title'):
attributes['title'] = args['title']
# add contact
success = conn.add(contact_dn, object_classes, attributes)
if not success:
raise Exception("Failed to create contact")
demisto_entry = {
'ContentsFormat': formats['text'],
'Type': entryTypes['note'],
'Contents': "Created contact with DN: {}".format(contact_dn)
}
demisto.results(demisto_entry)
''' UPDATE OBJECT '''
def modify_object(dn, modification):
"""
    modifies an object in the DIT
"""
success = conn.modify(dn, modification)
if not success:
raise Exception("Failed to update object {} with the following modofication: {}".format(
dn, json.dumps(modification)))
def update_user(default_base_dn):
args = demisto.args()
# get user DN
sam_account_name = args.get('username')
attribute_name = args.get('attribute-name')
attribute_value = args.get('attribute-value')
search_base = args.get('base-dn') or default_base_dn
dn = user_dn(sam_account_name, search_base)
modification = {}
modification[attribute_name] = [('MODIFY_REPLACE', attribute_value)]
# modify user
modify_object(dn, modification)
demisto_entry = {
'ContentsFormat': formats['text'],
'Type': entryTypes['note'],
'Contents': "Updated user's {} to {} ".format(attribute_name, attribute_value)
}
demisto.results(demisto_entry)
def update_contact():
args = demisto.args()
contact_dn = args.get('contact-dn')
modification = {}
modification[args.get('attribute-name')] = [('MODIFY_REPLACE', args.get('attribute-value'))]
# modify
modify_object(contact_dn, modification)
demisto_entry = {
'ContentsFormat': formats['text'],
'Type': entryTypes['note'],
'Contents': "Updated contact's {} to: {} ".format(args.get('attribute-name'), args.get('attribute-value'))
}
demisto.results(demisto_entry)
def modify_computer_ou(default_base_dn):
args = demisto.args()
computer_name = args.get('computer-name')
dn = computer_dn(computer_name, args.get('base-dn') or default_base_dn)
success = conn.modify_dn(dn, "CN={}".format(computer_name), new_superior=args.get('full-superior-dn'))
if not success:
raise Exception("Failed to modify computer OU")
demisto_entry = {
'ContentsFormat': formats['text'],
'Type': entryTypes['note'],
'Contents': "Moved computer {} to {}".format(computer_name, args.get('full-superior-dn'))
}
demisto.results(demisto_entry)
def expire_user_password(default_base_dn):
args = demisto.args()
# get user DN
sam_account_name = args.get('username')
search_base = args.get('base-dn') or default_base_dn
dn = user_dn(sam_account_name, search_base)
modification = {
# set to 0, to force password change on next login
"pwdLastSet": [('MODIFY_REPLACE', "0")]
}
# modify user
modify_object(dn, modification)
demisto_entry = {
'ContentsFormat': formats['text'],
'Type': entryTypes['note'],
'Contents': "Expired password successfully"
}
demisto.results(demisto_entry)
def set_user_password(default_base_dn):
args = demisto.args()
# get user DN
sam_account_name = args.get('username')
password = args.get('password')
search_base = args.get('base-dn') or default_base_dn
dn = user_dn(sam_account_name, search_base)
# set user password
success = conn.extend.microsoft.modify_password(dn, password)
if not success:
raise Exception("Failed to reset user password")
demisto_entry = {
'ContentsFormat': formats['text'],
'Type': entryTypes['note'],
'Contents': "User password successfully set"
}
demisto.results(demisto_entry)
def enable_user(default_base_dn):
args = demisto.args()
# get user DN
sam_account_name = args.get('username')
search_base = args.get('base-dn') or default_base_dn
dn = user_dn(sam_account_name, search_base)
# modify user
modification = {
'userAccountControl': [('MODIFY_REPLACE', NORMAL_ACCOUNT)]
}
modify_object(dn, modification)
demisto_entry = {
'ContentsFormat': formats['text'],
'Type': entryTypes['note'],
'Contents': "User {} was enabled".format(sam_account_name)
}
demisto.results(demisto_entry)
def disable_user(default_base_dn):
args = demisto.args()
# get user DN
sam_account_name = args.get('username')
search_base = args.get('base-dn') or default_base_dn
dn = user_dn(sam_account_name, search_base)
# modify user
modification = {
'userAccountControl': [('MODIFY_REPLACE', DISABLED_ACCOUNT)]
}
modify_object(dn, modification)
demisto_entry = {
'ContentsFormat': formats['text'],
'Type': entryTypes['note'],
'Contents': "User {} was disabled".format(sam_account_name)
}
demisto.results(demisto_entry)
def add_member_to_group(default_base_dn):
args = demisto.args()
search_base = args.get('base-dn') or default_base_dn
# get the dn of the member - either user or computer
args_err = "Pleade provide either username or computer-name"
member_dn = ''
if args.get('username') and args.get('computer-name'):
# both arguments passed
raise Exception(args_err)
if args.get('username'):
member_dn = user_dn(args['username'], search_base)
elif args.get('computer-name'):
member_dn = computer_dn(args['computer-name'], search_base)
else:
# none of the arguments passed
raise Exception(args_err)
grp_dn = group_dn(args.get('group-cn'), search_base)
success = microsoft.addMembersToGroups.ad_add_members_to_groups(conn, [member_dn], [grp_dn])
if not success:
raise Exception("Failed to add {} to group {]}".format(
args.get('username') or args.get('computer-name'),
args.get('group_name')
))
demisto_entry = {
'ContentsFormat': formats['text'],
'Type': entryTypes['note'],
'Contents': "Object with dn {} was added to group {}".format(member_dn, args.get('group-cn'))
}
demisto.results(demisto_entry)
def remove_member_from_group(default_base_dn):
args = demisto.args()
search_base = args.get('base-dn') or default_base_dn
# get the dn of the member - either user or computer
args_err = "Pleade provide either username or computer-name"
member_dn = ''
if args.get('username') and args.get('computer-name'):
# both arguments passed
raise Exception(args_err)
if args.get('username'):
member_dn = user_dn(args['username'], search_base)
elif args.get('computer-name'):
member_dn = computer_dn(args['computer-name'], search_base)
else:
# none of the arguments passed
raise Exception(args_err)
grp_dn = group_dn(args.get('group-cn'), search_base)
success = microsoft.removeMembersFromGroups.ad_remove_members_from_groups(conn, [member_dn], [grp_dn], True)
if not success:
raise Exception("Failed to remove {member} from group {group_name}".format({
"member": args.get('username') or args.get('computer-name'),
"group_name": args.get('group_name')
}))
demisto_entry = {
'ContentsFormat': formats['text'],
'Type': entryTypes['note'],
'Contents': "Object with dn {} removed from group {}".format(member_dn, args.get('group-cn'))
}
demisto.results(demisto_entry)
def unlock_account(default_base_dn):
args = demisto.args()
# get user DN
sam_account_name = args.get('username')
search_base = args.get('base-dn') or default_base_dn
dn = user_dn(sam_account_name, search_base)
success = microsoft.unlockAccount.ad_unlock_account(conn, dn)
if not success:
raise Exception("Failed to unlock user {}".format(sam_account_name))
demisto_entry = {
'ContentsFormat': formats['text'],
'Type': entryTypes['note'],
'Contents': "Unlocked user {}".format(sam_account_name)
}
demisto.results(demisto_entry)
''' DELETE OBJECT '''
def delete_user():
    # can actually delete any object...
success = conn.delete(demisto.args().get('user-dn'))
if not success:
raise Exception('Failed to delete user')
demisto_entry = {
'ContentsFormat': formats['text'],
'Type': entryTypes['note'],
'Contents': "Deleted object with dn {}".format(demisto.args().get('user-dn'))
}
demisto.results(demisto_entry)
'''
TEST CONFIGURATION
authenticate user credentials while initializing connection with the AD server
verify base DN is configured correctly
'''
def main():
''' INSTANCE CONFIGURATION '''
SERVER_IP = demisto.params().get('server_ip')
USERNAME = demisto.params().get('credentials')['identifier']
PASSWORD = demisto.params().get('credentials')['password']
DEFAULT_BASE_DN = demisto.params().get('base_dn')
SECURE_CONNECTION = demisto.params().get('secure_connection')
DEFAULT_PAGE_SIZE = int(demisto.params().get('page_size'))
NTLM_AUTH = demisto.params().get('ntlm')
UNSECURE = demisto.params().get('unsecure', False)
PORT = demisto.params().get('port')
try:
server = initialize_server(SERVER_IP, PORT, SECURE_CONNECTION, UNSECURE)
except Exception as e:
return_error(str(e))
return
global conn
if NTLM_AUTH:
        # initialize connection to LDAP server with NTLM authentication
# user example: domain\user
domain_user = SERVER_IP + '\\' + USERNAME if '\\' not in USERNAME else USERNAME
conn = Connection(server, user=domain_user, password=PASSWORD, authentication=NTLM)
else:
# here username should be the user dn
conn = Connection(server, user=USERNAME, password=PASSWORD)
# bind operation is the “authenticate” operation.
try:
# open socket and bind to server
if not conn.bind():
message = "Failed to bind to server. Please validate the credentials configured correctly.\n{}".format(
json.dumps(conn.result))
demisto.info(message)
return_error(message)
return
except LDAPSocketOpenError as e:
exc_msg = str(e)
demisto.info(exc_msg)
message = "Failed to access LDAP server. Please validate the server host and port are configured correctly"
if 'ssl wrapping error' in exc_msg:
message = "Failed to access LDAP server. SSL error."
if not UNSECURE:
message += ' Try using: "Trust any certificate" option.'
demisto.info(message)
return_error(message)
return
demisto.info('Established connection with AD LDAP server')
if not base_dn_verified(DEFAULT_BASE_DN):
message = "Failed to verify the base DN configured for the instance.\n" \
"Last connection result: {}\n" \
"Last error from LDAP server: {}".format(json.dumps(conn.result), json.dumps(conn.last_error))
demisto.info(message)
return_error(message)
return
    demisto.info('Verified base DN "{}"'.format(DEFAULT_BASE_DN))
''' COMMAND EXECUTION '''
try:
if demisto.command() == 'test-module':
if conn.user == '':
# Empty response means you have no authentication status on the server, so you are an anonymous user.
raise Exception("Failed to authenticate user")
demisto.results('ok')
if demisto.command() == 'ad-search':
free_search(DEFAULT_BASE_DN, DEFAULT_PAGE_SIZE)
if demisto.command() == 'ad-expire-password':
expire_user_password(DEFAULT_BASE_DN)
if demisto.command() == 'ad-set-new-password':
set_user_password(DEFAULT_BASE_DN)
if demisto.command() == 'ad-unlock-account':
unlock_account(DEFAULT_BASE_DN)
if demisto.command() == 'ad-disable-account':
disable_user(DEFAULT_BASE_DN)
if demisto.command() == 'ad-enable-account':
enable_user(DEFAULT_BASE_DN)
if demisto.command() == 'ad-remove-from-group':
remove_member_from_group(DEFAULT_BASE_DN)
if demisto.command() == 'ad-add-to-group':
add_member_to_group(DEFAULT_BASE_DN)
if demisto.command() == 'ad-create-user':
create_user()
if demisto.command() == 'ad-delete-user':
delete_user()
if demisto.command() == 'ad-update-user':
update_user(DEFAULT_BASE_DN)
if demisto.command() == 'ad-modify-computer-ou':
modify_computer_ou(DEFAULT_BASE_DN)
if demisto.command() == 'ad-create-contact':
create_contact()
if demisto.command() == 'ad-update-contact':
update_contact()
if demisto.command() == 'ad-get-user':
search_users(DEFAULT_BASE_DN, DEFAULT_PAGE_SIZE)
if demisto.command() == 'ad-get-computer':
search_computers(DEFAULT_BASE_DN, DEFAULT_PAGE_SIZE)
if demisto.command() == 'ad-get-group-members':
search_group_members(DEFAULT_BASE_DN, DEFAULT_PAGE_SIZE)
except Exception as e:
message = "{}\nLast connection result: {}\nLast error from LDAP server: {}".format(
str(e), json.dumps(conn.result), conn.last_error)
demisto.info(message)
return_error(message)
return
finally:
# disconnect and close the connection
conn.unbind()
# python2 uses __builtin__, python3 uses builtins
if __name__ == "__builtin__" or __name__ == "builtins":
main()
| 32.044599
| 120
| 0.638759
|
import demistomock as demisto
from CommonServerPython import *
from ldap3 import Server, Connection, NTLM, SUBTREE, ALL_ATTRIBUTES, Tls
from ldap3.core.exceptions import LDAPSocketOpenError
from ldap3.extend import microsoft
import ssl
from datetime import datetime
conn = None
COMMON_ACCOUNT_CONTROL_FLAGS = {
    512: "Enabled Account",
    514: "Disabled account",
    544: "Account Enabled - Require user to change password at first logon",
    4096: "Workstation/server",
    66048: "Enabled, password never expires",
    66050: "Disabled, password never expires",
    66080: "Enabled, password never expires, password not required.",
    532480: "Domain controller"
}
NORMAL_ACCOUNT = 512
DISABLED_ACCOUNT = 514
DEFAULT_PERSON_ATTRIBUTES = [
'name',
'displayName',
'memberOf',
'mail',
'samAccountName',
'manager',
'userAccountControl'
]
DEFAULT_COMPUTER_ATTRIBUTES = [
'name',
'memberOf'
]
def initialize_server(host, port, secure_connection, unsecure):
if secure_connection == "SSL":
demisto.debug("initializing sever with ssl (unsecure: {}). port: {}". format(unsecure, port or 'default(636)'))
if not unsecure:
demisto.debug("will require server certificate.")
tls = Tls(validate=ssl.CERT_REQUIRED)
if port:
return Server(host, port=port, use_ssl=True, tls=tls)
return Server(host, use_ssl=True, tls=tls)
if port:
return Server(host, port=port, use_ssl=True)
return Server(host, use_ssl=True)
demisto.debug("initializing server without secure connection. port: {}". format(port or 'default(389)'))
if port:
return Server(host, port=port)
return Server(host)
def account_entry(person_object, custome_attributes):
account = {
'Type': 'AD',
'ID': person_object.get('dn'),
'Email': person_object.get('email'),
'Username': person_object.get('samAccountName'),
'DisplayName': person_object.get('displayName'),
        'Manager': person_object.get('manager'),
'Groups': person_object.get('memberOf')
}
for attr in custome_attributes:
account[attr] = person_object[attr]
return account
def endpoint_entry(computer_object, custome_attributes):
endpoint = {
'Type': 'AD',
'ID': computer_object.get('dn'),
'Hostname': computer_object.get('name'),
'Groups': computer_object.get('memberOf')
}
for attr in custome_attributes:
endpoint[attr] = computer_object[attr]
return endpoint
def base_dn_verified(base_dn):
try:
search(
"(objectClass=user)",
base_dn,
size_limit=1
)
except Exception as e:
demisto.info(str(e))
return False
return True
def search(search_filter, search_base, attributes=None, size_limit=0, time_limit=0):
success = conn.search(
search_base=search_base,
search_filter=search_filter,
attributes=attributes,
size_limit=size_limit,
time_limit=time_limit
)
if not success:
raise("Search failed")
return conn.entries
def search_with_paging(search_filter, search_base, attributes=None, page_size=100, size_limit=0, time_limit=0):
total_entries = 0
cookie = None
start = datetime.now()
entries = []
entries_left_to_fetch = size_limit
while True:
if 0 < entries_left_to_fetch < page_size:
page_size = entries_left_to_fetch
conn.search(
search_base,
search_filter,
search_scope=SUBTREE,
attributes=attributes,
paged_size=page_size,
paged_cookie=cookie
)
entries_left_to_fetch -= len(conn.entries)
total_entries += len(conn.entries)
cookie = conn.result['controls']['1.2.840.113556.1.4.319']['value']['cookie']
        time_diff = (datetime.now() - start).seconds
entries.extend(conn.entries)
if (size_limit and size_limit <= total_entries) or (time_limit and time_diff >= time_limit) or (not cookie):
break
raw = []
flat = []
for entry in entries:
entry = json.loads(entry.entry_to_json())
flat_entry = {
'dn': entry['dn']
}
for attr in entry.get('attributes', {}):
flat_entry[attr] = entry['attributes'][attr]
raw.append(entry)
flat.append(flat_entry)
return {
"raw": raw,
"flat": flat
}
def user_dn(sam_account_name, search_base):
search_filter = '(&(objectClass=user)(sAMAccountName={}))'.format(sam_account_name)
entries = search(
search_filter,
search_base
)
if not entries:
raise Exception("Could not get full DN for user with sAMAccountName '{}'".format(sam_account_name))
entry = json.loads(entries[0].entry_to_json())
return entry['dn']
def computer_dn(computer_name, search_base):
    search_filter = '(&(objectClass=user)(objectCategory=computer)(name={}))'.format(computer_name)
    entries = search(
        search_filter,
        search_base
    )
    if not entries:
        raise Exception("Could not get full DN for computer with name '{}'".format(computer_name))
    entry = json.loads(entries[0].entry_to_json())
    return entry['dn']
def group_dn(group_name, search_base):
search_filter = '(&(objectClass=group)(cn={}))'.format(group_name)
entries = search(
search_filter,
search_base
)
if not entries:
raise Exception("Could not get full DN for group with name '{}'".format(group_name))
entry = json.loads(entries[0].entry_to_json())
return entry['dn']
def free_search(default_base_dn, page_size):
args = demisto.args()
search_filter = args.get('filter')
size_limit = int(args.get('size-limit', '0'))
time_limit = int(args.get('time-limit', '0'))
search_base = args.get('base-dn') or default_base_dn
attributes = args.get('attributes')
context_output = args.get('context-output')
if attributes:
attributes = ALL_ATTRIBUTES if attributes == 'ALL' else attributes.split(',')
entries = search_with_paging(
search_filter,
search_base,
attributes=attributes,
size_limit=size_limit,
time_limit=time_limit,
page_size=page_size
)
ec = {} if context_output == 'no' else {'ActiveDirectory.Search(obj.dn == val.dn)': entries['flat']}
demisto_entry = {
'ContentsFormat': formats['json'],
'Type': entryTypes['note'],
'Contents': entries['raw'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown("Active Directory Search", entries['flat']),
'EntryContext': ec
}
demisto.results(demisto_entry)
def search_users(default_base_dn, page_size):
    # this command is equivalent to the ADGetUser script
    # will perform a custom search to find users by a specific (one) attribute specified by the user
args = demisto.args()
attributes = []
custome_attributes = []
# zero is actually no limitation
limit = int(args.get('limit', '0'))
# default query - list all users
query = "(objectClass=User)(objectCategory=person)"
# query by user DN
if args.get('dn'):
query = "(&(objectClass=User)(objectCategory=person)(dn={}))".format(args['dn'])
# query by name
if args.get('name'):
query = "(&(objectClass=User)(objectCategory=person)(cn={}))".format(args['name'])
# query by email
if args.get('email'):
query = "(&(objectClass=User)(objectCategory=person)(mail={}))".format(args['email'])
# query by sAMAccountName
if args.get('username'):
query = "(&(objectClass=User)(objectCategory=person)(sAMAccountName={}))".format(args['username'])
# query by custom object attribute
if args.get('custom-field-type'):
if not args.get('custom-field-data'):
            raise Exception('Please specify "custom-field-data" as well when querying by "custom-field-type"')
        query = "(&(objectClass=User)(objectCategory=person)({}={}))".format(
            args['custom-field-type'], args['custom-field-data'])
if args.get('attributes'):
custome_attributes = args['attributes'].split(",")
attributes = set(custome_attributes + DEFAULT_PERSON_ATTRIBUTES)
entries = search_with_paging(
query,
default_base_dn,
attributes=attributes,
size_limit=limit,
page_size=page_size
)
accounts = [account_entry(entry, custome_attributes) for entry in entries['flat']]
if args.get('user-account-control-out', '') == 'true':
# display a literal translation of the numeric account control flag
for i, user in enumerate(entries['flat']):
flag_no = user.get('userAccountControl')[0]
            entries['flat'][i]['userAccountControl'] = COMMON_ACCOUNT_CONTROL_FLAGS.get(flag_no) or flag_no
demisto_entry = {
'ContentsFormat': formats['json'],
'Type': entryTypes['note'],
'Contents': entries['raw'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown("Active Directory - Get Users", entries['flat']),
'EntryContext': {
'ActiveDirectory.Users(obj.dn == val.dn)': entries['flat'],
            # 'backward compatibility' with ADGetUser script
'Account(obj.ID == val.ID)': accounts
}
}
demisto.results(demisto_entry)
def search_computers(default_base_dn, page_size):
# this command is equivalent to ADGetComputer script
args = demisto.args()
attributes = []
custome_attributes = []
# default query - list all users (computer category)
query = "(&(objectClass=user)(objectCategory=computer))"
# query by user DN
if args.get('dn'):
query = "(&(objectClass=user)(objectCategory=computer)(dn={}))".format(args['dn'])
# query by name
if args.get('name'):
query = "(&(objectClass=user)(objectCategory=computer)(name={}))".format(args['name'])
# query by custom object attribute
if args.get('custom-field-type'):
if not args.get('custom-field-data'):
            raise Exception('Please specify "custom-field-data" as well when querying by "custom-field-type"')
        query = "(&(objectClass=user)(objectCategory=computer)({}={}))".format(
            args['custom-field-type'], args['custom-field-data'])
if args.get('attributes'):
custome_attributes = args['attributes'].split(",")
attributes = set(custome_attributes + DEFAULT_COMPUTER_ATTRIBUTES)
entries = search_with_paging(
query,
default_base_dn,
attributes=attributes,
page_size=page_size
)
endpoints = [endpoint_entry(entry, custome_attributes) for entry in entries['flat']]
demisto_entry = {
'ContentsFormat': formats['json'],
'Type': entryTypes['note'],
'Contents': entries['raw'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown("Active Directory - Get Computers", entries['flat']),
'EntryContext': {
'ActiveDirectory.Computers(obj.dn == val.dn)': entries['flat'],
            # 'backward compatibility' with ADGetComputer script
'Endpoint(obj.ID == val.ID)': endpoints
}
}
demisto.results(demisto_entry)
def search_group_members(default_base_dn, page_size):
# this command is equivalent to ADGetGroupMembers script
args = demisto.args()
member_type = args.get('member-type')
group_dn = args.get('group-dn')
custome_attributes = []
default_attributes = DEFAULT_PERSON_ATTRIBUTES if member_type == 'person' else DEFAULT_COMPUTER_ATTRIBUTES
if args.get('attributes'):
custome_attributes = args['attributes'].split(",")
attributes = set(custome_attributes + default_attributes)
# neasted search
query = "(&(objectCategory={})(objectClass=user)(memberOf:1.2.840.113556.1.4.1941:={}))".format(member_type,
group_dn)
entries = search_with_paging(
query,
default_base_dn,
attributes=attributes,
page_size=page_size
)
members = [{'dn': entry['dn'], 'category': member_type} for entry in entries['flat']]
demisto_entry = {
'ContentsFormat': formats['json'],
'Type': entryTypes['note'],
'Contents': entries['raw'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown("Active Directory - Get Group Members", entries['flat']),
'EntryContext': {
'ActiveDirectory.Groups(obj.dn ==' + group_dn + ')': {
'dn': group_dn,
'members': members
}
}
}
if member_type == 'person':
demisto_entry['EntryContext']['ActiveDirectory.Users(obj.dn == val.dn)'] = entries['flat']
demisto_entry['EntryContext']['Account'] = [account_entry(
entry, custome_attributes) for entry in entries['flat']]
else:
demisto_entry['EntryContext']['ActiveDirectory.Computers(obj.dn == val.dn)'] = entries['flat']
demisto_entry['EntryContext']['Endpoint'] = [endpoint_entry(
entry, custome_attributes) for entry in entries['flat']]
demisto.results(demisto_entry)
def create_user():
args = demisto.args()
object_classes = ["top", "person", "organizationalPerson", "user"]
user_dn = args.get('user-dn')
username = args.get("username")
password = args.get("password")
custome_attributes = args.get('custom-attributes')
attributes = {
"samAccountName": username
}
# set common user attributes
if args.get('display-name'):
attributes['displayName'] = args['display-name']
if args.get('description'):
attributes['description'] = args['description']
if args.get('email'):
attributes['mail'] = args['email']
if args.get('telephone-number'):
attributes['telephoneNumber'] = args['telephone-number']
if args.get('title'):
attributes['title'] = args['title']
# set user custome attributes
if custome_attributes:
try:
custome_attributes = json.loads(custome_attributes)
except Exception as e:
demisto.info(str(e))
raise Exception(
"Failed to parse custom attributes argument. Please see an example of this argument in the description."
)
for attribute_name, attribute_value in custome_attributes.items():
            # may override the default attribute settings
attributes[attribute_name] = attribute_value
# add user
success = conn.add(user_dn, object_classes, attributes)
if not success:
raise Exception("Failed to create user")
# set user password
success = conn.extend.microsoft.modify_password(user_dn, password)
if not success:
raise Exception("Failed to reset user password")
# enable user and expire password
modification = {
# enable user
'userAccountControl': [('MODIFY_REPLACE', NORMAL_ACCOUNT)],
# set to 0, to force password change on next login
"pwdLastSet": [('MODIFY_REPLACE', "0")]
}
modify_object(user_dn, modification)
demisto_entry = {
'ContentsFormat': formats['text'],
'Type': entryTypes['note'],
'Contents': "Created user with DN: {}".format(user_dn)
}
demisto.results(demisto_entry)
def create_contact():
args = demisto.args()
object_classes = ["top", "person", "organizationalPerson", "contact"]
contact_dn = args.get('contact-dn')
# set contact attributes
attributes = {}
if args.get('custom-attributes'):
try:
attributes = json.loads(args['custom-attributes'])
except Exception as e:
demisto.info(str(e))
raise Exception(
                'Failed to parse custom attributes argument. Please see an example of this argument in the description.'
)
# set common user attributes
    if args.get('display-name'):
        attributes['displayName'] = args['display-name']
if args.get('description'):
attributes['description'] = args['description']
if args.get('email'):
attributes['mail'] = args['email']
if args.get('telephone-number'):
attributes['telephoneNumber'] = args['telephone-number']
if args.get('title'):
attributes['title'] = args['title']
# add contact
success = conn.add(contact_dn, object_classes, attributes)
if not success:
raise Exception("Failed to create contact")
demisto_entry = {
'ContentsFormat': formats['text'],
'Type': entryTypes['note'],
'Contents': "Created contact with DN: {}".format(contact_dn)
}
demisto.results(demisto_entry)
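
# modify_object is a thin wrapper around ldap3's Connection.modify, which
# expects a dict of the form {attribute_name: [(operation, new_value)]}.
# The callers below always pass 'MODIFY_REPLACE', which is the string value
# of ldap3's MODIFY_REPLACE constant.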
def modify_object(dn, modification):
success = conn.modify(dn, modification)
if not success:
raise Exception("Failed to update object {} with the following modofication: {}".format(
dn, json.dumps(modification)))
def update_user(default_base_dn):
args = demisto.args()
# get user DN
sam_account_name = args.get('username')
attribute_name = args.get('attribute-name')
attribute_value = args.get('attribute-value')
search_base = args.get('base-dn') or default_base_dn
dn = user_dn(sam_account_name, search_base)
modification = {}
modification[attribute_name] = [('MODIFY_REPLACE', attribute_value)]
# modify user
modify_object(dn, modification)
demisto_entry = {
'ContentsFormat': formats['text'],
'Type': entryTypes['note'],
'Contents': "Updated user's {} to {} ".format(attribute_name, attribute_value)
}
demisto.results(demisto_entry)
def update_contact():
args = demisto.args()
contact_dn = args.get('contact-dn')
modification = {}
modification[args.get('attribute-name')] = [('MODIFY_REPLACE', args.get('attribute-value'))]
modify_object(contact_dn, modification)
demisto_entry = {
'ContentsFormat': formats['text'],
'Type': entryTypes['note'],
'Contents': "Updated contact's {} to: {} ".format(args.get('attribute-name'), args.get('attribute-value'))
}
demisto.results(demisto_entry)
def modify_computer_ou(default_base_dn):
args = demisto.args()
computer_name = args.get('computer-name')
dn = computer_dn(computer_name, args.get('base-dn') or default_base_dn)
success = conn.modify_dn(dn, "CN={}".format(computer_name), new_superior=args.get('full-superior-dn'))
if not success:
raise Exception("Failed to modify computer OU")
demisto_entry = {
'ContentsFormat': formats['text'],
'Type': entryTypes['note'],
'Contents': "Moved computer {} to {}".format(computer_name, args.get('full-superior-dn'))
}
demisto.results(demisto_entry)
def expire_user_password(default_base_dn):
args = demisto.args()
# get user DN
sam_account_name = args.get('username')
search_base = args.get('base-dn') or default_base_dn
dn = user_dn(sam_account_name, search_base)
modification = {
# set to 0, to force password change on next login
"pwdLastSet": [('MODIFY_REPLACE', "0")]
}
# modify user
modify_object(dn, modification)
demisto_entry = {
'ContentsFormat': formats['text'],
'Type': entryTypes['note'],
'Contents': "Expired password successfully"
}
demisto.results(demisto_entry)
def set_user_password(default_base_dn):
args = demisto.args()
# get user DN
sam_account_name = args.get('username')
password = args.get('password')
search_base = args.get('base-dn') or default_base_dn
dn = user_dn(sam_account_name, search_base)
# set user password
success = conn.extend.microsoft.modify_password(dn, password)
if not success:
raise Exception("Failed to reset user password")
demisto_entry = {
'ContentsFormat': formats['text'],
'Type': entryTypes['note'],
'Contents': "User password successfully set"
}
demisto.results(demisto_entry)
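
# enable_user / disable_user below flip the userAccountControl attribute
# between NORMAL_ACCOUNT and DISABLED_ACCOUNT (defined earlier in this
# script; conventionally the UAC values 512 and 512 + ACCOUNTDISABLE = 514).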
def enable_user(default_base_dn):
args = demisto.args()
# get user DN
sam_account_name = args.get('username')
search_base = args.get('base-dn') or default_base_dn
dn = user_dn(sam_account_name, search_base)
# modify user
modification = {
'userAccountControl': [('MODIFY_REPLACE', NORMAL_ACCOUNT)]
}
modify_object(dn, modification)
demisto_entry = {
'ContentsFormat': formats['text'],
'Type': entryTypes['note'],
'Contents': "User {} was enabled".format(sam_account_name)
}
demisto.results(demisto_entry)
def disable_user(default_base_dn):
args = demisto.args()
# get user DN
sam_account_name = args.get('username')
search_base = args.get('base-dn') or default_base_dn
dn = user_dn(sam_account_name, search_base)
# modify user
modification = {
'userAccountControl': [('MODIFY_REPLACE', DISABLED_ACCOUNT)]
}
modify_object(dn, modification)
demisto_entry = {
'ContentsFormat': formats['text'],
'Type': entryTypes['note'],
'Contents': "User {} was disabled".format(sam_account_name)
}
demisto.results(demisto_entry)
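
# The two group-membership commands below delegate to ldap3's Microsoft AD
# extensions (ldap3.extend.microsoft), which add or remove the given DNs
# from the target group's 'member' attribute.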
def add_member_to_group(default_base_dn):
args = demisto.args()
search_base = args.get('base-dn') or default_base_dn
# get the dn of the member - either user or computer
args_err = "Pleade provide either username or computer-name"
member_dn = ''
if args.get('username') and args.get('computer-name'):
# both arguments passed
raise Exception(args_err)
if args.get('username'):
member_dn = user_dn(args['username'], search_base)
elif args.get('computer-name'):
member_dn = computer_dn(args['computer-name'], search_base)
else:
# none of the arguments passed
raise Exception(args_err)
grp_dn = group_dn(args.get('group-cn'), search_base)
success = microsoft.addMembersToGroups.ad_add_members_to_groups(conn, [member_dn], [grp_dn])
if not success:
raise Exception("Failed to add {} to group {]}".format(
args.get('username') or args.get('computer-name'),
args.get('group_name')
))
demisto_entry = {
'ContentsFormat': formats['text'],
'Type': entryTypes['note'],
'Contents': "Object with dn {} was added to group {}".format(member_dn, args.get('group-cn'))
}
demisto.results(demisto_entry)
def remove_member_from_group(default_base_dn):
args = demisto.args()
search_base = args.get('base-dn') or default_base_dn
# get the dn of the member - either user or computer
args_err = "Pleade provide either username or computer-name"
member_dn = ''
if args.get('username') and args.get('computer-name'):
# both arguments passed
raise Exception(args_err)
if args.get('username'):
member_dn = user_dn(args['username'], search_base)
elif args.get('computer-name'):
member_dn = computer_dn(args['computer-name'], search_base)
else:
# none of the arguments passed
raise Exception(args_err)
grp_dn = group_dn(args.get('group-cn'), search_base)
success = microsoft.removeMembersFromGroups.ad_remove_members_from_groups(conn, [member_dn], [grp_dn], True)
if not success:
raise Exception("Failed to remove {member} from group {group_name}".format({
"member": args.get('username') or args.get('computer-name'),
"group_name": args.get('group_name')
}))
demisto_entry = {
'ContentsFormat': formats['text'],
'Type': entryTypes['note'],
'Contents': "Object with dn {} removed from group {}".format(member_dn, args.get('group-cn'))
}
demisto.results(demisto_entry)
def unlock_account(default_base_dn):
args = demisto.args()
# get user DN
sam_account_name = args.get('username')
search_base = args.get('base-dn') or default_base_dn
dn = user_dn(sam_account_name, search_base)
success = microsoft.unlockAccount.ad_unlock_account(conn, dn)
if not success:
raise Exception("Failed to unlock user {}".format(sam_account_name))
demisto_entry = {
'ContentsFormat': formats['text'],
'Type': entryTypes['note'],
'Contents': "Unlocked user {}".format(sam_account_name)
}
demisto.results(demisto_entry)
def delete_user():
    # can actually delete any object, not only users...
success = conn.delete(demisto.args().get('user-dn'))
if not success:
raise Exception('Failed to delete user')
demisto_entry = {
'ContentsFormat': formats['text'],
'Type': entryTypes['note'],
'Contents': "Deleted object with dn {}".format(demisto.args().get('user-dn'))
}
demisto.results(demisto_entry)
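
# main() builds the LDAP connection from the integration parameters, binds
# (optionally with NTLM), verifies the base DN, and then dispatches each
# 'ad-*' command to one of the functions defined above.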
def main():
SERVER_IP = demisto.params().get('server_ip')
USERNAME = demisto.params().get('credentials')['identifier']
PASSWORD = demisto.params().get('credentials')['password']
DEFAULT_BASE_DN = demisto.params().get('base_dn')
SECURE_CONNECTION = demisto.params().get('secure_connection')
DEFAULT_PAGE_SIZE = int(demisto.params().get('page_size'))
NTLM_AUTH = demisto.params().get('ntlm')
UNSECURE = demisto.params().get('unsecure', False)
PORT = demisto.params().get('port')
try:
server = initialize_server(SERVER_IP, PORT, SECURE_CONNECTION, UNSECURE)
except Exception as e:
return_error(str(e))
return
global conn
if NTLM_AUTH:
        # initialize connection to LDAP server with NTLM authentication
# user example: domain\user
domain_user = SERVER_IP + '\\' + USERNAME if '\\' not in USERNAME else USERNAME
conn = Connection(server, user=domain_user, password=PASSWORD, authentication=NTLM)
else:
# here username should be the user dn
conn = Connection(server, user=USERNAME, password=PASSWORD)
    # the bind operation is LDAP's "authenticate" operation.
try:
# open socket and bind to server
if not conn.bind():
message = "Failed to bind to server. Please validate the credentials configured correctly.\n{}".format(
json.dumps(conn.result))
demisto.info(message)
return_error(message)
return
except LDAPSocketOpenError as e:
exc_msg = str(e)
demisto.info(exc_msg)
message = "Failed to access LDAP server. Please validate the server host and port are configured correctly"
if 'ssl wrapping error' in exc_msg:
message = "Failed to access LDAP server. SSL error."
if not UNSECURE:
message += ' Try using: "Trust any certificate" option.'
demisto.info(message)
return_error(message)
return
demisto.info('Established connection with AD LDAP server')
if not base_dn_verified(DEFAULT_BASE_DN):
message = "Failed to verify the base DN configured for the instance.\n" \
"Last connection result: {}\n" \
"Last error from LDAP server: {}".format(json.dumps(conn.result), json.dumps(conn.last_error))
demisto.info(message)
return_error(message)
return
    demisto.info('Verified base DN "{}"'.format(DEFAULT_BASE_DN))
try:
if demisto.command() == 'test-module':
if conn.user == '':
# Empty response means you have no authentication status on the server, so you are an anonymous user.
raise Exception("Failed to authenticate user")
demisto.results('ok')
if demisto.command() == 'ad-search':
free_search(DEFAULT_BASE_DN, DEFAULT_PAGE_SIZE)
if demisto.command() == 'ad-expire-password':
expire_user_password(DEFAULT_BASE_DN)
if demisto.command() == 'ad-set-new-password':
set_user_password(DEFAULT_BASE_DN)
if demisto.command() == 'ad-unlock-account':
unlock_account(DEFAULT_BASE_DN)
if demisto.command() == 'ad-disable-account':
disable_user(DEFAULT_BASE_DN)
if demisto.command() == 'ad-enable-account':
enable_user(DEFAULT_BASE_DN)
if demisto.command() == 'ad-remove-from-group':
remove_member_from_group(DEFAULT_BASE_DN)
if demisto.command() == 'ad-add-to-group':
add_member_to_group(DEFAULT_BASE_DN)
if demisto.command() == 'ad-create-user':
create_user()
if demisto.command() == 'ad-delete-user':
delete_user()
if demisto.command() == 'ad-update-user':
update_user(DEFAULT_BASE_DN)
if demisto.command() == 'ad-modify-computer-ou':
modify_computer_ou(DEFAULT_BASE_DN)
if demisto.command() == 'ad-create-contact':
create_contact()
if demisto.command() == 'ad-update-contact':
update_contact()
if demisto.command() == 'ad-get-user':
search_users(DEFAULT_BASE_DN, DEFAULT_PAGE_SIZE)
if demisto.command() == 'ad-get-computer':
search_computers(DEFAULT_BASE_DN, DEFAULT_PAGE_SIZE)
if demisto.command() == 'ad-get-group-members':
search_group_members(DEFAULT_BASE_DN, DEFAULT_PAGE_SIZE)
except Exception as e:
message = "{}\nLast connection result: {}\nLast error from LDAP server: {}".format(
str(e), json.dumps(conn.result), conn.last_error)
demisto.info(message)
return_error(message)
return
finally:
# disconnect and close the connection
conn.unbind()
# python2 uses __builtin__ python3 uses builtins
if __name__ == "__builtin__" or __name__ == "builtins":
main()
| true
| true
|
1c41b2c3fd85c3158634955ff714604086a1bca8
| 854
|
py
|
Python
|
imageclassifierapp/services/classifier.py
|
onl1ner/django-image-classifier
|
6bb0726fbd61bb60bd245356ca85d7030ced131e
|
[
"MIT"
] | null | null | null |
imageclassifierapp/services/classifier.py
|
onl1ner/django-image-classifier
|
6bb0726fbd61bb60bd245356ca85d7030ced131e
|
[
"MIT"
] | null | null | null |
imageclassifierapp/services/classifier.py
|
onl1ner/django-image-classifier
|
6bb0726fbd61bb60bd245356ca85d7030ced131e
|
[
"MIT"
] | 1
|
2022-02-26T17:50:12.000Z
|
2022-02-26T17:50:12.000Z
|
import os
import numpy as np
import tensorflow as tf
from PIL import Image
from django.conf import settings
from keras.preprocessing import image
from keras.models import load_model
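# The model is assumed to be a CIFAR-10-style classifier: inputs are resized
# to 32x32 RGB and the ten LABELS below are the standard CIFAR-10 classes.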
class Classifier:
IMG_SIZE = (32, 32)
LABELS = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
def __init__(self, image):
self.image = image
def classify(self):
resized_img = self.image.resize(self.IMG_SIZE, Image.ANTIALIAS)
img_array = image.img_to_array(resized_img)
data = np.expand_dims(img_array, axis = 0)
file = os.path.join(settings.BASE_DIR, 'model/model.h5')
model = load_model(file)
prediction = model.predict(data)
label_index = np.argmax(prediction, axis = 1)[0]
return self.LABELS[label_index]
| 24.4
| 103
| 0.653396
|
import os
import numpy as np
import tensorflow as tf
from PIL import Image
from django.conf import settings
from keras.preprocessing import image
from keras.models import load_model
class Classifier:
IMG_SIZE = (32, 32)
LABELS = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
def __init__(self, image):
self.image = image
def classify(self):
resized_img = self.image.resize(self.IMG_SIZE, Image.ANTIALIAS)
img_array = image.img_to_array(resized_img)
data = np.expand_dims(img_array, axis = 0)
file = os.path.join(settings.BASE_DIR, 'model/model.h5')
model = load_model(file)
prediction = model.predict(data)
label_index = np.argmax(prediction, axis = 1)[0]
return self.LABELS[label_index]
| true
| true
|
1c41b3489aa516f6ce1d161f96d86415ed8d1718
| 11,407
|
py
|
Python
|
analyseUsage.py
|
shenyuanv/powerAnalysis
|
8ebd4c9ad79c1bfe7ac13008fe39a74b00d64805
|
[
"MIT"
] | null | null | null |
analyseUsage.py
|
shenyuanv/powerAnalysis
|
8ebd4c9ad79c1bfe7ac13008fe39a74b00d64805
|
[
"MIT"
] | null | null | null |
analyseUsage.py
|
shenyuanv/powerAnalysis
|
8ebd4c9ad79c1bfe7ac13008fe39a74b00d64805
|
[
"MIT"
] | null | null | null |
import sys
import sqlite3
from datetime import datetime
from datetime import timedelta
import numpy as np
import argparse
from collections import namedtuple
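# contiguous_regions returns the half-open [start, stop) index pairs of the
# runs where `condition` is True; e.g. [F, T, T, F, T] -> [[1, 3], [4, 5]].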
def contiguous_regions(condition):
d = np.diff(condition)
idx, = d.nonzero()
idx += 1
if condition[0]:
idx = np.r_[0, idx]
if condition[-1]:
idx = np.r_[idx, condition.size]
idx.shape = (-1,2)
return idx
def valid_date(s):
try:
return datetime.strptime(s, "%Y-%m-%d %H:%M")
except ValueError:
msg = "Not a valid date: '{0}'.".format(s)
raise argparse.ArgumentTypeError(msg)
def extractSecondsActiveFromResultSet(rows, activeState):
x = [datetime.fromtimestamp(row[0]) for row in rows]
y = [row[1] for row in rows]
condition = np.abs(y) == activeState
regions = contiguous_regions(condition)
count = timedelta(0)
for reg in regions:
        timeOfRow = x[reg[0]]
if (reg[1] < len(x)):
count += (x[reg[1]] - x[reg[0]])
return count.total_seconds()
def formatTimeDelta(delta):
    hours, remainder = divmod(delta.total_seconds(), 3600)
    minutes, seconds = divmod(remainder, 60)
    return '%d:%02d:%02d' % (hours, minutes, seconds)
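# main() expects the path to a powerlog SQLite database as the positional
# argument, runs a series of aggregate queries over its PL* tables, and
# writes the rendered HTML report to report.html in the working directory.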
def main(argv):
parser=argparse.ArgumentParser()
parser.add_argument('inputFile')
parser.add_argument('-s', "--startDate", help="The Start Date - format YYYY-MM-DD HH:MM", required=False, type=valid_date)
parser.add_argument('-e', "--endDate", help="The End Date - format YYYY-MM-DD HH:MM", required=False, type=valid_date)
args=parser.parse_args()
whereClause = ''
if args.startDate:
whereClause = 'timestamp > {startDate} '.format(startDate = args.startDate.strftime('%s'))
if args.endDate:
if args.startDate:
whereClause += ' AND '
whereClause += ' timestamp < {endDate} '.format(endDate = args.endDate.strftime('%s'))
    db = sqlite3.connect(args.inputFile)
db.row_factory = sqlite3.Row
cursor = db.cursor()
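    # The ('', 'WHERE {0}'.format(...))[len(whereClause) > 0] construct used
    # below indexes a two-element tuple with a boolean: False picks the empty
    # string, True picks the formatted WHERE/AND clause.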
cursor.execute('''SELECT timestamp, Active
FROM PLDisplayAgent_EventPoint_Display {whereClause}
ORDER BY timestamp'''.format(whereClause=('', 'WHERE {0}'.format(whereClause))[len(whereClause) > 0]))
all_rows = cursor.fetchall()
if len(all_rows):
displayOnLength =extractSecondsActiveFromResultSet(all_rows, 1)
else:
displayOnLength = 0
cursor.execute('''SELECT timestamp, state
FROM PLSleepWakeAgent_EventForward_PowerState {whereClause}
ORDER BY timestamp'''.format(whereClause=('', 'WHERE {0}'.format(whereClause))[len(whereClause) > 0]))
all_rows = cursor.fetchall()
if len(all_rows):
deviceOnLength =extractSecondsActiveFromResultSet(all_rows, 0)
else:
deviceOnLength = 0
(startTimeInData, endTimeInData) = (all_rows[0][0], all_rows[-1][0])
overallBreakdown = '''<table class="table table-striped table-bordered display responsive">
<tbody>
<tr><td>Display active for {0}</td></tr>
<tr><td>Device active for {1}</td></tr>
</tbody>
</table>
'''.format(str(timedelta(seconds=displayOnLength)),str(timedelta(seconds=deviceOnLength)))
# App list
cursor.execute('''SELECT AppName, AppBundleId, AppBundleVersion, AppIs3rdParty
FROM PLApplicationAgent_EventNone_AllApps''')
all_rows = cursor.fetchall()
appListBody = ''
for row in all_rows:
appListBody += '<tr><td>{0}</td><td>{1}</td><td>{2}</td></tr>\n'.format(row[0], row[1], row[2])
applistBreakdown = '''<table id="applistBreakDown" class="table table-striped table-condensed">
<thead>
<tr>
<td class="col-md-3">App Name</td>
<td>AppBundleId</td>
<td>AppBundleVersion</td>
</tr>
</thead>
<tbody>{appListBody}</tbody>
</table>'''.format(appListBody = appListBody)
# Per Process Timing
cursor.execute('''SELECT processname, SUM(value) AS TotalTime
FROM PLProcessMonitorAgent_EventInterval_ProcessMonitorInterval_Dynamic, PLProcessMonitorAgent_EventInterval_ProcessMonitorInterval
WHERE PLProcessMonitorAgent_EventInterval_ProcessMonitorInterval.ID = PLProcessMonitorAgent_EventInterval_ProcessMonitorInterval_Dynamic.FK_ID
{whereClause}
GROUP BY processname
ORDER BY TotalTime DESC'''.format(whereClause=('', 'AND {0}'.format(whereClause))[len(whereClause) > 0]))
all_rows = cursor.fetchall()
perProcessBreakdownBody = ''
for row in all_rows:
perProcessBreakdownBody += '<tr><td>{0}</td><td>{1}</td></tr>\n'.format(row[0], row[1])
perProcesssBreakdown = '''<table id="processBreakdown" class="table table-striped table-condensed">
<thead>
<tr>
<td class="col-md-3">Process Name</td>
<td>Time (s)</td>
</tr>
</thead>
<tbody>{perProcessBreakdownBody}</tbody>
</table>'''.format(perProcessBreakdownBody = perProcessBreakdownBody)
# Signal Bars
cursor.execute('''SELECT signalBars, ROUND(CAST(COUNT(*) AS REAL)/total, 2) * 100 AS percent
FROM PLBBAgent_EventPoint_TelephonyActivity
CROSS JOIN
( SELECT COUNT(*) AS total
FROM PLBBAgent_EventPoint_TelephonyActivity
WHERE airplaneMode="off"
{whereClause}
)
WHERE airplaneMode="off" {whereClause}
GROUP BY signalBars'''.format(whereClause=('', 'AND {0}'.format(whereClause))[len(whereClause) > 0]))
all_rows = cursor.fetchall()
signalBody = ''
for row in all_rows:
signalBody += '<tr><td>{0}</td><td>{1}</td></tr>\n'.format(row[0], row[1])
signalBreakdown = '''<table id="signalBreakdown" class="table table-striped table-condensed">
<thead>
<tr>
<td class="col-md-3">Number of Bars</td>
<td>%</td>
</tr>
</thead>
<tbody>{signalBody}</tbody>
</table>'''.format(signalBody = signalBody)
#locations
cursor.execute('''SELECT Client, Type, COUNT(Client) AS Count
FROM PLLocationAgent_EventForward_ClientStatus
{whereClause}
GROUP BY Client ORDER BY Count DESC'''.format(whereClause=('', 'WHERE {0}'.format(whereClause))[len(whereClause) > 0]))
all_rows = cursor.fetchall()
locationBody = ''
for row in all_rows:
locationBody += '<tr><td>{0}</td><td>{1}</td><td>{2}</td></tr>\n'.format(row[0], row[1], row[2])
locationBreakdown = '''<table id="locationBreakdown" class="table table-striped table-condensed">
<thead>
<tr>
<td class="col-md-3">Client</td>
<td>Type</td>
<td>Number of Requests</td>
</tr>
</thead>
<tbody>{locationBody}</tbody>
</table>'''.format(locationBody = locationBody)
#power consumption
cursor.execute('''SELECT Name, SUM(Energy) AS TotalEnergy
FROM PLAccountingOperator_Aggregate_RootNodeEnergy, PLAccountingOperator_EventNone_Nodes
WHERE PLAccountingOperator_Aggregate_RootNodeEnergy.NodeID = PLAccountingOperator_EventNone_Nodes.ID
{whereClause}
GROUP BY Name
ORDER BY TotalEnergy DESC'''.format(whereClause=('', 'AND {0}'.format(whereClause))[len(whereClause) > 0]))
all_rows = cursor.fetchall()
perProcessPowerConsumption = ''
for row in all_rows:
perProcessPowerConsumption += '<tr><td>{0}</td><td>{1}</td></tr>\n'.format(row[0], row[1])
powerBreakDown = '''<table id="powerBreakDown" class="table table-striped table-condensed">
<thead>
<tr>
<td class="col-md-3">Node Name</td>
<td>Power Usage</td>
</tr>
</thead>
<tbody>{perProcessPowerConsumption}</tbody>
</table>'''.format(perProcessPowerConsumption = perProcessPowerConsumption)
#memory usage
cursor.execute('''SELECT PLApplicationAgent_EventNone_AllApps.AppName, PLApplicationAgent_EventBackward_ApplicationMemory.AppBundleId, avg(PeakMemory) AS avgpeak
FROM PLApplicationAgent_EventBackward_ApplicationMemory
LEFT JOIN PLApplicationAgent_EventNone_AllApps
ON PLApplicationAgent_EventBackward_ApplicationMemory.AppBundleId = PLApplicationAgent_EventNone_AllApps.AppBundleId
{whereClause}
GROUP BY PLApplicationAgent_EventBackward_ApplicationMemory.AppBundleId
ORDER BY avgpeak DESC'''.format(whereClause=('', '{0}'.format(whereClause))[len(whereClause) > 0]))
all_rows = cursor.fetchall()
perProcessMemPeaks = ''
for row in all_rows:
AppName = row[0] if row[0] else ''
perProcessMemPeaks += '<tr><td>{0}</td><td>{1}</td><td>{2}</td></tr>\n'.format(row[1], AppName.encode('utf-8'), row[2])
memoryBreakDown = '''<table id="memoryBreakDown" class="table table-striped table-condensed">
<thead>
<tr>
<td class="col-md-3">AppBundleId</td>
<td>AppName</td>
<td>Peak Memory</td>
</tr>
</thead>
<tbody>{perProcessMemPeaks}</tbody>
</table>'''.format(perProcessMemPeaks = perProcessMemPeaks)
f = open('report.html', 'w')
report = '''<html>
<link rel="stylesheet" type="text/css" href="https://netdna.bootstrapcdn.com/bootstrap/3.0.3/css/bootstrap.min.css">
<link rel="stylesheet" type="text/css" href="https://cdn.datatables.net/plug-ins/380cb78f450/integration/bootstrap/3/dataTables.bootstrap.css">
<script type="text/javascript" language="javascript" src="https://code.jquery.com/jquery-1.10.2.min.js"></script>
<script type="text/javascript" language="javascript" src="https://cdn.datatables.net/1.10.3/js/jquery.dataTables.min.js"></script>
<script type="text/javascript" language="javascript" src="https://cdn.datatables.net/plug-ins/380cb78f450/integration/bootstrap/3/dataTables.bootstrap.js"></script>
<script type="text/javascript" charset="utf-8">
$(document).ready(function() {{
$('#processBreakdown').DataTable( {{
"responsive": true,
"order": [[ 1, "desc" ]]
}});
$('#notificationBreakdown').DataTable( {{
"responsive": true,
"order": [[ 1, "desc" ]]
}});
$('#locationBreakdown').DataTable( {{
"responsive": true,
"order": [[ 1, "desc" ]]
}});
$('#powerBreakDown').DataTable( {{
"responsive": true,
"order": [[ 1, "desc" ]]
}});
$('#memoryBreakDown').DataTable( {{
"responsive": true,
"order": [[ 1, "desc" ]]
}});
$('#applistBreakdown').DataTable( {{
"responsive": true,
"order": [[ 1, "desc" ]]
}});
}});
</script>
<body>
<div class="container">
<h1>Energy Report - {startDate} to {endDate}<h1>
<h2>Overall Metrics</h2>
{overallBreakdown}
<h2>App list breakdown</h2>
{applistBreakdown}
<h2>Process time breakdown</h2>
{perProcesssBreakdown}
<h2>Core Location</h2>
{locationBreakdown}
<h2>Signal Breakdown</h2>
{signalBreakdown}
<h2>Power Breakdown</h2>
{powerBreakDown}
<h2>Memory Breakdown</h2>
{memoryBreakDown}
</div>
<body>
</html>'''.format(startDate = datetime.fromtimestamp(startTimeInData).strftime("%Y-%m-%d %H:%M"),
endDate = datetime.fromtimestamp(endTimeInData).strftime("%Y-%m-%d %H:%M"),
overallBreakdown = overallBreakdown,
perProcesssBreakdown = perProcesssBreakdown,
signalBreakdown=signalBreakdown,
locationBreakdown = locationBreakdown,
powerBreakDown = powerBreakDown,
memoryBreakDown = memoryBreakDown,
applistBreakdown = applistBreakdown)
f.write(report)
f.close()
db.close()
if __name__ == "__main__":
main(sys.argv[1:])
| 34.152695
| 166
| 0.656877
|
import sys
import sqlite3
from datetime import datetime
from datetime import timedelta
import numpy as np
import argparse
from collections import namedtuple
def contiguous_regions(condition):
d = np.diff(condition)
idx, = d.nonzero()
idx += 1
if condition[0]:
idx = np.r_[0, idx]
if condition[-1]:
idx = np.r_[idx, condition.size]
idx.shape = (-1,2)
return idx
def valid_date(s):
try:
return datetime.strptime(s, "%Y-%m-%d %H:%M")
except ValueError:
msg = "Not a valid date: '{0}'.".format(s)
raise argparse.ArgumentTypeError(msg)
def extractSecondsActiveFromResultSet(rows, activeState):
x = [datetime.fromtimestamp(row[0]) for row in rows]
y = [row[1] for row in rows]
condition = np.abs(y) == activeState
regions = contiguous_regions(condition)
count = timedelta(0)
for reg in regions:
        timeOfRow = x[reg[0]]
if (reg[1] < len(x)):
count += (x[reg[1]] - x[reg[0]])
return count.total_seconds()
def formatTimeDelta(delta):
    hours, remainder = divmod(delta.total_seconds(), 3600)
    minutes, seconds = divmod(remainder, 60)
    return '%d:%02d:%02d' % (hours, minutes, seconds)
def main(argv):
parser=argparse.ArgumentParser()
parser.add_argument('inputFile')
parser.add_argument('-s', "--startDate", help="The Start Date - format YYYY-MM-DD HH:MM", required=False, type=valid_date)
parser.add_argument('-e', "--endDate", help="The End Date - format YYYY-MM-DD HH:MM", required=False, type=valid_date)
args=parser.parse_args()
whereClause = ''
if args.startDate:
whereClause = 'timestamp > {startDate} '.format(startDate = args.startDate.strftime('%s'))
if args.endDate:
if args.startDate:
whereClause += ' AND '
whereClause += ' timestamp < {endDate} '.format(endDate = args.endDate.strftime('%s'))
    db = sqlite3.connect(args.inputFile)
db.row_factory = sqlite3.Row
cursor = db.cursor()
cursor.execute('''SELECT timestamp, Active
FROM PLDisplayAgent_EventPoint_Display {whereClause}
ORDER BY timestamp'''.format(whereClause=('', 'WHERE {0}'.format(whereClause))[len(whereClause) > 0]))
all_rows = cursor.fetchall()
if len(all_rows):
displayOnLength =extractSecondsActiveFromResultSet(all_rows, 1)
else:
displayOnLength = 0
cursor.execute('''SELECT timestamp, state
FROM PLSleepWakeAgent_EventForward_PowerState {whereClause}
ORDER BY timestamp'''.format(whereClause=('', 'WHERE {0}'.format(whereClause))[len(whereClause) > 0]))
all_rows = cursor.fetchall()
if len(all_rows):
deviceOnLength =extractSecondsActiveFromResultSet(all_rows, 0)
else:
deviceOnLength = 0
(startTimeInData, endTimeInData) = (all_rows[0][0], all_rows[-1][0])
overallBreakdown = '''<table class="table table-striped table-bordered display responsive">
<tbody>
<tr><td>Display active for {0}</td></tr>
<tr><td>Device active for {1}</td></tr>
</tbody>
</table>
'''.format(str(timedelta(seconds=displayOnLength)),str(timedelta(seconds=deviceOnLength)))
cursor.execute('''SELECT AppName, AppBundleId, AppBundleVersion, AppIs3rdParty
FROM PLApplicationAgent_EventNone_AllApps''')
all_rows = cursor.fetchall()
appListBody = ''
for row in all_rows:
appListBody += '<tr><td>{0}</td><td>{1}</td><td>{2}</td></tr>\n'.format(row[0], row[1], row[2])
applistBreakdown = '''<table id="applistBreakDown" class="table table-striped table-condensed">
<thead>
<tr>
<td class="col-md-3">App Name</td>
<td>AppBundleId</td>
<td>AppBundleVersion</td>
</tr>
</thead>
<tbody>{appListBody}</tbody>
</table>'''.format(appListBody = appListBody)
cursor.execute('''SELECT processname, SUM(value) AS TotalTime
FROM PLProcessMonitorAgent_EventInterval_ProcessMonitorInterval_Dynamic, PLProcessMonitorAgent_EventInterval_ProcessMonitorInterval
WHERE PLProcessMonitorAgent_EventInterval_ProcessMonitorInterval.ID = PLProcessMonitorAgent_EventInterval_ProcessMonitorInterval_Dynamic.FK_ID
{whereClause}
GROUP BY processname
ORDER BY TotalTime DESC'''.format(whereClause=('', 'AND {0}'.format(whereClause))[len(whereClause) > 0]))
all_rows = cursor.fetchall()
perProcessBreakdownBody = ''
for row in all_rows:
perProcessBreakdownBody += '<tr><td>{0}</td><td>{1}</td></tr>\n'.format(row[0], row[1])
perProcesssBreakdown = '''<table id="processBreakdown" class="table table-striped table-condensed">
<thead>
<tr>
<td class="col-md-3">Process Name</td>
<td>Time (s)</td>
</tr>
</thead>
<tbody>{perProcessBreakdownBody}</tbody>
</table>'''.format(perProcessBreakdownBody = perProcessBreakdownBody)
cursor.execute('''SELECT signalBars, ROUND(CAST(COUNT(*) AS REAL)/total, 2) * 100 AS percent
FROM PLBBAgent_EventPoint_TelephonyActivity
CROSS JOIN
( SELECT COUNT(*) AS total
FROM PLBBAgent_EventPoint_TelephonyActivity
WHERE airplaneMode="off"
{whereClause}
)
WHERE airplaneMode="off" {whereClause}
GROUP BY signalBars'''.format(whereClause=('', 'AND {0}'.format(whereClause))[len(whereClause) > 0]))
all_rows = cursor.fetchall()
signalBody = ''
for row in all_rows:
signalBody += '<tr><td>{0}</td><td>{1}</td></tr>\n'.format(row[0], row[1])
signalBreakdown = '''<table id="signalBreakdown" class="table table-striped table-condensed">
<thead>
<tr>
<td class="col-md-3">Number of Bars</td>
<td>%</td>
</tr>
</thead>
<tbody>{signalBody}</tbody>
</table>'''.format(signalBody = signalBody)
cursor.execute('''SELECT Client, Type, COUNT(Client) AS Count
FROM PLLocationAgent_EventForward_ClientStatus
{whereClause}
GROUP BY Client ORDER BY Count DESC'''.format(whereClause=('', 'WHERE {0}'.format(whereClause))[len(whereClause) > 0]))
all_rows = cursor.fetchall()
locationBody = ''
for row in all_rows:
locationBody += '<tr><td>{0}</td><td>{1}</td><td>{2}</td></tr>\n'.format(row[0], row[1], row[2])
locationBreakdown = '''<table id="locationBreakdown" class="table table-striped table-condensed">
<thead>
<tr>
<td class="col-md-3">Client</td>
<td>Type</td>
<td>Number of Requests</td>
</tr>
</thead>
<tbody>{locationBody}</tbody>
</table>'''.format(locationBody = locationBody)
cursor.execute('''SELECT Name, SUM(Energy) AS TotalEnergy
FROM PLAccountingOperator_Aggregate_RootNodeEnergy, PLAccountingOperator_EventNone_Nodes
WHERE PLAccountingOperator_Aggregate_RootNodeEnergy.NodeID = PLAccountingOperator_EventNone_Nodes.ID
{whereClause}
GROUP BY Name
ORDER BY TotalEnergy DESC'''.format(whereClause=('', 'AND {0}'.format(whereClause))[len(whereClause) > 0]))
all_rows = cursor.fetchall()
perProcessPowerConsumption = ''
for row in all_rows:
perProcessPowerConsumption += '<tr><td>{0}</td><td>{1}</td></tr>\n'.format(row[0], row[1])
powerBreakDown = '''<table id="powerBreakDown" class="table table-striped table-condensed">
<thead>
<tr>
<td class="col-md-3">Node Name</td>
<td>Power Usage</td>
</tr>
</thead>
<tbody>{perProcessPowerConsumption}</tbody>
</table>'''.format(perProcessPowerConsumption = perProcessPowerConsumption)
cursor.execute('''SELECT PLApplicationAgent_EventNone_AllApps.AppName, PLApplicationAgent_EventBackward_ApplicationMemory.AppBundleId, avg(PeakMemory) AS avgpeak
FROM PLApplicationAgent_EventBackward_ApplicationMemory
LEFT JOIN PLApplicationAgent_EventNone_AllApps
ON PLApplicationAgent_EventBackward_ApplicationMemory.AppBundleId = PLApplicationAgent_EventNone_AllApps.AppBundleId
{whereClause}
GROUP BY PLApplicationAgent_EventBackward_ApplicationMemory.AppBundleId
ORDER BY avgpeak DESC'''.format(whereClause=('', '{0}'.format(whereClause))[len(whereClause) > 0]))
all_rows = cursor.fetchall()
perProcessMemPeaks = ''
for row in all_rows:
AppName = row[0] if row[0] else ''
perProcessMemPeaks += '<tr><td>{0}</td><td>{1}</td><td>{2}</td></tr>\n'.format(row[1], AppName.encode('utf-8'), row[2])
memoryBreakDown = '''<table id="memoryBreakDown" class="table table-striped table-condensed">
<thead>
<tr>
<td class="col-md-3">AppBundleId</td>
<td>AppName</td>
<td>Peak Memory</td>
</tr>
</thead>
<tbody>{perProcessMemPeaks}</tbody>
</table>'''.format(perProcessMemPeaks = perProcessMemPeaks)
f = open('report.html', 'w')
report = '''<html>
<link rel="stylesheet" type="text/css" href="https://netdna.bootstrapcdn.com/bootstrap/3.0.3/css/bootstrap.min.css">
<link rel="stylesheet" type="text/css" href="https://cdn.datatables.net/plug-ins/380cb78f450/integration/bootstrap/3/dataTables.bootstrap.css">
<script type="text/javascript" language="javascript" src="https://code.jquery.com/jquery-1.10.2.min.js"></script>
<script type="text/javascript" language="javascript" src="https://cdn.datatables.net/1.10.3/js/jquery.dataTables.min.js"></script>
<script type="text/javascript" language="javascript" src="https://cdn.datatables.net/plug-ins/380cb78f450/integration/bootstrap/3/dataTables.bootstrap.js"></script>
<script type="text/javascript" charset="utf-8">
$(document).ready(function() {{
$('#processBreakdown').DataTable( {{
"responsive": true,
"order": [[ 1, "desc" ]]
}});
$('#notificationBreakdown').DataTable( {{
"responsive": true,
"order": [[ 1, "desc" ]]
}});
$('#locationBreakdown').DataTable( {{
"responsive": true,
"order": [[ 1, "desc" ]]
}});
$('#powerBreakDown').DataTable( {{
"responsive": true,
"order": [[ 1, "desc" ]]
}});
$('#memoryBreakDown').DataTable( {{
"responsive": true,
"order": [[ 1, "desc" ]]
}});
$('#applistBreakdown').DataTable( {{
"responsive": true,
"order": [[ 1, "desc" ]]
}});
}});
</script>
<body>
<div class="container">
<h1>Energy Report - {startDate} to {endDate}<h1>
<h2>Overall Metrics</h2>
{overallBreakdown}
<h2>App list breakdown</h2>
{applistBreakdown}
<h2>Process time breakdown</h2>
{perProcesssBreakdown}
<h2>Core Location</h2>
{locationBreakdown}
<h2>Signal Breakdown</h2>
{signalBreakdown}
<h2>Power Breakdown</h2>
{powerBreakDown}
<h2>Memory Breakdown</h2>
{memoryBreakDown}
</div>
<body>
</html>'''.format(startDate = datetime.fromtimestamp(startTimeInData).strftime("%Y-%m-%d %H:%M"),
endDate = datetime.fromtimestamp(endTimeInData).strftime("%Y-%m-%d %H:%M"),
overallBreakdown = overallBreakdown,
perProcesssBreakdown = perProcesssBreakdown,
signalBreakdown=signalBreakdown,
locationBreakdown = locationBreakdown,
powerBreakDown = powerBreakDown,
memoryBreakDown = memoryBreakDown,
applistBreakdown = applistBreakdown)
f.write(report)
f.close()
db.close()
if __name__ == "__main__":
main(sys.argv[1:])
| true
| true
|
1c41b353fc7188837b5aed9c63eaf270d8a72a87
| 962
|
py
|
Python
|
Python/DFS/med_course_schedule.py
|
animeshramesh/interview-prep
|
882e8bc8b4653a713754ab31a3b08e05505be2bc
|
[
"Apache-2.0"
] | null | null | null |
Python/DFS/med_course_schedule.py
|
animeshramesh/interview-prep
|
882e8bc8b4653a713754ab31a3b08e05505be2bc
|
[
"Apache-2.0"
] | null | null | null |
Python/DFS/med_course_schedule.py
|
animeshramesh/interview-prep
|
882e8bc8b4653a713754ab31a3b08e05505be2bc
|
[
"Apache-2.0"
] | null | null | null |
"""
Trick is to keep track of current path.
Time: O(V+E)
Space: O(V+E)
"""
from collections import defaultdict
class Solution:
def dfs_cycle(self, node):
self.visited[node]=True
self.current_path[node]=True
for neighbour in list(self.graph[node]):
if not self.visited[neighbour]:
if self.dfs_cycle(neighbour):
return True
elif self.current_path[neighbour]:
return True
self.current_path[node]=False
return False
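    # Each prerequisite pair [src, dest] adds the edge src -> dest, so e.g.
    # canFinish(2, [[1, 0]]) is True, while canFinish(2, [[1, 0], [0, 1]])
    # is False because the two courses form a cycle.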
def canFinish(self, numCourses, prerequisites):
self.graph = defaultdict(set)
for p in prerequisites:
src, dest = p
self.graph[src].add(dest)
self.visited = [False]*numCourses
self.current_path = [False]*numCourses
for i in range(numCourses):
if not self.visited[i] and self.dfs_cycle(i):
return False
return True
| 23.463415
| 57
| 0.582121
|
from collections import defaultdict
class Solution:
def dfs_cycle(self, node):
self.visited[node]=True
self.current_path[node]=True
for neighbour in list(self.graph[node]):
if not self.visited[neighbour]:
if self.dfs_cycle(neighbour):
return True
elif self.current_path[neighbour]:
return True
self.current_path[node]=False
return False
def canFinish(self, numCourses, prerequisites):
self.graph = defaultdict(set)
for p in prerequisites:
src, dest = p
self.graph[src].add(dest)
self.visited = [False]*numCourses
self.current_path = [False]*numCourses
for i in range(numCourses):
if not self.visited[i] and self.dfs_cycle(i):
return False
return True
| true
| true
|
1c41b5c3a6b112b35c678de3ddff2b80cb09f9b3
| 7,391
|
py
|
Python
|
codewars/robotic_tatoo_removal.py
|
davidlukac/codekata-python
|
e4a9297fa658d2d36de43b3547353be85c08e990
|
[
"MIT"
] | null | null | null |
codewars/robotic_tatoo_removal.py
|
davidlukac/codekata-python
|
e4a9297fa658d2d36de43b3547353be85c08e990
|
[
"MIT"
] | null | null | null |
codewars/robotic_tatoo_removal.py
|
davidlukac/codekata-python
|
e4a9297fa658d2d36de43b3547353be85c08e990
|
[
"MIT"
] | null | null | null |
# Robotic Tattoo Removal
# http://www.codewars.com/kata/robotic-tattoo-removal
import unittest
from typing import List
def robot(skin_scan: List[List[str]]) -> List[List[str]]:
for row_num, row in enumerate(skin_scan):
for val_key, val in enumerate(row):
if val == "X":
skin_scan[row_num][val_key] = '*'
return skin_scan
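# robot_2 is a non-mutating variant of robot: it maps every row to a new list
# with each 'X' replaced by '*' instead of editing skin_scan in place.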
def robot_2(skin_scan: List[List[str]]) -> List[List[str]]:
return list(map(lambda row: list(map(lambda val: '*' if val == 'X' else val, row)), skin_scan))
class TattooRobotTest(unittest.TestCase):
in_1 = [
[" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " "],
[" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " "],
[" ", " ", "X", "X", " ", " ", " ", "X", "X", " ", " "],
[" ", "X", " ", " ", "X", " ", "X", " ", " ", "X", " "],
[" ", "X", " ", " ", " ", "X", " ", " ", " ", "X", " "],
[" ", "X", " ", " ", " ", "X", " ", " ", " ", "X", " "],
[" ", "X", " ", " ", " ", " ", " ", " ", " ", "X", " "],
[" ", "X", " ", " ", " ", " ", " ", " ", " ", "X", " "],
[" ", "X", " ", " ", " ", " ", " ", " ", " ", "X", " "],
[" ", " ", "X", " ", " ", " ", " ", " ", "X", " ", " "],
[" ", " ", " ", "X", " ", " ", " ", "X", " ", " ", " "],
[" ", " ", " ", " ", "X", " ", "X", " ", " ", " ", " "],
[" ", " ", " ", " ", " ", "X", " ", " ", " ", " ", " "],
[" ", " ", " ", " ", " ", "X", " ", " ", " ", " ", " "],
[" ", " ", " ", " ", " ", " ", " ", " ", " ", "P", " "],
[" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " "]
]
out_1 = [
[" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " "],
[" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " "],
[" ", " ", "*", "*", " ", " ", " ", "*", "*", " ", " "],
[" ", "*", " ", " ", "*", " ", "*", " ", " ", "*", " "],
[" ", "*", " ", " ", " ", "*", " ", " ", " ", "*", " "],
[" ", "*", " ", " ", " ", "*", " ", " ", " ", "*", " "],
[" ", "*", " ", " ", " ", " ", " ", " ", " ", "*", " "],
[" ", "*", " ", " ", " ", " ", " ", " ", " ", "*", " "],
[" ", "*", " ", " ", " ", " ", " ", " ", " ", "*", " "],
[" ", " ", "*", " ", " ", " ", " ", " ", "*", " ", " "],
[" ", " ", " ", "*", " ", " ", " ", "*", " ", " ", " "],
[" ", " ", " ", " ", "*", " ", "*", " ", " ", " ", " "],
[" ", " ", " ", " ", " ", "*", " ", " ", " ", " ", " "],
[" ", " ", " ", " ", " ", "*", " ", " ", " ", " ", " "],
[" ", " ", " ", " ", " ", " ", " ", " ", " ", "P", " "],
[" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " "]
]
in_2 = [
[" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " "],
[" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " "],
[" ", " ", " ", " ", "X", "X", " ", " ", " ", " ", " "],
[" ", " ", " ", "X", " ", " ", "X", " ", " ", " ", " "],
[" ", " ", " ", "X", " ", " ", "X", " ", " ", " ", " "],
[" ", " ", " ", "X", " ", " ", "X", " ", " ", " ", " "],
[" ", " ", " ", "X", " ", " ", "X", " ", " ", " ", " "],
[" ", " ", " ", "X", " ", " ", "X", " ", " ", " ", " "],
[" ", " ", " ", "X", " ", " ", "X", " ", " ", " ", " "],
[" ", " ", " ", "X", " ", " ", "X", " ", " ", " ", " "],
[" ", " ", "X", "X", " ", " ", "X", "X", " ", " ", " "],
[" ", "X", " ", " ", " ", " ", " ", " ", "X", " ", " "],
[" ", "X", " ", " ", " ", " ", " ", " ", "X", " ", " "],
[" ", "X", " ", " ", "X", "X", " ", " ", "X", " ", " "],
[" ", " ", "X", "X", " ", " ", "X", "X", " ", " ", " "],
[" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " "]
]
out_2 = [
[" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " "],
[" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " "],
[" ", " ", " ", " ", "*", "*", " ", " ", " ", " ", " "],
[" ", " ", " ", "*", " ", " ", "*", " ", " ", " ", " "],
[" ", " ", " ", "*", " ", " ", "*", " ", " ", " ", " "],
[" ", " ", " ", "*", " ", " ", "*", " ", " ", " ", " "],
[" ", " ", " ", "*", " ", " ", "*", " ", " ", " ", " "],
[" ", " ", " ", "*", " ", " ", "*", " ", " ", " ", " "],
[" ", " ", " ", "*", " ", " ", "*", " ", " ", " ", " "],
[" ", " ", " ", "*", " ", " ", "*", " ", " ", " ", " "],
[" ", " ", "*", "*", " ", " ", "*", "*", " ", " ", " "],
[" ", "*", " ", " ", " ", " ", " ", " ", "*", " ", " "],
[" ", "*", " ", " ", " ", " ", " ", " ", "*", " ", " "],
[" ", "*", " ", " ", "*", "*", " ", " ", "*", " ", " "],
[" ", " ", "*", "*", " ", " ", "*", "*", " ", " ", " "],
[" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " "]
]
in_3 = [
[" ", "X", " ", " ", " ", "X", " ", " ", " ", "X", " "],
["X", "$", "X", " ", " ", "X", " ", " ", "X", "$", "X"],
[" ", "X", " ", " ", " ", "X", " ", " ", " ", "X", " "],
[" ", " ", " ", " ", " ", "X", " ", " ", " ", " ", " "],
[" ", " ", " ", " ", " ", "X", " ", " ", " ", " ", " "],
[" ", " ", " ", " ", " ", "X", " ", " ", " ", " ", " "],
[" ", " ", " ", " ", " ", "X", " ", " ", " ", " ", " "],
["X", "X", "X", "X", "X", "X", "X", "X", "X", "X", "X"],
["X", "X", "X", "X", "X", "X", "X", "X", "X", "X", "X"],
[" ", " ", " ", " ", " ", "X", " ", " ", " ", " ", " "],
[" ", " ", " ", " ", " ", "X", " ", " ", " ", " ", " "],
[" ", " ", " ", " ", " ", "X", " ", " ", " ", " ", " "],
[" ", " ", " ", " ", " ", "X", " ", " ", " ", " ", " "],
[" ", "X", " ", " ", " ", "X", " ", " ", " ", "X", " "],
["X", "$", "X", " ", " ", "X", " ", " ", "X", "$", "X"],
[" ", "X", " ", " ", " ", "X", " ", " ", " ", "X", " "]
]
out_3 = [
[" ", "*", " ", " ", " ", "*", " ", " ", " ", "*", " "],
["*", "$", "*", " ", " ", "*", " ", " ", "*", "$", "*"],
[" ", "*", " ", " ", " ", "*", " ", " ", " ", "*", " "],
[" ", " ", " ", " ", " ", "*", " ", " ", " ", " ", " "],
[" ", " ", " ", " ", " ", "*", " ", " ", " ", " ", " "],
[" ", " ", " ", " ", " ", "*", " ", " ", " ", " ", " "],
[" ", " ", " ", " ", " ", "*", " ", " ", " ", " ", " "],
["*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*"],
["*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*"],
[" ", " ", " ", " ", " ", "*", " ", " ", " ", " ", " "],
[" ", " ", " ", " ", " ", "*", " ", " ", " ", " ", " "],
[" ", " ", " ", " ", " ", "*", " ", " ", " ", " ", " "],
[" ", " ", " ", " ", " ", "*", " ", " ", " ", " ", " "],
[" ", "*", " ", " ", " ", "*", " ", " ", " ", "*", " "],
["*", "$", "*", " ", " ", "*", " ", " ", "*", "$", "*"],
[" ", "*", " ", " ", " ", "*", " ", " ", " ", "*", " "]
]
def test(self):
self.assertEqual(robot(self.in_1), self.out_1)
self.assertEqual(robot_2(self.in_1), self.out_1)
    def test_second(self):
self.assertEqual(robot(self.in_2), self.out_2)
self.assertEqual(robot_2(self.in_2), self.out_2)
    def test_third(self):
        self.assertEqual(robot(self.in_3), self.out_3)
        self.assertEqual(robot_2(self.in_3), self.out_3)


if __name__ == '__main__':
    unittest.main()
| 49.273333
| 99
| 0.111487
|
import unittest
from typing import List
def robot(skin_scan: List[List[str]]) -> List[List[str]]:
for row_num, row in enumerate(skin_scan):
for val_key, val in enumerate(row):
if val == "X":
skin_scan[row_num][val_key] = '*'
return skin_scan
def robot_2(skin_scan: List[List[str]]) -> List[List[str]]:
return list(map(lambda row: list(map(lambda val: '*' if val == 'X' else val, row)), skin_scan))
class TattooRobotTest(unittest.TestCase):
in_1 = [
[" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " "],
[" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " "],
[" ", " ", "X", "X", " ", " ", " ", "X", "X", " ", " "],
[" ", "X", " ", " ", "X", " ", "X", " ", " ", "X", " "],
[" ", "X", " ", " ", " ", "X", " ", " ", " ", "X", " "],
[" ", "X", " ", " ", " ", "X", " ", " ", " ", "X", " "],
[" ", "X", " ", " ", " ", " ", " ", " ", " ", "X", " "],
[" ", "X", " ", " ", " ", " ", " ", " ", " ", "X", " "],
[" ", "X", " ", " ", " ", " ", " ", " ", " ", "X", " "],
[" ", " ", "X", " ", " ", " ", " ", " ", "X", " ", " "],
[" ", " ", " ", "X", " ", " ", " ", "X", " ", " ", " "],
[" ", " ", " ", " ", "X", " ", "X", " ", " ", " ", " "],
[" ", " ", " ", " ", " ", "X", " ", " ", " ", " ", " "],
[" ", " ", " ", " ", " ", "X", " ", " ", " ", " ", " "],
[" ", " ", " ", " ", " ", " ", " ", " ", " ", "P", " "],
[" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " "]
]
out_1 = [
[" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " "],
[" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " "],
[" ", " ", "*", "*", " ", " ", " ", "*", "*", " ", " "],
[" ", "*", " ", " ", "*", " ", "*", " ", " ", "*", " "],
[" ", "*", " ", " ", " ", "*", " ", " ", " ", "*", " "],
[" ", "*", " ", " ", " ", "*", " ", " ", " ", "*", " "],
[" ", "*", " ", " ", " ", " ", " ", " ", " ", "*", " "],
[" ", "*", " ", " ", " ", " ", " ", " ", " ", "*", " "],
[" ", "*", " ", " ", " ", " ", " ", " ", " ", "*", " "],
[" ", " ", "*", " ", " ", " ", " ", " ", "*", " ", " "],
[" ", " ", " ", "*", " ", " ", " ", "*", " ", " ", " "],
[" ", " ", " ", " ", "*", " ", "*", " ", " ", " ", " "],
[" ", " ", " ", " ", " ", "*", " ", " ", " ", " ", " "],
[" ", " ", " ", " ", " ", "*", " ", " ", " ", " ", " "],
[" ", " ", " ", " ", " ", " ", " ", " ", " ", "P", " "],
[" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " "]
]
in_2 = [
[" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " "],
[" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " "],
[" ", " ", " ", " ", "X", "X", " ", " ", " ", " ", " "],
[" ", " ", " ", "X", " ", " ", "X", " ", " ", " ", " "],
[" ", " ", " ", "X", " ", " ", "X", " ", " ", " ", " "],
[" ", " ", " ", "X", " ", " ", "X", " ", " ", " ", " "],
[" ", " ", " ", "X", " ", " ", "X", " ", " ", " ", " "],
[" ", " ", " ", "X", " ", " ", "X", " ", " ", " ", " "],
[" ", " ", " ", "X", " ", " ", "X", " ", " ", " ", " "],
[" ", " ", " ", "X", " ", " ", "X", " ", " ", " ", " "],
[" ", " ", "X", "X", " ", " ", "X", "X", " ", " ", " "],
[" ", "X", " ", " ", " ", " ", " ", " ", "X", " ", " "],
[" ", "X", " ", " ", " ", " ", " ", " ", "X", " ", " "],
[" ", "X", " ", " ", "X", "X", " ", " ", "X", " ", " "],
[" ", " ", "X", "X", " ", " ", "X", "X", " ", " ", " "],
[" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " "]
]
out_2 = [
[" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " "],
[" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " "],
[" ", " ", " ", " ", "*", "*", " ", " ", " ", " ", " "],
[" ", " ", " ", "*", " ", " ", "*", " ", " ", " ", " "],
[" ", " ", " ", "*", " ", " ", "*", " ", " ", " ", " "],
[" ", " ", " ", "*", " ", " ", "*", " ", " ", " ", " "],
[" ", " ", " ", "*", " ", " ", "*", " ", " ", " ", " "],
[" ", " ", " ", "*", " ", " ", "*", " ", " ", " ", " "],
[" ", " ", " ", "*", " ", " ", "*", " ", " ", " ", " "],
[" ", " ", " ", "*", " ", " ", "*", " ", " ", " ", " "],
[" ", " ", "*", "*", " ", " ", "*", "*", " ", " ", " "],
[" ", "*", " ", " ", " ", " ", " ", " ", "*", " ", " "],
[" ", "*", " ", " ", " ", " ", " ", " ", "*", " ", " "],
[" ", "*", " ", " ", "*", "*", " ", " ", "*", " ", " "],
[" ", " ", "*", "*", " ", " ", "*", "*", " ", " ", " "],
[" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " "]
]
in_3 = [
[" ", "X", " ", " ", " ", "X", " ", " ", " ", "X", " "],
["X", "$", "X", " ", " ", "X", " ", " ", "X", "$", "X"],
[" ", "X", " ", " ", " ", "X", " ", " ", " ", "X", " "],
[" ", " ", " ", " ", " ", "X", " ", " ", " ", " ", " "],
[" ", " ", " ", " ", " ", "X", " ", " ", " ", " ", " "],
[" ", " ", " ", " ", " ", "X", " ", " ", " ", " ", " "],
[" ", " ", " ", " ", " ", "X", " ", " ", " ", " ", " "],
["X", "X", "X", "X", "X", "X", "X", "X", "X", "X", "X"],
["X", "X", "X", "X", "X", "X", "X", "X", "X", "X", "X"],
[" ", " ", " ", " ", " ", "X", " ", " ", " ", " ", " "],
[" ", " ", " ", " ", " ", "X", " ", " ", " ", " ", " "],
[" ", " ", " ", " ", " ", "X", " ", " ", " ", " ", " "],
[" ", " ", " ", " ", " ", "X", " ", " ", " ", " ", " "],
[" ", "X", " ", " ", " ", "X", " ", " ", " ", "X", " "],
["X", "$", "X", " ", " ", "X", " ", " ", "X", "$", "X"],
[" ", "X", " ", " ", " ", "X", " ", " ", " ", "X", " "]
]
out_3 = [
[" ", "*", " ", " ", " ", "*", " ", " ", " ", "*", " "],
["*", "$", "*", " ", " ", "*", " ", " ", "*", "$", "*"],
[" ", "*", " ", " ", " ", "*", " ", " ", " ", "*", " "],
[" ", " ", " ", " ", " ", "*", " ", " ", " ", " ", " "],
[" ", " ", " ", " ", " ", "*", " ", " ", " ", " ", " "],
[" ", " ", " ", " ", " ", "*", " ", " ", " ", " ", " "],
[" ", " ", " ", " ", " ", "*", " ", " ", " ", " ", " "],
["*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*"],
["*", "*", "*", "*", "*", "*", "*", "*", "*", "*", "*"],
[" ", " ", " ", " ", " ", "*", " ", " ", " ", " ", " "],
[" ", " ", " ", " ", " ", "*", " ", " ", " ", " ", " "],
[" ", " ", " ", " ", " ", "*", " ", " ", " ", " ", " "],
[" ", " ", " ", " ", " ", "*", " ", " ", " ", " ", " "],
[" ", "*", " ", " ", " ", "*", " ", " ", " ", "*", " "],
["*", "$", "*", " ", " ", "*", " ", " ", "*", "$", "*"],
[" ", "*", " ", " ", " ", "*", " ", " ", " ", "*", " "]
]
def test(self):
self.assertEqual(robot(self.in_1), self.out_1)
self.assertEqual(robot_2(self.in_1), self.out_1)
    def test_second(self):
self.assertEqual(robot(self.in_2), self.out_2)
self.assertEqual(robot_2(self.in_2), self.out_2)
    def test_third(self):
        self.assertEqual(robot(self.in_3), self.out_3)
        self.assertEqual(robot_2(self.in_3), self.out_3)


if __name__ == '__main__':
    unittest.main()
| true
| true
|
1c41b5c3e1f7c2822909254a498a98b9ade8a129
| 2,083
|
py
|
Python
|
NLP/UNIMO/src/utils/args.py
|
zhangyimi/Research
|
866f91d9774a38d205d6e9a3b1ee6293748261b3
|
[
"Apache-2.0"
] | 1,319
|
2020-02-14T10:42:07.000Z
|
2022-03-31T15:42:18.000Z
|
NLP/UNIMO/src/utils/args.py
|
green9989/Research
|
94519a72e7936c77f62a31709634b72c09aabf74
|
[
"Apache-2.0"
] | 192
|
2020-02-14T02:53:34.000Z
|
2022-03-31T02:25:48.000Z
|
NLP/UNIMO/src/utils/args.py
|
green9989/Research
|
94519a72e7936c77f62a31709634b72c09aabf74
|
[
"Apache-2.0"
] | 720
|
2020-02-14T02:12:38.000Z
|
2022-03-31T12:21:15.000Z
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Arguments for configuration."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
import argparse
def str2bool(v):
"""str to bool"""
    # because argparse does not support parsing "True"/"False" strings as
    # Python booleans directly
return v.lower() in ("true", "t", "1")
class ArgumentGroup(object):
"""argument group"""
def __init__(self, parser, title, des):
self._group = parser.add_argument_group(title=title, description=des)
def add_arg(self, name, type, default, help, positional_arg=False, **kwargs):
"""add argument"""
prefix = "" if positional_arg else "--"
type = str2bool if type == bool else type
self._group.add_argument(
prefix + name,
default=default,
type=type,
help=help + ' Default: %(default)s.',
**kwargs)
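# Hypothetical usage sketch for ArgumentGroup:
#   parser = argparse.ArgumentParser()
#   model_g = ArgumentGroup(parser, "model", "model configuration")
#   model_g.add_arg("use_cuda", bool, True, "Whether to use GPU.")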
def print_arguments(args):
"""print arguments"""
print('----------- Configuration Arguments -----------')
for arg, value in sorted(six.iteritems(vars(args))):
print('%s: %s' % (arg, value))
print('------------------------------------------------')
def inv_arguments(args):
"""inverse arguments"""
print('[Warning] Only keyword argument type is supported.')
args_list = []
for arg, value in sorted(six.iteritems(vars(args))):
args_list.extend(['--' + str(arg), str(value)])
return args_list
| 33.063492
| 81
| 0.647144
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
import argparse
def str2bool(v):
return v.lower() in ("true", "t", "1")
class ArgumentGroup(object):
def __init__(self, parser, title, des):
self._group = parser.add_argument_group(title=title, description=des)
def add_arg(self, name, type, default, help, positional_arg=False, **kwargs):
prefix = "" if positional_arg else "--"
type = str2bool if type == bool else type
self._group.add_argument(
prefix + name,
default=default,
type=type,
help=help + ' Default: %(default)s.',
**kwargs)
def print_arguments(args):
print('----------- Configuration Arguments -----------')
for arg, value in sorted(six.iteritems(vars(args))):
print('%s: %s' % (arg, value))
print('------------------------------------------------')
def inv_arguments(args):
print('[Warning] Only keyword argument type is supported.')
args_list = []
for arg, value in sorted(six.iteritems(vars(args))):
args_list.extend(['--' + str(arg), str(value)])
return args_list
| true
| true
|
1c41b6346b45ffe3f201d87ca218fc6252c634a6
| 36,353
|
py
|
Python
|
HoganPA04.py
|
MicahHogan/HoganPythonPA04
|
62f3e32acba921aae9795e9a109177aac242311c
|
[
"Apache-2.0"
] | null | null | null |
HoganPA04.py
|
MicahHogan/HoganPythonPA04
|
62f3e32acba921aae9795e9a109177aac242311c
|
[
"Apache-2.0"
] | null | null | null |
HoganPA04.py
|
MicahHogan/HoganPythonPA04
|
62f3e32acba921aae9795e9a109177aac242311c
|
[
"Apache-2.0"
] | null | null | null |
#Week 4 Programming Assignment
#North Seattle College, CSC 110
#Author: Micah Hogan
#Email: hogan.micah.j@gmail.com
#Constants
#Initial User Input
name = str(input("Please select a name: "))
print("")
mount = str(input("Please select an animal to ride: "))
print("")
weapon = str(input("Please select a weapon: "))
print("")
role = str(input("Please select a role; (Sorcerer), (Brawler), or (Priest): "))
print("")
while not(role == "Sorcerer" or role == "Brawler" or role == "Priest"):
print ("Please select (Sorcerer), (Brawler), or (Priest).")
print("")
role = str(input("Please select a role; (Sorcerer), (Brawler), or (Priest): "))
print("")
race = str(input("Please select a race; (Human), (Elf), or (Troll): "))
print("")
while not(race == "Human" or race == "Elf" or race == "Troll"):
print("Please select (Human), (Elf), or (Troll).")
print("")
race = str(input("Please select a race; (Human), (Elf), or (Troll): "))
print("")
#Main function
#This function guides the user through a choose-your-own adventure style story
#The story has different outcomes based on the user's choices
def main():
#Output
#This is introductory flavor text based on the constants created by initial user input
#This text creates the setting and style for the story
#After the introductory flavor text, this function calls the first function which requires a decision from the user, wake_up_groggy
if race == "Human":
print("You have selected "+race+". While perhaps not the most exciting choice, it does sound safe.")
print("")
elif race == "Elf":
print("You have selected "+race+". You begin to feel as one with the Earth as a tiny charm of hummingbirds works together in concert to lower a crown made of thistle and ivy upon your head.")
print("")
else:
print("You have selected "+race+". Your features and appendages begin to swell as your skin thickens and becomes cracked and leathery with a distinct green hue.")
print("")
if role == "Sorcerer":
print("You have selected "+role+". You put on your pointy hat, mutter something unintelligble and wiggle the fingers attached to your gangly arms poking out of the billowy sleeves of your long, flowing robe.")
print("")
elif role == "Brawler":
print("You have selected "+role+". You clench your jaws and cock your head to the side as you crack your knuckles loudly.")
print("")
else:
print("You have selected "+role+". A heavenly aura envelops you as your hands begin to pulse with a glowing warmth.")
print("")
print("Welcome to Arcana, "+name+", the land of fantasy and adventure!")
print("")
print("So, "+name+", you've been training as a "+role+"? We'll see if that helps you while you're here... In any case, be sure to always keep your "+weapon+" with you. It's your only means of protecting yourself.")
print("")
print("Lastly, "+name+", I've rounded up the largest, strongest and most well-trained "+mount+" I could find for you to ride. It's being fitted in the stables for a saddle right now. Have fun, and happy adventuring, "+ name+"!")
print("")
wake_up_groggy()
#This function begins the adventure story, and serves as the restarting function if the user fails to win the game
#This function invites the user to go for a ride on their mount after waking up groggy and requires an answer of (yes) or (no) from the user
#This function calls one of two functions based on the answer: upset_stomach_story and breakfast_invitation, each of which begin either a questline or another decision
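#Note: replays re-enter wake_up_groggy through recursive calls rather than a loop, so an extremely long session could eventually hit Python's default recursion limit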
def wake_up_groggy():
print("")
print("You slowly begin to awaken, your body sore from the prior evening, but your memory is fuzzy. You're unsure how you've arrived in the corner of the bazaar you've woken up in, bustling with the sounds of morning commerce. It's clear to you that you've spent the night here, but you have no memory of arriving the prior evening. You check your knapsack for your "+weapon+", relieved to find it in it's place. A noise behind you startles you, and you whip your head around only to find yourself face-to-face with your trusty "+mount+", who proceeds to sloppily and lovingly lick your face. You sense that it is eager for some exercise as it nuzzles it's saddle.")
print("")
ride_choice = str(input("Would you like to go for a ride? "))
while not(ride_choice == "yes" or ride_choice == "no"):
print("")
print ("Please choose (yes) or (no).")
print("")
ride_choice = str(input("Would you like to go for a ride? "))
if ride_choice == "yes":
print("")
print("You laugh as your "+mount+" tickles your nose with his tongue. ‘Ok, ok. I get it! You want to go for a ride and get a little exercise before breakfast, eh?’ You swing yourself up on the saddle and grab the reins with one hand as you steady yourself with the other, your "+mount+" excitedly racing off underneath you as the two of you escape from view over the horizon.")
print("")
print("Your stomach begins to churn with the steady, rhythmic bouncing of your "+mount+"’s pace, and you yank hard on one rein, curtailing the morning sprint and redirecting your heading back towards the open-air market where you awoke earlier. ‘Alright, ok, there you are. Good boy, let’s go now. Let’s go get some breakfast.’")
print("")
print("The jostling ride continues back to where you began, and the combination of the bouncing and your empty stomach is making you feel a bit ill. As you dismount and hitch up your "+mount+" you hear a squeaky voice behind you.")
upset_stomach_story()
else:
print("")
breakfast_invitation()
#This function invites the user to have breakfast with a kind elderly woman and requires an answer of (yes) or (no) from the user
#This function calls one of two functions based on the user's answer: breakfast_story or upset_stomach_story, each of which begin either a questline or another decision
def breakfast_invitation ():
print("")
choice = input("A kind elderly woman crooks a wrinkled finger toward you as you look around, rubbing your eyes. Behind her, you see a bowl full of speckled brown eggs sitting on the counter next to bacon sizzling on a griddle, a table set with bread, rolls, butter and jam and a pitcher of milk and orange juice. She smiles and asks you in a gravelly voice, 'Would you care to join me for breakfast?' ")
print("")
while not(choice == "yes" or choice == "no"):
print ("Please choose (yes) or (no).")
print("")
choice = input("'Would you care to join me for breakfast?' ")
print("")
if choice == "yes":
breakfast_story()
else:
upset_stomach_story()
#This function introduces the user to the questline "Slay the Grenwald"
#This function invites the user to play a game of rock, paper, scissors with the kind elderly woman who invited the user to breakfast
#This function determines who will do the dishes in the story and requires an answer of (rock), (paper), or (scissors) from the user
#This function calls one of three functions based on the user's answer: rock_story, paper_story or scissors_story, each of which have 3 different endings to the "Slay the Grenwald" questline
def breakfast_story ():
print("")
choice = input("You sit down at the wizened old woman's table and enjoy a hearty breakfast. As the two of you finish sopping up egg yolk with bits of bread from your plates, you overhear two young boys whispering excitedly as they scurry past you through the open-air market. 'Did you hear about the Grenwald last night? It seems she's taken another, and I understand the King has issued a ransom for her head!' The old woman laughs as your gaze follows the young boys. 'So, you fancy yourself an adventurer, do you? The last "+race+" that set out to slay the Grenwald never came back... Although, it is quite the handsome reward that the King is offering! Anyways, before you get carried away with all of that malarkey, you owe me a game of Rock, paper, scissors,' she said with a twinkle in her eye. 'Loser does the dishes!' Which would you like to play: rock, paper, or scissors? ")
print("")
while not(choice == "rock" or choice == "paper" or choice == "scissors"):
print ("Please choose (rock), (paper) or (scissors).")
print("")
choice = input("(Rock), (paper), (scissors)? ")
print("")
if choice == "rock":
rock_story()
elif choice == "paper":
paper_story()
else:
scissors_story()
print("")
#This function introduces the user to a kind stranger who notices that the user has an upset stomach
#This function invites the user to choose a red pill or a blue pill to treat their upset stomach and requires an answer of (red) or (blue) from the user
def upset_stomach_story ():
print("")
choice = str(input("A well-dressed and fidgety gnome who speaks excitedly with his hands approaches you, 'Well aren’t you a funny-looking "+race+"! Or maybe you’re just not feeling well? Anyways, why don’t you take a look at what I have up my sleeve, one of these is sure to make you feel better!' You peer at him curiously as he furtively digs in his pockets. 'Erm, ah, they were just right here...' he mutters to himself. 'Aha! Here they are! Your choice, one pill ought to due, red or blue?' he proclaims as he thrusts forward both palms, each proudly displaying a healthy sized pill: one red and one blue. Would you like the red pill or the blue pill? "))
print("")
while not (choice == "red" or choice == "blue"):
print("Please choose (red) or (blue).")
print("")
choice = str(input("Would you like the red pill or the blue pill? "))
print("")
if choice == "red":
print("You place your open palm in front of the red pill, and the squeaky-voiced gnome drops the pill in your open hand, 'Quite brave, didn't even ask what it is! You remind me of my aunt. She was a mighty "+role+", and I bet you could be a "+role+" someday also, if you trained hard enough. Anyways, that's another story for another time. I hope the pill suits you well!'")
print("")
print("Immediately after ingesting the red pill your head begins to swim. Your consciousness floats away until you are unsure what is real and what is make-believe. You find yourself at the foot of a castle, where a young man has set up a shell game, 'Watch the pea, use your eyes, get it right and win a prize!' he exclaims as you watch a passerby wager a coin on a round of the game. The huckster slides a pea under one of 3 empty half-walnut shells and lays them flat on a board in front of him. Slowly at first, and then ever-faster, he nimbly and dextrously maneuvers the shells beneath his fingers, faster and faster until all you can see is a blur of hands, all while they lay flat on the board in front of him. At once he stops. The passerby grins sheepishly and half-heartedly points to one of the shells. The showman flashes a wide, toothy smile as he flips up the empty shell, revealing that this was not the winner. With a flourish, he reshuffles the shells in front of him and gestures towards you, 'And you, charming young "+race+" with the smile of a "+role+". How about a free turn at my game? What do you say? Nothing to lose, don't you see?'")
print("")
red_pill_story()
else:
print("Your gaze moves to the blue pill, and before you can get a word out edge-wise, the sneaky little sucker tosses it in your mouth with a gleeful cackle, 'Roses are red, this pill is blue. Not sure what's in it, but let's hope it's good for you!'.")
print("")
print("Your reality fades and mutes as you shift dimensions. You hear a loud, audible *SNAP* and your teeth flash cold momentarily. Your eyelids flit and flutter as you come to, making out your surroundings. Before you is a path, and on either side are thick blackberry bushes surrounding tall, wide-based evergreen trees.")
print("")
print("As you walk down the path, you come across an old man resting under a tree. 'Could you do me a favor young whippersnapper?' he asks you, 'I need your help. My daughter has gone on ahead of me, and I told her I would catch up, but I am injured. She needs your protection. After she left, word was sent to me by a messenger that a gang of thieves is laying in wait for her, just a short ways past where we are now. There is a 3-way fork in the road, and I'm not sure which path she has chosen. You'll need to guess correctly in order to find her. Please, "+name+", you're our only hope! I have my own kingdom not many days travel from here; should you save my daughter the Princess I shall reward you handsomely.'")
print("")
blue_pill_story()
#This function features three different endings to the "Slay the Grenwald" questline
#This function wins the game if the user has previously selected "Sorcerer" as role and "rock" as option in game with kind elderly woman
#This function loses and restarts the game if the user has previously selected either "Brawler" or "Priest" as role and "rock" as option in game with kind elderly woman
def rock_story():
if role == "Sorcerer":
print("You set off to find the Grenwald with a spring in your step after helping the kind elderly woman with the breakfast dishes. You find the behemoth alone on top of a rocky mesa. The beast squares off against you and charges. You use your powers as a "+role+" to cause an avalanche of rocks to fall on the Grenwald's head, stopping the charging monster dead in it's tracks. You have slain the Grenwald!")
print("")
congratulations()
elif role == "Brawler":
print("Your loins warm with anticipation, you set out on your mission to locate the Grenwald after helping the kind elderly woman with the breakfast dishes. Soon, you find the gargantuan matriarch as she is sleeping. You find a perch above her, and roll a large rock over the precipice to drop on her head, in hopes of incapacitating her. As you are rolling the rock into place to drop on her head, another, larger rock falls from above you, and hits you on the head. You begin to lose consciousness from internal hemorrhaging.")
print("")
wake_up_groggy()
else:
print("After you finish doing the dishes for the kind elderly woman, you head off to vanquish the Grenwald. After a time, you come to a rock quarry. There, you find the Grenwald hard at work, carving large chunks of granite out of the walls of the quarry and smashing them into smaller, more uniform pieces. You attempt to take her by surprise, but her reflexes catch you off guard, and she whips around as you try to sneak up on her. She has you dead to rights. She picks up two large slabs of granite, each larger than you, and slowly squeezes you between them. You begin to lose consciousness from a collapsed lung.")
print("")
wake_up_groggy()
#This function features three different endings to the "Slay the Grenwald" questline
#This function wins the game if the user has previously selected "Brawler" as role and "paper" as option in game with kind elderly woman
#This function loses and restarts the game if the user has previously selected either "Sorcerer" or "Priest" as role and "paper" as option in game with kind elderly woman
def paper_story():
if role == "Sorcerer":
print("You finish helping the kind elderly woman with the breakfast dishes, then set out to slay the Grenwald. After several days travel, you locate the mythical beast. You cast a spell from afar, and it does nothing but anger her. Perhaps a "+role+" isn't meant to be slaying the Grenwald. You begin to faint and lose consciousness from low blood pressure.")
print("")
wake_up_groggy()
elif role == "Brawler":
print("After finishing helping the kind elderly woman with the breakfast dishes, you set out to slay the Grenwald. It takes you several days to find her, but when you finally do, you've had time to think of a clever plan. Although you are strong, you conclude that she is much stronger, and decide to use your brains instead of your brawn to defeat her. You bring out a storybook, and while she is resting after a meal, you begin to read her stories from a hiding place, so that she can only hear you and not see you. She grows sleepy from listening to the stories on a full belly, and is soon snoring loudly. You take this opportunity to take her life mercilessly so that you may collect the bounty and protect the citizens from her wrath. You have slain the Grenwald!")
print("")
congratulations()
else:
print("After doing the breakfast dishes with the kind elderly woman, you begin your mission to slay the Grenwald. After several days of hunting, you have the opportunity to come face-to-face with her. You summon your power of the "+role+" and attempt to heal the evil out of her, but this only enrages her. She mauls your face. You begin to lose consciousness due to loss of blood.")
print("")
wake_up_groggy()
#This function features three different endings to the "Slay the Grenwald" questline
#This function wins the game if the user has previously selected "Priest" as role and "scissors" as option in game with kind elderly woman
#This function loses and restarts the game if the user has previously selected either "Sorcerer" or "Brawler" as role and "scissors" as option in game with kind elderly woman
def scissors_story():
if role == "Sorcerer":
print("You and the kind elderly woman finish the dishes from breakfast, and then you ignore her advice and go off to find the Grenwald. When you come across the monster alone in an open field, you charge towards her. With a swat of her tail you fly high into the air, and fall straight into her throat. You begin to lose consciousness from the smell of her innards.")
print("")
wake_up_groggy()
elif role == "Brawler":
print("You finish up doing the breakfast dishes with the kind elderly woman and head out to find the Grenwald. You keep your "+weapon+" drawn and are ready for a battle at any moment, but when you happen upon her you find that her sheer brute strength is no match for any "+role+", let alone one of your prowess. She toys with you like a cat with a mouse, then swats your feet out from underneath you. You begin to lose consciousness from nerve damage in your spinal cord.")
print("")
wake_up_groggy()
else:
print("You and the kind elderly woman finish the breakfast dishes together before you set out to slay the Grenwald. You come across her after days of searching out in the countryside and she bellows '"+name+"! I have been waiting for you!' You shiver in anticipation of a battle to end all battles. Your hands glow with a priestly aura as you summon the power of the deities above. You charge at the Grenwald and strike her once, twice, three times with your "+weapon+". She lies still. You have slain the Grenwald!")
print("")
congratulations()
#This function introduces the user to the "Find the Treasure" questline, invites the user to play a game of Three Card Monte and requires an answer of (1), (2) or (3) from the user
#This function features three different questline story endings for each choice of card 1, 2, or 3 for a total of 9 questline story endings
def red_pill_story():
try:
        number_choice = int(input("What'll it be, 1, 2, or 3? "))
print("")
while not(number_choice == 1 or number_choice == 2 or number_choice == 3):
print("Please choose (1), (2) or (3).")
print("")
            number_choice = int(input("What'll it be, 1, 2 or 3? "))
            print("")
print("The shell game host laughs uproariously as he shows you your empty shell, 'Lady luck may not be on your side just yet, but don't let that stop you from finding the treasure inside the castle!' he intones as he gestures with a grand sweep of his hand toward the entrance just behind him.")
print("")
#This section of the function features three different questline story endings for the "Find the Treasure" questline
#This section of the function wins the game if the user has previously selected "Sorcerer" as role and 3 as the card in the game of Three Card Monte
#This section of the function loses the game if the user has previously selected "Sorcerer" as role and either 1 or 2 as the card in the game of Three Card Monte
if role == "Sorcerer":
if number_choice == 1:
print("Following the confidence man's lead, you enter the castle in search of the storied treasure. You hear the creak of a door opening in the room to your left. Entering the room, you are temporarily blinded by a flash of light as a torch is ignited. 'Ewww! It's a "+race+" and it's ALIVE! Quick, get the switch! This one smells like a "+role+"!' You count", number_punishments_angels_singing(number_choice), "lashings before you collapse in pain, unconscious.")
print("")
wake_up_groggy()
elif number_choice == 2:
print("'Treasure, eh?' you think to yourself as you step through the threshold into the castle, 'I could always use a little extra pocket money.' You see a set of stairs leading up from the entryway, and make your way up the first flight. '"+name+"...' You hear a ghostly whisper from behind a curtain in the corner. Slowly, you make your way to the curtain, and pull it back. '"+name+"... Thank you for joining us. I hope you weren't planning on leaving any time soon!' A sharp blow from behind drops you to the floor. You count", number_punishments_angels_singing(number_choice), "lashings before you lose consciousness.")
print("")
wake_up_groggy()
else:
print("Upon entering the castle, a giant ogre is standing in front of you, ready to fight. 'Fee fi, fo fum. I smell the blood of someone dumb!' Although you are unprepared for battle, your quick reactions and prowess with your "+weapon+" make for a short fight against the ogre. Once he is vanquished, you rummage through his knapsack and find the diamond-crusted golden tiara. You have found the treasure! You count", number_punishments_angels_singing(number_choice), "angels singing your praises.")
print("")
congratulations()
#This section of the function features three different questline story endings for the "Find the Treasure" questline
#This section of the function wins the game if the user has previously selected "Brawler" as role and 1 as the card in the game of Three Card Monte
#This section of the function loses the game if the user has previously selected "Brawler" as role and either 2 or 3 as the card in the game of Three Card Monte
elif role == "Brawler":
if number_choice == 1:
print("Carefully withdrawing your "+weapon+", you enter the castle. Instantly, you are besieged by a wild pack of angry dwarves. 'You'll never have our treasure of precious gems you smelly "+role+"! You'll never make it out alive!' Coolly and calmly you dispatch the horde of bearded bullies, and as you stop to catch your breath, you notice a glint of gold peeking out from under the rug in front of you. Sweeping away the rug, you uncover a pile of precious gems and metals. You have found the treasure! You count", number_punishments_angels_singing(number_choice), "angels singing your praises.")
print("")
congratulations()
elif number_choice == 2:
print("As you enter the castle you notice a trap door in the far corner. A thick iron hoop is attached to the wooden door, which you use to pull the hatch open slowly as it emits a low groan. Stairs lead below you, and you hear a small voice calling out in the distance, 'Help. Help me. I'm just a little lonely "+race+" down here all by myself. My mother is too busy training to be a "+role+" to take care of me. Oh, someone please help me, and all of this treasure down here will be yours!' You race down the stairs, eager to lend a helping hand and line your pockets. 'Sucker,' snickers a deep sinister voice behind you as you come around the corner. The last thing you see is a flash of light as you feel a thick thud on the back of your head. You count", number_punishments_angels_singing(number_choice), " tiny birds flying around your head before you lose consciousness.")
print("")
wake_up_groggy()
else:
print("You storm the entrance of the castle, your "+weapon+" drawn and your mind fresh with the tactics passed down from generations of "+role+"s before you. There, in front of you, lies the treasure. It is within your reach. But, just before the treasure sits a small table laden with dozens and dozens of donuts. You decide it couldn't possibly hurt to stop and have a quick snack before collecting the treasure. You count", number_punishments_angels_singing(number_choice), "donuts put away before you doze off. Mmmmm, donuts.")
print("")
wake_up_groggy()
#This section of the function features three different questline story endings for the "Find the Treasure" questline
#This section of the function wins the game if the user has previously selected "Priest" as role and 2 as the card in the game of Three Card Monte
#This section of the function loses the game if the user has previously selected "Priest" as role and either 1 or 3 as the card in the game of Three Card Monte
else:
if number_choice == 1:
print("You enter the castle with your senses on full alert. You pass through the foyer into the courtyard, lush, green and lit by the sun. At the top of the tallest tree there is a platform laden with jewels, gold, and silver. You have found the treasure. Now, it's just a matter of getting to it. You begin to climb the tree when you feel something whistle past your ear, narrowly missing you. Seconds later, you hear a thud above you and you quickly look up to find an arrow quivering in the trunk of the tree above your head. You're being shot at by guards with bows. One sinks into your thigh, and the pain is unbearable. You count", number_punishments_angels_singing(number_choice), "arrows sunk into your body before you lose consciousness.")
print("")
wake_up_groggy()
elif number_choice == 2:
print("You slowly enter the castle, more concerned with your safety than with obtaining treasure. You see a peasant girl bent low over a figure laying in bed, weeping, 'My kingdom, all of it, for my father's health!' she wails. Smiling, your hands begin to glow. You approach the man slowly with intent and place your hands on his temples. A slow smile plays across his lips as his eyes blink twice and he regains consciousness. The King notes your pious deeds, and bestows upon you the greatest honors and riches. You count", number_punishments_angels_singing(number_choice), "angels singing your praises.")
print("")
congratulations()
else:
print("You enter the castle with great aplomb. Certain of success, you storm through room through room with great bravado. You come to a great room, and you see a pile of jewels centered in the middle. You sprint towards the treasure, only to fall down a trap door stairway that was loosely covered with a rug as a booby trap. You count", number_punishments_angels_singing(number_choice), "stairs that your head bounces off of before you lose consciousness.")
print("")
wake_up_groggy()
print("")
except ValueError:
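        #int() raises ValueError on non-numeric input, so the prompt is restarted by calling red_pill_story again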
print("")
print("Please choose (1), (2), or (3). ")
print("")
red_pill_story()
#This function takes the value entered as the choice of cards in the Three Card Monte game, adds two and squares the total to return a value
#This function uses the value returned as the number of punishments the user counts before losing consciousness in the event of a loss in the "Find the Treasure" questline
#This function uses the value returned as the number of angels in the choir singing the user's praises in the event of a win in the "Find the Treasure" questline
def number_punishments_angels_singing(number_choice):
punishments_angels_singing = (number_choice + 2) ** 2
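    #e.g. a choice of 3 yields (3 + 2) ** 2 == 25 lashings, donuts, or singing angels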
return punishments_angels_singing
#This function introduces the user to the "Save the Princess" questline and invites the user to select from a trio of paths before them
#This function requires an answer of (left), (right) or (middle)
#This function features three different questline story endings for each choice of (left), (right) or (middle) for a total of 9 questline story endings
def blue_pill_story():
road_choice = str(input("Determined to help the worried, injured King, you set off down the path. Soon, you come to a fork with three trails in the path. Left, right or middle? "))
print("")
while not(road_choice == "left" or road_choice == "right" or road_choice == "middle"):
print("Please choose (left), (right), or (middle).")
print("")
road_choice = str(input("Left, right or middle? "))
print("")
#This section of the function features three different questline story endings for the "Save the Princess" questline
#This section of the function wins the game if the user has previously selected "Sorcerer" as role and "middle" as the path before them
#This section of the function loses the game if the user has previously selected "Sorcerer" as role and either "left" or "right" as the path before them
if role == "Sorcerer":
if road_choice == "left":
print("You choose the left trail and continue down. You find the gang of thieves, but not the Princess. The thieves take your pants and leave you on the side of the path. 'Not my day', you think to yourself. Night falls and you soon lose consciousness from a severe case of hypothermia.")
print("")
wake_up_groggy()
elif road_choice == "right":
print("You choose the right trail and continue on. There is no sign of the Princess, and you are running low on supplies. You pass an abandoned caravan with skeletal remains. You contract a severe case of dysentery, and lose consciousness from lack of fluids.")
print("")
wake_up_groggy()
else:
print("You choose the middle trail. Your clairvoyance is more profound than usual, and you sense that the Princess is near. The gang of thieves appears, with the Princess in stow as a hostage. A wicked firefight ensues, and you summon all your skills as a "+role+" to win the battle. You have saved the Princess!")
print("")
congratulations()
#This section of the function features three different questline story endings for the "Save the Princess" questline
#This section of the function wins the game if the user has previously selected "Brawler" as role and "right" as the path before them
#This section of the function loses and restarts the game if the user has previously selected "Brawler" as role and either "left" or "middle" as path before them
elif role == "Brawler":
if road_choice == "left":
print("You amble down the left trail, unsure of what you'll find. You hear the low growl of an animal and turn behind you to see a large black bear charging at you. You turn to run, but it's too late. With a single swipe the large beast mauls you down. You feign death as you begin to lose consciousness, in hopes the bear forgets you and goes on his way.")
wake_up_groggy()
elif road_choice == "right":
print("You choose the trail on the right. After a short while, your keen senses tell you that the King's daughter is just ahead. Lo and behold, as you come around a bend in the path you see the flaxen-haired beauty. As you rush to intercept her, she is surrounded by a group of Dark Elves. There are 5 of them against only you, but your "+weapon+" and "+role+" training prove to be no match. You have saved the Princess!")
print("")
congratulations()
else:
print("You walk slowly down the middle trail. As afternoon turns to dusk and dusk turns to night, you begin to grow weary. You stop to set a small fire to keep warm for the night, but neglect to notice how arid and dry the underbrush surrounding you is. A gust of wind picks up after you've built your fire, and spreads the flames in a small semi-circle around you. As the bone-dry tinder on the ground begins to ignite, you frantically stomp on the ground, attempting to extinguish it in vain. The flames slowly lick at your appendages as you begin to lose consciousness from the searing pain.")
wake_up_groggy()
#This section of the function features three different questline story endings for the "Save the Princess" questline
#This section of the function wins the game if the user has previously selected "Priest" as role and "left" as the path before them
#This section of the function loses and restarts if the user has previously selected "Priest" as role and either "right" or "middle" as the path before them
else:
if road_choice == "left":
print("You choose the trail on the left. As you walk along the woods, you notice a small songbird hopping on one foot, seemingly unable to fly. You summon your training as a "+role+" and lay a single hand on the breast of the songbird. A flash of light appears as the songbird transforms into the raven-haired princess, 'I had to test your true powers,' she exclaims, 'to see if you were the one who was worthy of saving me!' You have saved the Princess!")
print("")
congratulations()
elif road_choice == "right":
print("You choose the trail on the right. You walk for miles and miles, and as the shadows grow longer you begin to run low on water. After another hour passes with no sign of more, you become giddy at the sound of running water just off the trail. You excitedly trample through the brush towards the sound of running water, ducking branches and following your ears. You are so excited and moving so quickly that you trip and fall into the river, which is icy cold and quickly deposits you over a waterfall and into a rocky ravine. You begin to lose consciousness from a head injury.")
print("")
wake_up_groggy()
else:
print("You choose the trail in the middle. There is no sign of the Princess, nor of any other life. The landscape becomes more and more barren. A dust devil begins to swirl around you, and soon has developed into a mini-tornado. You are picked up off the ground against your will and whipped around with enough centrifugal force to make your head spin. The wind finally releases you, slamming you on your back on the ground. You begin to lose consciousness from lack of oxygen.")
print("")
wake_up_groggy()
#This function congratulates the user after winning the game
def congratulations():
print("Congratulations, "+name+", you are a hero amongst heroes and your name shall be forever storied. Arcana is eternally indebted to you.")
if __name__ == "__main__":
    main()
| 97.723118 | 1,178 | 0.706764 |
name = str(input("Please select a name: "))
print("")
mount = str(input("Please select an animal to ride: "))
print("")
weapon = str(input("Please select a weapon: "))
print("")
role = str(input("Please select a role; (Sorcerer), (Brawler), or (Priest): "))
print("")
while not(role == "Sorcerer" or role == "Brawler" or role == "Priest"):
print ("Please select (Sorcerer), (Brawler), or (Priest).")
print("")
role = str(input("Please select a role; (Sorcerer), (Brawler), or (Priest): "))
print("")
race = str(input("Please select a race; (Human), (Elf), or (Troll): "))
print("")
while not(race == "Human" or race == "Elf" or race == "Troll"):
print("Please select (Human), (Elf), or (Troll).")
print("")
race = str(input("Please select a race; (Human), (Elf), or (Troll): "))
print("")
def main ():
#Output
    #This is introductory flavor text based on the constants created by initial user input
#This text creates the setting and style for the story
#After the introductory flavor text, this function calls the first function which requires a decision from the user, wake_up_groggy
if race == "Human":
print("You have selected "+race+". While perhaps not the most exciting choice, it does sound safe.")
print("")
elif race == "Elf":
print("You have selected "+race+". You begin to feel as one with the Earth as a tiny charm of hummingbirds works together in concert to lower a crown made of thistle and ivy upon your head.")
print("")
else:
print("You have selected "+race+". Your features and appendages begin to swell as your skin thickens and becomes cracked and leathery with a distinct green hue.")
print("")
if role == "Sorcerer":
print("You have selected "+role+". You put on your pointy hat, mutter something unintelligble and wiggle the fingers attached to your gangly arms poking out of the billowy sleeves of your long, flowing robe.")
print("")
elif role == "Brawler":
print("You have selected "+role+". You clench your jaws and cock your head to the side as you crack your knuckles loudly.")
print("")
else:
print("You have selected "+role+". A heavenly aura envelops you as your hands begin to pulse with a glowing warmth.")
print("")
print("Welcome to Arcana, "+name+", the land of fantasy and adventure!")
print("")
print("So, "+name+", you've been training as a "+role+"? We'll see if that helps you while you're here... In any case, be sure to always keep your "+weapon+" with you. It's your only means of protecting yourself.")
print("")
print("Lastly, "+name+", I've rounded up the largest, strongest and most well-trained "+mount+" I could find for you to ride. It's being fitted in the stables for a saddle right now. Have fun, and happy adventuring, "+ name+"!")
print("")
wake_up_groggy()
#This function begins the adventure story, and serves as the restarting function if the user fails to win the game
#This function invites the user to go for a ride on their mount after waking up groggy and requires an answer of (yes) or (no) from the user
#This function calls one of two functions based on the answer: upset_stomach_story and breakfast_invitation, each of which begin either a questline or another decision
def wake_up_groggy():
print("")
print("You slowly begin to awaken, your body sore from the prior evening, but your memory is fuzzy. You're unsure how you've arrived in the corner of the bazaar you've woken up in, bustling with the sounds of morning commerce. It's clear to you that you've spent the night here, but you have no memory of arriving the prior evening. You check your knapsack for your "+weapon+", relieved to find it in it's place. A noise behind you startles you, and you whip your head around only to find yourself face-to-face with your trusty "+mount+", who proceeds to sloppily and lovingly lick your face. You sense that it is eager for some exercise as it nuzzles it's saddle.")
print("")
ride_choice = str(input("Would you like to go for a ride? "))
while not(ride_choice == "yes" or ride_choice == "no"):
print("")
print ("Please choose (yes) or (no).")
print("")
ride_choice = str(input("Would you like to go for a ride? "))
if ride_choice == "yes":
print("")
print("You laugh as your "+mount+" tickles your nose with his tongue. ‘Ok, ok. I get it! You want to go for a ride and get a little exercise before breakfast, eh?’ You swing yourself up on the saddle and grab the reins with one hand as you steady yourself with the other, your "+mount+" excitedly racing off underneath you as the two of you escape from view over the horizon.")
print("")
print("Your stomach begins to churn with the steady, rhythmic bouncing of your "+mount+"’s pace, and you yank hard on one rein, curtailing the morning sprint and redirecting your heading back towards the open-air market where you awoke earlier. ‘Alright, ok, there you are. Good boy, let’s go now. Let’s go get some breakfast.’")
print("")
print("The jostling ride continues back to where you began, and the combination of the bouncing and your empty stomach is making you feel a bit ill. As you dismount and hitch up your "+mount+" you hear a squeaky voice behind you.")
upset_stomach_story()
else:
print("")
breakfast_invitation()
def breakfast_invitation ():
print("")
choice = input("A kind elderly woman crooks a wrinkled finger toward you as you look around, rubbing your eyes. Behind her, you see a bowl full of speckled brown eggs sitting on the counter next to bacon sizzling on a griddle, a table set with bread, rolls, butter and jam and a pitcher of milk and orange juice. She smiles and asks you in a gravelly voice, 'Would you care to join me for breakfast?' ")
print("")
while not(choice == "yes" or choice == "no"):
print ("Please choose (yes) or (no).")
print("")
choice = input("'Would you care to join me for breakfast?' ")
print("")
if choice == "yes":
breakfast_story()
else:
upset_stomach_story()
#This function introduces the user to the questline "Slay the Grenwald"
#This function invites the user to play a game of rock, paper, scissors with the kind elderly woman who invited the user to breakfast
#This function determines who will do the dishes in the story and requires an answer of (rock), (paper), or (scissors) from the user
#This function calls one of three functions based on the user's answer: rock_story, paper_story or scissors_story, each of which have 3 different endings to the "Slay the Grenwald" questline
def breakfast_story ():
print("")
choice = input("You sit down at the wizened old woman's table and enjoy a hearty breakfast. As the two of you finish sopping up egg yolk with bits of bread from your plates, you overhear two young boys whispering excitedly as they scurry past you through the open-air market. 'Did you hear about the Grenwald last night? It seems she's taken another, and I understand the King has issued a ransom for her head!' The old woman laughs as your gaze follows the young boys. 'So, you fancy yourself an adventurer, do you? The last "+race+" that set out to slay the Grenwald never came back... Although, it is quite the handsome reward that the King is offering! Anyways, before you get carried away with all of that malarkey, you owe me a game of Rock, paper, scissors,' she said with a twinkle in her eye. 'Loser does the dishes!' Which would you like to play: rock, paper, or scissors? ")
print("")
while not(choice == "rock" or choice == "paper" or choice == "scissors"):
print ("Please choose (rock), (paper) or (scissors).")
print("")
choice = input("(Rock), (paper), (scissors)? ")
print("")
if choice == "rock":
rock_story()
elif choice == "paper":
paper_story()
else:
scissors_story()
print("")
def upset_stomach_story ():
print("")
choice = str(input("A well-dressed and fidgety gnome who speaks excitedly with his hands approaches you, 'Well aren’t you a funny-looking "+race+"! Or maybe you’re just not feeling well? Anyways, why don’t you take a look at what I have up my sleeve, one of these is sure to make you feel better!' You peer at him curiously as he furtively digs in his pockets. 'Erm, ah, they were just right here...' he mutters to himself. 'Aha! Here they are! Your choice, one pill ought to due, red or blue?' he proclaims as he thrusts forward both palms, each proudly displaying a healthy sized pill: one red and one blue. Would you like the red pill or the blue pill? "))
print("")
while not (choice == "red" or choice == "blue"):
print("Please choose (red) or (blue).")
print("")
choice = str(input("Would you like the red pill or the blue pill? "))
print("")
if choice == "red":
print("You place your open palm in front of the red pill, and the squeaky-voiced gnome drops the pill in your open hand, 'Quite brave, didn't even ask what it is! You remind me of my aunt. She was a mighty "+role+", and I bet you could be a "+role+" someday also, if you trained hard enough. Anyways, that's another story for another time. I hope the pill suits you well!'")
print("")
print("Immediately after ingesting the red pill your head begins to swim. Your consciousness floats away until you are unsure what is real and what is make-believe. You find yourself at the foot of a castle, where a young man has set up a shell game, 'Watch the pea, use your eyes, get it right and win a prize!' he exclaims as you watch a passerby wager a coin on a round of the game. The huckster slides a pea under one of 3 empty half-walnut shells and lays them flat on a board in front of him. Slowly at first, and then ever-faster, he nimbly and dextrously maneuvers the shells beneath his fingers, faster and faster until all you can see is a blur of hands, all while they lay flat on the board in front of him. At once he stops. The passerby grins sheepishly and half-heartedly points to one of the shells. The showman flashes a wide, toothy smile as he flips up the empty shell, revealing that this was not the winner. With a flourish, he reshuffles the shells in front of him and gestures towards you, 'And you, charming young "+race+" with the smile of a "+role+". How about a free turn at my game? What do you say? Nothing to lose, don't you see?'")
print("")
red_pill_story()
else:
print("Your gaze moves to the blue pill, and before you can get a word out edge-wise, the sneaky little sucker tosses it in your mouth with a gleeful cackle, 'Roses are red, this pill is blue. Not sure what's in it, but let's hope it's good for you!'.")
print("")
print("Your reality fades and mutes as you shift dimensions. You hear a loud, audible *SNAP* and your teeth flash cold momentarily. Your eyelids flit and flutter as you come to, making out your surroundings. Before you is a path, and on either side are thick blackberry bushes surrounding tall, wide-based evergreen trees.")
print("")
print("As you walk down the path, you come across an old man resting under a tree. 'Could you do me a favor young whippersnapper?' he asks you, 'I need your help. My daughter has gone on ahead of me, and I told her I would catch up, but I am injured. She needs your protection. After she left, word was sent to me by a messenger that a gang of thieves is laying in wait for her, just a short ways past where we are now. There is a 3-way fork in the road, and I'm not sure which path she has chosen. You'll need to guess correctly in order to find her. Please, "+name+", you're our only hope! I have my own kingdom not many days travel from here; should you save my daughter the Princess I shall reward you handsomely.'")
print("")
blue_pill_story()
#This function features three different endings to the "Slay the Grenwald" questline
#This function wins the game if the user has previously selected "Sorcerer" as role and "rock" as option in game with kind elderly woman
#This function loses and restarts the game if the user has previously selected either "Brawler" or "Priest" as role and "rock" as option in game with kind elderly woman
def rock_story():
if role == "Sorcerer":
print("You set off to find the Grenwald with a spring in your step after helping the kind elderly woman with the breakfast dishes. You find the behemoth alone on top of a rocky mesa. The beast squares off against you and charges. You use your powers as a "+role+" to cause an avalanche of rocks to fall on the Grenwald's head, stopping the charging monster dead in it's tracks. You have slain the Grenwald!")
print("")
congratulations()
elif role == "Brawler":
print("Your loins warm with anticipation, you set out on your mission to locate the Grenwald after helping the kind elderly woman with the breakfast dishes. Soon, you find the gargantuan matriarch as she is sleeping. You find a perch above her, and roll a large rock over the precipice to drop on her head, in hopes of incapacitating her. As you are rolling the rock into place to drop on her head, another, larger rock falls from above you, and hits you on the head. You begin to lose consciousness from internal hemorrhaging.")
print("")
wake_up_groggy()
else:
print("After you finish doing the dishes for the kind elderly woman, you head off to vanquish the Grenwald. After a time, you come to a rock quarry. There, you find the Grenwald hard at work, carving large chunks of granite out of the walls of the quarry and smashing them into smaller, more uniform pieces. You attempt to take her by surprise, but her reflexes catch you off guard, and she whips around as you try to sneak up on her. She has you dead to rights. She picks up two large slabs of granite, each larger than you, and slowly squeezes you between them. You begin to lose consciousness from a collapsed lung.")
print("")
wake_up_groggy()
#This function features three different endings to the "Slay the Grenwald" questline
#This function wins the game if the user has previously selected "Brawler" as role and "paper" as option in game with kind elderly woman
#This function loses and restarts the game if the user has previously selected either "Sorcerer" or "Priest" as role and "paper" as option in game with kind elderly woman
def paper_story():
if role == "Sorcerer":
print("You finish helping the kind elderly woman with the breakfast dishes, then set out to slay the Grenwald. After several days travel, you locate the mythical beast. You cast a spell from afar, and it does nothing but anger her. Perhaps a "+role+" isn't meant to be slaying the Grenwald. You begin to faint and lose consciousness from low blood pressure.")
print("")
wake_up_groggy()
elif role == "Brawler":
print("After finishing helping the kind elderly woman with the breakfast dishes, you set out to slay the Grenwald. It takes you several days to find her, but when you finally do, you've had time to think of a clever plan. Although you are strong, you conclude that she is much stronger, and decide to use your brains instead of your brawn to defeat her. You bring out a storybook, and while she is resting after a meal, you begin to read her stories from a hiding place, so that she can only hear you and not see you. She grows sleepy from listening to the stories on a full belly, and is soon snoring loudly. You take this opportunity to take her life mercilessly so that you may collect the bounty and protect the citizens from her wrath. You have slain the Grenwald!")
print("")
congratulations()
else:
print("After doing the breakfast dishes with the kind elderly woman, you begin your mission to slay the Grenwald. After several days of hunting, you have the opportunity to come face-to-face with her. You summon your power of the "+role+" and attempt to heal the evil out of her, but this only enrages her. She mauls your face. You begin to lose consciousness due to loss of blood.")
print("")
wake_up_groggy()
#This function features three different endings to the "Slay the Grenwald" questline
#This function wins the game if the user has previously selected "Priest" as role and "scissors" as option in game with kind elderly woman
#This function loses and restarts the game if the user has previously selected either "Sorcerer" or "Brawler" as role and "scissors" as option in game with kind elderly woman
def scissors_story():
if role == "Sorcerer":
print("You and the kind elderly woman finish the dishes from breakfast, and then you ignore her advice and go off to find the Grenwald. When you come across the monster alone in an open field, you charge towards her. With a swat of her tail you fly high into the air, and fall straight into her throat. You begin to lose consciousness from the smell of her innards.")
print("")
wake_up_groggy()
elif role == "Brawler":
print("You finish up doing the breakfast dishes with the kind elderly woman and head out to find the Grenwald. You keep your "+weapon+" drawn and are ready for a battle at any moment, but when you happen upon her you find that her sheer brute strength is no match for any "+role+", let alone one of your prowess. She toys with you like a cat with a mouse, then swats your feet out from underneath you. You begin to lose consciousness from nerve damage in your spinal cord.")
print("")
wake_up_groggy()
else:
print("You and the kind elderly woman finish the breakfast dishes together before you set out to slay the Grenwald. You come across her after days of searching out in the countryside and she bellows '"+name+"! I have been waiting for you!' You shiver in anticipation of a battle to end all battles. Your hands glow with a priestly aura as you summon the power of the deities above. You charge at the Grenwald and strike her once, twice, three times with your "+weapon+". She lies still. You have slain the Grenwald!")
print("")
congratulations()
#This function introduces the user to the "Find the Treasure" questline, invites the user to play a game of Three Card Monte and requires an answer of (1), (2) or (3) from the user
#This function features three different questline story endings for each choice of card 1, 2, or 3 for a total of 9 questline story endings
def red_pill_story():
try:
        number_choice = int(input("What'll it be, 1, 2, or 3? "))
print("")
while not(number_choice == 1 or number_choice == 2 or number_choice == 3):
print("Please choose (1), (2) or (3).")
print("")
            number_choice = int(input("What'll it be, 1, 2 or 3? "))
            print("")
print("The shell game host laughs uproariously as he shows you your empty shell, 'Lady luck may not be on your side just yet, but don't let that stop you from finding the treasure inside the castle!' he intones as he gestures with a grand sweep of his hand toward the entrance just behind him.")
print("")
if role == "Sorcerer":
if number_choice == 1:
print("Following the confidence man's lead, you enter the castle in search of the storied treasure. You hear the creak of a door opening in the room to your left. Entering the room, you are temporarily blinded by a flash of light as a torch is ignited. 'Ewww! It's a "+race+" and it's ALIVE! Quick, get the switch! This one smells like a "+role+"!' You count", number_punishments_angels_singing(number_choice), "lashings before you collapse in pain, unconscious.")
print("")
wake_up_groggy()
elif number_choice == 2:
print("'Treasure, eh?' you think to yourself as you step through the threshold into the castle, 'I could always use a little extra pocket money.' You see a set of stairs leading up from the entryway, and make your way up the first flight. '"+name+"...' You hear a ghostly whisper from behind a curtain in the corner. Slowly, you make your way to the curtain, and pull it back. '"+name+"... Thank you for joining us. I hope you weren't planning on leaving any time soon!' A sharp blow from behind drops you to the floor. You count", number_punishments_angels_singing(number_choice), "lashings before you lose consciousness.")
print("")
wake_up_groggy()
else:
print("Upon entering the castle, a giant ogre is standing in front of you, ready to fight. 'Fee fi, fo fum. I smell the blood of someone dumb!' Although you are unprepared for battle, your quick reactions and prowess with your "+weapon+" make for a short fight against the ogre. Once he is vanquished, you rummage through his knapsack and find the diamond-crusted golden tiara. You have found the treasure! You count", number_punishments_angels_singing(number_choice), "angels singing your praises.")
print("")
congratulations()
elif role == "Brawler":
if number_choice == 1:
print("Carefully withdrawing your "+weapon+", you enter the castle. Instantly, you are besieged by a wild pack of angry dwarves. 'You'll never have our treasure of precious gems you smelly "+role+"! You'll never make it out alive!' Coolly and calmly you dispatch the horde of bearded bullies, and as you stop to catch your breath, you notice a glint of gold peeking out from under the rug in front of you. Sweeping away the rug, you uncover a pile of precious gems and metals. You have found the treasure! You count", number_punishments_angels_singing(number_choice), "angels singing your praises.")
print("")
congratulations()
elif number_choice == 2:
print("As you enter the castle you notice a trap door in the far corner. A thick iron hoop is attached to the wooden door, which you use to pull the hatch open slowly as it emits a low groan. Stairs lead below you, and you hear a small voice calling out in the distance, 'Help. Help me. I'm just a little lonely "+race+" down here all by myself. My mother is too busy training to be a "+role+" to take care of me. Oh, someone please help me, and all of this treasure down here will be yours!' You race down the stairs, eager to lend a helping hand and line your pockets. 'Sucker,' snickers a deep sinister voice behind you as you come around the corner. The last thing you see is a flash of light as you feel a thick thud on the back of your head. You count", number_punishments_angels_singing(number_choice), " tiny birds flying around your head before you lose consciousness.")
print("")
wake_up_groggy()
else:
print("You storm the entrance of the castle, your "+weapon+" drawn and your mind fresh with the tactics passed down from generations of "+role+"s before you. There, in front of you, lies the treasure. It is within your reach. But, just before the treasure sits a small table laden with dozens and dozens of donuts. You decide it couldn't possibly hurt to stop and have a quick snack before collecting the treasure. You count", number_punishments_angels_singing(number_choice), "donuts put away before you doze off. Mmmmm, donuts.")
print("")
wake_up_groggy()
else:
if number_choice == 1:
print("You enter the castle with your senses on full alert. You pass through the foyer into the courtyard, lush, green and lit by the sun. At the top of the tallest tree there is a platform laden with jewels, gold, and silver. You have found the treasure. Now, it's just a matter of getting to it. You begin to climb the tree when you feel something whistle past your ear, narrowly missing you. Seconds later, you hear a thud above you and you quickly look up to find an arrow quivering in the trunk of the tree above your head. You're being shot at by guards with bows. One sinks into your thigh, and the pain is unbearable. You count", number_punishments_angels_singing(number_choice), "arrows sunk into your body before you lose consciousness.")
print("")
wake_up_groggy()
elif number_choice == 2:
print("You slowly enter the castle, more concerned with your safety than with obtaining treasure. You see a peasant girl bent low over a figure laying in bed, weeping, 'My kingdom, all of it, for my father's health!' she wails. Smiling, your hands begin to glow. You approach the man slowly with intent and place your hands on his temples. A slow smile plays across his lips as his eyes blink twice and he regains consciousness. The King notes your pious deeds, and bestows upon you the greatest honors and riches. You count", number_punishments_angels_singing(number_choice), "angels singing your praises.")
print("")
congratulations()
else:
print("You enter the castle with great aplomb. Certain of success, you storm through room through room with great bravado. You come to a great room, and you see a pile of jewels centered in the middle. You sprint towards the treasure, only to fall down a trap door stairway that was loosely covered with a rug as a booby trap. You count", number_punishments_angels_singing(number_choice), "stairs that your head bounces off of before you lose consciousness.")
print("")
wake_up_groggy()
print("")
except ValueError:
print("")
print("Please choose (1), (2), or (3). ")
print("")
red_pill_story()
#This function takes the value entered as the choice of cards in the Three Card Monte game, adds two and squares the total to return a value
#This function uses the value returned as the number of punishments the user counts before losing consciousness in the event of a loss in the "Find the Treasure" questline
#This function uses the value returned as the number of angels in the choir singing the user's praises in the event of a win in the "Find the Treasure" questline
def number_punishments_angels_singing(number_choice):
punishments_angels_singing = (number_choice + 2) ** 2
return punishments_angels_singing
def blue_pill_story():
road_choice = str(input("Determined to help the worried, injured King, you set off down the path. Soon, you come to a fork with three trails in the path. Left, right or middle? "))
print("")
while not(road_choice == "left" or road_choice == "right" or road_choice == "middle"):
print("Please choose (left), (right), or (middle).")
print("")
road_choice = str(input("Left, right or middle? "))
print("")
if role == "Sorcerer":
if road_choice == "left":
print("You choose the left trail and continue down. You find the gang of thieves, but not the Princess. The thieves take your pants and leave you on the side of the path. 'Not my day', you think to yourself. Night falls and you soon lose consciousness from a severe case of hypothermia.")
print("")
wake_up_groggy()
elif road_choice == "right":
print("You choose the right trail and continue on. There is no sign of the Princess, and you are running low on supplies. You pass an abandoned caravan with skeletal remains. You contract a severe case of dysentery, and lose consciousness from lack of fluids.")
print("")
wake_up_groggy()
else:
print("You choose the middle trail. Your clairvoyance is more profound than usual, and you sense that the Princess is near. The gang of thieves appears, with the Princess in stow as a hostage. A wicked firefight ensues, and you summon all your skills as a "+role+" to win the battle. You have saved the Princess!")
print("")
congratulations()
elif role == "Brawler":
if road_choice == "left":
print("You amble down the left trail, unsure of what you'll find. You hear the low growl of an animal and turn behind you to see a large black bear charging at you. You turn to run, but it's too late. With a single swipe the large beast mauls you down. You feign death as you begin to lose consciousness, in hopes the bear forgets you and goes on his way.")
wake_up_groggy()
elif road_choice == "right":
print("You choose the trail on the right. After a short while, your keen senses tell you that the King's daughter is just ahead. Lo and behold, as you come around a bend in the path you see the flaxen-haired beauty. As you rush to intercept her, she is surrounded by a group of Dark Elves. There are 5 of them against only you, but your "+weapon+" and "+role+" training prove to be no match. You have saved the Princess!")
print("")
congratulations()
else:
print("You walk slowly down the middle trail. As afternoon turns to dusk and dusk turns to night, you begin to grow weary. You stop to set a small fire to keep warm for the night, but neglect to notice how arid and dry the underbrush surrounding you is. A gust of wind picks up after you've built your fire, and spreads the flames in a small semi-circle around you. As the bone-dry tinder on the ground begins to ignite, you frantically stomp on the ground, attempting to extinguish it in vain. The flames slowly lick at your appendages as you begin to lose consciousness from the searing pain.")
wake_up_groggy()
else:
if road_choice == "left":
print("You choose the trail on the left. As you walk along the woods, you notice a small songbird hopping on one foot, seemingly unable to fly. You summon your training as a "+role+" and lay a single hand on the breast of the songbird. A flash of light appears as the songbird transforms into the raven-haired princess, 'I had to test your true powers,' she exclaims, 'to see if you were the one who was worthy of saving me!' You have saved the Princess!")
print("")
congratulations()
elif road_choice == "right":
print("You choose the trail on the right. You walk for miles and miles, and as the shadows grow longer you begin to run low on water. After another hour passes with no sign of more, you become giddy at the sound of running water just off the trail. You excitedly trample through the brush towards the sound of running water, ducking branches and following your ears. You are so excited and moving so quickly that you trip and fall into the river, which is icy cold and quickly deposits you over a waterfall and into a rocky ravine. You begin to lose consciousness from a head injury.")
print("")
wake_up_groggy()
else:
print("You choose the trail in the middle. There is no sign of the Princess, nor of any other life. The landscape becomes more and more barren. A dust devil begins to swirl around you, and soon has developed into a mini-tornado. You are picked up off the ground against your will and whipped around with enough centrifugal force to make your head spin. The wind finally releases you, slamming you on your back on the ground. You begin to lose consciousness from lack of oxygen.")
print("")
wake_up_groggy()
def congratulations():
print("Congratulations, "+name+", you are a hero amongst heroes and your name shall be forever storied. Arcana is eternally indebted to you.")
main()
| true
| true
|
1c41b672f8df9b9fd7792551d1a2974e70223fa4
| 1,330
|
py
|
Python
|
tests/test_convert_units.py
|
timcera/tstoolbox
|
a32fa399d96082f01b7eedfd6c8893bdb881845c
|
[
"BSD-3-Clause"
] | 5
|
2016-10-13T18:06:41.000Z
|
2021-06-29T19:47:36.000Z
|
tests/test_convert_units.py
|
timcera/tstoolbox
|
a32fa399d96082f01b7eedfd6c8893bdb881845c
|
[
"BSD-3-Clause"
] | 21
|
2016-04-28T16:48:03.000Z
|
2021-12-16T18:07:07.000Z
|
tests/test_convert_units.py
|
timcera/tstoolbox
|
a32fa399d96082f01b7eedfd6c8893bdb881845c
|
[
"BSD-3-Clause"
] | 3
|
2018-03-21T21:07:52.000Z
|
2021-01-22T20:07:49.000Z
|
# -*- coding: utf-8 -*-
from unittest import TestCase
import pint_pandas
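# imported for its side effect only: importing pint_pandas registers pint's
# extension dtype with pandas (presumably why the otherwise-unused import is kept)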
import pytest
from pandas.testing import assert_frame_equal
from tstoolbox import tstoolbox
class TestConvertUnits(TestCase):
@staticmethod
def test_convert_units():
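        # Daymet precipitation is recorded in mm; 1 in == 25.4 mm, so reading with
        # target_units="in" should match dividing the raw series by 25.4 (the
        # source-unit assumption follows from the divisors used in this test)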
a = tstoolbox.read("tests/data_gainesville_daily_precip.csv", target_units="in")
b = tstoolbox.equation(
"x1/25.4", input_ts="tests/data_gainesville_daily_precip.csv"
)
b.columns = ["ADaymet-prcp:in"]
assert_frame_equal(a, b, check_dtype=False)
a = tstoolbox.read("tests/data_gainesville_daily_precip.csv", target_units="km")
b = tstoolbox.equation(
"x1/(1000*1000)", input_ts="tests/data_gainesville_daily_precip.csv"
)
b.columns = ["ADaymet-prcp:km"]
assert_frame_equal(a, b, check_dtype=False)
with pytest.raises(ValueError) as e_info:
_ = tstoolbox.read(
"tests/data_gainesville_daily_precip.csv", source_units="ft3/s"
)
assert r'The units specified by the "source_units" keyword and in the' in str(
e_info.value
)
with pytest.raises(ValueError) as e_info:
_ = tstoolbox.read(
"tests/data_gainesville_daily_precip.csv", target_units="ft3/s"
)
| 32.439024
| 88
| 0.642857
|
from unittest import TestCase
import pint_pandas
import pytest
from pandas.testing import assert_frame_equal
from tstoolbox import tstoolbox
class TestConvertUnits(TestCase):
@staticmethod
def test_convert_units():
a = tstoolbox.read("tests/data_gainesville_daily_precip.csv", target_units="in")
b = tstoolbox.equation(
"x1/25.4", input_ts="tests/data_gainesville_daily_precip.csv"
)
b.columns = ["ADaymet-prcp:in"]
assert_frame_equal(a, b, check_dtype=False)
a = tstoolbox.read("tests/data_gainesville_daily_precip.csv", target_units="km")
b = tstoolbox.equation(
"x1/(1000*1000)", input_ts="tests/data_gainesville_daily_precip.csv"
)
b.columns = ["ADaymet-prcp:km"]
assert_frame_equal(a, b, check_dtype=False)
with pytest.raises(ValueError) as e_info:
_ = tstoolbox.read(
"tests/data_gainesville_daily_precip.csv", source_units="ft3/s"
)
assert r'The units specified by the "source_units" keyword and in the' in str(
e_info.value
)
with pytest.raises(ValueError) as e_info:
_ = tstoolbox.read(
"tests/data_gainesville_daily_precip.csv", target_units="ft3/s"
)
| true
| true
|
1c41b6a02f1c62ace1a896acf571367c51dcd8de
| 10,511
|
py
|
Python
|
kubernetes/test/test_apps_v1beta2_api.py
|
anemerovsky-essextec/python
|
6e40b9169b27c3f1f9422c0f6dd1cd9caef8d57c
|
[
"Apache-2.0"
] | null | null | null |
kubernetes/test/test_apps_v1beta2_api.py
|
anemerovsky-essextec/python
|
6e40b9169b27c3f1f9422c0f6dd1cd9caef8d57c
|
[
"Apache-2.0"
] | null | null | null |
kubernetes/test/test_apps_v1beta2_api.py
|
anemerovsky-essextec/python
|
6e40b9169b27c3f1f9422c0f6dd1cd9caef8d57c
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.12.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.apis.apps_v1beta2_api import AppsV1beta2Api
class TestAppsV1beta2Api(unittest.TestCase):
""" AppsV1beta2Api unit test stubs """
def setUp(self):
self.api = kubernetes.client.apis.apps_v1beta2_api.AppsV1beta2Api()
def tearDown(self):
pass
def test_create_namespaced_controller_revision(self):
"""
Test case for create_namespaced_controller_revision
"""
pass
def test_create_namespaced_daemon_set(self):
"""
Test case for create_namespaced_daemon_set
"""
pass
def test_create_namespaced_deployment(self):
"""
Test case for create_namespaced_deployment
"""
pass
def test_create_namespaced_replica_set(self):
"""
Test case for create_namespaced_replica_set
"""
pass
def test_create_namespaced_stateful_set(self):
"""
Test case for create_namespaced_stateful_set
"""
pass
def test_delete_collection_namespaced_controller_revision(self):
"""
Test case for delete_collection_namespaced_controller_revision
"""
pass
def test_delete_collection_namespaced_daemon_set(self):
"""
Test case for delete_collection_namespaced_daemon_set
"""
pass
def test_delete_collection_namespaced_deployment(self):
"""
Test case for delete_collection_namespaced_deployment
"""
pass
def test_delete_collection_namespaced_replica_set(self):
"""
Test case for delete_collection_namespaced_replica_set
"""
pass
def test_delete_collection_namespaced_stateful_set(self):
"""
Test case for delete_collection_namespaced_stateful_set
"""
pass
def test_delete_namespaced_controller_revision(self):
"""
Test case for delete_namespaced_controller_revision
"""
pass
def test_delete_namespaced_daemon_set(self):
"""
Test case for delete_namespaced_daemon_set
"""
pass
def test_delete_namespaced_deployment(self):
"""
Test case for delete_namespaced_deployment
"""
pass
def test_delete_namespaced_replica_set(self):
"""
Test case for delete_namespaced_replica_set
"""
pass
def test_delete_namespaced_stateful_set(self):
"""
Test case for delete_namespaced_stateful_set
"""
pass
def test_get_api_resources(self):
"""
Test case for get_api_resources
"""
pass
def test_list_controller_revision_for_all_namespaces(self):
"""
Test case for list_controller_revision_for_all_namespaces
"""
pass
def test_list_daemon_set_for_all_namespaces(self):
"""
Test case for list_daemon_set_for_all_namespaces
"""
pass
def test_list_deployment_for_all_namespaces(self):
"""
Test case for list_deployment_for_all_namespaces
"""
pass
def test_list_namespaced_controller_revision(self):
"""
Test case for list_namespaced_controller_revision
"""
pass
def test_list_namespaced_daemon_set(self):
"""
Test case for list_namespaced_daemon_set
"""
pass
def test_list_namespaced_deployment(self):
"""
Test case for list_namespaced_deployment
"""
pass
def test_list_namespaced_replica_set(self):
"""
Test case for list_namespaced_replica_set
"""
pass
def test_list_namespaced_stateful_set(self):
"""
Test case for list_namespaced_stateful_set
"""
pass
def test_list_replica_set_for_all_namespaces(self):
"""
Test case for list_replica_set_for_all_namespaces
"""
pass
def test_list_stateful_set_for_all_namespaces(self):
"""
Test case for list_stateful_set_for_all_namespaces
"""
pass
def test_patch_namespaced_controller_revision(self):
"""
Test case for patch_namespaced_controller_revision
"""
pass
def test_patch_namespaced_daemon_set(self):
"""
Test case for patch_namespaced_daemon_set
"""
pass
def test_patch_namespaced_daemon_set_status(self):
"""
Test case for patch_namespaced_daemon_set_status
"""
pass
def test_patch_namespaced_deployment(self):
"""
Test case for patch_namespaced_deployment
"""
pass
def test_patch_namespaced_deployment_scale(self):
"""
Test case for patch_namespaced_deployment_scale
"""
pass
def test_patch_namespaced_deployment_status(self):
"""
Test case for patch_namespaced_deployment_status
"""
pass
def test_patch_namespaced_replica_set(self):
"""
Test case for patch_namespaced_replica_set
"""
pass
def test_patch_namespaced_replica_set_scale(self):
"""
Test case for patch_namespaced_replica_set_scale
"""
pass
def test_patch_namespaced_replica_set_status(self):
"""
Test case for patch_namespaced_replica_set_status
"""
pass
def test_patch_namespaced_stateful_set(self):
"""
Test case for patch_namespaced_stateful_set
"""
pass
def test_patch_namespaced_stateful_set_scale(self):
"""
Test case for patch_namespaced_stateful_set_scale
"""
pass
def test_patch_namespaced_stateful_set_status(self):
"""
Test case for patch_namespaced_stateful_set_status
"""
pass
def test_read_namespaced_controller_revision(self):
"""
Test case for read_namespaced_controller_revision
"""
pass
def test_read_namespaced_daemon_set(self):
"""
Test case for read_namespaced_daemon_set
"""
pass
def test_read_namespaced_daemon_set_status(self):
"""
Test case for read_namespaced_daemon_set_status
"""
pass
def test_read_namespaced_deployment(self):
"""
Test case for read_namespaced_deployment
"""
pass
def test_read_namespaced_deployment_scale(self):
"""
Test case for read_namespaced_deployment_scale
"""
pass
def test_read_namespaced_deployment_status(self):
"""
Test case for read_namespaced_deployment_status
"""
pass
def test_read_namespaced_replica_set(self):
"""
Test case for read_namespaced_replica_set
"""
pass
def test_read_namespaced_replica_set_scale(self):
"""
Test case for read_namespaced_replica_set_scale
"""
pass
def test_read_namespaced_replica_set_status(self):
"""
Test case for read_namespaced_replica_set_status
"""
pass
def test_read_namespaced_stateful_set(self):
"""
Test case for read_namespaced_stateful_set
"""
pass
def test_read_namespaced_stateful_set_scale(self):
"""
Test case for read_namespaced_stateful_set_scale
"""
pass
def test_read_namespaced_stateful_set_status(self):
"""
Test case for read_namespaced_stateful_set_status
"""
pass
def test_replace_namespaced_controller_revision(self):
"""
Test case for replace_namespaced_controller_revision
"""
pass
def test_replace_namespaced_daemon_set(self):
"""
Test case for replace_namespaced_daemon_set
"""
pass
def test_replace_namespaced_daemon_set_status(self):
"""
Test case for replace_namespaced_daemon_set_status
"""
pass
def test_replace_namespaced_deployment(self):
"""
Test case for replace_namespaced_deployment
"""
pass
def test_replace_namespaced_deployment_scale(self):
"""
Test case for replace_namespaced_deployment_scale
"""
pass
def test_replace_namespaced_deployment_status(self):
"""
Test case for replace_namespaced_deployment_status
"""
pass
def test_replace_namespaced_replica_set(self):
"""
Test case for replace_namespaced_replica_set
"""
pass
def test_replace_namespaced_replica_set_scale(self):
"""
Test case for replace_namespaced_replica_set_scale
"""
pass
def test_replace_namespaced_replica_set_status(self):
"""
Test case for replace_namespaced_replica_set_status
"""
pass
def test_replace_namespaced_stateful_set(self):
"""
Test case for replace_namespaced_stateful_set
"""
pass
def test_replace_namespaced_stateful_set_scale(self):
"""
Test case for replace_namespaced_stateful_set_scale
"""
pass
def test_replace_namespaced_stateful_set_status(self):
"""
Test case for replace_namespaced_stateful_set_status
"""
pass
if __name__ == '__main__':
unittest.main()
| 19.72045
| 105
| 0.597755
|
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.apis.apps_v1beta2_api import AppsV1beta2Api
class TestAppsV1beta2Api(unittest.TestCase):
def setUp(self):
self.api = kubernetes.client.apis.apps_v1beta2_api.AppsV1beta2Api()
def tearDown(self):
pass
def test_create_namespaced_controller_revision(self):
pass
def test_create_namespaced_daemon_set(self):
pass
def test_create_namespaced_deployment(self):
pass
def test_create_namespaced_replica_set(self):
pass
def test_create_namespaced_stateful_set(self):
pass
def test_delete_collection_namespaced_controller_revision(self):
pass
def test_delete_collection_namespaced_daemon_set(self):
pass
def test_delete_collection_namespaced_deployment(self):
pass
def test_delete_collection_namespaced_replica_set(self):
pass
def test_delete_collection_namespaced_stateful_set(self):
pass
def test_delete_namespaced_controller_revision(self):
pass
def test_delete_namespaced_daemon_set(self):
pass
def test_delete_namespaced_deployment(self):
pass
def test_delete_namespaced_replica_set(self):
pass
def test_delete_namespaced_stateful_set(self):
pass
def test_get_api_resources(self):
pass
def test_list_controller_revision_for_all_namespaces(self):
pass
def test_list_daemon_set_for_all_namespaces(self):
pass
def test_list_deployment_for_all_namespaces(self):
pass
def test_list_namespaced_controller_revision(self):
pass
def test_list_namespaced_daemon_set(self):
pass
def test_list_namespaced_deployment(self):
pass
def test_list_namespaced_replica_set(self):
pass
def test_list_namespaced_stateful_set(self):
pass
def test_list_replica_set_for_all_namespaces(self):
pass
def test_list_stateful_set_for_all_namespaces(self):
pass
def test_patch_namespaced_controller_revision(self):
pass
def test_patch_namespaced_daemon_set(self):
pass
def test_patch_namespaced_daemon_set_status(self):
pass
def test_patch_namespaced_deployment(self):
pass
def test_patch_namespaced_deployment_scale(self):
pass
def test_patch_namespaced_deployment_status(self):
pass
def test_patch_namespaced_replica_set(self):
pass
def test_patch_namespaced_replica_set_scale(self):
pass
def test_patch_namespaced_replica_set_status(self):
pass
def test_patch_namespaced_stateful_set(self):
pass
def test_patch_namespaced_stateful_set_scale(self):
pass
def test_patch_namespaced_stateful_set_status(self):
pass
def test_read_namespaced_controller_revision(self):
pass
def test_read_namespaced_daemon_set(self):
pass
def test_read_namespaced_daemon_set_status(self):
pass
def test_read_namespaced_deployment(self):
pass
def test_read_namespaced_deployment_scale(self):
pass
def test_read_namespaced_deployment_status(self):
pass
def test_read_namespaced_replica_set(self):
pass
def test_read_namespaced_replica_set_scale(self):
pass
def test_read_namespaced_replica_set_status(self):
pass
def test_read_namespaced_stateful_set(self):
pass
def test_read_namespaced_stateful_set_scale(self):
pass
def test_read_namespaced_stateful_set_status(self):
pass
def test_replace_namespaced_controller_revision(self):
pass
def test_replace_namespaced_daemon_set(self):
pass
def test_replace_namespaced_daemon_set_status(self):
pass
def test_replace_namespaced_deployment(self):
pass
def test_replace_namespaced_deployment_scale(self):
pass
def test_replace_namespaced_deployment_status(self):
pass
def test_replace_namespaced_replica_set(self):
pass
def test_replace_namespaced_replica_set_scale(self):
pass
def test_replace_namespaced_replica_set_status(self):
pass
def test_replace_namespaced_stateful_set(self):
pass
def test_replace_namespaced_stateful_set_scale(self):
pass
def test_replace_namespaced_stateful_set_status(self):
pass
if __name__ == '__main__':
unittest.main()
| true
| true
|
1c41b7adadfa8fa732c4d2ec2617a5980c1b03cc
| 3,783
|
py
|
Python
|
frappe/desk/page/messages/messages.py
|
kardmode/frappe
|
d8f46daa7157545e4d302a2d54c059419d0113f3
|
[
"MIT"
] | null | null | null |
frappe/desk/page/messages/messages.py
|
kardmode/frappe
|
d8f46daa7157545e4d302a2d54c059419d0113f3
|
[
"MIT"
] | null | null | null |
frappe/desk/page/messages/messages.py
|
kardmode/frappe
|
d8f46daa7157545e4d302a2d54c059419d0113f3
|
[
"MIT"
] | 5
|
2016-11-12T12:14:58.000Z
|
2018-03-21T15:45:45.000Z
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.desk.notifications import delete_notification_count_for
from frappe.core.doctype.user.user import STANDARD_USERS
from frappe.utils.user import get_enabled_system_users
from frappe.utils import cint, get_fullname
@frappe.whitelist()
def get_list(arg=None):
"""get list of messages"""
frappe.form_dict['limit_start'] = int(frappe.form_dict['limit_start'])
frappe.form_dict['limit_page_length'] = int(frappe.form_dict['limit_page_length'])
frappe.form_dict['user'] = frappe.session['user']
# set all messages as read
frappe.db.begin()
frappe.db.sql("""UPDATE `tabComment`
set docstatus = 1 where comment_doctype in ('My Company', 'Message')
and comment_docname = %s
""", frappe.session.user)
delete_notification_count_for("Messages")
frappe.db.commit()
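	# when the selected contact is the session user, return every message
	# addressed to them; otherwise return the two-way conversation with that contact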
if frappe.form_dict['contact'] == frappe.session['user']:
# return messages
return frappe.db.sql("""select * from `tabComment`
where (owner=%(contact)s
or comment_docname=%(user)s
or (owner=comment_docname and ifnull(parenttype, "")!="Assignment")
or owner=comment_docname)
and comment_doctype ='Message'
order by creation desc
limit %(limit_start)s, %(limit_page_length)s""", frappe.local.form_dict, as_dict=1)
else:
return frappe.db.sql("""select * from `tabComment`
where ((owner=%(contact)s and comment_docname=%(user)s)
or (owner=%(user)s and comment_docname=%(contact)s)
or (owner=%(contact)s and comment_docname=%(contact)s))
and comment_doctype ='Message'
order by creation desc
limit %(limit_start)s, %(limit_page_length)s""", frappe.local.form_dict, as_dict=1)
@frappe.whitelist()
def get_active_users():
data = frappe.db.sql("""select name,
(select count(*) from tabSessions where user=tabUser.name
and timediff(now(), lastupdate) < time("01:00:00")) as has_session
from tabUser
where enabled=1 and
ifnull(user_type, '')!='Website User' and
name not in ({})
order by first_name""".format(", ".join(["%s"]*len(STANDARD_USERS))), STANDARD_USERS, as_dict=1)
# make sure current user is at the top, using has_session = 100
users = [d.name for d in data]
if frappe.session.user in users:
data[users.index(frappe.session.user)]["has_session"] = 100
else:
# in case of administrator
data.append({"name": frappe.session.user, "has_session": 100})
return data
@frappe.whitelist()
def post(txt, contact, parenttype=None, notify=False, subject=None):
	"""post message"""
	d = frappe.new_doc('Comment')
d.parenttype = parenttype
d.comment = txt
d.comment_docname = contact
d.comment_doctype = 'Message'
d.comment_by_fullname = get_fullname(frappe.session.user)
d.insert(ignore_permissions=True)
delete_notification_count_for("Messages")
if notify and cint(notify):
if contact==frappe.session.user:
_notify([user.name for user in get_enabled_system_users()], txt)
else:
_notify(contact, txt, subject)
return d
@frappe.whitelist()
def delete(arg=None):
frappe.get_doc("Comment", frappe.form_dict['name']).delete()
def _notify(contact, txt, subject=None):
from frappe.utils import get_fullname, get_url
try:
if not isinstance(contact, list):
contact = [frappe.db.get_value("User", contact, "email") or contact]
		frappe.sendmail(
recipients=contact,
sender= frappe.db.get_value("User", frappe.session.user, "email"),
subject=subject or "New Message from " + get_fullname(frappe.session.user),
message=frappe.get_template("templates/emails/new_message.html").render({
"from": get_fullname(frappe.session.user),
"message": txt,
"link": get_url()
}),
bulk=True)
except frappe.OutgoingEmailError:
pass
| 32.333333
| 98
| 0.731959
|
from __future__ import unicode_literals
import frappe
from frappe.desk.notifications import delete_notification_count_for
from frappe.core.doctype.user.user import STANDARD_USERS
from frappe.utils.user import get_enabled_system_users
from frappe.utils import cint, get_fullname
@frappe.whitelist()
def get_list(arg=None):
frappe.form_dict['limit_start'] = int(frappe.form_dict['limit_start'])
frappe.form_dict['limit_page_length'] = int(frappe.form_dict['limit_page_length'])
frappe.form_dict['user'] = frappe.session['user']
frappe.db.begin()
frappe.db.sql("""UPDATE `tabComment`
set docstatus = 1 where comment_doctype in ('My Company', 'Message')
and comment_docname = %s
""", frappe.session.user)
delete_notification_count_for("Messages")
frappe.db.commit()
if frappe.form_dict['contact'] == frappe.session['user']:
return frappe.db.sql("""select * from `tabComment`
where (owner=%(contact)s
or comment_docname=%(user)s
or (owner=comment_docname and ifnull(parenttype, "")!="Assignment")
or owner=comment_docname)
and comment_doctype ='Message'
order by creation desc
limit %(limit_start)s, %(limit_page_length)s""", frappe.local.form_dict, as_dict=1)
else:
return frappe.db.sql("""select * from `tabComment`
where ((owner=%(contact)s and comment_docname=%(user)s)
or (owner=%(user)s and comment_docname=%(contact)s)
or (owner=%(contact)s and comment_docname=%(contact)s))
and comment_doctype ='Message'
order by creation desc
limit %(limit_start)s, %(limit_page_length)s""", frappe.local.form_dict, as_dict=1)
@frappe.whitelist()
def get_active_users():
data = frappe.db.sql("""select name,
(select count(*) from tabSessions where user=tabUser.name
and timediff(now(), lastupdate) < time("01:00:00")) as has_session
from tabUser
where enabled=1 and
ifnull(user_type, '')!='Website User' and
name not in ({})
order by first_name""".format(", ".join(["%s"]*len(STANDARD_USERS))), STANDARD_USERS, as_dict=1)
users = [d.name for d in data]
if frappe.session.user in users:
data[users.index(frappe.session.user)]["has_session"] = 100
else:
data.append({"name": frappe.session.user, "has_session": 100})
return data
@frappe.whitelist()
def post(txt, contact, parenttype=None, notify=False, subject=None):
d = frappe.new_doc('Comment')
d.parenttype = parenttype
d.comment = txt
d.comment_docname = contact
d.comment_doctype = 'Message'
d.comment_by_fullname = get_fullname(frappe.session.user)
d.insert(ignore_permissions=True)
delete_notification_count_for("Messages")
if notify and cint(notify):
if contact==frappe.session.user:
_notify([user.name for user in get_enabled_system_users()], txt)
else:
_notify(contact, txt, subject)
return d
@frappe.whitelist()
def delete(arg=None):
frappe.get_doc("Comment", frappe.form_dict['name']).delete()
def _notify(contact, txt, subject=None):
from frappe.utils import get_fullname, get_url
try:
if not isinstance(contact, list):
contact = [frappe.db.get_value("User", contact, "email") or contact]
		frappe.sendmail(
recipients=contact,
sender= frappe.db.get_value("User", frappe.session.user, "email"),
subject=subject or "New Message from " + get_fullname(frappe.session.user),
message=frappe.get_template("templates/emails/new_message.html").render({
"from": get_fullname(frappe.session.user),
"message": txt,
"link": get_url()
}),
bulk=True)
except frappe.OutgoingEmailError:
pass
| true
| true
|
1c41b83e466645e23e02647f5200a2a956032b65
| 1,098
|
py
|
Python
|
nipype/interfaces/dipy/tests/test_auto_DTI.py
|
moloney/nipype
|
a7a9c85c79cb1412ba03406074f83200447ef50b
|
[
"Apache-2.0"
] | 7
|
2017-02-17T08:54:26.000Z
|
2022-03-10T20:57:23.000Z
|
nipype/interfaces/dipy/tests/test_auto_DTI.py
|
moloney/nipype
|
a7a9c85c79cb1412ba03406074f83200447ef50b
|
[
"Apache-2.0"
] | 1
|
2016-04-25T15:07:09.000Z
|
2016-04-25T15:07:09.000Z
|
nipype/interfaces/dipy/tests/test_auto_DTI.py
|
moloney/nipype
|
a7a9c85c79cb1412ba03406074f83200447ef50b
|
[
"Apache-2.0"
] | 2
|
2017-09-23T16:22:00.000Z
|
2019-08-01T14:18:52.000Z
|
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from __future__ import unicode_literals
from ..tensors import DTI
def test_DTI_inputs():
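    # auto-generated consistency check: every metadata key declared below must
    # match the corresponding trait metadata on the DTI input spec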
input_map = dict(
b0_thres=dict(usedefault=True, ),
ignore_exception=dict(
deprecated='1.0.0',
nohash=True,
usedefault=True,
),
in_bval=dict(mandatory=True, ),
in_bvec=dict(mandatory=True, ),
in_file=dict(mandatory=True, ),
mask_file=dict(),
out_prefix=dict(),
)
inputs = DTI.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(inputs.traits()[key], metakey) == value
def test_DTI_outputs():
output_map = dict(
ad_file=dict(),
fa_file=dict(),
md_file=dict(),
out_file=dict(),
rd_file=dict(),
)
outputs = DTI.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(outputs.traits()[key], metakey) == value
| 28.894737
| 67
| 0.598361
|
from __future__ import unicode_literals
from ..tensors import DTI
def test_DTI_inputs():
input_map = dict(
b0_thres=dict(usedefault=True, ),
ignore_exception=dict(
deprecated='1.0.0',
nohash=True,
usedefault=True,
),
in_bval=dict(mandatory=True, ),
in_bvec=dict(mandatory=True, ),
in_file=dict(mandatory=True, ),
mask_file=dict(),
out_prefix=dict(),
)
inputs = DTI.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(inputs.traits()[key], metakey) == value
def test_DTI_outputs():
output_map = dict(
ad_file=dict(),
fa_file=dict(),
md_file=dict(),
out_file=dict(),
rd_file=dict(),
)
outputs = DTI.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(outputs.traits()[key], metakey) == value
| true
| true
|
1c41b978de96a66633f186c828e64a2297200373
| 7,955
|
py
|
Python
|
src/app/modules/a3dc_interface.py
|
KatonaLab/Build3D
|
f1430080d5bee9febfbc83c9b2cb2ebf345037ee
|
[
"MIT"
] | null | null | null |
src/app/modules/a3dc_interface.py
|
KatonaLab/Build3D
|
f1430080d5bee9febfbc83c9b2cb2ebf345037ee
|
[
"MIT"
] | 5
|
2021-03-19T09:28:07.000Z
|
2022-03-12T00:09:14.000Z
|
src/app/modules/a3dc_interface.py
|
KatonaLab/Build3D
|
f1430080d5bee9febfbc83c9b2cb2ebf345037ee
|
[
"MIT"
] | 1
|
2019-12-23T16:44:49.000Z
|
2019-12-23T16:44:49.000Z
|
import time
import collections.abc
from modules.packages.a3dc.ImageClass import ImageClass
from modules.packages.a3dc.segmentation import tag_image
import modules.packages.a3dc.core as core
def tagImage(image):
'''
    Runs connected-component labelling on the input image.
    :param image: ImageClass holding the nd array to label
    :return: (ImageClass with the tagged array, log text)
'''
# Start timing
tstart = time.process_time()
    # Create LogText and start logging
logText = '\nRunning connected components on : ' + str(image.metadata['Name'])
#Tag image
output_array=tag_image(image.get_3d_array())
    #Create metadata dictionary and set its type to match the tagged image
    output_metadata=image.metadata
    output_metadata['Type']=str(output_array.dtype)
# Finish timing and add to logText
tstop = time.process_time()
logText += '\n\tProcessing finished in ' + str((tstop - tstart)) + ' seconds! '
return ImageClass(output_array, output_metadata), logText
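#Illustrative usage (a sketch; assumes an ImageClass instance `img` created elsewhere):
#    tagged, log = tagImage(img)
#    print(log)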
def threshold(image, method="Otsu", **kwargs):
'''
:param image:
:param imageDictionary:
:param method:
:param kwargs:
lowerThreshold, upperThreshold, mode,blockSize=5, offSet=0
:return:
LogText
'''
    # Create LogText and start logging
logText = 'Thresholding: '+image.metadata['Name']
#Measure raw image data:
raw_data=core.analyze_raw(image)
logText += '\n\tRaw Image Parameters: ' + str(raw_data)
logText += '\n\tMethod: ' + method
logText += '\n\tSettings: ' + str(kwargs).replace('}','').replace('{','')
output, thresholdValue=core.threshold(image, method, **kwargs)
logText += '\n\tThreshold Value: ' +str(thresholdValue)
return output, logText
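#Illustrative usage (a sketch): output, log = threshold(img, method='Otsu')
#(keyword arguments such as lowerThreshold/upperThreshold follow the docstring above)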
def analyze(tagged_image, image_list=None, measurementInput=['voxelCount', 'meanIntensity', 'sumIntensity']):
'''
    Analyzes taggedImage and appends a 'database' entry to its dictionary containing the measured values.
:param tagged_img: tagged image
:param taggedDictionary: dictionary with descriptors of tagged image
:param imageList: image list where intensity is measured within objects of tagged_img
    :param dictionaryList: list of dictionaries that pertain to each element in imageList
:param outputImage: output image
:param outputDictionary: dictionary with descriptors of outputImage
:return:
'''
# Start timing
tstart = time.process_time()
    # Create LogText and start logging
logText = '\nAnalyzing: ' + str(tagged_image.metadata['Name'])
#Print list of images in Imagelist to log text
if image_list != None:
logText += '\n\tMeasuring intensity in: '
for img in image_list:
logText += img.metadata['Name']
#Analyze image
tagged_img=core.analyze(tagged_image, image_list, measurementInput)
#Add number of objects to logText
logText += '\n\tNumber of objects: '+str(len(tagged_img.database['tag']))
# Finish timing and add to logText
tstop = time.process_time()
logText += '\n\tProcessing finished in ' + str((tstop - tstart)) + ' seconds! '
return tagged_img, logText
def apply_filter(image, filter_dict=None, remove_filtered=False, overwrite=True):
'''
    Filters the object database stored under the 'database' key of the input image. A Boolean mask is appended
    to the database and returned with the output image. If remove_filtered=True the filtered tags are removed from the output. If overwrite=True a new Boolean mask is created.
    :param inputDictionary: Dictionary containing information related to inputImage
:param inputImage: Tagged image
:param filterDict: Dictionary contains the keywords to be filtered and the min/maximum value as the following example:
dictFilter={'volume':{'min':2, 'max':11}}#, 'mean in '+taggedDictList[0]['name']: {'min':2, 'max':3}}
:param outputDictionary
:param inputImage
:param removeFiltered: If True objects that are filtered out are removed
:return:
'''
# Start timing
tstart = time.process_time()
    # Create LogText and start logging
logText = '\nFiltering: ' + str(image.metadata['Name'])
logText += '\n\tFilter settings: '+str(filter_dict).replace('{', ' ').replace('}', ' ')
logText += '\n\t\tremoveFiltered=' + str(remove_filtered)
logText += '\n\t\toverwrite=' + str(overwrite)
# Filter
output_image=core.apply_filter(image, filter_dict, remove_filtered, overwrite)
# Finish timing and add to logText
tstop = time.process_time()
logText += '\n\tProcessing finished in ' + str((tstop - tstart)) + ' seconds! '
return output_image , logText
def colocalization(tagged_img_list, source_image_list=None, overlapping_filter=None,
remove_filtered=False, overWrite=True):
'''
:param tagged_img_list:
:param taggedDictList:
:param sourceImageList:
:param overlappingFilterList:
:param filterImage:
:return:
'''
    # Start timing
tstart = time.process_time()
    # Create LogText
logText = '\nColocalization analysis started using: '
for img in tagged_img_list:
logText += '\t ' + str(img.metadata['Name'])
# Add Filter settings
logText += '\n\tFilter settings: ' + str(overlapping_filter).replace('{', ' ').replace('}', ' ')
logText += '\n\t\tremoveFiltered=' + str(remove_filtered)
logText += '\n\t\toverwrite=' + str(overWrite)
# Determine connectivity data
overlapping_image, _ =core.colocalization(tagged_img_list, source_image_list, overlapping_filter, remove_filtered, overWrite)
#Print number of objects to logText
logText += '\n\tNumber of Overlapping Objects: '+str(len(overlapping_image.database['tag']))
# Finish timing and add to logText
tstop = time.process_time()
logText += '\n\tProcessing finished in ' + str((tstop - tstart)) + ' seconds! '
return overlapping_image, logText
def save_data(image_list, path, file_name='output', to_text=True):
'''
:param dictionaryList: Save dictionaries in inputDictionaryList
:param path: path where file is saved
:param toText: if True data are saved to text
    :param fileName: filename WITHOUT extension
:return:
'''
# Start timing
tstart = time.process_time()
#If input is not list create list
    if not isinstance(image_list, collections.abc.Iterable):
image_list=[image_list]
    # Create LogText and start logging
logText = '\nSaving database: '
# Add names of dictionary sources to logText
for img in image_list:
logText += '\t' + str(img.metadata['Name'])
#Add settings to logText
# Add filter settings to logText
logText += '\n\tPath: '+str(path)
logText += '\n\tFilename: '+str(file_name)
    if to_text: logText += '.txt'
    else: logText += '.xlsx'
core.save_data(image_list, path, file_name, to_text)
# Finish timing and add to logText
tstop = time.process_time()
logText += '\n\tProcessing finished in ' + str((tstop - tstart)) + ' seconds! '
return logText
def save_image(image_list, path, file_name):
# Start timing
tstart = time.process_time()
#If input is not list create list
    if not isinstance(image_list, collections.abc.Iterable):
image_list=[image_list]
    # Create LogText and start logging
logText = '\nSaving image: '
logText += '\t' + str([x.metadata['Name'] for x in image_list])
logText += '\n\tPath: '+str(path)
logText += '\n\tFile Name: '+str(file_name)
#Save image
core.save_image(image_list, path, file_name)
# Finish timing and add to logText
tstop = time.process_time()
logText += '\n\tProcessing finished in ' + str((tstop - tstart)) + ' seconds! '
return logText
| 33.707627
| 194
| 0.674921
|
import time
import collections.abc
from modules.packages.a3dc.ImageClass import ImageClass
from modules.packages.a3dc.segmentation import tag_image
import modules.packages.a3dc.core as core
def tagImage(image):
tstart = time.process_time()
logText = '\nRunning connected components on : ' + str(image.metadata['Name'])
output_array=tag_image(image.get_3d_array())
output_metadata=image.metadata
image.metadata['Type']=str(output_array.dtype)
tstop = time.process_time()
logText += '\n\tProcessing finished in ' + str((tstop - tstart)) + ' seconds! '
return ImageClass(output_array, output_metadata), logText
def threshold(image, method="Otsu", **kwargs):
logText = 'Thresholding: '+image.metadata['Name']
raw_data=core.analyze_raw(image)
logText += '\n\tRaw Image Parameters: ' + str(raw_data)
logText += '\n\tMethod: ' + method
logText += '\n\tSettings: ' + str(kwargs).replace('}','').replace('{','')
output, thresholdValue=core.threshold(image, method, **kwargs)
logText += '\n\tThreshold Value: ' +str(thresholdValue)
return output, logText
def analyze(tagged_image, image_list=None, measurementInput=['voxelCount', 'meanIntensity', 'sumIntensity']):
tstart = time.process_time()
logText = '\nAnalyzing: ' + str(tagged_image.metadata['Name'])
if image_list != None:
logText += '\n\tMeasuring intensity in: '
for img in image_list:
logText += img.metadata['Name']
tagged_img=core.analyze(tagged_image, image_list, measurementInput)
logText += '\n\tNumber of objects: '+str(len(tagged_img.database['tag']))
tstop = time.process_time()
logText += '\n\tProcessing finished in ' + str((tstop - tstart)) + ' seconds! '
return tagged_img, logText
def apply_filter(image, filter_dict=None, remove_filtered=False, overwrite=True):
tstart = time.process_time()
logText = '\nFiltering: ' + str(image.metadata['Name'])
logText += '\n\tFilter settings: '+str(filter_dict).replace('{', ' ').replace('}', ' ')
logText += '\n\t\tremoveFiltered=' + str(remove_filtered)
logText += '\n\t\toverwrite=' + str(overwrite)
output_image=core.apply_filter(image, filter_dict, remove_filtered, overwrite)
tstop = time.process_time()
logText += '\n\tProcessing finished in ' + str((tstop - tstart)) + ' seconds! '
return output_image , logText
def colocalization(tagged_img_list, source_image_list=None, overlapping_filter=None,
remove_filtered=False, overWrite=True):
tstart = time.process_time()
logText = '\nColocalization analysis started using: '
for img in tagged_img_list:
logText += '\t ' + str(img.metadata['Name'])
logText += '\n\tFilter settings: ' + str(overlapping_filter).replace('{', ' ').replace('}', ' ')
logText += '\n\t\tremoveFiltered=' + str(remove_filtered)
logText += '\n\t\toverwrite=' + str(overWrite)
overlapping_image, _ =core.colocalization(tagged_img_list, source_image_list, overlapping_filter, remove_filtered, overWrite)
logText += '\n\tNumber of Overlapping Objects: '+str(len(overlapping_image.database['tag']))
tstop = time.process_time()
logText += '\n\tProcessing finished in ' + str((tstop - tstart)) + ' seconds! '
return overlapping_image, logText
def save_data(image_list, path, file_name='output', to_text=True):
tstart = time.process_time()
    if not isinstance(image_list, collections.abc.Iterable):
image_list=[image_list]
logText = '\nSaving database: '
for img in image_list:
logText += '\t' + str(img.metadata['Name'])
logText += '\n\tPath: '+str(path)
logText += '\n\tFilename: '+str(file_name)
    if to_text: logText += '.txt'
    else: logText += '.xlsx'
core.save_data(image_list, path, file_name, to_text)
tstop = time.process_time()
logText += '\n\tProcessing finished in ' + str((tstop - tstart)) + ' seconds! '
return logText
def save_image(image_list, path, file_name):
tstart = time.process_time()
    if not isinstance(image_list, collections.abc.Iterable):
image_list=[image_list]
logText = '\nSaving image: '
logText += '\t' + str([x.metadata['Name'] for x in image_list])
logText += '\n\tPath: '+str(path)
logText += '\n\tFile Name: '+str(file_name)
core.save_image(image_list, path, file_name)
tstop = time.process_time()
logText += '\n\tProcessing finished in ' + str((tstop - tstart)) + ' seconds! '
return logText
| true
| true
|
1c41ba1d5c8d38fd5e567d508126d7895d87d08b
| 8,978
|
py
|
Python
|
donkeycar/parts/lidar.py
|
BillyCheung10botics/donkeycar
|
a3278818367e65250a381e59458b5be13b7d2b7c
|
[
"MIT"
] | null | null | null |
donkeycar/parts/lidar.py
|
BillyCheung10botics/donkeycar
|
a3278818367e65250a381e59458b5be13b7d2b7c
|
[
"MIT"
] | null | null | null |
donkeycar/parts/lidar.py
|
BillyCheung10botics/donkeycar
|
a3278818367e65250a381e59458b5be13b7d2b7c
|
[
"MIT"
] | null | null | null |
"""
Lidar
"""
# requires rplidar to be installed: "pip3 install rplidar"
# (the glob module used below is in the standard library; serial comes from pyserial)
import time
import math
import pickle
import serial
import numpy as np
from donkeycar.utils import norm_deg, dist, deg2rad, arr_to_img
from PIL import Image, ImageDraw
class RPLidar(object):
'''
https://github.com/SkoltechRobotics/rplidar
'''
def __init__(self, lower_limit = 0, upper_limit = 360, debug=False):
from rplidar import RPLidar
import glob
port_found = False
self.lower_limit = lower_limit
self.upper_limit = upper_limit
temp_list = glob.glob ('/dev/ttyUSB*')
result = []
for a_port in temp_list:
try:
s = serial.Serial(a_port)
s.close()
result.append(a_port)
port_found = True
except serial.SerialException:
pass
if port_found:
self.port = result[0]
self.distances = [] #a list of distance measurements
self.angles = [] # a list of angles corresponding to dist meas above
self.lidar = RPLidar(self.port, baudrate=115200)
self.lidar.clear_input()
time.sleep(1)
self.on = True
#print(self.lidar.get_info())
#print(self.lidar.get_health())
else:
print("No Lidar found")
def update(self):
scans = self.lidar.iter_scans(550)
while self.on:
try:
for scan in scans:
self.distances = [item[2] for item in scan]
self.angles = [item[1] for item in scan]
except serial.serialutil.SerialException:
print('serial.serialutil.SerialException from Lidar. common when shutting down.')
def run_threaded(self):
sorted_distances = []
if (self.angles != []) and (self.distances != []):
angs = np.copy(self.angles)
dists = np.copy(self.distances)
filter_angs = angs[(angs > self.lower_limit) & (angs < self.upper_limit)]
filter_dist = dists[(angs > self.lower_limit) & (angs < self.upper_limit)] #sorts distances based on angle values
            angles_ind = np.argsort(filter_angs) # indexes that sort filter_angs ascending
            if len(angles_ind) > 0:
                sorted_distances = filter_dist[angles_ind] # distances reordered by ascending angle
return sorted_distances
def shutdown(self):
self.on = False
time.sleep(2)
self.lidar.stop()
self.lidar.stop_motor()
self.lidar.disconnect()
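# Minimal threaded-usage sketch (hedged; donkeycar normally drives update() from
# its Vehicle loop, but an explicit thread shows the same contract):
#   import threading
#   lidar = RPLidar()
#   threading.Thread(target=lidar.update, daemon=True).start()
#   distances = lidar.run_threaded()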
class YDLidar(object):
'''
https://pypi.org/project/PyLidar3/
'''
def __init__(self, port='/dev/ttyUSB0'):
import PyLidar3
self.port = port
self.distances = [] #a list of distance measurements
self.angles = [] # a list of angles corresponding to dist meas above
self.lidar = PyLidar3.YdLidarX4(port)
if(self.lidar.Connect()):
print(self.lidar.GetDeviceInfo())
self.gen = self.lidar.StartScanning()
else:
print("Error connecting to lidar")
self.on = True
def init(self, port='/dev/ttyUSB0'):
import PyLidar3
print("Starting lidar...")
self.port = port
self.distances = [] #a list of distance measurements
self.angles = [] # a list of angles corresponding to dist meas above
self.lidar = PyLidar3.YdLidarX4(port)
if(self.lidar.Connect()):
print(self.lidar.GetDeviceInfo())
gen = self.lidar.StartScanning()
return gen
else:
print("Error connecting to lidar")
self.on = True
#print(self.lidar.get_info())
#print(self.lidar.get_health())
def update(self, lidar, debug = False):
while self.on:
try:
self.data = next(lidar)
                # accumulate every point past 1000 mm rather than keeping only
                # the last one (the original overwrote the lists on each angle)
                angles, distances = [], []
                for angle in range(0, 360):
                    if self.data[angle] > 1000:
                        angles.append(angle)
                        distances.append(self.data[angle])
                self.angles = angles
                self.distances = distances
if debug:
return self.distances, self.angles
except serial.serialutil.SerialException:
print('serial.serialutil.SerialException from Lidar. common when shutting down.')
def run_threaded(self):
return self.distances, self.angles
def shutdown(self):
self.on = False
time.sleep(2)
self.lidar.StopScanning()
self.lidar.Disconnect()
class LidarPlot(object):
'''
takes the raw lidar measurements and plots it to an image
'''
PLOT_TYPE_LINE = 0
PLOT_TYPE_CIRC = 1
def __init__(self, resolution=(500,500),
max_dist=1000, #mm
radius_plot=3,
plot_type=PLOT_TYPE_CIRC):
self.frame = Image.new('RGB', resolution)
self.max_dist = max_dist
self.rad = radius_plot
self.resolution = resolution
if plot_type == self.PLOT_TYPE_CIRC:
self.plot_fn = self.plot_circ
else:
self.plot_fn = self.plot_line
def plot_line(self, img, dist, theta, max_dist, draw):
'''
scale dist so that max_dist is edge of img (mm)
and img is PIL Image, draw the line using the draw ImageDraw object
'''
center = (img.width / 2, img.height / 2)
max_pixel = min(center[0], center[1])
dist = dist / max_dist * max_pixel
if dist < 0 :
dist = 0
elif dist > max_pixel:
dist = max_pixel
theta = np.radians(theta)
sx = math.cos(theta) * dist + center[0]
sy = math.sin(theta) * dist + center[1]
ex = math.cos(theta) * (dist + self.rad) + center[0]
ey = math.sin(theta) * (dist + self.rad) + center[1]
fill = 128
draw.line((sx,sy, ex, ey), fill=(fill, fill, fill), width=1)
def plot_circ(self, img, dist, theta, max_dist, draw):
'''
scale dist so that max_dist is edge of img (mm)
and img is PIL Image, draw the circle using the draw ImageDraw object
'''
center = (img.width / 2, img.height / 2)
max_pixel = min(center[0], center[1])
dist = dist / max_dist * max_pixel
if dist < 0 :
dist = 0
elif dist > max_pixel:
dist = max_pixel
theta = np.radians(theta)
sx = int(math.cos(theta) * dist + center[0])
sy = int(math.sin(theta) * dist + center[1])
ex = int(math.cos(theta) * (dist + 2 * self.rad) + center[0])
ey = int(math.sin(theta) * (dist + 2 * self.rad) + center[1])
fill = 128
draw.ellipse((min(sx, ex), min(sy, ey), max(sx, ex), max(sy, ey)), fill=(fill, fill, fill))
def plot_scan(self, img, distances, angles, max_dist, draw):
for dist, angle in zip(distances, angles):
self.plot_fn(img, dist, angle, max_dist, draw)
def run(self, distances, angles):
'''
takes two lists of equal length, one of distance values, the other of angles corresponding to the dist meas
'''
self.frame = Image.new('RGB', self.resolution, (255, 255, 255))
draw = ImageDraw.Draw(self.frame)
self.plot_scan(self.frame, distances, angles, self.max_dist, draw)
return self.frame
def shutdown(self):
pass
class BreezySLAM(object):
'''
https://github.com/simondlevy/BreezySLAM
'''
def __init__(self, MAP_SIZE_PIXELS=500, MAP_SIZE_METERS=10):
from breezyslam.algorithms import RMHC_SLAM
from breezyslam.sensors import Laser
laser_model = Laser(scan_size=360, scan_rate_hz=10., detection_angle_degrees=360, distance_no_detection_mm=12000)
MAP_QUALITY=5
self.slam = RMHC_SLAM(laser_model, MAP_SIZE_PIXELS, MAP_SIZE_METERS, MAP_QUALITY)
def run(self, distances, angles, map_bytes):
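        # distances are millimetres and angles degrees, matching the Laser model
        # configured above; BreezySLAM's getpos() returns x, y in mm and theta in degrees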
self.slam.update(distances, scan_angles_degrees=angles)
x, y, theta = self.slam.getpos()
if map_bytes is not None:
self.slam.getmap(map_bytes)
#print('x', x, 'y', y, 'theta', norm_deg(theta))
return x, y, deg2rad(norm_deg(theta))
def shutdown(self):
pass
class BreezyMap(object):
'''
bitmap that may optionally be constructed by BreezySLAM
'''
def __init__(self, MAP_SIZE_PIXELS=500):
self.mapbytes = bytearray(MAP_SIZE_PIXELS * MAP_SIZE_PIXELS)
def run(self):
return self.mapbytes
def shutdown(self):
pass
class MapToImage(object):
def __init__(self, resolution=(500, 500)):
self.resolution = resolution
def run(self, map_bytes):
np_arr = np.array(map_bytes).reshape(self.resolution)
return arr_to_img(np_arr)
def shutdown(self):
pass
| 33.251852
| 125
| 0.583538
|
import time
import math
import pickle
import serial
import numpy as np
from donkeycar.utils import norm_deg, dist, deg2rad, arr_to_img
from PIL import Image, ImageDraw
class RPLidar(object):
def __init__(self, lower_limit = 0, upper_limit = 360, debug=False):
from rplidar import RPLidar
import glob
port_found = False
self.lower_limit = lower_limit
self.upper_limit = upper_limit
temp_list = glob.glob ('/dev/ttyUSB*')
result = []
for a_port in temp_list:
try:
s = serial.Serial(a_port)
s.close()
result.append(a_port)
port_found = True
except serial.SerialException:
pass
if port_found:
self.port = result[0]
self.distances = []
self.angles = []
self.lidar = RPLidar(self.port, baudrate=115200)
self.lidar.clear_input()
time.sleep(1)
self.on = True
else:
print("No Lidar found")
def update(self):
scans = self.lidar.iter_scans(550)
while self.on:
try:
for scan in scans:
self.distances = [item[2] for item in scan]
self.angles = [item[1] for item in scan]
except serial.serialutil.SerialException:
print('serial.serialutil.SerialException from Lidar. common when shutting down.')
def run_threaded(self):
sorted_distances = []
if (self.angles != []) and (self.distances != []):
angs = np.copy(self.angles)
dists = np.copy(self.distances)
filter_angs = angs[(angs > self.lower_limit) & (angs < self.upper_limit)]
filter_dist = dists[(angs > self.lower_limit) & (angs < self.upper_limit)]
angles_ind = np.argsort(filter_angs)
            if len(angles_ind) > 0:
                sorted_distances = filter_dist[angles_ind]
return sorted_distances
def shutdown(self):
self.on = False
time.sleep(2)
self.lidar.stop()
self.lidar.stop_motor()
self.lidar.disconnect()
class YDLidar(object):
def __init__(self, port='/dev/ttyUSB0'):
import PyLidar3
self.port = port
self.distances = []
self.angles = []
self.lidar = PyLidar3.YdLidarX4(port)
if(self.lidar.Connect()):
print(self.lidar.GetDeviceInfo())
self.gen = self.lidar.StartScanning()
else:
print("Error connecting to lidar")
self.on = True
def init(self, port='/dev/ttyUSB0'):
import PyLidar3
print("Starting lidar...")
self.port = port
self.distances = []
self.angles = []
self.lidar = PyLidar3.YdLidarX4(port)
if(self.lidar.Connect()):
print(self.lidar.GetDeviceInfo())
gen = self.lidar.StartScanning()
return gen
else:
print("Error connecting to lidar")
self.on = True
def update(self, lidar, debug = False):
while self.on:
try:
self.data = next(lidar)
                angles, distances = [], []
                for angle in range(0, 360):
                    if self.data[angle] > 1000:
                        angles.append(angle)
                        distances.append(self.data[angle])
                self.angles = angles
                self.distances = distances
if debug:
return self.distances, self.angles
except serial.serialutil.SerialException:
print('serial.serialutil.SerialException from Lidar. common when shutting down.')
def run_threaded(self):
return self.distances, self.angles
def shutdown(self):
self.on = False
time.sleep(2)
self.lidar.StopScanning()
self.lidar.Disconnect()
class LidarPlot(object):
PLOT_TYPE_LINE = 0
PLOT_TYPE_CIRC = 1
def __init__(self, resolution=(500,500),
max_dist=1000,
radius_plot=3,
plot_type=PLOT_TYPE_CIRC):
self.frame = Image.new('RGB', resolution)
self.max_dist = max_dist
self.rad = radius_plot
self.resolution = resolution
if plot_type == self.PLOT_TYPE_CIRC:
self.plot_fn = self.plot_circ
else:
self.plot_fn = self.plot_line
def plot_line(self, img, dist, theta, max_dist, draw):
center = (img.width / 2, img.height / 2)
max_pixel = min(center[0], center[1])
dist = dist / max_dist * max_pixel
if dist < 0 :
dist = 0
elif dist > max_pixel:
dist = max_pixel
theta = np.radians(theta)
sx = math.cos(theta) * dist + center[0]
sy = math.sin(theta) * dist + center[1]
ex = math.cos(theta) * (dist + self.rad) + center[0]
ey = math.sin(theta) * (dist + self.rad) + center[1]
fill = 128
draw.line((sx,sy, ex, ey), fill=(fill, fill, fill), width=1)
def plot_circ(self, img, dist, theta, max_dist, draw):
center = (img.width / 2, img.height / 2)
max_pixel = min(center[0], center[1])
dist = dist / max_dist * max_pixel
if dist < 0 :
dist = 0
elif dist > max_pixel:
dist = max_pixel
theta = np.radians(theta)
sx = int(math.cos(theta) * dist + center[0])
sy = int(math.sin(theta) * dist + center[1])
ex = int(math.cos(theta) * (dist + 2 * self.rad) + center[0])
ey = int(math.sin(theta) * (dist + 2 * self.rad) + center[1])
fill = 128
draw.ellipse((min(sx, ex), min(sy, ey), max(sx, ex), max(sy, ey)), fill=(fill, fill, fill))
def plot_scan(self, img, distances, angles, max_dist, draw):
for dist, angle in zip(distances, angles):
self.plot_fn(img, dist, angle, max_dist, draw)
def run(self, distances, angles):
self.frame = Image.new('RGB', self.resolution, (255, 255, 255))
draw = ImageDraw.Draw(self.frame)
self.plot_scan(self.frame, distances, angles, self.max_dist, draw)
return self.frame
def shutdown(self):
pass
class BreezySLAM(object):
def __init__(self, MAP_SIZE_PIXELS=500, MAP_SIZE_METERS=10):
from breezyslam.algorithms import RMHC_SLAM
from breezyslam.sensors import Laser
laser_model = Laser(scan_size=360, scan_rate_hz=10., detection_angle_degrees=360, distance_no_detection_mm=12000)
MAP_QUALITY=5
self.slam = RMHC_SLAM(laser_model, MAP_SIZE_PIXELS, MAP_SIZE_METERS, MAP_QUALITY)
def run(self, distances, angles, map_bytes):
self.slam.update(distances, scan_angles_degrees=angles)
x, y, theta = self.slam.getpos()
if map_bytes is not None:
self.slam.getmap(map_bytes)
return x, y, deg2rad(norm_deg(theta))
def shutdown(self):
pass
class BreezyMap(object):
def __init__(self, MAP_SIZE_PIXELS=500):
self.mapbytes = bytearray(MAP_SIZE_PIXELS * MAP_SIZE_PIXELS)
def run(self):
return self.mapbytes
def shutdown(self):
pass
class MapToImage(object):
def __init__(self, resolution=(500, 500)):
self.resolution = resolution
def run(self, map_bytes):
np_arr = np.array(map_bytes).reshape(self.resolution)
return arr_to_img(np_arr)
def shutdown(self):
pass
| true
| true
|
1c41ba43a17d398946ed978dc080c422fb4fbbb7
| 11,818
|
py
|
Python
|
src/models/inception_resnet_v1.py
|
zixia/python-facenet
|
d86e0c49a9ce413bef6e58a19a9f723aadcef968
|
[
"MIT"
] | 4
|
2018-06-11T03:02:49.000Z
|
2018-07-11T07:18:52.000Z
|
src/models/inception_resnet_v1.py
|
zixia/python-facenet
|
d86e0c49a9ce413bef6e58a19a9f723aadcef968
|
[
"MIT"
] | null | null | null |
src/models/inception_resnet_v1.py
|
zixia/python-facenet
|
d86e0c49a9ce413bef6e58a19a9f723aadcef968
|
[
"MIT"
] | 2
|
2017-08-31T05:35:36.000Z
|
2018-10-11T16:42:15.000Z
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the definition of the Inception Resnet V1 architecture.
As described in http://arxiv.org/abs/1602.07261.
Inception-v4, Inception-ResNet and the Impact of Residual Connections
on Learning
Christian Szegedy, Sergey Ioffe, Vincent Vanhoucke, Alex Alemi
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import tensorflow.contrib.slim as slim
# Inception-Resnet-A
def block35(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
"""Builds the 35x35 resnet block."""
with tf.variable_scope(scope, 'Block35', [net], reuse=reuse):
with tf.variable_scope('Branch_0'):
tower_conv = slim.conv2d(net, 32, 1, scope='Conv2d_1x1')
with tf.variable_scope('Branch_1'):
tower_conv1_0 = slim.conv2d(net, 32, 1, scope='Conv2d_0a_1x1')
tower_conv1_1 = slim.conv2d(tower_conv1_0, 32, 3, scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
tower_conv2_0 = slim.conv2d(net, 32, 1, scope='Conv2d_0a_1x1')
tower_conv2_1 = slim.conv2d(tower_conv2_0, 32, 3, scope='Conv2d_0b_3x3')
tower_conv2_2 = slim.conv2d(tower_conv2_1, 32, 3, scope='Conv2d_0c_3x3')
mixed = tf.concat([tower_conv, tower_conv1_1, tower_conv2_2], 3)
up35 = slim.conv2d(mixed, net.get_shape()[3], 1, normalizer_fn=None,
activation_fn=None, scope='Conv2d_1x1')
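        # scale the residual branch before adding it back; the Inception-ResNet
        # paper suggests down-scaling residuals (e.g. 0.1-0.3) stabilizes training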
net += scale * up35
if activation_fn:
net = activation_fn(net)
return net
# Inception-Resnet-B
def block17(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
"""Builds the 17x17 resnet block."""
with tf.variable_scope(scope, 'Block17', [net], reuse=reuse):
with tf.variable_scope('Branch_0'):
tower_conv = slim.conv2d(net, 128, 1, scope='Conv2d_1x1')
with tf.variable_scope('Branch_1'):
tower_conv1_0 = slim.conv2d(net, 128, 1, scope='Conv2d_0a_1x1')
tower_conv1_1 = slim.conv2d(tower_conv1_0, 128, [1, 7],
scope='Conv2d_0b_1x7')
tower_conv1_2 = slim.conv2d(tower_conv1_1, 128, [7, 1],
scope='Conv2d_0c_7x1')
mixed = tf.concat([tower_conv, tower_conv1_2], 3)
up17 = slim.conv2d(mixed, net.get_shape()[3], 1, normalizer_fn=None,
activation_fn=None, scope='Conv2d_1x1')
net += scale * up17
if activation_fn:
net = activation_fn(net)
return net
# Inception-Resnet-C
def block8(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
"""Builds the 8x8 resnet block."""
with tf.variable_scope(scope, 'Block8', [net], reuse=reuse):
with tf.variable_scope('Branch_0'):
tower_conv = slim.conv2d(net, 192, 1, scope='Conv2d_1x1')
with tf.variable_scope('Branch_1'):
tower_conv1_0 = slim.conv2d(net, 192, 1, scope='Conv2d_0a_1x1')
tower_conv1_1 = slim.conv2d(tower_conv1_0, 192, [1, 3],
scope='Conv2d_0b_1x3')
tower_conv1_2 = slim.conv2d(tower_conv1_1, 192, [3, 1],
scope='Conv2d_0c_3x1')
mixed = tf.concat([tower_conv, tower_conv1_2], 3)
up8 = slim.conv2d(mixed, net.get_shape()[3], 1, normalizer_fn=None,
activation_fn=None, scope='Conv2d_1x1')
net += scale * up8
if activation_fn:
net = activation_fn(net)
return net
# pylint: disable=C0103
def reduction_a(net, k, l, m, n):
"""reduction
"""
with tf.variable_scope('Branch_0'):
tower_conv = slim.conv2d(net, n, 3, stride=2, padding='VALID',
scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_1'):
tower_conv1_0 = slim.conv2d(net, k, 1, scope='Conv2d_0a_1x1')
tower_conv1_1 = slim.conv2d(tower_conv1_0, l, 3,
scope='Conv2d_0b_3x3')
tower_conv1_2 = slim.conv2d(tower_conv1_1, m, 3,
stride=2, padding='VALID',
scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_2'):
tower_pool = slim.max_pool2d(net, 3, stride=2, padding='VALID',
scope='MaxPool_1a_3x3')
net = tf.concat([tower_conv, tower_conv1_2, tower_pool], 3)
return net
def reduction_b(net):
"""reduction b"""
with tf.variable_scope('Branch_0'):
tower_conv = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1')
tower_conv_1 = slim.conv2d(tower_conv, 384, 3, stride=2,
padding='VALID', scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_1'):
tower_conv1 = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1')
tower_conv1_1 = slim.conv2d(tower_conv1, 256, 3, stride=2,
padding='VALID', scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_2'):
tower_conv2 = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1')
tower_conv2_1 = slim.conv2d(tower_conv2, 256, 3,
scope='Conv2d_0b_3x3')
tower_conv2_2 = slim.conv2d(tower_conv2_1, 256, 3, stride=2,
padding='VALID', scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_3'):
tower_pool = slim.max_pool2d(net, 3, stride=2, padding='VALID',
scope='MaxPool_1a_3x3')
net = tf.concat([tower_conv_1, tower_conv1_1,
tower_conv2_2, tower_pool], 3)
return net
def inference(images, keep_probability, phase_train=True,
bottleneck_layer_size=128, weight_decay=0.0, reuse=None):
"""inference"""
batch_norm_params = {
# Decay for the moving averages.
'decay': 0.995,
# epsilon to prevent 0s in variance.
'epsilon': 0.001,
# force in-place updates of mean and variance estimates
'updates_collections': None,
        # Moving averages end up in the trainable variables collection
'variables_collections': [tf.GraphKeys.TRAINABLE_VARIABLES],
}
with slim.arg_scope([slim.conv2d, slim.fully_connected],
weights_initializer=slim.initializers.xavier_initializer(),
weights_regularizer=slim.l2_regularizer(weight_decay),
normalizer_fn=slim.batch_norm,
normalizer_params=batch_norm_params):
return inception_resnet_v1(images, is_training=phase_train,
dropout_keep_prob=keep_probability,
bottleneck_layer_size=bottleneck_layer_size,
reuse=reuse)
def inception_resnet_v1(inputs, is_training=True,
dropout_keep_prob=0.8,
bottleneck_layer_size=128,
reuse=None,
scope='InceptionResnetV1'):
"""Creates the Inception Resnet V1 model.
Args:
inputs: a 4-D tensor of size [batch_size, height, width, 3].
    bottleneck_layer_size: number of units in the final embedding (bottleneck) layer.
is_training: whether is training or not.
dropout_keep_prob: float, the fraction to keep before final layer.
reuse: whether or not the network and its variables should be reused. To be
able to reuse 'scope' must be given.
scope: Optional variable_scope.
Returns:
    net: the bottleneck (embedding) output of the model.
end_points: the set of end_points from the inception model.
"""
end_points = {}
with tf.variable_scope(scope, 'InceptionResnetV1', [inputs], reuse=reuse):
with slim.arg_scope([slim.batch_norm, slim.dropout],
is_training=is_training):
with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
stride=1, padding='SAME'):
# 149 x 149 x 32
net = slim.conv2d(inputs, 32, 3, stride=2, padding='VALID',
scope='Conv2d_1a_3x3')
end_points['Conv2d_1a_3x3'] = net
# 147 x 147 x 32
net = slim.conv2d(net, 32, 3, padding='VALID',
scope='Conv2d_2a_3x3')
end_points['Conv2d_2a_3x3'] = net
# 147 x 147 x 64
net = slim.conv2d(net, 64, 3, scope='Conv2d_2b_3x3')
end_points['Conv2d_2b_3x3'] = net
# 73 x 73 x 64
net = slim.max_pool2d(net, 3, stride=2, padding='VALID',
scope='MaxPool_3a_3x3')
end_points['MaxPool_3a_3x3'] = net
# 73 x 73 x 80
net = slim.conv2d(net, 80, 1, padding='VALID',
scope='Conv2d_3b_1x1')
end_points['Conv2d_3b_1x1'] = net
# 71 x 71 x 192
net = slim.conv2d(net, 192, 3, padding='VALID',
scope='Conv2d_4a_3x3')
end_points['Conv2d_4a_3x3'] = net
# 35 x 35 x 256
net = slim.conv2d(net, 256, 3, stride=2, padding='VALID',
scope='Conv2d_4b_3x3')
end_points['Conv2d_4b_3x3'] = net
# 5 x Inception-resnet-A
net = slim.repeat(net, 5, block35, scale=0.17)
end_points['Mixed_5a'] = net
# Reduction-A
with tf.variable_scope('Mixed_6a'):
net = reduction_a(net, 192, 192, 256, 384)
end_points['Mixed_6a'] = net
# 10 x Inception-Resnet-B
net = slim.repeat(net, 10, block17, scale=0.10)
end_points['Mixed_6b'] = net
# Reduction-B
with tf.variable_scope('Mixed_7a'):
net = reduction_b(net)
end_points['Mixed_7a'] = net
# 5 x Inception-Resnet-C
net = slim.repeat(net, 5, block8, scale=0.20)
end_points['Mixed_8a'] = net
net = block8(net, activation_fn=None)
end_points['Mixed_8b'] = net
with tf.variable_scope('Logits'):
end_points['PrePool'] = net
#pylint: disable=no-member
net = slim.avg_pool2d(net, net.get_shape()[1:3], padding='VALID',
scope='AvgPool_1a_8x8')
net = slim.flatten(net)
net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
scope='Dropout')
end_points['PreLogitsFlatten'] = net
net = slim.fully_connected(net, bottleneck_layer_size, activation_fn=None,
scope='Bottleneck', reuse=False)
return net, end_points
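A minimal smoke test for the model above, as a hedged sketch: it assumes a
TF 1.x runtime with tf.contrib.slim available (as the module's imports
require) and that the file is importable under the repo path shown above.
import numpy as np
import tensorflow as tf
from inception_resnet_v1 import inference  # module name assumed from the repo path

images = tf.placeholder(tf.float32, [None, 160, 160, 3], name='input')
embeddings, _ = inference(images, keep_probability=1.0, phase_train=False,
                          bottleneck_layer_size=128)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    out = sess.run(embeddings,
                   feed_dict={images: np.zeros((1, 160, 160, 3), np.float32)})
print(out.shape)  # expected: (1, 128)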
| 46.527559
| 90
| 0.568793
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import tensorflow.contrib.slim as slim
def block35(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
with tf.variable_scope(scope, 'Block35', [net], reuse=reuse):
with tf.variable_scope('Branch_0'):
tower_conv = slim.conv2d(net, 32, 1, scope='Conv2d_1x1')
with tf.variable_scope('Branch_1'):
tower_conv1_0 = slim.conv2d(net, 32, 1, scope='Conv2d_0a_1x1')
tower_conv1_1 = slim.conv2d(tower_conv1_0, 32, 3, scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
tower_conv2_0 = slim.conv2d(net, 32, 1, scope='Conv2d_0a_1x1')
tower_conv2_1 = slim.conv2d(tower_conv2_0, 32, 3, scope='Conv2d_0b_3x3')
tower_conv2_2 = slim.conv2d(tower_conv2_1, 32, 3, scope='Conv2d_0c_3x3')
mixed = tf.concat([tower_conv, tower_conv1_1, tower_conv2_2], 3)
up35 = slim.conv2d(mixed, net.get_shape()[3], 1, normalizer_fn=None,
activation_fn=None, scope='Conv2d_1x1')
net += scale * up35
if activation_fn:
net = activation_fn(net)
return net
def block17(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
with tf.variable_scope(scope, 'Block17', [net], reuse=reuse):
with tf.variable_scope('Branch_0'):
tower_conv = slim.conv2d(net, 128, 1, scope='Conv2d_1x1')
with tf.variable_scope('Branch_1'):
tower_conv1_0 = slim.conv2d(net, 128, 1, scope='Conv2d_0a_1x1')
tower_conv1_1 = slim.conv2d(tower_conv1_0, 128, [1, 7],
scope='Conv2d_0b_1x7')
tower_conv1_2 = slim.conv2d(tower_conv1_1, 128, [7, 1],
scope='Conv2d_0c_7x1')
mixed = tf.concat([tower_conv, tower_conv1_2], 3)
up17 = slim.conv2d(mixed, net.get_shape()[3], 1, normalizer_fn=None,
activation_fn=None, scope='Conv2d_1x1')
net += scale * up17
if activation_fn:
net = activation_fn(net)
return net
def block8(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
with tf.variable_scope(scope, 'Block8', [net], reuse=reuse):
with tf.variable_scope('Branch_0'):
tower_conv = slim.conv2d(net, 192, 1, scope='Conv2d_1x1')
with tf.variable_scope('Branch_1'):
tower_conv1_0 = slim.conv2d(net, 192, 1, scope='Conv2d_0a_1x1')
tower_conv1_1 = slim.conv2d(tower_conv1_0, 192, [1, 3],
scope='Conv2d_0b_1x3')
tower_conv1_2 = slim.conv2d(tower_conv1_1, 192, [3, 1],
scope='Conv2d_0c_3x1')
mixed = tf.concat([tower_conv, tower_conv1_2], 3)
up8 = slim.conv2d(mixed, net.get_shape()[3], 1, normalizer_fn=None,
activation_fn=None, scope='Conv2d_1x1')
net += scale * up8
if activation_fn:
net = activation_fn(net)
return net
def reduction_a(net, k, l, m, n):
with tf.variable_scope('Branch_0'):
tower_conv = slim.conv2d(net, n, 3, stride=2, padding='VALID',
scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_1'):
tower_conv1_0 = slim.conv2d(net, k, 1, scope='Conv2d_0a_1x1')
tower_conv1_1 = slim.conv2d(tower_conv1_0, l, 3,
scope='Conv2d_0b_3x3')
tower_conv1_2 = slim.conv2d(tower_conv1_1, m, 3,
stride=2, padding='VALID',
scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_2'):
tower_pool = slim.max_pool2d(net, 3, stride=2, padding='VALID',
scope='MaxPool_1a_3x3')
net = tf.concat([tower_conv, tower_conv1_2, tower_pool], 3)
return net
def reduction_b(net):
with tf.variable_scope('Branch_0'):
tower_conv = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1')
tower_conv_1 = slim.conv2d(tower_conv, 384, 3, stride=2,
padding='VALID', scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_1'):
tower_conv1 = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1')
tower_conv1_1 = slim.conv2d(tower_conv1, 256, 3, stride=2,
padding='VALID', scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_2'):
tower_conv2 = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1')
tower_conv2_1 = slim.conv2d(tower_conv2, 256, 3,
scope='Conv2d_0b_3x3')
tower_conv2_2 = slim.conv2d(tower_conv2_1, 256, 3, stride=2,
padding='VALID', scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_3'):
tower_pool = slim.max_pool2d(net, 3, stride=2, padding='VALID',
scope='MaxPool_1a_3x3')
net = tf.concat([tower_conv_1, tower_conv1_1,
tower_conv2_2, tower_pool], 3)
return net
def inference(images, keep_probability, phase_train=True,
bottleneck_layer_size=128, weight_decay=0.0, reuse=None):
batch_norm_params = {
'decay': 0.995,
'epsilon': 0.001,
'updates_collections': None,
'variables_collections': [tf.GraphKeys.TRAINABLE_VARIABLES],
}
with slim.arg_scope([slim.conv2d, slim.fully_connected],
weights_initializer=slim.initializers.xavier_initializer(),
weights_regularizer=slim.l2_regularizer(weight_decay),
normalizer_fn=slim.batch_norm,
normalizer_params=batch_norm_params):
return inception_resnet_v1(images, is_training=phase_train,
dropout_keep_prob=keep_probability,
bottleneck_layer_size=bottleneck_layer_size,
reuse=reuse)
def inception_resnet_v1(inputs, is_training=True,
dropout_keep_prob=0.8,
bottleneck_layer_size=128,
reuse=None,
scope='InceptionResnetV1'):
end_points = {}
with tf.variable_scope(scope, 'InceptionResnetV1', [inputs], reuse=reuse):
with slim.arg_scope([slim.batch_norm, slim.dropout],
is_training=is_training):
with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
stride=1, padding='SAME'):
net = slim.conv2d(inputs, 32, 3, stride=2, padding='VALID',
scope='Conv2d_1a_3x3')
end_points['Conv2d_1a_3x3'] = net
net = slim.conv2d(net, 32, 3, padding='VALID',
scope='Conv2d_2a_3x3')
end_points['Conv2d_2a_3x3'] = net
net = slim.conv2d(net, 64, 3, scope='Conv2d_2b_3x3')
end_points['Conv2d_2b_3x3'] = net
net = slim.max_pool2d(net, 3, stride=2, padding='VALID',
scope='MaxPool_3a_3x3')
end_points['MaxPool_3a_3x3'] = net
net = slim.conv2d(net, 80, 1, padding='VALID',
scope='Conv2d_3b_1x1')
end_points['Conv2d_3b_1x1'] = net
net = slim.conv2d(net, 192, 3, padding='VALID',
scope='Conv2d_4a_3x3')
end_points['Conv2d_4a_3x3'] = net
net = slim.conv2d(net, 256, 3, stride=2, padding='VALID',
scope='Conv2d_4b_3x3')
end_points['Conv2d_4b_3x3'] = net
net = slim.repeat(net, 5, block35, scale=0.17)
end_points['Mixed_5a'] = net
with tf.variable_scope('Mixed_6a'):
net = reduction_a(net, 192, 192, 256, 384)
end_points['Mixed_6a'] = net
net = slim.repeat(net, 10, block17, scale=0.10)
end_points['Mixed_6b'] = net
with tf.variable_scope('Mixed_7a'):
net = reduction_b(net)
end_points['Mixed_7a'] = net
net = slim.repeat(net, 5, block8, scale=0.20)
end_points['Mixed_8a'] = net
net = block8(net, activation_fn=None)
end_points['Mixed_8b'] = net
with tf.variable_scope('Logits'):
end_points['PrePool'] = net
net = slim.avg_pool2d(net, net.get_shape()[1:3], padding='VALID',
scope='AvgPool_1a_8x8')
net = slim.flatten(net)
net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
scope='Dropout')
end_points['PreLogitsFlatten'] = net
net = slim.fully_connected(net, bottleneck_layer_size, activation_fn=None,
scope='Bottleneck', reuse=False)
return net, end_points
| true
| true
|
1c41bac1991930d3ffc2130cc34e9dcae42d3d11
| 539
|
py
|
Python
|
output/models/nist_data/list_pkg/date_time/schema_instance/nistschema_sv_iv_list_date_time_min_length_2_xsd/nistschema_sv_iv_list_date_time_min_length_2.py
|
tefra/xsdata-w3c-tests
|
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
|
[
"MIT"
] | 1
|
2021-08-14T17:59:21.000Z
|
2021-08-14T17:59:21.000Z
|
output/models/nist_data/list_pkg/date_time/schema_instance/nistschema_sv_iv_list_date_time_min_length_2_xsd/nistschema_sv_iv_list_date_time_min_length_2.py
|
tefra/xsdata-w3c-tests
|
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
|
[
"MIT"
] | 4
|
2020-02-12T21:30:44.000Z
|
2020-04-15T20:06:46.000Z
|
output/models/nist_data/list_pkg/date_time/schema_instance/nistschema_sv_iv_list_date_time_min_length_2_xsd/nistschema_sv_iv_list_date_time_min_length_2.py
|
tefra/xsdata-w3c-tests
|
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
|
[
"MIT"
] | null | null | null |
from dataclasses import dataclass, field
from typing import List
from xsdata.models.datatype import XmlDateTime
__NAMESPACE__ = "NISTSchema-SV-IV-list-dateTime-minLength-2-NS"
@dataclass
class NistschemaSvIvListDateTimeMinLength2:
class Meta:
name = "NISTSchema-SV-IV-list-dateTime-minLength-2"
namespace = "NISTSchema-SV-IV-list-dateTime-minLength-2-NS"
value: List[XmlDateTime] = field(
default_factory=list,
metadata={
"min_length": 6,
"tokens": True,
}
)
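A hedged usage sketch for the generated binding above, reusing the module's
XmlDateTime import; it assumes xsdata is installed and that
XmlDateTime.from_string is available (part of xsdata's datatype helpers).
stamps = [XmlDateTime.from_string("2020-01-0%dT12:00:00" % d)
          for d in range(1, 7)]
obj = NistschemaSvIvListDateTimeMinLength2(value=stamps)
print(len(obj.value))  # 6 tokens, satisfying the min_length restriction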
| 25.666667
| 67
| 0.679035
|
from dataclasses import dataclass, field
from typing import List
from xsdata.models.datatype import XmlDateTime
__NAMESPACE__ = "NISTSchema-SV-IV-list-dateTime-minLength-2-NS"
@dataclass
class NistschemaSvIvListDateTimeMinLength2:
class Meta:
name = "NISTSchema-SV-IV-list-dateTime-minLength-2"
namespace = "NISTSchema-SV-IV-list-dateTime-minLength-2-NS"
value: List[XmlDateTime] = field(
default_factory=list,
metadata={
"min_length": 6,
"tokens": True,
}
)
| true
| true
|
1c41bd9504b778ac59180a5355284db104a5351c
| 1,723
|
py
|
Python
|
src/main/resources/pytz/zoneinfo/America/Eirunepe.py
|
TheEin/swagger-maven-plugin
|
cf93dce2d5c8d3534f4cf8c612b11e2d2313871b
|
[
"Apache-2.0"
] | 65
|
2015-11-14T13:46:01.000Z
|
2021-08-14T05:54:04.000Z
|
lib/pytz/zoneinfo/America/Eirunepe.py
|
tjsavage/polymer-dashboard
|
19bc467f1206613f8eec646b6f2bc43cc319ef75
|
[
"CNRI-Python",
"Linux-OpenIB"
] | 13
|
2016-03-31T20:00:17.000Z
|
2021-08-20T14:52:31.000Z
|
lib/pytz/zoneinfo/America/Eirunepe.py
|
tjsavage/polymer-dashboard
|
19bc467f1206613f8eec646b6f2bc43cc319ef75
|
[
"CNRI-Python",
"Linux-OpenIB"
] | 20
|
2015-03-18T08:41:37.000Z
|
2020-12-18T02:58:30.000Z
|
'''tzinfo timezone information for America/Eirunepe.'''
from pytz.tzinfo import DstTzInfo
from pytz.tzinfo import memorized_datetime as d
from pytz.tzinfo import memorized_ttinfo as i
class Eirunepe(DstTzInfo):
'''America/Eirunepe timezone definition. See datetime.tzinfo for details'''
zone = 'America/Eirunepe'
_utc_transition_times = [
d(1,1,1,0,0,0),
d(1914,1,1,4,39,28),
d(1931,10,3,16,0,0),
d(1932,4,1,4,0,0),
d(1932,10,3,5,0,0),
d(1933,4,1,4,0,0),
d(1949,12,1,5,0,0),
d(1950,4,16,5,0,0),
d(1950,12,1,5,0,0),
d(1951,4,1,4,0,0),
d(1951,12,1,5,0,0),
d(1952,4,1,4,0,0),
d(1952,12,1,5,0,0),
d(1953,3,1,4,0,0),
d(1963,12,9,5,0,0),
d(1964,3,1,4,0,0),
d(1965,1,31,5,0,0),
d(1965,3,31,4,0,0),
d(1965,12,1,5,0,0),
d(1966,3,1,4,0,0),
d(1966,11,1,5,0,0),
d(1967,3,1,4,0,0),
d(1967,11,1,5,0,0),
d(1968,3,1,4,0,0),
d(1985,11,2,5,0,0),
d(1986,3,15,4,0,0),
d(1986,10,25,5,0,0),
d(1987,2,14,4,0,0),
d(1987,10,25,5,0,0),
d(1988,2,7,4,0,0),
d(1993,10,17,5,0,0),
d(1994,2,20,4,0,0),
]
_transition_info = [
i(-16740,0,'LMT'),
i(-18000,0,'ACT'),
i(-14400,3600,'ACST'),
i(-18000,0,'ACT'),
i(-14400,3600,'ACST'),
i(-18000,0,'ACT'),
i(-14400,3600,'ACST'),
i(-18000,0,'ACT'),
i(-14400,3600,'ACST'),
i(-18000,0,'ACT'),
i(-14400,3600,'ACST'),
i(-18000,0,'ACT'),
i(-14400,3600,'ACST'),
i(-18000,0,'ACT'),
i(-14400,3600,'ACST'),
i(-18000,0,'ACT'),
i(-14400,3600,'ACST'),
i(-18000,0,'ACT'),
i(-14400,3600,'ACST'),
i(-18000,0,'ACT'),
i(-14400,3600,'ACST'),
i(-18000,0,'ACT'),
i(-14400,3600,'ACST'),
i(-18000,0,'ACT'),
i(-14400,3600,'ACST'),
i(-18000,0,'ACT'),
i(-14400,3600,'ACST'),
i(-18000,0,'ACT'),
i(-14400,3600,'ACST'),
i(-18000,0,'ACT'),
i(-14400,3600,'ACST'),
i(-18000,0,'ACT'),
]
Eirunepe = Eirunepe()
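A minimal sketch of consuming this shipped tzinfo class through pytz,
assuming the package layout shown in the paths above.
from datetime import datetime
import pytz

tz = pytz.timezone('America/Eirunepe')
dt = tz.localize(datetime(1986, 1, 1, 12, 0))
print(dt.tzname(), dt.utcoffset())  # ACST, UTC-4 per the 1985-86 entry in the table above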
| 20.759036
| 79
| 0.597214
|
from pytz.tzinfo import DstTzInfo
from pytz.tzinfo import memorized_datetime as d
from pytz.tzinfo import memorized_ttinfo as i
class Eirunepe(DstTzInfo):
zone = 'America/Eirunepe'
_utc_transition_times = [
d(1,1,1,0,0,0),
d(1914,1,1,4,39,28),
d(1931,10,3,16,0,0),
d(1932,4,1,4,0,0),
d(1932,10,3,5,0,0),
d(1933,4,1,4,0,0),
d(1949,12,1,5,0,0),
d(1950,4,16,5,0,0),
d(1950,12,1,5,0,0),
d(1951,4,1,4,0,0),
d(1951,12,1,5,0,0),
d(1952,4,1,4,0,0),
d(1952,12,1,5,0,0),
d(1953,3,1,4,0,0),
d(1963,12,9,5,0,0),
d(1964,3,1,4,0,0),
d(1965,1,31,5,0,0),
d(1965,3,31,4,0,0),
d(1965,12,1,5,0,0),
d(1966,3,1,4,0,0),
d(1966,11,1,5,0,0),
d(1967,3,1,4,0,0),
d(1967,11,1,5,0,0),
d(1968,3,1,4,0,0),
d(1985,11,2,5,0,0),
d(1986,3,15,4,0,0),
d(1986,10,25,5,0,0),
d(1987,2,14,4,0,0),
d(1987,10,25,5,0,0),
d(1988,2,7,4,0,0),
d(1993,10,17,5,0,0),
d(1994,2,20,4,0,0),
]
_transition_info = [
i(-16740,0,'LMT'),
i(-18000,0,'ACT'),
i(-14400,3600,'ACST'),
i(-18000,0,'ACT'),
i(-14400,3600,'ACST'),
i(-18000,0,'ACT'),
i(-14400,3600,'ACST'),
i(-18000,0,'ACT'),
i(-14400,3600,'ACST'),
i(-18000,0,'ACT'),
i(-14400,3600,'ACST'),
i(-18000,0,'ACT'),
i(-14400,3600,'ACST'),
i(-18000,0,'ACT'),
i(-14400,3600,'ACST'),
i(-18000,0,'ACT'),
i(-14400,3600,'ACST'),
i(-18000,0,'ACT'),
i(-14400,3600,'ACST'),
i(-18000,0,'ACT'),
i(-14400,3600,'ACST'),
i(-18000,0,'ACT'),
i(-14400,3600,'ACST'),
i(-18000,0,'ACT'),
i(-14400,3600,'ACST'),
i(-18000,0,'ACT'),
i(-14400,3600,'ACST'),
i(-18000,0,'ACT'),
i(-14400,3600,'ACST'),
i(-18000,0,'ACT'),
i(-14400,3600,'ACST'),
i(-18000,0,'ACT'),
]
Eirunepe = Eirunepe()
| true
| true
|
1c41bea762f109d22971558c3e9a1108e0c44bbd
| 419
|
py
|
Python
|
EM-beamer/image/bessel.py
|
xfli376/Lecture
|
4ee193769df089053726ec6e7792718e30f633a4
|
[
"Apache-2.0"
] | null | null | null |
EM-beamer/image/bessel.py
|
xfli376/Lecture
|
4ee193769df089053726ec6e7792718e30f633a4
|
[
"Apache-2.0"
] | null | null | null |
EM-beamer/image/bessel.py
|
xfli376/Lecture
|
4ee193769df089053726ec6e7792718e30f633a4
|
[
"Apache-2.0"
] | null | null | null |
from scipy import optimize, special
from numpy import *
from matplotlib import pyplot as pb
x = arange(0,20,0.01)
for k in arange(0.5,5.5):
y = special.jv(k,x)
pb.plot(x,y)
f = lambda x: -special.jv(k,x)
x_max = optimize.fminbound(f,0,6)
pb.plot([x_max], [special.jv(k,x_max)],'ro')
pb.title('Different Bessel functions and their local maxima')
pb.savefig('myplot.png')
pb.show()
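A small numeric companion to the plot, using the same SciPy calls: it
prints each order's located maximum instead of drawing it.
from scipy import optimize, special

for k in [0.5, 1.5, 2.5, 3.5, 4.5]:
    x_max = optimize.fminbound(lambda x: -special.jv(k, x), 0, 6)
    print(k, x_max, special.jv(k, x_max))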
| 27.933333
| 66
| 0.644391
|
from scipy import optimize, special
from numpy import *
from matplotlib import pyplot as pb
x = arange(0,20,0.01)
for k in arange(0.5,5.5):
y = special.jv(k,x)
pb.plot(x,y)
f = lambda x: -special.jv(k,x)
x_max = optimize.fminbound(f,0,6)
pb.plot([x_max], [special.jv(k,x_max)],'ro')
pb.title('Different Bessel functions and their local maxima')
pb.savefig('myplot.png')
pb.show()
| true
| true
|
1c41c0dd3400c46c01883be0652a07078deef3cb
| 2,616
|
py
|
Python
|
pydoc_fork/__main__.py
|
matthewdeanmartin/pydoc_fork
|
174475b15be966f3751d5563b4db0beecc3ab1f9
|
[
"MIT"
] | null | null | null |
pydoc_fork/__main__.py
|
matthewdeanmartin/pydoc_fork
|
174475b15be966f3751d5563b4db0beecc3ab1f9
|
[
"MIT"
] | 1
|
2022-01-17T16:28:45.000Z
|
2022-01-17T16:28:45.000Z
|
pydoc_fork/__main__.py
|
matthewdeanmartin/pydoc_fork
|
174475b15be966f3751d5563b4db0beecc3ab1f9
|
[
"MIT"
] | null | null | null |
# noinspection PyPep8
"""pydoc_fork
A fork of pydoc that is optimized for generating html documentation in a CI context
Usage:
pydoc_fork <package>... [options]
pydoc_fork (-h | --help)
pydoc_fork --version
Options:
-h --help Show this screen.
-v --version Show version.
--quiet No printing or logging.
--verbose Crank up the logging.
--config <config> pyproject.toml or other toml config.
--document_internals respect underscore or __all__ private
--prefer_docs_python_org link to python.org or generate own stdlib docs
-o --output <folder> where to write files
"""
# TODO: implement this
# pydoc_fork dot_notation <importable>... [--output=<folder>] [--document_internals]
# pydoc_fork source_path <path>... [--output=<folder>] [--document_internals]
import logging
import sys
import docopt
from pydoc_fork import commands, settings
from pydoc_fork.settings import load_config
LOGGER = logging.getLogger(__name__)
LOGGERS = []
__version__ = "3.0.0"
def main() -> int:
"""Get the args object from command parameters"""
arguments = docopt.docopt(__doc__, version=f"pydoc_fork {__version__}")
    config_path = arguments.get("--config")  # docopt keys option arguments by the option name
if config_path:
load_config(config_path)
LOGGER.debug(f"Invoking with docopts: {str(arguments)}")
output_folder = arguments["--output"]
# TODO: add lists of packages
package = arguments["<package>"] or []
# quiet = bool(arguments.get("--quiet", False))
if arguments.get("--document_internals"):
settings.DOCUMENT_INTERNALS = arguments["--document_internals"]
if arguments.get("--prefer_docs_python_org"):
settings.PREFER_DOCS_PYTHON_ORG = arguments["--prefer_docs_python_org"]
if arguments.get("--verbose"):
# root logger, all modules
for root in ("pydoc_fork", "__main__"):
logger = logging.getLogger(root)
logger.setLevel(logging.DEBUG)
handler = logging.StreamHandler()
handler.setLevel(logging.DEBUG)
log_format = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
formatter = logging.Formatter(log_format)
handler.setFormatter(formatter)
logger.addHandler(handler)
LOGGERS.append(logger)
commands.process_path_or_dot_name(
package,
output_folder=output_folder,
)
# # TODO
# print("Don't recognize that command.")
# return -1
return 0
if __name__ == "__main__":
sys.exit(main())
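A hedged sketch of driving the entry point programmatically rather than via
the console script; the package name and output folder are illustrative.
import sys
from pydoc_fork.__main__ import main

sys.argv = ["pydoc_fork", "pydoc_fork", "--output", "docs", "--verbose"]
raise SystemExit(main())  # docopt reads the patched sys.argv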
| 31.518072
| 86
| 0.64526
|
import logging
import sys
import docopt
from pydoc_fork import commands, settings
from pydoc_fork.settings import load_config
LOGGER = logging.getLogger(__name__)
LOGGERS = []
__version__ = "3.0.0"
def main() -> int:
arguments = docopt.docopt(__doc__, version=f"pydoc_fork {__version__}")
    config_path = arguments.get("--config")
if config_path:
load_config(config_path)
LOGGER.debug(f"Invoking with docopts: {str(arguments)}")
output_folder = arguments["--output"]
package = arguments["<package>"] or []
if arguments.get("--document_internals"):
settings.DOCUMENT_INTERNALS = arguments["--document_internals"]
if arguments.get("--prefer_docs_python_org"):
settings.PREFER_DOCS_PYTHON_ORG = arguments["--prefer_docs_python_org"]
if arguments.get("--verbose"):
for root in ("pydoc_fork", "__main__"):
logger = logging.getLogger(root)
logger.setLevel(logging.DEBUG)
handler = logging.StreamHandler()
handler.setLevel(logging.DEBUG)
log_format = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
formatter = logging.Formatter(log_format)
handler.setFormatter(formatter)
logger.addHandler(handler)
LOGGERS.append(logger)
commands.process_path_or_dot_name(
package,
output_folder=output_folder,
)
# return -1
return 0
if __name__ == "__main__":
sys.exit(main())
| true
| true
|
1c41c120c75f8f421df964a15f8054a414382c3e
| 608
|
py
|
Python
|
tests/test_cross_validation.py
|
ezietsman/lightfm
|
59303f4436fc31adc569a277b94b07e4509a6ab2
|
[
"Apache-2.0"
] | null | null | null |
tests/test_cross_validation.py
|
ezietsman/lightfm
|
59303f4436fc31adc569a277b94b07e4509a6ab2
|
[
"Apache-2.0"
] | null | null | null |
tests/test_cross_validation.py
|
ezietsman/lightfm
|
59303f4436fc31adc569a277b94b07e4509a6ab2
|
[
"Apache-2.0"
] | 1
|
2020-10-07T01:29:32.000Z
|
2020-10-07T01:29:32.000Z
|
import pytest
from lightfm.cross_validation import random_train_test_split
from lightfm.datasets import fetch_movielens
def _assert_disjoint(x, y):
x = x.tocsr()
y = y.tocoo()
for (i, j) in zip(y.row, y.col):
assert x[i, j] == 0.0
@pytest.mark.parametrize('test_percentage',
[0.2, 0.5, 0.7])
def test_random_train_test_split(test_percentage):
data = fetch_movielens()['train']
train, test = random_train_test_split(data, test_percentage=test_percentage)
assert test.nnz / float(data.nnz) == test_percentage
_assert_disjoint(train, test)
| 23.384615
| 80
| 0.682566
|
import pytest
from lightfm.cross_validation import random_train_test_split
from lightfm.datasets import fetch_movielens
def _assert_disjoint(x, y):
x = x.tocsr()
y = y.tocoo()
for (i, j) in zip(y.row, y.col):
assert x[i, j] == 0.0
@pytest.mark.parametrize('test_percentage',
[0.2, 0.5, 0.7])
def test_random_train_test_split(test_percentage):
data = fetch_movielens()['train']
train, test = random_train_test_split(data, test_percentage=test_percentage)
assert test.nnz / float(data.nnz) == test_percentage
_assert_disjoint(train, test)
| true
| true
|
1c41c1db6305da2a3a38f35d262b4e376d92fd5d
| 3,935
|
py
|
Python
|
lib/pathfinding.py
|
Dogeek/codevo
|
690d161b4099d37597246f1ca3164f60a350e662
|
[
"MIT"
] | null | null | null |
lib/pathfinding.py
|
Dogeek/codevo
|
690d161b4099d37597246f1ca3164f60a350e662
|
[
"MIT"
] | null | null | null |
lib/pathfinding.py
|
Dogeek/codevo
|
690d161b4099d37597246f1ca3164f60a350e662
|
[
"MIT"
] | null | null | null |
import collections
import heapq
#http://www.redblobgames.com/pathfinding/a-star/implementation.html
class PriorityQueue:
def __init__(self):
self.elements = []
def empty(self):
return len(self.elements) == 0
def put(self, item, priority):
heapq.heappush(self.elements, (priority, item))
def get(self):
return heapq.heappop(self.elements)[1]
class Queue:
def __init__(self):
self.elements = collections.deque()
def empty(self):
return len(self.elements) == 0
def put(self, x):
self.elements.append(x)
def get(self):
return self.elements.popleft()
class SquareGrid:
def __init__(self, width, height):
self.width = width
self.height = height
self.walls = []
def in_bounds(self, id_):
(x, y) = id_
return 0 <= x < self.width and 0 <= y < self.height
def passable(self, id_):
return id_ not in self.walls
def neighbors(self, id_):
(x, y) = id_
results = [(x+1, y), (x, y-1), (x-1, y), (x, y+1)]
if (x + y) % 2 == 0: results.reverse() # aesthetics
results = filter(self.in_bounds, results)
results = filter(self.passable, results)
return results
class SimpleGraph:
def __init__(self):
self.edges = {}
def neighbors(self, id):
return self.edges[id]
class GridWithWeights(SquareGrid):
def __init__(self, width, height):
super().__init__(width, height)
self.weights = {}
def cost(self, from_node, to_node):
return self.weights.get(to_node, 1)
def breadth_first_search(graph, start, goal):
frontier = Queue()
frontier.put(start)
came_from = {}
came_from[start] = None
while not frontier.empty():
current = frontier.get()
if current == goal:
break
for next in graph.neighbors(current):
if next not in came_from:
frontier.put(next)
came_from[next] = current
return came_from
def dijkstra_search(graph, start, goal):
frontier = PriorityQueue()
frontier.put(start, 0)
came_from = {}
cost_so_far = {}
came_from[start] = None
cost_so_far[start] = 0
while not frontier.empty():
current = frontier.get()
if current == goal:
break
for next in graph.neighbors(current):
new_cost = cost_so_far[current] + graph.cost(current, next)
if next not in cost_so_far or new_cost < cost_so_far[next]:
cost_so_far[next] = new_cost
priority = new_cost
frontier.put(next, priority)
came_from[next] = current
return came_from, cost_so_far
def reconstruct_path(came_from, start, goal):
current = goal
path = [current]
while current != start:
current = came_from[current]
path.append(current)
    # `start` was already appended by the loop above; appending it again
    # would duplicate the first node, so only the reversal remains.
    path.reverse()  # optional: yields start-to-goal order
return path
def heuristic(a, b):
(x1, y1) = a
(x2, y2) = b
return abs(x1 - x2) + abs(y1 - y2)
def a_star(graph, start, goal):
frontier = PriorityQueue()
frontier.put(start, 0)
came_from = {}
cost_so_far = {}
came_from[start] = None
cost_so_far[start] = 0
while not frontier.empty():
current = frontier.get()
if current == goal:
break
for next in graph.neighbors(current):
new_cost = cost_so_far[current] + graph.cost(current, next)
if next not in cost_so_far or new_cost < cost_so_far[next]:
cost_so_far[next] = new_cost
priority = new_cost + heuristic(goal, next)
frontier.put(next, priority)
came_from[next] = current
return came_from, cost_so_far
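A short demonstration of the A* helper above on a 10x10 grid with one
walled column, using only names defined in this module.
grid = GridWithWeights(10, 10)
grid.walls = [(3, y) for y in range(1, 9)]  # vertical wall, gaps at top and bottom
came_from, cost_so_far = a_star(grid, (0, 0), (9, 9))
print(reconstruct_path(came_from, (0, 0), (9, 9)))
print(cost_so_far[(9, 9)])  # 18 on this unit-cost grid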
| 26.587838
| 71
| 0.574333
|
import collections
import heapq
class PriorityQueue:
def __init__(self):
self.elements = []
def empty(self):
return len(self.elements) == 0
def put(self, item, priority):
heapq.heappush(self.elements, (priority, item))
def get(self):
return heapq.heappop(self.elements)[1]
class Queue:
def __init__(self):
self.elements = collections.deque()
def empty(self):
return len(self.elements) == 0
def put(self, x):
self.elements.append(x)
def get(self):
return self.elements.popleft()
class SquareGrid:
def __init__(self, width, height):
self.width = width
self.height = height
self.walls = []
def in_bounds(self, id_):
(x, y) = id_
return 0 <= x < self.width and 0 <= y < self.height
def passable(self, id_):
return id_ not in self.walls
def neighbors(self, id_):
(x, y) = id_
results = [(x+1, y), (x, y-1), (x-1, y), (x, y+1)]
if (x + y) % 2 == 0: results.reverse()
results = filter(self.in_bounds, results)
results = filter(self.passable, results)
return results
class SimpleGraph:
def __init__(self):
self.edges = {}
def neighbors(self, id):
return self.edges[id]
class GridWithWeights(SquareGrid):
def __init__(self, width, height):
super().__init__(width, height)
self.weights = {}
def cost(self, from_node, to_node):
return self.weights.get(to_node, 1)
def breadth_first_search(graph, start, goal):
frontier = Queue()
frontier.put(start)
came_from = {}
came_from[start] = None
while not frontier.empty():
current = frontier.get()
if current == goal:
break
for next in graph.neighbors(current):
if next not in came_from:
frontier.put(next)
came_from[next] = current
return came_from
def dijkstra_search(graph, start, goal):
frontier = PriorityQueue()
frontier.put(start, 0)
came_from = {}
cost_so_far = {}
came_from[start] = None
cost_so_far[start] = 0
while not frontier.empty():
current = frontier.get()
if current == goal:
break
for next in graph.neighbors(current):
new_cost = cost_so_far[current] + graph.cost(current, next)
if next not in cost_so_far or new_cost < cost_so_far[next]:
cost_so_far[next] = new_cost
priority = new_cost
frontier.put(next, priority)
came_from[next] = current
return came_from, cost_so_far
def reconstruct_path(came_from, start, goal):
current = goal
path = [current]
while current != start:
current = came_from[current]
path.append(current)
    path.reverse()
return path
def heuristic(a, b):
(x1, y1) = a
(x2, y2) = b
return abs(x1 - x2) + abs(y1 - y2)
def a_star(graph, start, goal):
frontier = PriorityQueue()
frontier.put(start, 0)
came_from = {}
cost_so_far = {}
came_from[start] = None
cost_so_far[start] = 0
while not frontier.empty():
current = frontier.get()
if current == goal:
break
for next in graph.neighbors(current):
new_cost = cost_so_far[current] + graph.cost(current, next)
if next not in cost_so_far or new_cost < cost_so_far[next]:
cost_so_far[next] = new_cost
priority = new_cost + heuristic(goal, next)
frontier.put(next, priority)
came_from[next] = current
return came_from, cost_so_far
| true
| true
|
1c41c2313befada08cc1f3c0337c864c6f92a845
| 296
|
py
|
Python
|
MergeQuerySet/Query/models.py
|
FalseG0d/AdvancedDjango
|
52715ffea132e591f98f94b781960fc12a8613e4
|
[
"MIT"
] | 9
|
2020-10-17T14:03:35.000Z
|
2022-01-12T17:51:14.000Z
|
MergeQuerySet/Query/models.py
|
bharathjinka09/AdvancedDjango
|
f06e1a0621e182ea6015b06e79eae99ddb04affb
|
[
"MIT"
] | null | null | null |
MergeQuerySet/Query/models.py
|
bharathjinka09/AdvancedDjango
|
f06e1a0621e182ea6015b06e79eae99ddb04affb
|
[
"MIT"
] | 4
|
2020-10-20T06:52:26.000Z
|
2022-01-07T23:51:59.000Z
|
from django.db import models
# Create your models here.
class Employee(models.Model):
    name = models.CharField(max_length=20)
    age = models.IntegerField()  # IntegerField takes no max_length argument
    dept = models.CharField(max_length=20)
    exp_score = models.IntegerField()
def __str__(self):
return self.name
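A hedged sketch of combining querysets on this model, in keeping with the
repository's MergeQuerySet theme; the filter values are illustrative.
from django.db.models import Q

young = Employee.objects.filter(age__lt=30)
experienced = Employee.objects.filter(exp_score__gte=8)
merged = young | experienced  # queryset union on the same model
merged_q = Employee.objects.filter(Q(age__lt=30) | Q(exp_score__gte=8))  # equivalent single query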
| 26.909091
| 42
| 0.736486
|
from django.db import models
class Employee(models.Model):
    name = models.CharField(max_length=20)
    age = models.IntegerField()
    dept = models.CharField(max_length=20)
    exp_score = models.IntegerField()
def __str__(self):
return self.name
| true
| true
|
1c41c2d37e53d00e0314729750dcfaf1d5b000f1
| 2,175
|
py
|
Python
|
test/sagemaker_tests/tensorflow/tensorflow2_training/integration/sagemaker/test_tuning_model_dir.py
|
Elizaaaaa/deep-learning-containers
|
6274ecb264645070d11b27e5c7e60d2e4110537d
|
[
"Apache-2.0"
] | 1
|
2021-07-14T20:13:12.000Z
|
2021-07-14T20:13:12.000Z
|
test/sagemaker_tests/tensorflow/tensorflow2_training/integration/sagemaker/test_tuning_model_dir.py
|
Elizaaaaa/deep-learning-containers
|
6274ecb264645070d11b27e5c7e60d2e4110537d
|
[
"Apache-2.0"
] | null | null | null |
test/sagemaker_tests/tensorflow/tensorflow2_training/integration/sagemaker/test_tuning_model_dir.py
|
Elizaaaaa/deep-learning-containers
|
6274ecb264645070d11b27e5c7e60d2e4110537d
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import absolute_import
import os
import pytest
from sagemaker.tensorflow import TensorFlow
from sagemaker.tuner import HyperparameterTuner, IntegerParameter
from ...integration.utils import processor, py_version, unique_name_from_base # noqa: F401
@pytest.mark.integration("hpo")
@pytest.mark.model("N/A")
def test_model_dir_with_training_job_name(sagemaker_session, ecr_image, instance_type, framework_version):
resource_path = os.path.join(os.path.dirname(__file__), '../..', 'resources')
script = os.path.join(resource_path, 'tuning_model_dir', 'entry.py')
estimator = TensorFlow(entry_point=script,
role='SageMakerRole',
train_instance_type=instance_type,
train_instance_count=1,
image_name=ecr_image,
framework_version=framework_version,
py_version='py3',
sagemaker_session=sagemaker_session)
tuner = HyperparameterTuner(estimator=estimator,
objective_metric_name='accuracy',
hyperparameter_ranges={'arbitrary_value': IntegerParameter(0, 1)},
metric_definitions=[{'Name': 'accuracy', 'Regex': 'accuracy=([01])'}],
max_jobs=1,
max_parallel_jobs=1)
# User script has logic to check for the correct model_dir
tuner.fit(job_name=unique_name_from_base('test-tf-model-dir', max_length=32))
tuner.wait()
| 43.5
| 106
| 0.652874
|
from __future__ import absolute_import
import os
import pytest
from sagemaker.tensorflow import TensorFlow
from sagemaker.tuner import HyperparameterTuner, IntegerParameter
from ...integration.utils import processor, py_version, unique_name_from_base
@pytest.mark.integration("hpo")
@pytest.mark.model("N/A")
def test_model_dir_with_training_job_name(sagemaker_session, ecr_image, instance_type, framework_version):
resource_path = os.path.join(os.path.dirname(__file__), '../..', 'resources')
script = os.path.join(resource_path, 'tuning_model_dir', 'entry.py')
estimator = TensorFlow(entry_point=script,
role='SageMakerRole',
train_instance_type=instance_type,
train_instance_count=1,
image_name=ecr_image,
framework_version=framework_version,
py_version='py3',
sagemaker_session=sagemaker_session)
tuner = HyperparameterTuner(estimator=estimator,
objective_metric_name='accuracy',
hyperparameter_ranges={'arbitrary_value': IntegerParameter(0, 1)},
metric_definitions=[{'Name': 'accuracy', 'Regex': 'accuracy=([01])'}],
max_jobs=1,
max_parallel_jobs=1)
tuner.fit(job_name=unique_name_from_base('test-tf-model-dir', max_length=32))
tuner.wait()
| true
| true
|
1c41c39880779cec8cbf1aca7a5a8ae07a48c33f
| 1,797
|
py
|
Python
|
dominio/dao.py
|
MinisterioPublicoRJ/api-cadg
|
a8998c4c234a65192f1dca8ea9a17a1d4a496556
|
[
"MIT"
] | 6
|
2020-02-11T18:45:58.000Z
|
2020-05-26T12:37:28.000Z
|
dominio/dao.py
|
MinisterioPublicoRJ/api-cadg
|
a8998c4c234a65192f1dca8ea9a17a1d4a496556
|
[
"MIT"
] | 120
|
2019-07-01T14:45:32.000Z
|
2022-01-25T19:10:16.000Z
|
dominio/dao.py
|
MinisterioPublicoRJ/apimpmapas
|
196ad25a4922448b8ae7a66012a2843c7b7194ad
|
[
"MIT"
] | null | null | null |
from dominio.db_connectors import execute as impala_execute
from dominio.exceptions import APIEmptyResultError
class GenericDAO:
"""Classe que implementa métodos genéricos de execução de query no
impala a partir de um arquivo, e posterior serialização.
Atributos:
- QUERIES_DIR (path): Caminho da pasta onde estão as queries.
- query_file (str): Nome do arquivo .sql contendo a query a executar.
- columns (list): Lista de nome das colunas a usar na serialização.
- serializer (Serializer): Serializador a ser utilizado (opcional).
- table_namespaces (dict): Define os schemas a serem formatados na query.
"""
QUERIES_DIR = ""
query_file = ""
columns = []
serializer = None
table_namespaces = {}
@classmethod
def query(cls):
with open(cls.QUERIES_DIR.child(cls.query_file)) as fobj:
query = fobj.read()
return query.format(**cls.table_namespaces)
@classmethod
def execute(cls, **kwargs):
return impala_execute(cls.query(), kwargs)
@classmethod
def serialize(cls, result_set):
ser_data = [dict(zip(cls.columns, row)) for row in result_set]
if cls.serializer:
ser_data = cls.serializer(ser_data, many=True).data
return ser_data
@classmethod
def get(cls, accept_empty=False, **kwargs):
result_set = cls.execute(**kwargs)
if not result_set and not accept_empty:
cls.raise_empty_result_error()
return cls.serialize(result_set)
@classmethod
def raise_empty_result_error(cls):
raise APIEmptyResultError
class SingleDataObjectDAO(GenericDAO):
@classmethod
def serialize(cls, result_set):
data = super().serialize(result_set)
return data[0] if data else {}
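A hedged sketch of a concrete DAO built on the base class above. The query
file, columns, and namespace are illustrative, and the Path import is an
assumption (QUERIES_DIR.child() matches unipath's API).
from unipath import Path

class ExampleDAO(GenericDAO):
    QUERIES_DIR = Path("dominio", "queries")
    query_file = "example.sql"
    columns = ["id", "name"]
    table_namespaces = {"schema": "exadata_aux"}

rows = ExampleDAO.get(org_id=1)  # kwargs are passed as query parameters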
| 30.457627
| 77
| 0.676683
|
from dominio.db_connectors import execute as impala_execute
from dominio.exceptions import APIEmptyResultError
class GenericDAO:
QUERIES_DIR = ""
query_file = ""
columns = []
serializer = None
table_namespaces = {}
@classmethod
def query(cls):
with open(cls.QUERIES_DIR.child(cls.query_file)) as fobj:
query = fobj.read()
return query.format(**cls.table_namespaces)
@classmethod
def execute(cls, **kwargs):
return impala_execute(cls.query(), kwargs)
@classmethod
def serialize(cls, result_set):
ser_data = [dict(zip(cls.columns, row)) for row in result_set]
if cls.serializer:
ser_data = cls.serializer(ser_data, many=True).data
return ser_data
@classmethod
def get(cls, accept_empty=False, **kwargs):
result_set = cls.execute(**kwargs)
if not result_set and not accept_empty:
cls.raise_empty_result_error()
return cls.serialize(result_set)
@classmethod
def raise_empty_result_error(cls):
raise APIEmptyResultError
class SingleDataObjectDAO(GenericDAO):
@classmethod
def serialize(cls, result_set):
data = super().serialize(result_set)
return data[0] if data else {}
| true
| true
|
1c41c4e8eedb6f685d8668a37578173dbb3c3525
| 4,707
|
py
|
Python
|
xlsxwriter/test/worksheet/test_sparkline10.py
|
yxwlr995/-Python-Pandas-XlsxWriter
|
cd28c1b968795b67f3013c49a0e02ffda5898163
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
xlsxwriter/test/worksheet/test_sparkline10.py
|
yxwlr995/-Python-Pandas-XlsxWriter
|
cd28c1b968795b67f3013c49a0e02ffda5898163
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
xlsxwriter/test/worksheet/test_sparkline10.py
|
yxwlr995/-Python-Pandas-XlsxWriter
|
cd28c1b968795b67f3013c49a0e02ffda5898163
|
[
"BSD-2-Clause-FreeBSD"
] | 1
|
2020-04-12T16:44:58.000Z
|
2020-04-12T16:44:58.000Z
|
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013, John McNamara, jmcnamara@cpan.org
#
import unittest
from ..compatibility import StringIO
from ..helperfunctions import _xml_to_list
from ...worksheet import Worksheet
class TestAssembleWorksheet(unittest.TestCase):
"""
Test assembling a complete Worksheet file.
"""
def test_assemble_xml_file(self):
"""Test writing a worksheet with no cell data."""
self.maxDiff = None
fh = StringIO()
worksheet = Worksheet()
worksheet._set_filehandle(fh)
worksheet.select()
worksheet.name = 'Sheet1'
worksheet.excel_version = 2010
data = [-2, 2, 3, -1, 0]
worksheet.write_row('A1', data)
# Set up sparklines.
worksheet.add_sparkline('F1', {'range': 'A1:E1',
'high_point': True,
'low_point': True,
'negative_points': True,
'first_point': True,
'last_point': True,
'markers': True,
'series_color': '#C00000',
'negative_color': '#FF0000',
'markers_color': '#FFC000',
'first_color': '#00B050',
'last_color': '#00B0F0',
'high_color': '#FFFF00',
'low_color': '#92D050',
})
worksheet._assemble_xml_file()
exp = _xml_to_list("""
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<worksheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main" xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships" xmlns:mc="http://schemas.openxmlformats.org/markup-compatibility/2006" xmlns:x14ac="http://schemas.microsoft.com/office/spreadsheetml/2009/9/ac" mc:Ignorable="x14ac">
<dimension ref="A1:E1"/>
<sheetViews>
<sheetView tabSelected="1" workbookViewId="0"/>
</sheetViews>
<sheetFormatPr defaultRowHeight="15" x14ac:dyDescent="0.25"/>
<sheetData>
<row r="1" spans="1:5" x14ac:dyDescent="0.25">
<c r="A1">
<v>-2</v>
</c>
<c r="B1">
<v>2</v>
</c>
<c r="C1">
<v>3</v>
</c>
<c r="D1">
<v>-1</v>
</c>
<c r="E1">
<v>0</v>
</c>
</row>
</sheetData>
<pageMargins left="0.7" right="0.7" top="0.75" bottom="0.75" header="0.3" footer="0.3"/>
<extLst>
<ext xmlns:x14="http://schemas.microsoft.com/office/spreadsheetml/2009/9/main" uri="{05C60535-1F16-4fd2-B633-F4F36F0B64E0}">
<x14:sparklineGroups xmlns:xm="http://schemas.microsoft.com/office/excel/2006/main">
<x14:sparklineGroup displayEmptyCellsAs="gap" markers="1" high="1" low="1" first="1" last="1" negative="1">
<x14:colorSeries rgb="FFC00000"/>
<x14:colorNegative rgb="FFFF0000"/>
<x14:colorAxis rgb="FF000000"/>
<x14:colorMarkers rgb="FFFFC000"/>
<x14:colorFirst rgb="FF00B050"/>
<x14:colorLast rgb="FF00B0F0"/>
<x14:colorHigh rgb="FFFFFF00"/>
<x14:colorLow rgb="FF92D050"/>
<x14:sparklines>
<x14:sparkline>
<xm:f>Sheet1!A1:E1</xm:f>
<xm:sqref>F1</xm:sqref>
</x14:sparkline>
</x14:sparklines>
</x14:sparklineGroup>
</x14:sparklineGroups>
</ext>
</extLst>
</worksheet>
""")
got = _xml_to_list(fh.getvalue())
self.assertEqual(got, exp)
if __name__ == '__main__':
unittest.main()
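A companion sketch that writes the same sparkline through the public API
instead of the internal test harness; the output filename is illustrative.
import xlsxwriter

workbook = xlsxwriter.Workbook('sparkline_demo.xlsx')
worksheet = workbook.add_worksheet()
worksheet.write_row('A1', [-2, 2, 3, -1, 0])
worksheet.add_sparkline('F1', {'range': 'A1:E1', 'markers': True})
workbook.close()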
| 42.026786
| 337
| 0.4215
|
<x14:sparklineGroups xmlns:xm="http://schemas.microsoft.com/office/excel/2006/main">
<x14:sparklineGroup displayEmptyCellsAs="gap" markers="1" high="1" low="1" first="1" last="1" negative="1">
<x14:colorSeries rgb="FFC00000"/>
<x14:colorNegative rgb="FFFF0000"/>
<x14:colorAxis rgb="FF000000"/>
<x14:colorMarkers rgb="FFFFC000"/>
<x14:colorFirst rgb="FF00B050"/>
<x14:colorLast rgb="FF00B0F0"/>
<x14:colorHigh rgb="FFFFFF00"/>
<x14:colorLow rgb="FF92D050"/>
<x14:sparklines>
<x14:sparkline>
<xm:f>Sheet1!A1:E1</xm:f>
<xm:sqref>F1</xm:sqref>
</x14:sparkline>
</x14:sparklines>
</x14:sparklineGroup>
</x14:sparklineGroups>
</ext>
</extLst>
</worksheet>
""")
got = _xml_to_list(fh.getvalue())
self.assertEqual(got, exp)
if __name__ == '__main__':
unittest.main()
| true
| true
|
1c41c619c9ab1c1348ec5a6a9f77ef5b1fa70dda
| 2,439
|
py
|
Python
|
oblique/nxutils.py
|
blais/oblique
|
8cf9932b20b9d82a29f072d7c69c746e4643a77c
|
[
"Apache-2.0"
] | 1
|
2020-06-20T13:41:29.000Z
|
2020-06-20T13:41:29.000Z
|
oblique/nxutils.py
|
blais/oblique
|
8cf9932b20b9d82a29f072d7c69c746e4643a77c
|
[
"Apache-2.0"
] | null | null | null |
oblique/nxutils.py
|
blais/oblique
|
8cf9932b20b9d82a29f072d7c69c746e4643a77c
|
[
"Apache-2.0"
] | null | null | null |
"""Test program for rendering of a NetworkX graph of the databased."""
import argparse
import collections
import random
import time
import webbrowser
from typing import Text
import colour
import networkx as nx
from oblique import extmodule
_COLORS = """
aliceblue antiquewhite aqua aquamarine azure beige bisque black blanchedalmond
blue blueviolet brown burlywood cadetblue chartreuse chocolate coral
cornflowerblue cornsilk crimson cyan darkblue darkcyan darkgoldenrod darkgray
darkgreen darkgrey darkkhaki darkmagenta darkolivegreen darkorange darkorchid
darkred darksalmon darkseagreen darkslateblue darkslategray darkslategrey
darkturquoise darkviolet deeppink deepskyblue dimgray dimgrey dodgerblue
firebrick floralwhite forestgreen fuchsia gainsboro gold goldenrod gray grey
green greenyellow honeydew hotpink indianred indigo ivory khaki lavender
lavenderblush lawngreen lemonchiffon lightblue lightcoral lightcyan
lightgoldenrodyellow lightgray lightgreen lightgrey lightpink lightsalmon
lightseagreen lightskyblue lightslategray lightslategrey lightsteelblue
lightyellow lime limegreen linen magenta maroon mediumaquamarine mediumblue
mediumorchid mediumpurple mediumseagreen mediumslateblue mediumspringgreen
mediumturquoise mediumvioletred midnightblue mintcream mistyrose moccasin
navajowhite navy oldlace olive olivedrab orange orangered orchid palegoldenrod
palegreen paleturquoise palevioletred papayawhip peachpuff peru pink plum
powderblue purple red rosybrown royalblue saddlebrown salmon sandybrown seagreen
seashell sienna silver skyblue slateblue slategray slategrey snow springgreen
steelblue tan thistle tomato turquoise violet wheat yellow yellowgreen
""".split()
def make_id(ref: extmodule.Ref) -> Text:
"""Convert a ref to a string iddentifier."""
return "{}/{}".format(ref.type, ref.ident)
def convert_to_nx(db: extmodule.Database) -> nx.DiGraph:
"""Convert an internal database to a NetworkX graph."""
colors = list(_COLORS)
colormap = collections.defaultdict(lambda: colors.pop())
g = nx.DiGraph()
for obj in db.object():
color = colormap[obj.id.type]
objid = make_id(obj.id) if obj.id.type != 'item' else obj.contents
col = colour.Color(color)
g.add_node(objid, contents=obj.contents, fillcolor="{}60".format(col.hex_l), style="filled")
for ref in obj.refs():
nid = make_id(ref)
g.add_edge(objid, nid)
return g
if __name__ == '__main__':
    main()  # NOTE: main() is not defined in this excerpt; running as a script raises NameError.
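A hedged rendering sketch for the converter above; it assumes a populated
Database from oblique.extmodule and Graphviz bindings (pygraphviz) for
NetworkX, reusing the module's existing `nx` import.
def render(db: extmodule.Database, path: Text = 'objects.png') -> None:
    """Draw the object graph to an image file (illustrative helper)."""
    g = convert_to_nx(db)
    a = nx.nx_agraph.to_agraph(g)  # the fillcolor/style node attrs are honored by dot
    a.layout('dot')
    a.draw(path)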
| 39.33871
| 96
| 0.812218
|
import argparse
import collections
import random
import time
import webbrowser
from typing import Text
import colour
import networkx as nx
from oblique import extmodule
_COLORS = """
aliceblue antiquewhite aqua aquamarine azure beige bisque black blanchedalmond
blue blueviolet brown burlywood cadetblue chartreuse chocolate coral
cornflowerblue cornsilk crimson cyan darkblue darkcyan darkgoldenrod darkgray
darkgreen darkgrey darkkhaki darkmagenta darkolivegreen darkorange darkorchid
darkred darksalmon darkseagreen darkslateblue darkslategray darkslategrey
darkturquoise darkviolet deeppink deepskyblue dimgray dimgrey dodgerblue
firebrick floralwhite forestgreen fuchsia gainsboro gold goldenrod gray grey
green greenyellow honeydew hotpink indianred indigo ivory khaki lavender
lavenderblush lawngreen lemonchiffon lightblue lightcoral lightcyan
lightgoldenrodyellow lightgray lightgreen lightgrey lightpink lightsalmon
lightseagreen lightskyblue lightslategray lightslategrey lightsteelblue
lightyellow lime limegreen linen magenta maroon mediumaquamarine mediumblue
mediumorchid mediumpurple mediumseagreen mediumslateblue mediumspringgreen
mediumturquoise mediumvioletred midnightblue mintcream mistyrose moccasin
navajowhite navy oldlace olive olivedrab orange orangered orchid palegoldenrod
palegreen paleturquoise palevioletred papayawhip peachpuff peru pink plum
powderblue purple red rosybrown royalblue saddlebrown salmon sandybrown seagreen
seashell sienna silver skyblue slateblue slategray slategrey snow springgreen
steelblue tan thistle tomato turquoise violet wheat yellow yellowgreen
""".split()
def make_id(ref: extmodule.Ref) -> Text:
return "{}/{}".format(ref.type, ref.ident)
def convert_to_nx(db: extmodule.Database) -> nx.DiGraph:
colors = list(_COLORS)
colormap = collections.defaultdict(lambda: colors.pop())
g = nx.DiGraph()
for obj in db.object():
color = colormap[obj.id.type]
objid = make_id(obj.id) if obj.id.type != 'item' else obj.contents
col = colour.Color(color)
g.add_node(objid, contents=obj.contents, fillcolor="{}60".format(col.hex_l), style="filled")
for ref in obj.refs():
nid = make_id(ref)
g.add_edge(objid, nid)
return g
if __name__ == '__main__':
main()
| true
| true
|
1c41c738875cce571738f3457940bf419e33844e
| 1,328
|
py
|
Python
|
src/constant.py
|
zeabusTeam/zeabus_vision
|
bc58872ae4f02656bc153f32968e61a8f3d7cf15
|
[
"MIT"
] | 1
|
2019-05-28T12:59:21.000Z
|
2019-05-28T12:59:21.000Z
|
src/constant.py
|
zeabusTeam/zeabus_vision
|
bc58872ae4f02656bc153f32968e61a8f3d7cf15
|
[
"MIT"
] | 2
|
2019-04-30T11:35:10.000Z
|
2019-10-22T10:00:18.000Z
|
src/constant.py
|
zeabusTeam/zeabus_vision
|
bc58872ae4f02656bc153f32968e61a8f3d7cf15
|
[
"MIT"
] | null | null | null |
"""
File name: ansi_color_code.py
Maintainer: AyumiizZ
Python Version: 2.7
About: ansi code for printing color text
"""
class AnsiCode:
"""
Class name: AnsiCode
Maintainer: AyumiizZ
About: ansi code for printing color text
"""
DEFAULT = '\033[0m'
BOLD = '\033[1m'
LIGHT = '\033[2m'
ITALIC = '\033[3m'
UNDERLINE = '\033[4m'
HL = '\033[7m'
INVISIBLE = '\033[8m'
CROSS = '\033[9m'
BLACK = '\033[30m'
LIGHT_RED = '\033[31m'
LIGHT_GREEN = '\033[32m'
LIGHT_YELLOW = '\033[33m'
LIGHT_BLUE = '\033[34m'
LIGHT_PURPLE = '\033[35m'
LIGHT_CYAN = '\033[36m'
LIGHT_WHITE = '\033[37m'
LIGHT_BLACK_HL = '\033[100m'
LIGHT_RED_HL = '\033[41m'
LIGHT_GREEN_HL = '\033[42m'
LIGHT_YELLOW_HL = '\033[43m'
LIGHT_BLUE_HL = '\033[44m'
LIGHT_PURPLE_HL = '\033[45m'
LIGHT_CYAN_HL = '\033[46m'
LIGHT_WHITE_HL = '\033[47m'
LIGHT_BLACK = '\033[90m'
RED = '\033[91m'
GREEN = '\033[92m'
YELLOW = '\033[93m'
BLUE = '\033[94m'
PURPLE = '\033[95m'
CYAN = '\033[96m'
WHITE = '\033[97m'
BLACK_HL = '\033[40m'
RED_HL = '\033[101m'
GREEN_HL = '\033[102m'
YELLOW_HL = '\033[103m'
BLUE_HL = '\033[104m'
PURPLE_HL = '\033[105m'
CYAN_HL = '\033[106m'
WHITE_HL = '\033[107m'
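A minimal usage sketch for the constants above.
print(AnsiCode.GREEN + 'ok' + AnsiCode.DEFAULT)
print(AnsiCode.BOLD + AnsiCode.RED + 'error' + AnsiCode.DEFAULT)
print(AnsiCode.YELLOW_HL + AnsiCode.BLACK + 'warning' + AnsiCode.DEFAULT)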
| 24.145455
| 44
| 0.568524
|
class AnsiCode:
DEFAULT = '\033[0m'
BOLD = '\033[1m'
LIGHT = '\033[2m'
ITALIC = '\033[3m'
UNDERLINE = '\033[4m'
HL = '\033[7m'
INVISIBLE = '\033[8m'
CROSS = '\033[9m'
BLACK = '\033[30m'
LIGHT_RED = '\033[31m'
LIGHT_GREEN = '\033[32m'
LIGHT_YELLOW = '\033[33m'
LIGHT_BLUE = '\033[34m'
LIGHT_PURPLE = '\033[35m'
LIGHT_CYAN = '\033[36m'
LIGHT_WHITE = '\033[37m'
LIGHT_BLACK_HL = '\033[100m'
LIGHT_RED_HL = '\033[41m'
LIGHT_GREEN_HL = '\033[42m'
LIGHT_YELLOW_HL = '\033[43m'
LIGHT_BLUE_HL = '\033[44m'
LIGHT_PURPLE_HL = '\033[45m'
LIGHT_CYAN_HL = '\033[46m'
LIGHT_WHITE_HL = '\033[47m'
LIGHT_BLACK = '\033[90m'
RED = '\033[91m'
GREEN = '\033[92m'
YELLOW = '\033[93m'
BLUE = '\033[94m'
PURPLE = '\033[95m'
CYAN = '\033[96m'
WHITE = '\033[97m'
BLACK_HL = '\033[40m'
RED_HL = '\033[101m'
GREEN_HL = '\033[102m'
YELLOW_HL = '\033[103m'
BLUE_HL = '\033[104m'
PURPLE_HL = '\033[105m'
CYAN_HL = '\033[106m'
WHITE_HL = '\033[107m'
| true
| true
|
1c41c96b4fb065a49ecc4b698ded32cd58c99a42
| 94
|
py
|
Python
|
demo/__init__.py
|
JiangFeng07/NLPIK
|
bacd52e24690e8ba706895b54a076ee05d785d7b
|
[
"Apache-2.0"
] | null | null | null |
demo/__init__.py
|
JiangFeng07/NLPIK
|
bacd52e24690e8ba706895b54a076ee05d785d7b
|
[
"Apache-2.0"
] | null | null | null |
demo/__init__.py
|
JiangFeng07/NLPIK
|
bacd52e24690e8ba706895b54a076ee05d785d7b
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/local/bin/python3
# -*- coding: utf-8 -*-
# @Time : 2022-03-14 13:58
# @Author : Leo
| 23.5
| 27
| 0.56383
| true
| true
|
|
1c41c991a06c33be3766ae390ff2eb872ce7d1f6
| 7,875
|
py
|
Python
|
tensorflow/python/keras/layers/pooling_test.py
|
alvinlin-pn/tensorflow
|
c9cd1784bf287543d89593ca1432170cdbf694de
|
[
"Apache-2.0"
] | 2
|
2021-10-10T23:52:17.000Z
|
2022-01-22T00:24:39.000Z
|
tensorflow/python/keras/layers/pooling_test.py
|
alvinlin-pn/tensorflow
|
c9cd1784bf287543d89593ca1432170cdbf694de
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/python/keras/layers/pooling_test.py
|
alvinlin-pn/tensorflow
|
c9cd1784bf287543d89593ca1432170cdbf694de
|
[
"Apache-2.0"
] | 1
|
2020-06-07T22:42:37.000Z
|
2020-06-07T22:42:37.000Z
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for pooling layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python import keras
from tensorflow.python.eager import context
from tensorflow.python.framework import test_util as tf_test_util
from tensorflow.python.keras import testing_utils
from tensorflow.python.platform import test
class GlobalPoolingTest(test.TestCase):
@tf_test_util.run_in_graph_and_eager_modes
def test_globalpooling_1d(self):
testing_utils.layer_test(keras.layers.pooling.GlobalMaxPooling1D,
input_shape=(3, 4, 5))
testing_utils.layer_test(keras.layers.pooling.GlobalMaxPooling1D,
kwargs={'data_format': 'channels_first'},
input_shape=(3, 4, 5))
testing_utils.layer_test(
keras.layers.pooling.GlobalAveragePooling1D, input_shape=(3, 4, 5))
testing_utils.layer_test(keras.layers.pooling.GlobalAveragePooling1D,
kwargs={'data_format': 'channels_first'},
input_shape=(3, 4, 5))
@tf_test_util.run_in_graph_and_eager_modes
def test_globalpooling_1d_masking_support(self):
model = keras.Sequential()
model.add(keras.layers.Masking(mask_value=0., input_shape=(None, 4)))
model.add(keras.layers.GlobalAveragePooling1D())
model.compile(loss='mae', optimizer='rmsprop')
model_input = np.random.random((2, 3, 4))
model_input[0, 1:, :] = 0
output = model.predict(model_input)
self.assertAllClose(output[0], model_input[0, 0, :])
@tf_test_util.run_in_graph_and_eager_modes
def test_globalpooling_2d(self):
testing_utils.layer_test(
keras.layers.pooling.GlobalMaxPooling2D,
kwargs={'data_format': 'channels_first'},
input_shape=(3, 4, 5, 6))
testing_utils.layer_test(
keras.layers.pooling.GlobalMaxPooling2D,
kwargs={'data_format': 'channels_last'},
input_shape=(3, 5, 6, 4))
testing_utils.layer_test(
keras.layers.pooling.GlobalAveragePooling2D,
kwargs={'data_format': 'channels_first'},
input_shape=(3, 4, 5, 6))
testing_utils.layer_test(
keras.layers.pooling.GlobalAveragePooling2D,
kwargs={'data_format': 'channels_last'},
input_shape=(3, 5, 6, 4))
@tf_test_util.run_in_graph_and_eager_modes
def test_globalpooling_3d(self):
testing_utils.layer_test(
keras.layers.pooling.GlobalMaxPooling3D,
kwargs={'data_format': 'channels_first'},
input_shape=(3, 4, 3, 4, 3))
testing_utils.layer_test(
keras.layers.pooling.GlobalMaxPooling3D,
kwargs={'data_format': 'channels_last'},
input_shape=(3, 4, 3, 4, 3))
testing_utils.layer_test(
keras.layers.pooling.GlobalAveragePooling3D,
kwargs={'data_format': 'channels_first'},
input_shape=(3, 4, 3, 4, 3))
testing_utils.layer_test(
keras.layers.pooling.GlobalAveragePooling3D,
kwargs={'data_format': 'channels_last'},
input_shape=(3, 4, 3, 4, 3))
class Pooling2DTest(test.TestCase):
@tf_test_util.run_in_graph_and_eager_modes
def test_maxpooling_2d(self):
pool_size = (3, 3)
for strides in [(1, 1), (2, 2)]:
testing_utils.layer_test(
keras.layers.MaxPooling2D,
kwargs={
'strides': strides,
'padding': 'valid',
'pool_size': pool_size
},
input_shape=(3, 5, 6, 4))
@tf_test_util.run_in_graph_and_eager_modes
def test_averagepooling_2d(self):
testing_utils.layer_test(
keras.layers.AveragePooling2D,
kwargs={'strides': (2, 2),
'padding': 'same',
'pool_size': (2, 2)},
input_shape=(3, 5, 6, 4))
testing_utils.layer_test(
keras.layers.AveragePooling2D,
kwargs={'strides': (2, 2),
'padding': 'valid',
'pool_size': (3, 3)},
input_shape=(3, 5, 6, 4))
# This part of the test can only run on GPU but doesn't appear
# to be properly assigned to a GPU when running in eager mode.
if not context.executing_eagerly():
# Only runs on GPU with CUDA, channels_first is not supported on CPU.
# TODO(b/62340061): Support channels_first on CPU.
if test.is_gpu_available(cuda_only=True):
testing_utils.layer_test(
keras.layers.AveragePooling2D,
kwargs={
'strides': (1, 1),
'padding': 'valid',
'pool_size': (2, 2),
'data_format': 'channels_first'
},
input_shape=(3, 4, 5, 6))
class Pooling3DTest(test.TestCase):
@tf_test_util.run_in_graph_and_eager_modes
def test_maxpooling_3d(self):
if test.is_built_with_rocm():
self.skipTest('Pooling with 3D tensors is not supported in ROCm')
pool_size = (3, 3, 3)
testing_utils.layer_test(
keras.layers.MaxPooling3D,
kwargs={'strides': 2,
'padding': 'valid',
'pool_size': pool_size},
input_shape=(3, 11, 12, 10, 4))
testing_utils.layer_test(
keras.layers.MaxPooling3D,
kwargs={
'strides': 3,
'padding': 'valid',
'data_format': 'channels_first',
'pool_size': pool_size
},
input_shape=(3, 4, 11, 12, 10))
@tf_test_util.run_in_graph_and_eager_modes
def test_averagepooling_3d(self):
if test.is_built_with_rocm():
self.skipTest('Pooling with 3D tensors is not supported in ROCm')
pool_size = (3, 3, 3)
testing_utils.layer_test(
keras.layers.AveragePooling3D,
kwargs={'strides': 2,
'padding': 'valid',
'pool_size': pool_size},
input_shape=(3, 11, 12, 10, 4))
testing_utils.layer_test(
keras.layers.AveragePooling3D,
kwargs={
'strides': 3,
'padding': 'valid',
'data_format': 'channels_first',
'pool_size': pool_size
},
input_shape=(3, 4, 11, 12, 10))
class Pooling1DTest(test.TestCase):
@tf_test_util.run_in_graph_and_eager_modes
def test_maxpooling_1d(self):
for padding in ['valid', 'same']:
for stride in [1, 2]:
testing_utils.layer_test(
keras.layers.MaxPooling1D,
kwargs={'strides': stride,
'padding': padding},
input_shape=(3, 5, 4))
testing_utils.layer_test(
keras.layers.MaxPooling1D,
kwargs={'data_format': 'channels_first'},
input_shape=(3, 2, 6))
@tf_test_util.run_in_graph_and_eager_modes
def test_averagepooling_1d(self):
for padding in ['valid', 'same']:
for stride in [1, 2]:
testing_utils.layer_test(
keras.layers.AveragePooling1D,
kwargs={'strides': stride,
'padding': padding},
input_shape=(3, 5, 4))
testing_utils.layer_test(
keras.layers.AveragePooling1D,
kwargs={'data_format': 'channels_first'},
input_shape=(3, 2, 6))
if __name__ == '__main__':
test.main()
| 35.472973
| 80
| 0.629968
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np

from tensorflow.python import keras
from tensorflow.python.eager import context
from tensorflow.python.framework import test_util as tf_test_util
from tensorflow.python.keras import testing_utils
from tensorflow.python.platform import test


class GlobalPoolingTest(test.TestCase):

  @tf_test_util.run_in_graph_and_eager_modes
  def test_globalpooling_1d(self):
    testing_utils.layer_test(keras.layers.pooling.GlobalMaxPooling1D,
                             input_shape=(3, 4, 5))
    testing_utils.layer_test(keras.layers.pooling.GlobalMaxPooling1D,
                             kwargs={'data_format': 'channels_first'},
                             input_shape=(3, 4, 5))
    testing_utils.layer_test(
        keras.layers.pooling.GlobalAveragePooling1D, input_shape=(3, 4, 5))
    testing_utils.layer_test(keras.layers.pooling.GlobalAveragePooling1D,
                             kwargs={'data_format': 'channels_first'},
                             input_shape=(3, 4, 5))

  @tf_test_util.run_in_graph_and_eager_modes
  def test_globalpooling_1d_masking_support(self):
    model = keras.Sequential()
    model.add(keras.layers.Masking(mask_value=0., input_shape=(None, 4)))
    model.add(keras.layers.GlobalAveragePooling1D())
    model.compile(loss='mae', optimizer='rmsprop')
    model_input = np.random.random((2, 3, 4))
    model_input[0, 1:, :] = 0
    output = model.predict(model_input)
    self.assertAllClose(output[0], model_input[0, 0, :])

  @tf_test_util.run_in_graph_and_eager_modes
  def test_globalpooling_2d(self):
    testing_utils.layer_test(
        keras.layers.pooling.GlobalMaxPooling2D,
        kwargs={'data_format': 'channels_first'},
        input_shape=(3, 4, 5, 6))
    testing_utils.layer_test(
        keras.layers.pooling.GlobalMaxPooling2D,
        kwargs={'data_format': 'channels_last'},
        input_shape=(3, 5, 6, 4))
    testing_utils.layer_test(
        keras.layers.pooling.GlobalAveragePooling2D,
        kwargs={'data_format': 'channels_first'},
        input_shape=(3, 4, 5, 6))
    testing_utils.layer_test(
        keras.layers.pooling.GlobalAveragePooling2D,
        kwargs={'data_format': 'channels_last'},
        input_shape=(3, 5, 6, 4))

  @tf_test_util.run_in_graph_and_eager_modes
  def test_globalpooling_3d(self):
    testing_utils.layer_test(
        keras.layers.pooling.GlobalMaxPooling3D,
        kwargs={'data_format': 'channels_first'},
        input_shape=(3, 4, 3, 4, 3))
    testing_utils.layer_test(
        keras.layers.pooling.GlobalMaxPooling3D,
        kwargs={'data_format': 'channels_last'},
        input_shape=(3, 4, 3, 4, 3))
    testing_utils.layer_test(
        keras.layers.pooling.GlobalAveragePooling3D,
        kwargs={'data_format': 'channels_first'},
        input_shape=(3, 4, 3, 4, 3))
    testing_utils.layer_test(
        keras.layers.pooling.GlobalAveragePooling3D,
        kwargs={'data_format': 'channels_last'},
        input_shape=(3, 4, 3, 4, 3))


class Pooling2DTest(test.TestCase):

  @tf_test_util.run_in_graph_and_eager_modes
  def test_maxpooling_2d(self):
    pool_size = (3, 3)
    for strides in [(1, 1), (2, 2)]:
      testing_utils.layer_test(
          keras.layers.MaxPooling2D,
          kwargs={
              'strides': strides,
              'padding': 'valid',
              'pool_size': pool_size
          },
          input_shape=(3, 5, 6, 4))

  @tf_test_util.run_in_graph_and_eager_modes
  def test_averagepooling_2d(self):
    testing_utils.layer_test(
        keras.layers.AveragePooling2D,
        kwargs={'strides': (2, 2),
                'padding': 'same',
                'pool_size': (2, 2)},
        input_shape=(3, 5, 6, 4))
    testing_utils.layer_test(
        keras.layers.AveragePooling2D,
        kwargs={'strides': (2, 2),
                'padding': 'valid',
                'pool_size': (3, 3)},
        input_shape=(3, 5, 6, 4))

    # This part of the test can only run on GPU but doesn't appear
    # to be properly assigned to a GPU when running in eager mode.
    if not context.executing_eagerly():
      # Only runs on GPU with CUDA, channels_first is not supported on CPU.
      # TODO(b/62340061): Support channels_first on CPU.
      if test.is_gpu_available(cuda_only=True):
        testing_utils.layer_test(
            keras.layers.AveragePooling2D,
            kwargs={
                'strides': (1, 1),
                'padding': 'valid',
                'pool_size': (2, 2),
                'data_format': 'channels_first'
            },
            input_shape=(3, 4, 5, 6))


class Pooling3DTest(test.TestCase):

  @tf_test_util.run_in_graph_and_eager_modes
  def test_maxpooling_3d(self):
    if test.is_built_with_rocm():
      self.skipTest('Pooling with 3D tensors is not supported in ROCm')
    pool_size = (3, 3, 3)
    testing_utils.layer_test(
        keras.layers.MaxPooling3D,
        kwargs={'strides': 2,
                'padding': 'valid',
                'pool_size': pool_size},
        input_shape=(3, 11, 12, 10, 4))
    testing_utils.layer_test(
        keras.layers.MaxPooling3D,
        kwargs={
            'strides': 3,
            'padding': 'valid',
            'data_format': 'channels_first',
            'pool_size': pool_size
        },
        input_shape=(3, 4, 11, 12, 10))

  @tf_test_util.run_in_graph_and_eager_modes
  def test_averagepooling_3d(self):
    if test.is_built_with_rocm():
      self.skipTest('Pooling with 3D tensors is not supported in ROCm')
    pool_size = (3, 3, 3)
    testing_utils.layer_test(
        keras.layers.AveragePooling3D,
        kwargs={'strides': 2,
                'padding': 'valid',
                'pool_size': pool_size},
        input_shape=(3, 11, 12, 10, 4))
    testing_utils.layer_test(
        keras.layers.AveragePooling3D,
        kwargs={
            'strides': 3,
            'padding': 'valid',
            'data_format': 'channels_first',
            'pool_size': pool_size
        },
        input_shape=(3, 4, 11, 12, 10))


class Pooling1DTest(test.TestCase):

  @tf_test_util.run_in_graph_and_eager_modes
  def test_maxpooling_1d(self):
    for padding in ['valid', 'same']:
      for stride in [1, 2]:
        testing_utils.layer_test(
            keras.layers.MaxPooling1D,
            kwargs={'strides': stride,
                    'padding': padding},
            input_shape=(3, 5, 4))
    testing_utils.layer_test(
        keras.layers.MaxPooling1D,
        kwargs={'data_format': 'channels_first'},
        input_shape=(3, 2, 6))

  @tf_test_util.run_in_graph_and_eager_modes
  def test_averagepooling_1d(self):
    for padding in ['valid', 'same']:
      for stride in [1, 2]:
        testing_utils.layer_test(
            keras.layers.AveragePooling1D,
            kwargs={'strides': stride,
                    'padding': padding},
            input_shape=(3, 5, 4))
    testing_utils.layer_test(
        keras.layers.AveragePooling1D,
        kwargs={'data_format': 'channels_first'},
        input_shape=(3, 2, 6))


if __name__ == '__main__':
  test.main()
| true
| true
|
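The record above exercises the Keras pooling layers through testing_utils.layer_test, which instantiates a layer, pushes data through it, and checks the resulting output shape. As a rough standalone illustration of the shape contract those tests enforce, here is a hypothetical sketch (not part of the record; it assumes only TensorFlow's public tf.keras API):

# Minimal sketch of the shape contract the pooling tests above exercise.
# Hypothetical illustration; assumes TensorFlow 2.x with the public tf.keras API.
import numpy as np
import tensorflow as tf

x = np.random.random((3, 4, 5)).astype('float32')  # (batch, steps, channels)

# GlobalMaxPooling1D collapses the temporal axis: (3, 4, 5) -> (3, 5).
pooled = tf.keras.layers.GlobalMaxPooling1D()(x)
assert pooled.shape == (3, 5)

# MaxPooling1D with pool_size=2, stride 2, and 'valid' padding halves the
# steps axis: (3, 4, 5) -> (3, 2, 5).
windowed = tf.keras.layers.MaxPooling1D(pool_size=2, strides=2,
                                        padding='valid')(x)
assert windowed.shape == (3, 2, 5)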
1c41c9d5321b033ba8af5cef0b26ef8e0efec614
| 273
|
py
|
Python
|
tpc-ds/load_db.py
|
ambient-docker/ora2postgres
|
bde236f1cfed625cff718378bfaeea2f07e889f0
|
[
"MIT"
] | 2
|
2018-12-03T07:53:44.000Z
|
2018-12-03T07:54:15.000Z
|
tpc-ds/load_db.py
|
ambient-docker/ora2postgres
|
bde236f1cfed625cff718378bfaeea2f07e889f0
|
[
"MIT"
] | 1
|
2018-12-02T07:36:41.000Z
|
2018-12-02T07:36:41.000Z
|
tpc-ds/load_db.py
|
ambient-docker/ora2postgres
|
bde236f1cfed625cff718378bfaeea2f07e889f0
|
[
"MIT"
] | 15
|
2018-12-03T07:54:59.000Z
|
2019-06-12T13:53:40.000Z
|
#!/usr/bin/python3
import glob, os

# Run Oracle SQL*Loader once per TPC-DS control file in the current directory.
for file in glob.glob("*.ctl"):
    # dbgen_version.ctl describes the generator run itself, not a table to load.
    if file == "dbgen_version.ctl":
        continue
    logfile = file.replace(".ctl", ".log")
    cmd = 'sqlldr userid=tpcds/p4ssw0rd control={} log={}'.format(file, logfile)
    os.system(cmd)
| 22.75
| 80
| 0.604396
|
import glob, os

for file in glob.glob("*.ctl"):
    if file == "dbgen_version.ctl":
        continue
    logfile = file.replace(".ctl", ".log")
    cmd = 'sqlldr userid=tpcds/p4ssw0rd control={} log={}'.format(file, logfile)
    os.system(cmd)
| true
| true
|
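The load_db.py record above drives SQL*Loader through os.system, which routes each command through a shell and discards the per-file exit status. A minimal alternative sketch using subprocess.run keeps the same loop while surfacing failures; this is hypothetical, not part of the record, and assumes sqlldr is on PATH with the same tpcds credentials:

# Hypothetical variant of the load loop above using subprocess instead of
# os.system; assumes sqlldr is on PATH and the tpcds/p4ssw0rd credentials.
import glob
import subprocess

for ctl in glob.glob('*.ctl'):
    if ctl == 'dbgen_version.ctl':
        continue
    log = ctl.replace('.ctl', '.log')
    # Passing an argument list avoids shell quoting issues entirely.
    result = subprocess.run(
        ['sqlldr', 'userid=tpcds/p4ssw0rd', 'control=' + ctl, 'log=' + log])
    if result.returncode != 0:
        print('sqlldr failed for {} (exit {})'.format(ctl, result.returncode))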