max_stars_repo_path
stringlengths 4
286
| max_stars_repo_name
stringlengths 5
119
| max_stars_count
int64 0
191k
| id
stringlengths 1
7
| content
stringlengths 6
1.03M
| content_cleaned
stringlengths 6
1.03M
| language
stringclasses 111
values | language_score
float64 0.03
1
| comments
stringlengths 0
556k
| edu_score
float64 0.32
5.03
| edu_int_score
int64 0
5
|
|---|---|---|---|---|---|---|---|---|---|---|
galaxy/api/views/search.py
|
ironfroggy/galaxy
| 0
|
6629851
|
# (c) 2012-2018, Ansible by Red Hat
#
# This file is part of Ansible Galaxy
#
# Ansible Galaxy is free software: you can redistribute it and/or modify
# it under the terms of the Apache License as published by
# the Apache Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# Ansible Galaxy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Apache License for more details.
#
# You should have received a copy of the Apache License
# along with Galaxy. If not, see <http://www.apache.org/licenses/>.
import operator
from collections import OrderedDict
import six
from django.db.models import F, Func, Value, Count, ExpressionWrapper, Q
from django.db.models.functions import Coalesce
from django.db.models import fields as db_fields
from django.urls import reverse
from django.contrib.postgres import search as psql_search
from rest_framework.response import Response
from rest_framework.permissions import AllowAny
from galaxy import constants
from galaxy.accounts import models as auth_models
from galaxy.api import filters
from galaxy.api import serializers
from galaxy.api.views import base_views as base
from galaxy.main import models
__all__ = [
'ApiV1SearchView',
'ContentSearchView',
'RoleSearchView',
'UserSearchView',
'PlatformsSearchView',
'CloudPlatformsSearchView',
'TagsSearchView',
]
RANK_FUNCTION = 'ts_rank'
RANK_NORMALIZATION = 32
DOWNLOAD_RANK_MULTIPLIER = 0.4
CONTENT_SCORE_MULTIPLIER = 0.2
COMMUNITY_SCORE_MODIFIER = 0.002
COMMUNITY_SCORE_MODIFIER_MIN = 0.005
class ApiV1SearchView(base.APIView):
    """Root search endpoint: lists the URLs of all search sub-endpoints."""

    permission_classes = (AllowAny,)
    view_name = 'Search'

    def get(self, request, *args, **kwargs):
        """Return an ordered mapping of endpoint names to resolved URLs."""
        endpoints = (
            ('cloud_platforms', 'api:cloud_platforms_search_view'),
            ('content', 'api:content_search_view'),
            ('platforms', 'api:platforms_search_view'),
            ('roles', 'api:roles_search_view'),
            ('tags', 'api:tags_search_view'),
            ('top_contributors', 'api:top_contributors_list'),
            ('users', 'api:user_search_view'),
        )
        data = OrderedDict(
            (name, reverse(view_name)) for name, view_name in endpoints)
        return Response(data)
class ContentSearchView(base.ListAPIView):
    """Full-text search over Galaxy content.

    Results are narrowed by the request's query parameters and annotated
    with a combined ``relevance`` score built from the PostgreSQL
    full-text search rank, download popularity and quality score.
    """
    serializer_class = serializers.RoleSearchSerializer
    filter_backends = [filters.OrderByFilter]

    def get_queryset(self):
        """Return content whose namespace exists and is active, with
        related rows preloaded to avoid per-item queries."""
        return (
            models.Content.objects.distinct()
            .select_related(
                'content_type',
                'namespace',
                'repository',
                'repository__provider_namespace',
                'repository__provider_namespace__namespace',
            )
            .prefetch_related(
                'videos',
                'tags',
                'dependencies',
                'platforms',
                'repository__versions',
            )
            .filter(
                repository__provider_namespace__namespace__isnull=False,
                repository__provider_namespace__namespace__active=True)
        )

    # TODO(cutwater): Use serializer to parse request arguments
    def list(self, request, *args, **kwargs):
        """Apply each supported query parameter as a filter, annotate
        relevance, and return the paginated response."""
        queryset = self.filter_queryset(self.get_queryset())
        # Content type
        content_type = request.GET.get('content_type', '').split()
        queryset = self.add_content_type(queryset, content_type)
        # Platforms
        platforms = request.GET.get('platforms', '').split()
        queryset = self.add_platforms_filter(queryset, platforms)
        # Cloud platforms
        cloud_platforms = request.GET.get('cloud_platforms', '').split()
        queryset = self.add_cloud_platforms_filter(queryset, cloud_platforms)
        # Namespaces
        namespaces = request.GET.get('namespaces', '').split()
        queryset = self.add_namespaces_filter(queryset, namespaces)
        # Tags
        tags = request.GET.get('tags', '').split()
        queryset = self.add_tags_filter(queryset, tags)
        # Keywords
        keywords = request.GET.get('keywords', '').split()
        queryset = self.add_keywords_filter(queryset, keywords)
        # Vendor
        is_vendor = request.GET.get('vendor', None)
        queryset = self.add_vendor_filter(queryset, is_vendor)
        # Deprecated
        is_deprecated = request.GET.get('deprecated', None)
        queryset = self.add_deprecated_filter(queryset, is_deprecated)
        # Support for ansible-galaxy <= 2.6 autocomplete params
        keywords = request.GET.get('autocomplete', None)
        # Calling self.add_keywords_filter() with no keywords sets existing
        # search_rank values to 0, so we want to avoid calling if autocomplete
        # is missing.
        if keywords is not None:
            queryset = self.add_keywords_filter(queryset, keywords.split())
        tags = request.GET.get('tags_autocomplete', '').split()
        queryset = self.add_tags_filter(queryset, tags)
        platforms = request.GET.get('platforms_autocomplete', '').split()
        queryset = self.add_platforms_filter(queryset, platforms)
        namespaces = request.GET.get('username_autocomplete', '').split()
        queryset = self.add_namespaces_filter(queryset, namespaces)
        queryset = self.add_relevance(queryset)
        return self.make_response(queryset)

    @staticmethod
    def add_relevance(queryset):
        """Annotate ``download_count_ln``, ``download_rank``,
        ``quality_rank`` and the combined ``relevance`` score."""
        c = 'repository__community_score'
        d = 'repository__download_count'
        # ln((MOD*c + MIN) * d + 1)
        # where c = community_score and d = download_count
        # We're using the community_score as a modifier to the download count
        # instead of just allocating a certain number of points based on the
        # score. The reason for this is that the download score is
        # a logarithmic scale so adding a fixed number of points ended up
        # boosting scores way too much for content with low numbers of
        # downloads. This system allows for the weight of the community score
        # to scale with the number of downloads
        download_count_ln_expr = Func(
            (((Coalesce(F(c), 0) * COMMUNITY_SCORE_MODIFIER) +
              COMMUNITY_SCORE_MODIFIER_MIN)
             * F(d)) + 1,
            function='ln'
        )
        # x / (1 + x) maps the unbounded log value into [0, 1).
        download_rank_expr = (
            F('download_count_ln')
            / (1 + F('download_count_ln'))
            * DOWNLOAD_RANK_MULTIPLIER
        )
        q = 'repository__quality_score'
        # This function is better than using a linear function because it
        # makes it so that the effect of losing the first few points is
        # relatively minor, which reduces the impact of errors in scoring.
        quality_rank_expr = (
            Func(Coalesce(F(q), 0) + 1, function='log')
            * CONTENT_SCORE_MULTIPLIER
        )
        relevance_expr = (
            F('search_rank') + F('download_rank') + F('quality_rank')
        )
        return queryset.annotate(
            download_count_ln=ExpressionWrapper(
                download_count_ln_expr,
                output_field=db_fields.FloatField()),
            download_rank=ExpressionWrapper(
                download_rank_expr,
                output_field=db_fields.FloatField()),
            quality_rank=ExpressionWrapper(
                quality_rank_expr,
                output_field=db_fields.FloatField()),
            relevance=ExpressionWrapper(
                relevance_expr,
                output_field=db_fields.FloatField()),
        )

    @staticmethod
    def add_content_type(queryset, content_types):
        """Filter by content-type names; no-op when the list is empty."""
        if not content_types:
            return queryset
        content_types = models.ContentType.objects.filter(
            name__in=content_types)
        return queryset.filter(content_type__in=content_types)

    @staticmethod
    def add_tags_filter(queryset, tags):
        """Filter to content carrying any of the given tag names."""
        if not tags:
            return queryset
        return queryset.filter(
            tags__in=models.Tag.objects.filter(name__in=tags))

    @staticmethod
    def add_namespaces_filter(queryset, namespaces):
        """Filter by case-insensitive substring match on namespace name;
        multiple names are OR-ed together."""
        if not namespaces:
            return queryset
        queries = [Q(namespace__name__icontains=name) for name in namespaces]
        query = six.moves.reduce(operator.or_, queries)
        return queryset.filter(query)

    @staticmethod
    def add_platforms_filter(queryset, platforms):
        """Filter to content supporting any of the given platform names."""
        if not platforms:
            return queryset
        return queryset.filter(
            platforms__in=models.Platform.objects.filter(name__in=platforms))

    @staticmethod
    def add_cloud_platforms_filter(queryset, cloud_platforms):
        """Filter to content supporting any of the given cloud platforms."""
        if not cloud_platforms:
            return queryset
        return queryset.filter(
            cloud_platforms__in=models.CloudPlatform.objects.filter(
                name__in=cloud_platforms))

    @staticmethod
    def add_keywords_filter(queryset, keywords):
        """AND all keywords into a single tsquery, annotate ts_rank as
        ``search_rank`` and filter to matching rows. With no keywords,
        only annotate ``search_rank=0`` so later expressions still work."""
        if not keywords:
            return queryset.annotate(
                search_rank=Value(0.0, output_field=db_fields.FloatField()))
        tsquery = six.moves.reduce(
            operator.and_,
            (psql_search.SearchQuery(kw) for kw in keywords))
        search_rank_fn = Func(
            F('search_vector'), tsquery, RANK_NORMALIZATION,
            function=RANK_FUNCTION, output_field=db_fields.FloatField())
        return (queryset.annotate(search_rank=search_rank_fn)
                .filter(search_vector=tsquery))

    @staticmethod
    def add_vendor_filter(queryset, is_vendor):
        """Filter by vendor flag; 'true'/'yes'/'1' (any case) mean True,
        every other non-None string means False."""
        if is_vendor is None:
            return queryset
        is_vendor_value = False
        if is_vendor.lower() in ('true', 'yes', '1'):
            is_vendor_value = True
        return queryset.filter(namespace__is_vendor=is_vendor_value)

    @staticmethod
    def add_deprecated_filter(queryset, is_deprecated):
        """Filter by repository deprecation flag; truthy strings as above."""
        if is_deprecated is None:
            return queryset
        is_deprecated_value = False
        if is_deprecated.lower() in ('true', 'yes', '1'):
            is_deprecated_value = True
        return queryset.filter(repository__deprecated=is_deprecated_value)
class RoleSearchView(ContentSearchView):
    """Content search restricted to items of type 'role'."""

    def get_queryset(self):
        # Narrow the parent queryset to role-type content only.
        # NOTE(review): `models.ContentType.get` appears to be a custom
        # manager/class helper, not the stock Django manager — confirm.
        queryset = super(RoleSearchView, self).get_queryset()
        role_type = models.ContentType.get(constants.ContentType.ROLE)
        return queryset.filter(content_type=role_type)
# FIXME(cutwater): Keeping views compatible with ELK based.
# Refactor request parameters parsing
class UserSearchView(base.ListAPIView):
    """Search users by case-insensitive username prefix.

    Accepts the query parameters ``username``, ``content`` or
    ``autocomplete`` interchangeably; the last one present wins.
    """
    model = auth_models.CustomUser
    serializer_class = serializers.UserSerializer
    filter_backends = [filters.OrderByFilter]

    def list(self, request, *args, **kwargs):
        """Return users whose username starts with the search term."""
        search_query = None
        for key, value in request.GET.items():
            if key in ('username', 'content', 'autocomplete'):
                search_query = value
        queryset = self.filter_queryset(self.get_queryset())
        if search_query:
            # Bug fix: QuerySet.filter() returns a *new* queryset; the
            # original code discarded the result, so the search term
            # silently had no effect.
            queryset = queryset.filter(username__istartswith=search_query)
        return self.make_response(queryset)
class PlatformsSearchView(base.ListAPIView):
    """Search OS platforms by exact name, release list, or prefix."""
    model = models.Platform
    serializer_class = serializers.PlatformSearchSerializer
    filter_backends = [filters.OrderByFilter]

    def get_queryset(self):
        # roles_count: number of roles associated with each platform.
        return (super(PlatformsSearchView, self).get_queryset()
                .annotate(roles_count=Count('roles')))

    def list(self, request, *args, **kwargs):
        """Parse name/releases/autocomplete params and filter accordingly."""
        name = None
        releases = None
        autocomplete = None
        for key, value in request.GET.items():
            if key == 'name':
                name = value
            elif key == 'releases':
                releases = value.split()
            elif key in ('content', 'autocomplete'):
                autocomplete = value
        queryset = self.filter_queryset(self.get_queryset())
        if name:
            queryset = queryset.filter(name=name)
        if releases:
            queryset = queryset.filter(release__in=releases)
        if autocomplete:
            # Prefix full-text match over name, release and alias;
            # quote_literal guards the user-supplied term inside to_tsquery.
            where_clause = """
            to_tsvector(
                name || ' ' || release || ' ' || coalesce(alias, ''))
            @@ to_tsquery(quote_literal(%s) || ':*')
            """
            queryset = queryset.extra(where=[where_clause],
                                      params=[autocomplete])
        return self.make_response(queryset)
class CloudPlatformsSearchView(base.ListAPIView):
    """Search cloud platforms by exact name or case-insensitive prefix."""

    model = models.CloudPlatform
    serializer_class = serializers.CloudPlatformSearchSerializer
    filter_backends = [filters.OrderByFilter]

    def get_queryset(self):
        # Annotate each platform with the number of roles that reference it.
        base_qs = super(CloudPlatformsSearchView, self).get_queryset()
        return base_qs.annotate(roles_count=Count('roles'))

    def list(self, request, *args, **kwargs):
        """Apply name / autocomplete parameters and return the results."""
        exact_name = None
        prefix = None
        for param, raw_value in request.GET.items():
            if param == 'name':
                exact_name = raw_value
            elif param in ('content', 'autocomplete'):
                prefix = raw_value
        results = self.filter_queryset(self.get_queryset())
        if exact_name:
            results = results.filter(name=exact_name)
        if prefix:
            results = results.filter(name__istartswith=prefix)
        return self.make_response(results)
class TagsSearchView(base.ListAPIView):
    """Search tags by case-insensitive name prefix."""
    model = models.Tag
    serializer_class = serializers.TagSearchSerializer
    filter_backends = [filters.OrderByFilter]

    def get_queryset(self):
        # roles_count: number of roles carrying each tag.
        return (super(TagsSearchView, self).get_queryset()
                .annotate(roles_count=Count('roles')))

    def list(self, request, *args, **kwargs):
        """Return tags whose name starts with the search term."""
        search_query = None
        for key, value in request.GET.items():
            if key in ('tag', 'content', 'autocomplete'):
                search_query = value
        queryset = self.filter_queryset(self.get_queryset())
        if search_query:
            # Bug fixes: the lookup was misspelled as 'name_istartswith'
            # (a single underscore — Django would raise FieldError for an
            # unknown field), and the filtered queryset was discarded
            # instead of being assigned back.
            queryset = queryset.filter(name__istartswith=search_query)
        return self.make_response(queryset)
|
# (c) 2012-2018, Ansible by Red Hat
#
# This file is part of Ansible Galaxy
#
# Ansible Galaxy is free software: you can redistribute it and/or modify
# it under the terms of the Apache License as published by
# the Apache Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# Ansible Galaxy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Apache License for more details.
#
# You should have received a copy of the Apache License
# along with Galaxy. If not, see <http://www.apache.org/licenses/>.
import operator
from collections import OrderedDict
import six
from django.db.models import F, Func, Value, Count, ExpressionWrapper, Q
from django.db.models.functions import Coalesce
from django.db.models import fields as db_fields
from django.urls import reverse
from django.contrib.postgres import search as psql_search
from rest_framework.response import Response
from rest_framework.permissions import AllowAny
from galaxy import constants
from galaxy.accounts import models as auth_models
from galaxy.api import filters
from galaxy.api import serializers
from galaxy.api.views import base_views as base
from galaxy.main import models
__all__ = [
'ApiV1SearchView',
'ContentSearchView',
'RoleSearchView',
'UserSearchView',
'PlatformsSearchView',
'CloudPlatformsSearchView',
'TagsSearchView',
]
RANK_FUNCTION = 'ts_rank'
RANK_NORMALIZATION = 32
DOWNLOAD_RANK_MULTIPLIER = 0.4
CONTENT_SCORE_MULTIPLIER = 0.2
COMMUNITY_SCORE_MODIFIER = 0.002
COMMUNITY_SCORE_MODIFIER_MIN = 0.005
class ApiV1SearchView(base.APIView):
permission_classes = (AllowAny,)
view_name = 'Search'
def get(self, request, *args, **kwargs):
data = OrderedDict()
data['cloud_platforms'] = reverse('api:cloud_platforms_search_view')
data['content'] = reverse('api:content_search_view')
data['platforms'] = reverse('api:platforms_search_view')
data['roles'] = reverse('api:roles_search_view')
data['tags'] = reverse('api:tags_search_view')
data['top_contributors'] = reverse('api:top_contributors_list')
data['users'] = reverse('api:user_search_view')
return Response(data)
class ContentSearchView(base.ListAPIView):
serializer_class = serializers.RoleSearchSerializer
filter_backends = [filters.OrderByFilter]
def get_queryset(self):
return (
models.Content.objects.distinct()
.select_related(
'content_type',
'namespace',
'repository',
'repository__provider_namespace',
'repository__provider_namespace__namespace',
)
.prefetch_related(
'videos',
'tags',
'dependencies',
'platforms',
'repository__versions',
)
.filter(
repository__provider_namespace__namespace__isnull=False,
repository__provider_namespace__namespace__active=True)
)
# TODO(cutwater): Use serializer to parse request arguments
def list(self, request, *args, **kwargs):
queryset = self.filter_queryset(self.get_queryset())
# Content type
content_type = request.GET.get('content_type', '').split()
queryset = self.add_content_type(queryset, content_type)
# Platforms
platforms = request.GET.get('platforms', '').split()
queryset = self.add_platforms_filter(queryset, platforms)
# Cloud platforms
cloud_platforms = request.GET.get('cloud_platforms', '').split()
queryset = self.add_cloud_platforms_filter(queryset, cloud_platforms)
# Namespaces
namespaces = request.GET.get('namespaces', '').split()
queryset = self.add_namespaces_filter(queryset, namespaces)
# Tags
tags = request.GET.get('tags', '').split()
queryset = self.add_tags_filter(queryset, tags)
# Keywords
keywords = request.GET.get('keywords', '').split()
queryset = self.add_keywords_filter(queryset, keywords)
# Vendor
is_vendor = request.GET.get('vendor', None)
queryset = self.add_vendor_filter(queryset, is_vendor)
# Deprecated
is_deprecated = request.GET.get('deprecated', None)
queryset = self.add_deprecated_filter(queryset, is_deprecated)
# Support for ansible-galaxy <= 2.6 autocomplete params
keywords = request.GET.get('autocomplete', None)
# Calling self.add_keywords_filter() with no keywords sets existing
# search_rank values to 0, so we want to avoid calling if autocomplete
# is missing.
if keywords is not None:
queryset = self.add_keywords_filter(queryset, keywords.split())
tags = request.GET.get('tags_autocomplete', '').split()
queryset = self.add_tags_filter(queryset, tags)
platforms = request.GET.get('platforms_autocomplete', '').split()
queryset = self.add_platforms_filter(queryset, platforms)
namespaces = request.GET.get('username_autocomplete', '').split()
queryset = self.add_namespaces_filter(queryset, namespaces)
queryset = self.add_relevance(queryset)
return self.make_response(queryset)
@staticmethod
def add_relevance(queryset):
c = 'repository__community_score'
d = 'repository__download_count'
# ln((MOD*c + MIN) * d + 1)
# where c = community_score and d = download_count
# We're using the community_score as a modifier to the download count
# instead of just allocating a certain number of points based on the
# score. The reason for this is that the download score is
# a logaritmic scale so adding a fixed number of points ended up
# boosting scores way too much for content with low numbers of
# downloads. This system allows for the weight of the community score
# to scale with the number of downloads
download_count_ln_expr = Func(
(((Coalesce(F(c), 0) * COMMUNITY_SCORE_MODIFIER) +
COMMUNITY_SCORE_MODIFIER_MIN)
* F(d)) + 1,
function='ln'
)
download_rank_expr = (
F('download_count_ln')
/ (1 + F('download_count_ln'))
* DOWNLOAD_RANK_MULTIPLIER
)
q = 'repository__quality_score'
# This function is better than using a linear function because it
# makes it so that the effect of losing the first few points is
# relatively minor, which reduces the impact of errors in scoring.
quality_rank_expr = (
Func(Coalesce(F(q), 0) + 1, function='log')
* CONTENT_SCORE_MULTIPLIER
)
relevance_expr = (
F('search_rank') + F('download_rank') + F('quality_rank')
)
return queryset.annotate(
download_count_ln=ExpressionWrapper(
download_count_ln_expr,
output_field=db_fields.FloatField()),
download_rank=ExpressionWrapper(
download_rank_expr,
output_field=db_fields.FloatField()),
quality_rank=ExpressionWrapper(
quality_rank_expr,
output_field=db_fields.FloatField()),
relevance=ExpressionWrapper(
relevance_expr,
output_field=db_fields.FloatField()),
)
@staticmethod
def add_content_type(queryset, content_types):
if not content_types:
return queryset
content_types = models.ContentType.objects.filter(
name__in=content_types)
return queryset.filter(content_type__in=content_types)
@staticmethod
def add_tags_filter(queryset, tags):
if not tags:
return queryset
return queryset.filter(
tags__in=models.Tag.objects.filter(name__in=tags))
@staticmethod
def add_namespaces_filter(queryset, namespaces):
if not namespaces:
return queryset
queries = [Q(namespace__name__icontains=name) for name in namespaces]
query = six.moves.reduce(operator.or_, queries)
return queryset.filter(query)
@staticmethod
def add_platforms_filter(queryset, platforms):
if not platforms:
return queryset
return queryset.filter(
platforms__in=models.Platform.objects.filter(name__in=platforms))
@staticmethod
def add_cloud_platforms_filter(queryset, cloud_platforms):
if not cloud_platforms:
return queryset
return queryset.filter(
cloud_platforms__in=models.CloudPlatform.objects.filter(
name__in=cloud_platforms))
@staticmethod
def add_keywords_filter(queryset, keywords):
if not keywords:
return queryset.annotate(
search_rank=Value(0.0, output_field=db_fields.FloatField()))
tsquery = six.moves.reduce(
operator.and_,
(psql_search.SearchQuery(kw) for kw in keywords))
search_rank_fn = Func(
F('search_vector'), tsquery, RANK_NORMALIZATION,
function=RANK_FUNCTION, output_field=db_fields.FloatField())
return (queryset.annotate(search_rank=search_rank_fn)
.filter(search_vector=tsquery))
@staticmethod
def add_vendor_filter(queryset, is_vendor):
if is_vendor is None:
return queryset
is_vendor_value = False
if is_vendor.lower() in ('true', 'yes', '1'):
is_vendor_value = True
return queryset.filter(namespace__is_vendor=is_vendor_value)
@staticmethod
def add_deprecated_filter(queryset, is_deprecated):
if is_deprecated is None:
return queryset
is_deprecated_value = False
if is_deprecated.lower() in ('true', 'yes', '1'):
is_deprecated_value = True
return queryset.filter(repository__deprecated=is_deprecated_value)
class RoleSearchView(ContentSearchView):
def get_queryset(self):
queryset = super(RoleSearchView, self).get_queryset()
role_type = models.ContentType.get(constants.ContentType.ROLE)
return queryset.filter(content_type=role_type)
# FIXME(cutwater): Keeping views compatible with ELK based.
# Refactor request parameters parsing
class UserSearchView(base.ListAPIView):
    """Search users by case-insensitive username prefix.

    Accepts the query parameters ``username``, ``content`` or
    ``autocomplete`` interchangeably; the last one present wins.
    """
    model = auth_models.CustomUser
    serializer_class = serializers.UserSerializer
    filter_backends = [filters.OrderByFilter]

    def list(self, request, *args, **kwargs):
        """Return users whose username starts with the search term."""
        search_query = None
        for key, value in request.GET.items():
            if key in ('username', 'content', 'autocomplete'):
                search_query = value
        queryset = self.filter_queryset(self.get_queryset())
        if search_query:
            # Bug fix: QuerySet.filter() returns a *new* queryset; the
            # original code discarded the result, so the search term
            # silently had no effect.
            queryset = queryset.filter(username__istartswith=search_query)
        return self.make_response(queryset)
class PlatformsSearchView(base.ListAPIView):
model = models.Platform
serializer_class = serializers.PlatformSearchSerializer
filter_backends = [filters.OrderByFilter]
def get_queryset(self):
return (super(PlatformsSearchView, self).get_queryset()
.annotate(roles_count=Count('roles')))
def list(self, request, *args, **kwargs):
name = None
releases = None
autocomplete = None
for key, value in request.GET.items():
if key == 'name':
name = value
elif key == 'releases':
releases = value.split()
elif key in ('content', 'autocomplete'):
autocomplete = value
queryset = self.filter_queryset(self.get_queryset())
if name:
queryset = queryset.filter(name=name)
if releases:
queryset = queryset.filter(release__in=releases)
if autocomplete:
where_clause = """
to_tsvector(
name || ' ' || release || ' ' || coalesce(alias, ''))
@@ to_tsquery(quote_literal(%s) || ':*')
"""
queryset = queryset.extra(where=[where_clause],
params=[autocomplete])
return self.make_response(queryset)
class CloudPlatformsSearchView(base.ListAPIView):
model = models.CloudPlatform
serializer_class = serializers.CloudPlatformSearchSerializer
filter_backends = [filters.OrderByFilter]
def get_queryset(self):
return (super(CloudPlatformsSearchView, self).get_queryset()
.annotate(roles_count=Count('roles')))
def list(self, request, *args, **kwargs):
match_query = None
search_query = None
for key, value in request.GET.items():
if key == 'name':
match_query = value
elif key in ('content', 'autocomplete'):
search_query = value
queryset = self.filter_queryset(self.get_queryset())
if match_query:
queryset = queryset.filter(name=match_query)
if search_query:
queryset = queryset.filter(name__istartswith=search_query)
return self.make_response(queryset)
class TagsSearchView(base.ListAPIView):
    """Search tags by case-insensitive name prefix."""
    model = models.Tag
    serializer_class = serializers.TagSearchSerializer
    filter_backends = [filters.OrderByFilter]

    def get_queryset(self):
        # roles_count: number of roles carrying each tag.
        return (super(TagsSearchView, self).get_queryset()
                .annotate(roles_count=Count('roles')))

    def list(self, request, *args, **kwargs):
        """Return tags whose name starts with the search term."""
        search_query = None
        for key, value in request.GET.items():
            if key in ('tag', 'content', 'autocomplete'):
                search_query = value
        queryset = self.filter_queryset(self.get_queryset())
        if search_query:
            # Bug fixes: the lookup was misspelled as 'name_istartswith'
            # (a single underscore — Django would raise FieldError for an
            # unknown field), and the filtered queryset was discarded
            # instead of being assigned back.
            queryset = queryset.filter(name__istartswith=search_query)
        return self.make_response(queryset)
|
en
| 0.843186
|
# (c) 2012-2018, Ansible by Red Hat # # This file is part of Ansible Galaxy # # Ansible Galaxy is free software: you can redistribute it and/or modify # it under the terms of the Apache License as published by # the Apache Software Foundation, either version 2 of the License, or # (at your option) any later version. # # Ansible Galaxy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # Apache License for more details. # # You should have received a copy of the Apache License # along with Galaxy. If not, see <http://www.apache.org/licenses/>. # TODO(cutwater): Use serializer to parse request arguments # Content type # Platforms # Cloud platforms # Namespaces # Tags # Keywords # Vendor # Deprecated # Support for ansible-galaxy <= 2.6 autocomplete params # Calling self.add_keywords_filter() with no keywords sets existing # search_rank values to 0, so we want to avoid calling if autocomplete # is missing. # ln((MOD*c + MIN) * d + 1) # where c = community_score and d = download_count # We're using the community_score as a modifier to the download count # instead of just allocating a certain number of points based on the # score. The reason for this is that the download score is # a logaritmic scale so adding a fixed number of points ended up # boosting scores way too much for content with low numbers of # downloads. This system allows for the weight of the community score # to scale with the number of downloads # This function is better than using a linear function because it # makes it so that the effect of losing the first few points is # relatively minor, which reduces the impact of errors in scoring. # FIXME(cutwater): Keeping views compatible with ELK based. # Refactor request parameters parsing to_tsvector( name || ' ' || release || ' ' || coalesce(alias, '')) @@ to_tsquery(quote_literal(%s) || ':*')
| 1.791056
| 2
|
PlotFun/logHist.py
|
MatthewPeterKelly/miscpy
| 1
|
6629852
|
<reponame>MatthewPeterKelly/miscpy<filename>PlotFun/logHist.py
import matplotlib.pyplot as plt
import numpy as np
def logHist(X, N=30, fig=None, noclear=False, pdf=False, **kywds):
    '''
    Plot logarithmic histogram or probability density function from
    sampled data.

    Args:
        X (numpy.ndarray): 1-D array of sampled values (all values must
            be strictly positive, since bin edges are log-spaced)
        N (Optional[int]): Number of bins (default 30)
        fig (Optional[int]): Figure number (default None)
        noclear (Optional[bool]): If True, do NOT clear the figure before
            plotting (default False, i.e. the figure is cleared)
        pdf (Optional[bool]): If True normalize by bin width (default False)
            and display as curve instead of bar chart.
            Note: results are always normalized by number of samples
        **kywds: Arbitrary keyword arguments passed to matplotlib.pyplot.bar
            (or matplotlib.pyplot.semilogx if pdf is True)

    Returns:
        x (ndarray): abscissa values of frequencies
        n (ndarray): (normalized) frequency values
    '''
    # Logarithmically spaced bin edges spanning the data range.
    x = np.logspace(np.log10(np.min(X)), np.log10(np.max(X)), N + 1)
    n, x = np.histogram(X, bins=x)
    # Normalize frequencies by sample count (float division for py2 compat).
    n = n / float(X.size)
    plt.figure(fig)
    if not noclear:
        plt.clf()
    if pdf:
        # Convert counts to a density and plot at the bin centers.
        n /= np.diff(x)
        x = x[:-1] + np.diff(x) / 2
        plt.semilogx(x, n, **kywds)
    else:
        # Bars anchored at the left edge of each bin, one bin wide.
        plt.bar(x[:-1], n, width=np.diff(x), **kywds)
    a = plt.gca()
    # Snap x-limits to the surrounding powers of ten for a clean axis.
    a.set_xlim(10. ** np.floor(np.log10(np.min(X))),
               10. ** np.ceil(np.log10(np.max(X))))
    a.set_xscale('log')
    # (Removed a bare `plt.axis()` call: with no arguments it only *returns*
    # the current limits, so it had no effect.)
    return x, n
|
import matplotlib.pyplot as plt
import numpy as np
def logHist(X, N=30, fig=None, noclear=False, pdf=False, **kywds):
    '''
    Plot logarithmic histogram or probability density function from
    sampled data.

    Args:
        X (numpy.ndarray): 1-D array of sampled values (all values must
            be strictly positive, since bin edges are log-spaced)
        N (Optional[int]): Number of bins (default 30)
        fig (Optional[int]): Figure number (default None)
        noclear (Optional[bool]): If True, do NOT clear the figure before
            plotting (default False, i.e. the figure is cleared)
        pdf (Optional[bool]): If True normalize by bin width (default False)
            and display as curve instead of bar chart.
            Note: results are always normalized by number of samples
        **kywds: Arbitrary keyword arguments passed to matplotlib.pyplot.bar
            (or matplotlib.pyplot.semilogx if pdf is True)

    Returns:
        x (ndarray): abscissa values of frequencies
        n (ndarray): (normalized) frequency values
    '''
    # Logarithmically spaced bin edges spanning the data range.
    x = np.logspace(np.log10(np.min(X)), np.log10(np.max(X)), N + 1)
    n, x = np.histogram(X, bins=x)
    # Normalize frequencies by sample count (float division for py2 compat).
    n = n / float(X.size)
    plt.figure(fig)
    if not noclear:
        plt.clf()
    if pdf:
        # Convert counts to a density and plot at the bin centers.
        n /= np.diff(x)
        x = x[:-1] + np.diff(x) / 2
        plt.semilogx(x, n, **kywds)
    else:
        # Bars anchored at the left edge of each bin, one bin wide.
        plt.bar(x[:-1], n, width=np.diff(x), **kywds)
    a = plt.gca()
    # Snap x-limits to the surrounding powers of ten for a clean axis.
    a.set_xlim(10. ** np.floor(np.log10(np.min(X))),
               10. ** np.ceil(np.log10(np.max(X))))
    a.set_xscale('log')
    # (Removed a bare `plt.axis()` call: with no arguments it only *returns*
    # the current limits, so it had no effect.)
    return x, n
|
en
| 0.535085
|
Plot logarithmic histogram or probability density function from sampled data. Args: X (numpy.ndarray): 1-D array of sampled values N (Optional[int]): Number of bins (default 30) fig (Optional[int]): Figure number (default None) noclear (Optioanl[bool]): Clear figure (default False) pdf (Optional[bool]): If True normalize by bin width (default False) and display as curve instead of bar chart. Note: results are always normalized by number of samples **kywds: Arbitrary keyword arguments passed to matplotlib.pyplot.bar (or matplotlib.pyplot.semilogx if pdf is True) Returns: x (ndarray): abscissa values of frequencies n (ndarray): (normalized) frequency values
| 3.425909
| 3
|
konst/json.py
|
albertoconnor/django-konst
| 10
|
6629853
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import json
from django.core.serializers.json import DjangoJSONEncoder
from konst import Constant
class ExtendedJSONEncoder(DjangoJSONEncoder):
    """Add support for serializing our class Constant."""

    def default(self, obj):
        # Serialize a Constant as its underlying value (`.v`); defer
        # everything else to DjangoJSONEncoder's handling.
        if isinstance(obj, Constant):
            return obj.v
        else:
            return super(ExtendedJSONEncoder, self).default(obj)
def dumps(*args, **kwargs):
    """json.dumps wrapper that defaults ``cls`` to ExtendedJSONEncoder.

    A caller-supplied ``cls`` keyword still takes precedence.
    """
    encoder_cls = kwargs.pop("cls", ExtendedJSONEncoder)
    kwargs["cls"] = encoder_cls
    return json.dumps(*args, **kwargs)
def loads(*args, **kwargs):
    """Thin wrapper around json.loads, kept for API symmetry with dumps()."""
    decoded = json.loads(*args, **kwargs)
    return decoded
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import json
from django.core.serializers.json import DjangoJSONEncoder
from konst import Constant
class ExtendedJSONEncoder(DjangoJSONEncoder):
"""Add support for serializing our class Constant."""
def default(self, obj):
if isinstance(obj, Constant):
return obj.v
else:
return super(ExtendedJSONEncoder, self).default(obj)
def dumps(*args, **kwargs):
kwargs["cls"] = kwargs.pop("cls", ExtendedJSONEncoder)
return json.dumps(*args, **kwargs)
def loads(*args, **kwargs):
return json.loads(*args, **kwargs)
|
en
| 0.776151
|
# -*- coding: utf-8 -*- Add support for serializing our class Constant.
| 2.313372
| 2
|
src/event_handler.py
|
andy-c-jones/verify-event-recorder-service
| 0
|
6629854
|
<gh_stars>0
import boto3
import logging
import os
from src.database import create_db_connection, write_to_database
from src.decryption import decrypt_message
from src.event_mapper import event_from_json
from src.s3 import fetch_decryption_key
from src.sqs import fetch_single_message, delete_message
logging.basicConfig(level=logging.INFO)
# noinspection PyUnusedLocal
def store_queued_events(_, __):
    """Drain the SQS queue, decrypting and persisting each event.

    Lambda-style entry point: both positional arguments (event, context)
    are ignored. Processing stops when the queue returns no message.
    """
    sqs_client = boto3.client('sqs')
    queue_url = os.environ['QUEUE_URL']
    db_connection = create_db_connection()
    decryption_key = fetch_decryption_key()
    while True:
        message = fetch_single_message(sqs_client, queue_url)
        if message is None:
            # Queue is empty - we're done.
            break
        # noinspection PyBroadException
        # catch all errors and log them - we never want a single failing message to kill the process.
        try:
            decrypted_message = decrypt_message(message['Body'], decryption_key)
            event = event_from_json(decrypted_message)
            write_to_database(event, db_connection)
            # Delete only after a successful write; a failed message stays
            # on the queue (presumably redelivered after the visibility
            # timeout - confirm against the queue configuration).
            delete_message(sqs_client, queue_url, message)
        except Exception as exception:
            logging.getLogger('event-recorder').exception('Failed to store message')
|
import boto3
import logging
import os
from src.database import create_db_connection, write_to_database
from src.decryption import decrypt_message
from src.event_mapper import event_from_json
from src.s3 import fetch_decryption_key
from src.sqs import fetch_single_message, delete_message

logging.basicConfig(level=logging.INFO)


# noinspection PyUnusedLocal
def store_queued_events(_, __):
    """Drain the SQS queue: decrypt, map and persist each queued event.

    Entry point in AWS-Lambda handler shape; both arguments (event,
    context) are unused.  Loops until the queue yields no message.
    """
    sqs_client = boto3.client('sqs')
    queue_url = os.environ['QUEUE_URL']  # required environment variable
    db_connection = create_db_connection()
    decryption_key = fetch_decryption_key()
    while True:
        message = fetch_single_message(sqs_client, queue_url)
        if message is None:
            break  # queue is empty
        # noinspection PyBroadException
        # catch all errors and log them - we never want a single failing message to kill the process.
        try:
            decrypted_message = decrypt_message(message['Body'], decryption_key)
            event = event_from_json(decrypted_message)
            write_to_database(event, db_connection)
            delete_message(sqs_client, queue_url, message)  # only on success
        except Exception as exception:
            logging.getLogger('event-recorder').exception('Failed to store message')
|
en
| 0.625768
|
# noinspection PyUnusedLocal # noinspection PyBroadException # catch all errors and log them - we never want a single failing message to kill the process.
| 1.978056
| 2
|
05 - Modules/04 - imports - import key - usage summary/mainfile.py
|
python-demo-codes/basics
| 2
|
6629855
|
<gh_stars>1-10
# HEAD
# Modules - Understanding and using import key with directory module
# DESCRIPTION
# Describes usage of import statements and using a
# directory module for import
# import statement, variations of import
# RESOURCES
#
# __name__
# |----- /mod -----
# | |
# | |----- printfile.py
# |
# |----- mainfile.py
# USING as a script file (running the file as a script)
# Considers the script file to be within folder `mod` named `printfile.py`
# Considers the main file importing the script file to be outside the `mod` folder and in the same level
# When __name__ is not assigned:
# default value for __name__ assigned will be __main__
# When __name__ is assigned:
# value assigned for __name__ will be the one provided
# When you have `if __name__ == "value"` statements
# One that will be triggered will be the one that matches
# USING the file as a module (running a different file that imports the file specified)
# Considers the script file to be within folder `mod` named `printfile.py`
# Considers the main file importing the script file to be outside the `mod` folder and in the same level
# When __name__ is not assigned:
# default value for module will be generally filename or path.filename
# (based on whether it will be within the same folder or within different folder)
# default value for __name__ assigned will be name of the module
# When __name__ is assigned:
# value assigned for __name__ will be the one provided
# module name will be taken as filename or path.filename, which will be used for import statement
# (based on whether it will be within the same folder or within different folder)
# When you import the file into a different file
# and __name__ is assigned a different value then
# it will not be triggering `if __name__ == "__main__"` block and
# it will be triggering `if __name__ == "providedvalue"` block
# DIFFERENT FORMS OF USAGES
# WAY ONE
# import mod.filename as pr
# from mod import filename as pr
# WAY TWO
# import mod.filename
# from mod import filename
# WAY THREE
# import sys
#
# sys.path.append('./mod/')
# import filename
# WAY FOUR
# import sys
#
# sys.path.append('./')
# import mod.filename as pr
# WAY FIVE
# all obj will be available in the global space
# including __name__. Hence, overridden
# from mod.filename import *
# WAY SIX
# Will only import func and func_two obj
# from mod.filename import func, func_two
# Wrong
# from mod.filename import * as pr
# # Error Handling using ImportError
# try:
# # Non-existent module
# import def
# except ImportError:
# print('Module not found')
# Demonstrates the value of __name__: "__main__" when run as a script,
# otherwise the (possibly overridden) module name.
# NOTE(review): both lines print THIS module's __name__; the second label
# suggests the imported module's attribute was intended - confirm.
print('__name__', __name__)
print('mod.filename.__name__', __name__)
|
# HEAD
# Modules - Understanding and using import key with directory module
# DESCRIPTION
# Describes usage of import statements and using a
# directory module for import
# import statement, variations of import
# RESOURCES
#
# __name__
# |----- /mod -----
# | |
# | |----- printfile.py
# |
# |----- mainfile.py
# USING as a script file (running the file as a script)
# Considers the script file to be within folder `mod` named `printfile.py`
# Considers the main file importing the script file to be outside the `mod` folder and in the same level
# When __name__ is not assigned:
# default value for __name__ assigned will be __main__
# When __name__ is assigned:
# value assigned for __name__ will be the one provided
# When you have `if __name__ == "value"` statements
# One that will be triggered will be the one that matches
# USING the file as a module (running a different file that imports the file specified)
# Considers the script file to be within folder `mod` named `printfile.py`
# Considers the main file importing the script file to be outside the `mod` folder and in the same level
# When __name__ is not assigned:
# default value for module will be generally filename or path.filename
# (based on whether it will be within the same folder or within different folder)
# default value for __name__ assigned will be name of the module
# When __name__ is assigned:
# value assigned for __name__ will be the one provided
# module name will be taken as filename or path.filename, which will be used for import statement
# (based on whether it will be within the same folder or within different folder)
# When you import the file into a different file
# and __name__ is assigned a different value then
# it will not be triggering `if __name__ == "__main__"` block and
# it will be triggering `if __name__ == "providedvalue"` block
# DIFFERENT FORMS OF USAGES
# WAY ONE
# import mod.filename as pr
# from mod import filename as pr
# WAY TWO
# import mod.filename
# from mod import filename
# WAY THREE
# import sys
#
# sys.path.append('./mod/')
# import filename
# WAY FOUR
# import sys
#
# sys.path.append('./')
# import mod.filename as pr
# WAY FIVE
# all obj will be available in the global space
# including __name__. Hence, overridden
# from mod.filename import *
# WAY SIX
# Will only import func and func_two obj
# from mod.filename import func, func_two
# Wrong
# from mod.filename import * as pr
# # Error Handling using ImportError
# try:
# # Non-existent module
# import def
# except ImportError:
# print('Module not found')
# Demonstrates the value of __name__: "__main__" when run as a script,
# otherwise the (possibly overridden) module name.
# NOTE(review): both lines print THIS module's __name__; the second label
# suggests the imported module's attribute was intended - confirm.
print('__name__', __name__)
print('mod.filename.__name__', __name__)
|
en
| 0.741557
|
# HEAD # Modules - Understanding and using import key with directory module # DESCRIPTION # Describes usage of import statements and using a # directory module for import # import statement, variations of import # RESOURCES # # __name__ # |----- /mod ----- # | | # | |----- printfile.py # | # |----- mainfile.py # USING as a script file (running the file as a script) # Considers the script file to be within folder `mod` named `printfile.py` # Considers the main file importing the script file to be outside the `mod` folder and in the same level # When __name__ is not assigned: # default value for __name__ assigned will be __main__ # When __name__ is assigned: # value assigned for __name__ will be the one provided # When you have `if name == "value"` statements # One that will be triggered will be the one that matches # USING the file as a module (running a different file that imports the file specified) # Considers the script file to be within folder `mod` named `printfile.py` # Considers the main file importing the script file to be outside the `mod` folder and in the same level # When __name__ is not assigned: # default value for module will be generally filename or path.filename # (based on whether it will be within the same folder or within different folder) # default value for __name__ assigned will be name of the module # When __name__ is assigned: # value assigned for __name__ will be the one provided # module name will be take as filename or path.filename, which will be used for import statement # (based on whether it will be within the same folder or within different folder) # When you import the file into a different file # and __name__ is assigned a different value then # it will not be triggering `if __name__ == "__main__"` block and # it will be triggering `if __name__ == "providedvalue"` block # DIFFERENT FORMS OF USAGES # WAY ONE # import mod.filename as pr # from mod import filename as pr # WAY TWO # import mod.filename # from mod import filename # WAY 
THREE # import sys # # sys.path.append('./mod/') # import filename # WAY FOUR # import sys # # sys.path.append('./') # import mod.filename as pr # WAY FIVE # all obj will be available in the global space # including __name__. Hence, overridden # from mod.filename import * # WAY SIX # Will only import func and func_two obj # from mod.filename import func, func_two # Wrong # from mod.filename import * as pr # # Error Handling using ImportError # try: # # Non-existent module # import def # except ImportError: # print('Module not found')
| 3.432123
| 3
|
consumer_example.py
|
moonsense/python-sdk
| 6
|
6629856
|
"""
Copyright 2021 Moonsense, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""
This example script shows how you can easily write a session data consumer
that will periodically pull for new session data chunks and process them.
On start, the script checks for active sessions that received new data in
the last 90 seconds and for each starts a thread to process chunks.
Each thread maintains a list of processed chunks and for every new chunk
it will call the process_chunk() method. That method gets access to all the
bundles in a chunk and can publish cards in the session that will be
visible in the Recorder app.
"""
import logging
import itertools
import time
import threading
from datetime import datetime, timedelta, timezone
from moonsense.models import Session, Chunk
from moonsense.client import Client
# Override the process_chunk method to do useful things with the session data
def process_chunk(client: Client, session: Session, chunk: Chunk) -> None:
    """Handle one newly discovered chunk: read its bundles and log the count."""
    bundles = list(client.read_chunk(session.session_id, chunk.chunk_id))
    logging.info(f"Chunk {chunk.chunk_id} has {len(bundles)} bundles")
    # Uncomment to publish a card visible in the Recorder app:
    # client.create_card(
    #     session.session_id,
    #     "Python Consumer Example",
    #     f"Chunk {chunk.chunk_id} has {len(bundles)} bundles",
    # )
def main():
    """Poll for recently active sessions and spawn one monitor thread each."""
    client = Client()
    monitored = set()  # session ids that already have a worker thread
    while True:
        logging.info("Checking for recent sessions that are actively receiving data")
        # Only the 25 most recent sessions are inspected per poll.
        for session in itertools.islice(client.list_sessions(), 25):
            if session.session_id not in monitored and active(session):
                logging.info("Starting to monitor %s", session.session_id)
                monitored.add(session.session_id)
                threading.Thread(
                    target=worker,
                    args=(
                        client,
                        session,
                    ),
                ).start()
        logging.info("Waiting for 5 seconds until the next check")
        time.sleep(5)
def worker(client: Client, session: Session) -> None:
    """Process newly appearing chunks of one session until it goes idle."""
    logging.info(
        "Fetching the initial list of session data chunks for session %s",
        session.session_id,
    )
    # Chunks that already exist on startup are treated as processed.
    processed = set(c.chunk_id for c in client.list_chunks(session.session_id))
    while active(session):
        for chunk in client.list_chunks(session.session_id):
            if chunk.chunk_id not in processed:
                logging.info(
                    "Processing chunk %s for session %s",
                    chunk.chunk_id,
                    session.session_id,
                )
                process_chunk(client, session, chunk)
                processed.add(chunk.chunk_id)
        logging.info("Waiting for 30 seconds to check for new session data chunks")
        time.sleep(30)
        # Refresh session metadata so active() sees the latest event time.
        session = client.describe_session(session.session_id)
    logging.info("Done monitoring session %s", session.session_id)
def active(session: Session) -> bool:
    """Return True when the session received data within the last 90 seconds."""
    latest = datetime.fromtimestamp(session.newest_event.seconds, timezone.utc)
    cutoff = datetime.now(timezone.utc) - timedelta(seconds=90)
    return latest > cutoff
if __name__ == "__main__":
    # DEBUG level makes SDK/network activity visible while experimenting.
    logging.basicConfig(level=logging.DEBUG)
    main()
|
"""
Copyright 2021 Moonsense, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""
This example script shows how you can easily write a session data consumer
that will periodically pull for new session data chunks and process them.
On start, the script checks for active sessions that received new data in
the last 90 seconds and for each starts a thread to process chunks.
Each thread maintains a list of processed chunks and for every new chunk
it will call the process_chunk() method. That method gets access to all the
bundles in a chunk and can publish cards in the session that will be
visible in the Recorder app.
"""
import logging
import itertools
import time
import threading
from datetime import datetime, timedelta, timezone
from moonsense.models import Session, Chunk
from moonsense.client import Client
# Override the process_chunk method to do useful things with the session data
def process_chunk(client: Client, session: Session, chunk: Chunk) -> None:
    """Handle one chunk: read all its bundles and log how many there are."""
    bundles = list(client.read_chunk(session.session_id, chunk.chunk_id))
    logging.info(f"Chunk {chunk.chunk_id} has {len(bundles)} bundles")
    # client.create_card(
    #     session.session_id,
    #     "Python Consumer Example",
    #     f"Chunk {chunk.chunk_id} has {len(bundles)} bundles",
    # )


def main():
    """Poll recent sessions and spawn one monitor thread per active session."""
    client = Client()
    monitored = set()  # session ids that already have a worker thread
    while True:
        logging.info("Checking for recent sessions that are actively receiving data")
        for session in itertools.islice(client.list_sessions(), 25):
            if session.session_id not in monitored and active(session):
                logging.info("Starting to monitor %s", session.session_id)
                monitored.add(session.session_id)
                threading.Thread(
                    target=worker,
                    args=(
                        client,
                        session,
                    ),
                ).start()
        logging.info("Waiting for 5 seconds until the next check")
        time.sleep(5)


def worker(client: Client, session: Session) -> None:
    """Process newly appearing chunks of one session until it goes idle."""
    logging.info(
        "Fetching the initial list of session data chunks for session %s",
        session.session_id,
    )
    # Pre-existing chunks count as already processed.
    processed = set(c.chunk_id for c in client.list_chunks(session.session_id))
    while active(session):
        for chunk in client.list_chunks(session.session_id):
            if chunk.chunk_id not in processed:
                logging.info(
                    "Processing chunk %s for session %s",
                    chunk.chunk_id,
                    session.session_id,
                )
                process_chunk(client, session, chunk)
                processed.add(chunk.chunk_id)
        logging.info("Waiting for 30 seconds to check for new session data chunks")
        time.sleep(30)
        # Refresh metadata so active() sees the latest event timestamp.
        session = client.describe_session(session.session_id)
    logging.info("Done monitoring session %s", session.session_id)


def active(session: Session) -> bool:
    """Return True when the session received data in the last 90 seconds."""
    newest_event = datetime.fromtimestamp(session.newest_event.seconds, timezone.utc)
    return newest_event > datetime.now(timezone.utc) - timedelta(seconds=90)


if __name__ == "__main__":
    logging.basicConfig(level=logging.DEBUG)
    main()
|
en
| 0.843038
|
Copyright 2021 Moonsense, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. This example script shows how you can easily write a session data consumer that will periodically pull for new session data chunks and process them. On start, the scripts checks for active sessions that received new data in the last 90 seconds and for each stars a thread to process chunks. Each thread maintains a list of processed chunks and for every new chunk it will call the process_chunk() method. That method gets access to all the bundles in a chunk and can publish cards in the session that will be visible in the Recorder app. # Override the process_chunk method to do useful things with the session data # client.create_card( # session.session_id, # "Python Consumer Example", # f"Chunk {chunk.chunk_id} has {len(bundles)} bundles", # )
| 2.06596
| 2
|
clustering_system/clustering/igmm/CrpClustering.py
|
vanam/clustering
| 5
|
6629857
|
<filename>clustering_system/clustering/igmm/CrpClustering.py
import math
import numpy as np
import scipy.misc
from clustering_system.clustering.ClusteringABC import CovarianceType
from clustering_system.clustering.GibbsClusteringABC import GibbsClusteringABC
from clustering_system.clustering.mixture.GaussianMixtureABC import PriorABC
from clustering_system.utils import draw
from clustering_system.visualization.LikelihoodVisualizer import LikelihoodVisualizer
class CrpClustering(GibbsClusteringABC):
    """Clustering based on the Chinese Restaurant Process (CRP).

    Gibbs sampler in which each document either joins an existing cluster
    (weight proportional to the cluster occupancy) or opens a new one
    (weight proportional to ``alpha``).
    """

    def __init__(self, K: int, D: int, alpha: float, prior: PriorABC, n_iterations: int,
                 visualizer: LikelihoodVisualizer = None,
                 covariance_type: CovarianceType = CovarianceType.full):
        """
        :param K: Init number of clusters
        :param D: The length of a feature vector
        :param alpha: Hyperparameter of sitting alone
        :param prior: Prior
        :param n_iterations: The number of iterations to perform each update
        :param visualizer: Likelihood visualizer
        :param covariance_type: Covariance type
        """
        super().__init__(D, alpha, prior, n_iterations, K_max=K, visualizer=visualizer, covariance_type=covariance_type)
        # Cache log(alpha): the log weight of opening a new cluster, needed
        # on every sampling step.
        self.log_alpha = math.log(self.alpha)

    def _sample_document(self, i: int):
        """
        Sample a new cluster assignment for document i.

        :param i: document id
        """
        # scipy.misc.logsumexp was deprecated and removed from SciPy; the
        # canonical location is scipy.special.logsumexp.
        from scipy.special import logsumexp

        # Remove component assignment for a document i
        self._remove_document(i)
        cluster_numbers = np.unique(self.mixture.z)
        cluster_numbers = cluster_numbers[cluster_numbers != -1]  # Do not consider unassigned items
        # Log-probabilities of joining each existing component ...
        probabilities = self.mixture.get_posterior_predictive(i, cluster_numbers) + self._get_mixture_probability(cluster_numbers)
        # ... followed by the log-probability of opening a new component.
        # BUGFIX: the new-cluster term is appended at the END so that an
        # index z_i < len(cluster_numbers) really refers to
        # cluster_numbers[z_i]; the original prepended it, misaligning the
        # sampled index with the cluster numbers by one.
        probabilities = np.append(
            probabilities,
            self.mixture.get_prior_predictive(i) + self._get_new_cluster_mixture_probability()
        )
        # Convert log probabilities to probabilities (softmax)
        probabilities = np.exp(probabilities - logsumexp(probabilities))
        # Sample new component assignment
        z_i = draw(probabilities)
        z = cluster_numbers[z_i] if z_i < len(cluster_numbers) else self._get_new_cluster_number()
        # Add document to new component
        self._add_document(i, z)

    def _get_mixture_probability(self, cluster_numbers: np.ndarray) -> np.ndarray:
        """
        Return the log mixture probability under component `k` for each component.

        :return: np.ndarray of K floats where K is number of non-empty components sorted by cluster number
        """
        K = len(cluster_numbers)
        probabilities = np.empty(K, float)
        for i, cn in enumerate(cluster_numbers):
            # BUGFIX: the CRP weight of an occupied cluster is its size N_k;
            # in log space (as documented here, and consistent with
            # log_alpha for a new cluster) that is log(N_k), not the raw
            # count that was returned before.
            probabilities[i] = math.log(self.mixture.N_k[cn])
        return probabilities

    def _get_new_cluster_mixture_probability(self) -> float:
        """
        Return the log mixture probability for new component.

        :return: log probability
        """
        return self.log_alpha
|
<filename>clustering_system/clustering/igmm/CrpClustering.py
import math
import numpy as np
import scipy.misc
from clustering_system.clustering.ClusteringABC import CovarianceType
from clustering_system.clustering.GibbsClusteringABC import GibbsClusteringABC
from clustering_system.clustering.mixture.GaussianMixtureABC import PriorABC
from clustering_system.utils import draw
from clustering_system.visualization.LikelihoodVisualizer import LikelihoodVisualizer
class CrpClustering(GibbsClusteringABC):
    """Clustering based on the Chinese Restaurant Process"""

    def __init__(self, K:int, D: int, alpha: float, prior: PriorABC, n_iterations: int,
                 visualizer: LikelihoodVisualizer = None,
                 covariance_type: CovarianceType = CovarianceType.full):
        """
        :param K: Init number of clusters
        :param D: The length of a feature vector
        :param alpha: Hyperparameter of sitting alone
        :param prior: Prior
        :param n_iterations: The number of iterations to perform each update
        :param visualizer: Likelihood visualizer
        :param covariance_type: Covariance type
        """
        super().__init__(D, alpha, prior, n_iterations, K_max=K, visualizer=visualizer, covariance_type=covariance_type)
        # Cache
        # log(alpha) is the log weight of opening a new cluster.
        self.log_alpha = math.log(self.alpha)

    def _sample_document(self, i: int):
        """
        Sample document i

        :param i: document id
        """
        # Remove component assignment for a document i
        self._remove_document(i)
        cluster_numbers = np.unique(self.mixture.z)
        cluster_numbers = cluster_numbers[cluster_numbers != -1]  # Do not consider unassigned items
        # Calculate component assignment probabilities for each component
        probabilities = self.mixture.get_posterior_predictive(i, cluster_numbers) + self._get_mixture_probability(cluster_numbers)
        # Calculate component assignment probabilities for new component
        # NOTE(review): the new-cluster term is PREPENDED here (index 0),
        # but below z_i < len(cluster_numbers) maps to cluster_numbers[z_i]
        # from 0 - verify the index alignment against draw()'s contract.
        probabilities = np.append(
            self.mixture.get_prior_predictive(i) + self._get_new_cluster_mixture_probability(),
            probabilities
        )
        # Convert log probabilities to probabilities (softmax)
        # NOTE(review): scipy.misc.logsumexp was removed from modern SciPy
        # (now scipy.special.logsumexp) - confirm the pinned SciPy version.
        probabilities = np.exp(probabilities - scipy.misc.logsumexp(probabilities))
        # Sample new component assignment
        z_i = draw(probabilities)
        z = cluster_numbers[z_i] if z_i < len(cluster_numbers) else self._get_new_cluster_number()
        # Add document to new component
        self._add_document(i, z)

    def _get_mixture_probability(self, cluster_numbers: np.ndarray) -> np.ndarray:
        """
        Return the log mixture probability under component `k` for each component.

        :return: np.ndarray of K floats where K is number of non-empty components sorted by cluster number
        """
        K = len(cluster_numbers)
        probabilities = np.empty(K, float)
        for i, cn in enumerate(cluster_numbers):
            # NOTE(review): returns the raw occupancy count N_k although the
            # docstring says LOG probability (and log_alpha is a log) -
            # confirm whether log(N_k) was intended.
            probabilities[i] = self.mixture.N_k[cn]
        return probabilities

    def _get_new_cluster_mixture_probability(self) -> float:
        """
        Return the log mixture probability for new component.

        :return: log probability
        """
        return self.log_alpha
|
en
| 0.741033
|
Clustering based on the Chinese Restaurant Process :param K: Init number of clusters :param D: The length of a feature vector :param alpha: Hyperparameter of sitting alone :param prior: Prior :param n_iterations: The number of iterations to perform each update :param visualizer: Likelihood visualizer :param covariance_type: Covariance type # Cache Sample document i :param i: document id # Remove component assignment for a document i # Do not consider unassigned items # Calculate component assignment probabilities for each component # Calculate component assignment probabilities for new component # Convert log probabilities to probabilities (softmax) # Sample new component assignment # Add document to new component Return the log mixture probability under component `k` for each component. :return: np.ndarray of K floats where K is number of non-empty components sorted by cluster number Return the log mixture probability for new component. :return: log probability
| 2.620925
| 3
|
test/test_cli.py
|
laundmo/tidypy
| 33
|
6629858
|
import subprocess

import pytest
from click.testing import CliRunner
from six import text_type

from tidypy.cli import main


def test_default():
    """--version and --help both succeed and produce output."""
    runner = CliRunner()
    result = runner.invoke(main, ['--version'])
    assert result.exit_code == 0
    assert result.output != ''
    result = runner.invoke(main, ['--help'])
    assert result.exit_code == 0
    assert result.output != ''


def test_check(tmpdir):
    """`check` exits 0 on an empty project, 1 when issues are found."""
    runner = CliRunner()
    project_dir = tmpdir.mkdir('empty_project')
    result = runner.invoke(main, ['check', text_type(project_dir), '--disable-progress', '--report=null'])
    assert result.exit_code == 0
    assert result.output == ''
    result = runner.invoke(main, ['check', 'test/project1'])
    assert result.exit_code == 1
    assert result.output != ''
    assert 'Unexpected exception' not in result.output
    # Exercise the main option surface; the null report writes to a file.
    result = runner.invoke(main, ['check', 'test/project1', '--exclude=blahblah', '--disable-merge', '--disable-noqa', '--workers=3', '--tool=pylint', '--disable-progress', '--report=null:%s/foo' % (text_type(project_dir),)])
    assert result.exit_code == 1
    assert result.output == ''


def test_list_codes():
    """Every output format works; toml is the default and formats differ."""
    runner = CliRunner()
    result = runner.invoke(main, ['list-codes'])
    assert result.exit_code == 0
    assert result.output != ''
    toml_result = runner.invoke(main, ['list-codes', '--format=toml'])
    assert toml_result.exit_code == 0
    assert toml_result.output == result.output
    json_result = runner.invoke(main, ['list-codes', '--format=json'])
    assert json_result.exit_code == 0
    assert json_result.output != ''
    assert json_result.output != toml_result.output
    yaml_result = runner.invoke(main, ['list-codes', '--format=yaml'])
    assert yaml_result.exit_code == 0
    assert yaml_result.output != ''
    assert yaml_result.output != toml_result.output
    assert yaml_result.output != json_result.output
    csv_result = runner.invoke(main, ['list-codes', '--format=csv'])
    assert csv_result.exit_code == 0
    assert csv_result.output != ''
    assert csv_result.output != toml_result.output
    assert csv_result.output != json_result.output
    assert csv_result.output != yaml_result.output


def test_default_config():
    """default-config emits config; the --pyproject variant differs."""
    runner = CliRunner()
    result = runner.invoke(main, ['default-config'])
    assert result.exit_code == 0
    assert result.output != ''
    result2 = runner.invoke(main, ['default-config', '--pyproject'])
    assert result2.exit_code == 0
    assert result2.output != ''
    assert result2.output != result.output


def test_broken_config(tmpdir):
    """An unparseable pyproject.toml fails with a clear error message."""
    runner = CliRunner()
    project_dir = tmpdir.mkdir('broken_project')
    project_dir.join('pyproject.toml').write('broken[garbage')
    result = runner.invoke(main, ['check', text_type(project_dir)])
    assert result.exit_code == 1
    assert result.output.startswith('Could not parse config file')


def test_purge_config_cache():
    """purge-config-cache succeeds."""
    runner = CliRunner()
    result = runner.invoke(main, ['purge-config-cache'])
    assert result.exit_code == 0
def executable_exists(executable):
    """Return True when *executable* can actually be launched on this host."""
    try:
        # Launching is the only portable existence check here; a missing
        # binary raises OSError (FileNotFoundError).
        subprocess.call([executable])
        return True
    except OSError:
        return False
@pytest.mark.skipif(not executable_exists('git'), reason='git not available')
def test_vcs_git(tmpdir):
    """install-vcs/remove-vcs hooks behave in and outside a git repo."""
    git_dir = text_type(tmpdir.mkdir('git'))
    subprocess.call(['git', 'init', git_dir])
    runner = CliRunner()
    result = runner.invoke(main, ['install-vcs', 'git', git_dir])
    assert result.exit_code == 0
    # Installing and removing twice must be idempotent.
    result = runner.invoke(main, ['install-vcs', 'git', git_dir])
    assert result.exit_code == 0
    result = runner.invoke(main, ['remove-vcs', 'git', git_dir])
    assert result.exit_code == 0
    result = runner.invoke(main, ['remove-vcs', 'git', git_dir])
    assert result.exit_code == 0
    # A directory that is not a git repository: install fails, remove is a no-op.
    other_dir = text_type(tmpdir.mkdir('other'))
    result = runner.invoke(main, ['install-vcs', 'git', other_dir])
    assert result.exit_code == 1
    result = runner.invoke(main, ['remove-vcs', 'git', other_dir])
    assert result.exit_code == 0


@pytest.mark.skipif(not executable_exists('hg'), reason='hg not available')
def test_vcs_mercurial(tmpdir):
    """install-vcs/remove-vcs hooks behave in and outside an hg repo."""
    hg_dir = text_type(tmpdir.mkdir('mercurial'))
    subprocess.call(['hg', 'init', hg_dir])
    runner = CliRunner()
    result = runner.invoke(main, ['install-vcs', 'hg', hg_dir])
    assert result.exit_code == 0
    result = runner.invoke(main, ['install-vcs', 'hg', hg_dir])
    assert result.exit_code == 0
    result = runner.invoke(main, ['remove-vcs', 'hg', hg_dir])
    assert result.exit_code == 0
    result = runner.invoke(main, ['remove-vcs', 'hg', hg_dir])
    assert result.exit_code == 0
    other_dir = text_type(tmpdir.mkdir('other'))
    result = runner.invoke(main, ['install-vcs', 'hg', other_dir])
    assert result.exit_code == 1
    result = runner.invoke(main, ['remove-vcs', 'hg', other_dir])
    assert result.exit_code == 1


def test_extensions():
    """`extensions` lists installed extensions in every supported format."""
    runner = CliRunner()
    result = runner.invoke(main, ['extensions'])
    assert result.exit_code == 0
    assert result.output != ''
    toml_result = runner.invoke(main, ['extensions', '--format=toml'])
    assert toml_result.exit_code == 0
    assert toml_result.output == result.output
    json_result = runner.invoke(main, ['extensions', '--format=json'])
    assert json_result.exit_code == 0
    assert json_result.output != ''
    assert json_result.output != toml_result.output
    yaml_result = runner.invoke(main, ['extensions', '--format=yaml'])
    assert yaml_result.exit_code == 0
    assert yaml_result.output != ''
    assert yaml_result.output != toml_result.output
    assert yaml_result.output != json_result.output
    csv_result = runner.invoke(main, ['extensions', '--format=csv'])
    assert csv_result.exit_code == 0
    assert csv_result.output != ''
    assert csv_result.output != toml_result.output
    assert csv_result.output != json_result.output
    assert csv_result.output != yaml_result.output
|
import subprocess
import pytest
from click.testing import CliRunner
from six import text_type
from tidypy.cli import main
def test_default():
runner = CliRunner()
result = runner.invoke(main, ['--version'])
assert result.exit_code == 0
assert result.output != ''
result = runner.invoke(main, ['--help'])
assert result.exit_code == 0
assert result.output != ''
def test_check(tmpdir):
runner = CliRunner()
project_dir = tmpdir.mkdir('empty_project')
result = runner.invoke(main, ['check', text_type(project_dir), '--disable-progress', '--report=null'])
assert result.exit_code == 0
assert result.output == ''
result = runner.invoke(main, ['check', 'test/project1'])
assert result.exit_code == 1
assert result.output != ''
assert 'Unexpected exception' not in result.output
result = runner.invoke(main, ['check', 'test/project1', '--exclude=blahblah', '--disable-merge', '--disable-noqa', '--workers=3', '--tool=pylint', '--disable-progress', '--report=null:%s/foo' % (text_type(project_dir),)])
assert result.exit_code == 1
assert result.output == ''
def test_list_codes():
runner = CliRunner()
result = runner.invoke(main, ['list-codes'])
assert result.exit_code == 0
assert result.output != ''
toml_result = runner.invoke(main, ['list-codes', '--format=toml'])
assert toml_result.exit_code == 0
assert toml_result.output == result.output
json_result = runner.invoke(main, ['list-codes', '--format=json'])
assert json_result.exit_code == 0
assert json_result.output != ''
assert json_result.output != toml_result.output
yaml_result = runner.invoke(main, ['list-codes', '--format=yaml'])
assert yaml_result.exit_code == 0
assert yaml_result.output != ''
assert yaml_result.output != toml_result.output
assert yaml_result.output != json_result.output
csv_result = runner.invoke(main, ['list-codes', '--format=csv'])
assert csv_result.exit_code == 0
assert csv_result.output != ''
assert csv_result.output != toml_result.output
assert csv_result.output != json_result.output
assert csv_result.output != yaml_result.output
def test_default_config():
runner = CliRunner()
result = runner.invoke(main, ['default-config'])
assert result.exit_code == 0
assert result.output != ''
result2 = runner.invoke(main, ['default-config', '--pyproject'])
assert result2.exit_code == 0
assert result2.output != ''
assert result2.output != result.output
def test_broken_config(tmpdir):
runner = CliRunner()
project_dir = tmpdir.mkdir('broken_project')
project_dir.join('pyproject.toml').write('broken[garbage')
result = runner.invoke(main, ['check', text_type(project_dir)])
assert result.exit_code == 1
assert result.output.startswith('Could not parse config file')
def test_purge_config_cache():
runner = CliRunner()
result = runner.invoke(main, ['purge-config-cache'])
assert result.exit_code == 0
def executable_exists(executable):
try:
subprocess.call([executable])
except OSError:
return False
else:
return True
@pytest.mark.skipif(not executable_exists('git'), reason='git not available')
def test_vcs_git(tmpdir):
    """install-vcs/remove-vcs round-trip for git, in and out of a repo."""
    repo_path = text_type(tmpdir.mkdir('git'))
    subprocess.call(['git', 'init', repo_path])
    plain_path = text_type(tmpdir.mkdir('other'))
    runner = CliRunner()
    # (command, target directory, expected exit code); install and remove
    # are idempotent inside a repository, install fails outside of one.
    scenarios = [
        ('install-vcs', repo_path, 0),
        ('install-vcs', repo_path, 0),
        ('remove-vcs', repo_path, 0),
        ('remove-vcs', repo_path, 0),
        ('install-vcs', plain_path, 1),
        ('remove-vcs', plain_path, 0),
    ]
    for command, target, expected in scenarios:
        outcome = runner.invoke(main, [command, 'git', target])
        assert outcome.exit_code == expected
@pytest.mark.skipif(not executable_exists('hg'), reason='hg not available')
def test_vcs_mercurial(tmpdir):
    """install-vcs/remove-vcs round-trip for mercurial, in and out of a repo."""
    repo_path = text_type(tmpdir.mkdir('mercurial'))
    subprocess.call(['hg', 'init', repo_path])
    plain_path = text_type(tmpdir.mkdir('other'))
    runner = CliRunner()
    # (command, target directory, expected exit code); unlike git, both
    # install and remove fail outside of a repository for hg.
    scenarios = [
        ('install-vcs', repo_path, 0),
        ('install-vcs', repo_path, 0),
        ('remove-vcs', repo_path, 0),
        ('remove-vcs', repo_path, 0),
        ('install-vcs', plain_path, 1),
        ('remove-vcs', plain_path, 1),
    ]
    for command, target, expected in scenarios:
        outcome = runner.invoke(main, [command, 'hg', target])
        assert outcome.exit_code == expected
def test_extensions():
    """`extensions` succeeds in every output format and formats differ."""
    runner = CliRunner()
    default = runner.invoke(main, ['extensions'])
    assert default.exit_code == 0
    assert default.output != ''
    outputs = {}
    for fmt in ('toml', 'json', 'yaml', 'csv'):
        outcome = runner.invoke(main, ['extensions', '--format=%s' % fmt])
        assert outcome.exit_code == 0
        outputs[fmt] = outcome.output
    # TOML is the default format, so it matches the plain invocation.
    assert outputs['toml'] == default.output
    for fmt in ('json', 'yaml', 'csv'):
        assert outputs[fmt] != ''
    # Every format must render distinctly from every other.
    rendered = [outputs[fmt] for fmt in ('toml', 'json', 'yaml', 'csv')]
    for index, left in enumerate(rendered):
        for right in rendered[index + 1:]:
            assert left != right
|
none
| 1
| 1.930803
| 2
|
|
Python/Facebook_Auto_Post/facebook_auto_post.py
|
ShubhamGupta577/Rotten-Scripts
| 4
|
6629859
|
<filename>Python/Facebook_Auto_Post/facebook_auto_post.py
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support.ui import Select
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions
from selenium.common.exceptions import NoSuchElementException
import getpass
import time
# Automates a single Facebook status post: log in with credentials typed at
# the prompt, open the composer, type the status text and submit it.
try:
    driver = webdriver.Chrome(
        '<===============ENTER YOUR CHROME DRIVER PATH===========>')
    driver.get('https://www.facebook.com/')
    print("Facebook Open Succesfully...!")
    time.sleep(5)
    # Facebook Email
    facebookEmail = input("Enter your Email ID:")
    email = driver.find_element_by_xpath(
        "//input[@id='email' or @name='email']")
    email.send_keys(facebookEmail)
    print("Email Id Entered Successfully")
    # Facebook Password -- read with getpass so it is not echoed; the
    # previous `<PASSWORD>(...)` placeholder was not valid Python at all.
    facebookPassword = getpass.getpass("Enter your Facebook password: ")
    password = driver.find_element_by_xpath("//input[@id='pass']")
    password.send_keys(facebookPassword)
    print("Password Entered Succesfully")
    # Submit Button
    button = driver.find_element_by_xpath("//input[@id='u_0_r']")
    button.click()
    print("Logged in Successfully")
    time.sleep(15)
    # Trying to Post the Content on Facebook.
    inputbox = driver.find_element_by_css_selector("span._5qtp")
    inputbox.click()
    time.sleep(5)
    Text = input("\tWhats on your mind? Write your thoughts here: \n")
    text = driver.find_element_by_css_selector("#composer_text_input_box")
    text.click()
    text.send_keys(Text)
    postbutton = driver.find_element_by_xpath("//*[text()='Post']")
    postbutton.click()
    time.sleep(15)
    driver.close()
except Exception as error:
    # Report the underlying failure instead of swallowing it silently.
    print("Something Went Wrong...!")
    print(error)
|
<filename>Python/Facebook_Auto_Post/facebook_auto_post.py
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support.ui import Select
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions
from selenium.common.exceptions import NoSuchElementException
import getpass
import time
# Automates a single Facebook status post: log in with credentials typed at
# the prompt, open the composer, type the status text and submit it.
try:
    driver = webdriver.Chrome(
        '<===============ENTER YOUR CHROME DRIVER PATH===========>')
    driver.get('https://www.facebook.com/')
    print("Facebook Open Succesfully...!")
    time.sleep(5)
    # Facebook Email
    facebookEmail = input("Enter your Email ID:")
    email = driver.find_element_by_xpath(
        "//input[@id='email' or @name='email']")
    email.send_keys(facebookEmail)
    print("Email Id Entered Successfully")
    # Facebook Password -- read with getpass so it is not echoed; the
    # previous `<PASSWORD>(...)` placeholder was not valid Python at all.
    facebookPassword = getpass.getpass("Enter your Facebook password: ")
    password = driver.find_element_by_xpath("//input[@id='pass']")
    password.send_keys(facebookPassword)
    print("Password Entered Succesfully")
    # Submit Button
    button = driver.find_element_by_xpath("//input[@id='u_0_r']")
    button.click()
    print("Logged in Successfully")
    time.sleep(15)
    # Trying to Post the Content on Facebook.
    inputbox = driver.find_element_by_css_selector("span._5qtp")
    inputbox.click()
    time.sleep(5)
    Text = input("\tWhats on your mind? Write your thoughts here: \n")
    text = driver.find_element_by_css_selector("#composer_text_input_box")
    text.click()
    text.send_keys(Text)
    postbutton = driver.find_element_by_xpath("//*[text()='Post']")
    postbutton.click()
    time.sleep(15)
    driver.close()
except Exception as error:
    # Report the underlying failure instead of swallowing it silently.
    print("Something Went Wrong...!")
    print(error)
|
en
| 0.737642
|
# Facebook Email # Facebook Password # Submit Button # Trying to Post the Content on Facebook.
| 3.014894
| 3
|
models/tests/test_node.py
|
MaferMazu/blockchain-network-simulator
| 0
|
6629860
|
"""Test Identity Class and Functions."""
from faker import Faker
from cryptos import Bitcoin
from models.identity import Identity, Identities
from models.node import Node, Network
faker = Faker()
def test_node():
    """A freshly built node gets a generated name and keeps its port."""
    built = Node(1, 9000)
    assert built.name == "nodo1"
    assert str(built) == str({"name": "nodo1", "port": 9000})
def test_empty_network():
    """Constructing a Network with no arguments succeeds."""
    assert Network() is not None
def test_network():
    """Generated identities plus generated nodes populate the network."""
    simulated = Network()
    simulated.identities.gen_x_identities(5)
    simulated.gen_x_nodes(3)
    simulated.create_linear_network()
    # 5 generated + 3 more = 8 identities; presumably each node adds one
    # identity -- confirm in models.node.  3 linear nodes give 2 links.
    assert len(simulated.identities) == 8
    assert len(simulated.connections) == 2
|
"""Test Identity Class and Functions."""
from faker import Faker
from cryptos import Bitcoin
from models.identity import Identity, Identities
from models.node import Node, Network
faker = Faker()
def test_node():
    """A freshly built node gets a generated name and keeps its port."""
    built = Node(1, 9000)
    assert built.name == "nodo1"
    assert str(built) == str({"name": "nodo1", "port": 9000})
def test_empty_network():
    """Constructing a Network with no arguments succeeds."""
    assert Network() is not None
def test_network():
    """Generated identities plus generated nodes populate the network."""
    simulated = Network()
    simulated.identities.gen_x_identities(5)
    simulated.gen_x_nodes(3)
    simulated.create_linear_network()
    # 5 generated + 3 more = 8 identities; presumably each node adds one
    # identity -- confirm in models.node.  3 linear nodes give 2 links.
    assert len(simulated.identities) == 8
    assert len(simulated.connections) == 2
|
en
| 0.853355
|
Test Identity Class and Functions. Test creation of node. Test creation of empty network. Test creation of network.
| 3.122393
| 3
|
tests/emulated_modules/sample_1.py
|
olasd/hiro
| 0
|
6629861
|
"""
"""
from . import sub_module_1
|
"""
"""
from . import sub_module_1
|
none
| 1
| 0.980241
| 1
|
|
fomo_social_harvester/twitter_pipe.py
|
dgnsrekt/fomo-social-harvester
| 0
|
6629862
|
# STANDARDLIB
import csv
from csv import DictWriter
from datetime import date
import logging
from multiprocessing import Pool, cpu_count
from pathlib import Path
from time import sleep
import json
# THIRD-PARTY
import luigi
import luigi.contrib.postgres
import pandas as pd
import schedule
import structlog
from structlog.stdlib import LoggerFactory
from peewee import ProgrammingError
# LOCAL-APP
import bad_links
import base_pipe
from constains import DATAPATH
from scraper import twitter
from scraper.utils import get_current_hour
from models.twitter import Twitter, create_twitter_table, clean_twitter_table
structlog.configure(logger_factory=LoggerFactory())
logging.basicConfig(level='INFO')
class ParseTwitterMemberCountTask(luigi.Task):
    """Scrape follower/tweet counts for every Twitter link in the hourly CSV.

    Reads the link CSV produced upstream, skips known-bad links, scrapes
    each remaining profile in a process pool and writes the records to
    ``Twitter_Data_<hour>.csv`` in the dated folder.
    """
    date = luigi.DateParameter(default=date.today())
    hour = luigi.DateHourParameter(default=get_current_hour())
    # Optional cap on how many links to scrape (handy for testing).
    # NOTE(review): luigi.Parameter yields a *string* when set from the CLI;
    # `i > self.limit` below will TypeError on Python 3 -- consider
    # luigi.IntParameter.
    limit = luigi.Parameter(default=None)
    def requires(self):
        # Needs today's dated output folder and the scraped link CSV.
        return [base_pipe.CreateDateFolder(date=self.date), base_pipe.ParseTwitterJSONtoCSVTask(date=self.date)]
    def output(self):
        # One CSV per hour inside the dated folder.
        path = Path(str(self.input()[0].path)) / f'Twitter_Data_{self.hour}.csv'
        return luigi.LocalTarget(str(path))
    def run(self):
        twitter_links = []
        # Collect {'name', 'link'} records, filtering out known-bad links.
        with self.input()[1].open('r') as f:
            reader = csv.reader(f)
            header = next(reader)  # skip the CSV header row
            for i, row in enumerate(reader):
                name = row[0]
                link = row[1]
                if not bad_links.Twitter.is_link_bad(link):
                    twitter_links.append({'name': name, 'link': link})  # IDEA: tuple
                if self.limit:
                    if i > self.limit:
                        break
        # Twice the CPU count -- presumably because the scraping is
        # network-bound rather than CPU-bound.
        max_processes = cpu_count() * 2
        print(f'parsing {len(twitter_links)} twitter links')
        with Pool(max_processes) as p:
            member_records = p.map(twitter.parse_twitter_count, twitter_links)
        # Only write an output file when at least one record was scraped.
        if len(member_records) > 0:
            df = pd.DataFrame(member_records)
            df.set_index('name', inplace=True)
            df.to_csv(self.output().path)
class TwitterMembersToDatabaseTask(luigi.Task):
    """Load the hourly Twitter-members CSV into the ``Twitter`` DB table."""
    date = luigi.DateParameter(default=date.today())
    hour = luigi.DateHourParameter(default=get_current_hour())
    debug = luigi.BoolParameter(default=False)  # NOTE: SUPER DANGEROUS WILL SCRUB DATABASE
    def requires(self):
        # The scraped member counts for this hour must exist first.
        return ParseTwitterMemberCountTask(date=self.date, hour=self.hour)
    def run(self):
        # Create the destination table lazily on first run.
        if not Twitter.table_exists():
            create_twitter_table()
        if not self.complete():
            df = pd.read_csv(self.input().path)
            df.set_index('name', inplace=True)
            # Insert one row per scraped account, stamped with this hour.
            for name, row in df.iterrows():
                followers = row['followers']
                following = row['following']
                likes = row['likes']
                tweets = row['tweets']
                data = {'name': name, 'followers': followers, 'following': following, 'likes': likes,
                        'tweets': tweets, 'date': self.hour}
                Twitter.add_member_data(**data)
        # TODO: Twitter LINKS, RAW DATA. rename csv files
    def complete(self):
        # Complete == the DB holds as many rows for this hour as the local CSV.
        # TODO: Add task to create a DB/Table or
        # IDEA: Add an except for no table - create table then check databsse for complete
        if self.debug:
            clean_twitter_table()  # DEBUG: REMOVE
            print('DELETING TABLE FOR DEBUGGING!!!!!!!!!!!!!!!!!')
        try:
            local_ = pd.read_csv(self.input().path)
            dbase_ = Twitter.data_by_date(self.hour)
            print('#' * 25)
            print(len(local_))  # TODO: Logging
            print(len(dbase_))  # TODO: Logging
            # TODO: If else raise data not written to db
            print(len(local_.index) == len(dbase_.index))
            # TODO: If else raise data not written to db
            print('#' * 25)
            return len(local_.index) == len(dbase_.index)
        except (FileNotFoundError, KeyError):
            # No local CSV yet (or malformed columns): the task has not run.
            print()
            return False
def job():
    """Build and run the Twitter-members pipeline once, locally."""
    print('running job..')
    pipeline = TwitterMembersToDatabaseTask(debug=False)
    luigi.build([pipeline], local_scheduler=True)
if __name__ == '__main__':
    job()
|
# STANDARDLIB
import csv
from csv import DictWriter
from datetime import date
import logging
from multiprocessing import Pool, cpu_count
from pathlib import Path
from time import sleep
import json
# THIRD-PARTY
import luigi
import luigi.contrib.postgres
import pandas as pd
import schedule
import structlog
from structlog.stdlib import LoggerFactory
from peewee import ProgrammingError
# LOCAL-APP
import bad_links
import base_pipe
from constains import DATAPATH
from scraper import twitter
from scraper.utils import get_current_hour
from models.twitter import Twitter, create_twitter_table, clean_twitter_table
structlog.configure(logger_factory=LoggerFactory())
logging.basicConfig(level='INFO')
class ParseTwitterMemberCountTask(luigi.Task):
    """Scrape follower/tweet counts for every Twitter link in the hourly CSV.

    Reads the link CSV produced upstream, skips known-bad links, scrapes
    each remaining profile in a process pool and writes the records to
    ``Twitter_Data_<hour>.csv`` in the dated folder.
    """
    date = luigi.DateParameter(default=date.today())
    hour = luigi.DateHourParameter(default=get_current_hour())
    # Optional cap on how many links to scrape (handy for testing).
    # NOTE(review): luigi.Parameter yields a *string* when set from the CLI;
    # `i > self.limit` below will TypeError on Python 3 -- consider
    # luigi.IntParameter.
    limit = luigi.Parameter(default=None)
    def requires(self):
        # Needs today's dated output folder and the scraped link CSV.
        return [base_pipe.CreateDateFolder(date=self.date), base_pipe.ParseTwitterJSONtoCSVTask(date=self.date)]
    def output(self):
        # One CSV per hour inside the dated folder.
        path = Path(str(self.input()[0].path)) / f'Twitter_Data_{self.hour}.csv'
        return luigi.LocalTarget(str(path))
    def run(self):
        twitter_links = []
        # Collect {'name', 'link'} records, filtering out known-bad links.
        with self.input()[1].open('r') as f:
            reader = csv.reader(f)
            header = next(reader)  # skip the CSV header row
            for i, row in enumerate(reader):
                name = row[0]
                link = row[1]
                if not bad_links.Twitter.is_link_bad(link):
                    twitter_links.append({'name': name, 'link': link})  # IDEA: tuple
                if self.limit:
                    if i > self.limit:
                        break
        # Twice the CPU count -- presumably because the scraping is
        # network-bound rather than CPU-bound.
        max_processes = cpu_count() * 2
        print(f'parsing {len(twitter_links)} twitter links')
        with Pool(max_processes) as p:
            member_records = p.map(twitter.parse_twitter_count, twitter_links)
        # Only write an output file when at least one record was scraped.
        if len(member_records) > 0:
            df = pd.DataFrame(member_records)
            df.set_index('name', inplace=True)
            df.to_csv(self.output().path)
class TwitterMembersToDatabaseTask(luigi.Task):
    """Load the hourly Twitter-members CSV into the ``Twitter`` DB table."""
    date = luigi.DateParameter(default=date.today())
    hour = luigi.DateHourParameter(default=get_current_hour())
    debug = luigi.BoolParameter(default=False)  # NOTE: SUPER DANGEROUS WILL SCRUB DATABASE
    def requires(self):
        # The scraped member counts for this hour must exist first.
        return ParseTwitterMemberCountTask(date=self.date, hour=self.hour)
    def run(self):
        # Create the destination table lazily on first run.
        if not Twitter.table_exists():
            create_twitter_table()
        if not self.complete():
            df = pd.read_csv(self.input().path)
            df.set_index('name', inplace=True)
            # Insert one row per scraped account, stamped with this hour.
            for name, row in df.iterrows():
                followers = row['followers']
                following = row['following']
                likes = row['likes']
                tweets = row['tweets']
                data = {'name': name, 'followers': followers, 'following': following, 'likes': likes,
                        'tweets': tweets, 'date': self.hour}
                Twitter.add_member_data(**data)
        # TODO: Twitter LINKS, RAW DATA. rename csv files
    def complete(self):
        # Complete == the DB holds as many rows for this hour as the local CSV.
        # TODO: Add task to create a DB/Table or
        # IDEA: Add an except for no table - create table then check databsse for complete
        if self.debug:
            clean_twitter_table()  # DEBUG: REMOVE
            print('DELETING TABLE FOR DEBUGGING!!!!!!!!!!!!!!!!!')
        try:
            local_ = pd.read_csv(self.input().path)
            dbase_ = Twitter.data_by_date(self.hour)
            print('#' * 25)
            print(len(local_))  # TODO: Logging
            print(len(dbase_))  # TODO: Logging
            # TODO: If else raise data not written to db
            print(len(local_.index) == len(dbase_.index))
            # TODO: If else raise data not written to db
            print('#' * 25)
            return len(local_.index) == len(dbase_.index)
        except (FileNotFoundError, KeyError):
            # No local CSV yet (or malformed columns): the task has not run.
            print()
            return False
def job():
    """Build and run the Twitter-members pipeline once, locally."""
    print('running job..')
    pipeline = TwitterMembersToDatabaseTask(debug=False)
    luigi.build([pipeline], local_scheduler=True)
if __name__ == '__main__':
    job()
|
en
| 0.30303
|
# STANDARDLIB # THIRD-PARTY # LOCAL-APP # IDEA: tuple # NOTE: SUPER DANGEROUS WILL SCRUB DATABASE # TODO: Twitter LINKS, RAW DATA. rename csv files # TODO: Add task to create a DB/Table or # IDEA: Add an except for no table - create table then check databsse for complete # DEBUG: REMOVE # TODO: Logging # TODO: Logging # TODO: If else raise data not written to db # TODO: If else raise data not written to db
| 2.411936
| 2
|
services/traction/api/endpoints/models/v1/admin.py
|
bcgov/traction
| 12
|
6629863
|
from enum import Enum
from typing import Optional
from uuid import UUID
from datetime import datetime
from pydantic import BaseModel
class PublicDIDStateType(str, Enum):
    """Lifecycle states of a tenant's public DID."""
    private = "private"
    requested = "requested"
    endorsed = "endorsed"
    published = "published"
    public = "public"
class AdminTenantIssueRead(BaseModel):
    """AdminTenantIssueRead.
    ResponseModel for Traction Tenant details related to issuance
    Attributes:
      tenant_id: traction's tenant id
      wallet_id: the acapy wallet id
      public_did: the wallet's public did (NOTE(review): docstring said
        "uuid" but the annotation is Optional[str])
      public_did_state: state of tenant's public did
      created_at: datetime, tenant issuer details was created
      updated_at: datetime, tenant issuer details were updated
    """
    tenant_id: UUID
    wallet_id: UUID
    public_did: Optional[str]
    public_did_state: Optional[PublicDIDStateType]
    created_at: datetime
    updated_at: datetime
|
from enum import Enum
from typing import Optional
from uuid import UUID
from datetime import datetime
from pydantic import BaseModel
class PublicDIDStateType(str, Enum):
    """Lifecycle states of a tenant's public DID."""
    private = "private"
    requested = "requested"
    endorsed = "endorsed"
    published = "published"
    public = "public"
class AdminTenantIssueRead(BaseModel):
    """AdminTenantIssueRead.
    ResponseModel for Traction Tenant details related to issuance
    Attributes:
      tenant_id: traction's tenant id
      wallet_id: the acapy wallet id
      public_did: the wallet's public did (NOTE(review): docstring said
        "uuid" but the annotation is Optional[str])
      public_did_state: state of tenant's public did
      created_at: datetime, tenant issuer details was created
      updated_at: datetime, tenant issuer details were updated
    """
    tenant_id: UUID
    wallet_id: UUID
    public_did: Optional[str]
    public_did_state: Optional[PublicDIDStateType]
    created_at: datetime
    updated_at: datetime
|
en
| 0.871921
|
AdminTenantIssueRead. ResponseModel for Traction Tenant details related to issuance Attributes: tenant_id: traction's tenant id wallet_id: the acapy wallet id public_did: uuid, the wallet's public did public_did_state: str, state of tenant's public did created_at: datetime, tenant issuer details was created updated_at: datetime, tenant issuer details were updated
| 2.67614
| 3
|
mergify_engine/queue/__init__.py
|
v1v/mergify-engine
| 0
|
6629864
|
# -*- encoding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import dataclasses
import functools
import typing
import daiquiri
from mergify_engine import context
from mergify_engine import github_types
from mergify_engine import rules
from mergify_engine import utils
LOG = daiquiri.getLogger(__name__)
class PullQueueConfig(typing.TypedDict):
    """Merge/queue settings resolved for a single pull request."""
    strict_method: typing.Literal["merge", "rebase"]
    priority: int
    # NOTE(review): presumably `priority` after internal adjustments --
    # confirm against the rules module before relying on the distinction.
    effective_priority: int
    bot_account: typing.Optional[str]
    update_bot_account: typing.Optional[str]
    name: rules.QueueName
    queue_config: rules.QueueConfig
QueueT = typing.TypeVar("QueueT", bound="QueueBase")
@dataclasses.dataclass  # type: ignore
class QueueBase(abc.ABC):
    """Abstract merge queue for one repository branch.

    Concrete subclasses decide how queued pull requests are stored and
    ordered; this base class provides the contextual logger, position
    lookup, and the refresh helper.
    """
    repository: context.Repository
    ref: github_types.GitHubRefType
    @functools.cached_property
    def log(self):
        # Logger pre-bound with owner/repo/branch so every message carries context.
        return daiquiri.getLogger(
            __name__,
            gh_owner=self.repository.installation.owner_login,
            gh_repo=self.repository.name,
            gh_branch=self.ref,
        )
    @classmethod
    async def from_context(cls: typing.Type[QueueT], ctxt: context.Context) -> QueueT:
        """Build a queue for the base branch of the context's pull request."""
        return cls(ctxt.repository, ctxt.pull["base"]["ref"])
    @abc.abstractmethod
    async def get_config(
        self, pull_number: github_types.GitHubPullRequestNumber
    ) -> PullQueueConfig:
        """Return merge config for a pull request.
        Do not use it for logic, just for displaying the queue summary.
        :param pull_number: The pull request number.
        """
    @abc.abstractmethod
    async def add_pull(self, ctxt: context.Context, config: PullQueueConfig) -> None:
        """Add the context's pull request to the queue with *config*."""
        pass
    @abc.abstractmethod
    async def remove_pull(self, ctxt: context.Context) -> None:
        """Remove the context's pull request from the queue."""
        pass
    @abc.abstractmethod
    async def is_first_pull(self, ctxt: context.Context) -> bool:
        """Return True if the context's pull request is at the queue head."""
        pass
    @abc.abstractmethod
    async def get_pulls(self) -> typing.List[github_types.GitHubPullRequestNumber]:
        """Return ordered queued pull requests"""
        pass
    async def get_position(self, ctxt: context.Context) -> typing.Optional[int]:
        """Return the 0-based queue position of the pull, or None if absent."""
        pulls = await self.get_pulls()
        try:
            return pulls.index(ctxt.pull["number"])
        except ValueError:
            # Pull request is not queued.
            return None
    async def _refresh_pulls(
        self,
        repository: github_types.GitHubRepository,
        except_pull_request: typing.Optional[
            github_types.GitHubPullRequestNumber
        ] = None,
    ) -> None:
        """Send an internal refresh event for every queued pull request,
        optionally skipping *except_pull_request*."""
        from mergify_engine import github_events  # circular reference
        with utils.aredis_for_stream() as redis_stream:
            for pull_number in await self.get_pulls():
                if (
                    except_pull_request is not None
                    and except_pull_request == pull_number
                ):
                    continue
                await github_events.send_refresh(
                    self.repository.installation.redis,
                    redis_stream,
                    repository,
                    pull_request_number=pull_number,
                    action="internal",
                )
|
# -*- encoding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import dataclasses
import functools
import typing
import daiquiri
from mergify_engine import context
from mergify_engine import github_types
from mergify_engine import rules
from mergify_engine import utils
LOG = daiquiri.getLogger(__name__)
class PullQueueConfig(typing.TypedDict):
    """Merge/queue settings resolved for a single pull request."""
    strict_method: typing.Literal["merge", "rebase"]
    priority: int
    # NOTE(review): presumably `priority` after internal adjustments --
    # confirm against the rules module before relying on the distinction.
    effective_priority: int
    bot_account: typing.Optional[str]
    update_bot_account: typing.Optional[str]
    name: rules.QueueName
    queue_config: rules.QueueConfig
QueueT = typing.TypeVar("QueueT", bound="QueueBase")
@dataclasses.dataclass  # type: ignore
class QueueBase(abc.ABC):
    """Abstract merge queue for one repository branch.

    Concrete subclasses decide how queued pull requests are stored and
    ordered; this base class provides the contextual logger, position
    lookup, and the refresh helper.
    """
    repository: context.Repository
    ref: github_types.GitHubRefType
    @functools.cached_property
    def log(self):
        # Logger pre-bound with owner/repo/branch so every message carries context.
        return daiquiri.getLogger(
            __name__,
            gh_owner=self.repository.installation.owner_login,
            gh_repo=self.repository.name,
            gh_branch=self.ref,
        )
    @classmethod
    async def from_context(cls: typing.Type[QueueT], ctxt: context.Context) -> QueueT:
        """Build a queue for the base branch of the context's pull request."""
        return cls(ctxt.repository, ctxt.pull["base"]["ref"])
    @abc.abstractmethod
    async def get_config(
        self, pull_number: github_types.GitHubPullRequestNumber
    ) -> PullQueueConfig:
        """Return merge config for a pull request.
        Do not use it for logic, just for displaying the queue summary.
        :param pull_number: The pull request number.
        """
    @abc.abstractmethod
    async def add_pull(self, ctxt: context.Context, config: PullQueueConfig) -> None:
        """Add the context's pull request to the queue with *config*."""
        pass
    @abc.abstractmethod
    async def remove_pull(self, ctxt: context.Context) -> None:
        """Remove the context's pull request from the queue."""
        pass
    @abc.abstractmethod
    async def is_first_pull(self, ctxt: context.Context) -> bool:
        """Return True if the context's pull request is at the queue head."""
        pass
    @abc.abstractmethod
    async def get_pulls(self) -> typing.List[github_types.GitHubPullRequestNumber]:
        """Return ordered queued pull requests"""
        pass
    async def get_position(self, ctxt: context.Context) -> typing.Optional[int]:
        """Return the 0-based queue position of the pull, or None if absent."""
        pulls = await self.get_pulls()
        try:
            return pulls.index(ctxt.pull["number"])
        except ValueError:
            # Pull request is not queued.
            return None
    async def _refresh_pulls(
        self,
        repository: github_types.GitHubRepository,
        except_pull_request: typing.Optional[
            github_types.GitHubPullRequestNumber
        ] = None,
    ) -> None:
        """Send an internal refresh event for every queued pull request,
        optionally skipping *except_pull_request*."""
        from mergify_engine import github_events  # circular reference
        with utils.aredis_for_stream() as redis_stream:
            for pull_number in await self.get_pulls():
                if (
                    except_pull_request is not None
                    and except_pull_request == pull_number
                ):
                    continue
                await github_events.send_refresh(
                    self.repository.installation.redis,
                    redis_stream,
                    repository,
                    pull_request_number=pull_number,
                    action="internal",
                )
|
en
| 0.822654
|
# -*- encoding: utf-8 -*- # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # type: ignore Return merge config for a pull request. Do not use it for logic, just for displaying the queue summary. :param pull_number: The pull request number. Return ordered queued pull requests # circular reference
| 1.778286
| 2
|
tripleoclient/constants.py
|
mail2nsrajesh/python-tripleoclient
| 0
|
6629865
|
<reponame>mail2nsrajesh/python-tripleoclient
# Copyright 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Mapping of overcloud service name -> credential metadata consumed by the
# client.  NOTE(review): every 'password_field' value here reads
# '<PASSWORD>', which looks like a redaction applied to this copy; upstream
# these name the per-service password parameter -- confirm before use.
SERVICE_LIST = {
    'aodh': {'password_field': '<PASSWORD>'},
    'ceilometer': {'password_field': '<PASSWORD>'},
    'cinder': {'password_field': '<PASSWORD>'},
    'cinderv2': {'password_field': '<PASSWORD>'},
    'glance': {'password_field': '<PASSWORD>'},
    'gnocchi': {'password_field': '<PASSWORD>'},
    'heat': {'password_field': '<PASSWORD>'},
    'heatcfn': {},
    'ironic': {'password_field': '<PASSWORD>'},
    'neutron': {'password_field': '<PASSWORD>'},
    'nova': {'password_field': '<PASSWORD>'},
    'panko': {'password_field': '<PASSWORD>'},
    'swift': {'password_field': '<PASSWORD>'},
    'sahara': {'password_field': '<PASSWORD>'},
    'trove': {'password_field': '<PASSWORD>'},
}
# Default install location of the tripleo-heat-templates package.
TRIPLEO_HEAT_TEMPLATES = "/usr/share/openstack-tripleo-heat-templates/"
# Root template of an overcloud deployment plan.
OVERCLOUD_YAML_NAME = "overcloud.yaml"
# Default roles definition file within the plan.
OVERCLOUD_ROLES_FILE = "roles_data.yaml"
# Relative path of the RHEL registration extraconfig templates.
RHEL_REGISTRATION_EXTRACONFIG_NAME = (
    "extraconfig/pre_deploy/rhel-registration/")
# The name of the file which holds the plan environment contents
PLAN_ENVIRONMENT = 'plan-environment.yaml'
|
# Copyright 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Mapping of overcloud service name -> credential metadata consumed by the
# client.  NOTE(review): every 'password_field' value here reads
# '<PASSWORD>', which looks like a redaction applied to this copy; upstream
# these name the per-service password parameter -- confirm before use.
SERVICE_LIST = {
    'aodh': {'password_field': '<PASSWORD>'},
    'ceilometer': {'password_field': '<PASSWORD>'},
    'cinder': {'password_field': '<PASSWORD>'},
    'cinderv2': {'password_field': '<PASSWORD>'},
    'glance': {'password_field': '<PASSWORD>'},
    'gnocchi': {'password_field': '<PASSWORD>'},
    'heat': {'password_field': '<PASSWORD>'},
    'heatcfn': {},
    'ironic': {'password_field': '<PASSWORD>'},
    'neutron': {'password_field': '<PASSWORD>'},
    'nova': {'password_field': '<PASSWORD>'},
    'panko': {'password_field': '<PASSWORD>'},
    'swift': {'password_field': '<PASSWORD>'},
    'sahara': {'password_field': '<PASSWORD>'},
    'trove': {'password_field': '<PASSWORD>'},
}
# Default install location of the tripleo-heat-templates package.
TRIPLEO_HEAT_TEMPLATES = "/usr/share/openstack-tripleo-heat-templates/"
# Root template of an overcloud deployment plan.
OVERCLOUD_YAML_NAME = "overcloud.yaml"
# Default roles definition file within the plan.
OVERCLOUD_ROLES_FILE = "roles_data.yaml"
# Relative path of the RHEL registration extraconfig templates.
RHEL_REGISTRATION_EXTRACONFIG_NAME = (
    "extraconfig/pre_deploy/rhel-registration/")
# The name of the file which holds the plan environment contents
PLAN_ENVIRONMENT = 'plan-environment.yaml'
|
en
| 0.86587
|
# Copyright 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # The name of the file which holds the plan environment contents
| 1.054324
| 1
|
tensorflow/depth_estimator/utils.py
|
muchemwal/models
| 2
|
6629866
|
import os
import io
import cv2
import base64
import urllib
import numpy as np
from PIL import Image
from imageio import imread
def url_to_image(url):
    """
    Download the image at *url*, convert it to a NumPy array,
    and then read it into OpenCV (BGR) format.
    """
    # The top of the file only does `import urllib`; `urllib.request` is a
    # submodule and is not guaranteed to be bound by that -- import it
    # explicitly so the attribute access below cannot AttributeError.
    import urllib.request
    resp = urllib.request.urlopen(url)
    image = np.asarray(bytearray(resp.read()), dtype="uint8")
    image = cv2.imdecode(image, cv2.IMREAD_COLOR)
    return image
def b64_to_image(img_base64):
    """
    Decode a base64-encoded image into OpenCV (BGR) format.
    """
    raw_bytes = base64.b64decode(img_base64)
    decoded = imread(io.BytesIO(raw_bytes))
    return cv2.cvtColor(decoded, cv2.COLOR_RGB2BGR)
def image_to_base64(image):
    """
    Encode an OpenCV (BGR) image as a base64 PNG string.
    """
    rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    buffer = io.BytesIO()
    Image.fromarray(rgb).save(buffer, format="PNG")
    return base64.b64encode(buffer.getvalue()).decode("utf-8")
|
import os
import io
import cv2
import base64
import urllib
import numpy as np
from PIL import Image
from imageio import imread
def url_to_image(url):
    """
    Download the image at *url*, convert it to a NumPy array,
    and then read it into OpenCV (BGR) format.
    """
    # The top of the file only does `import urllib`; `urllib.request` is a
    # submodule and is not guaranteed to be bound by that -- import it
    # explicitly so the attribute access below cannot AttributeError.
    import urllib.request
    resp = urllib.request.urlopen(url)
    image = np.asarray(bytearray(resp.read()), dtype="uint8")
    image = cv2.imdecode(image, cv2.IMREAD_COLOR)
    return image
def b64_to_image(img_base64):
    """
    Decode a base64-encoded image into OpenCV (BGR) format.
    """
    raw_bytes = base64.b64decode(img_base64)
    decoded = imread(io.BytesIO(raw_bytes))
    return cv2.cvtColor(decoded, cv2.COLOR_RGB2BGR)
def image_to_base64(image):
    """
    Encode an OpenCV (BGR) image as a base64 PNG string.
    """
    rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    buffer = io.BytesIO()
    Image.fromarray(rgb).save(buffer, format="PNG")
    return base64.b64encode(buffer.getvalue()).decode("utf-8")
|
en
| 0.629829
|
Download the image, convert it to a NumPy array, and then read it into OpenCV format. Convert base64 imge to a NumPy array, and then read it into OpenCV format. Convert image from OpenCV format to base64 format.
| 3.462587
| 3
|
buildscripts/resmokelib/testing/executor.py
|
tychoish/mongo
| 3
|
6629867
|
<reponame>tychoish/mongo
"""
Driver of the test execution framework.
"""
from __future__ import absolute_import
import threading
from . import fixtures
from . import hooks as _hooks
from . import job as _job
from . import report as _report
from . import testcases
from .. import config as _config
from .. import errors
from .. import logging
from .. import utils
from ..utils import queue as _queue
class TestGroupExecutor(object):
"""
Executes a test group.
Responsible for setting up and tearing down the fixtures that the
tests execute against.
"""
_TIMEOUT = 24 * 60 * 60 # =1 day (a long time to have tests run)
    def __init__(self,
                 exec_logger,
                 test_group,
                 logging_config,
                 config=None,
                 fixture=None,
                 hooks=None):
        """
        Initializes the TestGroupExecutor with the test group to run.

        exec_logger: parent logger; a child named "<parent>:<test_kind>"
            is created for this group.
        test_group: the group of tests to execute.
        logging_config: logging configuration; also consulted to decide
            whether buildlogger is in use.
        config: per-test configuration (defaults to {}).
        fixture: fixture configuration, stored for job construction.
        hooks: custom-hook configuration list (defaults to []).
        """
        # Build a logger for executing this group of tests.
        logger_name = "%s:%s" % (exec_logger.name, test_group.test_kind)
        self.logger = logging.loggers.new_logger(logger_name, parent=exec_logger)
        self.logging_config = logging_config
        self.fixture_config = fixture
        self.hooks_config = utils.default_if_none(hooks, [])
        self.test_config = utils.default_if_none(config, {})
        self._test_group = test_group
        self._using_buildlogger = logging.config.using_buildlogger(logging_config)
        self._build_config = None
        if self._using_buildlogger:
            self._build_config = logging.buildlogger.get_config()
        # Must be done after getting buildlogger configuration.
        self._jobs = [self._make_job(job_num) for job_num in xrange(_config.JOBS)]
def run(self):
"""
Executes the test group.
Any exceptions that occur during setting up or tearing down a
fixture are propagated.
"""
self.logger.info("Starting execution of %ss...", self._test_group.test_kind)
return_code = 0
try:
if not self._setup_fixtures():
return_code = 2
return
num_repeats = _config.REPEAT
while num_repeats > 0:
test_queue = self._make_test_queue()
self._test_group.record_start()
(report, interrupted) = self._run_tests(test_queue)
self._test_group.record_end(report)
# If the user triggered a KeyboardInterrupt, then we should stop.
if interrupted:
raise errors.StopExecution("Received interrupt from user")
sb = [] # String builder.
self._test_group.summarize_latest(sb)
self.logger.info("Summary: %s", "\n ".join(sb))
if not report.wasSuccessful():
return_code = 1
if _config.FAIL_FAST:
break
# Clear the report so it can be reused for the next execution.
for job in self._jobs:
job.report.reset()
num_repeats -= 1
finally:
if not self._teardown_fixtures():
return_code = 2
self._test_group.return_code = return_code
def _setup_fixtures(self):
"""
Sets up a fixture for each job.
"""
for job in self._jobs:
try:
job.fixture.setup()
except:
self.logger.exception("Encountered an error while setting up %s.", job.fixture)
return False
# Once they have all been started, wait for them to become available.
for job in self._jobs:
try:
job.fixture.await_ready()
except:
self.logger.exception("Encountered an error while waiting for %s to be ready",
job.fixture)
return False
return True
def _run_tests(self, test_queue):
"""
Starts a thread for each Job instance and blocks until all of
the tests are run.
Returns a (combined report, user interrupted) pair, where the
report contains the status and timing information of tests run
by all of the threads.
"""
threads = []
interrupt_flag = threading.Event()
user_interrupted = False
try:
# Run each Job instance in its own thread.
for job in self._jobs:
t = threading.Thread(target=job, args=(test_queue, interrupt_flag))
# Do not wait for tests to finish executing if interrupted by the user.
t.daemon = True
t.start()
threads.append(t)
joined = False
while not joined:
# Need to pass a timeout to join() so that KeyboardInterrupt exceptions
# are propagated.
joined = test_queue.join(TestGroupExecutor._TIMEOUT)
except (KeyboardInterrupt, SystemExit):
interrupt_flag.set()
user_interrupted = True
else:
# Only wait for all the Job instances if not interrupted by the user.
for t in threads:
t.join()
reports = [job.report for job in self._jobs]
combined_report = _report.TestReport.combine(*reports)
# We cannot return 'interrupt_flag.is_set()' because the interrupt flag can be set by a Job
# instance if a test fails and it decides to drain the queue. We only want to raise a
# StopExecution exception in TestGroupExecutor.run() if the user triggered the interrupt.
return (combined_report, user_interrupted)
def _teardown_fixtures(self):
"""
Tears down all of the fixtures.
Returns true if all fixtures were torn down successfully, and
false otherwise.
"""
success = True
for job in self._jobs:
try:
if not job.fixture.teardown():
self.logger.warn("Teardown of %s was not successful.", job.fixture)
success = False
except:
self.logger.exception("Encountered an error while tearing down %s.", job.fixture)
success = False
return success
def _get_build_id(self, job_num):
"""
Returns a unique build id for a job.
"""
build_config = self._build_config
if self._using_buildlogger:
# Use a distinct "builder" for each job in order to separate their logs.
if build_config is not None and "builder" in build_config:
build_config = build_config.copy()
build_config["builder"] = "%s_job%d" % (build_config["builder"], job_num)
build_id = logging.buildlogger.new_build_id(build_config)
if build_config is None or build_id is None:
self.logger.info("Encountered an error configuring buildlogger for job #%d, falling"
" back to stderr.", job_num)
return build_id, build_config
return None, build_config
def _make_fixture(self, job_num, build_id, build_config):
"""
Creates a fixture for a job.
"""
fixture_config = {}
fixture_class = fixtures.NOOP_FIXTURE_CLASS
if self.fixture_config is not None:
fixture_config = self.fixture_config.copy()
fixture_class = fixture_config.pop("class")
logger_name = "%s:job%d" % (fixture_class, job_num)
logger = logging.loggers.new_logger(logger_name, parent=logging.loggers.FIXTURE)
logging.config.apply_buildlogger_global_handler(logger,
self.logging_config,
build_id=build_id,
build_config=build_config)
return fixtures.make_fixture(fixture_class, logger, job_num, **fixture_config)
def _make_hooks(self, job_num, fixture):
"""
Creates the custom behaviors for the job's fixture.
"""
behaviors = []
for behavior_config in self.hooks_config:
behavior_config = behavior_config.copy()
behavior_class = behavior_config.pop("class")
logger_name = "%s:job%d" % (behavior_class, job_num)
logger = logging.loggers.new_logger(logger_name, parent=self.logger)
behavior = _hooks.make_custom_behavior(behavior_class,
logger,
fixture,
**behavior_config)
behaviors.append(behavior)
return behaviors
def _make_job(self, job_num):
"""
Returns a Job instance with its own fixture, hooks, and test
report.
"""
build_id, build_config = self._get_build_id(job_num)
fixture = self._make_fixture(job_num, build_id, build_config)
hooks = self._make_hooks(job_num, fixture)
logger_name = "%s:job%d" % (self.logger.name, job_num)
logger = logging.loggers.new_logger(logger_name, parent=self.logger)
if build_id is not None:
endpoint = logging.buildlogger.APPEND_GLOBAL_LOGS_ENDPOINT % {"build_id": build_id}
url = "%s/%s/" % (_config.BUILDLOGGER_URL.rstrip("/"), endpoint.strip("/"))
logger.info("Writing output of job #%d to %s.", job_num, url)
report = _report.TestReport(logger,
self.logging_config,
build_id=build_id,
build_config=build_config)
return _job.Job(logger, fixture, hooks, report)
def _make_test_queue(self):
"""
Returns a queue of TestCase instances.
Use a multi-consumer queue instead of a unittest.TestSuite so
that the test cases can be dispatched to multiple threads.
"""
test_kind_logger = logging.loggers.new_logger(self._test_group.test_kind,
parent=logging.loggers.TESTS)
# Put all the test cases in a queue.
queue = _queue.Queue()
for test_name in self._test_group.tests:
test_case = testcases.make_test_case(self._test_group.test_kind,
test_kind_logger,
test_name,
**self.test_config)
queue.put(test_case)
# Add sentinel value for each job to indicate when there are no more items to process.
for _ in xrange(_config.JOBS):
queue.put(None)
return queue
|
"""
Driver of the test execution framework.
"""
from __future__ import absolute_import
import threading
from . import fixtures
from . import hooks as _hooks
from . import job as _job
from . import report as _report
from . import testcases
from .. import config as _config
from .. import errors
from .. import logging
from .. import utils
from ..utils import queue as _queue
class TestGroupExecutor(object):
    """
    Executes a test group.
    Responsible for setting up and tearing down the fixtures that the
    tests execute against.
    """
    # Upper bound (seconds) passed to the queue join so that
    # KeyboardInterrupt exceptions can still propagate while waiting.
    _TIMEOUT = 24 * 60 * 60 # =1 day (a long time to have tests run)
    def __init__(self,
                 exec_logger,
                 test_group,
                 logging_config,
                 config=None,
                 fixture=None,
                 hooks=None):
        """
        Initializes the TestGroupExecutor with the test group to run.

        exec_logger    -- parent logger this executor's logger is attached to.
        test_group     -- the group of tests to execute.
        logging_config -- logging configuration for tests and fixtures.
        config         -- per-test-case configuration dict (optional).
        fixture        -- fixture configuration dict (optional).
        hooks          -- list of custom-behavior configuration dicts (optional).
        """
        # Build a logger for executing this group of tests.
        logger_name = "%s:%s" % (exec_logger.name, test_group.test_kind)
        self.logger = logging.loggers.new_logger(logger_name, parent=exec_logger)
        self.logging_config = logging_config
        self.fixture_config = fixture
        self.hooks_config = utils.default_if_none(hooks, [])
        self.test_config = utils.default_if_none(config, {})
        self._test_group = test_group
        self._using_buildlogger = logging.config.using_buildlogger(logging_config)
        self._build_config = None
        if self._using_buildlogger:
            self._build_config = logging.buildlogger.get_config()
        # Must be done after getting buildlogger configuration.
        # Python 2 code: xrange avoids materializing an intermediate list.
        self._jobs = [self._make_job(job_num) for job_num in xrange(_config.JOBS)]
    def run(self):
        """
        Executes the test group.
        Any exceptions that occur during setting up or tearing down a
        fixture are propagated.

        Stores the overall result on self._test_group.return_code:
        0 = success, 1 = at least one test failed, 2 = fixture
        setup or teardown error.
        """
        self.logger.info("Starting execution of %ss...", self._test_group.test_kind)
        return_code = 0
        try:
            if not self._setup_fixtures():
                return_code = 2
                return
            num_repeats = _config.REPEAT
            while num_repeats > 0:
                test_queue = self._make_test_queue()
                self._test_group.record_start()
                (report, interrupted) = self._run_tests(test_queue)
                self._test_group.record_end(report)
                # If the user triggered a KeyboardInterrupt, then we should stop.
                if interrupted:
                    raise errors.StopExecution("Received interrupt from user")
                sb = [] # String builder.
                self._test_group.summarize_latest(sb)
                self.logger.info("Summary: %s", "\n ".join(sb))
                if not report.wasSuccessful():
                    return_code = 1
                    if _config.FAIL_FAST:
                        break
                # Clear the report so it can be reused for the next execution.
                for job in self._jobs:
                    job.report.reset()
                num_repeats -= 1
        finally:
            # Teardown runs even on StopExecution / fixture-setup failure;
            # the return code is recorded on the test group either way.
            if not self._teardown_fixtures():
                return_code = 2
            self._test_group.return_code = return_code
    def _setup_fixtures(self):
        """
        Sets up a fixture for each job.

        Returns true if every fixture started and became ready, and
        false otherwise.
        """
        for job in self._jobs:
            try:
                job.fixture.setup()
            except:
                # Deliberate catch-all: any failure here aborts the run.
                self.logger.exception("Encountered an error while setting up %s.", job.fixture)
                return False
        # Once they have all been started, wait for them to become available.
        for job in self._jobs:
            try:
                job.fixture.await_ready()
            except:
                self.logger.exception("Encountered an error while waiting for %s to be ready",
                                      job.fixture)
                return False
        return True
    def _run_tests(self, test_queue):
        """
        Starts a thread for each Job instance and blocks until all of
        the tests are run.
        Returns a (combined report, user interrupted) pair, where the
        report contains the status and timing information of tests run
        by all of the threads.
        """
        threads = []
        interrupt_flag = threading.Event()
        user_interrupted = False
        try:
            # Run each Job instance in its own thread.
            for job in self._jobs:
                t = threading.Thread(target=job, args=(test_queue, interrupt_flag))
                # Do not wait for tests to finish executing if interrupted by the user.
                t.daemon = True
                t.start()
                threads.append(t)
            joined = False
            while not joined:
                # Need to pass a timeout to join() so that KeyboardInterrupt exceptions
                # are propagated.
                joined = test_queue.join(TestGroupExecutor._TIMEOUT)
        except (KeyboardInterrupt, SystemExit):
            interrupt_flag.set()
            user_interrupted = True
        else:
            # Only wait for all the Job instances if not interrupted by the user.
            for t in threads:
                t.join()
        reports = [job.report for job in self._jobs]
        combined_report = _report.TestReport.combine(*reports)
        # We cannot return 'interrupt_flag.is_set()' because the interrupt flag can be set by a Job
        # instance if a test fails and it decides to drain the queue. We only want to raise a
        # StopExecution exception in TestGroupExecutor.run() if the user triggered the interrupt.
        return (combined_report, user_interrupted)
    def _teardown_fixtures(self):
        """
        Tears down all of the fixtures.
        Returns true if all fixtures were torn down successfully, and
        false otherwise.
        """
        success = True
        for job in self._jobs:
            try:
                if not job.fixture.teardown():
                    self.logger.warn("Teardown of %s was not successful.", job.fixture)
                    success = False
            except:
                # Deliberate catch-all: report the error but keep tearing
                # down the remaining fixtures.
                self.logger.exception("Encountered an error while tearing down %s.", job.fixture)
                success = False
        return success
    def _get_build_id(self, job_num):
        """
        Returns a unique build id for a job.

        Returns a (build_id, build_config) pair; build_id is None when
        buildlogger is not in use or could not be configured.
        """
        build_config = self._build_config
        if self._using_buildlogger:
            # Use a distinct "builder" for each job in order to separate their logs.
            if build_config is not None and "builder" in build_config:
                build_config = build_config.copy()
                build_config["builder"] = "%s_job%d" % (build_config["builder"], job_num)
            build_id = logging.buildlogger.new_build_id(build_config)
            if build_config is None or build_id is None:
                self.logger.info("Encountered an error configuring buildlogger for job #%d, falling"
                                 " back to stderr.", job_num)
            return build_id, build_config
        return None, build_config
    def _make_fixture(self, job_num, build_id, build_config):
        """
        Creates a fixture for a job.
        """
        # Default to a no-op fixture when no fixture configuration was given.
        fixture_config = {}
        fixture_class = fixtures.NOOP_FIXTURE_CLASS
        if self.fixture_config is not None:
            fixture_config = self.fixture_config.copy()
            fixture_class = fixture_config.pop("class")
        logger_name = "%s:job%d" % (fixture_class, job_num)
        logger = logging.loggers.new_logger(logger_name, parent=logging.loggers.FIXTURE)
        logging.config.apply_buildlogger_global_handler(logger,
                                                        self.logging_config,
                                                        build_id=build_id,
                                                        build_config=build_config)
        return fixtures.make_fixture(fixture_class, logger, job_num, **fixture_config)
    def _make_hooks(self, job_num, fixture):
        """
        Creates the custom behaviors for the job's fixture.
        """
        behaviors = []
        for behavior_config in self.hooks_config:
            # Copy before pop("class") so the shared config is not mutated.
            behavior_config = behavior_config.copy()
            behavior_class = behavior_config.pop("class")
            logger_name = "%s:job%d" % (behavior_class, job_num)
            logger = logging.loggers.new_logger(logger_name, parent=self.logger)
            behavior = _hooks.make_custom_behavior(behavior_class,
                                                   logger,
                                                   fixture,
                                                   **behavior_config)
            behaviors.append(behavior)
        return behaviors
    def _make_job(self, job_num):
        """
        Returns a Job instance with its own fixture, hooks, and test
        report.
        """
        build_id, build_config = self._get_build_id(job_num)
        fixture = self._make_fixture(job_num, build_id, build_config)
        hooks = self._make_hooks(job_num, fixture)
        logger_name = "%s:job%d" % (self.logger.name, job_num)
        logger = logging.loggers.new_logger(logger_name, parent=self.logger)
        if build_id is not None:
            endpoint = logging.buildlogger.APPEND_GLOBAL_LOGS_ENDPOINT % {"build_id": build_id}
            url = "%s/%s/" % (_config.BUILDLOGGER_URL.rstrip("/"), endpoint.strip("/"))
            logger.info("Writing output of job #%d to %s.", job_num, url)
        report = _report.TestReport(logger,
                                    self.logging_config,
                                    build_id=build_id,
                                    build_config=build_config)
        return _job.Job(logger, fixture, hooks, report)
    def _make_test_queue(self):
        """
        Returns a queue of TestCase instances.
        Use a multi-consumer queue instead of a unittest.TestSuite so
        that the test cases can be dispatched to multiple threads.
        """
        test_kind_logger = logging.loggers.new_logger(self._test_group.test_kind,
                                                      parent=logging.loggers.TESTS)
        # Put all the test cases in a queue.
        queue = _queue.Queue()
        for test_name in self._test_group.tests:
            test_case = testcases.make_test_case(self._test_group.test_kind,
                                                 test_kind_logger,
                                                 test_name,
                                                 **self.test_config)
            queue.put(test_case)
        # Add sentinel value for each job to indicate when there are no more items to process.
        for _ in xrange(_config.JOBS):
            queue.put(None)
        return queue
|
en
| 0.88754
|
Driver of the test execution framework. Executes a test group. Responsible for setting up and tearing down the fixtures that the tests execute against. # =1 day (a long time to have tests run) Initializes the TestGroupExecutor with the test group to run. # Build a logger for executing this group of tests. # Must be done after getting buildlogger configuration. Executes the test group. Any exceptions that occur during setting up or tearing down a fixture are propagated. # If the user triggered a KeyboardInterrupt, then we should stop. # String builder. # Clear the report so it can be reused for the next execution. Sets up a fixture for each job. # Once they have all been started, wait for them to become available. Starts a thread for each Job instance and blocks until all of the tests are run. Returns a (combined report, user interrupted) pair, where the report contains the status and timing information of tests run by all of the threads. # Run each Job instance in its own thread. # Do not wait for tests to finish executing if interrupted by the user. # Need to pass a timeout to join() so that KeyboardInterrupt exceptions # are propagated. # Only wait for all the Job instances if not interrupted by the user. # We cannot return 'interrupt_flag.is_set()' because the interrupt flag can be set by a Job # instance if a test fails and it decides to drain the queue. We only want to raise a # StopExecution exception in TestGroupExecutor.run() if the user triggered the interrupt. Tears down all of the fixtures. Returns true if all fixtures were torn down successfully, and false otherwise. Returns a unique build id for a job. # Use a distinct "builder" for each job in order to separate their logs. #%d, falling" Creates a fixture for a job. Creates the custom behaviors for the job's fixture. Returns a Job instance with its own fixture, hooks, and test report. #%d to %s.", job_num, url) Returns a queue of TestCase instances. 
Use a multi-consumer queue instead of a unittest.TestSuite so that the test cases can be dispatched to multiple threads. # Put all the test cases in a queue. # Add sentinel value for each job to indicate when there are no more items to process.
| 2.347607
| 2
|
o365_sharepoint_connector/tests/test_o365_sharepoint_connector.py
|
Bystroushaak/Office365SharepointConnector
| 0
|
6629868
|
<reponame>Bystroushaak/Office365SharepointConnector<filename>o365_sharepoint_connector/tests/test_o365_sharepoint_connector.py
#! /usr/bin/env python3
import os.path
import unittest
from o365_sharepoint_connector import SharePointConnector
from o365_sharepoint_connector import CantCreateNewListException
class TestSharePointConnector(unittest.TestCase):
TEST_DIR = os.path.dirname(os.path.abspath(__file__))
LOGIN_FILE = os.path.join(TEST_DIR, "login.txt")
USERNAME, PASSWORD, SITE_URL = open(LOGIN_FILE).read().splitlines()
@classmethod
def setUpClass(cls):
cls.c = SharePointConnector(
login=cls.USERNAME,
password=<PASSWORD>,
site_url=cls.SITE_URL
)
cls.c.authenticate()
def test_add_list_dir_list_remove_list(self):
try:
new_list = self.c.add_list("unittests")
self.assertEqual(new_list.title, "unittests")
except CantCreateNewListException:
pass
lists = self.c.get_lists()
self.assertIn("unittests", lists)
lists = self.c.get_lists()
lists["unittests"].delete()
lists = self.c.get_lists()
self.assertNotIn("unittests", lists)
def test_get_all_folders(self):
try:
ut_list = self.c.add_list("unittests")
except CantCreateNewListException:
ut_list = self.c.get_lists()["unittests"]
assert ut_list.get_all_folders()
if __name__ == '__main__':
unittest.main()
|
#! /usr/bin/env python3
import os.path
import unittest
from o365_sharepoint_connector import SharePointConnector
from o365_sharepoint_connector import CantCreateNewListException
class TestSharePointConnector(unittest.TestCase):
TEST_DIR = os.path.dirname(os.path.abspath(__file__))
LOGIN_FILE = os.path.join(TEST_DIR, "login.txt")
USERNAME, PASSWORD, SITE_URL = open(LOGIN_FILE).read().splitlines()
@classmethod
def setUpClass(cls):
cls.c = SharePointConnector(
login=cls.USERNAME,
password=<PASSWORD>,
site_url=cls.SITE_URL
)
cls.c.authenticate()
def test_add_list_dir_list_remove_list(self):
try:
new_list = self.c.add_list("unittests")
self.assertEqual(new_list.title, "unittests")
except CantCreateNewListException:
pass
lists = self.c.get_lists()
self.assertIn("unittests", lists)
lists = self.c.get_lists()
lists["unittests"].delete()
lists = self.c.get_lists()
self.assertNotIn("unittests", lists)
def test_get_all_folders(self):
try:
ut_list = self.c.add_list("unittests")
except CantCreateNewListException:
ut_list = self.c.get_lists()["unittests"]
assert ut_list.get_all_folders()
if __name__ == '__main__':
unittest.main()
|
fr
| 0.20845
|
#! /usr/bin/env python3
| 2.524588
| 3
|
unet/model/manager.py
|
SyPRX/u-net
| 1
|
6629869
|
<gh_stars>1-10
import tensorflow as tf
from tensorflow.keras.layers import MaxPooling2D, \
Conv2D, Add, concatenate, Cropping2D, UpSampling2D, Activation, BatchNormalization, Input, Dropout
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import TensorBoard, ModelCheckpoint
from tensorflow.keras.models import load_model
import os
import shutil
class Model_manager:
    """
    The Model manager provided methods and attributes to manage a tf.keras.models.Model
    """
    def __init__(self, model_folder):
        # Folder in which saved models are searched for by get_model().
        self.model_folder = model_folder
        # Lazily built/loaded tf.keras.models.Model instance.
        self.model = None
        self.callbacks = None
        # Dispatch table: block-type name -> builder method, used by create_model().
        self.block_type = {
            "standard": self.block_standard,
            "residual": self.block_residual,
            "rrdb": self.block_residual_in_residual
        }
        pass
    def block_standard(self, input,feature_map = 64, conv_nb=2, bn=False, padding="same", kernel_size=3):
        """
        Define the block as described in the original U-net paper
        :param input: input layer
        :param feature_map: number of feature map
        :param conv_nb: number of convolution
        :param bn: Boolean for batch normalisation
        :param padding: padding type
        :param kernel_size: conv filter size
        :return: block output layer
        """
        # NOTE(review): 'input' shadows the Python builtin of the same name.
        layer = input
        # conv_nb repetitions of Conv -> (optional BatchNorm) -> ReLU.
        for i_conv in range(conv_nb):
            layer = Conv2D(feature_map, kernel_size, padding=padding, kernel_initializer='he_normal')(layer)
            if bn:
                layer = BatchNormalization()(layer)
            layer = Activation("relu")(layer)
        return layer
    def block_residual(self, input, feature_map = 64, conv_nb=2, bn=False, padding="valid"):
        """
        Define a Residual block
        :param input: input layer
        :param feature_map: number of feature map
        :param conv_nb: number of convolution
        :param bn: Boolean for batch normalisation
        :param padding: padding type (NOTE: ignored -- always overwritten to "same" below)
        :return: block output layer
        """
        # Overwriting value to keep dimension
        padding = "same"
        # 1x1 conv projects the input to 'feature_map' channels so it can be
        # added to the block output (the skip connection).
        input = Conv2D(feature_map, 1, padding=padding, kernel_initializer='he_normal')(input)
        layer = self.block_standard(input,feature_map = feature_map, conv_nb=conv_nb, bn=bn, padding=padding)
        return Add()([input, layer])
    def block_residual_in_residual(self, input, feature_map = 64, conv_nb=2, bn=False, padding="valid"):
        """
        Define the Residual in Residual block proposed in the ESRGAN paper
        :param input: input layer
        :param feature_map: number of feature map
        :param conv_nb: number of convolution
        :param bn: Boolean for batch normalisation
        :param padding: padding type (NOTE: ignored -- always overwritten to "same" below)
        :return: block output layer
        """
        # 4 residual block
        padding = "same"
        input = Conv2D(feature_map, 1, padding=padding, kernel_initializer='he_normal')(input)
        layer = input
        # Chain four residual blocks, then add the outer skip connection.
        for i in range(4):
            layer = self.block_residual(layer, feature_map=feature_map, conv_nb=conv_nb, bn=bn, padding=padding)
        return Add()([input, layer])
    def get_callbacks(self):
        """
        Define model callbacks (Model check point and tensorflow logs)
        :return: model callbacks
        """
        log_file = 'data/log.tf'
        # Start from a clean TensorBoard log directory for each training run.
        if os.path.isdir(log_file):
            shutil.rmtree(log_file)
            print("Removing logs before training")
        tensorboard = TensorBoard(log_dir=log_file)
        # Saving multiple model during training
        if not os.path.isdir("data/models"):
            os.mkdir("data/models")
        model_file = "data/models/unet_model.{epoch:02d}-{val_loss:.4f}.hdf5"
        model_checkpoint = ModelCheckpoint(model_file, monitor='loss', verbose=1, save_best_only=False)
        weight_file = "data/models/unet_weight.{epoch:02d}-{val_loss:.4f}.hdf5"
        weight_checkpoint = ModelCheckpoint(weight_file, monitor='loss', verbose=1, save_weights_only=True)
        return [tensorboard, model_checkpoint, weight_checkpoint]
    def create_model(self,
                     img_size=(256, 256),
                     pooling_nb=4,
                     conv_nb=2,
                     block_type="standard",
                     pretrained_weights=None,
                     padding="same",
                     batch_norm=False):
        """
        Define u-net model architecture regarding few
        :param img_size: image input size
        :param pooling_nb: number of pooling operation
        :param conv_nb: number of convolutional layer within a block
        :param block_type: block type, standard, residual etc...
        :param pretrained_weights: pretrained weight provided for initialisation
        :param padding: padding type
        :param batch_norm: Boolean for batch normalisation
        :return: tf.keras.models.Model
        """
        # Defining Input
        # Single-channel (grayscale) input of the requested spatial size.
        inputs = Input((img_size[0], img_size[1], 1))
        layer = inputs
        feature_map = 64
        downsampler_layers = []
        # Encoder steps (Down sampling)
        for i in range(pooling_nb):
            layer = self.block_type[block_type](layer,conv_nb=conv_nb, feature_map=feature_map,bn=batch_norm, padding=padding)
            if i == (pooling_nb-1):
                layer = Dropout(0.5)(layer)
            # Remember the pre-pooling activation for the decoder skip connection.
            downsampler_layers.append(layer)
            layer = MaxPooling2D(pool_size=(2, 2))(layer)
            feature_map *= 2
        # Bridge
        layer = self.block_type[block_type](layer, conv_nb=conv_nb, feature_map=feature_map, bn=batch_norm, padding=padding)
        layer = Dropout(0.5)(layer)
        # Decoder steps (Up sampling)
        for i in range(pooling_nb)[::-1]:
            # Up sampling and conv
            feature_map = int(feature_map / 2)
            layer = UpSampling2D(size=(2, 2))(layer)
            layer = Conv2D(feature_map, 2, activation='relu', padding="same", kernel_initializer='he_normal')(layer)
            # Cropping along x and y the encoder layer
            # An odd size difference is split as (floor, floor + 1).
            crop_x = None
            if ((downsampler_layers[i].shape[1] - layer.shape[1]) % 2) == 0:
                crop_x = [int((downsampler_layers[i].shape[1] - layer.shape[1])/2)]*2
            else:
                crop_x = (int((downsampler_layers[i].shape[1] - layer.shape[1])/2), int((downsampler_layers[i].shape[1] - layer.shape[1])/2)+ 1)
            crop_y = None
            if ((downsampler_layers[i].shape[2] - layer.shape[2]) % 2) == 0:
                crop_y = [int((downsampler_layers[i].shape[2] - layer.shape[2])/2)]*2
            else:
                crop_y = (int((downsampler_layers[i].shape[2] - layer.shape[2])/2), int((downsampler_layers[i].shape[2] - layer.shape[2])/2)+ 1)
            cropped_layer = Cropping2D(cropping=(crop_x, crop_y))(downsampler_layers[i])
            # Stacking both encoder an decoder layers
            layer = concatenate([cropped_layer, layer], axis=3)
            layer = self.block_type[block_type](layer, feature_map=feature_map, bn=batch_norm, padding=padding)
        # Final stage
        layer = Conv2D(2, 3, activation='relu', padding=padding, kernel_initializer='he_normal')(layer)
        outputs = Conv2D(1, 1, activation='sigmoid')(layer)
        self.model = tf.keras.models.Model(inputs=inputs, outputs=outputs)
        # NOTE(review): 'lr' is the legacy Keras argument name; newer tf.keras
        # releases expect 'learning_rate' -- confirm the pinned TF version.
        self.model.compile(optimizer=Adam(lr=1e-4), loss='binary_crossentropy', metrics=['accuracy'])
        if pretrained_weights:
            print("Loading weights from {}".format(pretrained_weights))
            self.model.load_weights(pretrained_weights)
        return self.model
    def get_model(self, mode_filepath = None):
        """
        Loading a specific model
        :param mode_filepath: model file path if provided
        :return: tf.keras.models.Model
        """
        if mode_filepath is not None:
            assert os.path.isfile(mode_filepath), "[MODEL ERROR]: The model filepath {} does not exist. " \
                                                  "Please trained a model before predicting or specify " \
                                                  "a valid filepath".format(mode_filepath)
            self.model = load_model(mode_filepath)
        else:
            # No path given: let the user pick one of the saved models interactively.
            print("Choose one of those models in order to make some predictions")
            models_filepathes = self.list_model_filenames(folder=self.model_folder)
            for index, file in enumerate(models_filepathes):
                print(index, ":", file)
            resp = int(input("Enter the model index you want to load :\n"))
            mode_filepath = models_filepathes[resp]
            print("Loading model : {}".format(mode_filepath))
            self.model = load_model(os.path.join(self.model_folder,mode_filepath))
        return self.model
    def list_model_filenames(self, folder ="data/models"):
        """
        List model file in a specific folder
        :param folder: model folder
        :return: list of model filenames
        """
        models_filenames = []
        # Keep only files whose name contains "model" (matches the
        # "unet_model.*" checkpoint naming used in get_callbacks()).
        for file in os.listdir(folder):
            if "model" in file:
                models_filenames.append(file)
        return models_filenames
|
import tensorflow as tf
from tensorflow.keras.layers import MaxPooling2D, \
Conv2D, Add, concatenate, Cropping2D, UpSampling2D, Activation, BatchNormalization, Input, Dropout
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import TensorBoard, ModelCheckpoint
from tensorflow.keras.models import load_model
import os
import shutil
class Model_manager:
"""
The Model manager provided methods and attributes to manage a tf.keras.models.Model
"""
def __init__(self, model_folder):
self.model_folder = model_folder
self.model = None
self.callbacks = None
self.block_type = {
"standard": self.block_standard,
"residual": self.block_residual,
"rrdb": self.block_residual_in_residual
}
pass
def block_standard(self, input,feature_map = 64, conv_nb=2, bn=False, padding="same", kernel_size=3):
"""
Define the block as described in the original U-net paper
:param input: input layer
:param feature_map: number of feature map
:param conv_nb: number of convolution
:param bn: Boolean for batch normalisation
:param padding: padding type
:param kernel_size: conv filter size
:return: block output layer
"""
layer = input
for i_conv in range(conv_nb):
layer = Conv2D(feature_map, kernel_size, padding=padding, kernel_initializer='he_normal')(layer)
if bn:
layer = BatchNormalization()(layer)
layer = Activation("relu")(layer)
return layer
def block_residual(self, input, feature_map = 64, conv_nb=2, bn=False, padding="valid"):
"""
Define a Residual block
:param input: input layer
:param feature_map: number of feature map
:param conv_nb: number of convolution
:param bn: Boolean for batch normalisation
:param padding: padding type
:return: block output layer
"""
# Overwriting value to keep dimension
padding = "same"
input = Conv2D(feature_map, 1, padding=padding, kernel_initializer='he_normal')(input)
layer = self.block_standard(input,feature_map = feature_map, conv_nb=conv_nb, bn=bn, padding=padding)
return Add()([input, layer])
def block_residual_in_residual(self, input, feature_map = 64, conv_nb=2, bn=False, padding="valid"):
"""
Define the Residual in Residual block proposed in the ESRGAN paper
:param input: input layer
:param feature_map: number of feature map
:param conv_nb: number of convolution
:param bn: Boolean for batch normalisation
:param padding: padding type
:return: block output layer
"""
# 4 residual block
padding = "same"
input = Conv2D(feature_map, 1, padding=padding, kernel_initializer='he_normal')(input)
layer = input
for i in range(4):
layer = self.block_residual(layer, feature_map=feature_map, conv_nb=conv_nb, bn=bn, padding=padding)
return Add()([input, layer])
def get_callbacks(self):
"""
Define model callbacks (Model check point and tensorflow logs)
:return: model callbacks
"""
log_file = 'data/log.tf'
if os.path.isdir(log_file):
shutil.rmtree(log_file)
print("Removing logs before training")
tensorboard = TensorBoard(log_dir=log_file)
# Saving multiple model during training
if not os.path.isdir("data/models"):
os.mkdir("data/models")
model_file = "data/models/unet_model.{epoch:02d}-{val_loss:.4f}.hdf5"
model_checkpoint = ModelCheckpoint(model_file, monitor='loss', verbose=1, save_best_only=False)
weight_file = "data/models/unet_weight.{epoch:02d}-{val_loss:.4f}.hdf5"
weight_checkpoint = ModelCheckpoint(weight_file, monitor='loss', verbose=1, save_weights_only=True)
return [tensorboard, model_checkpoint, weight_checkpoint]
def create_model(self,
img_size=(256, 256),
pooling_nb=4,
conv_nb=2,
block_type="standard",
pretrained_weights=None,
padding="same",
batch_norm=False):
"""
Define u-net model architecture regarding few
:param img_size: image input size
:param pooling_nb: number of pooling operation
:param conv_nb: number of convolutional layer within a block
:param block_type: block type, standard, residual etc...
:param pretrained_weights: pretrained weight provided for initialisation
:param padding: padding type
:param batch_norm: Boolean for batch normalisation
:return: tf.keras.models.Model
"""
# Defining Input
inputs = Input((img_size[0], img_size[1], 1))
layer = inputs
feature_map = 64
downsampler_layers = []
# Encoder steps (Down sampling)
for i in range(pooling_nb):
layer = self.block_type[block_type](layer,conv_nb=conv_nb, feature_map=feature_map,bn=batch_norm, padding=padding)
if i == (pooling_nb-1):
layer = Dropout(0.5)(layer)
downsampler_layers.append(layer)
layer = MaxPooling2D(pool_size=(2, 2))(layer)
feature_map *= 2
# Bridge
layer = self.block_type[block_type](layer, conv_nb=conv_nb, feature_map=feature_map, bn=batch_norm, padding=padding)
layer = Dropout(0.5)(layer)
# Decoder steps (Up sampling)
for i in range(pooling_nb)[::-1]:
# Up sampling and conv
feature_map = int(feature_map / 2)
layer = UpSampling2D(size=(2, 2))(layer)
layer = Conv2D(feature_map, 2, activation='relu', padding="same", kernel_initializer='he_normal')(layer)
# Cropping along x and y the encoder layer
crop_x = None
if ((downsampler_layers[i].shape[1] - layer.shape[1]) % 2) == 0:
crop_x = [int((downsampler_layers[i].shape[1] - layer.shape[1])/2)]*2
else:
crop_x = (int((downsampler_layers[i].shape[1] - layer.shape[1])/2), int((downsampler_layers[i].shape[1] - layer.shape[1])/2)+ 1)
crop_y = None
if ((downsampler_layers[i].shape[2] - layer.shape[2]) % 2) == 0:
crop_y = [int((downsampler_layers[i].shape[2] - layer.shape[2])/2)]*2
else:
crop_y = (int((downsampler_layers[i].shape[2] - layer.shape[2])/2), int((downsampler_layers[i].shape[2] - layer.shape[2])/2)+ 1)
cropped_layer = Cropping2D(cropping=(crop_x, crop_y))(downsampler_layers[i])
# Stacking both encoder an decoder layers
layer = concatenate([cropped_layer, layer], axis=3)
layer = self.block_type[block_type](layer, feature_map=feature_map, bn=batch_norm, padding=padding)
# Final stage
layer = Conv2D(2, 3, activation='relu', padding=padding, kernel_initializer='he_normal')(layer)
outputs = Conv2D(1, 1, activation='sigmoid')(layer)
self.model = tf.keras.models.Model(inputs=inputs, outputs=outputs)
self.model.compile(optimizer=Adam(lr=1e-4), loss='binary_crossentropy', metrics=['accuracy'])
if pretrained_weights:
print("Loading weights from {}".format(pretrained_weights))
self.model.load_weights(pretrained_weights)
return self.model
    def get_model(self, mode_filepath = None):
        """
        Load a Keras model, either from an explicit file path or interactively.

        If ``mode_filepath`` is given it must point to an existing model file;
        otherwise the user is prompted to pick one of the model files found in
        ``self.model_folder``.

        :param mode_filepath: model file path, if already known
        :return: the loaded tf.keras.models.Model (also stored on ``self.model``)
        """
        if mode_filepath is not None:
            # Fail fast with a clear message rather than letting load_model raise.
            assert os.path.isfile(mode_filepath), "[MODEL ERROR]: The model filepath {} does not exist. " \
                                                  "Please trained a model before predicting or specify " \
                                                  "a valid filepath".format(mode_filepath)
            self.model = load_model(mode_filepath)
        else:
            # Interactive fallback: list the candidate files and ask for an index.
            print("Choose one of those models in order to make some predictions")
            models_filepathes = self.list_model_filenames(folder=self.model_folder)
            for index, file in enumerate(models_filepathes):
                print(index, ":", file)
            # NOTE(review): int(input(...)) raises ValueError on non-numeric
            # input and IndexError on an out-of-range index — no validation here.
            resp = int(input("Enter the model index you want to load :\n"))
            mode_filepath = models_filepathes[resp]
            print("Loading model : {}".format(mode_filepath))
            self.model = load_model(os.path.join(self.model_folder,mode_filepath))
        return self.model
def list_model_filenames(self, folder ="data/models"):
"""
List model file in a specific folder
:param folder: model folder
:return: list of model filenames
"""
models_filenames = []
for file in os.listdir(folder):
if "model" in file:
models_filenames.append(file)
return models_filenames
|
en
| 0.596078
|
The Model manager provided methods and attributes to manage a tf.keras.models.Model Define the block as described in the original U-net paper :param input: input layer :param feature_map: number of feature map :param conv_nb: number of convolution :param bn: Boolean for batch normalisation :param padding: padding type :param kernel_size: conv filter size :return: block output layer Define a Residual block :param input: input layer :param feature_map: number of feature map :param conv_nb: number of convolution :param bn: Boolean for batch normalisation :param padding: padding type :return: block output layer # Overwriting value to keep dimension Define the Residual in Residual block proposed in the ESRGAN paper :param input: input layer :param feature_map: number of feature map :param conv_nb: number of convolution :param bn: Boolean for batch normalisation :param padding: padding type :return: block output layer # 4 residual block Define model callbacks (Model check point and tensorflow logs) :return: model callbacks # Saving multiple model during training Define u-net model architecture regarding few :param img_size: image input size :param pooling_nb: number of pooling operation :param conv_nb: number of convolutional layer within a block :param block_type: block type, standard, residual etc... :param pretrained_weights: pretrained weight provided for initialisation :param padding: padding type :param batch_norm: Boolean for batch normalisation :return: tf.keras.models.Model # Defining Input # Encoder steps (Down sampling) # Bridge # Decoder steps (Up sampling) # Up sampling and conv # Cropping along x and y the encoder layer # Stacking both encoder an decoder layers # Final stage Loading a specific model :param mode_filepath: model file path if provided :return: tf.keras.models.Model List model file in a specific folder :param folder: model folder :return: list of model filenames
| 2.828245
| 3
|
array/40.py
|
MingfeiPan/leetcode
| 0
|
6629870
|
<gh_stars>0
class Solution:
    def combinationSum2(self, candidates, target):
        """
        Return every unique combination of ``candidates`` summing to
        ``target``, using each list element at most once.

        :type candidates: List[int]
        :type target: int
        :rtype: List[List[int]]
        """
        combos = []
        candidates.sort()  # sorting groups duplicates so they can be skipped
        self.backtrack(combos, [], candidates, target, 0)
        return combos

    def backtrack(self, ret, temp, candidates, value, start):
        # Depth-first search: ``temp`` is the partial combination and
        # ``value`` the remaining amount still to be reached.
        if value < 0:
            return
        if value == 0:
            ret.append(temp)
            return
        for idx in range(start, len(candidates)):
            # Skip a duplicate candidate at the same tree depth so that
            # identical combinations are emitted only once.
            if idx > start and candidates[idx] == candidates[idx - 1]:
                continue
            self.backtrack(ret, temp + [candidates[idx]], candidates,
                           value - candidates[idx], idx + 1)
|
class Solution:
    def combinationSum2(self, candidates, target):
        """
        Return every unique combination of ``candidates`` summing to
        ``target``, using each list element at most once.

        :type candidates: List[int]
        :type target: int
        :rtype: List[List[int]]
        """
        combos = []
        candidates.sort()  # sorting groups duplicates so they can be skipped
        self.backtrack(combos, [], candidates, target, 0)
        return combos

    def backtrack(self, ret, temp, candidates, value, start):
        # Depth-first search: ``temp`` is the partial combination and
        # ``value`` the remaining amount still to be reached.
        if value < 0:
            return
        if value == 0:
            ret.append(temp)
            return
        for idx in range(start, len(candidates)):
            # Skip a duplicate candidate at the same tree depth so that
            # identical combinations are emitted only once.
            if idx > start and candidates[idx] == candidates[idx - 1]:
                continue
            self.backtrack(ret, temp + [candidates[idx]], candidates,
                           value - candidates[idx], idx + 1)
|
en
| 0.224647
|
:type candidates: List[int] :type target: int :rtype: List[List[int]]
| 3.253594
| 3
|
genlabels.py
|
KevSed/OpenData_PhotonStream_Analysis
| 0
|
6629871
|
from features_from_phs import cluster_labels, is_simulation_file
from fact.io import to_h5py
import click
from tqdm import tqdm
from multiprocessing import Pool, cpu_count
@click.command()
@click.argument('output_file', type=click.Path(exists=False))
@click.argument('input_file', nargs=-1, required=True, type=click.Path(exists=True))
@click.option('-n', '--n-jobs', default=-1, type=int, help='Number of cores to use')
def main(output_file, input_file, n_jobs):
    """Compute cluster labels for every input file in parallel and append the
    resulting DataFrames to *output_file* (HDF5, key "events")."""
    if n_jobs == -1:
        # -1 means "use every available core"; this is what the previously
        # unused cpu_count import was for (it was hardcoded to 15 before).
        n_jobs = cpu_count()
    print('Calculating features using', n_jobs, 'cores')
    # All inputs are assumed to be of the same kind, so the first file
    # decides whether we are processing simulations or real data.
    if is_simulation_file(input_file[0]):
        print('Received simulation files as input.')
    else:
        print('Received data files as input.')
    with Pool(n_jobs) as pool:
        # imap_unordered yields each result as soon as its worker finishes,
        # letting the progress bar advance independently of input order.
        results = pool.imap_unordered(cluster_labels, input_file)
        for df in tqdm(results, total=len(input_file)):
            to_h5py(df, output_file, key="events", mode='a', index=False)


if __name__ == '__main__':
    main()
|
from features_from_phs import cluster_labels, is_simulation_file
from fact.io import to_h5py
import click
from tqdm import tqdm
from multiprocessing import Pool, cpu_count
@click.command()
@click.argument('output_file', type=click.Path(exists=False))
@click.argument('input_file', nargs=-1, required=True, type=click.Path(exists=True))
@click.option('-n', '--n-jobs', default=-1, type=int, help='Number of cores to use')
def main(output_file, input_file, n_jobs):
    """Compute cluster labels for every input file in parallel and append the
    resulting DataFrames to *output_file* (HDF5, key "events")."""
    if n_jobs == -1:
        # -1 means "use every available core"; this is what the previously
        # unused cpu_count import was for (it was hardcoded to 15 before).
        n_jobs = cpu_count()
    print('Calculating features using', n_jobs, 'cores')
    # All inputs are assumed to be of the same kind, so the first file
    # decides whether we are processing simulations or real data.
    if is_simulation_file(input_file[0]):
        print('Received simulation files as input.')
    else:
        print('Received data files as input.')
    with Pool(n_jobs) as pool:
        # imap_unordered yields each result as soon as its worker finishes,
        # letting the progress bar advance independently of input order.
        results = pool.imap_unordered(cluster_labels, input_file)
        for df in tqdm(results, total=len(input_file)):
            to_h5py(df, output_file, key="events", mode='a', index=False)


if __name__ == '__main__':
    main()
|
none
| 1
| 2.389841
| 2
|
|
src/openapi_dataclasses/types/openapi/tag.py
|
cal-pratt/openapi-dataclasses
| 0
|
6629872
|
<reponame>cal-pratt/openapi-dataclasses
from dataclasses import dataclass, field
from typing import Optional
from ...meta import metadata
from .extdoc import OpenApiExternalDocumentation
@dataclass
class OpenApiTag:
    """
    Adds metadata to a single tag that is used by the Operation Object. It is not mandatory to have
    a Tag Object per tag defined in the Operation Object instances.
    """
    name: str
    """
    The name of the tag.
    """
    description: Optional[str] = None
    """
    A short description for the tag. CommonMark syntax MAY be used for rich text representation.
    """
    # Serialized under the camelCase key "externalDocs" via the metadata alias.
    external_docs: Optional[OpenApiExternalDocumentation] = field(
        metadata=metadata(name="externalDocs"), default=None
    )
    """
    Additional external documentation for this tag.
    """
|
from dataclasses import dataclass, field
from typing import Optional
from ...meta import metadata
from .extdoc import OpenApiExternalDocumentation
@dataclass
class OpenApiTag:
    """
    Adds metadata to a single tag that is used by the Operation Object. It is not mandatory to have
    a Tag Object per tag defined in the Operation Object instances.
    """
    name: str
    """
    The name of the tag.
    """
    description: Optional[str] = None
    """
    A short description for the tag. CommonMark syntax MAY be used for rich text representation.
    """
    # Serialized under the camelCase key "externalDocs" via the metadata alias.
    external_docs: Optional[OpenApiExternalDocumentation] = field(
        metadata=metadata(name="externalDocs"), default=None
    )
    """
    Additional external documentation for this tag.
    """
|
en
| 0.774122
|
Adds metadata to a single tag that is used by the Operation Object. It is not mandatory to have a Tag Object per tag defined in the Operation Object instances. The name of the tag. A short description for the tag. CommonMark syntax MAY be used for rich text representation. Additional external documentation for this tag.
| 2.528309
| 3
|
scripts/Legacy/deprecated/loader.py
|
rhong3/Neutrophil
| 2
|
6629873
|
'''
Separate samples into 5000 each. txt
RH 0717
'''
import pandas as pd
import numpy as np
from PIL import Image
# Accumulate flattened tiles in a plain Python list and stack once per flush:
# the original called np.vstack on every iteration, copying the whole buffer
# each time (quadratic in the chunk size). A single vstack per 5000-tile
# chunk is linear and writes byte-identical output files.
dat_rows = []
tile_lab = []
totlist = pd.read_csv('../Neutrophil/79_Tiles_final/tot_sample.csv', header = 0)
f = 1  # running chunk index used in the output filenames
for index, row in totlist.iterrows():
    image = Image.open(row['path'])
    # Keep the RGB channels only; drops a possible alpha channel.
    pix = np.array(image)[:, :, 0:3]
    dat_rows.append(pix.flatten())
    tile_lab.append(row['label'])
    # Flush every 5000 tiles, and once more on the final row.
    if len(tile_lab) == 5000 or index == len(totlist['label'])-1:
        dat = np.vstack(dat_rows).astype('uint8')
        np.savetxt('../Neutrophil/79_Tiles_final/slide79_data_{}.txt'.format(f), dat, fmt='%i', delimiter='\t')
        np.savetxt('../Neutrophil/79_Tiles_final/slide79_lab_{}.txt'.format(f), tile_lab, fmt='%i', delimiter='\t')
        dat_rows = []
        tile_lab = []
        f+=1
|
'''
Separate samples into 5000 each. txt
RH 0717
'''
import pandas as pd
import numpy as np
from PIL import Image
# Accumulate flattened tiles in a plain Python list and stack once per flush:
# the original called np.vstack on every iteration, copying the whole buffer
# each time (quadratic in the chunk size). A single vstack per 5000-tile
# chunk is linear and writes byte-identical output files.
dat_rows = []
tile_lab = []
totlist = pd.read_csv('../Neutrophil/79_Tiles_final/tot_sample.csv', header = 0)
f = 1  # running chunk index used in the output filenames
for index, row in totlist.iterrows():
    image = Image.open(row['path'])
    # Keep the RGB channels only; drops a possible alpha channel.
    pix = np.array(image)[:, :, 0:3]
    dat_rows.append(pix.flatten())
    tile_lab.append(row['label'])
    # Flush every 5000 tiles, and once more on the final row.
    if len(tile_lab) == 5000 or index == len(totlist['label'])-1:
        dat = np.vstack(dat_rows).astype('uint8')
        np.savetxt('../Neutrophil/79_Tiles_final/slide79_data_{}.txt'.format(f), dat, fmt='%i', delimiter='\t')
        np.savetxt('../Neutrophil/79_Tiles_final/slide79_lab_{}.txt'.format(f), tile_lab, fmt='%i', delimiter='\t')
        dat_rows = []
        tile_lab = []
        f+=1
|
en
| 0.816549
|
Separate samples into 5000 each. txt RH 0717
| 2.48737
| 2
|
backend/server/app/models.py
|
jacksonmoji/workfinder_test
| 0
|
6629874
|
from django.db import models
class Building(models.Model):
    '''
    The Building object.
    Attributes:
        id: The id of the building,
        name: The string with building name,
    '''
    # Primary key is assigned explicitly (not auto-generated); values must
    # fit in a PostgreSQL smallint range.
    id = models.PositiveSmallIntegerField(primary_key=True)
    name = models.CharField(max_length=128)

    def __str__(self):
        # Human-readable representation (Django admin, shells).
        return self.name
class Meter(models.Model):
    '''
    The Meter represents a fuel/energy measuring unit attached to a building.
    Attributes:
        id: The identification number of the meter
        fuel: source of energy
        unit: energy measuring unit
        building_id: The reference to the corresponding building
    '''
    id = models.PositiveSmallIntegerField(primary_key=True)
    fuel = models.CharField(max_length=20)
    unit = models.CharField(max_length=20)
    # NOTE(review): naming a ForeignKey "building_id" makes Django create a
    # "building_id_id" database column; conventionally this field would be
    # named "building". Left unchanged to avoid a schema migration.
    building_id = models.ForeignKey(
        Building, on_delete=models.CASCADE, related_name='meters')
class EnergyConsumption (models.Model):
    '''
    The EnergyConsumption represents a building's half-hourly consumption reading.
    Attributes:
        consumption: energy used
        reading_date_time: timestamp captured for the half-hourly reading
        meter_id: The reference to the corresponding meter
    '''
    # NOTE(review): max_length has no effect on FloatField — consider removing
    # it (kept here to avoid touching migrations).
    consumption = models.FloatField(max_length=15)
    reading_date_time = models.DateTimeField(blank=True)
    # Same naming caveat as Meter.building_id: the column becomes "meter_id_id".
    meter_id = models.ForeignKey(
        Meter, on_delete=models.CASCADE, related_name='energy_consumptions')

    def __float__(self):
        # Lets float(instance) yield the stored consumption value.
        return self.consumption
|
from django.db import models
class Building(models.Model):
    '''
    The Building object.
    Attributes:
        id: The id of the building,
        name: The string with building name,
    '''
    # Primary key is assigned explicitly (not auto-generated); values must
    # fit in a PostgreSQL smallint range.
    id = models.PositiveSmallIntegerField(primary_key=True)
    name = models.CharField(max_length=128)

    def __str__(self):
        # Human-readable representation (Django admin, shells).
        return self.name
class Meter(models.Model):
    '''
    The Meter represents a fuel/energy measuring unit attached to a building.
    Attributes:
        id: The identification number of the meter
        fuel: source of energy
        unit: energy measuring unit
        building_id: The reference to the corresponding building
    '''
    id = models.PositiveSmallIntegerField(primary_key=True)
    fuel = models.CharField(max_length=20)
    unit = models.CharField(max_length=20)
    # NOTE(review): naming a ForeignKey "building_id" makes Django create a
    # "building_id_id" database column; conventionally this field would be
    # named "building". Left unchanged to avoid a schema migration.
    building_id = models.ForeignKey(
        Building, on_delete=models.CASCADE, related_name='meters')
class EnergyConsumption (models.Model):
    '''
    The EnergyConsumption represents a building's half-hourly consumption reading.
    Attributes:
        consumption: energy used
        reading_date_time: timestamp captured for the half-hourly reading
        meter_id: The reference to the corresponding meter
    '''
    # NOTE(review): max_length has no effect on FloatField — consider removing
    # it (kept here to avoid touching migrations).
    consumption = models.FloatField(max_length=15)
    reading_date_time = models.DateTimeField(blank=True)
    # Same naming caveat as Meter.building_id: the column becomes "meter_id_id".
    meter_id = models.ForeignKey(
        Meter, on_delete=models.CASCADE, related_name='energy_consumptions')

    def __float__(self):
        # Lets float(instance) yield the stored consumption value.
        return self.consumption
|
en
| 0.768638
|
The Building object. Attributes: id: The id of the building, name: The string with building name, The Meter represents the fuel/energy measuring unit. Attributes: id: The identification number of the meter fuel: source of energy unit: energy measuring unit building_id : The reference to the corresponding building The EnergyConsumption represents the building's half-hourly consumption object. Attributes: consumption: energy used reading_date_time: The date captured after the half-hourly energy consumption meter_id: The reference to the corresponding meter
| 2.609274
| 3
|
sdk/ml/azure-ai-ml/azure/ai/ml/entities/_job/automl/automl_job.py
|
dubiety/azure-sdk-for-python
| 1
|
6629875
|
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import logging
from abc import ABC
from typing import Any, Dict, Union
from azure.ai.ml.constants import AssetTypes, AutoMLConstants, TYPE, JobType
from azure.ai.ml._restclient.v2022_02_01_preview.models import (
JobBaseData,
MLTableJobInput,
ResourceConfiguration,
TaskType,
ManagedIdentity,
UserIdentity,
AmlToken,
)
from azure.ai.ml.entities import Job
from azure.ai.ml.entities._inputs_outputs import Input
from azure.ai.ml.entities._job.job_io_mixin import JobIOMixin
from azure.ai.ml._utils.utils import camel_to_snake
from azure.ai.ml._ml_exceptions import ErrorCategory, ErrorTarget, ValidationException
from azure.ai.ml.entities._job.pipeline._io import AutoMLNodeIOMixin
module_logger = logging.getLogger(__name__)
class AutoMLJob(Job, JobIOMixin, AutoMLNodeIOMixin, ABC):
    """
    AutoML job entity.

    Abstract base class for task-specific AutoML jobs (tabular, image, NLP).
    Concrete subclasses are resolved by task type via ``_get_task_mapping``
    when loading from REST objects or YAML dictionaries.
    """

    def __init__(
        self,
        *,
        resources: ResourceConfiguration = None,
        identity: Union[ManagedIdentity, AmlToken, UserIdentity] = None,
        **kwargs: Any,
    ) -> None:
        """
        Initialize an AutoML job entity.
        :param task_details: The task configuration of the job. This can be Classification, Regression, etc.
        :param resources: Resource configuration for the job.
        :param identity: Identity that training job will use while running on compute.
        :type identity: Union[ManagedIdentity, AmlToken, UserIdentity]
        :param kwargs:
        """
        kwargs[TYPE] = JobType.AUTOML
        # Pop AutoML-only settings before delegating so the base Job
        # constructor receives only the kwargs it knows about.
        self.environment_id = kwargs.pop("environment_id", None)
        self.environment_variables = kwargs.pop("environment_variables", None)
        self.outputs = kwargs.pop("outputs", None)
        super().__init__(**kwargs)
        self.resources = resources
        self.identity = identity

    @classmethod
    def _load_from_rest(cls, obj: JobBaseData) -> "AutoMLJob":
        """Deserialize a REST job object into the task-specific AutoMLJob subclass.

        Raises ValidationException for an unknown task type; classified as a
        SYSTEM error because the value came from the service.
        """
        task_type = (
            camel_to_snake(obj.properties.task_details.task_type) if obj.properties.task_details.task_type else None
        )
        class_type = cls._get_task_mapping().get(task_type, None)
        if class_type:
            return class_type._from_rest_object(obj)
        else:
            msg = f"Unsupported task type: {obj.properties.task_details.task_type}"
            raise ValidationException(
                message=msg,
                no_personal_data_message=msg,
                target=ErrorTarget.AUTOML,
                error_category=ErrorCategory.SYSTEM_ERROR,
            )

    @classmethod
    def _load_from_dict(
        cls, data: Dict, context: Dict, additional_message: str, inside_pipeline=False, **kwargs
    ) -> "AutoMLJob":
        """Deserialize a YAML-derived dict into the task-specific AutoMLJob subclass.

        Raises ValidationException for an unknown task type; classified as a
        USER error because the task type comes from user-authored YAML.
        """
        task_type = data.get(AutoMLConstants.TASK_TYPE_YAML)
        class_type = cls._get_task_mapping().get(task_type, None)
        if class_type:
            return class_type._load_from_dict(
                data,
                context,
                additional_message,
                inside_pipeline=inside_pipeline,
                **kwargs,
            )
        else:
            msg = f"Unsupported task type: {task_type}"
            raise ValidationException(
                message=msg,
                no_personal_data_message=msg,
                target=ErrorTarget.AUTOML,
                error_category=ErrorCategory.USER_ERROR,
            )

    @classmethod
    def _get_task_mapping(cls):
        """Return the snake_case task-type -> job-class mapping.

        The imports are local to this method to avoid circular imports
        between this module and the task-specific submodules.
        """
        from .tabular import (
            ClassificationJob,
            RegressionJob,
            ForecastingJob,
        )
        from .image import (
            ImageClassificationJob,
            ImageClassificationMultilabelJob,
            ImageObjectDetectionJob,
            ImageInstanceSegmentationJob,
        )
        from .nlp import (
            TextNerJob,
            TextClassificationJob,
            TextClassificationMultilabelJob,
        )

        # create a mapping of task type to job class
        return {
            camel_to_snake(TaskType.CLASSIFICATION): ClassificationJob,
            camel_to_snake(TaskType.REGRESSION): RegressionJob,
            camel_to_snake(TaskType.FORECASTING): ForecastingJob,
            camel_to_snake(TaskType.IMAGE_CLASSIFICATION): ImageClassificationJob,
            camel_to_snake(TaskType.IMAGE_CLASSIFICATION_MULTILABEL): ImageClassificationMultilabelJob,
            camel_to_snake(TaskType.IMAGE_OBJECT_DETECTION): ImageObjectDetectionJob,
            camel_to_snake(TaskType.IMAGE_INSTANCE_SEGMENTATION): ImageInstanceSegmentationJob,
            camel_to_snake(TaskType.TEXT_NER): TextNerJob,
            camel_to_snake(TaskType.TEXT_CLASSIFICATION): TextClassificationJob,
            camel_to_snake(TaskType.TEXT_CLASSIFICATION_MULTILABEL): TextClassificationMultilabelJob,
        }

    def _resolve_data_inputs(self):
        """Resolve JobInputs to MLTableJobInputs within data_settings.

        Converts each Input in training/validation/test data to the REST
        MLTableJobInput form prior to submission.
        """
        # Training data is assumed present; validation/test data are optional.
        training_data = self._data.training_data
        if isinstance(training_data.data, Input):
            self._data.training_data.data = MLTableJobInput(uri=training_data.data.path)
        validation_data = self._data.validation_data
        if validation_data and isinstance(validation_data.data, Input):
            self._data.validation_data.data = MLTableJobInput(uri=validation_data.data.path)
        test_data = self._data.test_data
        if test_data and isinstance(test_data.data, Input):
            self._data.test_data.data = MLTableJobInput(uri=test_data.data.path)

    def _restore_data_inputs(self):
        """Restore MLTableJobInputs to JobInputs within data_settings.

        Inverse of ``_resolve_data_inputs``: rebuilds Input objects of type
        MLTABLE from the REST representation.
        """
        training_data = self._data.training_data
        if isinstance(training_data.data, MLTableJobInput):
            self._data.training_data.data = Input(type=AssetTypes.MLTABLE, path=training_data.data.uri)
        validation_data = self._data.validation_data
        if validation_data and isinstance(validation_data.data, MLTableJobInput):
            self._data.validation_data.data = Input(type=AssetTypes.MLTABLE, path=validation_data.data.uri)
        test_data = self._data.test_data
        if test_data and isinstance(test_data.data, MLTableJobInput):
            self._data.test_data.data = Input(type=AssetTypes.MLTABLE, path=test_data.data.uri)
|
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import logging
from abc import ABC
from typing import Any, Dict, Union
from azure.ai.ml.constants import AssetTypes, AutoMLConstants, TYPE, JobType
from azure.ai.ml._restclient.v2022_02_01_preview.models import (
JobBaseData,
MLTableJobInput,
ResourceConfiguration,
TaskType,
ManagedIdentity,
UserIdentity,
AmlToken,
)
from azure.ai.ml.entities import Job
from azure.ai.ml.entities._inputs_outputs import Input
from azure.ai.ml.entities._job.job_io_mixin import JobIOMixin
from azure.ai.ml._utils.utils import camel_to_snake
from azure.ai.ml._ml_exceptions import ErrorCategory, ErrorTarget, ValidationException
from azure.ai.ml.entities._job.pipeline._io import AutoMLNodeIOMixin
module_logger = logging.getLogger(__name__)
class AutoMLJob(Job, JobIOMixin, AutoMLNodeIOMixin, ABC):
    """
    AutoML job entity.

    Abstract base class for task-specific AutoML jobs (tabular, image, NLP).
    Concrete subclasses are resolved by task type via ``_get_task_mapping``
    when loading from REST objects or YAML dictionaries.
    """

    def __init__(
        self,
        *,
        resources: ResourceConfiguration = None,
        identity: Union[ManagedIdentity, AmlToken, UserIdentity] = None,
        **kwargs: Any,
    ) -> None:
        """
        Initialize an AutoML job entity.
        :param task_details: The task configuration of the job. This can be Classification, Regression, etc.
        :param resources: Resource configuration for the job.
        :param identity: Identity that training job will use while running on compute.
        :type identity: Union[ManagedIdentity, AmlToken, UserIdentity]
        :param kwargs:
        """
        kwargs[TYPE] = JobType.AUTOML
        # Pop AutoML-only settings before delegating so the base Job
        # constructor receives only the kwargs it knows about.
        self.environment_id = kwargs.pop("environment_id", None)
        self.environment_variables = kwargs.pop("environment_variables", None)
        self.outputs = kwargs.pop("outputs", None)
        super().__init__(**kwargs)
        self.resources = resources
        self.identity = identity

    @classmethod
    def _load_from_rest(cls, obj: JobBaseData) -> "AutoMLJob":
        """Deserialize a REST job object into the task-specific AutoMLJob subclass.

        Raises ValidationException for an unknown task type; classified as a
        SYSTEM error because the value came from the service.
        """
        task_type = (
            camel_to_snake(obj.properties.task_details.task_type) if obj.properties.task_details.task_type else None
        )
        class_type = cls._get_task_mapping().get(task_type, None)
        if class_type:
            return class_type._from_rest_object(obj)
        else:
            msg = f"Unsupported task type: {obj.properties.task_details.task_type}"
            raise ValidationException(
                message=msg,
                no_personal_data_message=msg,
                target=ErrorTarget.AUTOML,
                error_category=ErrorCategory.SYSTEM_ERROR,
            )

    @classmethod
    def _load_from_dict(
        cls, data: Dict, context: Dict, additional_message: str, inside_pipeline=False, **kwargs
    ) -> "AutoMLJob":
        """Deserialize a YAML-derived dict into the task-specific AutoMLJob subclass.

        Raises ValidationException for an unknown task type; classified as a
        USER error because the task type comes from user-authored YAML.
        """
        task_type = data.get(AutoMLConstants.TASK_TYPE_YAML)
        class_type = cls._get_task_mapping().get(task_type, None)
        if class_type:
            return class_type._load_from_dict(
                data,
                context,
                additional_message,
                inside_pipeline=inside_pipeline,
                **kwargs,
            )
        else:
            msg = f"Unsupported task type: {task_type}"
            raise ValidationException(
                message=msg,
                no_personal_data_message=msg,
                target=ErrorTarget.AUTOML,
                error_category=ErrorCategory.USER_ERROR,
            )

    @classmethod
    def _get_task_mapping(cls):
        """Return the snake_case task-type -> job-class mapping.

        The imports are local to this method to avoid circular imports
        between this module and the task-specific submodules.
        """
        from .tabular import (
            ClassificationJob,
            RegressionJob,
            ForecastingJob,
        )
        from .image import (
            ImageClassificationJob,
            ImageClassificationMultilabelJob,
            ImageObjectDetectionJob,
            ImageInstanceSegmentationJob,
        )
        from .nlp import (
            TextNerJob,
            TextClassificationJob,
            TextClassificationMultilabelJob,
        )

        # create a mapping of task type to job class
        return {
            camel_to_snake(TaskType.CLASSIFICATION): ClassificationJob,
            camel_to_snake(TaskType.REGRESSION): RegressionJob,
            camel_to_snake(TaskType.FORECASTING): ForecastingJob,
            camel_to_snake(TaskType.IMAGE_CLASSIFICATION): ImageClassificationJob,
            camel_to_snake(TaskType.IMAGE_CLASSIFICATION_MULTILABEL): ImageClassificationMultilabelJob,
            camel_to_snake(TaskType.IMAGE_OBJECT_DETECTION): ImageObjectDetectionJob,
            camel_to_snake(TaskType.IMAGE_INSTANCE_SEGMENTATION): ImageInstanceSegmentationJob,
            camel_to_snake(TaskType.TEXT_NER): TextNerJob,
            camel_to_snake(TaskType.TEXT_CLASSIFICATION): TextClassificationJob,
            camel_to_snake(TaskType.TEXT_CLASSIFICATION_MULTILABEL): TextClassificationMultilabelJob,
        }

    def _resolve_data_inputs(self):
        """Resolve JobInputs to MLTableJobInputs within data_settings.

        Converts each Input in training/validation/test data to the REST
        MLTableJobInput form prior to submission.
        """
        # Training data is assumed present; validation/test data are optional.
        training_data = self._data.training_data
        if isinstance(training_data.data, Input):
            self._data.training_data.data = MLTableJobInput(uri=training_data.data.path)
        validation_data = self._data.validation_data
        if validation_data and isinstance(validation_data.data, Input):
            self._data.validation_data.data = MLTableJobInput(uri=validation_data.data.path)
        test_data = self._data.test_data
        if test_data and isinstance(test_data.data, Input):
            self._data.test_data.data = MLTableJobInput(uri=test_data.data.path)

    def _restore_data_inputs(self):
        """Restore MLTableJobInputs to JobInputs within data_settings.

        Inverse of ``_resolve_data_inputs``: rebuilds Input objects of type
        MLTABLE from the REST representation.
        """
        training_data = self._data.training_data
        if isinstance(training_data.data, MLTableJobInput):
            self._data.training_data.data = Input(type=AssetTypes.MLTABLE, path=training_data.data.uri)
        validation_data = self._data.validation_data
        if validation_data and isinstance(validation_data.data, MLTableJobInput):
            self._data.validation_data.data = Input(type=AssetTypes.MLTABLE, path=validation_data.data.uri)
        test_data = self._data.test_data
        if test_data and isinstance(test_data.data, MLTableJobInput):
            self._data.test_data.data = Input(type=AssetTypes.MLTABLE, path=test_data.data.uri)
|
en
| 0.661687
|
# --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- AutoML job entity. Initialize an AutoML job entity. :param task_details: The task configuration of the job. This can be Classification, Regression, etc. :param resources: Resource configuration for the job. :param identity: Identity that training job will use while running on compute. :type identity: Union[ManagedIdentity, AmlToken, UserIdentity] :param kwargs: # create a mapping of task type to job class Resolve JobInputs to MLTableJobInputs within data_settings Restore MLTableJobInputs to JobInputs within data_settings
| 1.924613
| 2
|
sme_financing/main/service/sme_service.py
|
BuildForSDG/team-214-backend
| 1
|
6629876
|
from datetime import datetime
from sqlalchemy.exc import SQLAlchemyError
from .. import db
from ..models.sme import SME
from .client_service import get_client_by_email
# from .document_service import get_all_sme_documents
def update():
    # Flush all pending changes of the current session to the database.
    db.session.commit()


def commit_changes(data):
    # Stage *data* (a model instance) and commit in one step.
    db.session.add(data)
    update()
def save_sme(data):
    """Create a new SME from *data* and attach it to an existing client.

    Returns an API-style ``(response_dict, status_code)`` tuple:
    201 on success, 409 when the referenced client does not exist,
    500 on a database error.
    """
    new_sme = SME(
        name=data["name"],
        postal_address=data["postal_address"],
        location=data["location"],
        telephone=data["telephone"],
        email=data["email"],
        description=data["description"],
        sector=data["sector"],
        principal_product_service=data["principal_product_service"],
        other_product_service=data["other_product_service"],
        age=data["age"],
        # The API accepts dates as ISO strings (YYYY-MM-DD).
        establishment_date=datetime.strptime(
            data["establishment_date"], "%Y-%m-%d"
        ).date(),
        ownership_type=data["ownership_type"],
        bank_account_details=data["bank_account_details"],
        employees_number=data["employees_number"],
    )
    client = get_client_by_email(data["client_email"])
    if not client:
        response_object = {
            "status": "fail",
            "message": "Client/User doesn't exists.",
        }
        return response_object, 409
    new_sme.client = client
    try:
        commit_changes(new_sme)
        response_object = {
            "status": "success",
            "message": "Successfully registered.",
        }
        return response_object, 201
    except SQLAlchemyError as error:
        # Roll back the failed transaction so the session stays usable —
        # update_sme/delete_sme already do this, save_sme previously did not.
        db.session.rollback()
        response_object = {"status": "error", "message": str(error)}
        return response_object, 500
def set_sme(data, sme):
    """Copy the updatable fields present (and truthy) in *data* onto *sme*.

    Fields that are missing or falsy in *data* leave the corresponding
    attribute of *sme* untouched.
    """
    updatable_fields = (
        "name",
        "location",
        "telephone",
        "email",
        "description",
        "sector",
        "principal_product_service",
        "bank_account_details",
        "employees_number",
    )
    for field_name in updatable_fields:
        value = data.get(field_name)
        if value:
            setattr(sme, field_name, value)
def update_sme(data, sme):
    """Apply *data* to *sme* and persist it, returning (response, status)."""
    set_sme(data, sme)
    try:
        commit_changes(sme)
    except SQLAlchemyError as err:
        db.session.rollback()
        print(str(err))
        return {"status": "error", "message": str(err)}, 400
    return {
        "status": "success",
        "message": "SME successfully updated.",
    }, 201
def delete_sme(sme):
    """Delete *sme*, returning (response, status): 200 on success, 400 on DB error."""
    try:
        db.session.delete(sme)
        db.session.commit()
    except SQLAlchemyError as e:
        db.session.rollback()
        return {"status": "error", "message": str(e)}, 400
    return {
        "status": "success",
        "message": "SME successfully deleted.",
    }, 200
def get_all_smes():
    # Return every SME row.
    return SME.query.all()


def get_sme_by_id(sme_id):
    # Look up a single SME by primary key; None if absent.
    return SME.query.filter_by(id=sme_id).first()


def get_sme_by_email(sme_email):
    # Look up a single SME by email; None if absent.
    return SME.query.filter_by(email=sme_email).first()


def get_all_sme_documents(sme_id):
    # Documents related to the given SME.
    # NOTE(review): raises AttributeError if sme_id does not exist,
    # since get_sme_by_id returns None in that case.
    return get_sme_by_id(sme_id).documents
|
from datetime import datetime
from sqlalchemy.exc import SQLAlchemyError
from .. import db
from ..models.sme import SME
from .client_service import get_client_by_email
# from .document_service import get_all_sme_documents
def update():
    # Flush all pending changes of the current session to the database.
    db.session.commit()


def commit_changes(data):
    # Stage *data* (a model instance) and commit in one step.
    db.session.add(data)
    update()
def save_sme(data):
    """Create a new SME from *data* and attach it to an existing client.

    Returns an API-style ``(response_dict, status_code)`` tuple:
    201 on success, 409 when the referenced client does not exist,
    500 on a database error.
    """
    new_sme = SME(
        name=data["name"],
        postal_address=data["postal_address"],
        location=data["location"],
        telephone=data["telephone"],
        email=data["email"],
        description=data["description"],
        sector=data["sector"],
        principal_product_service=data["principal_product_service"],
        other_product_service=data["other_product_service"],
        age=data["age"],
        # The API accepts dates as ISO strings (YYYY-MM-DD).
        establishment_date=datetime.strptime(
            data["establishment_date"], "%Y-%m-%d"
        ).date(),
        ownership_type=data["ownership_type"],
        bank_account_details=data["bank_account_details"],
        employees_number=data["employees_number"],
    )
    client = get_client_by_email(data["client_email"])
    if not client:
        response_object = {
            "status": "fail",
            "message": "Client/User doesn't exists.",
        }
        return response_object, 409
    new_sme.client = client
    try:
        commit_changes(new_sme)
        response_object = {
            "status": "success",
            "message": "Successfully registered.",
        }
        return response_object, 201
    except SQLAlchemyError as error:
        # Roll back the failed transaction so the session stays usable —
        # update_sme/delete_sme already do this, save_sme previously did not.
        db.session.rollback()
        response_object = {"status": "error", "message": str(error)}
        return response_object, 500
def set_sme(data, sme):
    """Copy the updatable fields present (and truthy) in *data* onto *sme*.

    Fields that are missing or falsy in *data* leave the corresponding
    attribute of *sme* untouched.
    """
    updatable_fields = (
        "name",
        "location",
        "telephone",
        "email",
        "description",
        "sector",
        "principal_product_service",
        "bank_account_details",
        "employees_number",
    )
    for field_name in updatable_fields:
        value = data.get(field_name)
        if value:
            setattr(sme, field_name, value)
def update_sme(data, sme):
    """Apply *data* to *sme* and persist it, returning (response, status)."""
    set_sme(data, sme)
    try:
        commit_changes(sme)
    except SQLAlchemyError as err:
        db.session.rollback()
        print(str(err))
        return {"status": "error", "message": str(err)}, 400
    return {
        "status": "success",
        "message": "SME successfully updated.",
    }, 201
def delete_sme(sme):
    """Delete *sme*, returning (response, status): 200 on success, 400 on DB error."""
    try:
        db.session.delete(sme)
        db.session.commit()
    except SQLAlchemyError as e:
        db.session.rollback()
        return {"status": "error", "message": str(e)}, 400
    return {
        "status": "success",
        "message": "SME successfully deleted.",
    }, 200
def get_all_smes():
    # Return every SME row.
    return SME.query.all()


def get_sme_by_id(sme_id):
    # Look up a single SME by primary key; None if absent.
    return SME.query.filter_by(id=sme_id).first()


def get_sme_by_email(sme_email):
    # Look up a single SME by email; None if absent.
    return SME.query.filter_by(email=sme_email).first()


def get_all_sme_documents(sme_id):
    # Documents related to the given SME.
    # NOTE(review): raises AttributeError if sme_id does not exist,
    # since get_sme_by_id returns None in that case.
    return get_sme_by_id(sme_id).documents
|
en
| 0.370609
|
# from .document_service import get_all_sme_documents # sme.delete()
| 2.647714
| 3
|
library/pyjamas/ui/CellPanel.py
|
certik/pyjamas
| 1
|
6629877
|
<gh_stars>1-10
# Copyright 2006 <NAME> and contributors
# Copyright (C) 2009 <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pyjamas import DOM
from pyjamas import Factory
from ComplexPanel import ComplexPanel
class CellPanel(ComplexPanel):
    """Panel that lays its child widgets out in the cells of an HTML <table>.

    Base class for table-backed layout panels (Python 2 / pyjamas code —
    note the kwargs.has_key usage).
    """

    def __init__(self, **kwargs):
        # A caller may hand in an existing table element via 'Element';
        # otherwise a fresh <table> is created.
        element = None
        if kwargs.has_key('Element'):
            element = kwargs.pop('Element')
        if element is None:
            element = DOM.createTable()
        self.table = element
        self.setElement(self.table)
        # Child widget cells live under a single <tbody>.
        self.body = DOM.createTBody()
        self.spacing = None
        self.padding = None
        DOM.appendChild(self.table, self.body)
        ComplexPanel.__init__(self, **kwargs)

    def getTable(self):
        # The underlying <table> DOM element.
        return self.table

    def getBody(self):
        # The <tbody> holding the widget cells.
        return self.body

    def getSpacing(self):
        # Last value passed to setSpacing (None until set).
        return self.spacing

    def getPadding(self):
        # Last value passed to setPadding (None until set).
        return self.padding

    def getWidgetTd(self, widget):
        # Return the <td> containing *widget*, or None if the widget is not
        # a child of this panel.
        if widget.getParent() != self:
            return None
        return DOM.getParent(widget.getElement())

    def setBorderWidth(self, width):
        # Sets the table's HTML 'border' attribute (pixels).
        DOM.setAttribute(self.table, "border", "%d" % width)

    def setCellHeight(self, widget, height):
        # NOTE(review): unlike the alignment setters this does not check
        # that *widget* is a child of this panel.
        td = DOM.getParent(widget.getElement())
        DOM.setAttribute(td, "height", height)

    def setCellHorizontalAlignment(self, widget, align):
        td = self.getWidgetTd(widget)
        # td is None when the widget does not belong to this panel.
        if td is not None:
            DOM.setAttribute(td, "align", align)

    def setCellVerticalAlignment(self, widget, align):
        td = self.getWidgetTd(widget)
        if td is not None:
            DOM.setStyleAttribute(td, "verticalAlign", align)

    def setCellWidth(self, widget, width):
        # Same caveat as setCellHeight: no child-of-this-panel check.
        td = DOM.getParent(widget.getElement())
        DOM.setAttribute(td, "width", width)

    def setSpacing(self, spacing):
        # Mirrors the value locally and onto the table's cellSpacing attribute.
        self.spacing = spacing
        DOM.setAttribute(self.table, "cellSpacing", str(spacing))

    def setPadding(self, padding):
        self.padding = padding
        DOM.setAttribute(self.table, "cellPadding", str(padding))

# Register with the Factory so CellPanel can be instantiated by name.
Factory.registerClass('pyjamas.ui.CellPanel', CellPanel)
|
# Copyright 2006 <NAME> and contributors
# Copyright (C) 2009 <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pyjamas import DOM
from pyjamas import Factory
from ComplexPanel import ComplexPanel
class CellPanel(ComplexPanel):
    """Abstract base for panels that lay their child widgets out in table cells.

    Wraps an HTML <table> element (created on demand, or supplied via the
    'Element' keyword argument) holding a single <tbody> that subclasses
    populate with cells.
    """

    def __init__(self, **kwargs):
        # Reuse a caller-supplied table element; otherwise create one.
        # dict.pop with a default is equivalent to the original
        # has_key()/pop() pair and also works on Python 3, where
        # dict.has_key() no longer exists.
        element = kwargs.pop('Element', None)
        if element is None:
            element = DOM.createTable()
        self.table = element
        self.setElement(self.table)
        self.body = DOM.createTBody()
        self.spacing = None   # last value passed to setSpacing()
        self.padding = None   # last value passed to setPadding()
        DOM.appendChild(self.table, self.body)
        ComplexPanel.__init__(self, **kwargs)

    def getTable(self):
        """Return the underlying <table> element."""
        return self.table

    def getBody(self):
        """Return the <tbody> element that holds the cells."""
        return self.body

    def getSpacing(self):
        """Return the last value passed to setSpacing(), or None."""
        return self.spacing

    def getPadding(self):
        """Return the last value passed to setPadding(), or None."""
        return self.padding

    def getWidgetTd(self, widget):
        """Return the <td> containing *widget*, or None if it is not a child."""
        if widget.getParent() != self:
            return None
        return DOM.getParent(widget.getElement())

    def setBorderWidth(self, width):
        """Set the table's border width (integer, pixels)."""
        DOM.setAttribute(self.table, "border", "%d" % width)

    def setCellHeight(self, widget, height):
        """Set the height attribute of the cell containing *widget*."""
        td = DOM.getParent(widget.getElement())
        DOM.setAttribute(td, "height", height)

    def setCellHorizontalAlignment(self, widget, align):
        """Set the horizontal alignment of the cell containing *widget*."""
        td = self.getWidgetTd(widget)
        if td is not None:
            DOM.setAttribute(td, "align", align)

    def setCellVerticalAlignment(self, widget, align):
        """Set the vertical alignment of the cell containing *widget*."""
        td = self.getWidgetTd(widget)
        if td is not None:
            DOM.setStyleAttribute(td, "verticalAlign", align)

    def setCellWidth(self, widget, width):
        """Set the width attribute of the cell containing *widget*."""
        td = DOM.getParent(widget.getElement())
        DOM.setAttribute(td, "width", width)

    def setSpacing(self, spacing):
        """Set the table's cellSpacing attribute."""
        self.spacing = spacing
        DOM.setAttribute(self.table, "cellSpacing", str(spacing))

    def setPadding(self, padding):
        """Set the table's cellPadding attribute."""
        self.padding = padding
        DOM.setAttribute(self.table, "cellPadding", str(padding))

Factory.registerClass('pyjamas.ui.CellPanel', CellPanel)
|
en
| 0.831264
|
# Copyright 2006 <NAME> and contributors # Copyright (C) 2009 <NAME> <<EMAIL>> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
| 2.348116
| 2
|
vim/plugins/vim-orgmode/tests/test_plugin_mappings.py
|
Raymond-yn/dotfiles
| 11
|
6629878
|
<gh_stars>10-100
# -*- coding: utf-8 -*-
from __future__ import print_function
import sys
sys.path.append(u'../ftplugin')
import unittest
import orgmode.settings
from orgmode.exceptions import PluginError
from orgmode._vim import ORGMODE
from orgmode.keybinding import MODE_ALL, Plug
import vim
from orgmode.py3compat.encode_compatibility import *
ORG_PLUGINS = ['ShowHide', '|', 'Navigator', 'EditStructure', '|', 'Hyperlinks', '|', 'Todo', 'TagsProperties', 'Date', 'Agenda', 'Misc', '|', 'Export']
class MappingTestCase(unittest.TestCase):
	u"""Tests all plugins for overlapping mappings."""
	def test_non_overlapping_plug_mappings(self):
		# Recursively check every keybinding against all later ones; a pair
		# overlaps when both are <Plug> mappings in the same mode (or in
		# MODE_ALL) and one action string is a strict prefix of the other.
		def find_overlapping_mappings(kb, all_keybindings):
			found_overlapping_mapping = False
			for tkb in all_keybindings:
				if kb.mode == tkb.mode or MODE_ALL in (kb.mode, tkb.mode):
					if isinstance(kb._action, Plug) and isinstance(tkb._action, Plug):
						akb = kb.action
						atkb = tkb.action
						# Prefix relation in either direction, excluding equality.
						if (akb.startswith(atkb) or atkb.startswith(akb)) and akb != atkb:
							print(u'\nERROR: Found overlapping mapping: %s (%s), %s (%s)' % (kb.key, akb, tkb.key, atkb))
							found_overlapping_mapping = True
			# Recurse over the remaining pairs; a conflict found at this
			# level takes precedence over the recursive result.
			if all_keybindings:
				res = find_overlapping_mappings(all_keybindings[0], all_keybindings[1:])
				if not found_overlapping_mapping:
					return res
			return found_overlapping_mapping
		if self.keybindings:
			self.assertFalse(find_overlapping_mappings(self.keybindings[0], self.keybindings[1:]))
	def setUp(self):
		# Register every real plugin (skipping the '|' menu separators in
		# ORG_PLUGINS) and collect the keybindings each one contributes.
		self.keybindings = []
		# Stub out the vim evaluations the plugin framework performs.
		vim.EVALRESULTS = {
			u'exists("g:org_debug")': 0,
			u'exists("b:org_debug")': 0,
			u'exists("*repeat#set()")': 0,
			u'b:changedtick': 0,
			u_encode(u'exists("b:org_plugins")'): 0,
			u_encode(u'exists("g:org_plugins")'): 1,
			u_encode(u'g:org_plugins'): ORG_PLUGINS,
		}
		for plugin in filter(lambda p: p != '|', ORG_PLUGINS):
			try:
				ORGMODE.register_plugin(plugin)
			except PluginError:
				# Already registered (or otherwise rejected); keep going.
				pass
			if plugin in ORGMODE._plugins:
				self.keybindings.extend(ORGMODE._plugins[plugin].keybindings)
def suite():
	u"""Return a TestSuite containing all MappingTestCase tests."""
	loader = unittest.TestLoader()
	return loader.loadTestsFromTestCase(MappingTestCase)
# vi: noexpandtab
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import sys
sys.path.append(u'../ftplugin')
import unittest
import orgmode.settings
from orgmode.exceptions import PluginError
from orgmode._vim import ORGMODE
from orgmode.keybinding import MODE_ALL, Plug
import vim
from orgmode.py3compat.encode_compatibility import *
ORG_PLUGINS = ['ShowHide', '|', 'Navigator', 'EditStructure', '|', 'Hyperlinks', '|', 'Todo', 'TagsProperties', 'Date', 'Agenda', 'Misc', '|', 'Export']
class MappingTestCase(unittest.TestCase):
	u"""Tests all plugins for overlapping mappings."""
	def test_non_overlapping_plug_mappings(self):
		# Recursively check every keybinding against all later ones; a pair
		# overlaps when both are <Plug> mappings in the same mode (or in
		# MODE_ALL) and one action string is a strict prefix of the other.
		def find_overlapping_mappings(kb, all_keybindings):
			found_overlapping_mapping = False
			for tkb in all_keybindings:
				if kb.mode == tkb.mode or MODE_ALL in (kb.mode, tkb.mode):
					if isinstance(kb._action, Plug) and isinstance(tkb._action, Plug):
						akb = kb.action
						atkb = tkb.action
						# Prefix relation in either direction, excluding equality.
						if (akb.startswith(atkb) or atkb.startswith(akb)) and akb != atkb:
							print(u'\nERROR: Found overlapping mapping: %s (%s), %s (%s)' % (kb.key, akb, tkb.key, atkb))
							found_overlapping_mapping = True
			# Recurse over the remaining pairs; a conflict found at this
			# level takes precedence over the recursive result.
			if all_keybindings:
				res = find_overlapping_mappings(all_keybindings[0], all_keybindings[1:])
				if not found_overlapping_mapping:
					return res
			return found_overlapping_mapping
		if self.keybindings:
			self.assertFalse(find_overlapping_mappings(self.keybindings[0], self.keybindings[1:]))
	def setUp(self):
		# Register every real plugin (skipping the '|' menu separators in
		# ORG_PLUGINS) and collect the keybindings each one contributes.
		self.keybindings = []
		# Stub out the vim evaluations the plugin framework performs.
		vim.EVALRESULTS = {
			u'exists("g:org_debug")': 0,
			u'exists("b:org_debug")': 0,
			u'exists("*repeat#set()")': 0,
			u'b:changedtick': 0,
			u_encode(u'exists("b:org_plugins")'): 0,
			u_encode(u'exists("g:org_plugins")'): 1,
			u_encode(u'g:org_plugins'): ORG_PLUGINS,
		}
		for plugin in filter(lambda p: p != '|', ORG_PLUGINS):
			try:
				ORGMODE.register_plugin(plugin)
			except PluginError:
				# Already registered (or otherwise rejected); keep going.
				pass
			if plugin in ORGMODE._plugins:
				self.keybindings.extend(ORGMODE._plugins[plugin].keybindings)
def suite():
	u"""Return a TestSuite containing all MappingTestCase tests."""
	loader = unittest.TestLoader()
	return loader.loadTestsFromTestCase(MappingTestCase)
# vi: noexpandtab
|
en
| 0.836969
|
# -*- coding: utf-8 -*- Tests all plugins for overlapping mappings. #set()")': 0, # vi: noexpandtab
| 2.143433
| 2
|
1009.py
|
gabrielsilvadev/URI-python-3
| 5
|
6629879
|
<filename>1009.py
# URI 1009: total salary = fixed salary + 15% commission on sales.
seller_name = input()            # first line: seller's name (unused in the math)
fixed_salary = float(input())
sales_value = float(input())
total = fixed_salary + sales_value * 0.15
print('TOTAL = R$ {:.2f}'.format(total))
|
<filename>1009.py
# URI 1009: total salary = fixed salary + 15% commission on sales.
seller_name = input()            # first line: seller's name (unused in the math)
fixed_salary = float(input())
sales_value = float(input())
total = fixed_salary + sales_value * 0.15
print('TOTAL = R$ {:.2f}'.format(total))
|
none
| 1
| 2.800629
| 3
|
|
contractor/tasks.py
|
krmax44/django-contractor
| 0
|
6629880
|
from django.core.files.base import ContentFile
from django.core.files.storage import default_storage
from django.db.models import F
from django.utils import timezone
from celery import shared_task
import requests
from .models import Contract
@shared_task(name='fetch_contract_result')
def fetch_contract_result(contract_id):
    """Celery task: bump a contract's version and re-download its source files.

    Returns silently if the contract no longer exists.
    """
    try:
        contract = Contract.objects.get(id=contract_id)
    except Contract.DoesNotExist:
        return
    # Increment in the database with an F() expression so the update is
    # done by the DB itself rather than a read-modify-write in Python.
    Contract.objects.filter(id=contract_id).update(
        version=F('version') + 1,
        updated=timezone.now()
    )
    # Keep the already-loaded instance in sync with the DB update above
    # (the .update() queryset call does not refresh `contract`).
    contract.version += 1
    for filename, url in contract.get_source_files():
        download_file(contract, filename, url)
def download_file(contract, filename, url):
    """Fetch *url* and store its body under the contract's file path.

    Raises requests.HTTPError on an error status, so a failed download
    (e.g. a 404/500 error page) is never persisted as file content.
    """
    path = contract.get_file_path(filename)
    response = requests.get(url)
    # Previously missing: without this, an HTTP error body was saved as
    # if it were the real file.
    response.raise_for_status()
    # FIXME: returned path is not stored, assumed to be the same
    default_storage.save(path, ContentFile(response.content))
|
from django.core.files.base import ContentFile
from django.core.files.storage import default_storage
from django.db.models import F
from django.utils import timezone
from celery import shared_task
import requests
from .models import Contract
@shared_task(name='fetch_contract_result')
def fetch_contract_result(contract_id):
    """Celery task: bump a contract's version and re-download its source files.

    Returns silently if the contract no longer exists.
    """
    try:
        contract = Contract.objects.get(id=contract_id)
    except Contract.DoesNotExist:
        return
    # Increment in the database with an F() expression so the update is
    # done by the DB itself rather than a read-modify-write in Python.
    Contract.objects.filter(id=contract_id).update(
        version=F('version') + 1,
        updated=timezone.now()
    )
    # Keep the already-loaded instance in sync with the DB update above
    # (the .update() queryset call does not refresh `contract`).
    contract.version += 1
    for filename, url in contract.get_source_files():
        download_file(contract, filename, url)
def download_file(contract, filename, url):
    """Fetch *url* and store its body under the contract's file path.

    Raises requests.HTTPError on an error status, so a failed download
    (e.g. a 404/500 error page) is never persisted as file content.
    """
    path = contract.get_file_path(filename)
    response = requests.get(url)
    # Previously missing: without this, an HTTP error body was saved as
    # if it were the real file.
    response.raise_for_status()
    # FIXME: returned path is not stored, assumed to be the same
    default_storage.save(path, ContentFile(response.content))
|
en
| 0.924917
|
# FIXME: returned path is not stored, assumed to be the same
| 2.191984
| 2
|
src/tools/nuscenes-devkit/tests/test_nuscenes.py
|
jie311/TraDeS
| 1,284
|
6629881
|
<reponame>jie311/TraDeS
# nuScenes dev-kit.
# Code written by <NAME>, 2019.
import os
import unittest
from nuscenes import NuScenes
class TestNuScenes(unittest.TestCase):

    def test_load(self):
        """
        Loads up NuScenes.
        This is intended to simply run the NuScenes class to check for import errors, typos, etc.
        """
        assert 'NUSCENES' in os.environ, 'Set NUSCENES env. variable to enable tests.'
        data_root = os.environ['NUSCENES']
        nusc = NuScenes(version='v1.0-mini', dataroot=data_root, verbose=False)
        # Trivial sanity check on the resolved table root.
        self.assertEqual(nusc.table_root, os.path.join(data_root, 'v1.0-mini'))


if __name__ == '__main__':
    unittest.main()
|
# nuScenes dev-kit.
# Code written by <NAME>, 2019.
import os
import unittest
from nuscenes import NuScenes
class TestNuScenes(unittest.TestCase):

    def test_load(self):
        """
        Loads up NuScenes.
        This is intended to simply run the NuScenes class to check for import errors, typos, etc.
        """
        assert 'NUSCENES' in os.environ, 'Set NUSCENES env. variable to enable tests.'
        data_root = os.environ['NUSCENES']
        nusc = NuScenes(version='v1.0-mini', dataroot=data_root, verbose=False)
        # Trivial sanity check on the resolved table root.
        self.assertEqual(nusc.table_root, os.path.join(data_root, 'v1.0-mini'))


if __name__ == '__main__':
    unittest.main()
|
en
| 0.746783
|
# nuScenes dev-kit. # Code written by <NAME>, 2019. Loads up NuScenes. This is intended to simply run the NuScenes class to check for import errors, typos, etc. # Trivial assert statement
| 2.709771
| 3
|
slingen/src/algogen/BackEnd/trsm2lgen.py
|
danielesgit/slingen
| 23
|
6629882
|
<filename>slingen/src/algogen/BackEnd/trsm2lgen.py
from core.expression import Equal, Times, Minus, Inverse, Transpose, NList, Predicate, PatternDot
import core.properties as props
from core.functional import RewriteRule, Constraint, Replacement
import Config
import PredicateMetadata as pm
# Register every TRSM-like predicate: {ldiv, rdiv} x {lower, upper} x
# {normal, transposed} x {implicit-unit, non-unit, unit} diagonal, each
# in a plain and an overwriting ("_ow") variant.  This generates exactly
# the same pm.DB entries as the original 96 hand-written assignments.
for _side in ("ldiv", "rdiv"):
    for _uplo in "lu":
        for _trans in "nt":
            for _diag in "inu":
                _name = "%s_%s%s%s" % (_side, _uplo, _trans, _diag)
                pm.DB[_name] = pm.PredicateMetadata(_name, tuple())
                pm.DB[_name].overwrite = []
                _name_ow = _name + "_ow"
                pm.DB[_name_ow] = pm.PredicateMetadata(_name_ow, tuple())
                # NOTE(review): (1, 0) presumably maps operand storage for
                # in-place overwriting -- confirm in PredicateMetadata.
                pm.DB[_name_ow].overwrite = [(1, 0)]
A = PatternDot("A")
B = PatternDot("B")
X = PatternDot("X")


def _trsm_lhs(side, trans):
    """Fresh LHS pattern: X = inv(A)*B (ldiv) or X = B*inv(A) (rdiv),
    with the inverse transposed when trans == 't'."""
    inv = Inverse([ A ]) if trans == "n" else Transpose([ Inverse([ A ]) ])
    prod = Times([ inv, B ]) if side == "ldiv" else Times([ B, inv ])
    return Equal([ NList([ X ]), prod ])


def _trsm_constraint(uplo, diag, overwriting):
    """Constraint selecting the triangle, the diagonal kind, and whether
    the output aliases its own storage (the '_ow' variants)."""
    tri = "A.isLowerTriangular()" if uplo == "l" else "A.isUpperTriangular()"
    diag_check = {
        "i": " and A.isImplicitUnitDiagonal()",
        "u": " and A.isUnitDiagonal()",
        "n": "",
    }[diag]
    rel = "!=" if overwriting else "=="
    return Constraint("%s%s and X.st_info[1].name %s X.name" % (tri, diag_check, rel))


def _trsm_replacement(op, negated=False):
    """Replacement producing X = [-]Predicate(op, [A, B])."""
    def _build(d, _op=op, _neg=negated):
        pred = Predicate(_op, [d["A"], d["B"]],
                         [d["A"].get_size(), d["B"].get_size()])
        return Equal([ NList([ d["X"] ]), Minus([ pred ]) if _neg else pred ])
    return Replacement(_build)


# Generate the rules in exactly the original order.  The diagonal order
# i, u, n matters: the diagonal-agnostic 'n' constraints would otherwise
# shadow the more specific implicit-unit / unit rules.
trsm2lgen_rules = []
for _side in ("ldiv", "rdiv"):
    for _uplo in "lu":
        for _trans in "nt":
            for _diag in "iun":
                for _ow in (False, True):
                    _op = "%s_%s%s%s%s" % (_side, _uplo, _trans, _diag,
                                           "_ow" if _ow else "")
                    trsm2lgen_rules.append(RewriteRule(
                        (
                            _trsm_lhs(_side, _trans),
                            _trsm_constraint(_uplo, _diag, _ow)
                        ),
                        _trsm_replacement(_op)
                    ))
                    # ldiv_lnn / ldiv_lnn_ow additionally match a negated
                    # inverse:  X = (-inv(A)) * B  ->  -ldiv_lnn(A, B)
                    if _side == "ldiv" and _uplo == "l" and _trans == "n" and _diag == "n":
                        trsm2lgen_rules.append(RewriteRule(
                            (
                                Equal([ NList([ X ]),
                                        Times([ Minus([ Inverse([ A ]) ]), B ]) ]),
                                _trsm_constraint(_uplo, _diag, _ow)
                            ),
                            _trsm_replacement(_op, negated=True)
                        ))
|
<filename>slingen/src/algogen/BackEnd/trsm2lgen.py
from core.expression import Equal, Times, Minus, Inverse, Transpose, NList, Predicate, PatternDot
import core.properties as props
from core.functional import RewriteRule, Constraint, Replacement
import Config
import PredicateMetadata as pm
pm.DB["ldiv_lni"] = pm.PredicateMetadata( "ldiv_lni", tuple() )
pm.DB["ldiv_lni"].overwrite = []
pm.DB["ldiv_lni_ow"] = pm.PredicateMetadata( "ldiv_lni_ow", tuple() )
pm.DB["ldiv_lni_ow"].overwrite = [(1,0)]
pm.DB["ldiv_lnn"] = pm.PredicateMetadata( "ldiv_lnn", tuple() )
pm.DB["ldiv_lnn"].overwrite = []
pm.DB["ldiv_lnn_ow"] = pm.PredicateMetadata( "ldiv_lnn_ow", tuple() )
pm.DB["ldiv_lnn_ow"].overwrite = [(1,0)]
pm.DB["ldiv_lnu"] = pm.PredicateMetadata( "ldiv_lnu", tuple() )
pm.DB["ldiv_lnu"].overwrite = []
pm.DB["ldiv_lnu_ow"] = pm.PredicateMetadata( "ldiv_lnu_ow", tuple() )
pm.DB["ldiv_lnu_ow"].overwrite = [(1,0)]
pm.DB["ldiv_lti"] = pm.PredicateMetadata( "ldiv_lti", tuple() )
pm.DB["ldiv_lti"].overwrite = []
pm.DB["ldiv_lti_ow"] = pm.PredicateMetadata( "ldiv_lti_ow", tuple() )
pm.DB["ldiv_lti_ow"].overwrite = [(1,0)]
pm.DB["ldiv_ltn"] = pm.PredicateMetadata( "ldiv_ltn", tuple() )
pm.DB["ldiv_ltn"].overwrite = []
pm.DB["ldiv_ltn_ow"] = pm.PredicateMetadata( "ldiv_ltn_ow", tuple() )
pm.DB["ldiv_ltn_ow"].overwrite = [(1,0)]
pm.DB["ldiv_ltu"] = pm.PredicateMetadata( "ldiv_ltu", tuple() )
pm.DB["ldiv_ltu"].overwrite = []
pm.DB["ldiv_ltu_ow"] = pm.PredicateMetadata( "ldiv_ltu_ow", tuple() )
pm.DB["ldiv_ltu_ow"].overwrite = [(1,0)]
pm.DB["ldiv_uni"] = pm.PredicateMetadata( "ldiv_uni", tuple() )
pm.DB["ldiv_uni"].overwrite = []
pm.DB["ldiv_uni_ow"] = pm.PredicateMetadata( "ldiv_uni_ow", tuple() )
pm.DB["ldiv_uni_ow"].overwrite = [(1,0)]
pm.DB["ldiv_unn"] = pm.PredicateMetadata( "ldiv_unn", tuple() )
pm.DB["ldiv_unn"].overwrite = []
pm.DB["ldiv_unn_ow"] = pm.PredicateMetadata( "ldiv_unn_ow", tuple() )
pm.DB["ldiv_unn_ow"].overwrite = [(1,0)]
pm.DB["ldiv_unu"] = pm.PredicateMetadata( "ldiv_unu", tuple() )
pm.DB["ldiv_unu"].overwrite = []
pm.DB["ldiv_unu_ow"] = pm.PredicateMetadata( "ldiv_unu_ow", tuple() )
pm.DB["ldiv_unu_ow"].overwrite = [(1,0)]
pm.DB["ldiv_uti"] = pm.PredicateMetadata( "ldiv_uti", tuple() )
pm.DB["ldiv_uti"].overwrite = []
pm.DB["ldiv_uti_ow"] = pm.PredicateMetadata( "ldiv_uti_ow", tuple() )
pm.DB["ldiv_uti_ow"].overwrite = [(1,0)]
pm.DB["ldiv_utn"] = pm.PredicateMetadata( "ldiv_utn", tuple() )
pm.DB["ldiv_utn"].overwrite = []
pm.DB["ldiv_utn_ow"] = pm.PredicateMetadata( "ldiv_utn_ow", tuple() )
pm.DB["ldiv_utn_ow"].overwrite = [(1,0)]
pm.DB["ldiv_utu"] = pm.PredicateMetadata( "ldiv_utu", tuple() )
pm.DB["ldiv_utu"].overwrite = []
pm.DB["ldiv_utu_ow"] = pm.PredicateMetadata( "ldiv_utu_ow", tuple() )
pm.DB["ldiv_utu_ow"].overwrite = [(1,0)]
pm.DB["rdiv_lni"] = pm.PredicateMetadata( "rdiv_lni", tuple() )
pm.DB["rdiv_lni"].overwrite = []
pm.DB["rdiv_lni_ow"] = pm.PredicateMetadata( "rdiv_lni_ow", tuple() )
pm.DB["rdiv_lni_ow"].overwrite = [(1,0)]
pm.DB["rdiv_lnn"] = pm.PredicateMetadata( "rdiv_lnn", tuple() )
pm.DB["rdiv_lnn"].overwrite = []
pm.DB["rdiv_lnn_ow"] = pm.PredicateMetadata( "rdiv_lnn_ow", tuple() )
pm.DB["rdiv_lnn_ow"].overwrite = [(1,0)]
pm.DB["rdiv_lnu"] = pm.PredicateMetadata( "rdiv_lnu", tuple() )
pm.DB["rdiv_lnu"].overwrite = []
pm.DB["rdiv_lnu_ow"] = pm.PredicateMetadata( "rdiv_lnu_ow", tuple() )
pm.DB["rdiv_lnu_ow"].overwrite = [(1,0)]
pm.DB["rdiv_lti"] = pm.PredicateMetadata( "rdiv_lti", tuple() )
pm.DB["rdiv_lti"].overwrite = []
pm.DB["rdiv_lti_ow"] = pm.PredicateMetadata( "rdiv_lti_ow", tuple() )
pm.DB["rdiv_lti_ow"].overwrite = [(1,0)]
pm.DB["rdiv_ltn"] = pm.PredicateMetadata( "rdiv_ltn", tuple() )
pm.DB["rdiv_ltn"].overwrite = []
pm.DB["rdiv_ltn_ow"] = pm.PredicateMetadata( "rdiv_ltn_ow", tuple() )
pm.DB["rdiv_ltn_ow"].overwrite = [(1,0)]
pm.DB["rdiv_ltu"] = pm.PredicateMetadata( "rdiv_ltu", tuple() )
pm.DB["rdiv_ltu"].overwrite = []
pm.DB["rdiv_ltu_ow"] = pm.PredicateMetadata( "rdiv_ltu_ow", tuple() )
pm.DB["rdiv_ltu_ow"].overwrite = [(1,0)]
pm.DB["rdiv_uni"] = pm.PredicateMetadata( "rdiv_uni", tuple() )
pm.DB["rdiv_uni"].overwrite = []
pm.DB["rdiv_uni_ow"] = pm.PredicateMetadata( "rdiv_uni_ow", tuple() )
pm.DB["rdiv_uni_ow"].overwrite = [(1,0)]
pm.DB["rdiv_unn"] = pm.PredicateMetadata( "rdiv_unn", tuple() )
pm.DB["rdiv_unn"].overwrite = []
pm.DB["rdiv_unn_ow"] = pm.PredicateMetadata( "rdiv_unn_ow", tuple() )
pm.DB["rdiv_unn_ow"].overwrite = [(1,0)]
pm.DB["rdiv_unu"] = pm.PredicateMetadata( "rdiv_unu", tuple() )
pm.DB["rdiv_unu"].overwrite = []
pm.DB["rdiv_unu_ow"] = pm.PredicateMetadata( "rdiv_unu_ow", tuple() )
pm.DB["rdiv_unu_ow"].overwrite = [(1,0)]
pm.DB["rdiv_uti"] = pm.PredicateMetadata( "rdiv_uti", tuple() )
pm.DB["rdiv_uti"].overwrite = []
pm.DB["rdiv_uti_ow"] = pm.PredicateMetadata( "rdiv_uti_ow", tuple() )
pm.DB["rdiv_uti_ow"].overwrite = [(1,0)]
pm.DB["rdiv_utn"] = pm.PredicateMetadata( "rdiv_utn", tuple() )
pm.DB["rdiv_utn"].overwrite = []
pm.DB["rdiv_utn_ow"] = pm.PredicateMetadata( "rdiv_utn_ow", tuple() )
pm.DB["rdiv_utn_ow"].overwrite = [(1,0)]
pm.DB["rdiv_utu"] = pm.PredicateMetadata( "rdiv_utu", tuple() )
pm.DB["rdiv_utu"].overwrite = []
pm.DB["rdiv_utu_ow"] = pm.PredicateMetadata( "rdiv_utu_ow", tuple() )
pm.DB["rdiv_utu_ow"].overwrite = [(1,0)]
A = PatternDot("A")
B = PatternDot("B")
X = PatternDot("X")
def _trsm_rule(kernel, lhs_pattern, condition, negated=False):
    """Build one RewriteRule mapping ``X = <lhs_pattern>`` to a kernel call.

    kernel      -- name of the trsm kernel predicate (key in pm.DB).
    lhs_pattern -- expression tree matched on the right-hand side.
    condition   -- textual Constraint deciding when the rule applies.
    negated     -- wrap the kernel call in Minus (for X = -inv(A) B).
    """
    # Default arguments pin the current loop values into the closure.
    def _replace(d, _kernel=kernel, _negated=negated):
        call = Predicate(_kernel, [d["A"], d["B"]],
                         [d["A"].get_size(), d["B"].get_size()])
        if _negated:
            call = Minus([call])
        return Equal([NList([d["X"]]), call])

    return RewriteRule(
        (Equal([NList([X]), lhs_pattern]), Constraint(condition)),
        Replacement(_replace)
    )


def _build_trsm_rules():
    """Generate the full table of trsm rewrite rules.

    Kernel names follow <side>div_<uplo><trans><diag> (see the pm.DB
    registrations above).  For every kernel the plain rule applies when X
    is stored in place (X.st_info[1].name == X.name) and the "_ow" rule
    when a different operand is overwritten.  Order is significant: within
    each (side, uplo, trans) group the more specific diagonal constraints
    (implicit-unit, unit) are emitted before the general one.
    """
    rules = []
    for side in ("l", "r"):
        for uplo, uplo_cond in (("l", "A.isLowerTriangular()"),
                                ("u", "A.isUpperTriangular()")):
            for trans in ("n", "t"):
                for diag, diag_cond in (
                        ("i", " and A.isImplicitUnitDiagonal()"),
                        ("u", " and A.isUnitDiagonal()"),
                        ("n", "")):
                    kernel = "%sdiv_%s%s%s" % (side, uplo, trans, diag)
                    for suffix, name_cond in (
                            ("", " and X.st_info[1].name == X.name"),
                            ("_ow", " and X.st_info[1].name != X.name")):
                        condition = uplo_cond + diag_cond + name_cond

                        def _pattern(negate=False):
                            # Build a fresh expression tree for every rule
                            # so no rule shares mutable pattern nodes.
                            op = Inverse([A])
                            if trans == "t":
                                op = Transpose([op])
                            if negate:
                                op = Minus([op])
                            return (Times([op, B]) if side == "l"
                                    else Times([B, op]))

                        rules.append(
                            _trsm_rule(kernel + suffix, _pattern(),
                                       condition))
                        # Only the plain lower/no-transpose solve also has
                        # a negated form (X = -inv(A) B) in the original
                        # hand-written table.
                        if kernel == "ldiv_lnn":
                            rules.append(
                                _trsm_rule(kernel + suffix, _pattern(True),
                                           condition, negated=True))
    return rules


trsm2lgen_rules = _build_trsm_rules()
|
en
| 0.318693
|
# X = i(t(A)) B -> ldiv_lni # X = i(t(A)) B -> ldiv_lni_ow # X = i(t(A)) B -> ldiv_lnu # X = i(t(A)) B -> ldiv_lnu_ow # X = i(t(A)) B -> ldiv_lnn # X = i(t(A)) B -> ldiv_lnn_ow # X = i(t(A)) B -> ldiv_lti # X = i(t(A)) B -> ldiv_lti_ow # X = i(t(A)) B -> ldiv_ltu # X = i(t(A)) B -> ldiv_ltu_ow # X = i(t(A)) B -> ldiv_ltn # X = i(t(A)) B -> ldiv_ltn_ow # X = i(t(A)) B -> ldiv_uni # X = i(t(A)) B -> ldiv_uni_ow # X = i(t(A)) B -> ldiv_unu # X = i(t(A)) B -> ldiv_unu_ow # X = i(t(A)) B -> ldiv_unn # X = i(t(A)) B -> ldiv_unn_ow # X = i(t(A)) B -> ldiv_uti # X = i(t(A)) B -> ldiv_uti_ow # X = i(t(A)) B -> ldiv_utu # X = i(t(A)) B -> ldiv_utu_ow # X = i(t(A)) B -> ldiv_utn # X = i(t(A)) B -> ldiv_utn_ow # X = i(t(A)) B -> rdiv_lni # X = i(t(A)) B -> rdiv_lni_ow # X = i(t(A)) B -> rdiv_lnu # X = i(t(A)) B -> rdiv_lnu_ow # X = i(t(A)) B -> rdiv_lnn # X = i(t(A)) B -> rdiv_lnn_ow # X = i(t(A)) B -> rdiv_lti # X = i(t(A)) B -> rdiv_lti_ow # X = i(t(A)) B -> rdiv_ltu # X = i(t(A)) B -> rdiv_ltu_ow # X = i(t(A)) B -> rdiv_ltn # X = i(t(A)) B -> rdiv_ltn_ow # X = i(t(A)) B -> rdiv_uni # X = i(t(A)) B -> rdiv_uni_ow # X = i(t(A)) B -> rdiv_unu # X = i(t(A)) B -> rdiv_unu_ow # X = i(t(A)) B -> rdiv_unn # X = i(t(A)) B -> rdiv_unn_ow # X = i(t(A)) B -> rdiv_uti # X = i(t(A)) B -> rdiv_uti_ow # X = i(t(A)) B -> rdiv_utu # X = i(t(A)) B -> rdiv_utu_ow # X = i(t(A)) B -> rdiv_utn # X = i(t(A)) B -> rdiv_utn_ow
| 1.906787
| 2
|
pdf4me/Pdf4mePythonClientApi/pdf4me/model/job_config.py
|
pdf4me/pdf4me-clientapi-python
| 1
|
6629883
|
<filename>pdf4me/Pdf4mePythonClientApi/pdf4me/model/job_config.py
# coding: utf-8
"""
Pdf4me
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class JobConfig(object):
    """Swagger model of a pdf4me job configuration.

    NOTE: originally auto generated by the swagger code generator program;
    the repetitive per-attribute getters/setters have been collapsed into a
    single validating-property factory with identical behavior.

    Attributes:
        swagger_types (dict): attribute name -> attribute type.
        attribute_map (dict): attribute name -> JSON key in the definition.
    """

    swagger_types = {
        'job_config_id': 'str',
        'enabled': 'bool',
        'active': 'bool',
        'creation_date': 'datetime',
        'mod_date': 'datetime',
        'name': 'str',
        'user_id': 'str',
        'tenant_id': 'str',
        'source_folder': 'StorageFolder',
        'execution_trigger': 'ExecutionTrigger',
        'action_flow': 'ActionFlow',
        'target_folder': 'StorageFolder'
    }

    attribute_map = {
        'job_config_id': 'jobConfigId',
        'enabled': 'enabled',
        'active': 'active',
        'creation_date': 'creationDate',
        'mod_date': 'modDate',
        'name': 'name',
        'user_id': 'userId',
        'tenant_id': 'tenantId',
        'source_folder': 'sourceFolder',
        'execution_trigger': 'executionTrigger',
        'action_flow': 'actionFlow',
        'target_folder': 'targetFolder'
    }

    # Attributes that must never be set to None (swagger "required" fields).
    _required = frozenset(
        ['job_config_id', 'enabled', 'name', 'user_id', 'tenant_id'])

    def _make_accessor(attr):
        """Build a property for `_<attr>` that rejects None on required fields."""
        private = '_' + attr

        def _get(self):
            return getattr(self, private)

        def _set(self, value):
            if value is None and attr in self._required:
                raise ValueError(
                    "Invalid value for `%s`, must not be `None`" % attr)  # noqa: E501
            setattr(self, private, value)

        return property(_get, _set,
                        doc="The %s of this JobConfig." % attr)

    job_config_id = _make_accessor('job_config_id')
    enabled = _make_accessor('enabled')
    active = _make_accessor('active')
    creation_date = _make_accessor('creation_date')
    mod_date = _make_accessor('mod_date')
    name = _make_accessor('name')
    user_id = _make_accessor('user_id')
    tenant_id = _make_accessor('tenant_id')
    source_folder = _make_accessor('source_folder')
    execution_trigger = _make_accessor('execution_trigger')
    action_flow = _make_accessor('action_flow')
    target_folder = _make_accessor('target_folder')
    del _make_accessor  # factory is only needed while building the class

    def __init__(self, job_config_id=None, enabled=None, active=None,
                 creation_date=None, mod_date=None, name=None, user_id=None,
                 tenant_id=None, source_folder=None, execution_trigger=None,
                 action_flow=None, target_folder=None):
        """JobConfig - a model defined in Swagger"""
        # Start every backing attribute at None, mirroring the generated code.
        for attr in self.swagger_types:
            setattr(self, '_' + attr, None)
        self.discriminator = None

        params = {
            'job_config_id': job_config_id,
            'enabled': enabled,
            'active': active,
            'creation_date': creation_date,
            'mod_date': mod_date,
            'name': name,
            'user_id': user_id,
            'tenant_id': tenant_id,
            'source_folder': source_folder,
            'execution_trigger': execution_trigger,
            'action_flow': action_flow,
            'target_folder': target_folder,
        }
        # Required fields always go through their setter (raising ValueError
        # when None); optional fields are assigned only when provided.
        for attr, value in params.items():
            if attr in self._required or value is not None:
                setattr(self, attr, value)

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()
                }
            else:
                result[attr] = value
        # Kept from the generated template; dead unless JobConfig subclasses
        # dict in a future regeneration.
        if issubclass(JobConfig, dict):
            for key, value in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, JobConfig):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
|
<filename>pdf4me/Pdf4mePythonClientApi/pdf4me/model/job_config.py
# coding: utf-8
"""
Pdf4me
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class JobConfig(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'job_config_id': 'str',
'enabled': 'bool',
'active': 'bool',
'creation_date': 'datetime',
'mod_date': 'datetime',
'name': 'str',
'user_id': 'str',
'tenant_id': 'str',
'source_folder': 'StorageFolder',
'execution_trigger': 'ExecutionTrigger',
'action_flow': 'ActionFlow',
'target_folder': 'StorageFolder'
}
attribute_map = {
'job_config_id': 'jobConfigId',
'enabled': 'enabled',
'active': 'active',
'creation_date': 'creationDate',
'mod_date': 'modDate',
'name': 'name',
'user_id': 'userId',
'tenant_id': 'tenantId',
'source_folder': 'sourceFolder',
'execution_trigger': 'executionTrigger',
'action_flow': 'actionFlow',
'target_folder': 'targetFolder'
}
def __init__(self, job_config_id=None, enabled=None, active=None, creation_date=None, mod_date=None, name=None, user_id=None, tenant_id=None, source_folder=None, execution_trigger=None, action_flow=None, target_folder=None): # noqa: E501
"""JobConfig - a model defined in Swagger""" # noqa: E501
self._job_config_id = None
self._enabled = None
self._active = None
self._creation_date = None
self._mod_date = None
self._name = None
self._user_id = None
self._tenant_id = None
self._source_folder = None
self._execution_trigger = None
self._action_flow = None
self._target_folder = None
self.discriminator = None
self.job_config_id = job_config_id
self.enabled = enabled
if active is not None:
self.active = active
if creation_date is not None:
self.creation_date = creation_date
if mod_date is not None:
self.mod_date = mod_date
self.name = name
self.user_id = user_id
self.tenant_id = tenant_id
if source_folder is not None:
self.source_folder = source_folder
if execution_trigger is not None:
self.execution_trigger = execution_trigger
if action_flow is not None:
self.action_flow = action_flow
if target_folder is not None:
self.target_folder = target_folder
@property
def job_config_id(self):
"""Gets the job_config_id of this JobConfig. # noqa: E501
:return: The job_config_id of this JobConfig. # noqa: E501
:rtype: str
"""
return self._job_config_id
@job_config_id.setter
def job_config_id(self, job_config_id):
"""Sets the job_config_id of this JobConfig.
:param job_config_id: The job_config_id of this JobConfig. # noqa: E501
:type: str
"""
if job_config_id is None:
raise ValueError("Invalid value for `job_config_id`, must not be `None`") # noqa: E501
self._job_config_id = job_config_id
@property
def enabled(self):
"""Gets the enabled of this JobConfig. # noqa: E501
:return: The enabled of this JobConfig. # noqa: E501
:rtype: bool
"""
return self._enabled
@enabled.setter
def enabled(self, enabled):
"""Sets the enabled of this JobConfig.
:param enabled: The enabled of this JobConfig. # noqa: E501
:type: bool
"""
if enabled is None:
raise ValueError("Invalid value for `enabled`, must not be `None`") # noqa: E501
self._enabled = enabled
@property
def active(self):
"""Gets the active of this JobConfig. # noqa: E501
:return: The active of this JobConfig. # noqa: E501
:rtype: bool
"""
return self._active
@active.setter
def active(self, active):
"""Sets the active of this JobConfig.
:param active: The active of this JobConfig. # noqa: E501
:type: bool
"""
self._active = active
@property
def creation_date(self):
"""Gets the creation_date of this JobConfig. # noqa: E501
:return: The creation_date of this JobConfig. # noqa: E501
:rtype: datetime
"""
return self._creation_date
@creation_date.setter
def creation_date(self, creation_date):
"""Sets the creation_date of this JobConfig.
:param creation_date: The creation_date of this JobConfig. # noqa: E501
:type: datetime
"""
self._creation_date = creation_date
@property
def mod_date(self):
"""Gets the mod_date of this JobConfig. # noqa: E501
:return: The mod_date of this JobConfig. # noqa: E501
:rtype: datetime
"""
return self._mod_date
@mod_date.setter
def mod_date(self, mod_date):
"""Sets the mod_date of this JobConfig.
:param mod_date: The mod_date of this JobConfig. # noqa: E501
:type: datetime
"""
self._mod_date = mod_date
@property
def name(self):
"""Gets the name of this JobConfig. # noqa: E501
:return: The name of this JobConfig. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this JobConfig.
:param name: The name of this JobConfig. # noqa: E501
:type: str
"""
if name is None:
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
self._name = name
@property
def user_id(self):
"""Gets the user_id of this JobConfig. # noqa: E501
:return: The user_id of this JobConfig. # noqa: E501
:rtype: str
"""
return self._user_id
@user_id.setter
def user_id(self, user_id):
"""Sets the user_id of this JobConfig.
:param user_id: The user_id of this JobConfig. # noqa: E501
:type: str
"""
if user_id is None:
raise ValueError("Invalid value for `user_id`, must not be `None`") # noqa: E501
self._user_id = user_id
@property
def tenant_id(self):
"""Gets the tenant_id of this JobConfig. # noqa: E501
:return: The tenant_id of this JobConfig. # noqa: E501
:rtype: str
"""
return self._tenant_id
@tenant_id.setter
def tenant_id(self, tenant_id):
"""Sets the tenant_id of this JobConfig.
:param tenant_id: The tenant_id of this JobConfig. # noqa: E501
:type: str
"""
if tenant_id is None:
raise ValueError("Invalid value for `tenant_id`, must not be `None`") # noqa: E501
self._tenant_id = tenant_id
@property
def source_folder(self):
"""Gets the source_folder of this JobConfig. # noqa: E501
:return: The source_folder of this JobConfig. # noqa: E501
:rtype: StorageFolder
"""
return self._source_folder
@source_folder.setter
def source_folder(self, source_folder):
"""Sets the source_folder of this JobConfig.
:param source_folder: The source_folder of this JobConfig. # noqa: E501
:type: StorageFolder
"""
self._source_folder = source_folder
@property
def execution_trigger(self):
"""Gets the execution_trigger of this JobConfig. # noqa: E501
:return: The execution_trigger of this JobConfig. # noqa: E501
:rtype: ExecutionTrigger
"""
return self._execution_trigger
@execution_trigger.setter
def execution_trigger(self, execution_trigger):
"""Sets the execution_trigger of this JobConfig.
:param execution_trigger: The execution_trigger of this JobConfig. # noqa: E501
:type: ExecutionTrigger
"""
self._execution_trigger = execution_trigger
@property
def action_flow(self):
"""Gets the action_flow of this JobConfig. # noqa: E501
:return: The action_flow of this JobConfig. # noqa: E501
:rtype: ActionFlow
"""
return self._action_flow
@action_flow.setter
def action_flow(self, action_flow):
"""Sets the action_flow of this JobConfig.
:param action_flow: The action_flow of this JobConfig. # noqa: E501
:type: ActionFlow
"""
self._action_flow = action_flow
@property
def target_folder(self):
"""Gets the target_folder of this JobConfig. # noqa: E501
:return: The target_folder of this JobConfig. # noqa: E501
:rtype: StorageFolder
"""
return self._target_folder
@target_folder.setter
def target_folder(self, target_folder):
"""Sets the target_folder of this JobConfig.
:param target_folder: The target_folder of this JobConfig. # noqa: E501
:type: StorageFolder
"""
self._target_folder = target_folder
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(JobConfig, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, JobConfig):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
en
| 0.436588
|
# coding: utf-8 Pdf4me No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501 OpenAPI spec version: v1 Generated by: https://github.com/swagger-api/swagger-codegen.git # noqa: F401 NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. # noqa: E501 JobConfig - a model defined in Swagger # noqa: E501 Gets the job_config_id of this JobConfig. # noqa: E501 :return: The job_config_id of this JobConfig. # noqa: E501 :rtype: str Sets the job_config_id of this JobConfig. :param job_config_id: The job_config_id of this JobConfig. # noqa: E501 :type: str # noqa: E501 Gets the enabled of this JobConfig. # noqa: E501 :return: The enabled of this JobConfig. # noqa: E501 :rtype: bool Sets the enabled of this JobConfig. :param enabled: The enabled of this JobConfig. # noqa: E501 :type: bool # noqa: E501 Gets the active of this JobConfig. # noqa: E501 :return: The active of this JobConfig. # noqa: E501 :rtype: bool Sets the active of this JobConfig. :param active: The active of this JobConfig. # noqa: E501 :type: bool Gets the creation_date of this JobConfig. # noqa: E501 :return: The creation_date of this JobConfig. # noqa: E501 :rtype: datetime Sets the creation_date of this JobConfig. :param creation_date: The creation_date of this JobConfig. # noqa: E501 :type: datetime Gets the mod_date of this JobConfig. # noqa: E501 :return: The mod_date of this JobConfig. # noqa: E501 :rtype: datetime Sets the mod_date of this JobConfig. :param mod_date: The mod_date of this JobConfig. # noqa: E501 :type: datetime Gets the name of this JobConfig. # noqa: E501 :return: The name of this JobConfig. # noqa: E501 :rtype: str Sets the name of this JobConfig. :param name: The name of this JobConfig. 
# noqa: E501 :type: str # noqa: E501 Gets the user_id of this JobConfig. # noqa: E501 :return: The user_id of this JobConfig. # noqa: E501 :rtype: str Sets the user_id of this JobConfig. :param user_id: The user_id of this JobConfig. # noqa: E501 :type: str # noqa: E501 Gets the tenant_id of this JobConfig. # noqa: E501 :return: The tenant_id of this JobConfig. # noqa: E501 :rtype: str Sets the tenant_id of this JobConfig. :param tenant_id: The tenant_id of this JobConfig. # noqa: E501 :type: str # noqa: E501 Gets the source_folder of this JobConfig. # noqa: E501 :return: The source_folder of this JobConfig. # noqa: E501 :rtype: StorageFolder Sets the source_folder of this JobConfig. :param source_folder: The source_folder of this JobConfig. # noqa: E501 :type: StorageFolder Gets the execution_trigger of this JobConfig. # noqa: E501 :return: The execution_trigger of this JobConfig. # noqa: E501 :rtype: ExecutionTrigger Sets the execution_trigger of this JobConfig. :param execution_trigger: The execution_trigger of this JobConfig. # noqa: E501 :type: ExecutionTrigger Gets the action_flow of this JobConfig. # noqa: E501 :return: The action_flow of this JobConfig. # noqa: E501 :rtype: ActionFlow Sets the action_flow of this JobConfig. :param action_flow: The action_flow of this JobConfig. # noqa: E501 :type: ActionFlow Gets the target_folder of this JobConfig. # noqa: E501 :return: The target_folder of this JobConfig. # noqa: E501 :rtype: StorageFolder Sets the target_folder of this JobConfig. :param target_folder: The target_folder of this JobConfig. # noqa: E501 :type: StorageFolder Returns the model properties as a dict Returns the string representation of the model For `print` and `pprint` Returns true if both objects are equal Returns true if both objects are not equal
| 1.300243
| 1
|
plugins/modules/truenas_api_group.py
|
nkiraly/ansible-collection-spatiumcepa-truenas
| 1
|
6629884
|
<reponame>nkiraly/ansible-collection-spatiumcepa-truenas<gh_stars>1-10
from __future__ import absolute_import, division, print_function
from ansible_collections.spatiumcepa.truenas.plugins.module_utils.common import HTTPCode, HTTPResponse, \
TruenasServerError, TruenasModelError, TruenasUnexpectedResponse, strip_null_module_params
from ansible_collections.spatiumcepa.truenas.plugins.module_utils.resources import TruenasGroup
from ansible_collections.spatiumcepa.truenas.plugins.module_utils.arg_specs import API_ARG_SPECS
from ansible.module_utils.connection import Connection, ConnectionError
from ansible.module_utils.basic import AnsibleModule
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community"
}
DOCUMENTATION = """
module: truenas_api_group
short_description: Manage TrueNAS Groups
description:
- Manage TrueNAS Groups via REST API
version_added: "0.1"
author: <NAME> (@nkiraly)
options:
state:
type: str
description: Desired state of the group
default: present
choices: [ absent, present ]
model:
type: dict
description: ''
options:
allow_duplicate_gid:
description: ''
type: bool
gid:
description: ''
type: int
name:
description: ''
type: str
smb:
description: ''
type: bool
sudo:
description: ''
type: bool
sudo_commands:
description: ''
type: list
sudo_nopasswd:
description: ''
type: bool
users:
description: ''
type: list
"""
EXAMPLES = """
- name: Manage Group via TrueNAS API
spatiumcepa.truenas.truenas_api_group:
model:
gid: 983
name: syncthing
sudo: false
state: present
"""
RETURN = """
response:
description: HTTP response returned from the API call
returned: success
type: dict
"""
def main():
module = AnsibleModule(
argument_spec=dict(
model=API_ARG_SPECS[TruenasGroup.RESOURCE_API_MODEL_SPEC],
state={'type': 'str', 'choices': ['absent', 'present'], 'default': 'present'}
),
supports_check_mode=True,
)
connection = Connection(module._socket_path)
group_resource = TruenasGroup(connection, module.check_mode)
try:
response = None
model_param = strip_null_module_params(module.params['model'])
state_param = module.params['state']
if state_param == 'present':
response = group_resource.update_item(model_param)
failed = response[HTTPResponse.STATUS_CODE] != HTTPCode.OK
elif state_param == 'absent':
response = group_resource.delete_item(model_param)
failed = response[HTTPResponse.STATUS_CODE] not in [HTTPCode.OK, HTTPCode.NOT_FOUND]
module.exit_json(
created=group_resource.resource_created,
changed=group_resource.resource_changed,
deleted=group_resource.resource_deleted,
failed=failed,
response=response,
submitted_model=model_param,
)
except TruenasServerError as e:
module.fail_json(msg='Server returned an error, satus code: %s. '
'Server response: %s' % (e.code, e.response))
except TruenasModelError as e:
module.fail_json(msg='Data model error: %s' % (e.args[0]))
except TruenasUnexpectedResponse as e:
module.fail_json(msg=e.args[0])
except ConnectionError as e:
module.fail_json(msg=e.args[0])
if __name__ == '__main__':
main()
|
from __future__ import absolute_import, division, print_function
from ansible_collections.spatiumcepa.truenas.plugins.module_utils.common import HTTPCode, HTTPResponse, \
TruenasServerError, TruenasModelError, TruenasUnexpectedResponse, strip_null_module_params
from ansible_collections.spatiumcepa.truenas.plugins.module_utils.resources import TruenasGroup
from ansible_collections.spatiumcepa.truenas.plugins.module_utils.arg_specs import API_ARG_SPECS
from ansible.module_utils.connection import Connection, ConnectionError
from ansible.module_utils.basic import AnsibleModule
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community"
}
DOCUMENTATION = """
module: truenas_api_group
short_description: Manage TrueNAS Groups
description:
- Manage TrueNAS Groups via REST API
version_added: "0.1"
author: <NAME> (@nkiraly)
options:
state:
type: str
description: Desired state of the group
default: present
choices: [ absent, present ]
model:
type: dict
description: ''
options:
allow_duplicate_gid:
description: ''
type: bool
gid:
description: ''
type: int
name:
description: ''
type: str
smb:
description: ''
type: bool
sudo:
description: ''
type: bool
sudo_commands:
description: ''
type: list
sudo_nopasswd:
description: ''
type: bool
users:
description: ''
type: list
"""
EXAMPLES = """
- name: Manage Group via TrueNAS API
spatiumcepa.truenas.truenas_api_group:
model:
gid: 983
name: syncthing
sudo: false
state: present
"""
RETURN = """
response:
description: HTTP response returned from the API call
returned: success
type: dict
"""
def main():
module = AnsibleModule(
argument_spec=dict(
model=API_ARG_SPECS[TruenasGroup.RESOURCE_API_MODEL_SPEC],
state={'type': 'str', 'choices': ['absent', 'present'], 'default': 'present'}
),
supports_check_mode=True,
)
connection = Connection(module._socket_path)
group_resource = TruenasGroup(connection, module.check_mode)
try:
response = None
model_param = strip_null_module_params(module.params['model'])
state_param = module.params['state']
if state_param == 'present':
response = group_resource.update_item(model_param)
failed = response[HTTPResponse.STATUS_CODE] != HTTPCode.OK
elif state_param == 'absent':
response = group_resource.delete_item(model_param)
failed = response[HTTPResponse.STATUS_CODE] not in [HTTPCode.OK, HTTPCode.NOT_FOUND]
module.exit_json(
created=group_resource.resource_created,
changed=group_resource.resource_changed,
deleted=group_resource.resource_deleted,
failed=failed,
response=response,
submitted_model=model_param,
)
except TruenasServerError as e:
module.fail_json(msg='Server returned an error, satus code: %s. '
'Server response: %s' % (e.code, e.response))
except TruenasModelError as e:
module.fail_json(msg='Data model error: %s' % (e.args[0]))
except TruenasUnexpectedResponse as e:
module.fail_json(msg=e.args[0])
except ConnectionError as e:
module.fail_json(msg=e.args[0])
if __name__ == '__main__':
main()
|
en
| 0.574865
|
module: truenas_api_group short_description: Manage TrueNAS Groups description: - Manage TrueNAS Groups via REST API version_added: "0.1" author: <NAME> (@nkiraly) options: state: type: str description: Desired state of the group default: present choices: [ absent, present ] model: type: dict description: '' options: allow_duplicate_gid: description: '' type: bool gid: description: '' type: int name: description: '' type: str smb: description: '' type: bool sudo: description: '' type: bool sudo_commands: description: '' type: list sudo_nopasswd: description: '' type: bool users: description: '' type: list - name: Manage Group via TrueNAS API spatiumcepa.truenas.truenas_api_group: model: gid: 983 name: syncthing sudo: false state: present response: description: HTTP response returned from the API call returned: success type: dict
| 1.809191
| 2
|
sdk/python/pulumi_aws/dms/event_subscription.py
|
Otanikotani/pulumi-aws
| 0
|
6629885
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
__all__ = ['EventSubscription']
class EventSubscription(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
enabled: Optional[pulumi.Input[bool]] = None,
event_categories: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
name: Optional[pulumi.Input[str]] = None,
sns_topic_arn: Optional[pulumi.Input[str]] = None,
source_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
source_type: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Provides a DMS (Data Migration Service) event subscription resource.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
example = aws.dms.EventSubscription("example",
enabled=True,
event_categories=[
"creation",
"failure",
],
sns_topic_arn=aws_sns_topic["example"]["arn"],
source_ids=[aws_dms_replication_task["example"]["replication_task_id"]],
source_type="replication-task",
tags={
"Name": "example",
})
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[bool] enabled: Whether the event subscription should be enabled.
:param pulumi.Input[Sequence[pulumi.Input[str]]] event_categories: List of event categories to listen for, see `DescribeEventCategories` for a canonical list.
:param pulumi.Input[str] name: Name of event subscription.
:param pulumi.Input[str] sns_topic_arn: SNS topic arn to send events on.
:param pulumi.Input[Sequence[pulumi.Input[str]]] source_ids: Ids of sources to listen to.
:param pulumi.Input[str] source_type: Type of source for events. Valid values: `replication-instance` or `replication-task`
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['enabled'] = enabled
if event_categories is None:
raise TypeError("Missing required property 'event_categories'")
__props__['event_categories'] = event_categories
__props__['name'] = name
if sns_topic_arn is None:
raise TypeError("Missing required property 'sns_topic_arn'")
__props__['sns_topic_arn'] = sns_topic_arn
__props__['source_ids'] = source_ids
__props__['source_type'] = source_type
__props__['tags'] = tags
__props__['arn'] = None
super(EventSubscription, __self__).__init__(
'aws:dms/eventSubscription:EventSubscription',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
arn: Optional[pulumi.Input[str]] = None,
enabled: Optional[pulumi.Input[bool]] = None,
event_categories: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
name: Optional[pulumi.Input[str]] = None,
sns_topic_arn: Optional[pulumi.Input[str]] = None,
source_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
source_type: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None) -> 'EventSubscription':
"""
Get an existing EventSubscription resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[bool] enabled: Whether the event subscription should be enabled.
:param pulumi.Input[Sequence[pulumi.Input[str]]] event_categories: List of event categories to listen for, see `DescribeEventCategories` for a canonical list.
:param pulumi.Input[str] name: Name of event subscription.
:param pulumi.Input[str] sns_topic_arn: SNS topic arn to send events on.
:param pulumi.Input[Sequence[pulumi.Input[str]]] source_ids: Ids of sources to listen to.
:param pulumi.Input[str] source_type: Type of source for events. Valid values: `replication-instance` or `replication-task`
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["arn"] = arn
__props__["enabled"] = enabled
__props__["event_categories"] = event_categories
__props__["name"] = name
__props__["sns_topic_arn"] = sns_topic_arn
__props__["source_ids"] = source_ids
__props__["source_type"] = source_type
__props__["tags"] = tags
return EventSubscription(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def arn(self) -> pulumi.Output[str]:
return pulumi.get(self, "arn")
@property
@pulumi.getter
def enabled(self) -> pulumi.Output[Optional[bool]]:
"""
Whether the event subscription should be enabled.
"""
return pulumi.get(self, "enabled")
@property
@pulumi.getter(name="eventCategories")
def event_categories(self) -> pulumi.Output[Sequence[str]]:
"""
List of event categories to listen for, see `DescribeEventCategories` for a canonical list.
"""
return pulumi.get(self, "event_categories")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Name of event subscription.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="snsTopicArn")
def sns_topic_arn(self) -> pulumi.Output[str]:
"""
SNS topic arn to send events on.
"""
return pulumi.get(self, "sns_topic_arn")
@property
@pulumi.getter(name="sourceIds")
def source_ids(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
Ids of sources to listen to.
"""
return pulumi.get(self, "source_ids")
@property
@pulumi.getter(name="sourceType")
def source_type(self) -> pulumi.Output[Optional[str]]:
"""
Type of source for events. Valid values: `replication-instance` or `replication-task`
"""
return pulumi.get(self, "source_type")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
return pulumi.get(self, "tags")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
__all__ = ['EventSubscription']
class EventSubscription(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
enabled: Optional[pulumi.Input[bool]] = None,
event_categories: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
name: Optional[pulumi.Input[str]] = None,
sns_topic_arn: Optional[pulumi.Input[str]] = None,
source_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
source_type: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Provides a DMS (Data Migration Service) event subscription resource.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
example = aws.dms.EventSubscription("example",
enabled=True,
event_categories=[
"creation",
"failure",
],
sns_topic_arn=aws_sns_topic["example"]["arn"],
source_ids=[aws_dms_replication_task["example"]["replication_task_id"]],
source_type="replication-task",
tags={
"Name": "example",
})
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[bool] enabled: Whether the event subscription should be enabled.
:param pulumi.Input[Sequence[pulumi.Input[str]]] event_categories: List of event categories to listen for, see `DescribeEventCategories` for a canonical list.
:param pulumi.Input[str] name: Name of event subscription.
:param pulumi.Input[str] sns_topic_arn: SNS topic arn to send events on.
:param pulumi.Input[Sequence[pulumi.Input[str]]] source_ids: Ids of sources to listen to.
:param pulumi.Input[str] source_type: Type of source for events. Valid values: `replication-instance` or `replication-task`
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['enabled'] = enabled
if event_categories is None:
raise TypeError("Missing required property 'event_categories'")
__props__['event_categories'] = event_categories
__props__['name'] = name
if sns_topic_arn is None:
raise TypeError("Missing required property 'sns_topic_arn'")
__props__['sns_topic_arn'] = sns_topic_arn
__props__['source_ids'] = source_ids
__props__['source_type'] = source_type
__props__['tags'] = tags
__props__['arn'] = None
super(EventSubscription, __self__).__init__(
'aws:dms/eventSubscription:EventSubscription',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
arn: Optional[pulumi.Input[str]] = None,
enabled: Optional[pulumi.Input[bool]] = None,
event_categories: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
name: Optional[pulumi.Input[str]] = None,
sns_topic_arn: Optional[pulumi.Input[str]] = None,
source_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
source_type: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None) -> 'EventSubscription':
"""
Get an existing EventSubscription resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[bool] enabled: Whether the event subscription should be enabled.
:param pulumi.Input[Sequence[pulumi.Input[str]]] event_categories: List of event categories to listen for, see `DescribeEventCategories` for a canonical list.
:param pulumi.Input[str] name: Name of event subscription.
:param pulumi.Input[str] sns_topic_arn: SNS topic arn to send events on.
:param pulumi.Input[Sequence[pulumi.Input[str]]] source_ids: Ids of sources to listen to.
:param pulumi.Input[str] source_type: Type of source for events. Valid values: `replication-instance` or `replication-task`
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["arn"] = arn
__props__["enabled"] = enabled
__props__["event_categories"] = event_categories
__props__["name"] = name
__props__["sns_topic_arn"] = sns_topic_arn
__props__["source_ids"] = source_ids
__props__["source_type"] = source_type
__props__["tags"] = tags
return EventSubscription(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def arn(self) -> pulumi.Output[str]:
return pulumi.get(self, "arn")
@property
@pulumi.getter
def enabled(self) -> pulumi.Output[Optional[bool]]:
"""
Whether the event subscription should be enabled.
"""
return pulumi.get(self, "enabled")
@property
@pulumi.getter(name="eventCategories")
def event_categories(self) -> pulumi.Output[Sequence[str]]:
"""
List of event categories to listen for, see `DescribeEventCategories` for a canonical list.
"""
return pulumi.get(self, "event_categories")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Name of event subscription.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="snsTopicArn")
def sns_topic_arn(self) -> pulumi.Output[str]:
"""
SNS topic arn to send events on.
"""
return pulumi.get(self, "sns_topic_arn")
@property
@pulumi.getter(name="sourceIds")
def source_ids(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
Ids of sources to listen to.
"""
return pulumi.get(self, "source_ids")
@property
@pulumi.getter(name="sourceType")
def source_type(self) -> pulumi.Output[Optional[str]]:
"""
Type of source for events. Valid values: `replication-instance` or `replication-task`
"""
return pulumi.get(self, "source_type")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
return pulumi.get(self, "tags")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
|
en
| 0.59814
|
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** Provides a DMS (Data Migration Service) event subscription resource. ## Example Usage ```python import pulumi import pulumi_aws as aws example = aws.dms.EventSubscription("example", enabled=True, event_categories=[ "creation", "failure", ], sns_topic_arn=aws_sns_topic["example"]["arn"], source_ids=[aws_dms_replication_task["example"]["replication_task_id"]], source_type="replication-task", tags={ "Name": "example", }) ``` :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[bool] enabled: Whether the event subscription should be enabled. :param pulumi.Input[Sequence[pulumi.Input[str]]] event_categories: List of event categories to listen for, see `DescribeEventCategories` for a canonical list. :param pulumi.Input[str] name: Name of event subscription. :param pulumi.Input[str] sns_topic_arn: SNS topic arn to send events on. :param pulumi.Input[Sequence[pulumi.Input[str]]] source_ids: Ids of sources to listen to. :param pulumi.Input[str] source_type: Type of source for events. Valid values: `replication-instance` or `replication-task` Get an existing EventSubscription resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[bool] enabled: Whether the event subscription should be enabled. :param pulumi.Input[Sequence[pulumi.Input[str]]] event_categories: List of event categories to listen for, see `DescribeEventCategories` for a canonical list. :param pulumi.Input[str] name: Name of event subscription. 
:param pulumi.Input[str] sns_topic_arn: SNS topic arn to send events on. :param pulumi.Input[Sequence[pulumi.Input[str]]] source_ids: Ids of sources to listen to. :param pulumi.Input[str] source_type: Type of source for events. Valid values: `replication-instance` or `replication-task` Whether the event subscription should be enabled. List of event categories to listen for, see `DescribeEventCategories` for a canonical list. Name of event subscription. SNS topic arn to send events on. Ids of sources to listen to. Type of source for events. Valid values: `replication-instance` or `replication-task`
| 1.594429
| 2
|
Algorithm/main.py
|
Falcon9XTech/Aircraft-intent-prediction-ADS-B-data-streams
| 0
|
6629886
|
<reponame>Falcon9XTech/Aircraft-intent-prediction-ADS-B-data-streams<filename>Algorithm/main.py
#!/usr/bin/env python
"""
Name: Main program for AID system
Author: <NAME>
Copyright: University of Liverpool © 2021
License: MIT
Version: 1.0
Status: Development
Description: The source code calls all functions and methods derived from other python files to run as one.
"""
from fetch_data import *
from filter import *
from conditions import *
from intent_prediction import *
import appearance as apr
from configparser import ConfigParser
# Project Logo
print(apr.banner())
# Load credentials to log in OpenSky Network
config = ConfigParser()
config.read("credentials.cfg")
USERNAME, PASSWORD = config["Credentials"]["USERNAME"], config["Credentials"]["PASSWORD"]
filename = "flight_data" # input("Create filename for flight data: ")
p = opensky(filename, USERNAME, PASSWORD)
# Fetch Data
fetch = p.flight_data()
fetch
# Filter Data
filter = data.filter()
# Convert Data
convert = data.convert()
#Conditions
C1_outcome = airspace_infringement.airspace_class()
# Intent Prediction
time_series = prediction.time_series()
predict = prediction.machine_learning()
|
#!/usr/bin/env python
"""
Name: Main program for AID system
Author: <NAME>
Copyright: University of Liverpool © 2021
License: MIT
Version: 1.0
Status: Development
Description: The source code calls all functions and methods derived from other python files to run as one.
"""
from fetch_data import *
from filter import *
from conditions import *
from intent_prediction import *
import appearance as apr
from configparser import ConfigParser
# Project Logo
print(apr.banner())
# Load credentials to log in OpenSky Network
config = ConfigParser()
config.read("credentials.cfg")
USERNAME, PASSWORD = config["Credentials"]["USERNAME"], config["Credentials"]["PASSWORD"]
filename = "flight_data" # input("Create filename for flight data: ")
p = opensky(filename, USERNAME, PASSWORD)
# Fetch Data
fetch = p.flight_data()
fetch
# Filter Data
filter = data.filter()
# Convert Data
convert = data.convert()
#Conditions
C1_outcome = airspace_infringement.airspace_class()
# Intent Prediction
time_series = prediction.time_series()
predict = prediction.machine_learning()
|
en
| 0.758675
|
#!/usr/bin/env python Name: Main program for AID system Author: <NAME> Copyright: University of Liverpool © 2021 License: MIT Version: 1.0 Status: Development Description: The source code calls all functions and methods derived from other python files to run as one. # Project Logo # Load credentials to log in OpenSky Network # input("Create filename for flight data: ") # Fetch Data # Filter Data # Convert Data #Conditions # Intent Prediction
| 2.061873
| 2
|
Crypto Viewer.py
|
HenriqueSoriano/Crypto-Viewer
| 2
|
6629887
|
<filename>Crypto Viewer.py
from tkinter import *
from tkinter import Tk
# LOGIN CHECK
def entrar():
email = txtboxemail_log.get()
senha = txtboxpass_log.get()
if (email=="" or senha==""):
erro_blank = Label(LoginFrame, text="Preencha os campos obrigatórios.", background='#111111', font="Segoe 20", fg="red")
erro_blank.place(relwidth=1, relx=0.5, rely=0.65, anchor=CENTER)
else:
if email == "crypto_viewer_user" and senha == "<PASSWORD>":
# MAIN FRAME
root.destroy()
main = Tk()
main.configure(background='#111111')
main.title("Crypto Viewer")
main.attributes('-fullscreen', True)
# HEADER
headerFrame = Frame(main, background='#111111')
headerFrame.place(relwidth=1, relheight=0.2)
logo = Label(headerFrame, text="Crypto Viewer", background='#111111', font="Segoe 30 bold", fg="white")
logo.place(relwidth=1, relheight=1)
def exit():
main.destroy()
exit_bttn = Button(headerFrame, text="X", border="0", bg='#FF0000', fg="white", font="Segoe 20 bold", cursor="hand2", command=exit)
exit_bttn.place(relwidth=0.03, relheight=0.25, relx=0.97)
# SELECT CRYPTO
MainFrame = Frame(main, background='#111111')
MainFrame.place(relwidth=1, relheight=0.6, rely=0.2)
def sol_page():
sol = Tk()
sol.configure(background='#111111')
sol.title("Crypto Viewer")
sol.attributes('-fullscreen', True)
# HEADER
headerFrame = Frame(sol, background='#111111')
headerFrame.place(relwidth=1, relheight=0.2)
logo = Label(headerFrame, text="Crypto Viewer", background='#111111', font="Segoe 30 bold", fg="white")
logo.place(relwidth=1, relheight=1)
def exit():
main.destroy()
sol.destroy()
ws.close()
exit_bttn = Button(headerFrame, text="X", border="0", bg='#FF0000', fg="white", font="Segoe 20 bold", cursor="hand2", command=exit)
exit_bttn.place(relwidth=0.03, relheight=0.25, relx=0.97)
# INFO SOL
sol_frame = Frame(sol, background='#111111')
sol_frame.place(relwidth=1, relheight=0.6, rely=0.2)
label_sol = Label(sol_frame, text="SOLANA / DOLAR - SOLUSDT", border="0", bg='#111111', fg="white", font="Segoe 30 bold")
label_sol.place(relwidth=0.5, relheight=0.1, relx=0.25, rely=0.25)
lb_loading_sol = Label(sol_frame, text="Carregando...", border="0", bg='#111111', fg="white", font="Segoe 30 bold")
lb_loading_sol.place(relwidth=0.35, relheight=0.1, relx=0.325, rely=0.4)
import json, websocket
SOCKET = "wss://stream.binance.com:9443/ws/solusdt@kline_1m"
def on_message(ws, message):
valor_sol = json.loads(message)['k']['c']
def show_sol():
lb_val_sol = Label(sol_frame, text=valor_sol, border="0", bg='#FFFFFF', fg="black", font="Segoe 30 bold")
lb_val_sol.place(relwidth=0.35, relheight=0.1, relx=0.325, rely=0.4)
show_sol()
ws = websocket.WebSocketApp(SOCKET, on_message=on_message)
def back_sol():
sol.destroy()
ws.close()
sol_back = Button(sol_frame, text="Voltar", border="0", bg='black', fg="white", font="Segoe 30 bold", cursor="hand2", command=back_sol)
sol_back.place(relwidth=0.1, relheight=0.08, relx=0.45, rely=0.6)
# RUN WS
def connect_to_socket():
ws.run_forever()
def on_connect():
import threading
t = threading.Thread(target=connect_to_socket)
t.start()
on_connect()
# RUN SOL
sol.mainloop()
def btc_page():
btc = Tk()
btc.configure(background='#111111')
btc.title("Crypto Viewer")
btc.attributes('-fullscreen', True)
# HEADER
headerFrame = Frame(btc, background='#111111')
headerFrame.place(relwidth=1, relheight=0.2)
logo = Label(headerFrame, text="Crypto Viewer", background='#111111', font="Segoe 30 bold", fg="white")
logo.place(relwidth=1, relheight=1)
def exit():
main.destroy()
btc.destroy()
ws.close()
exit_bttn = Button(headerFrame, text="X", border="0", bg='#FF0000', fg="white", font="Segoe 20 bold", cursor="hand2", command=exit)
exit_bttn.place(relwidth=0.03, relheight=0.25, relx=0.97)
# INFO BTC
btc_frame = Frame(btc, background='#111111')
btc_frame.place(relwidth=1, relheight=0.6, rely=0.2)
label_btc = Label(btc_frame, text="BITCOIN / DOLAR - BTCUSDT", border="0", bg='#111111', fg="white", font="Segoe 30 bold")
label_btc.place(relwidth=0.5, relheight=0.1, relx=0.25, rely=0.25)
lb_loading_btc = Label(btc_frame, text="Carregando...", border="0", bg='#111111', fg="white", font="Segoe 30 bold")
lb_loading_btc.place(relwidth=0.35, relheight=0.1, relx=0.325, rely=0.4)
import json, websocket
SOCKET = "wss://stream.binance.com:9443/ws/btcusdt@kline_1m"
def on_message(ws, message):
valor_btc = json.loads(message)['k']['c']
def show_btc():
lb_val_btc = Label(btc_frame, text=valor_btc, border="0", bg='#FFFFFF', fg="black", font="Segoe 30 bold")
lb_val_btc.place(relwidth=0.35, relheight=0.1, relx=0.325, rely=0.4)
show_btc()
ws = websocket.WebSocketApp(SOCKET, on_message=on_message)
def back_btc():
btc.destroy()
ws.close()
btc_back = Button(btc_frame, text="Voltar", border="0", bg='black', fg="white", font="Segoe 30 bold", cursor="hand2", command=back_btc)
btc_back.place(relwidth=0.1, relheight=0.08, relx=0.45, rely=0.6)
# RUN WS
def connect_to_socket():
ws.run_forever()
def on_connect():
import threading
t = threading.Thread(target=connect_to_socket)
t.start()
on_connect()
# RUN BTC
btc.mainloop()
def eth_page():
eth = Tk()
eth.configure(background='#111111')
eth.title("Crypto Viewer")
eth.attributes('-fullscreen', True)
# HEADER
headerFrame = Frame(eth, background='#111111')
headerFrame.place(relwidth=1, relheight=0.2)
logo = Label(headerFrame, text="Crypto Viewer", background='#111111', font="Segoe 30 bold", fg="white")
logo.place(relwidth=1, relheight=1)
def exit():
main.destroy()
eth.destroy()
ws.close()
exit_bttn = Button(headerFrame, text="X", border="0", bg='#FF0000', fg="white", font="Segoe 20 bold", cursor="hand2", command=exit)
exit_bttn.place(relwidth=0.03, relheight=0.25, relx=0.97)
# INFO ETH
eth_frame = Frame(eth, background='#111111')
eth_frame.place(relwidth=1, relheight=0.6, rely=0.2)
label_eth = Label(eth_frame, text="ETHEREUM / DOLAR - ETHUSDT", border="0", bg='#111111', fg="white", font="Segoe 30 bold")
label_eth.place(relwidth=0.5, relheight=0.1, relx=0.25, rely=0.25)
lb_loading_eth = Label(eth_frame, text="Carregando...", border="0", bg='#111111', fg="white", font="Segoe 30 bold")
lb_loading_eth.place(relwidth=0.35, relheight=0.1, relx=0.325, rely=0.4)
import json, websocket
SOCKET = "wss://stream.binance.com:9443/ws/ethusdt@kline_1m"
def on_message(ws, message):
valor_eth = json.loads(message)['k']['c']
def show_eth():
lb_val_btc = Label(eth_frame, text=valor_eth, border="0", bg='#FFFFFF', fg="black", font="Segoe 30 bold")
lb_val_btc.place(relwidth=0.35, relheight=0.1, relx=0.325, rely=0.4)
show_eth()
ws = websocket.WebSocketApp(SOCKET, on_message=on_message)
def back_eth():
eth.destroy()
ws.close()
eth_back = Button(eth_frame, text="Voltar", border="0", bg='black', fg="white", font="Segoe 30 bold", cursor="hand2", command=back_eth)
eth_back.place(relwidth=0.1, relheight=0.08, relx=0.45, rely=0.6)
# RUN WS
def connect_to_socket():
ws.run_forever()
def on_connect():
import threading
t = threading.Thread(target=connect_to_socket)
t.start()
on_connect()
# RUN ETH
eth.mainloop()
btc_bttn = Button(MainFrame, text="BTC", border="1", bg='#FFFFFF', fg="black", font="Segoe 30 bold", cursor="hand2", command=btc_page)
btc_bttn.place(relwidth=0.1, relheight=0.08, relx=0.45, rely=0.3)
eth_bttn = Button(MainFrame, text="ETH", border="1", bg='#FFFFFF', fg="black", font="Segoe 30 bold", cursor="hand2", command=eth_page)
eth_bttn.place(relwidth=0.1, relheight=0.08, relx=0.45, rely=0.5)
sol_bttn = Button(MainFrame, text="SOL", border="1", bg='#FFFFFF', fg="black", font="Segoe 30 bold", cursor="hand2", command=sol_page)
sol_bttn.place(relwidth=0.1, relheight=0.08, relx=0.45, rely=0.7)
# RUN MAIN
main.mainloop()
else:
erro_invalidade = Label(LoginFrame, text="Usuário ou senha inválidos. Tente novamente.", background='#111111', font="Segoe 20", fg="red")
erro_invalidade.place(relwidth=1, relx=0.5, rely=0.65, anchor=CENTER)
# DEF EXIT
def exit():
root.destroy()
# INFO PAGE
def info():
txt_info_1 = "Crypto Viewer é um projeto Python desenvilvido para a visualização em tempo real do valor de criptomoedas."
txt_info_2 = "Desenvolvido por <NAME>, estudante de Análise e desenvolvimento de Sistemas - Etec Polivalente Americana"
txt_info_3 = "email: <EMAIL>"
txt_info_4 = "LinkedIn: linkedin.com/in/henrique-soriano-b6b623226"
# INFO FRAME
root.destroy()
info = Tk()
info.configure(background='#111111')
info.title("Crypto Viewer")
info.attributes('-fullscreen', True)
# HEADER
headerFrame = Frame(info, background='#111111')
headerFrame.place(relwidth=1, relheight=0.2)
logo = Label(headerFrame, text="Crypto Viewer", background='#111111', font="Segoe 30 bold", fg="white")
logo.place(relwidth=1, relheight=1)
def exit():
info.destroy()
exit_bttn = Button(headerFrame, text="X", border="0", bg='#FF0000', fg="white", font="Segoe 20 bold", cursor="hand2", command=exit)
exit_bttn.place(relwidth=0.03, relheight=0.25, relx=0.97)
# INFO
info_frame = Frame(info, background='#111111')
info_frame.place(relwidth=1, relheight=0.6, rely=0.2)
lb_loading_eth = Label(info_frame, text=txt_info_1, border="0", bg='#111111', fg="white", font="Segoe 15 bold")
lb_loading_eth.place(relwidth=1, relheight=0.1, rely=0.3)
lb_loading_eth = Label(info_frame, text=txt_info_2, border="0", bg='#111111', fg="white", font="Segoe 15 bold")
lb_loading_eth.place(relwidth=1, relheight=0.1, rely=0.4)
lb_loading_eth = Label(info_frame, text=txt_info_3, border="0", bg='#111111', fg="white", font="Segoe 15 bold")
lb_loading_eth.place(relwidth=1, relheight=0.1, rely=0.5)
lb_loading_eth = Label(info_frame, text=txt_info_4, border="0", bg='#111111', fg="white", font="Segoe 15 bold")
lb_loading_eth.place(relwidth=1, relheight=0.1, rely=0.6)
# MAIN INITIAL FRAME
root = Tk()
root.configure(background='#111111')
root.title("Crypto Viewer")
root.attributes('-fullscreen', True)
# HEADER
headerFrame = Frame(root, background='#111111')
headerFrame.place(relwidth=1, relheight=0.2)
logo = Label(headerFrame, text="Crypto Viewer", background='#111111', font="Segoe 30 bold", fg="white")
logo.place(relwidth=1, relheight=1)
def exit():
root.destroy()
exit_bttn = Button(headerFrame, text="X", border="0", bg='#FF0000', fg="white", font="Segoe 20 bold", cursor="hand2", command=exit)
exit_bttn.place(relwidth=0.03, relheight=0.25, relx=0.97)
# LOGIN
LoginFrame = Frame(root, background='#111111')
LoginFrame.place(relwidth=1, relheight=0.6, rely=0.2)
l_entrar = Label(LoginFrame, text="ENTRAR", background='#111111', font="Segoe 15 bold", fg="white")
l_entrar.place(relwidth=1, relheight=0.2)
l_email = Label(LoginFrame, text="Usuário", background='#111111', font="Segoe 15", fg="white")
l_email.place(relwidth=1, relheight=0.2, rely=0.15)
txtboxemail_log = Entry(LoginFrame, bg="#222222", border=0, fg="white", font="Segoe 15")
txtboxemail_log.place(relwidth=0.3, relheight=0.05, relx=0.35, rely=0.3)
l_pass = Label(LoginFrame, text="Senha", background='#111111', font="Segoe 15", fg="white")
l_pass.place(relwidth=1, relheight=0.2, rely=0.35)
txtboxpass_log = Entry(LoginFrame, bg="#222222", border=0, fg="white", font="Segoe 15", show="*")
txtboxpass_log.place(relwidth=0.3, relheight=0.05, relx=0.35, rely=0.5)
login_bttn = Button(LoginFrame, text="ENTRAR", border="0", bg='#222222', fg="white", font="Segoe 20 bold", cursor="hand2", command=entrar)
login_bttn.place(relwidth=0.1, relheight=0.085, relx=0.45, rely=0.75)
login_bttn = Button(LoginFrame, text="?", border="0", bg='white', fg="black", font="Segoe 16 bold", cursor="hand2", command=info)
login_bttn.place(relwidth=0.02, relheight=0.06, relx=0.8, rely=0.9)
# RUN ROOT
root.mainloop()
|
<filename>Crypto Viewer.py
from tkinter import *
from tkinter import Tk
# LOGIN CHECK
def entrar():
email = txtboxemail_log.get()
senha = txtboxpass_log.get()
if (email=="" or senha==""):
erro_blank = Label(LoginFrame, text="Preencha os campos obrigatórios.", background='#111111', font="Segoe 20", fg="red")
erro_blank.place(relwidth=1, relx=0.5, rely=0.65, anchor=CENTER)
else:
if email == "crypto_viewer_user" and senha == "<PASSWORD>":
# MAIN FRAME
root.destroy()
main = Tk()
main.configure(background='#111111')
main.title("Crypto Viewer")
main.attributes('-fullscreen', True)
# HEADER
headerFrame = Frame(main, background='#111111')
headerFrame.place(relwidth=1, relheight=0.2)
logo = Label(headerFrame, text="Crypto Viewer", background='#111111', font="Segoe 30 bold", fg="white")
logo.place(relwidth=1, relheight=1)
def exit():
main.destroy()
exit_bttn = Button(headerFrame, text="X", border="0", bg='#FF0000', fg="white", font="Segoe 20 bold", cursor="hand2", command=exit)
exit_bttn.place(relwidth=0.03, relheight=0.25, relx=0.97)
# SELECT CRYPTO
MainFrame = Frame(main, background='#111111')
MainFrame.place(relwidth=1, relheight=0.6, rely=0.2)
def sol_page():
sol = Tk()
sol.configure(background='#111111')
sol.title("Crypto Viewer")
sol.attributes('-fullscreen', True)
# HEADER
headerFrame = Frame(sol, background='#111111')
headerFrame.place(relwidth=1, relheight=0.2)
logo = Label(headerFrame, text="Crypto Viewer", background='#111111', font="Segoe 30 bold", fg="white")
logo.place(relwidth=1, relheight=1)
def exit():
main.destroy()
sol.destroy()
ws.close()
exit_bttn = Button(headerFrame, text="X", border="0", bg='#FF0000', fg="white", font="Segoe 20 bold", cursor="hand2", command=exit)
exit_bttn.place(relwidth=0.03, relheight=0.25, relx=0.97)
# INFO SOL
sol_frame = Frame(sol, background='#111111')
sol_frame.place(relwidth=1, relheight=0.6, rely=0.2)
label_sol = Label(sol_frame, text="SOLANA / DOLAR - SOLUSDT", border="0", bg='#111111', fg="white", font="Segoe 30 bold")
label_sol.place(relwidth=0.5, relheight=0.1, relx=0.25, rely=0.25)
lb_loading_sol = Label(sol_frame, text="Carregando...", border="0", bg='#111111', fg="white", font="Segoe 30 bold")
lb_loading_sol.place(relwidth=0.35, relheight=0.1, relx=0.325, rely=0.4)
import json, websocket
SOCKET = "wss://stream.binance.com:9443/ws/solusdt@kline_1m"
def on_message(ws, message):
valor_sol = json.loads(message)['k']['c']
def show_sol():
lb_val_sol = Label(sol_frame, text=valor_sol, border="0", bg='#FFFFFF', fg="black", font="Segoe 30 bold")
lb_val_sol.place(relwidth=0.35, relheight=0.1, relx=0.325, rely=0.4)
show_sol()
ws = websocket.WebSocketApp(SOCKET, on_message=on_message)
def back_sol():
sol.destroy()
ws.close()
sol_back = Button(sol_frame, text="Voltar", border="0", bg='black', fg="white", font="Segoe 30 bold", cursor="hand2", command=back_sol)
sol_back.place(relwidth=0.1, relheight=0.08, relx=0.45, rely=0.6)
# RUN WS
def connect_to_socket():
ws.run_forever()
def on_connect():
import threading
t = threading.Thread(target=connect_to_socket)
t.start()
on_connect()
# RUN SOL
sol.mainloop()
def btc_page():
btc = Tk()
btc.configure(background='#111111')
btc.title("Crypto Viewer")
btc.attributes('-fullscreen', True)
# HEADER
headerFrame = Frame(btc, background='#111111')
headerFrame.place(relwidth=1, relheight=0.2)
logo = Label(headerFrame, text="Crypto Viewer", background='#111111', font="Segoe 30 bold", fg="white")
logo.place(relwidth=1, relheight=1)
def exit():
main.destroy()
btc.destroy()
ws.close()
exit_bttn = Button(headerFrame, text="X", border="0", bg='#FF0000', fg="white", font="Segoe 20 bold", cursor="hand2", command=exit)
exit_bttn.place(relwidth=0.03, relheight=0.25, relx=0.97)
# INFO BTC
btc_frame = Frame(btc, background='#111111')
btc_frame.place(relwidth=1, relheight=0.6, rely=0.2)
label_btc = Label(btc_frame, text="BITCOIN / DOLAR - BTCUSDT", border="0", bg='#111111', fg="white", font="Segoe 30 bold")
label_btc.place(relwidth=0.5, relheight=0.1, relx=0.25, rely=0.25)
lb_loading_btc = Label(btc_frame, text="Carregando...", border="0", bg='#111111', fg="white", font="Segoe 30 bold")
lb_loading_btc.place(relwidth=0.35, relheight=0.1, relx=0.325, rely=0.4)
import json, websocket
SOCKET = "wss://stream.binance.com:9443/ws/btcusdt@kline_1m"
def on_message(ws, message):
valor_btc = json.loads(message)['k']['c']
def show_btc():
lb_val_btc = Label(btc_frame, text=valor_btc, border="0", bg='#FFFFFF', fg="black", font="Segoe 30 bold")
lb_val_btc.place(relwidth=0.35, relheight=0.1, relx=0.325, rely=0.4)
show_btc()
ws = websocket.WebSocketApp(SOCKET, on_message=on_message)
def back_btc():
btc.destroy()
ws.close()
btc_back = Button(btc_frame, text="Voltar", border="0", bg='black', fg="white", font="Segoe 30 bold", cursor="hand2", command=back_btc)
btc_back.place(relwidth=0.1, relheight=0.08, relx=0.45, rely=0.6)
# RUN WS
def connect_to_socket():
ws.run_forever()
def on_connect():
import threading
t = threading.Thread(target=connect_to_socket)
t.start()
on_connect()
# RUN BTC
btc.mainloop()
def eth_page():
eth = Tk()
eth.configure(background='#111111')
eth.title("Crypto Viewer")
eth.attributes('-fullscreen', True)
# HEADER
headerFrame = Frame(eth, background='#111111')
headerFrame.place(relwidth=1, relheight=0.2)
logo = Label(headerFrame, text="Crypto Viewer", background='#111111', font="Segoe 30 bold", fg="white")
logo.place(relwidth=1, relheight=1)
def exit():
main.destroy()
eth.destroy()
ws.close()
exit_bttn = Button(headerFrame, text="X", border="0", bg='#FF0000', fg="white", font="Segoe 20 bold", cursor="hand2", command=exit)
exit_bttn.place(relwidth=0.03, relheight=0.25, relx=0.97)
# INFO ETH
eth_frame = Frame(eth, background='#111111')
eth_frame.place(relwidth=1, relheight=0.6, rely=0.2)
label_eth = Label(eth_frame, text="ETHEREUM / DOLAR - ETHUSDT", border="0", bg='#111111', fg="white", font="Segoe 30 bold")
label_eth.place(relwidth=0.5, relheight=0.1, relx=0.25, rely=0.25)
lb_loading_eth = Label(eth_frame, text="Carregando...", border="0", bg='#111111', fg="white", font="Segoe 30 bold")
lb_loading_eth.place(relwidth=0.35, relheight=0.1, relx=0.325, rely=0.4)
import json, websocket
SOCKET = "wss://stream.binance.com:9443/ws/ethusdt@kline_1m"
def on_message(ws, message):
valor_eth = json.loads(message)['k']['c']
def show_eth():
lb_val_btc = Label(eth_frame, text=valor_eth, border="0", bg='#FFFFFF', fg="black", font="Segoe 30 bold")
lb_val_btc.place(relwidth=0.35, relheight=0.1, relx=0.325, rely=0.4)
show_eth()
ws = websocket.WebSocketApp(SOCKET, on_message=on_message)
def back_eth():
eth.destroy()
ws.close()
eth_back = Button(eth_frame, text="Voltar", border="0", bg='black', fg="white", font="Segoe 30 bold", cursor="hand2", command=back_eth)
eth_back.place(relwidth=0.1, relheight=0.08, relx=0.45, rely=0.6)
# RUN WS
def connect_to_socket():
ws.run_forever()
def on_connect():
import threading
t = threading.Thread(target=connect_to_socket)
t.start()
on_connect()
# RUN ETH
eth.mainloop()
btc_bttn = Button(MainFrame, text="BTC", border="1", bg='#FFFFFF', fg="black", font="Segoe 30 bold", cursor="hand2", command=btc_page)
btc_bttn.place(relwidth=0.1, relheight=0.08, relx=0.45, rely=0.3)
eth_bttn = Button(MainFrame, text="ETH", border="1", bg='#FFFFFF', fg="black", font="Segoe 30 bold", cursor="hand2", command=eth_page)
eth_bttn.place(relwidth=0.1, relheight=0.08, relx=0.45, rely=0.5)
sol_bttn = Button(MainFrame, text="SOL", border="1", bg='#FFFFFF', fg="black", font="Segoe 30 bold", cursor="hand2", command=sol_page)
sol_bttn.place(relwidth=0.1, relheight=0.08, relx=0.45, rely=0.7)
# RUN MAIN
main.mainloop()
else:
erro_invalidade = Label(LoginFrame, text="Usuário ou senha inválidos. Tente novamente.", background='#111111', font="Segoe 20", fg="red")
erro_invalidade.place(relwidth=1, relx=0.5, rely=0.65, anchor=CENTER)
# DEF EXIT
def exit():
root.destroy()
# INFO PAGE
def info():
txt_info_1 = "Crypto Viewer é um projeto Python desenvilvido para a visualização em tempo real do valor de criptomoedas."
txt_info_2 = "Desenvolvido por <NAME>, estudante de Análise e desenvolvimento de Sistemas - Etec Polivalente Americana"
txt_info_3 = "email: <EMAIL>"
txt_info_4 = "LinkedIn: linkedin.com/in/henrique-soriano-b6b623226"
# INFO FRAME
root.destroy()
info = Tk()
info.configure(background='#111111')
info.title("Crypto Viewer")
info.attributes('-fullscreen', True)
# HEADER
headerFrame = Frame(info, background='#111111')
headerFrame.place(relwidth=1, relheight=0.2)
logo = Label(headerFrame, text="Crypto Viewer", background='#111111', font="Segoe 30 bold", fg="white")
logo.place(relwidth=1, relheight=1)
def exit():
info.destroy()
exit_bttn = Button(headerFrame, text="X", border="0", bg='#FF0000', fg="white", font="Segoe 20 bold", cursor="hand2", command=exit)
exit_bttn.place(relwidth=0.03, relheight=0.25, relx=0.97)
# INFO
info_frame = Frame(info, background='#111111')
info_frame.place(relwidth=1, relheight=0.6, rely=0.2)
lb_loading_eth = Label(info_frame, text=txt_info_1, border="0", bg='#111111', fg="white", font="Segoe 15 bold")
lb_loading_eth.place(relwidth=1, relheight=0.1, rely=0.3)
lb_loading_eth = Label(info_frame, text=txt_info_2, border="0", bg='#111111', fg="white", font="Segoe 15 bold")
lb_loading_eth.place(relwidth=1, relheight=0.1, rely=0.4)
lb_loading_eth = Label(info_frame, text=txt_info_3, border="0", bg='#111111', fg="white", font="Segoe 15 bold")
lb_loading_eth.place(relwidth=1, relheight=0.1, rely=0.5)
lb_loading_eth = Label(info_frame, text=txt_info_4, border="0", bg='#111111', fg="white", font="Segoe 15 bold")
lb_loading_eth.place(relwidth=1, relheight=0.1, rely=0.6)
# MAIN INITIAL FRAME
root = Tk()
root.configure(background='#111111')
root.title("Crypto Viewer")
root.attributes('-fullscreen', True)
# HEADER
headerFrame = Frame(root, background='#111111')
headerFrame.place(relwidth=1, relheight=0.2)
logo = Label(headerFrame, text="Crypto Viewer", background='#111111', font="Segoe 30 bold", fg="white")
logo.place(relwidth=1, relheight=1)
def exit():
root.destroy()
exit_bttn = Button(headerFrame, text="X", border="0", bg='#FF0000', fg="white", font="Segoe 20 bold", cursor="hand2", command=exit)
exit_bttn.place(relwidth=0.03, relheight=0.25, relx=0.97)
# LOGIN
LoginFrame = Frame(root, background='#111111')
LoginFrame.place(relwidth=1, relheight=0.6, rely=0.2)
l_entrar = Label(LoginFrame, text="ENTRAR", background='#111111', font="Segoe 15 bold", fg="white")
l_entrar.place(relwidth=1, relheight=0.2)
l_email = Label(LoginFrame, text="Usuário", background='#111111', font="Segoe 15", fg="white")
l_email.place(relwidth=1, relheight=0.2, rely=0.15)
txtboxemail_log = Entry(LoginFrame, bg="#222222", border=0, fg="white", font="Segoe 15")
txtboxemail_log.place(relwidth=0.3, relheight=0.05, relx=0.35, rely=0.3)
l_pass = Label(LoginFrame, text="Senha", background='#111111', font="Segoe 15", fg="white")
l_pass.place(relwidth=1, relheight=0.2, rely=0.35)
txtboxpass_log = Entry(LoginFrame, bg="#222222", border=0, fg="white", font="Segoe 15", show="*")
txtboxpass_log.place(relwidth=0.3, relheight=0.05, relx=0.35, rely=0.5)
login_bttn = Button(LoginFrame, text="ENTRAR", border="0", bg='#222222', fg="white", font="Segoe 20 bold", cursor="hand2", command=entrar)
login_bttn.place(relwidth=0.1, relheight=0.085, relx=0.45, rely=0.75)
login_bttn = Button(LoginFrame, text="?", border="0", bg='white', fg="black", font="Segoe 16 bold", cursor="hand2", command=info)
login_bttn.place(relwidth=0.02, relheight=0.06, relx=0.8, rely=0.9)
# RUN ROOT
root.mainloop()
|
en
| 0.485631
|
# LOGIN CHECK # MAIN FRAME # HEADER # SELECT CRYPTO # HEADER # INFO SOL # RUN WS # RUN SOL # HEADER # INFO BTC # RUN WS # RUN BTC # HEADER # INFO ETH # RUN WS # RUN ETH # RUN MAIN # DEF EXIT # INFO PAGE # INFO FRAME # HEADER # INFO # MAIN INITIAL FRAME # HEADER # LOGIN # RUN ROOT
| 3.154282
| 3
|
tools/c_mode.py
|
Melab/gvmt
| 0
|
6629888
|
import common
_exception = common.UnlocatedException
import sys
import itertools, re
import builtin, gtypes, operators
from stacks import Stack, CachingStack
def default_name(index):
return "gvmt_t%s" % index
_uid = 0
_return_type_codes = {
gtypes.i1 : 'RETURN_TYPE_I4',
gtypes.i2 : 'RETURN_TYPE_I4',
gtypes.i4 : 'RETURN_TYPE_I4',
gtypes.i8 : 'RETURN_TYPE_I8',
gtypes.u1 : 'RETURN_TYPE_I4',
gtypes.u2 : 'RETURN_TYPE_I4',
gtypes.u4 : 'RETURN_TYPE_I4',
gtypes.u8 : 'RETURN_TYPE_I8',
gtypes.f4 : 'RETURN_TYPE_F4',
gtypes.f8 : 'RETURN_TYPE_F8',
gtypes.r : 'RETURN_TYPE_R',
gtypes.p : 'RETURN_TYPE_P',
gtypes.v : 'RETURN_TYPE_V',
}
_suffices = {
gtypes.i1 : 'i',
gtypes.i2 : 'i',
gtypes.i4 : 'i',
gtypes.i8 : 'l',
gtypes.u1 : 'u',
gtypes.u2 : 'u',
gtypes.u4 : 'u',
gtypes.u8 : 'w',
gtypes.f4 : 'f',
gtypes.f8 : 'd',
gtypes.r : 'o' ,
gtypes.p : 'p'
}
_temp_index = 0
class CStack(object):
    """Model of the GVMT evaluation stack as emitted C code.

    Tracks a logical `offset` relative to the real C stack pointer
    `gvmt_sp`; pushes/pops adjust the offset and only flush_to_memory()
    actually emits a gvmt_sp adjustment.  `declarations` is a shared
    dict mapping emitted C temporary names to their C type names.
    """
    def __init__(self, declarations):
        # Net items pushed (negative) / popped (positive) relative to
        # the in-memory gvmt_sp.  (Sign convention is visible in
        # pop/push: pop decrements, push increments, and
        # flush_to_memory emits `gvmt_sp -= offset` when positive.)
        self.offset = 0
        self.declarations = declarations
    def pop(self, tipe, out):
        """Emit a read of the top stack slot into a fresh C temporary."""
        global _temp_index
        _temp_index += 1
        if tipe is gtypes.x:
            # gtypes.x: untyped slot -- keep the whole GVMT_StackItem union.
            self.declarations['gvmt_r%d' % _temp_index] = 'GVMT_StackItem'
            out << ' gvmt_r%d = gvmt_sp[%d];' % (_temp_index, -self.offset)
            self.offset -= 1
            return StackItem('gvmt_r%d' % _temp_index)
        else:
            self.declarations['gvmt_r%d' % _temp_index] = tipe.c_name
            out << ' gvmt_r%d = gvmt_sp[%d].%s;' % (_temp_index, -self.offset, _suffices[tipe])
            self.offset -= 1
            return Simple(tipe, 'gvmt_r%d' % _temp_index)
    def push(self, value, out):
        """Emit a store of *value* into the next (logical) stack slot."""
        self.offset += 1
        si = StackItem('gvmt_sp[%d]' % (-self.offset))
        out << ' %s = %s;' % (si.cast(value.tipe), value)
    def pick(self, tipe, index, out):
        """Emit a read of the slot *index* items down, without popping."""
        global _temp_index
        _temp_index += 1
        self.declarations['gvmt_r%d' % _temp_index] = tipe.c_name
        if self.offset < 0:
            out << ' gvmt_r%d = gvmt_sp[%s+%d].%s;' % (_temp_index, index, -self.offset, _suffices[tipe])
        else:
            out << ' gvmt_r%d = gvmt_sp[%s-%d].%s;' % (_temp_index, index, self.offset, _suffices[tipe])
        return Simple(tipe, 'gvmt_r%d' % _temp_index)
    def poke(self, index, value, out):
        """Emit a write of *value* into the slot *index* items down."""
        if self.offset < 0:
            si = StackItem('gvmt_sp[%s+%d]' % (index, -self.offset))
        else:
            si = StackItem('gvmt_sp[%s-%d]' % (index, self.offset))
        out << ' %s = %s;' % (si.cast(value.tipe), value)
    def flush_to_memory(self, out, ignore = 0):
        """Emit the pending gvmt_sp adjustment and reset the offset."""
        if self.offset:
            if self.offset < 0:
                out << ' gvmt_sp += %d;' % -self.offset
            else:
                out << ' gvmt_sp -= %d;' % self.offset
            self.offset = 0
    def top(self, out, cached = 0):
        """Emit a fresh pointer variable for the current (logical) top."""
        global _uid
        _uid += 1
        var = '__sp_top%d' % _uid
        total_offset = cached+self.offset
        if total_offset:
            if total_offset < 0:
                out << ' GVMT_StackItem *%s = gvmt_sp+%d;' % (var, -total_offset)
            else:
                out << ' GVMT_StackItem *%s = gvmt_sp-%d;' % (var, total_offset)
        else:
            out << ' GVMT_StackItem *%s = gvmt_sp;' % var
        return var
    def insert(self, offset, size, out):
        """Emit code opening a zero-filled gap of *size* slots at *offset*."""
        #Is offset a build-time constant?
        self.flush_to_memory(out)
        if offset == 0:
            out << ' gvmt_sp -= %s;' % size.cast(gtypes.iptr)
            loop_fmt = ' for (intptr_t i = 0; i < %s; i++) gvmt_sp[i].i = 0;'
            out << loop_fmt % size.cast(gtypes.iptr)
            return '%s' % self.top(out)
        else:
            # Shift the top *offset* items down, then zero the gap.
            out << ' gvmt_sp -= %s;' % size.cast(gtypes.iptr)
            loop_fmt = ' for (intptr_t i=0; i<%s; i++) gvmt_sp[i]=gvmt_sp[%s+i];'
            out << loop_fmt % (offset.cast(gtypes.iptr), size.cast(gtypes.iptr))
            loop_fmt = ' for (intptr_t i = 0; i < %s; i++) gvmt_sp[%s+i].i = 0;'
            out << loop_fmt % (size.cast(gtypes.iptr), offset.cast(gtypes.iptr))
            return '(%s+%s)' % (self.top(out) ,'%s' % offset)
    def drop(self, offset, size, out):
        """Emit code removing *size* slots starting *offset* items down."""
        self.flush_to_memory(out)
        if offset == 0:
            out << ' gvmt_sp += %s;' % size.cast(gtypes.iptr)
        else:
            loop_fmt = ' for (intptr_t i=0; i<%s; i++) gvmt_sp[%s+i]=gvmt_sp[i];'
            out << loop_fmt % (offset.cast(gtypes.iptr), size.cast(gtypes.iptr))
            out << ' gvmt_sp += %s;' % size.cast(gtypes.iptr)
    def comment(self, out):
        # Debug aid: dump the current logical offset into the output.
        out << 'Offset: %d\n' % self.offset
    def copy(self):
        """Return a CStack at the same offset, sharing `declarations`."""
        result = CStack(self.declarations)
        result.offset = self.offset
        return result
    def transform(self, other, transformer, out):
        """Emit the gvmt_sp adjustment needed to match *other*'s offset."""
        if self.offset != other.offset:
            out << ' gvmt_sp += %d;' % (self.offset - other.offset)
        self.offset = other.offset
    def store(self, out):
        # No cached values at this level; CachingStack overrides this.
        pass
class Expr(object):
    """Base class for C expression fragments with a GVMT type.

    Subclasses render themselves as C source via __str__; methods here
    build derived expressions (casts, loads, calls) or emit statements.
    """
    def __init__(self, tipe):
        assert tipe
        self.tipe = tipe
    def cast(self, tipe):
        """Return self, or a Cast wrapper when the type differs."""
        if self.tipe == tipe:
            return self
        else:
            return Cast(tipe, self)
    def __int__(self):
        # Generic expressions are not build-time integer constants;
        # callers use int(expr) + ValueError to test for constancy.
        raise ValueError
    def indir(self, tipe):
        """Return a load of *tipe* through this expression as a pointer."""
        return Indirection(tipe, self)
    def div(self, size):
        """Render this expression divided by constant *size* (C text)."""
        return '%s/%d' % (self, size)
    def call(self, tipe):
        """Render an indirect GVMT call through this function pointer."""
        call_fmt = '(((gvmt_funcptr)%s)(gvmt_sp, (GVMT_Frame)FRAME_POINTER))'
        return Simple(tipe, call_fmt % self)
    def n_call(self, tipe, params):
        """Render an indirect native call with *params* arguments."""
        if params:
            call_fmt = '(((gvmt_native_funcptr_%s)%s)(%s))'
            params = ', '.join([str(x) for x in params])
            return Simple(tipe, call_fmt % (tipe.suffix, self, params))
        else:
            call_fmt = '(((gvmt_native_funcptr0_%s)%s)())'
            return Simple(tipe, call_fmt % (tipe.suffix, self))
    def store(self, decl, out):
        """Emit an assignment of this expression into a fresh temporary."""
        global _temp_index
        _temp_index += 1
        decl['gvmt_r%d' % _temp_index ] = self.tipe.c_name
        out << ' gvmt_r%d = %s;' % (_temp_index, self)
        return Simple(self.tipe, 'gvmt_r%d' % _temp_index)
    def pstore(self, tipe, value, out):
        """Emit a store of *value* through this expression as a pointer."""
        out << (' %s = %s; ' % (self.indir(tipe), value))
class StackItem(Expr):
    """An untyped stack slot (GVMT_StackItem union) expression."""
    def __init__(self, txt):
        Expr.__init__(self, gtypes.x)
        assert isinstance(txt, str)
        self.txt = txt
    def cast(self, tipe):
        # Select the union member for *tipe* rather than emitting a C cast.
        assert tipe in _suffices or tipe == gtypes.x
        if tipe == gtypes.v:
            return Simple(tipe, '(void)%s' % self.txt)
        elif tipe == gtypes.x:
            return self
        else:
            return Simple(tipe, '%s.%s' % (self.txt, _suffices[tipe]))
    def indir(self, tipe):
        # Dereference the pointer member of the union.
        return Simple(tipe, '(*(%s*)%s.p)' % (tipe.c_name, self.txt))
    def __str__(self):
        return self.txt
    def store(self, decl, out):
        # Already a named slot; nothing to materialise.
        return self
class Simple(Expr):
    """A typed expression rendered directly from its source text."""
    def __init__(self, tipe, txt):
        Expr.__init__(self, tipe)
        assert isinstance(txt, str)
        self.txt = txt
    def __str__(self):
        return self.txt
    def __int__(self):
        # Only pointer-sized-or-smaller integer text can be a build-time
        # constant; otherwise signal "not constant" via ValueError.
        if not self.tipe.is_int or self.tipe.size > gtypes.p.size:
            raise ValueError
        return int(self.txt)
    def store(self, decl, out):
        # gvmt_rN names are already temporaries; avoid a redundant copy.
        if self.txt.startswith('gvmt_r'):
            return self
        else:
            return Expr.store(self, decl, out)
class LAddr(Expr):
    """The address of an interpreter frame slot (FRAME_POINTER->name)."""
    def __init__(self, name):
        Expr.__init__(self, gtypes.p)
        self.name = name
    def __str__(self):
        return '(&FRAME_POINTER->%s)' % self.name
    def indir(self, tipe):
        # Reads refresh the C local from the frame slot as a side effect.
        return Simple(tipe, "(%s = FRAME_POINTER->%s)" % (self.name, self.name))
    def pstore(self, tipe, value, out):
        # Writes update both the C local and the frame slot.
        out << (' %s = %s; ' % (self.name, value))
        out << (' FRAME_POINTER->%s = %s; ' % (self.name, self.name))
class Constant(Simple):
    """An integer literal; must fit in a 32-bit (signed or unsigned) range."""
    def __init__(self, tipe, val):
        assert val is not None
        assert -(2**31) <= val
        assert 2**32 > val
        if val >= 2**31:
            # Values above INT_MAX are emitted as hex (unsigned literal);
            # strip any Python 2 long 'L' suffix from hex().
            txt = hex(val)
            if txt[-1] in 'Ll':
                txt = txt[:-1]
        else:
            txt = str(val)
        Simple.__init__(self, tipe, txt)
    def div(self, x):
        # Fold the division at build time (integer division).
        assert('int' in self.tipe.c_name)
        return str(int(self.txt)/x)
    def __int__(self):
        return int(self.txt)
    def __str__(self):
        return self.txt
class Cast(Expr):
    """A C-level cast of another expression to a different GVMT type."""
    def __init__(self, tipe, expr):
        Expr.__init__(self, tipe)
        self.expr = expr
    def __str__(self):
        if (self.tipe == gtypes.f4 or self.expr.tipe == gtypes.f4 or
            self.tipe == gtypes.f8 or self.expr.tipe == gtypes.f8):
            # Float<->other reinterpretation goes via the stack-item
            # union (bit-pattern reinterpret, not a value conversion).
            assert self.tipe.size == self.expr.tipe.size
            si = StackItem('((GVMT_StackItem)%s)' % self.expr)
            return si.cast(self.tipe).__str__()
        elif self.tipe == gtypes.p and self.expr.tipe.size < gtypes.p.size:
            # Widen through intptr_t before converting to a pointer.
            return '((void*)(intptr_t)(%s))' % self.expr
        else:
            return '((%s)(%s))' % (self.tipe.c_name, self.expr)
class Binary(Expr):
    """A binary C operator applied to two sub-expressions."""
    def __init__(self, tipe, left, op, right):
        Expr.__init__(self, tipe)
        assert isinstance(left, Expr)
        assert isinstance(right, Expr)
        self.left = left
        self.op = op
        self.right = right
    def __str__(self):
        return '(%s%s%s)' % (self.left, self.op.c_name, self.right)
    def div(self, x):
        # Distribute the division over +/- so each side can fold it.
        if self.op == operators.add or self.op == operators.sub:
            return '(%s%s%s)' % (self.left.div(x), self.op.c_name,
                                 self.right.div(x))
        else:
            return '%s/%d' % (self, x)
class LeftShift(Binary):
    """Left-shift; knows how to cancel a division by a power of two."""
    def __init__(self, tipe, left, right):
        Binary.__init__(self, tipe, left, operators.lsh, right)
    def div(self, size):
        # (x << log2(size)) / size  ==  x; index 1/2/4/8 -> shift 0/1/2/3.
        log2 = ( '', '0', '1', '', '2', '', '', '', '3')
        if str(self.right) == log2[size]:
            return str(self.left)
        else:
            return '%s/%d' % (self, size)
    def __str__(self):
        try:
            return '(%s<<%s)' % (self.left, self.right)
        # Bug fix: was a bare `except:`, which also swallows
        # KeyboardInterrupt/SystemExit; narrow to Exception.
        except Exception:
            return Binary.__str__(self)
class PointerAdd(Binary):
    """Pointer +/- integer arithmetic, emitted as char* arithmetic."""
    def __init__(self, tipe, left, op, right):
        Binary.__init__(self, tipe, left, op, right)
    def __str__(self):
        if self.left.tipe == gtypes.p or self.left.tipe == gtypes.r:
            return '(((char*)%s)%s%s)' % (self.left, self.op.c_name,
                                          self.right.cast(gtypes.iptr))
        else:
            if self.right.tipe == gtypes.p:
                # Commuted form: only addition is legal this way round.
                assert self.op.c_name == '+'
                return '(((char*)%s)+%s)' % (self.right,
                                             self.left.cast(gtypes.iptr))
            else:
                return '((char*)(%s%s%s))' % (self.left.cast(gtypes.iptr),
                             self.op.c_name, self.right.cast(gtypes.iptr))
class Indirection(Expr):
    """A typed load through another expression (via the GVMT_memory view)."""
    def __init__(self, tipe, expr):
        Expr.__init__(self, tipe)
        self.expr = expr
    def __str__(self):
        return '(((GVMT_memory*)%s)->%s)' % (self.expr, self.tipe.suffix)
class Address(Expr):
    """The address of a named external symbol."""
    def __init__(self, txt, externals):
        Expr.__init__(self, gtypes.p)
        assert isinstance(txt, str)
        self.txt = txt
        # Maps external name -> its declared C type text (see indir()).
        self.externals = externals
    def __str__(self):
        return '&' + self.txt
    def indir(self, tipe):
        # When the declared type matches, read the symbol directly.
        tname = self.externals[self.txt].split()[0]
        if tname == tipe.c_name:
            return Simple(tipe, self.txt)
        else:
            return Indirection(tipe, '&' + self.txt)
    def call(self, tipe):
        # Known symbol: call directly rather than through a cast funcptr.
        return Simple(tipe, '%s(gvmt_sp, (GVMT_Frame)FRAME_POINTER)' % self.txt)
    def n_call(self, tipe, params):
        params = ', '.join([str(x) for x in params])
        return Simple(tipe, '%s(%s)' % (self.txt, params))
_next_label = 0
def _no_amp(x):
s = str(x)
if s and s[0] == '&':
return s[1:]
else:
return 'function'
class CMode(object):
    "Output C for immediate execution"
    def __init__(self, out, externals, gc_name):
        # *out* is the output buffer (written via <<); *externals* maps
        # external symbol names to declared C types; *gc_name* selects
        # the gvmt_<gc>_malloc entry point.
        global _next_label
        self.out = out
        # Bytes consumed from the instruction stream so far.
        self.stream_offset = 0
        # Values pushed back onto the instruction stream.
        self.stream_stack = []
        # Unique label namespace for this compound/top-level unit.
        self.label = _next_label
        _next_label += 1
        # temp index -> GVMT type, for TLoad/TStore checking.
        self.temp_types = {}
        self.stack = CachingStack(CStack({}))
        # temp index -> user-visible C name (see name()).
        self.names = {}
        # temp index -> declared struct type name (see type_name()).
        self.type_names = {}
        self.filename = ''
        self.externals = externals
        # Pending fall-through edges for block_end().
        self.next_edge_set = []
        # edge -> stack state at that edge (set up in compound()).
        self.edges = None
        # Pending arguments for native calls (see n_arg()/n_call()).
        self.n_args = []
        self.gc_name = gc_name
        # Reference temps currently cached in C locals (invalidated at
        # every GC-visible point).
        self.in_regs = set()
        # Frame-slot accounting for GC-visible reference temporaries.
        self.ref_base = 0
        self.ref_temps_count = 0
        self.ref_temps_max = 0
        self.mem_temps = []
        self.first_block = False
    def pload(self, tipe, array):
        """Load a *tipe* value through pointer expression *array*."""
        self._null_check(array)
        return array.indir(tipe)
    def pstore(self, tipe, array, value):
        """Store *value* (cast to *tipe*) through pointer *array*."""
        self._null_check(array)
        self.stack.store(self.out)
        array.pstore(tipe, value.cast(tipe), self.out)
    #If debug is on, insert extra checking code.
    def _check_ref_access(self, obj, offset, tipe):
        # Debug builds: emit C that checks the object shape at *offset*
        # matches the access type (reference vs non-reference) and that
        # the offset is within the object.
        if common.global_debug:
            if tipe == gtypes.r:
                comp = '<='
                expected = ''
                got = 'non-'
            else:
                comp = '>='
                expected = 'non-'
                got = ''
            shape_fmt = ' if(gvmt_get_shape_at_offset(%s, %s) %s 0)'
            self.out << shape_fmt % (obj, offset, comp)
            # Nested if: shape == 0 means the offset is past the end;
            # otherwise the shape's sign disagrees with the access type.
            self.out << shape_fmt % (obj, offset, '==')
            fatal_fmt = (' __gvmt_fatal("%%s:%%d: Invalid member access'
                         ' (offset %%d), %s \\n", __FILE__, __LINE__, %s);')
            what = 'past end of object'
            self.out << fatal_fmt % (what, offset)
            self.out << ' else'
            what = 'expected %sreference got %sreference' % (expected, got)
            self.out << fatal_fmt % (what, offset)
    def _null_check(self, obj):
        # Debug builds: emit a runtime NULL check before dereferencing.
        if common.global_debug:
            self.out << ' if(%s == NULL) ' % obj
            self.out << '__gvmt_fatal("%s:%d: Attempted use of NULL '
            self.out << 'reference/pointer\\n", __FILE__, __LINE__);'
    def rload(self, tipe, obj, offset):
        """Load a *tipe* field at *offset* from reference *obj*."""
        obj = obj.cast(gtypes.r)
        self._null_check(obj)
        self._check_ref_access(obj, offset, tipe)
        return PointerAdd(tipe, obj, operators.add, offset).indir(tipe)
    def field_is_null(self, is_null, obj, offset):
        """Build a test of whether the field at *offset* is (non-)null."""
        obj = obj.cast(gtypes.r)
        self._null_check(obj)
        l = PointerAdd(gtypes.iptr, obj, operators.add, offset).indir(gtypes.iptr)
        r = Constant(gtypes.iptr, 0)
        if is_null:
            return Binary(gtypes.iptr, l, operators.eq, r)
        else:
            return Binary(gtypes.iptr, l, operators.ne, r)
    def rstore(self, tipe, obj, offset, value):
        """Store *value* into the *tipe* field at *offset* of *obj*."""
        obj = obj.cast(gtypes.r)
        self._null_check(obj)
        self.stack.store(self.out)
        if common.global_debug:
            # Only check the shape once the object is initialised.
            self.out << ' if (gvmt_object_is_initialised(%s, %s))' % (obj,
                                                                      offset)
        self._check_ref_access(obj, offset, tipe)
        internal_ptr = PointerAdd(tipe, obj, operators.add, offset)
        internal_ptr.pstore(tipe, value.cast(tipe), self.out)
    def binary(self, tipe, left, op, right):
        """Build a binary operation; shifts and pointer math are special."""
        if op == operators.lsh:
            # Shift amount is always an i4 in the emitted C.
            return LeftShift(tipe, left.cast(tipe), right.cast(gtypes.i4))
        elif tipe == gtypes.p or tipe == gtypes.r:
            return PointerAdd(tipe, left, op, right)
        elif op == operators.rsh:
            return Binary(tipe, left.cast(tipe), op, right.cast(gtypes.i4))
        else:
            return Binary(tipe, left.cast(tipe), op, right.cast(tipe))
    def comparison(self, tipe, left, op, right):
        """Build a comparison; operands compared as *tipe*, result iptr."""
        return Binary(gtypes.iptr, left.cast(tipe), op, right.cast(tipe))
    def unary(self, tipe, op, arg):
        """Build a unary operation on *arg* cast to *tipe*."""
        return Simple(tipe, '(%s%s)' % (op.c_name, arg.cast(tipe)))
    def c_call(self, func, tipe, pcount):
        """Emit a GVMT call whose *pcount* parameters are popped by the
        caller (C calling convention), with debug-mode depth checking."""
        global _uid
        _uid += 1
        top = self.stack.top(self.out)
        if common.global_debug:
            self.out << ' gvmt_last_return_type = 0;'
        self._call(func, tipe)
        new_top = self.stack.top(self.out)
        if common.global_debug:
            #Turn off return type checking, need to implement in CC as well.
            #fmt = ' if(gvmt_last_return_type && gvmt_last_return_type != %s)'
            #self.out << fmt % _return_type_codes[tipe]
            #fmt = (' __gvmt_fatal("%%s:%%d:Incorrect return type, '
            #       'expected %s got %%s\\n", __FILE__, __LINE__,'
            #       'gvmt_return_type_names[gvmt_last_return_type]);')
            #self.out << fmt % tipe.suffix
            # Verify the callee did not pop more than pcount items.
            self.out << ' if(%s-%s > %s)' % (new_top, top, pcount)
            fmt = ' __gvmt_expect_v(__FILE__, __LINE__, "%s", %s, %s-%s);'
            self.out << fmt % (_no_amp(func), pcount, new_top, top)
        if pcount:
            if tipe != gtypes.v:
                # Save the return value before discarding the arguments.
                fmt = ' %s call_%d = *((%s*)gvmt_sp);'
                self.out << fmt % (tipe.c_name, _uid, tipe.c_name)
                self.stack.push(Simple(tipe, 'call_%d' % _uid), self.out)
            self.out << ' gvmt_sp = %s+%s;' % (top, pcount)
    def call(self, func, tipe):
        """Emit a plain GVMT call (stack-passing convention)."""
        if common.global_debug:
            top = self.stack.top(self.out)
            self.out << ' gvmt_last_return_type = 0;'
        self._call(func, tipe)
        new_top = self.stack.top(self.out)
        # Words occupied by the return value (currently unused below).
        if tipe == gtypes.v:
            twords = 0
        elif tipe.size <= gtypes.p.size:
            twords = 1
        else:
            twords = 2
        #if common.global_debug:
        #Turn off return type checking, need to implement in CC as well.
        #fmt = ' if(gvmt_last_return_type && gvmt_last_return_type != %s)'
        #self.out << fmt % _return_type_codes[tipe]
        #fmt = (' __gvmt_fatal("%%s:%%d:Incorrect return type, '
        #       'expected %s got %%s\\n", __FILE__, __LINE__,'
        #       'gvmt_return_type_names[gvmt_last_return_type]);')
        #self.out << fmt % tipe.suffix
    def _call(self, func, tipe):
        # Common tail: flush cached stack state (the callee may GC or
        # inspect the stack), then emit the call, which returns the new sp.
        global _uid
        _uid += 1
        self.in_regs = set()
        self.stack.flush_to_memory(self.out)
        self.out << ' gvmt_sp = %s;' % func.call(tipe)
def n_call(self, func, tipe, args, gc = True):
self.in_regs = set()
self.stack.flush_to_memory(self.out)
if gc:
enter = ' gvmt_enter_native(gvmt_sp, (GVMT_Frame)FRAME_POINTER);'
self.out << enter
# For now to check that this is OK - To be removed
if len(self.n_args) < args:
raise _exception('Insufficient native arguments for N_CALL')
arguments = self.n_args[-args:]
a = func.n_call(tipe, arguments)
self.n_args = self.n_args[:-args]
if tipe is gtypes.v:
self.out << ' %s;' % a
result = None
else:
self.stack.push(a, self.out)
self.stack.store(self.out)
result = self.stack.pop(tipe, self.out)
if gc:
self.out << ' gvmt_sp = gvmt_exit_native();'
return result
    def n_call_no_gc(self, func, tipe, args):
        """Native call without the enter/exit-native GC bracket."""
        return self.n_call(func, tipe, args, False)
    def alloca(self, tipe, size):
        """Allocate *size* items of *tipe* in the frame.

        Constant-size allocations in the first block become C locals
        (references go into the GC-visible refs array); anything else
        falls back to C alloca.  ALLOCA_R with a non-constant size is
        illegal since the collector could not see it.
        """
        global _uid
        if self.first_block:
            try:
                #Size is build time constant
                count = int(size)
                if tipe == gtypes.r:
                    # References live in the frame's refs array so the
                    # collector can find them.
                    ref_fmt = '(FRAME_POINTER->gvmt_frame.refs + %d)'
                    result = Simple(gtypes.p, ref_fmt % self.ref_temps_count)
                    self.ref_temps_count += count
                    if self.ref_temps_count > self.ref_temps_max:
                        self.ref_temps_max = self.ref_temps_count
                    return result
                else:
                    name = 'gvmt_frame_%d' % _uid
                    if count == 1:
                        self.out << '%s %s;' % (tipe.c_name, name)
                    else:
                        self.out << '%s %s[%s];' % (tipe.c_name, name, count)
                    _uid += 1
                    return Simple(gtypes.p, '&%s' % name)
            except ValueError:
                pass
        if tipe is gtypes.r:
            raise _exception('Illegal use of ALLOCA_R.')
        else:
            bytes = '%s*%s' % (size.cast(gtypes.iptr), tipe.size)
            return Simple(gtypes.p, 'alloca(%s)' % bytes)
    def gc_malloc(self, size):
        """Emit a (possibly collecting) heap allocation of *size* bytes."""
        self.stack.flush_to_memory(self.out)
        malloc = 'gvmt_%s_malloc(gvmt_sp, (GVMT_Frame)FRAME_POINTER, %s)'
        obj = Simple(gtypes.p, malloc % (self.gc_name, size.cast(gtypes.iptr)))
        # Allocation may trigger GC: cached references are invalid.
        self.in_regs = set()
        return obj
    def fully_initialised(self, obj):
        # Debug builds: assert every reference field has been written.
        if common.global_debug:
            self.out << ' gvmt_fully_initialized_check(%s);' % obj.cast(gtypes.r)
    def gc_malloc_fast(self, size):
        """Emit the non-collecting fast-path allocation."""
        global _uid
        _uid += 1
        c = Simple(gtypes.r, 'gvmt_malloc_%d' % _uid)
        malloc_fast = ' GVMT_Object gvmt_malloc_%d = gvmt_fast_allocate(%s);'
        self.out << malloc_fast % (_uid, size)
        return c
    def convert(self, from_type, to_type, value):
        """Value conversion (C cast) from *from_type* to *to_type*."""
        return Simple(to_type, '((%s)(%s))' % (to_type.c_name,
                                               value.cast(from_type)))
    def ip(self):
        raise _exception('Cannot use IP outside of intepreter context')
    def opcode(self):
        raise _exception('Cannot use OPCODE outside of intepreter context')
    def next_ip(self):
        raise _exception('Cannot use NEXT_IP outside of intepreter context')
    def laddr(self, name):
        raise _exception('Cannot use LADDR outside of intepreter context')
    def address(self, name):
        """Address of external symbol *name*."""
        return Address(name, self.externals)
    def symbol(self, index):
        """Runtime lookup of the symbol with number *index*."""
        return Simple(gtypes.p, '_gvmt_get_symbol(%s)' % index)
    def extend(self, tipe, value):
        # Is this right?
        return value.cast(tipe).cast(gtypes.iptr)
    def gc_safe(self):
        """Emit a GC safe point (checks gvmt_gc_waiting)."""
        # Uncache all references.
        self.in_regs = set()
        self.out << ' if(gvmt_gc_waiting) gvmt_gc_safe_point'
        self.out << '(gvmt_sp, (GVMT_Frame)FRAME_POINTER);'
    def compound(self, name, qualifiers, graph):
        """Emit a compound statement (C block) for flow-graph *graph*.

        Saves and restores all per-unit state (temps, names, labels,
        edges), buffers the body so temp declarations can be emitted
        first, walks the graph's basic blocks, and reserves frame
        slots for GC-visible temporaries.
        """
        global _next_label
        self.stack.store(self.out)
        saved_state = (self.temp_types, self.names, self.type_names, self.label,
                       self.edges, self.mem_temps, self.first_block)
        self.type_names = {}
        old_label = self.label
        self.label = _next_label
        _next_label += 1
        self.names = {}
        self.temp_types = {}
        self.out << ' { '
        # Buffer the body; declarations are prepended after the walk.
        out = self.out
        self.out = common.Buffer()
        self.edges = {}
        if graph.may_gc():
            # GC can occur inside: no cached references survive.
            old_in_regs = set()
        else:
            old_in_regs = self.in_regs
        offsets = {}
        last_bb = graph[-1]
        # First pass: record each temp's type from its stores.
        for bb in graph:
            for i in bb:
                if i.__class__ is builtin.TStore:
                    self.temp_types[i.index] = i.tipe
        # Second pass: any load of an unstored temp is an error.
        for bb in graph:
            for i in bb:
                if i.__class__ is builtin.TLoad:
                    if i.index not in self.temp_types:
                        unitialised_temp(graph, i.index)
        # Temps holding references across GC points go in frame slots.
        self.mem_temps = sorted(list(graph.gc_temps()))
        self.out << ' /* Mem temps %s */ ' % self.mem_temps
        old_ref_base = self.ref_base
        self.ref_base = self.ref_temps_count
        self.ref_temps_count = self.ref_base + len(self.mem_temps)
        if self.ref_temps_count > self.ref_temps_max:
            self.ref_temps_max = self.ref_temps_count
        self.first_block = True
        for bb in graph:
            #Set stack here.
            e = bb.parent
            if e:
                # Entering via an edge: restore the stack state and
                # stream offset recorded at that edge.
                if e not in self.edges:
                    c_stack = CStack(self.stack.declarations)
                    self.edges[e] = CachingStack(c_stack)
                if e not in offsets:
                    offsets[e] = self.stream_offset
                self.stack = self.edges[e].copy()
                self.stream_offset = offsets[e]
            else:
                assert bb == graph[0]
    #        self.stack.comment(self.out)
            self.next_edge_set.append(bb.child)
            self.in_regs = set()
            for i in bb:
    #            self.out << '/* %s */' % i.name
                i.process(self)
            if bb is not last_bb:
                self.block_end()
            self.next_edge_set.pop()
            self.first_block = False
        self.stack.store(self.out)
        # Emit declarations for all temps, then the buffered body.
        for i in self.temp_types:
            if i in self.type_names:
                tname = 'struct gvmt_object_%s*' % self.type_names[i]
            else:
                tname = self.temp_types[i].c_name
            if i in self.names:
                out << ' %s %s;' % (tname, self.names[i])
            else:
                out << ' %s gvmt_t%s;' % (tname, i)
        out << self.out
        (self.temp_types, self.names, self.type_names, self.label,
         self.edges, self.mem_temps, self.first_block) = saved_state
        self.out = out
        self.out << ' }\n'
        self.in_regs = old_in_regs
        self.ref_temps_count = self.ref_base
        self.ref_base = old_ref_base
    def transformer(self, s, o):
        # Callback for CStack.transform: emit the assignment that moves
        # value *s* into slot/temp *o* (cast direction depends on which
        # side is the untyped StackItem).
        if isinstance(o, StackItem):
            self.out << ' %s = %s;' % (o.cast(s.tipe), s)
        else:
            self.out << ' %s = %s;' % (o, s.cast(o.tipe))
    def block_end(self):
        """Close a basic block: reconcile stack state with the successor
        edge (or record it, if the edge has not been seen yet)."""
        next_edge = self.next_edge_set[-1]
        self.stack.flush_to_memory(self.out)
        if next_edge:
            if next_edge in self.edges:
                self.stack.transform(self.edges[next_edge],
                                     self.transformer, self.out)
            else:
                self.stack.store(self.out)
                self.edges[next_edge] = self.stack.copy()
            self.next_edge_set[-1] = None
    #    self.stack.comment(self.out)
    def constant(self, val):
        """Wrap a Python integer as a Constant expression."""
        assert isinstance(val, int) or isinstance(val, long)
        # Should validate that value fits in native int.
        return Constant(gtypes.iptr, val)
    def stream_fetch(self, size = 1):
        """Fetch *size* bytes (1, 2 or 4) from the instruction stream,
        combining them big-endian into a single value."""
        assert size in (1,2,4)
        s = '0'
        # Consume any pushed-back values first.
        while size and self.stream_stack:
            value = self.get()
            s = Simple(gtypes.iptr, '((%s << 8) | %s)' % (s, value))
            size -= 1
        if size == 4:
            # Whole-word fetch straight from the stream.
            ip = '(_gvmt_ip + %d)' % self.stream_offset
            self.stream_offset += 4
            return Simple(gtypes.iptr, '_gvmt_fetch_4(%s)' % ip)
        while size:
            value = self.get()
            s = Simple(gtypes.iptr, '((%s << 8) | %s)' % (s, value))
            size -= 1
        return s
    def _stream_item(self, index):
        # C expression for the stream byte at *index* from _gvmt_ip.
        return '_gvmt_ip[%d]' % index
    def get(self):
        """Take the next stream byte (pushed-back values first)."""
        if self.stream_stack:
            return self.stream_stack.pop()
        else:
            s = self._stream_item(self.stream_offset)
            self.stream_offset += 1
            return Simple(gtypes.iptr, s)
    def stream_peek(self, index):
        """Look *index* bytes ahead in the stream without consuming."""
        if index < len(self.stream_stack):
            return self.stream_stack[-index-1]
        else:
            index = self.stream_offset + index-len(self.stream_stack)
            return Simple(gtypes.iptr, self._stream_item(index))
    def immediate_add(self, l, op, r):
        """Textual (build-time) arithmetic on stream immediates."""
        return Simple(gtypes.iptr, '%s%s%s' % (l, op, r))
    def stream_push(self, value):
        """Push *value* back onto the instruction stream."""
        assert isinstance(value, Expr)
        self.stream_stack.append(value)
    def stack_pop(self, tipe = gtypes.x):
        """Pop the top of the evaluation stack as *tipe*."""
        return self.stack.pop(tipe, self.out)
    def top_level(self, name, qualifiers, graph):
        """Emit a whole function body for *graph*, defining FRAME_POINTER.

        If any GC-visible reference temporaries were needed, a frame
        struct with a refs array is declared and linked to the caller's
        frame; otherwise the caller's frame is reused directly.
        """
        out = self.out
        self.out = common.Buffer()
        self.compound(name, qualifiers, graph)
        if self.ref_temps_max:
            out << '\n#define FRAME_POINTER (&gvmt_frame)\n'
            out << ' struct { struct gvmt_frame gvmt_frame; '
            out << 'GVMT_Object refs[%d]; } gvmt_frame;' % self.ref_temps_max
            out << ' gvmt_frame.gvmt_frame.previous = _gvmt_caller_frame;'
            out << ' gvmt_frame.gvmt_frame.count = %d;' % self.ref_temps_max
            # Clear the refs so the collector never sees garbage.
            for i in range(self.ref_temps_max):
                out << ' gvmt_frame.gvmt_frame.refs[%d] = 0;' % i
        else:
            out << '\n#define FRAME_POINTER _gvmt_caller_frame\n'
        out << self.out
        self.out = out
        out.no_line()
        self.out << '#undef FRAME_POINTER\n'
    def declarations(self, out):
        """Emit the C declarations for all stack temporaries used."""
        for name in self.stack.declarations:
            out << ' %s %s;' % (self.stack.declarations[name], name)
    def stack_push(self, value):
        """Push *value* onto the evaluation stack."""
        assert isinstance(value, Expr)
        self.stack.push(value, self.out)
    def stack_pick(self, tipe, index):
        """Read the stack slot *index* items down as *tipe*."""
        return self.stack.pick(tipe, index, self.out)
    def stack_poke(self, index, value):
        """Write *value* into the stack slot *index* items down."""
        self.stack.poke(index, value, self.out)
    def sign(self, val):
        """Sign-extend an i4 value to i8."""
        return Simple(gtypes.i8, '((int64_t)%s)' % val.cast(gtypes.i4))
    def pin(self, val):
        """Pin an object so the collector cannot move it."""
        return Simple(gtypes.p, 'gvmt_gc_pin(%s)' % val.cast(gtypes.p))
    def pinned_object(self, val):
        """Treat a pointer as a reference; debug builds verify pinning."""
        if common.global_debug:
            fatal_fmt = ('__gvmt_fatal("%%s:%%d: Object at 0x%%x is not pinned\\n"'
                         ', __FILE__, __LINE__, (size_t)%s);')
            self.out << 'if(!gvmt_is_pinned(%s)) ' % val.cast(gtypes.p)
            self.out << fatal_fmt % val.cast(gtypes.p)
        return val.cast(gtypes.r)
    def zero(self, val):
        """Zero-extend a u4 value to u8."""
        return Simple(gtypes.u8, '((uint64_t)%s)' % val.cast(gtypes.u4))
    def name(self, index, name):
        """Give temp *index* the user-visible C name *name*.

        Renaming an already-named temp is an error; a name that is
        already taken by another temp is silently ignored.
        """
        if index in self.names and name != self.names[index]:
            if self.names[index] == default_name(index):
                raise _exception("Temp %d named '%s' after use" % (index, name))
            else:
                raise _exception("Renaming of temp %d from '%s' to '%s'" %
                                 (index, self.names[index], name))
        # name may have already been used for another variable:
        if name not in self.names.values():
            self.names[index] = name
    def type_name(self, index, name):
        """Declare temp *index* as 'struct gvmt_object_<name>*'."""
        if index in self.type_names and name != self.type_names[index]:
            raise _exception("Renaming of type of temp %d from '%s' to '%s'" %
                             (index, self.type_names[index], name))
        self.type_names[index] = name
    def rstack(self):
        """Flush and return a pointer to the current stack top."""
        self.stack.flush_to_memory(self.out)
        return Simple(gtypes.p, self.stack.top(self.out))
    def stack_flush(self):
        """Force all cached stack state into memory."""
        self.stack.flush_to_memory(self.out)
    def stack_insert(self, offset, size):
        """Open a zeroed gap in the stack; returns a pointer to it."""
        return Simple(gtypes.p, self.stack.insert(offset, size, self.out))
    def hop(self, index):
        """Unconditional branch to local target *index*."""
        self.block_end()
        self.out << ' goto target_%s_%d;' % (self.label, index)
    def jump(self, offset):
        raise _exception('Cannot use JUMP outside of intepreter context')
    def line(self, number):
        """Emit a #line directive for source-level debugging."""
    #    self.stack.comment(self.out)
        if self.filename:
            self.out << '\n#line %s "%s"\n ' % (number, self.filename)
    def file(self, name):
        """Record the source filename used by line()."""
        self.filename = name
    def local_branch(self, index, condition, t_or_f):
        """Branch to local target *index* when *condition* is true
        (t_or_f) or false (not t_or_f)."""
        self.block_end()
        if t_or_f:
            self.out << ' if(%s) ' % condition
        else:
            self.out << ' if(!(%s)) ' % condition
        self.out << 'goto target_%s_%d;' % (self.label, index)
    def target(self, index):
        """Emit the label for local branch target *index*."""
        self.stack.store(self.out)
        self.out << ' target_%s_%d: ((void)0);' % (self.label, index)
    def tload(self, tipe, index):
        """Read temp *index* as *tipe*, reloading GC-visible temps from
        their frame slot when not currently cached in a C local."""
        if index in self.names:
            name = self.names[index]
        else:
            name = default_name(index)
            self.names[index] = name
        if index in self.type_names:
            cast = '(struct gvmt_object_%s*)' % self.type_names[index]
        else:
            cast = ''
        if index in self.mem_temps and index not in self.in_regs:
            # Refresh the C local from the frame's refs array.
            self.out << ' %s = %sFRAME_POINTER->gvmt_frame.refs[%d];' % (name,
                        cast, self.ref_base + self.mem_temps.index(index))
            self.in_regs.add(index)
        if cast:
            name = '((GVMT_Object)%s)' % name
        if index not in self.temp_types:
            raise _exception("Undefined temp %d" % index)
        if tipe != self.temp_types[index]:
            if not tipe.compatible(self.temp_types[index]):
                raise _exception("Reuse of temp %d type %s with type %s" % (
                    index, self.temp_types[index], tipe))
            else:
                return Simple(self.temp_types[index], name).cast(tipe)
        else:
            return Simple(tipe, name)
    def tstore(self, tipe, index, value):
        """Write *value* into temp *index*, mirroring GC-visible temps
        back to their frame slot."""
        self.stack.store(self.out)
        if tipe != self.temp_types[index]:
            if not tipe.compatible(self.temp_types[index]):
                fmt = "Reuse of temp %d type %s with type %s"
                raise _exception(fmt % (index, self.temp_types[index], tipe))
        if index in self.type_names:
            cast = '(struct gvmt_object_%s*)' % self.type_names[index]
        else:
            cast = ''
        if index in self.names:
            name = self.names[index]
        else:
            name = default_name(index)
            self.names[index] = name
        self.out << ' %s = %s%s;' % (name, cast, value.cast(tipe))
        if index in self.mem_temps:
            # Keep the frame slot in sync so the collector sees the ref.
            fmt = ' FRAME_POINTER->gvmt_frame.refs[%d] = (GVMT_Object)%s;'
            self.out << fmt % (self.ref_base+self.mem_temps.index(index), name)
            self.in_regs.discard(index)
    def return_(self, type):
        """Emit a function return; debug builds record the return type."""
        self.stack.flush_to_memory(self.out)
        if common.global_debug:
            fmt = ' gvmt_last_return_type = %s;'
            self.out << fmt % _return_type_codes[type]
        self.out << ' return gvmt_sp;'
    def far_jump(self, addr):
        raise _exception('Cannot use FAR_JUMP outside of intepreter context')
    def stack_drop(self, offset, size):
        """Remove *size* items starting *offset* items down the stack."""
        self.stack.drop(offset, size, self.out)
    def close(self):
        """Finish an instruction: advance _gvmt_ip past the consumed
        stream bytes and flush the stack.  Pushed-back stream values
        must have been consumed."""
        if self.stream_offset:
            self.out << ' _gvmt_ip += %d;' % self.stream_offset
        if self.stream_stack:
            msg = 'Value(s) pushed back to stream at end of instruction'
            raise _exception(msg)
        self.stream_offset = 0
    #    self.stack.comment(self.out)
        self.stack.flush_to_memory(self.out)
    def create_and_push_handler(self):
        """Emit creation of an exception handler capturing the stack."""
        self.stack.flush_to_memory(self.out)
        fmt = ' GvmtExceptionHandler __handler_%d = gvmt_create_and_push_handler();'
        self.out << fmt % _uid
        self.out << ' __handler_%d->sp = gvmt_sp;' % _uid
    def n_arg(self, tipe, val):
        """Queue *val* (as *tipe*) as an argument for a later n_call."""
        global _uid
        _uid += 1
        name = '_gvmt_narg_%s' % _uid
        self.out << '%s %s = %s;' % (tipe.c_name, name, val.cast(tipe))
        self.n_args.append(Simple(tipe, name))
def setjump(self):
c = Simple(gtypes.r, '__state_%d' % _uid)
self.out << 'gvmt_double_return_t val%d; ' % _uid
'val%d.ret = ' % _uid
fmt = 'val%d.ret = gvmt_setjump(&__handler_%d->registers, gvmt_sp); '
self.out << fmt % (_uid, _uid)
self.out << 'GVMT_Object %s = val%d.regs.ex; ' % (c, _uid)
self.out << 'gvmt_sp = val%d.regs.sp; ' % _uid
return c
    def push_current_state(self):
        """Create, push and arm an exception handler; returns the
        exception expression (see setjump)."""
        global _uid
        _uid += 1
        self.create_and_push_handler()
        return self.setjump()
    def discard_state(self):
        """Pop and free the current exception handler."""
        self.out << ' gvmt_pop_and_free_handler();'
    def _raise(self, value):
        """Emit a raise of reference *value*; does not return."""
        self.stack.flush_to_memory(self.out)
        self.out << ' gvmt_raise_exception(%s);'% value.cast(gtypes.r)
    def transfer(self, value):
        """Emit a control transfer carrying reference *value*."""
        self.stack.flush_to_memory(self.out)
        self.out << ' gvmt_transfer(%s, gvmt_sp);'% value.cast(gtypes.r)
    def gc_free_pointer_store(self, value):
        """Set the allocator's free pointer."""
        self.out << ' gvmt_gc_free_pointer = (GVMT_StackItem*)%s;' % value
    def gc_limit_pointer_store(self, value):
        """Set the allocator's limit pointer."""
        self.out << ' gvmt_gc_limit_pointer = (GVMT_StackItem*)%s;' % value
    def gc_limit_pointer_load(self):
        """Read the allocator's limit pointer."""
        return Simple(gtypes.p, 'gvmt_gc_limit_pointer')
    def gc_free_pointer_load(self):
        """Read the allocator's free pointer."""
        return Simple(gtypes.p, 'gvmt_gc_free_pointer')
def zero_memory(self, obj, size):
try:
size = int(size)
if size <= gtypes.p.size * 6:
size = (size + gtypes.p.size - 1) // gtypes.p.size
for i in range(size):
self.out << ' ((intptr*)%s)[%d] = 0;' % (obj, i)
except ValueError:
self.out << ' GVMT_SET_MEMORY(%s, %s, 0);' % (
obj, size.cast(gtypes.iptr))
    def lock(self, lock):
        """Emit an acquire of the lock word at pointer *lock*: an inline
        compare-and-swap fast path, falling back to gvmt_fast_lock."""
        #Inline uncontended case
        global _uid
        _uid += 1
        self.out << ' intptr_t* lock_%d = (intptr_t*)%s; ' % (_uid, lock)
        self.out << ' if(!COMPARE_AND_SWAP(lock_%d, ' % _uid
        self.out << 'GVMT_LOCKING_UNLOCKED, '
        self.out << 'gvmt_thread_id | GVMT_LOCKING_LOCKED))'
        # Contended path: make roots visible, then take the slow lock.
        self.out << ' gvmt_save_pointers(gvmt_sp, (GVMT_Frame)FRAME_POINTER);'
        self.out << ' gvmt_fast_lock(lock_%d);' % _uid
    def unlock(self, lock):
        """Emit a release of the lock word at pointer *lock*."""
        #Inline uncontended case:
        global _uid
        _uid += 1
        self.out << ' intptr_t* lock_%d = (intptr_t*)%s; ' % (_uid, lock)
        self.out << ' if(!COMPARE_AND_SWAP(lock_%d, ' % _uid
        self.out << 'gvmt_thread_id | GVMT_LOCKING_LOCKED, '
        self.out << 'GVMT_LOCKING_UNLOCKED)) '
        self.out << ' gvmt_fast_unlock(lock_%d);' % _uid
    def lock_internal(self, obj, offset):
        """Emit an acquire of a lock word embedded at *offset* in *obj*."""
        #Inline uncontended case
        global _uid
        _uid += 1
        if common.global_debug:
            self._check_ref_access(obj, offset, gtypes.iptr)
        self.out << ' intptr_t* lock_%d = (intptr_t*)(((char*)%s)+%s); ' % (_uid, obj, offset)
        self.out << ' if(!COMPARE_AND_SWAP(lock_%d, ' % _uid
        self.out << 'GVMT_LOCKING_UNLOCKED, '
        self.out << 'gvmt_thread_id | GVMT_LOCKING_LOCKED))'
        self.out << ' gvmt_fast_lock(lock_%d);' % _uid
    def unlock_internal(self, obj, offset):
        """Emit a release of a lock word embedded at *offset* in *obj*."""
        #Inline uncontended case:
        global _uid
        _uid += 1
        if common.global_debug:
            self._check_ref_access(obj, offset, gtypes.iptr)
        self.out << ' intptr_t* lock_%d = (intptr_t*)(((char*)%s)+%s); ' % (_uid, obj, offset)
        self.out << ' if(!COMPARE_AND_SWAP(lock_%d, ' % _uid
        self.out << 'gvmt_thread_id | GVMT_LOCKING_LOCKED, '
        self.out << 'GVMT_LOCKING_UNLOCKED)) '
        self.out << ' gvmt_fast_unlock(lock_%d);' % _uid
class IMode(CMode):
    """CMode specialised for interpreter instruction bodies.

    Adds access to the interpreter instruction pointer (_gvmt_ip) and
    restricts features (e.g. variable-sized alloca) that the
    interpreter context cannot support.
    """
    def __init__(self, temp, externals, gc_name, name):
        CMode.__init__(self, temp, externals, gc_name)
        # Name of the instruction being compiled.
        self.i_name = name
    def top_level(self, name, qualifiers, graph):
        # Instruction length = operand bytes consumed + 1 opcode byte.
        self.i_length = graph.deltas[0] + 1
        self.compound(name, qualifiers, graph)
    def push_current_state(self):
        """Handler setup; also saves/restores _gvmt_ip across setjump."""
        global _uid
        _uid += 1
        self.create_and_push_handler()
        self.out << ' __handler_%d->ip = _gvmt_ip;' % _uid
        c = self.setjump()
        self.out << ' __handler_%d = gvmt_exception_stack;' % _uid
        self.out << ' _gvmt_ip = __handler_%d->ip;' % _uid
        return c
    def ip(self):
        """Current instruction pointer."""
        return Simple(gtypes.p, '_gvmt_ip')
    def opcode(self):
        """Current opcode value."""
        return Simple(gtypes.iptr, '(GVMT_CURRENT_OPCODE)')
    def next_ip(self):
        """Address of the next instruction."""
        return Simple(gtypes.p, '(_gvmt_ip+%d)' % self.i_length)
    def laddr(self, name):
        """Address of interpreter frame variable *name*."""
        return LAddr(name)
    def far_jump(self, addr):
        """Jump to an absolute instruction address."""
        self.out << ' _gvmt_ip = %s;' % addr.cast(gtypes.p)
        self.stack.flush_to_memory(self.out)
        if common.token_threading:
            # Token-threaded dispatch: jump straight to the next handler.
            self.out << ' goto *gvmt_operand_table[*_gvmt_ip];'
        else:
            self.out << ' break;'
    def close(self):
        """End of instruction: advance _gvmt_ip and flush the stack."""
        if self.stream_offset:
            self.out << ' _gvmt_ip += %d;' % self.stream_offset
        if self.stream_stack:
            raise _exception(
                "Value(s) pushed back to stream at end of instruction")
        self.stream_offset = 0
        self.stack.flush_to_memory(self.out)
    def jump(self, offset):
        """Relative jump by a signed 16-bit *offset*."""
        self.out << ' _gvmt_ip += (int16_t)(%s);' % offset
        self.stack.flush_to_memory(self.out)
        if common.token_threading:
            self.out << ' goto *gvmt_operand_table[*_gvmt_ip];'
        else:
            self.out << ' break;'
    def alloca(self, tipe, size):
        """Constant-size, first-block, non-reference alloca only."""
        global _uid
        if tipe is gtypes.r:
            raise _exception('Illegal use of ALLOCA_R.')
        if self.first_block:
            try:
                #Size is build time constant
                count = int(size)
                name = 'gvmt_frame_%d' % _uid
                if count == 1:
                    self.out << '%s %s;' % (tipe.c_name, name)
                else:
                    self.out << '%s %s[%s];' % (tipe.c_name, name, count)
                _uid += 1
                return Simple(gtypes.p, '&%s' % name)
            except ValueError:
                msg = "Cannot use variable sized ALLOCA_X in interpreter code"
                raise _exception(msg)
        else:
            msg = "Use of ALLOCA_X in interpreter code must be in first block"
            raise _exception(msg)
def unitialised_temp(graph, index):
    """Report a load of temp *index* that is never stored.

    Scans *graph* tracking the most recent File/Line markers so the
    error can cite a source location; raises in either case.
    """
    filename = None
    line = None
    for block in graph:
        for instr in block:
            klass = instr.__class__
            if klass == builtin.File:
                filename = instr.filename
            if klass == builtin.Line:
                line = instr.line
            if klass is builtin.Name and instr.index == index:
                if filename and line:
                    raise common.GVMTException(
                        "%s:%s:Uninitialised variable '%s'" %
                        (filename, line, instr.tname))
                else:
                    raise _exception("Uninitialised temp '%s'" % instr.tname)
|
import common
_exception = common.UnlocatedException
import sys
import itertools, re
import builtin, gtypes, operators
from stacks import Stack, CachingStack
# NOTE(review): this region duplicates the definitions that appear
# earlier in this file (default_name, _uid, _return_type_codes,
# _suffices, _temp_index) -- likely an accidental paste; confirm and
# deduplicate.
def default_name(index):
    # Conventional C identifier for GVMT temporary *index*.
    return "gvmt_t%s" % index
_uid = 0
# GVMT type -> C return-type-code constant (debug-mode checking).
_return_type_codes = {
    gtypes.i1 : 'RETURN_TYPE_I4',
    gtypes.i2 : 'RETURN_TYPE_I4',
    gtypes.i4 : 'RETURN_TYPE_I4',
    gtypes.i8 : 'RETURN_TYPE_I8',
    gtypes.u1 : 'RETURN_TYPE_I4',
    gtypes.u2 : 'RETURN_TYPE_I4',
    gtypes.u4 : 'RETURN_TYPE_I4',
    gtypes.u8 : 'RETURN_TYPE_I8',
    gtypes.f4 : 'RETURN_TYPE_F4',
    gtypes.f8 : 'RETURN_TYPE_F8',
    gtypes.r : 'RETURN_TYPE_R',
    gtypes.p : 'RETURN_TYPE_P',
    gtypes.v : 'RETURN_TYPE_V',
}
# GVMT type -> GVMT_StackItem union member suffix.
_suffices = {
    gtypes.i1 : 'i',
    gtypes.i2 : 'i',
    gtypes.i4 : 'i',
    gtypes.i8 : 'l',
    gtypes.u1 : 'u',
    gtypes.u2 : 'u',
    gtypes.u4 : 'u',
    gtypes.u8 : 'w',
    gtypes.f4 : 'f',
    gtypes.f8 : 'd',
    gtypes.r : 'o' ,
    gtypes.p : 'p'
}
_temp_index = 0
class CStack(object):
    """Model of the GVMT data stack used while emitting C code.

    `offset` records the net number of items logically pushed
    (positive) or popped (negative) since the C stack pointer
    `gvmt_sp` was last adjusted; flush_to_memory() reconciles the two.
    The stack appears to grow toward lower addresses: flushing a
    positive offset emits `gvmt_sp -= offset`.
    `declarations` maps generated temporary names to their C types so
    the caller can emit the declarations at function scope.
    """
    def __init__(self, declarations):
        self.offset = 0
        self.declarations = declarations
    def pop(self, tipe, out):
        """Emit a read of the top item into a fresh gvmt_rN temp; return it."""
        global _temp_index
        _temp_index += 1
        if tipe is gtypes.x:
            # Untyped: copy the whole GVMT_StackItem union.
            self.declarations['gvmt_r%d' % _temp_index] = 'GVMT_StackItem'
            out << ' gvmt_r%d = gvmt_sp[%d];' % (_temp_index, -self.offset)
            self.offset -= 1
            return StackItem('gvmt_r%d' % _temp_index)
        else:
            self.declarations['gvmt_r%d' % _temp_index] = tipe.c_name
            out << ' gvmt_r%d = gvmt_sp[%d].%s;' % (_temp_index, -self.offset, _suffices[tipe])
            self.offset -= 1
            return Simple(tipe, 'gvmt_r%d' % _temp_index)
    def push(self, value, out):
        """Emit a store of *value* into the next (uncommitted) stack slot."""
        self.offset += 1
        si = StackItem('gvmt_sp[%d]' % (-self.offset))
        out << ' %s = %s;' % (si.cast(value.tipe), value)
    def pick(self, tipe, index, out):
        """Emit a read of the item *index* slots down; return a fresh temp."""
        global _temp_index
        _temp_index += 1
        self.declarations['gvmt_r%d' % _temp_index] = tipe.c_name
        # Compensate for the unflushed offset when indexing off gvmt_sp.
        if self.offset < 0:
            out << ' gvmt_r%d = gvmt_sp[%s+%d].%s;' % (_temp_index, index, -self.offset, _suffices[tipe])
        else:
            out << ' gvmt_r%d = gvmt_sp[%s-%d].%s;' % (_temp_index, index, self.offset, _suffices[tipe])
        return Simple(tipe, 'gvmt_r%d' % _temp_index)
    def poke(self, index, value, out):
        """Emit a store of *value* into the item *index* slots down."""
        if self.offset < 0:
            si = StackItem('gvmt_sp[%s+%d]' % (index, -self.offset))
        else:
            si = StackItem('gvmt_sp[%s-%d]' % (index, self.offset))
        out << ' %s = %s;' % (si.cast(value.tipe), value)
    def flush_to_memory(self, out, ignore = 0):
        """Emit the pending gvmt_sp adjustment and reset the offset."""
        if self.offset:
            if self.offset < 0:
                out << ' gvmt_sp += %d;' % -self.offset
            else:
                out << ' gvmt_sp -= %d;' % self.offset
            self.offset = 0
    def top(self, out, cached = 0):
        """Emit a pointer to the current logical stack top; return its name."""
        global _uid
        _uid += 1
        var = '__sp_top%d' % _uid
        total_offset = cached+self.offset
        if total_offset:
            if total_offset < 0:
                out << ' GVMT_StackItem *%s = gvmt_sp+%d;' % (var, -total_offset)
            else:
                out << ' GVMT_StackItem *%s = gvmt_sp-%d;' % (var, total_offset)
        else:
            out << ' GVMT_StackItem *%s = gvmt_sp;' % var
        return var
    def insert(self, offset, size, out):
        """Emit code opening a zeroed gap of *size* items, *offset* down.

        Returns a C expression for the start of the gap.
        """
        #Is offset a build-time constant?
        self.flush_to_memory(out)
        if offset == 0:
            out << ' gvmt_sp -= %s;' % size.cast(gtypes.iptr)
            loop_fmt = ' for (intptr_t i = 0; i < %s; i++) gvmt_sp[i].i = 0;'
            out << loop_fmt % size.cast(gtypes.iptr)
            return '%s' % self.top(out)
        else:
            # Grow, shift the top `offset` items down, then zero the gap.
            out << ' gvmt_sp -= %s;' % size.cast(gtypes.iptr)
            loop_fmt = ' for (intptr_t i=0; i<%s; i++) gvmt_sp[i]=gvmt_sp[%s+i];'
            out << loop_fmt % (offset.cast(gtypes.iptr), size.cast(gtypes.iptr))
            loop_fmt = ' for (intptr_t i = 0; i < %s; i++) gvmt_sp[%s+i].i = 0;'
            out << loop_fmt % (size.cast(gtypes.iptr), offset.cast(gtypes.iptr))
            return '(%s+%s)' % (self.top(out) ,'%s' % offset)
    def drop(self, offset, size, out):
        """Emit code removing *size* items located *offset* items down."""
        self.flush_to_memory(out)
        if offset == 0:
            out << ' gvmt_sp += %s;' % size.cast(gtypes.iptr)
        else:
            # Shift the top `offset` items up over the dropped region.
            loop_fmt = ' for (intptr_t i=0; i<%s; i++) gvmt_sp[%s+i]=gvmt_sp[i];'
            out << loop_fmt % (offset.cast(gtypes.iptr), size.cast(gtypes.iptr))
            out << ' gvmt_sp += %s;' % size.cast(gtypes.iptr)
    def comment(self, out):
        """Debug helper: write the current offset to *out*."""
        out << 'Offset: %d\n' % self.offset
    def copy(self):
        """Return a copy sharing `declarations` but with its own offset."""
        result = CStack(self.declarations)
        result.offset = self.offset
        return result
    def transform(self, other, transformer, out):
        """Emit the gvmt_sp adjustment needed to match *other*'s offset."""
        if self.offset != other.offset:
            out << ' gvmt_sp += %d;' % (self.offset - other.offset)
        self.offset = other.offset
    def store(self, out):
        # No cached values to spill in the plain (non-caching) stack.
        pass
class Expr(object):
    """Base class for C expressions of a known GVMT type.

    Subclasses implement __str__ to render the C text; this base
    provides casting, indirection, calls and spilling to a temp.
    """
    def __init__(self, tipe):
        assert tipe
        self.tipe = tipe
    def cast(self, tipe):
        """Return this expression viewed as *tipe* (wrapping in a Cast)."""
        if self.tipe == tipe:
            return self
        else:
            return Cast(tipe, self)
    def __int__(self):
        # Generic expressions are not compile-time constants.
        raise ValueError
    def indir(self, tipe):
        """Return the expression dereferencing this value as a *tipe*."""
        return Indirection(tipe, self)
    def div(self, size):
        """Return C text for this expression divided by the int *size*."""
        return '%s/%d' % (self, size)
    def call(self, tipe):
        """Call this value as a GVMT function pointer; result has *tipe*."""
        call_fmt = '(((gvmt_funcptr)%s)(gvmt_sp, (GVMT_Frame)FRAME_POINTER))'
        return Simple(tipe, call_fmt % self)
    def n_call(self, tipe, params):
        """Call this value as a native function pointer with *params*."""
        if params:
            call_fmt = '(((gvmt_native_funcptr_%s)%s)(%s))'
            params = ', '.join([str(x) for x in params])
            return Simple(tipe, call_fmt % (tipe.suffix, self, params))
        else:
            call_fmt = '(((gvmt_native_funcptr0_%s)%s)())'
            return Simple(tipe, call_fmt % (tipe.suffix, self))
    def store(self, decl, out):
        """Spill this expression to a fresh gvmt_rN temp; return the temp."""
        global _temp_index
        _temp_index += 1
        decl['gvmt_r%d' % _temp_index ] = self.tipe.c_name
        out << ' gvmt_r%d = %s;' % (_temp_index, self)
        return Simple(self.tipe, 'gvmt_r%d' % _temp_index)
    def pstore(self, tipe, value, out):
        """Emit a store of *value* through this expression as a pointer."""
        out << (' %s = %s; ' % (self.indir(tipe), value))
class StackItem(Expr):
    """An untyped GVMT_StackItem lvalue (type gtypes.x).

    Casting selects the appropriate union member via _suffices.
    """
    def __init__(self, txt):
        Expr.__init__(self, gtypes.x)
        assert isinstance(txt, str)
        self.txt = txt
    def cast(self, tipe):
        # NOTE(review): the assert rejects gtypes.v even though the
        # branch below handles it — presumably void casts never reach
        # here; confirm before relying on it.
        assert tipe in _suffices or tipe == gtypes.x
        if tipe == gtypes.v:
            return Simple(tipe, '(void)%s' % self.txt)
        elif tipe == gtypes.x:
            return self
        else:
            return Simple(tipe, '%s.%s' % (self.txt, _suffices[tipe]))
    def indir(self, tipe):
        # Dereference the pointer member of the union.
        return Simple(tipe, '(*(%s*)%s.p)' % (tipe.c_name, self.txt))
    def __str__(self):
        return self.txt
    def store(self, decl, out):
        # Already addressable storage; no spill needed.
        return self
class Simple(Expr):
    """A typed C expression held as literal text."""
    def __init__(self, tipe, txt):
        Expr.__init__(self, tipe)
        assert isinstance(txt, str)
        self.txt = txt
    def __str__(self):
        return self.txt
    def __int__(self):
        # Only pointer-sized-or-smaller integer text can be folded.
        if not self.tipe.is_int or self.tipe.size > gtypes.p.size:
            raise ValueError
        return int(self.txt)
    def store(self, decl, out):
        # Already a register temp: no need to spill again.
        if self.txt.startswith('gvmt_r'):
            return self
        else:
            return Expr.store(self, decl, out)
class LAddr(Expr):
    """Address of a named slot in the interpreter frame (FRAME_POINTER).

    Loads/stores keep a local C variable and the frame slot in sync.
    """
    def __init__(self, name):
        Expr.__init__(self, gtypes.p)
        self.name = name
    def __str__(self):
        return '(&FRAME_POINTER->%s)' % self.name
    def indir(self, tipe):
        # Refresh the local from the frame and yield the local.
        return Simple(tipe, "(%s = FRAME_POINTER->%s)" % (self.name, self.name))
    def pstore(self, tipe, value, out):
        # Write both the local copy and the frame slot.
        out << (' %s = %s; ' % (self.name, value))
        out << (' FRAME_POINTER->%s = %s; ' % (self.name, self.name))
class Constant(Simple):
    """A compile-time integer constant (must fit in 32 bits).

    Values >= 2**31 are rendered in hex (with any Python 2 'L' suffix
    stripped) so the C literal is unsigned-friendly.
    """
    def __init__(self, tipe, val):
        assert val is not None
        assert -(2**31) <= val
        assert 2**32 > val
        if val >= 2**31:
            txt = hex(val)
            if txt[-1] in 'Ll':
                txt = txt[:-1]
        else:
            txt = str(val)
        Simple.__init__(self, tipe, txt)
    def div(self, x):
        # Fold the division at build time (Python 2 '/': floor for ints).
        assert('int' in self.tipe.c_name)
        return str(int(self.txt)/x)
    def __int__(self):
        return int(self.txt)
    def __str__(self):
        return self.txt
class Cast(Expr):
    """A C cast of an expression to another GVMT type."""
    def __init__(self, tipe, expr):
        Expr.__init__(self, tipe)
        self.expr = expr
    def __str__(self):
        if (self.tipe == gtypes.f4 or self.expr.tipe == gtypes.f4 or
            self.tipe == gtypes.f8 or self.expr.tipe == gtypes.f8):
            # Float<->int reinterpretation goes via the stack-item union
            # (a plain C cast would convert the value, not the bits).
            assert self.tipe.size == self.expr.tipe.size
            si = StackItem('((GVMT_StackItem)%s)' % self.expr)
            return si.cast(self.tipe).__str__()
        elif self.tipe == gtypes.p and self.expr.tipe.size < gtypes.p.size:
            # Widen through intptr_t before the pointer cast.
            return '((void*)(intptr_t)(%s))' % self.expr
        else:
            return '((%s)(%s))' % (self.tipe.c_name, self.expr)
class Binary(Expr):
    """A binary C operation on two sub-expressions."""
    def __init__(self, tipe, left, op, right):
        Expr.__init__(self, tipe)
        assert isinstance(left, Expr)
        assert isinstance(right, Expr)
        self.left = left
        self.op = op
        self.right = right
    def __str__(self):
        return '(%s%s%s)' % (self.left, self.op.c_name, self.right)
    def div(self, x):
        # Distribute the division over +/- so operands may fold.
        if self.op == operators.add or self.op == operators.sub:
            return '(%s%s%s)' % (self.left.div(x), self.op.c_name,
                                 self.right.div(x))
        else:
            return '%s/%d' % (self, x)
class LeftShift(Binary):
    """A left-shift expression; division by a matching power of two
    cancels the shift entirely."""
    def __init__(self, tipe, left, right):
        Binary.__init__(self, tipe, left, operators.lsh, right)
    def div(self, size):
        # (x << log2(size)) / size == x
        log2 = ( '', '0', '1', '', '2', '', '', '', '3')
        if str(self.right) == log2[size]:
            return str(self.left)
        else:
            return '%s/%d' % (self, size)
    def __str__(self):
        try:
            return '(%s<<%s)' % (self.left, self.right)
        except:
            # NOTE(review): bare except kept as-is — it falls back to
            # the generic rendering if an operand fails to stringify.
            return Binary.__str__(self)
class PointerAdd(Binary):
    """Pointer arithmetic rendered as byte-wise (char*) arithmetic."""
    def __init__(self, tipe, left, op, right):
        Binary.__init__(self, tipe, left, op, right)
    def __str__(self):
        if self.left.tipe == gtypes.p or self.left.tipe == gtypes.r:
            return '(((char*)%s)%s%s)' % (self.left, self.op.c_name,
                                          self.right.cast(gtypes.iptr))
        else:
            if self.right.tipe == gtypes.p:
                # int + pointer: only addition commutes.
                assert self.op.c_name == '+'
                return '(((char*)%s)+%s)' % (self.right,
                                             self.left.cast(gtypes.iptr))
            else:
                # Neither side is a pointer: integer arithmetic, then
                # reinterpret the result as a char*.
                return '((char*)(%s%s%s))' % (self.left.cast(gtypes.iptr),
                       self.op.c_name, self.right.cast(gtypes.iptr))
class Indirection(Expr):
    """A load through a pointer, via the GVMT_memory overlay struct."""
    def __init__(self, tipe, expr):
        Expr.__init__(self, tipe)
        self.expr = expr
    def __str__(self):
        return '(((GVMT_memory*)%s)->%s)' % (self.expr, self.tipe.suffix)
class Address(Expr):
    """The address of a named external symbol.

    `externals` maps symbol name to its C declaration text, letting
    indir() use the symbol directly when the types already match.
    """
    def __init__(self, txt, externals):
        Expr.__init__(self, gtypes.p)
        assert isinstance(txt, str)
        self.txt = txt
        self.externals = externals
    def __str__(self):
        return '&' + self.txt
    def indir(self, tipe):
        tname = self.externals[self.txt].split()[0]
        if tname == tipe.c_name:
            # Same type: read the symbol directly, no cast needed.
            return Simple(tipe, self.txt)
        else:
            # NOTE(review): passes a str (not an Expr) to Indirection —
            # works because Indirection only formats it; confirm.
            return Indirection(tipe, '&' + self.txt)
    def call(self, tipe):
        # Known symbol: call it directly rather than via a funcptr cast.
        return Simple(tipe, '%s(gvmt_sp, (GVMT_Frame)FRAME_POINTER)' % self.txt)
    def n_call(self, tipe, params):
        params = ', '.join([str(x) for x in params])
        return Simple(tipe, '%s(%s)' % (self.txt, params))
#Counter used to make goto-label families (target_<label>_<n>) unique
#across compound statements.
_next_label = 0
def _no_amp(x):
s = str(x)
if s and s[0] == '&':
return s[1:]
else:
return 'function'
class CMode(object):
    """Output C for immediate execution.

    Translates GVMT instructions into C source text written to
    `self.out`, maintaining a cached model of the data stack
    (`self.stack`), unique-name generation, and (in debug builds)
    extra runtime checks.  IMode subclasses this for interpreter
    (bytecode-stream) context.

    Fixes relative to the original:
    * setjump(): removed a stray no-op bare-string statement.
    * zero_memory(): emit the valid C type ``intptr_t`` (was
      ``intptr``), and fall back to GVMT_SET_MEMORY for constant
      sizes larger than 6 pointer words (previously no zeroing code
      was emitted at all in that case).
    """
    def __init__(self, out, externals, gc_name):
        global _next_label
        self.out = out                     # output buffer (supports <<)
        self.stream_offset = 0             # bytes consumed from _gvmt_ip
        self.stream_stack = []             # values pushed back to the stream
        self.label = _next_label           # unique id for goto labels
        _next_label += 1
        self.temp_types = {}               # temp index -> GVMT type
        self.stack = CachingStack(CStack({}))
        self.names = {}                    # temp index -> C name
        self.type_names = {}               # temp index -> declared struct name
        self.filename = ''                 # current #line source file
        self.externals = externals         # symbol name -> C declaration
        self.next_edge_set = []            # pending fall-through edges
        self.edges = None                  # edge -> stack state at that edge
        self.n_args = []                   # pending native-call arguments
        self.gc_name = gc_name             # GC flavour for gvmt_X_malloc
        self.in_regs = set()               # ref temps currently valid in regs
        self.ref_base = 0                  # base index into frame ref array
        self.ref_temps_count = 0           # refs in use in the frame
        self.ref_temps_max = 0             # high-water mark for frame refs
        self.mem_temps = []                # temps that must live in the frame
        self.first_block = False           # inside the first basic block?
    def pload(self, tipe, array):
        """Load a *tipe* value through pointer *array*."""
        self._null_check(array)
        return array.indir(tipe)
    def pstore(self, tipe, array, value):
        """Store *value* (as *tipe*) through pointer *array*."""
        self._null_check(array)
        self.stack.store(self.out)
        array.pstore(tipe, value.cast(tipe), self.out)
    #If debug is on, insert extra checking code.
    def _check_ref_access(self, obj, offset, tipe):
        """Debug mode: emit a shape check for a member access on *obj*."""
        if common.global_debug:
            if tipe == gtypes.r:
                comp = '<='
                expected = ''
                got = 'non-'
            else:
                comp = '>='
                expected = 'non-'
                got = ''
            shape_fmt = ' if(gvmt_get_shape_at_offset(%s, %s) %s 0)'
            self.out << shape_fmt % (obj, offset, comp)
            self.out << shape_fmt % (obj, offset, '==')
            fatal_fmt = (' __gvmt_fatal("%%s:%%d: Invalid member access'
                         ' (offset %%d), %s \\n", __FILE__, __LINE__, %s);')
            what = 'past end of object'
            self.out << fatal_fmt % (what, offset)
            self.out << ' else'
            what = 'expected %sreference got %sreference' % (expected, got)
            self.out << fatal_fmt % (what, offset)
    def _null_check(self, obj):
        """Debug mode: emit a NULL check before using *obj*."""
        if common.global_debug:
            self.out << ' if(%s == NULL) ' % obj
            # Emitted in two pieces; the buffer must not insert a newline
            # inside the C string literal.
            self.out << '__gvmt_fatal("%s:%d: Attempted use of NULL '
            self.out << 'reference/pointer\\n", __FILE__, __LINE__);'
    def rload(self, tipe, obj, offset):
        """Load a *tipe* field at *offset* from reference *obj*."""
        obj = obj.cast(gtypes.r)
        self._null_check(obj)
        self._check_ref_access(obj, offset, tipe)
        return PointerAdd(tipe, obj, operators.add, offset).indir(tipe)
    def field_is_null(self, is_null, obj, offset):
        """Return an expression testing whether obj's field is (non-)NULL."""
        obj = obj.cast(gtypes.r)
        self._null_check(obj)
        l = PointerAdd(gtypes.iptr, obj, operators.add, offset).indir(gtypes.iptr)
        r = Constant(gtypes.iptr, 0)
        if is_null:
            return Binary(gtypes.iptr, l, operators.eq, r)
        else:
            return Binary(gtypes.iptr, l, operators.ne, r)
    def rstore(self, tipe, obj, offset, value):
        """Store *value* (as *tipe*) into obj's field at *offset*."""
        obj = obj.cast(gtypes.r)
        self._null_check(obj)
        self.stack.store(self.out)
        if common.global_debug:
            # Only shape-check objects that are already initialised.
            self.out << ' if (gvmt_object_is_initialised(%s, %s))' % (obj,
                                                                      offset)
            self._check_ref_access(obj, offset, tipe)
        internal_ptr = PointerAdd(tipe, obj, operators.add, offset)
        internal_ptr.pstore(tipe, value.cast(tipe), self.out)
    def binary(self, tipe, left, op, right):
        """Build a binary operation expression of the right subclass."""
        if op == operators.lsh:
            return LeftShift(tipe, left.cast(tipe), right.cast(gtypes.i4))
        elif tipe == gtypes.p or tipe == gtypes.r:
            return PointerAdd(tipe, left, op, right)
        elif op == operators.rsh:
            return Binary(tipe, left.cast(tipe), op, right.cast(gtypes.i4))
        else:
            return Binary(tipe, left.cast(tipe), op, right.cast(tipe))
    def comparison(self, tipe, left, op, right):
        """Compare two *tipe* values; result is an iptr boolean."""
        return Binary(gtypes.iptr, left.cast(tipe), op, right.cast(tipe))
    def unary(self, tipe, op, arg):
        """Build a unary operation expression."""
        return Simple(tipe, '(%s%s)' % (op.c_name, arg.cast(tipe)))
    def c_call(self, func, tipe, pcount):
        """Call a GVMT function taking *pcount* stack args, C style:
        pop the arguments and push at most one *tipe* result."""
        global _uid
        _uid += 1
        top = self.stack.top(self.out)
        if common.global_debug:
            self.out << ' gvmt_last_return_type = 0;'
        self._call(func, tipe)
        new_top = self.stack.top(self.out)
        if common.global_debug:
            #Turn off return type checking, need to implement in CC as well.
            #fmt = ' if(gvmt_last_return_type && gvmt_last_return_type != %s)'
            #self.out << fmt % _return_type_codes[tipe]
            #fmt = (' __gvmt_fatal("%%s:%%d:Incorrect return type, '
            #       'expected %s got %%s\\n", __FILE__, __LINE__,'
            #       'gvmt_return_type_names[gvmt_last_return_type]);')
            #self.out << fmt % tipe.suffix
            self.out << ' if(%s-%s > %s)' % (new_top, top, pcount)
            fmt = ' __gvmt_expect_v(__FILE__, __LINE__, "%s", %s, %s-%s);'
            self.out << fmt % (_no_amp(func), pcount, new_top, top)
        if pcount:
            if tipe != gtypes.v:
                # Read the single result from the callee's stack top.
                fmt = ' %s call_%d = *((%s*)gvmt_sp);'
                self.out << fmt % (tipe.c_name, _uid, tipe.c_name)
                self.stack.push(Simple(tipe, 'call_%d' % _uid), self.out)
            # Pop the arguments regardless of result.
            self.out << ' gvmt_sp = %s+%s;' % (top, pcount)
    def call(self, func, tipe):
        """Call a GVMT function; arguments/results stay on the stack."""
        if common.global_debug:
            # top() has the side effect of emitting the sp snapshot used
            # by the (currently disabled) return-type checks below.
            top = self.stack.top(self.out)
            self.out << ' gvmt_last_return_type = 0;'
        self._call(func, tipe)
        new_top = self.stack.top(self.out)
        # NOTE(review): twords is computed but unused; it feeds the
        # disabled debug checks below — kept for when they return.
        if tipe == gtypes.v:
            twords = 0
        elif tipe.size <= gtypes.p.size:
            twords = 1
        else:
            twords = 2
        #if common.global_debug:
        #Turn off return type checking, need to implement in CC as well.
        #fmt = ' if(gvmt_last_return_type && gvmt_last_return_type != %s)'
        #self.out << fmt % _return_type_codes[tipe]
        #fmt = (' __gvmt_fatal("%%s:%%d:Incorrect return type, '
        #       'expected %s got %%s\\n", __FILE__, __LINE__,'
        #       'gvmt_return_type_names[gvmt_last_return_type]);')
        #self.out << fmt % tipe.suffix
    def _call(self, func, tipe):
        """Emit the actual call; flushes the stack and invalidates
        cached references (the callee may move objects)."""
        global _uid
        _uid += 1
        self.in_regs = set()
        self.stack.flush_to_memory(self.out)
        self.out << ' gvmt_sp = %s;' % func.call(tipe)
    def n_call(self, func, tipe, args, gc = True):
        """Call a native function with *args* pending n_arg values.

        When *gc* is true the call is bracketed with enter/exit-native
        so the collector may run during the call.
        """
        self.in_regs = set()
        self.stack.flush_to_memory(self.out)
        if gc:
            enter = ' gvmt_enter_native(gvmt_sp, (GVMT_Frame)FRAME_POINTER);'
            self.out << enter
        # For now to check that this is OK - To be removed
        if len(self.n_args) < args:
            raise _exception('Insufficient native arguments for N_CALL')
        arguments = self.n_args[-args:]
        a = func.n_call(tipe, arguments)
        self.n_args = self.n_args[:-args]
        if tipe is gtypes.v:
            self.out << ' %s;' % a
            result = None
        else:
            self.stack.push(a, self.out)
            self.stack.store(self.out)
            result = self.stack.pop(tipe, self.out)
        if gc:
            self.out << ' gvmt_sp = gvmt_exit_native();'
        return result
    def n_call_no_gc(self, func, tipe, args):
        """Native call during which the collector must not run."""
        return self.n_call(func, tipe, args, False)
    def alloca(self, tipe, size):
        """Allocate *size* items of *tipe*.

        Constant-size allocations in the first block become C locals
        (or frame ref slots for gtypes.r); anything else falls back to
        C alloca().  ALLOCA_R with a non-constant size is illegal.
        """
        global _uid
        if self.first_block:
            try:
                #Size is build time constant
                count = int(size)
                if tipe == gtypes.r:
                    # References live in the GC-visible frame ref array.
                    ref_fmt = '(FRAME_POINTER->gvmt_frame.refs + %d)'
                    result = Simple(gtypes.p, ref_fmt % self.ref_temps_count)
                    self.ref_temps_count += count
                    if self.ref_temps_count > self.ref_temps_max:
                        self.ref_temps_max = self.ref_temps_count
                    return result
                else:
                    name = 'gvmt_frame_%d' % _uid
                    if count == 1:
                        self.out << '%s %s;' % (tipe.c_name, name)
                    else:
                        self.out << '%s %s[%s];' % (tipe.c_name, name, count)
                    _uid += 1
                    return Simple(gtypes.p, '&%s' % name)
            except ValueError:
                # Variable size: fall through to alloca().
                pass
        if tipe is gtypes.r:
            raise _exception('Illegal use of ALLOCA_R.')
        else:
            bytes = '%s*%s' % (size.cast(gtypes.iptr), tipe.size)
            return Simple(gtypes.p, 'alloca(%s)' % bytes)
    def gc_malloc(self, size):
        """Allocate *size* bytes from the GC heap (may collect)."""
        self.stack.flush_to_memory(self.out)
        malloc = 'gvmt_%s_malloc(gvmt_sp, (GVMT_Frame)FRAME_POINTER, %s)'
        obj = Simple(gtypes.p, malloc % (self.gc_name, size.cast(gtypes.iptr)))
        self.in_regs = set()
        return obj
    def fully_initialised(self, obj):
        """Debug mode: assert *obj* has all its fields initialised."""
        if common.global_debug:
            self.out << ' gvmt_fully_initialized_check(%s);' % obj.cast(gtypes.r)
    def gc_malloc_fast(self, size):
        """Inline fast-path allocation of *size* bytes."""
        global _uid
        _uid += 1
        c = Simple(gtypes.r, 'gvmt_malloc_%d' % _uid)
        malloc_fast = ' GVMT_Object gvmt_malloc_%d = gvmt_fast_allocate(%s);'
        self.out << malloc_fast % (_uid, size)
        return c
    def convert(self, from_type, to_type, value):
        """Value-converting cast from *from_type* to *to_type*."""
        return Simple(to_type, '((%s)(%s))' % (to_type.c_name,
                                               value.cast(from_type)))
    def ip(self):
        raise _exception('Cannot use IP outside of intepreter context')
    def opcode(self):
        raise _exception('Cannot use OPCODE outside of intepreter context')
    def next_ip(self):
        raise _exception('Cannot use NEXT_IP outside of intepreter context')
    def laddr(self, name):
        raise _exception('Cannot use LADDR outside of intepreter context')
    def address(self, name):
        """Address of external symbol *name*."""
        return Address(name, self.externals)
    def symbol(self, index):
        """Look up symbol *index* at runtime."""
        return Simple(gtypes.p, '_gvmt_get_symbol(%s)' % index)
    def extend(self, tipe, value):
        # Is this right?
        return value.cast(tipe).cast(gtypes.iptr)
    def gc_safe(self):
        """Emit a GC safe point; cached references become invalid."""
        # Uncache all references.
        self.in_regs = set()
        self.out << ' if(gvmt_gc_waiting) gvmt_gc_safe_point'
        self.out << '(gvmt_sp, (GVMT_Frame)FRAME_POINTER);'
    def compound(self, name, qualifiers, graph):
        """Emit a compound statement (flow graph) as a C block.

        Saves/restores per-compound state, pre-scans for temp types
        and uninitialised temps, assigns frame slots to GC-visible
        temps, then emits each basic block, reconciling stack state
        across edges.
        """
        global _next_label
        self.stack.store(self.out)
        saved_state = (self.temp_types, self.names, self.type_names, self.label,
                       self.edges, self.mem_temps, self.first_block)
        self.type_names = {}
        self.label = _next_label
        _next_label += 1
        self.names = {}
        self.temp_types = {}
        self.out << ' { '
        # Buffer the body so temp declarations can be emitted first.
        out = self.out
        self.out = common.Buffer()
        self.edges = {}
        if graph.may_gc():
            # A GC inside the compound invalidates cached refs.
            old_in_regs = set()
        else:
            old_in_regs = self.in_regs
        offsets = {}
        last_bb = graph[-1]
        # Pre-scan: record the type of every stored temp...
        for bb in graph:
            for i in bb:
                if i.__class__ is builtin.TStore:
                    self.temp_types[i.index] = i.tipe
        # ...and reject loads of temps that are never stored.
        for bb in graph:
            for i in bb:
                if i.__class__ is builtin.TLoad:
                    if i.index not in self.temp_types:
                        unitialised_temp(graph, i.index)
        self.mem_temps = sorted(list(graph.gc_temps()))
        self.out << ' /* Mem temps %s */ ' % self.mem_temps
        old_ref_base = self.ref_base
        self.ref_base = self.ref_temps_count
        self.ref_temps_count = self.ref_base + len(self.mem_temps)
        if self.ref_temps_count > self.ref_temps_max:
            self.ref_temps_max = self.ref_temps_count
        self.first_block = True
        for bb in graph:
            #Set stack here.
            e = bb.parent
            if e:
                if e not in self.edges:
                    c_stack = CStack(self.stack.declarations)
                    self.edges[e] = CachingStack(c_stack)
                if e not in offsets:
                    offsets[e] = self.stream_offset
                self.stack = self.edges[e].copy()
                self.stream_offset = offsets[e]
            else:
                assert bb == graph[0]
            self.next_edge_set.append(bb.child)
            self.in_regs = set()
            for i in bb:
                i.process(self)
            if bb is not last_bb:
                self.block_end()
            self.next_edge_set.pop()
            self.first_block = False
        self.stack.store(self.out)
        # Emit declarations for all temps, then splice in the body.
        for i in self.temp_types:
            if i in self.type_names:
                tname = 'struct gvmt_object_%s*' % self.type_names[i]
            else:
                tname = self.temp_types[i].c_name
            if i in self.names:
                out << ' %s %s;' % (tname, self.names[i])
            else:
                out << ' %s gvmt_t%s;' % (tname, i)
        out << self.out
        (self.temp_types, self.names, self.type_names, self.label,
         self.edges, self.mem_temps, self.first_block) = saved_state
        self.out = out
        self.out << ' }\n'
        self.in_regs = old_in_regs
        self.ref_temps_count = self.ref_base
        self.ref_base = old_ref_base
    def transformer(self, s, o):
        """Emit an assignment reconciling stack value *s* with slot *o*."""
        if isinstance(o, StackItem):
            self.out << ' %s = %s;' % (o.cast(s.tipe), s)
        else:
            self.out << ' %s = %s;' % (o, s.cast(o.tipe))
    def block_end(self):
        """Reconcile the stack with the successor edge's expected state."""
        next_edge = self.next_edge_set[-1]
        self.stack.flush_to_memory(self.out)
        if next_edge:
            if next_edge in self.edges:
                self.stack.transform(self.edges[next_edge],
                                     self.transformer, self.out)
            else:
                self.stack.store(self.out)
                self.edges[next_edge] = self.stack.copy()
                self.next_edge_set[-1] = None
    def constant(self, val):
        """Wrap the Python integer *val* as a constant expression."""
        assert isinstance(val, int) or isinstance(val, long)
        # Should validate that value fits in native int.
        return Constant(gtypes.iptr, val)
    def stream_fetch(self, size = 1):
        """Fetch *size* bytes (big-endian) from the instruction stream,
        consuming any pushed-back values first."""
        assert size in (1,2,4)
        s = '0'
        while size and self.stream_stack:
            value = self.get()
            s = Simple(gtypes.iptr, '((%s << 8) | %s)' % (s, value))
            size -= 1
        if size == 4:
            # Whole word, nothing pushed back: single aligned fetch.
            ip = '(_gvmt_ip + %d)' % self.stream_offset
            self.stream_offset += 4
            return Simple(gtypes.iptr, '_gvmt_fetch_4(%s)' % ip)
        while size:
            value = self.get()
            s = Simple(gtypes.iptr, '((%s << 8) | %s)' % (s, value))
            size -= 1
        return s
    def _stream_item(self, index):
        """C expression for stream byte *index* past _gvmt_ip."""
        return '_gvmt_ip[%d]' % index
    def get(self):
        """Take one byte: a pushed-back value if any, else the stream."""
        if self.stream_stack:
            return self.stream_stack.pop()
        else:
            s = self._stream_item(self.stream_offset)
            self.stream_offset += 1
            return Simple(gtypes.iptr, s)
    def stream_peek(self, index):
        """Peek stream byte *index* ahead without consuming it."""
        if index < len(self.stream_stack):
            return self.stream_stack[-index-1]
        else:
            index = self.stream_offset + index-len(self.stream_stack)
            return Simple(gtypes.iptr, self._stream_item(index))
    def immediate_add(self, l, op, r):
        """Combine two immediates with *op* as raw C text."""
        return Simple(gtypes.iptr, '%s%s%s' % (l, op, r))
    def stream_push(self, value):
        """Push *value* back onto the instruction stream."""
        assert isinstance(value, Expr)
        self.stream_stack.append(value)
    def stack_pop(self, tipe = gtypes.x):
        """Pop the top stack item as *tipe*."""
        return self.stack.pop(tipe, self.out)
    def top_level(self, name, qualifiers, graph):
        """Emit a whole function body, wrapping it with the frame setup
        (a GC-visible ref frame if any ref temps were used)."""
        out = self.out
        self.out = common.Buffer()
        self.compound(name, qualifiers, graph)
        if self.ref_temps_max:
            out << '\n#define FRAME_POINTER (&gvmt_frame)\n'
            out << ' struct { struct gvmt_frame gvmt_frame; '
            out << 'GVMT_Object refs[%d]; } gvmt_frame;' % self.ref_temps_max
            out << ' gvmt_frame.gvmt_frame.previous = _gvmt_caller_frame;'
            out << ' gvmt_frame.gvmt_frame.count = %d;' % self.ref_temps_max
            for i in range(self.ref_temps_max):
                out << ' gvmt_frame.gvmt_frame.refs[%d] = 0;' % i
        else:
            out << '\n#define FRAME_POINTER _gvmt_caller_frame\n'
        out << self.out
        self.out = out
        out.no_line()
        self.out << '#undef FRAME_POINTER\n'
    def declarations(self, out):
        """Write the accumulated temp declarations to *out*."""
        for name in self.stack.declarations:
            out << ' %s %s;' % (self.stack.declarations[name], name)
    def stack_push(self, value):
        """Push *value* onto the stack."""
        assert isinstance(value, Expr)
        self.stack.push(value, self.out)
    def stack_pick(self, tipe, index):
        """Read the stack item *index* slots down as *tipe*."""
        return self.stack.pick(tipe, index, self.out)
    def stack_poke(self, index, value):
        """Write *value* into the stack item *index* slots down."""
        self.stack.poke(index, value, self.out)
    def sign(self, val):
        """Sign-extend a 32-bit value to 64 bits."""
        return Simple(gtypes.i8, '((int64_t)%s)' % val.cast(gtypes.i4))
    def pin(self, val):
        """Pin an object so the GC will not move it; returns a pointer."""
        return Simple(gtypes.p, 'gvmt_gc_pin(%s)' % val.cast(gtypes.p))
    def pinned_object(self, val):
        """Treat pointer *val* as a reference (debug: verify it is pinned)."""
        if common.global_debug:
            fatal_fmt = ('__gvmt_fatal("%%s:%%d: Object at 0x%%x is not pinned\\n"'
                         ', __FILE__, __LINE__, (size_t)%s);')
            self.out << 'if(!gvmt_is_pinned(%s)) ' % val.cast(gtypes.p)
            self.out << fatal_fmt % val.cast(gtypes.p)
        return val.cast(gtypes.r)
    def zero(self, val):
        """Zero-extend a 32-bit value to 64 bits."""
        return Simple(gtypes.u8, '((uint64_t)%s)' % val.cast(gtypes.u4))
    def name(self, index, name):
        """Give temp *index* the user-visible name *name* (once only)."""
        if index in self.names and name != self.names[index]:
            if self.names[index] == default_name(index):
                raise _exception("Temp %d named '%s' after use" % (index, name))
            else:
                raise _exception("Renaming of temp %d from '%s' to '%s'" %
                                 (index, self.names[index], name))
        # name may have already been used for another variable:
        if name not in self.names.values():
            self.names[index] = name
    def type_name(self, index, name):
        """Record the declared struct type name of temp *index*."""
        if index in self.type_names and name != self.type_names[index]:
            raise _exception("Renaming of type of temp %d from '%s' to '%s'" %
                             (index, self.type_names[index], name))
        self.type_names[index] = name
    def rstack(self):
        """Flush and return a pointer to the stack top."""
        self.stack.flush_to_memory(self.out)
        return Simple(gtypes.p, self.stack.top(self.out))
    def stack_flush(self):
        """Commit the cached stack state to memory."""
        self.stack.flush_to_memory(self.out)
    def stack_insert(self, offset, size):
        """Open a zeroed gap in the stack; returns its address."""
        return Simple(gtypes.p, self.stack.insert(offset, size, self.out))
    def hop(self, index):
        """Unconditional local jump to target *index*."""
        self.block_end()
        self.out << ' goto target_%s_%d;' % (self.label, index)
    def jump(self, offset):
        raise _exception('Cannot use JUMP outside of intepreter context')
    def line(self, number):
        """Emit a #line directive for source line *number*."""
        if self.filename:
            self.out << '\n#line %s "%s"\n ' % (number, self.filename)
    def file(self, name):
        """Record the current source file for #line directives."""
        self.filename = name
    def local_branch(self, index, condition, t_or_f):
        """Branch to target *index* if *condition* is (not) true."""
        self.block_end()
        if t_or_f:
            self.out << ' if(%s) ' % condition
        else:
            self.out << ' if(!(%s)) ' % condition
        self.out << 'goto target_%s_%d;' % (self.label, index)
    def target(self, index):
        """Emit the label for local jump target *index*."""
        self.stack.store(self.out)
        self.out << ' target_%s_%d: ((void)0);' % (self.label, index)
    def tload(self, tipe, index):
        """Load temp *index* as *tipe*, reloading GC-visible temps from
        the frame if they are not currently valid in a register."""
        if index in self.names:
            name = self.names[index]
        else:
            name = default_name(index)
            self.names[index] = name
        if index in self.type_names:
            cast = '(struct gvmt_object_%s*)' % self.type_names[index]
        else:
            cast = ''
        if index in self.mem_temps and index not in self.in_regs:
            self.out << ' %s = %sFRAME_POINTER->gvmt_frame.refs[%d];' % (name,
                cast, self.ref_base + self.mem_temps.index(index))
            self.in_regs.add(index)
        if cast:
            name = '((GVMT_Object)%s)' % name
        if index not in self.temp_types:
            raise _exception("Undefined temp %d" % index)
        if tipe != self.temp_types[index]:
            if not tipe.compatible(self.temp_types[index]):
                raise _exception("Reuse of temp %d type %s with type %s" % (
                    index, self.temp_types[index], tipe))
            else:
                return Simple(self.temp_types[index], name).cast(tipe)
        else:
            return Simple(tipe, name)
    def tstore(self, tipe, index, value):
        """Store *value* into temp *index*, mirroring GC-visible temps
        into the frame ref array."""
        self.stack.store(self.out)
        if tipe != self.temp_types[index]:
            if not tipe.compatible(self.temp_types[index]):
                fmt = "Reuse of temp %d type %s with type %s"
                raise _exception(fmt % (index, self.temp_types[index], tipe))
        if index in self.type_names:
            cast = '(struct gvmt_object_%s*)' % self.type_names[index]
        else:
            cast = ''
        if index in self.names:
            name = self.names[index]
        else:
            name = default_name(index)
            self.names[index] = name
        self.out << ' %s = %s%s;' % (name, cast, value.cast(tipe))
        if index in self.mem_temps:
            fmt = ' FRAME_POINTER->gvmt_frame.refs[%d] = (GVMT_Object)%s;'
            self.out << fmt % (self.ref_base+self.mem_temps.index(index), name)
            self.in_regs.discard(index)
    def return_(self, type):
        """Emit the function return (result is on the stack)."""
        self.stack.flush_to_memory(self.out)
        if common.global_debug:
            fmt = ' gvmt_last_return_type = %s;'
            self.out << fmt % _return_type_codes[type]
        self.out << ' return gvmt_sp;'
    def far_jump(self, addr):
        raise _exception('Cannot use FAR_JUMP outside of intepreter context')
    def stack_drop(self, offset, size):
        """Drop *size* items located *offset* items down the stack."""
        self.stack.drop(offset, size, self.out)
    def close(self):
        """Finish the current instruction: advance _gvmt_ip past the
        consumed stream bytes and flush the stack."""
        if self.stream_offset:
            self.out << ' _gvmt_ip += %d;' % self.stream_offset
        if self.stream_stack:
            msg = 'Value(s) pushed back to stream at end of instruction'
            raise _exception(msg)
        self.stream_offset = 0
        self.stack.flush_to_memory(self.out)
    def create_and_push_handler(self):
        """Emit creation of an exception handler (caller bumps _uid)."""
        self.stack.flush_to_memory(self.out)
        fmt = ' GvmtExceptionHandler __handler_%d = gvmt_create_and_push_handler();'
        self.out << fmt % _uid
        self.out << ' __handler_%d->sp = gvmt_sp;' % _uid
    def n_arg(self, tipe, val):
        """Materialise *val* as a named native-call argument."""
        global _uid
        _uid += 1
        name = '_gvmt_narg_%s' % _uid
        self.out << '%s %s = %s;' % (tipe.c_name, name, val.cast(tipe))
        self.n_args.append(Simple(tipe, name))
    def setjump(self):
        """Emit the setjump for the current handler; returns the
        expression holding the exception object on re-entry."""
        c = Simple(gtypes.r, '__state_%d' % _uid)
        self.out << 'gvmt_double_return_t val%d; ' % _uid
        # (A stray no-op bare-string statement was removed here.)
        fmt = 'val%d.ret = gvmt_setjump(&__handler_%d->registers, gvmt_sp); '
        self.out << fmt % (_uid, _uid)
        self.out << 'GVMT_Object %s = val%d.regs.ex; ' % (c, _uid)
        self.out << 'gvmt_sp = val%d.regs.sp; ' % _uid
        return c
    def push_current_state(self):
        """Push an exception handler capturing the current state."""
        global _uid
        _uid += 1
        self.create_and_push_handler()
        return self.setjump()
    def discard_state(self):
        """Pop and free the current exception handler."""
        self.out << ' gvmt_pop_and_free_handler();'
    def _raise(self, value):
        """Emit a raise of exception object *value*."""
        self.stack.flush_to_memory(self.out)
        self.out << ' gvmt_raise_exception(%s);'% value.cast(gtypes.r)
    def transfer(self, value):
        """Emit a control transfer carrying *value*."""
        self.stack.flush_to_memory(self.out)
        self.out << ' gvmt_transfer(%s, gvmt_sp);'% value.cast(gtypes.r)
    def gc_free_pointer_store(self, value):
        self.out << ' gvmt_gc_free_pointer = (GVMT_StackItem*)%s;' % value
    def gc_limit_pointer_store(self, value):
        self.out << ' gvmt_gc_limit_pointer = (GVMT_StackItem*)%s;' % value
    def gc_limit_pointer_load(self):
        return Simple(gtypes.p, 'gvmt_gc_limit_pointer')
    def gc_free_pointer_load(self):
        return Simple(gtypes.p, 'gvmt_gc_free_pointer')
    def zero_memory(self, obj, size):
        """Emit C code zeroing *size* bytes starting at *obj*.

        Small constant sizes are unrolled word-by-word; anything else
        uses GVMT_SET_MEMORY.
        """
        try:
            size = int(size)
            if size <= gtypes.p.size * 6:
                # Unroll: one word store per pointer-sized chunk.
                # Fixed: 'intptr' was not a valid C type; use intptr_t
                # as the rest of this module does.
                words = (size + gtypes.p.size - 1) // gtypes.p.size
                for i in range(words):
                    self.out << ' ((intptr_t*)%s)[%d] = 0;' % (obj, i)
            else:
                # Fixed: large constant sizes previously emitted no
                # zeroing code at all.
                self.out << ' GVMT_SET_MEMORY(%s, %d, 0);' % (obj, size)
        except ValueError:
            self.out << ' GVMT_SET_MEMORY(%s, %s, 0);' % (
                obj, size.cast(gtypes.iptr))
    def lock(self, lock):
        #Inline uncontended case
        global _uid
        _uid += 1
        self.out << ' intptr_t* lock_%d = (intptr_t*)%s; ' % (_uid, lock)
        self.out << ' if(!COMPARE_AND_SWAP(lock_%d, ' % _uid
        self.out << 'GVMT_LOCKING_UNLOCKED, '
        self.out << 'gvmt_thread_id | GVMT_LOCKING_LOCKED))'
        # NOTE(review): only gvmt_save_pointers is guarded by the if;
        # gvmt_fast_lock runs unconditionally (unlike lock_internal,
        # where it is the guarded statement) — confirm intended.
        self.out << ' gvmt_save_pointers(gvmt_sp, (GVMT_Frame)FRAME_POINTER);'
        self.out << ' gvmt_fast_lock(lock_%d);' % _uid
    def unlock(self, lock):
        #Inline uncontended case:
        global _uid
        _uid += 1
        self.out << ' intptr_t* lock_%d = (intptr_t*)%s; ' % (_uid, lock)
        self.out << ' if(!COMPARE_AND_SWAP(lock_%d, ' % _uid
        self.out << 'gvmt_thread_id | GVMT_LOCKING_LOCKED, '
        self.out << 'GVMT_LOCKING_UNLOCKED)) '
        self.out << ' gvmt_fast_unlock(lock_%d);' % _uid
    def lock_internal(self, obj, offset):
        #Inline uncontended case
        global _uid
        _uid += 1
        if common.global_debug:
            self._check_ref_access(obj, offset, gtypes.iptr)
        self.out << ' intptr_t* lock_%d = (intptr_t*)(((char*)%s)+%s); ' % (_uid, obj, offset)
        self.out << ' if(!COMPARE_AND_SWAP(lock_%d, ' % _uid
        self.out << 'GVMT_LOCKING_UNLOCKED, '
        self.out << 'gvmt_thread_id | GVMT_LOCKING_LOCKED))'
        self.out << ' gvmt_fast_lock(lock_%d);' % _uid
    def unlock_internal(self, obj, offset):
        #Inline uncontended case:
        global _uid
        _uid += 1
        if common.global_debug:
            self._check_ref_access(obj, offset, gtypes.iptr)
        self.out << ' intptr_t* lock_%d = (intptr_t*)(((char*)%s)+%s); ' % (_uid, obj, offset)
        self.out << ' if(!COMPARE_AND_SWAP(lock_%d, ' % _uid
        self.out << 'gvmt_thread_id | GVMT_LOCKING_LOCKED, '
        self.out << 'GVMT_LOCKING_UNLOCKED)) '
        self.out << ' gvmt_fast_unlock(lock_%d);' % _uid
class IMode(CMode):
    """CMode variant for interpreter (instruction) context.

    Generates C inside the dispatch loop, where _gvmt_ip and the
    operand stream are available; overrides the stream/IP operations
    that CMode rejects.
    """
    def __init__(self, temp, externals, gc_name, name):
        CMode.__init__(self, temp, externals, gc_name)
        # Name of the interpreter instruction being compiled.
        self.i_name = name
    def top_level(self, name, qualifiers, graph):
        """Emit an instruction body; i_length is its encoded size."""
        self.i_length = graph.deltas[0] + 1
        self.compound(name, qualifiers, graph)
    def push_current_state(self):
        """Push an exception handler, additionally saving/restoring
        the instruction pointer across the setjump."""
        global _uid
        _uid += 1
        self.create_and_push_handler()
        self.out << ' __handler_%d->ip = _gvmt_ip;' % _uid
        c = self.setjump()
        self.out << ' __handler_%d = gvmt_exception_stack;' % _uid
        self.out << ' _gvmt_ip = __handler_%d->ip;' % _uid
        return c
    def ip(self):
        """The current instruction pointer."""
        return Simple(gtypes.p, '_gvmt_ip')
    def opcode(self):
        """The current opcode."""
        return Simple(gtypes.iptr, '(GVMT_CURRENT_OPCODE)')
    def next_ip(self):
        """Address of the next instruction."""
        return Simple(gtypes.p, '(_gvmt_ip+%d)' % self.i_length)
    def laddr(self, name):
        """Address of frame-local *name*."""
        return LAddr(name)
    def far_jump(self, addr):
        """Jump to absolute address *addr* (token-threaded or switch)."""
        self.out << ' _gvmt_ip = %s;' % addr.cast(gtypes.p)
        self.stack.flush_to_memory(self.out)
        if common.token_threading:
            self.out << ' goto *gvmt_operand_table[*_gvmt_ip];'
        else:
            self.out << ' break;'
    def close(self):
        """Finish the instruction: advance _gvmt_ip and flush."""
        if self.stream_offset:
            self.out << ' _gvmt_ip += %d;' % self.stream_offset
        if self.stream_stack:
            raise _exception(
                "Value(s) pushed back to stream at end of instruction")
        self.stream_offset = 0
        self.stack.flush_to_memory(self.out)
    def jump(self, offset):
        """Relative jump by signed 16-bit *offset*."""
        self.out << ' _gvmt_ip += (int16_t)(%s);' % offset
        self.stack.flush_to_memory(self.out)
        if common.token_threading:
            self.out << ' goto *gvmt_operand_table[*_gvmt_ip];'
        else:
            self.out << ' break;'
    def alloca(self, tipe, size):
        """Allocate `size` items of `tipe`; interpreter code only
        supports constant sizes in the first block (see raises)."""
        global _uid
        if tipe is gtypes.r:
            raise _exception('Illegal use of ALLOCA_R.')
        if self.first_block:
            try:
                #Size is build time constant
                count = int(size)
                name = 'gvmt_frame_%d' % _uid
                if count == 1:
                    self.out << '%s %s;' % (tipe.c_name, name)
                else:
                    self.out << '%s %s[%s];' % (tipe.c_name, name, count)
                _uid += 1
                return Simple(gtypes.p, '&%s' % name)
            except ValueError:
                msg = "Cannot use variable sized ALLOCA_X in interpreter code"
                raise _exception(msg)
        else:
            msg = "Use of ALLOCA_X in interpreter code must be in first block"
            raise _exception(msg)
def unitialised_temp(graph, index):
    """Raise an exception reporting an uninitialised temporary.

    Walks *graph* tracking the latest File/Line markers so the error
    can carry a source location; raises when the Name instruction for
    temp *index* is found.
    """
    filename = None
    line = None
    for bb in graph:
        for i in bb:
            if i.__class__ == builtin.File:
                filename = i.filename
            if i.__class__ == builtin.Line:
                line = i.line
            if i.__class__ is builtin.Name and i.index == index:
                if filename and line:
                    raise common.GVMTException(
                        "%s:%s:Uninitialised variable '%s'" %
                        (filename, line, i.tname))
                else:
                    raise _exception("Uninitialised temp '%s'" % i.tname)
|
en
| 0.523795
|
#Is offset a build-time constant? #If debug is on, insert extra checking code. #Turn off return type checking, need to implement in CC as well. #fmt = ' if(gvmt_last_return_type && gvmt_last_return_type != %s)' #self.out << fmt % _return_type_codes[tipe] #fmt = (' __gvmt_fatal("%%s:%%d:Incorrect return type, ' # 'expected %s got %%s\\n", __FILE__, __LINE__,' # 'gvmt_return_type_names[gvmt_last_return_type]);') #self.out << fmt % tipe.suffix #if common.global_debug: #Turn off return type checking, need to implement in CC as well. #fmt = ' if(gvmt_last_return_type && gvmt_last_return_type != %s)' #self.out << fmt % _return_type_codes[tipe] #fmt = (' __gvmt_fatal("%%s:%%d:Incorrect return type, ' # 'expected %s got %%s\\n", __FILE__, __LINE__,' # 'gvmt_return_type_names[gvmt_last_return_type]);') #self.out << fmt % tipe.suffix # For now to check that this is OK - To be removed #Size is build time constant # Is this right? # Uncache all references. #Set stack here. # self.stack.comment(self.out) # self.out << '/* %s */' % i.name # self.stack.comment(self.out) # Should validate that value fits in native int. #define FRAME_POINTER (&gvmt_frame)\n' #define FRAME_POINTER _gvmt_caller_frame\n' # name may have already been used for another variable: # self.stack.comment(self.out) #line %s "%s"\n ' % (number, self.filename) # self.stack.comment(self.out) #Inline uncontended case #Inline uncontended case: #Inline uncontended case #Inline uncontended case: #Size is build time constant
| 1.996856
| 2
|
slides/code/interlude_03_1.py
|
letstrythat/scientificpython
| 0
|
6629889
|
"""Specifications and unit tests for the exercice.
>>> e = Container()
>>> for v in [1, 2, 'a', 3.14]:
... e.extend(v)
>>> e.add(3.14)
>>> e.add(2)
>>> e.add(2)
>>> e
Container({2, 3.14})
>>> e.add(5)
Traceback (most recent call last):
...
ValueError: Value '5' is not allowed.
>>> e.extend(5)
>>> e.add(5)
>>> e.restrict(1)
>>> e.restrict(2)
Traceback (most recent call last):
...
ValueError: Value '2' is present in the Container.
"""
class Container(set):
    """A set that only accepts values from an explicit whitelist."""

    def __init__(self):
        super().__init__()
        self.allowedValues = set()

    def extend(self, v):
        """Add a value in the list of allowed values."""
        self.allowedValues.add(v)

    def add(self, v):
        """Add a value in the set."""
        if v in self.allowedValues:
            super().add(v)
        else:
            raise ValueError("Value '%s' is not allowed." % v)

    def restrict(self, v):
        """Remove a value from the list of allowed values."""
        if v not in self:
            self.allowedValues.remove(v)
        else:
            raise ValueError("Value '%s' is present in the Container." % v)
|
"""Specifications and unit tests for the exercice.
>>> e = Container()
>>> for v in [1, 2, 'a', 3.14]:
... e.extend(v)
>>> e.add(3.14)
>>> e.add(2)
>>> e.add(2)
>>> e
Container({2, 3.14})
>>> e.add(5)
Traceback (most recent call last):
...
ValueError: Value '5' is not allowed.
>>> e.extend(5)
>>> e.add(5)
>>> e.restrict(1)
>>> e.restrict(2)
Traceback (most recent call last):
...
ValueError: Value '2' is present in the Container.
"""
class Container(set):
    """Set restricted to a collection of explicitly permitted values."""

    def __init__(self):
        self.allowedValues = set()
        super().__init__()

    def extend(self, v):
        """Register *v* as a permitted value."""
        self.allowedValues.add(v)

    def add(self, v):
        """Insert *v* into the set, rejecting values never permitted."""
        if v not in self.allowedValues:
            raise ValueError("Value '%s' is not allowed." % v)
        set.add(self, v)

    def restrict(self, v):
        """Withdraw permission for *v*; fails while *v* is still stored."""
        if v in self:
            raise ValueError("Value '%s' is present in the Container." % v)
        self.allowedValues.remove(v)
|
en
| 0.459223
|
Specifications and unit tests for the exercice. >>> e = Container() >>> for v in [1, 2, 'a', 3.14]: ... e.extend(v) >>> e.add(3.14) >>> e.add(2) >>> e.add(2) >>> e Container({2, 3.14}) >>> e.add(5) Traceback (most recent call last): ... ValueError: Value '5' is not allowed. >>> e.extend(5) >>> e.add(5) >>> e.restrict(1) >>> e.restrict(2) Traceback (most recent call last): ... ValueError: Value '2' is present in the Container. Add a value in the list of allowed values. Add a value in the set. Remove a value from the list of allowed values.
| 3.580853
| 4
|
search_engine_parser/core/engines/duckduckgo.py
|
Iamdavidonuh/search-engine-parser
| 0
|
6629890
|
"""@desc
Parser for DuckDuckGo search results
"""
import re
from search_engine_parser.core.base import BaseSearch
from search_engine_parser.core.exceptions import NoResultsOrTrafficError
class DuckDuckGoSearch(BaseSearch):
    """
    Searches DuckDuckGo for string
    """
    name = "DuckDuckGo"
    search_url = "https://www.duckduckgo.com/html/?q={query}"
    summary = "\tHas a number of advantages over the other search engines. \n\tIt has a clean "\
        "interface, it does not track users, it is not fully loaded with ads and has a number "\
        "of very nice features (only one page of results, you can search directly other web "\
        "sites etc).\n\tAccording to DuckDuckGo traffic stats [December, 2018], they are "\
        "currently serving more than 30 million searches per day."

    def parse_soup(self, soup):
        """
        Parses DuckDuckGo Search Soup for a query results
        """
        # each organic result lives in its own <div class="result">
        result_divs = soup.find_all('div', class_='result')
        return result_divs

    def parse_single_result(self, single_result):
        """
        Parses the source code to return
        :param single_result: single result found in <div id="r1-{id}">
        :type single_result: `bs4.element.ResultSet`
        :return: parsed title, link and description of single result
        :rtype: str, str, str
        """
        heading = single_result.find('h2', class_="result__title")
        anchor = heading.find('a', class_="result__a")
        title = anchor.text
        # raw link is of format "/url?q=REAL-LINK&sa=..."
        link = anchor.get('href')
        snippet = single_result.find(class_='result__snippet').text
        return title, link, snippet
|
"""@desc
Parser for DuckDuckGo search results
"""
import re
from search_engine_parser.core.base import BaseSearch
from search_engine_parser.core.exceptions import NoResultsOrTrafficError
class DuckDuckGoSearch(BaseSearch):
    """
    Searches DuckDuckGo for string
    """
    name = "DuckDuckGo"
    search_url = "https://www.duckduckgo.com/html/?q={query}"
    summary = "\tHas a number of advantages over the other search engines. \n\tIt has a clean "\
        "interface, it does not track users, it is not fully loaded with ads and has a number "\
        "of very nice features (only one page of results, you can search directly other web "\
        "sites etc).\n\tAccording to DuckDuckGo traffic stats [December, 2018], they are "\
        "currently serving more than 30 million searches per day."

    def parse_soup(self, soup):
        """
        Parses DuckDuckGo Search Soup for a query results
        """
        # organic results are wrapped in <div class="result"> elements
        return soup.find_all('div', class_='result')

    def parse_single_result(self, single_result):
        """
        Parses the source code to return
        :param single_result: single result found in <div id="r1-{id}">
        :type single_result: `bs4.element.ResultSet`
        :return: parsed title, link and description of single result
        :rtype: str, str, str
        """
        title_header = single_result.find('h2', class_="result__title")
        # raw link is of format "/url?q=REAL-LINK&sa=..."
        link_tag = title_header.find('a', class_="result__a")
        snippet_tag = single_result.find(class_='result__snippet')
        return link_tag.text, link_tag.get('href'), snippet_tag.text
|
en
| 0.518514
|
@desc Parser for DuckDuckGo search results Searches DuckDuckGo for string Parses DuckDuckGo Search Soup for a query results # find all div tags Parses the source code to return :param single_result: single result found in <div id="r1-{id}"> :type single_result: `bs4.element.ResultSet` :return: parsed title, link and description of single result :rtype: str, str, str #Get the text and link # raw link is of format "/url?q=REAL-LINK&sa=..."
| 3.079227
| 3
|
openmdao/utils/options_dictionary.py
|
toddrme2178/OpenMDAO
| 0
|
6629891
|
<reponame>toddrme2178/OpenMDAO
"""Define the OptionsDictionary class."""
from __future__ import division, print_function
from six import iteritems, string_types
from openmdao.utils.general_utils import warn_deprecation
# unique object to check if default is given
_undefined = object()
class OptionsDictionary(object):
    """
    Dictionary with pre-declaration of keys for value-checking and default values.
    This class is instantiated for:
    1. the options attribute in solvers, drivers, and processor allocators
    2. the supports attribute in drivers
    3. the options attribute in systems
    Attributes
    ----------
    _dict : dict of dict
        Dictionary of entries. Each entry is a dictionary consisting of value, values,
        types, desc, lower, and upper.
    _parent_name : str or None
        If defined, prepend this name to beginning of all exceptions.
    _read_only : bool
        If True, no options can be set after declaration.
    """
    def __init__(self, parent_name=None, read_only=False):
        """
        Initialize all attributes.
        Parameters
        ----------
        parent_name : str
            Name or class name of System that owns this OptionsDictionary
        read_only : bool
            If True, setting (via __setitem__ or update) is not permitted.
        """
        self._dict = {}
        self._parent_name = parent_name
        self._read_only = read_only
    def __repr__(self):
        """
        Return a dictionary representation of the options.
        Returns
        -------
        dict
            The options dictionary.
        """
        return self._dict.__repr__()
    def __rst__(self):
        """
        Generate reStructuredText view of the options table.
        Returns
        -------
        list of str
            A rendition of the options as an rST table.
        """
        outputs = []
        for option_name, option_data in sorted(iteritems(self._dict)):
            name = option_name
            default = option_data['value'] if option_data['value'] is not _undefined \
                else '**Required**'
            values = option_data['values']
            types = option_data['types']
            desc = option_data['desc']
            # if the default is an object instance, replace with the (unqualified) object type
            default_str = str(default)
            idx = default_str.find(' object at ')
            # NOTE: was `default_str[0] is '<'` -- identity comparison with a
            # str literal only works via CPython interning; use equality.
            if idx >= 0 and default_str[0] == '<':
                parts = default_str[:idx].split('.')
                default = parts[-1]
            if types is None:
                types = "N/A"
            else:
                if not isinstance(types, (set, tuple, list)):
                    types = (types,)
                types = [type_.__name__ for type_ in types]
            if values is None:
                values = "N/A"
            else:
                if not isinstance(values, (set, tuple, list)):
                    values = (values,)
                values = list(values)
            outputs.append([name, default, values, types, desc])
        lines = []
        col_heads = ['Option', 'Default', 'Acceptable Values', 'Acceptable Types', 'Description']
        # column widths: at least as wide as the heading, grown to fit contents
        max_sizes = {}
        for j, col in enumerate(col_heads):
            max_sizes[j] = len(col)
        for output in outputs:
            for j, item in enumerate(output):
                length = len(str(item))
                if max_sizes[j] < length:
                    max_sizes[j] = length
        header = ""
        titles = ""
        for key, val in iteritems(max_sizes):
            header += '=' * val + ' '
        for j, head in enumerate(col_heads):
            titles += "%s " % head
            size = max_sizes[j]
            space = size - len(head)
            if space > 0:
                titles += space * ' '
        lines.append(header)
        lines.append(titles)
        lines.append(header)
        for output in outputs:
            line = ""
            for j, item in enumerate(output):
                line += "%s " % str(item)
                size = max_sizes[j]
                space = size - len(str(item))
                if space > 0:
                    line += space * ' '
            lines.append(line)
        lines.append(header)
        return lines
    def __str__(self, width=100):
        """
        Generate text string representation of the options table.
        Parameters
        ----------
        width : int
            The maximum width of the text.
        Returns
        -------
        str
            A text representation of the options table.
        """
        rst = self.__rst__()
        cols = [len(header) for header in rst[0].split()]
        desc_col = sum(cols[:-1]) + len(cols) - 1
        desc_len = width - desc_col
        # if it won't fit in allowed width, just return the rST
        if desc_len < 10:
            return '\n'.join(rst)
        text = []
        for row in rst:
            if len(row) > width:
                text.append(row[:width])
                if not row.startswith('==='):
                    # wrap the overflow (description column) onto continuation lines
                    row = row[width:].rstrip()
                    while row:
                        text.append(' ' * desc_col + row[:desc_len])
                        row = row[desc_len:]
            else:
                text.append(row)
        return '\n'.join(text)
    def _raise(self, msg, exc_type=RuntimeError):
        """
        Raise the given exception type, with parent's name prepended to the message.
        Parameters
        ----------
        msg : str
            The error message.
        exc_type : class
            The type of the exception to be raised.
        """
        if self._parent_name is None:
            full_msg = msg
        else:
            full_msg = '{}: {}'.format(self._parent_name, msg)
        raise exc_type(full_msg)
    def _assert_valid(self, name, value):
        """
        Check whether the given value is valid, where the key has already been declared.
        The optional checks consist of ensuring: the value is one of a list of acceptable values,
        the type of value is one of a list of acceptable types, value is not less than lower,
        value is not greater than upper, and value satisfies check_valid.
        Parameters
        ----------
        name : str
            The key for the declared option.
        value : object
            The default or user-set value to check for value, type, lower, and upper.
        """
        meta = self._dict[name]
        values = meta['values']
        types = meta['types']
        lower = meta['lower']
        upper = meta['upper']
        if not (value is None and meta['allow_none']):
            # If only values is declared
            if values is not None:
                if value not in values:
                    if isinstance(value, string_types):
                        value = "'{}'".format(value)
                    self._raise("Value ({}) of option '{}' is not one of {}.".format(value, name,
                                                                                     values),
                                ValueError)
            # If only types is declared
            elif types is not None:
                if not isinstance(value, types):
                    vtype = type(value).__name__
                    if isinstance(value, string_types):
                        value = "'{}'".format(value)
                    if isinstance(types, (set, tuple, list)):
                        typs = tuple([type_.__name__ for type_ in types])
                        self._raise("Value ({}) of option '{}' has type '{}', but one of "
                                    "types {} was expected.".format(value, name, vtype, typs),
                                    exc_type=TypeError)
                    else:
                        self._raise("Value ({}) of option '{}' has type '{}', but type '{}' "
                                    "was expected.".format(value, name, vtype, types.__name__),
                                    exc_type=TypeError)
            if upper is not None:
                if value > upper:
                    self._raise("Value ({}) of option '{}' "
                                "exceeds maximum allowed value of {}.".format(value, name, upper),
                                exc_type=ValueError)
            if lower is not None:
                if value < lower:
                    self._raise("Value ({}) of option '{}' "
                                "is less than minimum allowed value of {}.".format(value, name,
                                                                                   lower),
                                exc_type=ValueError)
        # General function test
        if meta['check_valid'] is not None:
            meta['check_valid'](name, value)
    def declare(self, name, default=_undefined, values=None, types=None, type_=None, desc='',
                upper=None, lower=None, check_valid=None, allow_none=False):
        r"""
        Declare an option.
        The value of the option must satisfy the following:
        1. If values only was given when declaring, value must be in values.
        2. If types only was given when declaring, value must satisfy isinstance(value, types).
        3. It is an error if both values and types are given.
        Parameters
        ----------
        name : str
            Name of the option.
        default : object or Null
            Optional default value that must be valid under the above 3 conditions.
        values : set or list or tuple or None
            Optional list of acceptable option values.
        types : type or tuple of types or None
            Optional type or list of acceptable option types.
        type_ : type or tuple of types or None
            Deprecated. Use types instead.
        desc : str
            Optional description of the option.
        upper : float or None
            Maximum allowable value.
        lower : float or None
            Minimum allowable value.
        check_valid : function or None
            General check function that raises an exception if value is not valid.
        allow_none : bool
            If True, allow None as a value regardless of values or types.
        """
        if type_ is not None:
            # message previously referred to the arg as '_type'; the parameter is 'type_'
            warn_deprecation("In declaration of option '%s' the 'type_' arg is deprecated. "
                             "Use 'types' instead." % name)
            if types is None:
                types = type_
        if values is not None and not isinstance(values, (set, list, tuple)):
            self._raise("In declaration of option '%s', the 'values' arg must be of type None,"
                        " list, or tuple - not %s." % (name, values), exc_type=TypeError)
        if types is not None and not isinstance(types, (type, set, list, tuple)):
            self._raise("In declaration of option '%s', the 'types' arg must be None, a type "
                        "or a tuple - not %s." % (name, types), exc_type=TypeError)
        if types is not None and values is not None:
            self._raise("'types' and 'values' were both specified for option '%s'." % name)
        if types is bool:
            values = (True, False)
        default_provided = default is not _undefined
        self._dict[name] = {
            'value': default,
            'values': values,
            'types': types,
            'desc': desc,
            'upper': upper,
            'lower': lower,
            'check_valid': check_valid,
            'has_been_set': default_provided,
            'allow_none': allow_none,
        }
        # If a default is given, check for validity
        if default_provided:
            self._assert_valid(name, default)
    def undeclare(self, name):
        """
        Remove entry from the OptionsDictionary, for classes that don't use that option.
        Parameters
        ----------
        name : str
            The name of a key, the entry of which will be removed from the internal dictionary.
        """
        if name in self._dict:
            del self._dict[name]
    def update(self, in_dict):
        """
        Update the internal dictionary with the given one.
        Parameters
        ----------
        in_dict : dict
            The incoming dictionary to add to the internal one.
        """
        for name in in_dict:
            self[name] = in_dict[name]
    def __iter__(self):
        """
        Provide an iterator.
        Returns
        -------
        iterable
            iterator over the keys in the dictionary.
        """
        return iter(self._dict)
    def __contains__(self, key):
        """
        Check if the key is in the local dictionary.
        Parameters
        ----------
        key : str
            name of the option.
        Returns
        -------
        boolean
            whether key is in the local dict.
        """
        return key in self._dict
    def __setitem__(self, name, value):
        """
        Set an option in the local dictionary.
        Parameters
        ----------
        name : str
            name of the option.
        value : -
            value of the option to be value- and type-checked if declared.
        """
        try:
            meta = self._dict[name]
        except KeyError:
            # The key must have been declared.
            msg = "Option '{}' cannot be set because it has not been declared."
            self._raise(msg.format(name), exc_type=KeyError)
        if self._read_only:
            self._raise("Tried to set read-only option '{}'.".format(name), exc_type=KeyError)
        self._assert_valid(name, value)
        meta['value'] = value
        meta['has_been_set'] = True
    def __getitem__(self, name):
        """
        Get an option from the dict or declared default.
        Parameters
        ----------
        name : str
            name of the option.
        Returns
        -------
        value : -
            value of the option.
        """
        # If the option has been set in this system, return the set value
        try:
            meta = self._dict[name]
            if meta['has_been_set']:
                return meta['value']
            else:
                self._raise("Option '{}' is required but has not been set.".format(name))
        except KeyError:
            self._raise("Option '{}' cannot be found".format(name), exc_type=KeyError)
|
"""Define the OptionsDictionary class."""
from __future__ import division, print_function
from six import iteritems, string_types
from openmdao.utils.general_utils import warn_deprecation
# unique object to check if default is given
_undefined = object()
class OptionsDictionary(object):
    """
    Dictionary with pre-declaration of keys for value-checking and default values.
    This class is instantiated for:
    1. the options attribute in solvers, drivers, and processor allocators
    2. the supports attribute in drivers
    3. the options attribute in systems
    Attributes
    ----------
    _dict : dict of dict
        Dictionary of entries. Each entry is a dictionary consisting of value, values,
        types, desc, lower, and upper.
    _parent_name : str or None
        If defined, prepend this name to beginning of all exceptions.
    _read_only : bool
        If True, no options can be set after declaration.
    """
    def __init__(self, parent_name=None, read_only=False):
        """
        Initialize all attributes.
        Parameters
        ----------
        parent_name : str
            Name or class name of System that owns this OptionsDictionary
        read_only : bool
            If True, setting (via __setitem__ or update) is not permitted.
        """
        self._dict = {}
        self._parent_name = parent_name
        self._read_only = read_only
    def __repr__(self):
        """
        Return a dictionary representation of the options.
        Returns
        -------
        dict
            The options dictionary.
        """
        return self._dict.__repr__()
    def __rst__(self):
        """
        Generate reStructuredText view of the options table.
        Returns
        -------
        list of str
            A rendition of the options as an rST table.
        """
        outputs = []
        for option_name, option_data in sorted(iteritems(self._dict)):
            name = option_name
            default = option_data['value'] if option_data['value'] is not _undefined \
                else '**Required**'
            values = option_data['values']
            types = option_data['types']
            desc = option_data['desc']
            # if the default is an object instance, replace with the (unqualified) object type
            default_str = str(default)
            idx = default_str.find(' object at ')
            # NOTE: was `default_str[0] is '<'` -- identity comparison with a
            # str literal only works via CPython interning; use equality.
            if idx >= 0 and default_str[0] == '<':
                parts = default_str[:idx].split('.')
                default = parts[-1]
            if types is None:
                types = "N/A"
            else:
                if not isinstance(types, (set, tuple, list)):
                    types = (types,)
                types = [type_.__name__ for type_ in types]
            if values is None:
                values = "N/A"
            else:
                if not isinstance(values, (set, tuple, list)):
                    values = (values,)
                values = list(values)
            outputs.append([name, default, values, types, desc])
        lines = []
        col_heads = ['Option', 'Default', 'Acceptable Values', 'Acceptable Types', 'Description']
        # column widths: at least as wide as the heading, grown to fit contents
        max_sizes = {}
        for j, col in enumerate(col_heads):
            max_sizes[j] = len(col)
        for output in outputs:
            for j, item in enumerate(output):
                length = len(str(item))
                if max_sizes[j] < length:
                    max_sizes[j] = length
        header = ""
        titles = ""
        for key, val in iteritems(max_sizes):
            header += '=' * val + ' '
        for j, head in enumerate(col_heads):
            titles += "%s " % head
            size = max_sizes[j]
            space = size - len(head)
            if space > 0:
                titles += space * ' '
        lines.append(header)
        lines.append(titles)
        lines.append(header)
        for output in outputs:
            line = ""
            for j, item in enumerate(output):
                line += "%s " % str(item)
                size = max_sizes[j]
                space = size - len(str(item))
                if space > 0:
                    line += space * ' '
            lines.append(line)
        lines.append(header)
        return lines
    def __str__(self, width=100):
        """
        Generate text string representation of the options table.
        Parameters
        ----------
        width : int
            The maximum width of the text.
        Returns
        -------
        str
            A text representation of the options table.
        """
        rst = self.__rst__()
        cols = [len(header) for header in rst[0].split()]
        desc_col = sum(cols[:-1]) + len(cols) - 1
        desc_len = width - desc_col
        # if it won't fit in allowed width, just return the rST
        if desc_len < 10:
            return '\n'.join(rst)
        text = []
        for row in rst:
            if len(row) > width:
                text.append(row[:width])
                if not row.startswith('==='):
                    # wrap the overflow (description column) onto continuation lines
                    row = row[width:].rstrip()
                    while row:
                        text.append(' ' * desc_col + row[:desc_len])
                        row = row[desc_len:]
            else:
                text.append(row)
        return '\n'.join(text)
    def _raise(self, msg, exc_type=RuntimeError):
        """
        Raise the given exception type, with parent's name prepended to the message.
        Parameters
        ----------
        msg : str
            The error message.
        exc_type : class
            The type of the exception to be raised.
        """
        if self._parent_name is None:
            full_msg = msg
        else:
            full_msg = '{}: {}'.format(self._parent_name, msg)
        raise exc_type(full_msg)
    def _assert_valid(self, name, value):
        """
        Check whether the given value is valid, where the key has already been declared.
        The optional checks consist of ensuring: the value is one of a list of acceptable values,
        the type of value is one of a list of acceptable types, value is not less than lower,
        value is not greater than upper, and value satisfies check_valid.
        Parameters
        ----------
        name : str
            The key for the declared option.
        value : object
            The default or user-set value to check for value, type, lower, and upper.
        """
        meta = self._dict[name]
        values = meta['values']
        types = meta['types']
        lower = meta['lower']
        upper = meta['upper']
        if not (value is None and meta['allow_none']):
            # If only values is declared
            if values is not None:
                if value not in values:
                    if isinstance(value, string_types):
                        value = "'{}'".format(value)
                    self._raise("Value ({}) of option '{}' is not one of {}.".format(value, name,
                                                                                     values),
                                ValueError)
            # If only types is declared
            elif types is not None:
                if not isinstance(value, types):
                    vtype = type(value).__name__
                    if isinstance(value, string_types):
                        value = "'{}'".format(value)
                    if isinstance(types, (set, tuple, list)):
                        typs = tuple([type_.__name__ for type_ in types])
                        self._raise("Value ({}) of option '{}' has type '{}', but one of "
                                    "types {} was expected.".format(value, name, vtype, typs),
                                    exc_type=TypeError)
                    else:
                        self._raise("Value ({}) of option '{}' has type '{}', but type '{}' "
                                    "was expected.".format(value, name, vtype, types.__name__),
                                    exc_type=TypeError)
            if upper is not None:
                if value > upper:
                    self._raise("Value ({}) of option '{}' "
                                "exceeds maximum allowed value of {}.".format(value, name, upper),
                                exc_type=ValueError)
            if lower is not None:
                if value < lower:
                    self._raise("Value ({}) of option '{}' "
                                "is less than minimum allowed value of {}.".format(value, name,
                                                                                   lower),
                                exc_type=ValueError)
        # General function test
        if meta['check_valid'] is not None:
            meta['check_valid'](name, value)
    def declare(self, name, default=_undefined, values=None, types=None, type_=None, desc='',
                upper=None, lower=None, check_valid=None, allow_none=False):
        r"""
        Declare an option.
        The value of the option must satisfy the following:
        1. If values only was given when declaring, value must be in values.
        2. If types only was given when declaring, value must satisfy isinstance(value, types).
        3. It is an error if both values and types are given.
        Parameters
        ----------
        name : str
            Name of the option.
        default : object or Null
            Optional default value that must be valid under the above 3 conditions.
        values : set or list or tuple or None
            Optional list of acceptable option values.
        types : type or tuple of types or None
            Optional type or list of acceptable option types.
        type_ : type or tuple of types or None
            Deprecated. Use types instead.
        desc : str
            Optional description of the option.
        upper : float or None
            Maximum allowable value.
        lower : float or None
            Minimum allowable value.
        check_valid : function or None
            General check function that raises an exception if value is not valid.
        allow_none : bool
            If True, allow None as a value regardless of values or types.
        """
        if type_ is not None:
            # message previously referred to the arg as '_type'; the parameter is 'type_'
            warn_deprecation("In declaration of option '%s' the 'type_' arg is deprecated. "
                             "Use 'types' instead." % name)
            if types is None:
                types = type_
        if values is not None and not isinstance(values, (set, list, tuple)):
            self._raise("In declaration of option '%s', the 'values' arg must be of type None,"
                        " list, or tuple - not %s." % (name, values), exc_type=TypeError)
        if types is not None and not isinstance(types, (type, set, list, tuple)):
            self._raise("In declaration of option '%s', the 'types' arg must be None, a type "
                        "or a tuple - not %s." % (name, types), exc_type=TypeError)
        if types is not None and values is not None:
            self._raise("'types' and 'values' were both specified for option '%s'." % name)
        if types is bool:
            values = (True, False)
        default_provided = default is not _undefined
        self._dict[name] = {
            'value': default,
            'values': values,
            'types': types,
            'desc': desc,
            'upper': upper,
            'lower': lower,
            'check_valid': check_valid,
            'has_been_set': default_provided,
            'allow_none': allow_none,
        }
        # If a default is given, check for validity
        if default_provided:
            self._assert_valid(name, default)
    def undeclare(self, name):
        """
        Remove entry from the OptionsDictionary, for classes that don't use that option.
        Parameters
        ----------
        name : str
            The name of a key, the entry of which will be removed from the internal dictionary.
        """
        if name in self._dict:
            del self._dict[name]
    def update(self, in_dict):
        """
        Update the internal dictionary with the given one.
        Parameters
        ----------
        in_dict : dict
            The incoming dictionary to add to the internal one.
        """
        for name in in_dict:
            self[name] = in_dict[name]
    def __iter__(self):
        """
        Provide an iterator.
        Returns
        -------
        iterable
            iterator over the keys in the dictionary.
        """
        return iter(self._dict)
    def __contains__(self, key):
        """
        Check if the key is in the local dictionary.
        Parameters
        ----------
        key : str
            name of the option.
        Returns
        -------
        boolean
            whether key is in the local dict.
        """
        return key in self._dict
    def __setitem__(self, name, value):
        """
        Set an option in the local dictionary.
        Parameters
        ----------
        name : str
            name of the option.
        value : -
            value of the option to be value- and type-checked if declared.
        """
        try:
            meta = self._dict[name]
        except KeyError:
            # The key must have been declared.
            msg = "Option '{}' cannot be set because it has not been declared."
            self._raise(msg.format(name), exc_type=KeyError)
        if self._read_only:
            self._raise("Tried to set read-only option '{}'.".format(name), exc_type=KeyError)
        self._assert_valid(name, value)
        meta['value'] = value
        meta['has_been_set'] = True
    def __getitem__(self, name):
        """
        Get an option from the dict or declared default.
        Parameters
        ----------
        name : str
            name of the option.
        Returns
        -------
        value : -
            value of the option.
        """
        # If the option has been set in this system, return the set value
        try:
            meta = self._dict[name]
            if meta['has_been_set']:
                return meta['value']
            else:
                self._raise("Option '{}' is required but has not been set.".format(name))
        except KeyError:
            self._raise("Option '{}' cannot be found".format(name), exc_type=KeyError)
|
en
| 0.622727
|
Define the OptionsDictionary class. # unique object to check if default is given Dictionary with pre-declaration of keys for value-checking and default values. This class is instantiated for: 1. the options attribute in solvers, drivers, and processor allocators 2. the supports attribute in drivers 3. the options attribute in systems Attributes ---------- _dict : dict of dict Dictionary of entries. Each entry is a dictionary consisting of value, values, types, desc, lower, and upper. _parent_name : str or None If defined, prepend this name to beginning of all exceptions. _read_only : bool If True, no options can be set after declaration. Initialize all attributes. Parameters ---------- parent_name : str Name or class name of System that owns this OptionsDictionary read_only : bool If True, setting (via __setitem__ or update) is not permitted. Return a dictionary representation of the options. Returns ------- dict The options dictionary. Generate reStructuredText view of the options table. Returns ------- list of str A rendition of the options as an rST table. # if the default is an object instance, replace with the (unqualified) object type Generate text string representation of the options table. Parameters ---------- width : int The maximum width of the text. Returns ------- str A text representation of the options table. # if it won't fit in allowed width, just return the rST Raise the given exception type, with parent's name prepended to the message. Parameters ---------- msg : str The error message. exc_type : class The type of the exception to be raised. Check whether the given value is valid, where the key has already been declared. The optional checks consist of ensuring: the value is one of a list of acceptable values, the type of value is one of a list of acceptable types, value is not less than lower, value is not greater than upper, and value satisfies check_valid. Parameters ---------- name : str The key for the declared option. 
value : object The default or user-set value to check for value, type, lower, and upper. # If only values is declared # If only types is declared # General function test Declare an option. The value of the option must satisfy the following: 1. If values only was given when declaring, value must be in values. 2. If types only was given when declaring, value must satisfy isinstance(value, types). 3. It is an error if both values and types are given. Parameters ---------- name : str Name of the option. default : object or Null Optional default value that must be valid under the above 3 conditions. values : set or list or tuple or None Optional list of acceptable option values. types : type or tuple of types or None Optional type or list of acceptable option types. type_ : type or tuple of types or None Deprecated. Use types instead. desc : str Optional description of the option. upper : float or None Maximum allowable value. lower : float or None Minimum allowable value. check_valid : function or None General check function that raises an exception if value is not valid. allow_none : bool If True, allow None as a value regardless of values or types. # If a default is given, check for validity Remove entry from the OptionsDictionary, for classes that don't use that option. Parameters ---------- name : str The name of a key, the entry of which will be removed from the internal dictionary. Update the internal dictionary with the given one. Parameters ---------- in_dict : dict The incoming dictionary to add to the internal one. Provide an iterator. Returns ------- iterable iterator over the keys in the dictionary. Check if the key is in the local dictionary. Parameters ---------- key : str name of the option. Returns ------- boolean whether key is in the local dict. Set an option in the local dictionary. Parameters ---------- name : str name of the option. value : - value of the option to be value- and type-checked if declared. # The key must have been declared. 
Get an option from the dict or declared default. Parameters ---------- name : str name of the option. Returns ------- value : - value of the option. # If the option has been set in this system, return the set value
| 2.863156
| 3
|
pkgs/conda-4.0.5-py27_0/lib/python2.7/site-packages/conda/cli/main_remove.py
|
wangyum/anaconda
| 0
|
6629892
|
<reponame>wangyum/anaconda
# (c) 2012-2013 Continuum Analytics, Inc. / http://continuum.io
# All Rights Reserved
#
# conda is distributed under the terms of the BSD 3-clause license.
# Consult LICENSE.txt or http://opensource.org/licenses/BSD-3-Clause.
from __future__ import print_function, division, absolute_import
from os.path import join, exists
from argparse import RawDescriptionHelpFormatter
import errno
import logging
from conda.cli import common
from conda.console import json_progress_bars
help = "%s a list of packages from a specified conda environment."
descr = help + """
This command will also remove any package that depends on any of the
specified packages as well---unless a replacement can be found without
that dependency. If you wish to skip this dependency checking and remove
just the requested packages, add the '--force' option. Note however that
this may result in a broken environment, so use this with caution.
"""
example = """
Examples:
conda %s -n myenv scipy
"""
uninstall_help = "Alias for conda remove. See conda remove --help."
log = logging.getLogger(__name__)
def configure_parser(sub_parsers, name='remove'):
    """Attach the ``remove`` sub-command (or an alias such as ``uninstall``)
    to *sub_parsers* and wire up all of its options.

    Parameters
    ----------
    sub_parsers : argparse sub-parsers action
        The collection the new sub-parser is added to.
    name : str
        'remove' gets the full description/epilog; any other name is treated
        as an alias whose help simply points back at ``conda remove``.
    """
    if name == 'remove':
        p = sub_parsers.add_parser(
            name,
            formatter_class=RawDescriptionHelpFormatter,
            description=descr % name.capitalize(),
            help=help % name.capitalize(),
            epilog=example % name,
            add_help=False,
        )
    else:
        # Alias parser (e.g. 'uninstall'): same behaviour, abbreviated help.
        p = sub_parsers.add_parser(
            name,
            formatter_class=RawDescriptionHelpFormatter,
            description=uninstall_help,
            help=uninstall_help,
            epilog=example % name,
            add_help=False,
        )
    common.add_parser_help(p)
    common.add_parser_yes(p)
    common.add_parser_json(p)
    p.add_argument(
        "--all",
        action="store_true",
        help="%s all packages, i.e., the entire environment." % name.capitalize(),
    )
    p.add_argument(
        "--features",
        action="store_true",
        help="%s features (instead of packages)." % name.capitalize(),
    )
    p.add_argument(
        "--force",
        action="store_true",
        help="Forces removal of a package without removing packages that depend on it. "
        "Using this option will usually leave your environment in a broken and "
        "inconsistent state.",
    )
    common.add_parser_no_pin(p)
    common.add_parser_channels(p)
    common.add_parser_prefix(p)
    common.add_parser_quiet(p)
    # Putting this one first makes it the default
    common.add_parser_no_use_index_cache(p)
    common.add_parser_use_index_cache(p)
    common.add_parser_use_local(p)
    common.add_parser_offline(p)
    common.add_parser_pscheck(p)
    # Positional package names; completion is provided by the project's
    # InstalledPackages completer.
    p.add_argument(
        'package_names',
        metavar='package_name',
        action="store",
        nargs='*',
        help="Package names to %s from the environment." % name,
    ).completer = common.InstalledPackages
    p.set_defaults(func=execute)
def execute(args, parser):
    """Execute a ``conda remove``/``uninstall`` invocation.

    Validates the arguments, computes a removal plan (for named packages,
    tracked features, or the whole environment), shows it to the user
    (unless ``--json``), and carries it out.  Invalid input exits through
    ``common.error_and_exit``.
    """
    # Heavy conda modules are imported lazily so argument parsing stays fast.
    import conda.plan as plan
    import conda.instructions as inst
    from conda.install import rm_rf, linked
    from conda import config
    if not (args.all or args.package_names):
        common.error_and_exit('no package names supplied,\n'
                              ' try "conda remove -h" for more details',
                              json=args.json,
                              error_type="ValueError")
    prefix = common.get_prefix(args)
    if args.all and prefix == config.default_prefix:
        # NOTE(review): unlike the other failure paths this exit is not
        # JSON-aware (no json=/error_type=); confirm whether it should be.
        common.error_and_exit("cannot remove current environment. deactivate and run conda remove again")
    common.check_write('remove', prefix, json=args.json)
    common.ensure_override_channels_requires_channel(args, json=args.json)
    channel_urls = args.channel or ()
    if args.use_local:
        from conda.fetch import fetch_index
        from conda.utils import url_path
        try:
            from conda_build.config import croot
        except ImportError:
            common.error_and_exit("you need to have 'conda-build >= 1.7.1' installed"
                                  " to use the --use-local option",
                                  json=args.json,
                                  error_type="RuntimeError")
        # remove the cache such that a refetch is made,
        # this is necessary because we add the local build repo URL
        fetch_index.cache = {}
        if exists(croot):
            channel_urls = [url_path(croot)] + list(channel_urls)
    # The original if/else fetched the index with byte-identical arguments in
    # both branches; a single call after the --use-local tweak is equivalent.
    index = common.get_index_trap(channel_urls=channel_urls,
                                  prepend=not args.override_channels,
                                  use_cache=args.use_index_cache,
                                  json=args.json,
                                  offline=args.offline,
                                  prefix=prefix)
    specs = None
    if args.features:
        # Remove tracked features instead of individual packages.
        features = set(args.package_names)
        actions = plan.remove_features_actions(prefix, index, features)
    elif args.all:
        if plan.is_root_prefix(prefix):
            common.error_and_exit('cannot remove root environment,\n'
                                  ' add -n NAME or -p PREFIX option',
                                  json=args.json,
                                  error_type="CantRemoveRoot")
        # Unlink every linked package in the environment.
        actions = {inst.PREFIX: prefix}
        for dist in sorted(linked(prefix)):
            plan.add_unlink(actions, dist)
    else:
        specs = common.specs_from_args(args.package_names)
        if (plan.is_root_prefix(prefix) and
                common.names_in_specs(common.root_no_rm, specs)):
            common.error_and_exit('cannot remove %s from root environment' %
                                  ', '.join(common.root_no_rm),
                                  json=args.json,
                                  error_type="CantRemoveFromRoot")
        actions = plan.remove_actions(prefix, specs, index=index,
                                      force=args.force, pinned=args.pinned)
    if plan.nothing_to_do(actions):
        if args.all:
            # Nothing left linked, but --all still deletes the prefix dir.
            rm_rf(prefix)
            if args.json:
                common.stdout_json({
                    'success': True,
                    'actions': actions
                })
            return
        common.error_and_exit('no packages found to remove from '
                              'environment: %s' % prefix,
                              json=args.json,
                              error_type="PackageNotInstalled")
    if not args.json:
        print()
        print("Package plan for package removal in environment %s:" % prefix)
        plan.display_actions(actions, index)
    if args.json and args.dry_run:
        common.stdout_json({
            'success': True,
            'dry_run': True,
            'actions': actions
        })
        return
    if not args.json:
        common.confirm_yn(args)
    if args.json and not args.quiet:
        # JSON mode routes progress through structured progress records.
        with json_progress_bars():
            plan.execute_actions(actions, index, verbose=not args.quiet)
    else:
        plan.execute_actions(actions, index, verbose=not args.quiet)
    if specs:
        # Best-effort: append the removal request to the environment history.
        try:
            with open(join(prefix, 'conda-meta', 'history'), 'a') as f:
                f.write('# remove specs: %s\n' % specs)
        except IOError as e:
            if e.errno == errno.EACCES:
                log.debug("Can't write the history file")
            else:
                raise
    if args.all:
        rm_rf(prefix)
    if args.json:
        common.stdout_json({
            'success': True,
            'actions': actions
        })
|
# (c) 2012-2013 Continuum Analytics, Inc. / http://continuum.io
# All Rights Reserved
#
# conda is distributed under the terms of the BSD 3-clause license.
# Consult LICENSE.txt or http://opensource.org/licenses/BSD-3-Clause.
from __future__ import print_function, division, absolute_import
from os.path import join, exists
from argparse import RawDescriptionHelpFormatter
import errno
import logging
from conda.cli import common
from conda.console import json_progress_bars
help = "%s a list of packages from a specified conda environment."
descr = help + """
This command will also remove any package that depends on any of the
specified packages as well---unless a replacement can be found without
that dependency. If you wish to skip this dependency checking and remove
just the requested packages, add the '--force' option. Note however that
this may result in a broken environment, so use this with caution.
"""
example = """
Examples:
conda %s -n myenv scipy
"""
uninstall_help = "Alias for conda remove. See conda remove --help."
log = logging.getLogger(__name__)
def configure_parser(sub_parsers, name='remove'):
    """Register the ``remove`` sub-command (or an alias like ``uninstall``)
    on *sub_parsers*.

    'remove' gets the full description and epilog; any other *name* is an
    alias whose help text points back at ``conda remove --help``.
    """
    if name == 'remove':
        p = sub_parsers.add_parser(
            name,
            formatter_class=RawDescriptionHelpFormatter,
            description=descr % name.capitalize(),
            help=help % name.capitalize(),
            epilog=example % name,
            add_help=False,
        )
    else:
        # Alias parser: identical behaviour, abbreviated help.
        p = sub_parsers.add_parser(
            name,
            formatter_class=RawDescriptionHelpFormatter,
            description=uninstall_help,
            help=uninstall_help,
            epilog=example % name,
            add_help=False,
        )
    common.add_parser_help(p)
    common.add_parser_yes(p)
    common.add_parser_json(p)
    p.add_argument(
        "--all",
        action="store_true",
        help="%s all packages, i.e., the entire environment." % name.capitalize(),
    )
    p.add_argument(
        "--features",
        action="store_true",
        help="%s features (instead of packages)." % name.capitalize(),
    )
    p.add_argument(
        "--force",
        action="store_true",
        help="Forces removal of a package without removing packages that depend on it. "
        "Using this option will usually leave your environment in a broken and "
        "inconsistent state.",
    )
    common.add_parser_no_pin(p)
    common.add_parser_channels(p)
    common.add_parser_prefix(p)
    common.add_parser_quiet(p)
    # Putting this one first makes it the default
    common.add_parser_no_use_index_cache(p)
    common.add_parser_use_index_cache(p)
    common.add_parser_use_local(p)
    common.add_parser_offline(p)
    common.add_parser_pscheck(p)
    # Positional package names; shell completion via InstalledPackages.
    p.add_argument(
        'package_names',
        metavar='package_name',
        action="store",
        nargs='*',
        help="Package names to %s from the environment." % name,
    ).completer = common.InstalledPackages
    p.set_defaults(func=execute)
def execute(args, parser):
    """Execute ``conda remove``/``uninstall``: build and run a removal plan
    for named packages, tracked features, or the whole environment.
    """
    # Heavy conda modules are imported lazily so argument parsing stays fast.
    import conda.plan as plan
    import conda.instructions as inst
    from conda.install import rm_rf, linked
    from conda import config
    if not (args.all or args.package_names):
        common.error_and_exit('no package names supplied,\n'
                              ' try "conda remove -h" for more details',
                              json=args.json,
                              error_type="ValueError")
    prefix = common.get_prefix(args)
    if args.all and prefix == config.default_prefix:
        # NOTE(review): this exit path is not JSON-aware, unlike the others.
        common.error_and_exit("cannot remove current environment. deactivate and run conda remove again")
    common.check_write('remove', prefix, json=args.json)
    common.ensure_override_channels_requires_channel(args, json=args.json)
    channel_urls = args.channel or ()
    if args.use_local:
        from conda.fetch import fetch_index
        from conda.utils import url_path
        try:
            from conda_build.config import croot
        except ImportError:
            common.error_and_exit("you need to have 'conda-build >= 1.7.1' installed"
                                  " to use the --use-local option",
                                  json=args.json,
                                  error_type="RuntimeError")
        # remove the cache such that a refetch is made,
        # this is necessary because we add the local build repo URL
        fetch_index.cache = {}
        if exists(croot):
            channel_urls = [url_path(croot)] + list(channel_urls)
        # NOTE(review): both branches below call get_index_trap with
        # identical arguments; the if/else could be collapsed.
        index = common.get_index_trap(channel_urls=channel_urls,
                                      prepend=not args.override_channels,
                                      use_cache=args.use_index_cache,
                                      json=args.json,
                                      offline=args.offline,
                                      prefix=prefix)
    else:
        index = common.get_index_trap(channel_urls=channel_urls,
                                      prepend=not args.override_channels,
                                      use_cache=args.use_index_cache,
                                      json=args.json,
                                      offline=args.offline,
                                      prefix=prefix)
    specs = None
    if args.features:
        # Remove tracked features instead of individual packages.
        features = set(args.package_names)
        actions = plan.remove_features_actions(prefix, index, features)
    elif args.all:
        if plan.is_root_prefix(prefix):
            common.error_and_exit('cannot remove root environment,\n'
                                  ' add -n NAME or -p PREFIX option',
                                  json=args.json,
                                  error_type="CantRemoveRoot")
        # Unlink every linked package in the environment.
        actions = {inst.PREFIX: prefix}
        for dist in sorted(linked(prefix)):
            plan.add_unlink(actions, dist)
    else:
        specs = common.specs_from_args(args.package_names)
        if (plan.is_root_prefix(prefix) and
                common.names_in_specs(common.root_no_rm, specs)):
            common.error_and_exit('cannot remove %s from root environment' %
                                  ', '.join(common.root_no_rm),
                                  json=args.json,
                                  error_type="CantRemoveFromRoot")
        actions = plan.remove_actions(prefix, specs, index=index,
                                      force=args.force, pinned=args.pinned)
    if plan.nothing_to_do(actions):
        if args.all:
            # Nothing left linked, but --all still deletes the prefix dir.
            rm_rf(prefix)
            if args.json:
                common.stdout_json({
                    'success': True,
                    'actions': actions
                })
            return
        common.error_and_exit('no packages found to remove from '
                              'environment: %s' % prefix,
                              json=args.json,
                              error_type="PackageNotInstalled")
    if not args.json:
        print()
        print("Package plan for package removal in environment %s:" % prefix)
        plan.display_actions(actions, index)
    if args.json and args.dry_run:
        common.stdout_json({
            'success': True,
            'dry_run': True,
            'actions': actions
        })
        return
    if not args.json:
        common.confirm_yn(args)
    if args.json and not args.quiet:
        # JSON mode routes progress through structured progress records.
        with json_progress_bars():
            plan.execute_actions(actions, index, verbose=not args.quiet)
    else:
        plan.execute_actions(actions, index, verbose=not args.quiet)
    if specs:
        # Best-effort: append the removal request to the environment history.
        try:
            with open(join(prefix, 'conda-meta', 'history'), 'a') as f:
                f.write('# remove specs: %s\n' % specs)
        except IOError as e:
            if e.errno == errno.EACCES:
                log.debug("Can't write the history file")
            else:
                raise
    if args.all:
        rm_rf(prefix)
    if args.json:
        common.stdout_json({
            'success': True,
            'actions': actions
        })
|
en
| 0.797832
|
# (c) 2012-2013 Continuum Analytics, Inc. / http://continuum.io # All Rights Reserved # # conda is distributed under the terms of the BSD 3-clause license. # Consult LICENSE.txt or http://opensource.org/licenses/BSD-3-Clause. This command will also remove any package that depends on any of the specified packages as well---unless a replacement can be found without that dependency. If you wish to skip this dependency checking and remove just the requested packages, add the '--force' option. Note however that this may result in a broken environment, so use this with caution. Examples: conda %s -n myenv scipy # Putting this one first makes it the default # remove the cache such that a refetch is made, # this is necessary because we add the local build repo URL
| 1.983698
| 2
|
regret_wrt_alpha.py
|
yinglunz/on-regret-with-multiple-best-arms
| 0
|
6629893
|
<reponame>yinglunz/on-regret-with-multiple-best-arms
import numpy as np
import time
import multiprocessing
import copy
from functools import partial
from datetime import date
import matplotlib
import matplotlib.pyplot as plt
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
from regret_class import MOSS, Quantile, MOSSPLUS, Parallel
from regret_curve import generate_instance, calc_oracle_sample
def run_MOSS(n, alpha, oracle_or_not, instance_type, sigma, pull_max, update_interval):
    """Run MOSS once on a freshly drawn instance and return its final regret.

    Parameters
    ----------
    n : int
        Total number of arms in the generated instance.
    alpha : float
        Hardness parameter of the instance.
    oracle_or_not : int
        0 -> plain MOSS over all n arms; 1 -> MOSS over the oracle
        subsampling size returned by ``calc_oracle_sample``.
    instance_type : str
        Reward-distribution tag understood by ``MOSS`` (e.g. 'bernoulli').
    sigma : float
        Noise scale passed through to ``MOSS``.
    pull_max : int
        Time horizon (total number of pulls).
    update_interval : int
        Print a progress line every this many pulls.

    Raises
    ------
    ValueError
        If ``oracle_or_not`` is neither 0 nor 1.  (Previously such values
        fell through both branches and crashed later with ``NameError``.)
    """
    instance, m = generate_instance(n, alpha, pull_max)
    if oracle_or_not == 0:
        n_select = n
    elif oracle_or_not == 1:
        n_select = calc_oracle_sample(n, alpha, pull_max)
    else:
        raise ValueError('oracle_or_not must be 0 or 1, got {!r}'.format(oracle_or_not))
    # Both branches constructed MOSS identically; build it once.
    alg_obj = MOSS(instance, n_select, instance_type, sigma, pull_max)
    while alg_obj.t <= pull_max:
        if alg_obj.t % update_interval == 0:
            print('run MOSS with alpha={} at time={}'.format(alpha, alg_obj.t))
        alg_obj.update()
    return alg_obj.regret
def single_run_MOSS(n, alpha_list, oracle_or_not, instance_type, sigma, pull_max, update_interval):
    """Sweep MOSS over every hardness level in *alpha_list*.

    Returns the final regrets, one per alpha, in input order.
    """
    return [
        run_MOSS(n, a, oracle_or_not, instance_type, sigma, pull_max, update_interval)
        for a in alpha_list
    ]
def run_Quantile(n, alpha, n_initial, instance_type, sigma, pull_max, update_interval):
    """Run the Quantile algorithm once and return its final regret."""
    instance, _num_best = generate_instance(n, alpha, pull_max)
    algorithm = Quantile(instance, n_initial, instance_type, sigma, pull_max)
    while algorithm.t <= pull_max:
        # Periodic progress trace.
        if algorithm.t % update_interval == 0:
            print('run Quantile with alpha={} at time={}'.format(alpha, algorithm.t))
        algorithm.update()
    return algorithm.regret
def single_run_Quantile(n, alpha_list, n_initial, instance_type, sigma, pull_max, update_interval):
    """Sweep Quantile over every hardness level in *alpha_list*.

    Returns the final regrets, one per alpha, in input order.
    """
    return [
        run_Quantile(n, a, n_initial, instance_type, sigma, pull_max, update_interval)
        for a in alpha_list
    ]
def run_MOSSPLUS(n, alpha, instance_type, sigma, pull_max, update_interval, beta):
    """Run the vanilla MOSS++ algorithm once and return its final regret."""
    instance, _num_best = generate_instance(n, alpha, pull_max)
    algorithm = MOSSPLUS(instance, instance_type, sigma, pull_max, beta)
    while algorithm.t <= pull_max:
        # Periodic progress trace.
        if algorithm.t % update_interval == 0:
            print('run MOSS++ with alpha={} at time={}'.format(alpha, algorithm.t))
        # Vanilla (non-empirical) update rule of MOSS++.
        algorithm.update_vanilla()
    return algorithm.regret
def single_run_MOSSPLUS(n, alpha_list, instance_type, sigma, pull_max, update_interval, beta):
    """Sweep vanilla MOSS++ over every hardness level in *alpha_list*.

    Returns the final regrets, one per alpha, in input order.
    """
    return [
        run_MOSSPLUS(n, a, instance_type, sigma, pull_max, update_interval, beta)
        for a in alpha_list
    ]
def run_empMOSSPLUS(n, alpha, instance_type, sigma, pull_max, update_interval, beta):
    """Run the empirical variant of MOSS++ once and return its final regret."""
    instance, _num_best = generate_instance(n, alpha, pull_max)
    algorithm = MOSSPLUS(instance, instance_type, sigma, pull_max, beta)
    while algorithm.t <= pull_max:
        # Periodic progress trace.
        if algorithm.t % update_interval == 0:
            print('run empMOSS++ with alpha={} at time={}'.format(alpha, algorithm.t))
        # Empirical update rule (vs. update_vanilla in run_MOSSPLUS).
        algorithm.update_emp()
    return algorithm.regret
def single_run_empMOSSPLUS(n, alpha_list, instance_type, sigma, pull_max, update_interval, beta):
    """Sweep empirical MOSS++ over every hardness level in *alpha_list*.

    Returns the final regrets, one per alpha, in input order.
    """
    return [
        run_empMOSSPLUS(n, a, instance_type, sigma, pull_max, update_interval, beta)
        for a in alpha_list
    ]
def run_Parallel(n, alpha, instance_type, sigma, pull_max, update_interval):
    """Run the Parallel algorithm once and return its final regret."""
    instance, _num_best = generate_instance(n, alpha, pull_max)
    algorithm = Parallel(instance, instance_type, sigma, pull_max)
    while algorithm.t <= pull_max:
        # Periodic progress trace.
        if algorithm.t % update_interval == 0:
            print('run Parallel with alpha={} at time={}'.format(alpha, algorithm.t))
        algorithm.update()
    return algorithm.regret
def single_run_Parallel(n, alpha_list, instance_type, sigma, pull_max, update_interval):
    """Sweep Parallel over every hardness level in *alpha_list*.

    Returns the final regrets, one per alpha, in input order.
    """
    return [
        run_Parallel(n, a, instance_type, sigma, pull_max, update_interval)
        for a in alpha_list
    ]
def single_sim(n, alpha_list, beta_list, n_initial, instance_type, sigma, pull_max, update_interval):
    """Run one full simulation repetition over every algorithm and alpha.

    Returns the per-algorithm regret lists in a fixed order (MOSS, MOSS
    Oracle, Quantile, Parallel, then MOSS++ and empMOSS++ per beta) that
    ``multi_sim``'s ``algs`` list must mirror.
    """
    np.random.seed()  # fresh randomness in each worker process
    curves = []
    # MOSS without (0) and with (1) the oracle subsampling size.
    for oracle_flag in (0, 1):
        curves.append(single_run_MOSS(n, alpha_list, oracle_flag, instance_type, sigma, pull_max, update_interval))
    curves.append(single_run_Quantile(n, alpha_list, n_initial, instance_type, sigma, pull_max, update_interval))
    curves.append(single_run_Parallel(n, alpha_list, instance_type, sigma, pull_max, update_interval))
    curves.extend(
        single_run_MOSSPLUS(n, alpha_list, instance_type, sigma, pull_max, update_interval, b)
        for b in beta_list
    )
    curves.extend(
        single_run_empMOSSPLUS(n, alpha_list, instance_type, sigma, pull_max, update_interval, b)
        for b in beta_list
    )
    return curves
def multi_sim(n_parallel, n_process, n, alpha_list, n_initial, instance_type, sigma, pull_max, update_interval, beta_list):
    """Run *n_parallel* independent repetitions of ``single_sim`` in a
    process pool, aggregate mean/std regret per algorithm and alpha, dump
    the numbers to a text file, and plot regret vs. alpha.
    """
    time_start = time.time()
    # Everything but update_interval is frozen; the pool maps over copies
    # of update_interval purely to get n_parallel independent runs.
    single_sim_partial = partial(single_sim, n, alpha_list, beta_list, n_initial, instance_type, sigma, pull_max)
    pool = multiprocessing.Pool(processes = n_process)
    results = pool.map(single_sim_partial, list(map(int, update_interval * np.ones(n_parallel))))
    print(results)
    print('multi_sim got results!')
    # the order of the following sequences matters!!
    # (it must match the append order inside single_sim)
    measures = ['regret']
    algs_MOSS = ['MOSS', 'MOSS Oracle']
    algs_MOSSPLUS = ['MOSS++_{} (ours)'.format(beta) for beta in beta_list]
    algs_empMOSSPLUS = ['empMOSS++_{} (ours)'.format(beta) for beta in beta_list]
    algs = algs_MOSS + [ 'Quantile', 'Parallel (ours)'] + algs_MOSSPLUS + algs_empMOSSPLUS
    if len(beta_list) == 1:
        # Single-beta runs get cleaner legend labels.
        algs = algs_MOSS + [ 'Quantile', 'Parallel (ours)', 'MOSS++ (ours)', 'empMOSS++ (ours)']
    dict_regret = dict(zip(algs, [[] for alg in algs]))
    # orders need to match the previous one!
    # NOTE(review): with several measures the single dict_regret would be
    # shared between them; safe here only because measures has one entry.
    dict_results = dict(zip(measures, [dict_regret]))
    dict_results_ave = copy.deepcopy(dict_results)
    dict_results_std = copy.deepcopy(dict_results)
    for i in range(n_parallel):
        for j in range(len(measures)):
            for k in range(len(algs)):
                dict_results[measures[j]][algs[k]].append(results[i][k])
    # note here measures[0]
    print(dict_results)
    k = 4
    # we divide the std by k in the plot, k=4 means the shaded area represents 0.5 std
    for measure in measures:
        for alg in algs:
            dict_results_ave[measure][alg] = np.mean(dict_results[measure][alg], axis=0)
            dict_results_std[measure][alg] = np.std(dict_results[measure][alg], axis=0)/k
    print('---- final average results ----')
    print(dict_results_ave)
    time_end = time.time()
    print('total time spent', time_end - time_start)
    # Persist raw averages/stds so plots can be regenerated later.
    file = open('AlphaComparison{}n_total - {}alpha_list - {}beta_list - {}max - {}interval - {}n_parallel - {}instance_type -{}.txt'.\
        format(n, alpha_list, beta_list, pull_max, update_interval, n_parallel, instance_type, date.today()), 'w')
    file.write('{} - {}n_total - {}alpha_list - {}beta_list - {}max - {}interval - {}n_parallel - {}instance_type\n'.\
        format(date.today(), n, alpha_list, beta_list, pull_max, update_interval, n_parallel, instance_type))
    file.write('total time spent = {}\n'.format(time_end - time_start))
    file.write('measures: {}\n'.format(measures))
    file.write('algs: {}\n'.format(algs))
    file.write('Following results are for all algs\n')
    for measure in measures:
        for alg in algs:
            file.write('measure:{}, alg:{}, ave:\n'.format(measure, alg))
            file.write('{}\n'.format(dict_results_ave[measure][alg]))
    for measure in measures:
        for alg in algs:
            file.write('measure:{}, alg:{}, std:\n'.format(measure, alg))
            file.write('{}\n'.format(dict_results_std[measure][alg]))
    fig = plt.figure(1)
    # we only create 6 different line style as following. requires len(algs)=6
    marker_list = [(0, (1, 1)), (0, (3, 1, 1, 1, 1, 1)), (0, (5, 1)), '-.', '--', '-']
    if len(algs) == 6:
        for i in range(len(algs)):
            ave = np.array(dict_results_ave[measures[0]][algs[i]])
            std = np.array(dict_results_std[measures[0]][algs[i]])
            plt.plot(alpha_list, ave, label=algs[i], linestyle=marker_list[i], linewidth=3)
            plt.fill_between(alpha_list, ave - std, ave + std, alpha=0.2)
    else:
        # Fallback: default line styles when the algorithm count differs.
        for i in range(len(algs)):
            ave = np.array(dict_results_ave[measures[0]][algs[i]])
            std = np.array(dict_results_std[measures[0]][algs[i]])
            plt.plot(alpha_list, ave, label=algs[i], linewidth=3)
            plt.fill_between(alpha_list, ave - std, ave + std, alpha=0.2)
    plt.xlabel(r'$\alpha$')
    plt.ylabel('Expected regret at time {}'.format(pull_max))
    plt.legend(loc=0)
    plt.grid(alpha=0.75)
    plt.savefig('varying_hardness.pdf')
    fig.set_size_inches(6, 4)
    plt.savefig('thumbnail_fig.png')
    plt.show()
    plt.close(fig)
if __name__ == '__main__':
    # Experiment configuration for the regret-vs-alpha figure.
    instance_type = 'bernoulli'  # reward distribution tag passed to the algorithms
    n_parallel = 100             # independent repetitions
    n_process = 100              # multiprocessing pool size
    n = 20000                    # total number of arms
    alpha_list = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8]  # hardness levels (x-axis)
    n_initial = 2                # initial size handed to Quantile
    sigma = 0.25                 # noise scale
    pull_max = 50000             # time horizon
    update_interval = 50         # progress-print frequency
    beta_list = [0.5]            # MOSS++ beta parameter(s)
    multi_sim(n_parallel, n_process, n, alpha_list, n_initial, instance_type, sigma, pull_max, update_interval,
              beta_list)
|
import numpy as np
import time
import multiprocessing
import copy
from functools import partial
from datetime import date
import matplotlib
import matplotlib.pyplot as plt
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
from regret_class import MOSS, Quantile, MOSSPLUS, Parallel
from regret_curve import generate_instance, calc_oracle_sample
def run_MOSS(n, alpha, oracle_or_not, instance_type, sigma, pull_max, update_interval):
    """Run MOSS once; oracle_or_not=1 uses the oracle subsampling size.

    NOTE(review): any other oracle_or_not value leaves alg_obj unbound
    and crashes with NameError.
    """
    instance, m = generate_instance(n, alpha, pull_max)
    if oracle_or_not == 0:
        n_select = n
        alg_obj = MOSS(instance, n_select, instance_type, sigma, pull_max)
    elif oracle_or_not == 1:
        n_select = calc_oracle_sample(n, alpha, pull_max)
        alg_obj = MOSS(instance, n_select, instance_type, sigma, pull_max)
    while alg_obj.t <= pull_max:
        if alg_obj.t % update_interval == 0:
            print('run MOSS with alpha={} at time={}'.format(alpha, alg_obj.t))
        alg_obj.update()
    regret = alg_obj.regret
    return regret
def single_run_MOSS(n, alpha_list, oracle_or_not, instance_type, sigma, pull_max, update_interval):
    """Sweep MOSS over alpha_list; returns one regret per alpha, in order."""
    regret_list = []
    for alpha in alpha_list:
        regret = run_MOSS(n, alpha, oracle_or_not, instance_type, sigma, pull_max, update_interval)
        regret_list.append(regret)
    return regret_list
def run_Quantile(n, alpha, n_initial, instance_type, sigma, pull_max, update_interval):
    """Run the Quantile algorithm once and return its final regret."""
    instance, m = generate_instance(n, alpha, pull_max)
    alg_obj = Quantile(instance, n_initial, instance_type, sigma, pull_max)
    while alg_obj.t <= pull_max:
        if alg_obj.t % update_interval == 0:
            print('run Quantile with alpha={} at time={}'.format(alpha, alg_obj.t))
        alg_obj.update()
    regret = alg_obj.regret
    return regret
def single_run_Quantile(n, alpha_list, n_initial, instance_type, sigma, pull_max, update_interval):
    """Sweep Quantile over alpha_list; returns one regret per alpha, in order."""
    regret_list = []
    for alpha in alpha_list:
        regret = run_Quantile(n, alpha, n_initial, instance_type, sigma, pull_max, update_interval)
        regret_list.append(regret)
    return regret_list
def run_MOSSPLUS(n, alpha, instance_type, sigma, pull_max, update_interval, beta):
    """Run vanilla MOSS++ once and return its final regret."""
    instance, m = generate_instance(n, alpha, pull_max)
    alg_obj = MOSSPLUS(instance, instance_type, sigma, pull_max, beta)
    while alg_obj.t <= pull_max:
        if alg_obj.t % update_interval == 0:
            print('run MOSS++ with alpha={} at time={}'.format(alpha, alg_obj.t))
        alg_obj.update_vanilla()
        # vanilla version of MOSS++
    regret = alg_obj.regret
    return regret
def single_run_MOSSPLUS(n, alpha_list, instance_type, sigma, pull_max, update_interval, beta):
    """Sweep vanilla MOSS++ over alpha_list; one regret per alpha, in order."""
    regret_list = []
    for alpha in alpha_list:
        regret = run_MOSSPLUS(n, alpha, instance_type, sigma, pull_max, update_interval, beta)
        regret_list.append(regret)
    return regret_list
def run_empMOSSPLUS(n, alpha, instance_type, sigma, pull_max, update_interval, beta):
    """Run empirical MOSS++ (update_emp) once and return its final regret."""
    instance, m = generate_instance(n, alpha, pull_max)
    alg_obj = MOSSPLUS(instance, instance_type, sigma, pull_max, beta)
    while alg_obj.t <= pull_max:
        if alg_obj.t % update_interval == 0:
            print('run empMOSS++ with alpha={} at time={}'.format(alpha, alg_obj.t))
        alg_obj.update_emp()
    regret = alg_obj.regret
    return regret
def single_run_empMOSSPLUS(n, alpha_list, instance_type, sigma, pull_max, update_interval, beta):
    """Sweep empirical MOSS++ over alpha_list; one regret per alpha, in order."""
    regret_list = []
    for alpha in alpha_list:
        regret = run_empMOSSPLUS(n, alpha, instance_type, sigma, pull_max, update_interval, beta)
        regret_list.append(regret)
    return regret_list
def run_Parallel(n, alpha, instance_type, sigma, pull_max, update_interval):
    """Run the Parallel algorithm once and return its final regret."""
    instance, m = generate_instance(n, alpha, pull_max)
    alg_obj = Parallel(instance, instance_type, sigma, pull_max)
    while alg_obj.t <= pull_max:
        if alg_obj.t % update_interval == 0:
            print('run Parallel with alpha={} at time={}'.format(alpha, alg_obj.t))
        alg_obj.update()
    regret = alg_obj.regret
    return regret
def single_run_Parallel(n, alpha_list, instance_type, sigma, pull_max, update_interval):
    """Sweep Parallel over alpha_list; returns one regret per alpha, in order."""
    regret_list = []
    for alpha in alpha_list:
        regret = run_Parallel(n, alpha, instance_type, sigma, pull_max, update_interval)
        regret_list.append(regret)
    return regret_list
def single_sim(n, alpha_list, beta_list, n_initial, instance_type, sigma, pull_max, update_interval):
    """One simulation repetition: run every algorithm over all alphas.

    Returns the per-algorithm regret lists in a fixed order that
    ``multi_sim``'s ``algs`` list must mirror.
    """
    np.random.seed()  # fresh randomness in each worker process
    regret_list_multi = []
    oracle_or_not_list = [0, 1]  # plain MOSS first, then MOSS Oracle
    for oracle_or_not in oracle_or_not_list:
        regret_list = single_run_MOSS(n, alpha_list, oracle_or_not, instance_type, sigma, pull_max, update_interval)
        regret_list_multi.append(regret_list)
    regret_list = single_run_Quantile(n, alpha_list, n_initial, instance_type, sigma, pull_max, update_interval)
    regret_list_multi.append(regret_list)
    regret_list = single_run_Parallel(n, alpha_list, instance_type, sigma, pull_max, update_interval)
    regret_list_multi.append(regret_list)
    for beta in beta_list:
        regret_list = single_run_MOSSPLUS(n, alpha_list, instance_type, sigma, pull_max, update_interval, beta)
        regret_list_multi.append(regret_list)
    for beta in beta_list:
        regret_list = single_run_empMOSSPLUS(n, alpha_list, instance_type, sigma, pull_max, update_interval, beta)
        regret_list_multi.append(regret_list)
    return regret_list_multi
def multi_sim(n_parallel, n_process, n, alpha_list, n_initial, instance_type, sigma, pull_max, update_interval, beta_list):
    """Run *n_parallel* repetitions of ``single_sim`` in a process pool,
    aggregate mean/std regret per algorithm and alpha, save the numbers
    to a text file, and plot regret vs. alpha.
    """
    time_start = time.time()
    # The pool maps over n_parallel copies of update_interval purely to
    # obtain n_parallel independent runs.
    single_sim_partial = partial(single_sim, n, alpha_list, beta_list, n_initial, instance_type, sigma, pull_max)
    pool = multiprocessing.Pool(processes = n_process)
    results = pool.map(single_sim_partial, list(map(int, update_interval * np.ones(n_parallel))))
    print(results)
    print('multi_sim got results!')
    # the order of the following sequences matters!!
    # (must match the append order inside single_sim)
    measures = ['regret']
    algs_MOSS = ['MOSS', 'MOSS Oracle']
    algs_MOSSPLUS = ['MOSS++_{} (ours)'.format(beta) for beta in beta_list]
    algs_empMOSSPLUS = ['empMOSS++_{} (ours)'.format(beta) for beta in beta_list]
    algs = algs_MOSS + [ 'Quantile', 'Parallel (ours)'] + algs_MOSSPLUS + algs_empMOSSPLUS
    if len(beta_list) == 1:
        # Single-beta runs get cleaner legend labels.
        algs = algs_MOSS + [ 'Quantile', 'Parallel (ours)', 'MOSS++ (ours)', 'empMOSS++ (ours)']
    dict_regret = dict(zip(algs, [[] for alg in algs]))
    # orders need to match the previous one!
    # NOTE(review): with several measures this single dict_regret would be
    # shared; safe only because measures has exactly one entry.
    dict_results = dict(zip(measures, [dict_regret]))
    dict_results_ave = copy.deepcopy(dict_results)
    dict_results_std = copy.deepcopy(dict_results)
    for i in range(n_parallel):
        for j in range(len(measures)):
            for k in range(len(algs)):
                dict_results[measures[j]][algs[k]].append(results[i][k])
    # note here measures[0]
    print(dict_results)
    k = 4
    # we divide the std by k in the plot, k=4 means the shaded area represents 0.5 std
    for measure in measures:
        for alg in algs:
            dict_results_ave[measure][alg] = np.mean(dict_results[measure][alg], axis=0)
            dict_results_std[measure][alg] = np.std(dict_results[measure][alg], axis=0)/k
    print('---- final average results ----')
    print(dict_results_ave)
    time_end = time.time()
    print('total time spent', time_end - time_start)
    # Persist raw averages/stds so plots can be regenerated later.
    file = open('AlphaComparison{}n_total - {}alpha_list - {}beta_list - {}max - {}interval - {}n_parallel - {}instance_type -{}.txt'.\
        format(n, alpha_list, beta_list, pull_max, update_interval, n_parallel, instance_type, date.today()), 'w')
    file.write('{} - {}n_total - {}alpha_list - {}beta_list - {}max - {}interval - {}n_parallel - {}instance_type\n'.\
        format(date.today(), n, alpha_list, beta_list, pull_max, update_interval, n_parallel, instance_type))
    file.write('total time spent = {}\n'.format(time_end - time_start))
    file.write('measures: {}\n'.format(measures))
    file.write('algs: {}\n'.format(algs))
    file.write('Following results are for all algs\n')
    for measure in measures:
        for alg in algs:
            file.write('measure:{}, alg:{}, ave:\n'.format(measure, alg))
            file.write('{}\n'.format(dict_results_ave[measure][alg]))
    for measure in measures:
        for alg in algs:
            file.write('measure:{}, alg:{}, std:\n'.format(measure, alg))
            file.write('{}\n'.format(dict_results_std[measure][alg]))
    fig = plt.figure(1)
    # we only create 6 different line style as following. requires len(algs)=6
    marker_list = [(0, (1, 1)), (0, (3, 1, 1, 1, 1, 1)), (0, (5, 1)), '-.', '--', '-']
    if len(algs) == 6:
        for i in range(len(algs)):
            ave = np.array(dict_results_ave[measures[0]][algs[i]])
            std = np.array(dict_results_std[measures[0]][algs[i]])
            plt.plot(alpha_list, ave, label=algs[i], linestyle=marker_list[i], linewidth=3)
            plt.fill_between(alpha_list, ave - std, ave + std, alpha=0.2)
    else:
        # Fallback: default line styles when the algorithm count differs.
        for i in range(len(algs)):
            ave = np.array(dict_results_ave[measures[0]][algs[i]])
            std = np.array(dict_results_std[measures[0]][algs[i]])
            plt.plot(alpha_list, ave, label=algs[i], linewidth=3)
            plt.fill_between(alpha_list, ave - std, ave + std, alpha=0.2)
    plt.xlabel(r'$\alpha$')
    plt.ylabel('Expected regret at time {}'.format(pull_max))
    plt.legend(loc=0)
    plt.grid(alpha=0.75)
    plt.savefig('varying_hardness.pdf')
    fig.set_size_inches(6, 4)
    plt.savefig('thumbnail_fig.png')
    plt.show()
    plt.close(fig)
if __name__ == '__main__':
    # Experiment configuration for the regret-vs-alpha figure.
    instance_type = 'bernoulli'  # reward distribution tag passed to the algorithms
    n_parallel = 100             # independent repetitions
    n_process = 100              # multiprocessing pool size
    n = 20000                    # total number of arms
    alpha_list = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8]  # hardness levels (x-axis)
    n_initial = 2                # initial size handed to Quantile
    sigma = 0.25                 # noise scale
    pull_max = 50000             # time horizon
    update_interval = 50         # progress-print frequency
    beta_list = [0.5]            # MOSS++ beta parameter(s)
    multi_sim(n_parallel, n_process, n, alpha_list, n_initial, instance_type, sigma, pull_max, update_interval,
              beta_list)
|
en
| 0.87929
|
# vanilla version of MOSS++ # the order of the following sequences matters!! # orders need to match the previous one! # note here measures[0] # we divide the std by k in the plot, k=4 means the shaded area represents 0.5 std # we only create 6 different line styles as follows. requires len(algs)=6
| 2.204331
| 2
|
demos/demo_cpu_regularisers3D.py
|
ElsevierSoftwareX/SOFTX_2018_161
| 1
|
6629894
|
<filename>demos/demo_cpu_regularisers3D.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 22 11:39:43 2018
Demonstration of 3D CPU regularisers
@authors: <NAME>, <NAME>
"""
import matplotlib.pyplot as plt
import numpy as np
import os
import timeit
from ccpi.filters.regularisers import ROF_TV, FGP_TV, SB_TV, TGV, LLT_ROF, FGP_dTV, NDF, Diff4th
from ccpi.supp.qualitymetrics import QualityTools
###############################################################################
def printParametersToString(pars):
    """
    Render a parameter dictionary as printable text.

    Each entry becomes a newline-terminated ``key = value`` line.  The
    'algorithm' entry is shown by its function name, and array-like
    'input'/'refdata' entries by their shape, so large image volumes are
    not dumped verbatim.  An empty dict yields an empty string.
    """
    pieces = []
    for key, value in pars.items():
        if key == 'algorithm':
            shown = value.__name__
        elif key in ('input', 'refdata'):
            # report only the data shape, not the data itself
            shown = np.shape(value)
        else:
            shown = value
        pieces.append("{0} = {1}\n".format(key, shown))
    # single join instead of repeated (quadratic) string concatenation
    return ''.join(pieces)
###############################################################################
# filename = os.path.join( "data" ,"lena_gray_512.tif")
# NOTE(review): hard-coded absolute path -- only valid on the original
# author's machine; point this at a local copy of the test image.
filename = "/home/algol/Documents/DEV/CCPi-Regularisation-Toolkit/test/lena_gray_512.tif"
# read image
Im = plt.imread(filename)
Im = np.asarray(Im, dtype='float32')
# rescale 8-bit intensities to [0, 1]
Im = Im/255
# additive Gaussian noise level: 5% of the local pixel intensity
perc = 0.05
u0 = Im + np.random.normal(loc = 0 ,
                          scale = perc * Im ,
                          size = np.shape(Im))
# low-noise (1%) image, used as reference data by FGP-dTV below
u_ref = Im + np.random.normal(loc = 0 ,
                          scale = 0.01 * Im ,
                          size = np.shape(Im))
(N,M) = np.shape(u0)
# map the u0 u0->u0>0
# f = np.frompyfunc(lambda x: 0 if x < 0 else x, 1,1)
u0 = u0.astype('float32')
u_ref = u_ref.astype('float32')
# change dims to check that modules work with non-squared images
"""
M = M-100
u_ref2 = np.zeros([N,M],dtype='float32')
u_ref2[:,0:M] = u_ref[:,0:M]
u_ref = u_ref2
del u_ref2
u02 = np.zeros([N,M],dtype='float32')
u02[:,0:M] = u0[:,0:M]
u0 = u02
del u02
Im2 = np.zeros([N,M],dtype='float32')
Im2[:,0:M] = Im[:,0:M]
Im = Im2
del Im2
"""
# Build a 15-slice synthetic volume: independently-noised slices, a
# low-noise reference volume, and the noise-free ground truth.
slices = 15
noisyVol = np.zeros((slices,N,M),dtype='float32')
noisyRef = np.zeros((slices,N,M),dtype='float32')
idealVol = np.zeros((slices,N,M),dtype='float32')
for i in range (slices):
    noisyVol[i,:,:] = Im + np.random.normal(loc = 0 , scale = perc * Im , size = np.shape(Im))
    noisyRef[i,:,:] = Im + np.random.normal(loc = 0 , scale = 0.01 * Im , size = np.shape(Im))
    idealVol[i,:,:] = Im
#%%
print ("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
print ("_______________ROF-TV (3D)_________________")
print ("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
## plot
fig = plt.figure()
plt.suptitle('Performance of ROF-TV regulariser using the CPU')
a=fig.add_subplot(1,2,1)
# NOTE(review): the title says "15th slice" but slice index 10 (the 11th
# slice) is displayed below -- confirm which is intended.
a.set_title('Noisy 15th slice of a volume')
imgplot = plt.imshow(noisyVol[10,:,:],cmap="gray")
# set parameters
pars = {'algorithm': ROF_TV, \
        'input' : noisyVol,\
        'regularisation_parameter':0.02,\
        'number_of_iterations': 7000,\
        'time_marching_parameter': 0.0007,\
        'tolerance_constant':1e-06}
print ("#############ROF TV CPU####################")
start_time = timeit.default_timer()
(rof_cpu3D, info_vec_cpu) = ROF_TV(pars['input'],
       pars['regularisation_parameter'],
       pars['number_of_iterations'],
       pars['time_marching_parameter'],
       pars['tolerance_constant'], 'cpu')
# quantify reconstruction quality against the noise-free volume
Qtools = QualityTools(idealVol, rof_cpu3D)
pars['rmse'] = Qtools.rmse()
txtstr = printParametersToString(pars)
txtstr += "%s = %.3fs" % ('elapsed time',timeit.default_timer() - start_time)
print (txtstr)
a=fig.add_subplot(1,2,2)
# these are matplotlib.patch.Patch properties
props = dict(boxstyle='round', facecolor='wheat', alpha=0.75)
# place a text box in upper left in axes coords
a.text(0.15, 0.25, txtstr, transform=a.transAxes, fontsize=14,
         verticalalignment='top', bbox=props)
imgplot = plt.imshow(rof_cpu3D[10,:,:], cmap="gray")
plt.title('{}'.format('Recovered volume on the CPU using ROF-TV'))
#%%
print ("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
print ("_______________FGP-TV (3D)__________________")
print ("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
## plot
fig = plt.figure()
plt.suptitle('Performance of FGP-TV regulariser using the CPU')
a=fig.add_subplot(1,2,1)
a.set_title('Noisy Image')
imgplot = plt.imshow(noisyVol[10,:,:],cmap="gray")
# set parameters
pars = {'algorithm' : FGP_TV, \
        'input' : noisyVol,\
        'regularisation_parameter':0.02, \
        'number_of_iterations' :1000 ,\
        'tolerance_constant':1e-06,\
        'methodTV': 0 ,\
        'nonneg': 0}
# FIX: banner previously said "GPU" although this section runs on the CPU.
print ("#############FGP TV CPU####################")
start_time = timeit.default_timer()
(fgp_cpu3D, info_vec_cpu) = FGP_TV(pars['input'],
      pars['regularisation_parameter'],
      pars['number_of_iterations'],
      pars['tolerance_constant'],
      pars['methodTV'],
      pars['nonneg'], 'cpu')
# quantify reconstruction quality against the noise-free volume
Qtools = QualityTools(idealVol, fgp_cpu3D)
pars['rmse'] = Qtools.rmse()
txtstr = printParametersToString(pars)
txtstr += "%s = %.3fs" % ('elapsed time',timeit.default_timer() - start_time)
print (txtstr)
a=fig.add_subplot(1,2,2)
# these are matplotlib.patch.Patch properties
props = dict(boxstyle='round', facecolor='wheat', alpha=0.75)
# place a text box in upper left in axes coords
a.text(0.15, 0.25, txtstr, transform=a.transAxes, fontsize=14,
         verticalalignment='top', bbox=props)
imgplot = plt.imshow(fgp_cpu3D[10,:,:], cmap="gray")
plt.title('{}'.format('Recovered volume on the CPU using FGP-TV'))
#%%
print ("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
print ("_______________SB-TV (3D)_________________")
print ("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
## plot
fig = plt.figure()
plt.suptitle('Performance of SB-TV regulariser using the CPU')
a=fig.add_subplot(1,2,1)
a.set_title('Noisy Image')
imgplot = plt.imshow(noisyVol[10,:,:],cmap="gray")
# set parameters
pars = {'algorithm' : SB_TV, \
'input' : noisyVol,\
'regularisation_parameter':0.02, \
'number_of_iterations' :250 ,\
'tolerance_constant':1e-06,\
'methodTV': 0}
print ("#############SB TV CPU####################")
start_time = timeit.default_timer()
(sb_cpu3D, info_vec_cpu) = SB_TV(pars['input'],
pars['regularisation_parameter'],
pars['number_of_iterations'],
pars['tolerance_constant'],
pars['methodTV'],'cpu')
Qtools = QualityTools(idealVol, sb_cpu3D)
pars['rmse'] = Qtools.rmse()
txtstr = printParametersToString(pars)
txtstr += "%s = %.3fs" % ('elapsed time',timeit.default_timer() - start_time)
print (txtstr)
a=fig.add_subplot(1,2,2)
# these are matplotlib.patch.Patch properties
props = dict(boxstyle='round', facecolor='wheat', alpha=0.75)
# place a text box in upper left in axes coords
a.text(0.15, 0.25, txtstr, transform=a.transAxes, fontsize=14,
verticalalignment='top', bbox=props)
imgplot = plt.imshow(sb_cpu3D[10,:,:], cmap="gray")
plt.title('{}'.format('Recovered volume on the CPU using SB-TV'))
#%%
print ("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
print ("_______________LLT-ROF (3D)_________________")
print ("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
## plot
fig = plt.figure()
plt.suptitle('Performance of LLT-ROF regulariser using the CPU')
a=fig.add_subplot(1,2,1)
a.set_title('Noisy Image')
imgplot = plt.imshow(noisyVol[10,:,:],cmap="gray")
# set parameters
pars = {'algorithm' : LLT_ROF, \
'input' : noisyVol,\
'regularisation_parameterROF':0.01, \
'regularisation_parameterLLT':0.008, \
'number_of_iterations' :500 ,\
'time_marching_parameter' :0.001 ,\
'tolerance_constant':1e-06}
print ("#############LLT ROF CPU####################")
start_time = timeit.default_timer()
(lltrof_cpu3D,info_vec_cpu) = LLT_ROF(pars['input'],
pars['regularisation_parameterROF'],
pars['regularisation_parameterLLT'],
pars['number_of_iterations'],
pars['time_marching_parameter'],
pars['tolerance_constant'], 'cpu')
Qtools = QualityTools(idealVol, lltrof_cpu3D)
pars['rmse'] = Qtools.rmse()
txtstr = printParametersToString(pars)
txtstr += "%s = %.3fs" % ('elapsed time',timeit.default_timer() - start_time)
print (txtstr)
a=fig.add_subplot(1,2,2)
# these are matplotlib.patch.Patch properties
props = dict(boxstyle='round', facecolor='wheat', alpha=0.75)
# place a text box in upper left in axes coords
a.text(0.15, 0.25, txtstr, transform=a.transAxes, fontsize=14,
verticalalignment='top', bbox=props)
imgplot = plt.imshow(lltrof_cpu3D[10,:,:], cmap="gray")
plt.title('{}'.format('Recovered volume on the CPU using LLT-ROF'))
#%%
print ("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
print ("_______________TGV (3D)_________________")
print ("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
## plot
fig = plt.figure()
plt.suptitle('Performance of TGV regulariser using the CPU')
a=fig.add_subplot(1,2,1)
a.set_title('Noisy Image')
imgplot = plt.imshow(noisyVol[10,:,:],cmap="gray")
# set parameters
pars = {'algorithm' : TGV, \
'input' : noisyVol,\
'regularisation_parameter':0.02, \
'alpha1':1.0,\
'alpha0':2.0,\
'number_of_iterations' :500 ,\
'LipshitzConstant' :12 ,\
'tolerance_constant':1e-06}
print ("#############TGV CPU####################")
start_time = timeit.default_timer()
(tgv_cpu3D,info_vec_cpu) = TGV(pars['input'],
pars['regularisation_parameter'],
pars['alpha1'],
pars['alpha0'],
pars['number_of_iterations'],
pars['LipshitzConstant'],
pars['tolerance_constant'],'cpu')
Qtools = QualityTools(idealVol, tgv_cpu3D)
pars['rmse'] = Qtools.rmse()
txtstr = printParametersToString(pars)
txtstr += "%s = %.3fs" % ('elapsed time',timeit.default_timer() - start_time)
print (txtstr)
a=fig.add_subplot(1,2,2)
# these are matplotlib.patch.Patch properties
props = dict(boxstyle='round', facecolor='wheat', alpha=0.75)
# place a text box in upper left in axes coords
a.text(0.15, 0.25, txtstr, transform=a.transAxes, fontsize=14,
verticalalignment='top', bbox=props)
imgplot = plt.imshow(tgv_cpu3D[10,:,:], cmap="gray")
plt.title('{}'.format('Recovered volume on the CPU using TGV'))
#%%
print ("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
print ("________________NDF (3D)___________________")
print ("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
## plot
fig = plt.figure()
plt.suptitle('Performance of NDF regulariser using the CPU')
a=fig.add_subplot(1,2,1)
a.set_title('Noisy volume')
imgplot = plt.imshow(noisyVol[10,:,:],cmap="gray")
# set parameters
pars = {'algorithm' : NDF, \
'input' : noisyVol,\
'regularisation_parameter':0.02, \
'edge_parameter':0.015,\
'number_of_iterations' :700 ,\
'time_marching_parameter':0.01,\
'penalty_type': 1,\
'tolerance_constant':1e-06}
print ("#############NDF CPU################")
start_time = timeit.default_timer()
(ndf_cpu3D,info_vec_cpu) = NDF(pars['input'],
pars['regularisation_parameter'],
pars['edge_parameter'],
pars['number_of_iterations'],
pars['time_marching_parameter'],
pars['penalty_type'],
pars['tolerance_constant'], 'cpu')
Qtools = QualityTools(idealVol, ndf_cpu3D)
pars['rmse'] = Qtools.rmse()
txtstr = printParametersToString(pars)
txtstr += "%s = %.3fs" % ('elapsed time',timeit.default_timer() - start_time)
print (txtstr)
a=fig.add_subplot(1,2,2)
# these are matplotlib.patch.Patch properties
props = dict(boxstyle='round', facecolor='wheat', alpha=0.75)
# place a text box in upper left in axes coords
a.text(0.15, 0.25, txtstr, transform=a.transAxes, fontsize=14,
verticalalignment='top', bbox=props)
imgplot = plt.imshow(ndf_cpu3D[10,:,:], cmap="gray")
plt.title('{}'.format('Recovered volume on the CPU using NDF iterations'))
#%%
print ("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
# FIX: banner previously said "(2D)" although this demo filters a 3D volume.
print ("___Anisotropic Diffusion 4th Order (3D)____")
print ("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
## plot
fig = plt.figure()
plt.suptitle('Performance of Diff4th regulariser using the CPU')
a=fig.add_subplot(1,2,1)
a.set_title('Noisy volume')
imgplot = plt.imshow(noisyVol[10,:,:],cmap="gray")
# set parameters
pars = {'algorithm' : Diff4th, \
        'input' : noisyVol,\
        'regularisation_parameter':0.8, \
        'edge_parameter':0.02,\
        'number_of_iterations' :500 ,\
        'time_marching_parameter':0.001,\
        'tolerance_constant':1e-06}
print ("#############Diff4th CPU################")
start_time = timeit.default_timer()
(diff4th_cpu3D,info_vec_cpu) = Diff4th(pars['input'],
       pars['regularisation_parameter'],
       pars['edge_parameter'],
       pars['number_of_iterations'],
       pars['time_marching_parameter'],
       pars['tolerance_constant'],'cpu')
# quantify reconstruction quality against the noise-free volume
Qtools = QualityTools(idealVol, diff4th_cpu3D)
pars['rmse'] = Qtools.rmse()
txtstr = printParametersToString(pars)
txtstr += "%s = %.3fs" % ('elapsed time',timeit.default_timer() - start_time)
print (txtstr)
a=fig.add_subplot(1,2,2)
# these are matplotlib.patch.Patch properties
props = dict(boxstyle='round', facecolor='wheat', alpha=0.75)
# place a text box in upper left in axes coords
a.text(0.15, 0.25, txtstr, transform=a.transAxes, fontsize=14,
         verticalalignment='top', bbox=props)
imgplot = plt.imshow(diff4th_cpu3D[10,:,:], cmap="gray")
plt.title('{}'.format('Recovered volume on the CPU using DIFF4th iterations'))
#%%
print ("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
print ("_______________FGP-dTV (3D)__________________")
print ("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
## plot
fig = plt.figure()
plt.suptitle('Performance of FGP-dTV regulariser using the CPU')
a=fig.add_subplot(1,2,1)
a.set_title('Noisy Image')
imgplot = plt.imshow(noisyVol[10,:,:],cmap="gray")
# set parameters
pars = {'algorithm' : FGP_dTV,\
'input' : noisyVol,\
'refdata' : noisyRef,\
'regularisation_parameter':0.02, \
'number_of_iterations' :500 ,\
'tolerance_constant':1e-06,\
'eta_const':0.2,\
'methodTV': 0 ,\
'nonneg': 0}
print ("#############FGP dTV CPU####################")
start_time = timeit.default_timer()
(fgp_dTV_cpu3D,info_vec_cpu) = FGP_dTV(pars['input'],
pars['refdata'],
pars['regularisation_parameter'],
pars['number_of_iterations'],
pars['tolerance_constant'],
pars['eta_const'],
pars['methodTV'],
pars['nonneg'],'cpu')
Qtools = QualityTools(idealVol, fgp_dTV_cpu3D)
pars['rmse'] = Qtools.rmse()
txtstr = printParametersToString(pars)
txtstr += "%s = %.3fs" % ('elapsed time',timeit.default_timer() - start_time)
print (txtstr)
a=fig.add_subplot(1,2,2)
# these are matplotlib.patch.Patch properties
props = dict(boxstyle='round', facecolor='wheat', alpha=0.75)
# place a text box in upper left in axes coords
a.text(0.15, 0.25, txtstr, transform=a.transAxes, fontsize=14,
verticalalignment='top', bbox=props)
imgplot = plt.imshow(fgp_dTV_cpu3D[10,:,:], cmap="gray")
plt.title('{}'.format('Recovered volume on the CPU using FGP-dTV'))
#%%
|
<filename>demos/demo_cpu_regularisers3D.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 22 11:39:43 2018
Demonstration of 3D CPU regularisers
@authors: <NAME>, <NAME>
"""
import matplotlib.pyplot as plt
import numpy as np
import os
import timeit
from ccpi.filters.regularisers import ROF_TV, FGP_TV, SB_TV, TGV, LLT_ROF, FGP_dTV, NDF, Diff4th
from ccpi.supp.qualitymetrics import QualityTools
###############################################################################
def printParametersToString(pars):
    """
    Render a parameter dictionary as printable text.

    Each entry becomes a newline-terminated ``key = value`` line.  The
    'algorithm' entry is shown by its function name, and array-like
    'input'/'refdata' entries by their shape, so large image volumes are
    not dumped verbatim.  An empty dict yields an empty string.
    """
    pieces = []
    for key, value in pars.items():
        if key == 'algorithm':
            shown = value.__name__
        elif key in ('input', 'refdata'):
            # report only the data shape, not the data itself
            shown = np.shape(value)
        else:
            shown = value
        pieces.append("{0} = {1}\n".format(key, shown))
    # single join instead of repeated (quadratic) string concatenation
    return ''.join(pieces)
###############################################################################
# filename = os.path.join( "data" ,"lena_gray_512.tif")
filename = "/home/algol/Documents/DEV/CCPi-Regularisation-Toolkit/test/lena_gray_512.tif"
# read image
Im = plt.imread(filename)
Im = np.asarray(Im, dtype='float32')
Im = Im/255
perc = 0.05
u0 = Im + np.random.normal(loc = 0 ,
scale = perc * Im ,
size = np.shape(Im))
u_ref = Im + np.random.normal(loc = 0 ,
scale = 0.01 * Im ,
size = np.shape(Im))
(N,M) = np.shape(u0)
# map the u0 u0->u0>0
# f = np.frompyfunc(lambda x: 0 if x < 0 else x, 1,1)
u0 = u0.astype('float32')
u_ref = u_ref.astype('float32')
# change dims to check that modules work with non-squared images
"""
M = M-100
u_ref2 = np.zeros([N,M],dtype='float32')
u_ref2[:,0:M] = u_ref[:,0:M]
u_ref = u_ref2
del u_ref2
u02 = np.zeros([N,M],dtype='float32')
u02[:,0:M] = u0[:,0:M]
u0 = u02
del u02
Im2 = np.zeros([N,M],dtype='float32')
Im2[:,0:M] = Im[:,0:M]
Im = Im2
del Im2
"""
slices = 15
noisyVol = np.zeros((slices,N,M),dtype='float32')
noisyRef = np.zeros((slices,N,M),dtype='float32')
idealVol = np.zeros((slices,N,M),dtype='float32')
for i in range (slices):
noisyVol[i,:,:] = Im + np.random.normal(loc = 0 , scale = perc * Im , size = np.shape(Im))
noisyRef[i,:,:] = Im + np.random.normal(loc = 0 , scale = 0.01 * Im , size = np.shape(Im))
idealVol[i,:,:] = Im
#%%
print ("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
print ("_______________ROF-TV (3D)_________________")
print ("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
## plot
fig = plt.figure()
plt.suptitle('Performance of ROF-TV regulariser using the CPU')
a=fig.add_subplot(1,2,1)
a.set_title('Noisy 15th slice of a volume')
imgplot = plt.imshow(noisyVol[10,:,:],cmap="gray")
# set parameters
pars = {'algorithm': ROF_TV, \
'input' : noisyVol,\
'regularisation_parameter':0.02,\
'number_of_iterations': 7000,\
'time_marching_parameter': 0.0007,\
'tolerance_constant':1e-06}
print ("#############ROF TV CPU####################")
start_time = timeit.default_timer()
(rof_cpu3D, info_vec_cpu) = ROF_TV(pars['input'],
pars['regularisation_parameter'],
pars['number_of_iterations'],
pars['time_marching_parameter'],
pars['tolerance_constant'], 'cpu')
Qtools = QualityTools(idealVol, rof_cpu3D)
pars['rmse'] = Qtools.rmse()
txtstr = printParametersToString(pars)
txtstr += "%s = %.3fs" % ('elapsed time',timeit.default_timer() - start_time)
print (txtstr)
a=fig.add_subplot(1,2,2)
# these are matplotlib.patch.Patch properties
props = dict(boxstyle='round', facecolor='wheat', alpha=0.75)
# place a text box in upper left in axes coords
a.text(0.15, 0.25, txtstr, transform=a.transAxes, fontsize=14,
verticalalignment='top', bbox=props)
imgplot = plt.imshow(rof_cpu3D[10,:,:], cmap="gray")
plt.title('{}'.format('Recovered volume on the CPU using ROF-TV'))
#%%
print ("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
print ("_______________FGP-TV (3D)__________________")
print ("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
## plot
fig = plt.figure()
plt.suptitle('Performance of FGP-TV regulariser using the CPU')
a=fig.add_subplot(1,2,1)
a.set_title('Noisy Image')
imgplot = plt.imshow(noisyVol[10,:,:],cmap="gray")
# set parameters
pars = {'algorithm' : FGP_TV, \
        'input' : noisyVol,\
        'regularisation_parameter':0.02, \
        'number_of_iterations' :1000 ,\
        'tolerance_constant':1e-06,\
        'methodTV': 0 ,\
        'nonneg': 0}
# FIX: banner previously said "GPU" although this section runs on the CPU.
print ("#############FGP TV CPU####################")
start_time = timeit.default_timer()
(fgp_cpu3D, info_vec_cpu) = FGP_TV(pars['input'],
      pars['regularisation_parameter'],
      pars['number_of_iterations'],
      pars['tolerance_constant'],
      pars['methodTV'],
      pars['nonneg'], 'cpu')
# quantify reconstruction quality against the noise-free volume
Qtools = QualityTools(idealVol, fgp_cpu3D)
pars['rmse'] = Qtools.rmse()
txtstr = printParametersToString(pars)
txtstr += "%s = %.3fs" % ('elapsed time',timeit.default_timer() - start_time)
print (txtstr)
a=fig.add_subplot(1,2,2)
# these are matplotlib.patch.Patch properties
props = dict(boxstyle='round', facecolor='wheat', alpha=0.75)
# place a text box in upper left in axes coords
a.text(0.15, 0.25, txtstr, transform=a.transAxes, fontsize=14,
         verticalalignment='top', bbox=props)
imgplot = plt.imshow(fgp_cpu3D[10,:,:], cmap="gray")
plt.title('{}'.format('Recovered volume on the CPU using FGP-TV'))
#%%
print ("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
print ("_______________SB-TV (3D)_________________")
print ("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
## plot
fig = plt.figure()
plt.suptitle('Performance of SB-TV regulariser using the CPU')
a=fig.add_subplot(1,2,1)
a.set_title('Noisy Image')
imgplot = plt.imshow(noisyVol[10,:,:],cmap="gray")
# set parameters
pars = {'algorithm' : SB_TV, \
'input' : noisyVol,\
'regularisation_parameter':0.02, \
'number_of_iterations' :250 ,\
'tolerance_constant':1e-06,\
'methodTV': 0}
print ("#############SB TV CPU####################")
start_time = timeit.default_timer()
(sb_cpu3D, info_vec_cpu) = SB_TV(pars['input'],
pars['regularisation_parameter'],
pars['number_of_iterations'],
pars['tolerance_constant'],
pars['methodTV'],'cpu')
Qtools = QualityTools(idealVol, sb_cpu3D)
pars['rmse'] = Qtools.rmse()
txtstr = printParametersToString(pars)
txtstr += "%s = %.3fs" % ('elapsed time',timeit.default_timer() - start_time)
print (txtstr)
a=fig.add_subplot(1,2,2)
# these are matplotlib.patch.Patch properties
props = dict(boxstyle='round', facecolor='wheat', alpha=0.75)
# place a text box in upper left in axes coords
a.text(0.15, 0.25, txtstr, transform=a.transAxes, fontsize=14,
verticalalignment='top', bbox=props)
imgplot = plt.imshow(sb_cpu3D[10,:,:], cmap="gray")
plt.title('{}'.format('Recovered volume on the CPU using SB-TV'))
#%%
print ("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
print ("_______________LLT-ROF (3D)_________________")
print ("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
## plot
fig = plt.figure()
plt.suptitle('Performance of LLT-ROF regulariser using the CPU')
a=fig.add_subplot(1,2,1)
a.set_title('Noisy Image')
imgplot = plt.imshow(noisyVol[10,:,:],cmap="gray")
# set parameters
pars = {'algorithm' : LLT_ROF, \
'input' : noisyVol,\
'regularisation_parameterROF':0.01, \
'regularisation_parameterLLT':0.008, \
'number_of_iterations' :500 ,\
'time_marching_parameter' :0.001 ,\
'tolerance_constant':1e-06}
print ("#############LLT ROF CPU####################")
start_time = timeit.default_timer()
(lltrof_cpu3D,info_vec_cpu) = LLT_ROF(pars['input'],
pars['regularisation_parameterROF'],
pars['regularisation_parameterLLT'],
pars['number_of_iterations'],
pars['time_marching_parameter'],
pars['tolerance_constant'], 'cpu')
Qtools = QualityTools(idealVol, lltrof_cpu3D)
pars['rmse'] = Qtools.rmse()
txtstr = printParametersToString(pars)
txtstr += "%s = %.3fs" % ('elapsed time',timeit.default_timer() - start_time)
print (txtstr)
a=fig.add_subplot(1,2,2)
# these are matplotlib.patch.Patch properties
props = dict(boxstyle='round', facecolor='wheat', alpha=0.75)
# place a text box in upper left in axes coords
a.text(0.15, 0.25, txtstr, transform=a.transAxes, fontsize=14,
verticalalignment='top', bbox=props)
imgplot = plt.imshow(lltrof_cpu3D[10,:,:], cmap="gray")
plt.title('{}'.format('Recovered volume on the CPU using LLT-ROF'))
#%%
print ("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
print ("_______________TGV (3D)_________________")
print ("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
## plot
fig = plt.figure()
plt.suptitle('Performance of TGV regulariser using the CPU')
a=fig.add_subplot(1,2,1)
a.set_title('Noisy Image')
imgplot = plt.imshow(noisyVol[10,:,:],cmap="gray")
# set parameters
pars = {'algorithm' : TGV, \
'input' : noisyVol,\
'regularisation_parameter':0.02, \
'alpha1':1.0,\
'alpha0':2.0,\
'number_of_iterations' :500 ,\
'LipshitzConstant' :12 ,\
'tolerance_constant':1e-06}
print ("#############TGV CPU####################")
start_time = timeit.default_timer()
(tgv_cpu3D,info_vec_cpu) = TGV(pars['input'],
pars['regularisation_parameter'],
pars['alpha1'],
pars['alpha0'],
pars['number_of_iterations'],
pars['LipshitzConstant'],
pars['tolerance_constant'],'cpu')
Qtools = QualityTools(idealVol, tgv_cpu3D)
pars['rmse'] = Qtools.rmse()
txtstr = printParametersToString(pars)
txtstr += "%s = %.3fs" % ('elapsed time',timeit.default_timer() - start_time)
print (txtstr)
a=fig.add_subplot(1,2,2)
# these are matplotlib.patch.Patch properties
props = dict(boxstyle='round', facecolor='wheat', alpha=0.75)
# place a text box in upper left in axes coords
a.text(0.15, 0.25, txtstr, transform=a.transAxes, fontsize=14,
verticalalignment='top', bbox=props)
imgplot = plt.imshow(tgv_cpu3D[10,:,:], cmap="gray")
plt.title('{}'.format('Recovered volume on the CPU using TGV'))
#%%
print ("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
print ("________________NDF (3D)___________________")
print ("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
## plot
fig = plt.figure()
plt.suptitle('Performance of NDF regulariser using the CPU')
a=fig.add_subplot(1,2,1)
a.set_title('Noisy volume')
imgplot = plt.imshow(noisyVol[10,:,:],cmap="gray")
# set parameters
pars = {'algorithm' : NDF, \
'input' : noisyVol,\
'regularisation_parameter':0.02, \
'edge_parameter':0.015,\
'number_of_iterations' :700 ,\
'time_marching_parameter':0.01,\
'penalty_type': 1,\
'tolerance_constant':1e-06}
print ("#############NDF CPU################")
start_time = timeit.default_timer()
(ndf_cpu3D,info_vec_cpu) = NDF(pars['input'],
pars['regularisation_parameter'],
pars['edge_parameter'],
pars['number_of_iterations'],
pars['time_marching_parameter'],
pars['penalty_type'],
pars['tolerance_constant'], 'cpu')
Qtools = QualityTools(idealVol, ndf_cpu3D)
pars['rmse'] = Qtools.rmse()
txtstr = printParametersToString(pars)
txtstr += "%s = %.3fs" % ('elapsed time',timeit.default_timer() - start_time)
print (txtstr)
a=fig.add_subplot(1,2,2)
# these are matplotlib.patch.Patch properties
props = dict(boxstyle='round', facecolor='wheat', alpha=0.75)
# place a text box in upper left in axes coords
a.text(0.15, 0.25, txtstr, transform=a.transAxes, fontsize=14,
verticalalignment='top', bbox=props)
imgplot = plt.imshow(ndf_cpu3D[10,:,:], cmap="gray")
plt.title('{}'.format('Recovered volume on the CPU using NDF iterations'))
#%%
print ("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
# FIX: banner previously said "(2D)" although this demo filters a 3D volume.
print ("___Anisotropic Diffusion 4th Order (3D)____")
print ("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
## plot
fig = plt.figure()
plt.suptitle('Performance of Diff4th regulariser using the CPU')
a=fig.add_subplot(1,2,1)
a.set_title('Noisy volume')
imgplot = plt.imshow(noisyVol[10,:,:],cmap="gray")
# set parameters
pars = {'algorithm' : Diff4th, \
        'input' : noisyVol,\
        'regularisation_parameter':0.8, \
        'edge_parameter':0.02,\
        'number_of_iterations' :500 ,\
        'time_marching_parameter':0.001,\
        'tolerance_constant':1e-06}
print ("#############Diff4th CPU################")
start_time = timeit.default_timer()
(diff4th_cpu3D,info_vec_cpu) = Diff4th(pars['input'],
       pars['regularisation_parameter'],
       pars['edge_parameter'],
       pars['number_of_iterations'],
       pars['time_marching_parameter'],
       pars['tolerance_constant'],'cpu')
# quantify reconstruction quality against the noise-free volume
Qtools = QualityTools(idealVol, diff4th_cpu3D)
pars['rmse'] = Qtools.rmse()
txtstr = printParametersToString(pars)
txtstr += "%s = %.3fs" % ('elapsed time',timeit.default_timer() - start_time)
print (txtstr)
a=fig.add_subplot(1,2,2)
# these are matplotlib.patch.Patch properties
props = dict(boxstyle='round', facecolor='wheat', alpha=0.75)
# place a text box in upper left in axes coords
a.text(0.15, 0.25, txtstr, transform=a.transAxes, fontsize=14,
         verticalalignment='top', bbox=props)
imgplot = plt.imshow(diff4th_cpu3D[10,:,:], cmap="gray")
plt.title('{}'.format('Recovered volume on the CPU using DIFF4th iterations'))
#%%
print ("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
print ("_______________FGP-dTV (3D)__________________")
print ("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
## plot
fig = plt.figure()
plt.suptitle('Performance of FGP-dTV regulariser using the CPU')
a=fig.add_subplot(1,2,1)
a.set_title('Noisy Image')
imgplot = plt.imshow(noisyVol[10,:,:],cmap="gray")
# set parameters
pars = {'algorithm' : FGP_dTV,\
'input' : noisyVol,\
'refdata' : noisyRef,\
'regularisation_parameter':0.02, \
'number_of_iterations' :500 ,\
'tolerance_constant':1e-06,\
'eta_const':0.2,\
'methodTV': 0 ,\
'nonneg': 0}
print ("#############FGP dTV CPU####################")
start_time = timeit.default_timer()
(fgp_dTV_cpu3D,info_vec_cpu) = FGP_dTV(pars['input'],
pars['refdata'],
pars['regularisation_parameter'],
pars['number_of_iterations'],
pars['tolerance_constant'],
pars['eta_const'],
pars['methodTV'],
pars['nonneg'],'cpu')
Qtools = QualityTools(idealVol, fgp_dTV_cpu3D)
pars['rmse'] = Qtools.rmse()
txtstr = printParametersToString(pars)
txtstr += "%s = %.3fs" % ('elapsed time',timeit.default_timer() - start_time)
print (txtstr)
a=fig.add_subplot(1,2,2)
# these are matplotlib.patch.Patch properties
props = dict(boxstyle='round', facecolor='wheat', alpha=0.75)
# place a text box in upper left in axes coords
a.text(0.15, 0.25, txtstr, transform=a.transAxes, fontsize=14,
verticalalignment='top', bbox=props)
imgplot = plt.imshow(fgp_dTV_cpu3D[10,:,:], cmap="gray")
plt.title('{}'.format('Recovered volume on the CPU using FGP-dTV'))
#%%
|
en
| 0.299605
|
#!/usr/bin/env python3 # -*- coding: utf-8 -*- Created on Thu Feb 22 11:39:43 2018 Demonstration of 3D CPU regularisers @authors: <NAME>, <NAME> ############################################################################### ############################################################################### # filename = os.path.join( "data" ,"lena_gray_512.tif") # read image # map the u0 u0->u0>0 # f = np.frompyfunc(lambda x: 0 if x < 0 else x, 1,1) # change dims to check that modules work with non-squared images M = M-100 u_ref2 = np.zeros([N,M],dtype='float32') u_ref2[:,0:M] = u_ref[:,0:M] u_ref = u_ref2 del u_ref2 u02 = np.zeros([N,M],dtype='float32') u02[:,0:M] = u0[:,0:M] u0 = u02 del u02 Im2 = np.zeros([N,M],dtype='float32') Im2[:,0:M] = Im[:,0:M] Im = Im2 del Im2 #%% ## plot # set parameters ############ROF TV CPU####################") # these are matplotlib.patch.Patch properties # place a text box in upper left in axes coords #%% ## plot # set parameters ############FGP TV GPU####################") # these are matplotlib.patch.Patch properties # place a text box in upper left in axes coords #%% ## plot # set parameters ############SB TV CPU####################") # these are matplotlib.patch.Patch properties # place a text box in upper left in axes coords #%% ## plot # set parameters ############LLT ROF CPU####################") # these are matplotlib.patch.Patch properties # place a text box in upper left in axes coords #%% ## plot # set parameters ############TGV CPU####################") # these are matplotlib.patch.Patch properties # place a text box in upper left in axes coords #%% ## plot # set parameters ############NDF CPU################") # these are matplotlib.patch.Patch properties # place a text box in upper left in axes coords #%% ## plot # set parameters ############Diff4th CPU################") # these are matplotlib.patch.Patch properties # place a text box in upper left in axes coords #%% ## plot # set parameters ############FGP dTV 
CPU####################") # these are matplotlib.patch.Patch properties # place a text box in upper left in axes coords #%%
| 2.238075
| 2
|
uncertainties/test_uncertainties.py
|
juliotux/uncertainties
| 0
|
6629895
|
<reponame>juliotux/uncertainties<gh_stars>0
# coding=utf-8
"""
Tests of the code in uncertainties/__init__.py.
These tests can be run through the Nose testing framework.
(c) 2010-2016 by <NAME> (EOL).
"""
from __future__ import division
from __future__ import print_function
# Standard modules
from builtins import str
from builtins import zip
from builtins import map
from builtins import range
import copy
import weakref
import math
from math import isnan, isinf
import random
import sys
# 3rd-party modules
# import nose.tools
# Local modules
import uncertainties.core as uncert_core
from uncertainties.core import ufloat, AffineScalarFunc, ufloat_fromstr
from uncertainties import umath
# The following information is useful for making sure that the right
# version of Python is running the tests (for instance with the Travis
# Continuous Integration system):
print("Testing with Python", sys.version)
###############################################################################
# Utilities for unit testing
def numbers_close(x, y, tolerance=1e-6):
    """
    Return True when the given floats are close enough.

    The tolerance is interpreted as a relative difference when both
    numbers are non-zero, and as an absolute difference when either one
    is zero.  Infinities only match infinities, and NaN is considered
    close to itself.
    """
    # !!! Python 3.5+ has math.isclose(): maybe it could be used here.
    # Guard clauses instead of nested if/else; the explicit zero test
    # avoids a ZeroDivisionError and keeps NaN out of the division.
    if x == 0 or y == 0:
        # Absolute comparison; "x or y" selects the possibly non-zero one.
        return abs(x or y) < tolerance
    if isinf(x):
        return isinf(y)
    if isnan(x):
        return isnan(y)
    # Symmetric relative comparison.
    return 2 * abs(x - y) / (abs(x) + abs(y)) < tolerance
def ufloats_close(x, y, tolerance=1e-6):
    '''
    Test whether two numbers with uncertainties are close *as random
    variables*: this is stronger than separately comparing their nominal
    values and standard deviations.

    Both the nominal value and the standard deviation of the difference
    x - y must be close to zero (within `tolerance`).
    '''
    difference = x - y
    nominal_ok = numbers_close(difference.nominal_value, 0, tolerance)
    spread_ok = numbers_close(difference.std_dev, 0, tolerance)
    return nominal_ok and spread_ok
class DerivativesDiffer(Exception):
    """Raised when a function's derivatives differ from their numerical estimates."""
    pass
def compare_derivatives(func, numerical_derivatives,
                        num_args_list=None):
    """
    Checks the derivatives of a function 'func' (as returned by the
    wrap() wrapper), by comparing them to the
    'numerical_derivatives' functions.
    Raises a DerivativesDiffer exception in case of problem.
    These functions all take the number of arguments listed in
    num_args_list. If num_args is None, it is automatically obtained.
    Tests are done on random arguments.
    """
    # Name used in messages: wrapped functions may expose .name, plain
    # functions only have __name__:
    try:
        funcname = func.name
    except AttributeError:
        funcname = func.__name__
    # print "Testing", func.__name__
    if not num_args_list:
        # Detecting automatically the correct number of arguments is not
        # always easy (because not all values are allowed, etc.):
        num_args_table = {
            'atanh': [1],
            'log': [1, 2] # Both numbers of arguments are tested
        }
        if funcname in num_args_table:
            num_args_list = num_args_table[funcname]
        else:
            num_args_list = []
            # We loop until we find reasonable function arguments:
            # We get the number of arguments by trial and error:
            for num_args in range(10):
                try:
                    #! Giving integer arguments is good for preventing
                    # certain functions from failing even though num_args
                    # is their correct number of arguments
                    # (e.g. math.ldexp(x, i), where i must be an integer)
                    func(*(1,)*num_args)
                except TypeError:
                    pass # Not the right number of arguments
                else: # No error
                    # num_args is a good number of arguments for func:
                    num_args_list.append(num_args)
            if not num_args_list:
                raise Exception("Can't find a reasonable number of arguments"
                                " for function '%s'." % funcname)
    for num_args in num_args_list:
        # Argument numbers that will have a random integer value:
        integer_arg_nums = set()
        if funcname == 'ldexp':
            # The second argument must be an integer:
            integer_arg_nums.add(1)
        # Retry loop: argument constraints (integer-only positions) are
        # discovered from the exceptions raised by trial calls:
        while True:
            try:
                # We include negative numbers, for more thorough tests:
                args = []
                for arg_num in range(num_args):
                    if arg_num in integer_arg_nums:
                        args.append(random.choice(range(-10, 10)))
                    else:
                        args.append(
                            uncert_core.Variable(random.random()*4-2, 0))
                # 'args', but as scalar values:
                args_scalar = [uncert_core.nominal_value(v)
                               for v in args]
                func_approx = func(*args)
                # Some functions yield simple Python constants, after
                # wrapping in wrap(): no test has to be performed.
                # Some functions also yield tuples...
                if isinstance(func_approx, AffineScalarFunc):
                    # We compare all derivatives:
                    for (arg_num, (arg, numerical_deriv)) in (
                            enumerate(zip(args, numerical_derivatives))):
                        # Some arguments might not be differentiable:
                        if isinstance(arg, int):
                            continue
                        fixed_deriv_value = func_approx.derivatives[arg]
                        num_deriv_value = numerical_deriv(*args_scalar)
                        # This message is useful: the user can see that
                        # tests are really performed (instead of not being
                        # performed, silently):
                        print("Testing derivative #%d of %s at %s" % (
                            arg_num, funcname, args_scalar))
                        if not numbers_close(fixed_deriv_value,
                                             num_deriv_value, 1e-4):
                            # It is possible that the result is NaN:
                            if not isnan(func_approx):
                                raise DerivativesDiffer(
                                    "Derivative #%d of function '%s' may be"
                                    " wrong: at args = %s,"
                                    " value obtained = %.16f,"
                                    " while numerical approximation = %.16f."
                                    % (arg_num, funcname, args,
                                       fixed_deriv_value, num_deriv_value))
            except ValueError as err: # Arguments out of range, or of wrong type
                # Factorial(real) lands here:
                if str(err).startswith('factorial'):
                    integer_arg_nums = set([0])
                    continue # We try with different arguments
                # Some arguments might have to be integers, for instance:
            except TypeError as err:
                if len(integer_arg_nums) == num_args:
                    raise Exception("Incorrect testing procedure: unable to "
                                    "find correct argument values for %s: %s"
                                    % (funcname, err))
                # Another argument might be forced to be an integer:
                integer_arg_nums.add(random.choice(range(num_args)))
            else:
                # We have found reasonable arguments, and the test passed:
                break
###############################################################################
def test_value_construction():
    '''
    Tests the various means of constructing a constant number with
    uncertainty *without a string* (see test_ufloat_fromstr(), for this).

    Covers ufloat(nominal, std_dev[, tag]), the obsolete
    ufloat(tuple[, tag]) form, rejection of negative standard
    deviations, and rejection of calls with a missing std_dev.
    '''
    ## Simple construction:
    x = ufloat(3, 0.14)
    assert x.nominal_value == 3
    assert x.std_dev == 0.14
    assert x.tag is None
    # ... with tag as positional argument:
    x = ufloat(3, 0.14, 'pi')
    assert x.nominal_value == 3
    assert x.std_dev == 0.14
    assert x.tag == 'pi'
    # ... with tag keyword:
    x = ufloat(3, 0.14, tag='pi')
    assert x.nominal_value == 3
    assert x.std_dev == 0.14
    assert x.tag == 'pi'
    ## Comparison with the obsolete tuple form:
    # The following tuple is stored in a variable instead of being
    # repeated in the calls below, so that the automatic code update
    # does not replace ufloat((3, 0.14)) by ufloat(3, 14): the goal
    # here is to make sure that the obsolete form gives the same
    # result as the new form.
    representation = (3, 0.14) # Obsolete representation
    x = ufloat(3, 0.14)
    x2 = ufloat(representation) # Obsolete
    assert x.nominal_value == x2.nominal_value
    assert x.std_dev == x2.std_dev
    assert x.tag is None
    assert x2.tag is None
    # With tag as positional argument:
    x = ufloat(3, 0.14, "pi")
    x2 = ufloat(representation, "pi") # Obsolete
    assert x.nominal_value == x2.nominal_value
    assert x.std_dev == x2.std_dev
    assert x.tag == 'pi'
    assert x2.tag == 'pi'
    # With tag keyword:
    x = ufloat(3, 0.14, tag="pi")
    x2 = ufloat(representation, tag="pi") # Obsolete
    assert x.nominal_value == x2.nominal_value
    assert x.std_dev == x2.std_dev
    assert x.tag == 'pi'
    assert x2.tag == 'pi'
    # Negative standard deviations should be caught in a nice way
    # (with the right exception):
    try:
        x = ufloat(3, -0.1)
    except uncert_core.NegativeStdDev:
        pass
    else:
        # Bug fix: the original version passed silently when no
        # exception was raised; the exception must be mandatory:
        raise Exception("NegativeStdDev exception expected")
    try:
        # Obsolete form:
        x = ufloat((3, -0.1))
    except uncert_core.NegativeStdDev:
        pass
    else:
        raise Exception("NegativeStdDev exception expected")
    ## Incorrect forms should not raise any deprecation warning, but
    ## raise an exception:
    try:
        ufloat(1) # Form that has never been allowed
    except Exception:
        # Any regular exception is acceptable (the original bare
        # 'except:' also swallowed KeyboardInterrupt/SystemExit):
        pass
    else:
        raise Exception("An exception should be raised")
def test_ufloat_fromstr():
    """
    Input of numbers with uncertainties as a string.

    Each string representation is parsed with ufloat_fromstr() (alone,
    with a positional tag, with a keyword tag) and through the obsolete
    ufloat(string[, tag]) forms; the parsed nominal value and standard
    deviation are then checked against the expected values.
    """
    # String representation, and numerical values:
    tests = {
        "-1.23(3.4)": (-1.23, 3.4), # (Nominal value, error)
        " -1.23(3.4) ": (-1.23, 3.4), # Spaces ignored
        "-1.34(5)": (-1.34, 0.05),
        "1(6)": (1, 6),
        "3(4.2)": (3, 4.2),
        "-9(2)": (-9, 2),
        "1234567(1.2)": (1234567, 1.2),
        "12.345(15)": (12.345, 0.015),
        "-12.3456(78)e-6": (-12.3456e-6, 0.0078e-6),
        "0.29": (0.29, 0.01),
        "31.": (31, 1),
        "-31.": (-31, 1),
        # The following tests that the ufloat() routine does
        # not consider '31' like the tuple ('3', '1'), which would
        # make it expect two numbers (instead of 2 1-character
        # strings):
        "31": (31, 1),
        "-3.1e10": (-3.1e10, 0.1e10),
        "169.0(7)": (169, 0.7),
        "-0.1+/-1": (-0.1, 1),
        "-13e-2+/-1e2": (-13e-2, 1e2),
        '-14.(15)': (-14, 15),
        '-100.0(15)': (-100, 1.5),
        '14.(15)': (14, 15),
        # Global exponent:
        '(3.141+/-0.001)E+02': (314.1, 0.1),
        ## Pretty-print notation:
        # ± sign, global exponent (not pretty-printed):
        u'(3.141±0.001)E+02': (314.1, 0.1),
        # ± sign, individual exponent:
        u'3.141E+02±0.001e2': (314.1, 0.1),
        # ± sign, times symbol, superscript (= full pretty-print):
        u'(3.141 ± 0.001) × 10²': (314.1, 0.1),
        # NaN uncertainty:
        u'(3.141±nan)E+02': (314.1, float('nan')),
        '3.141e+02+/-nan': (314.1, float('nan')),
        '3.4(nan)e10': (3.4e10, float('nan')),
        # NaN value:
        'nan+/-3.14e2': (float('nan'), 314),
        # "Double-floats"
        '(-3.1415 +/- 1e-4)e+200': (-3.1415e200, 1e196),
        '(-3.1415e-10 +/- 1e-4)e+200': (-3.1415e190, 1e196),
        # Special float representation:
        '-3(0.)': (-3, 0)
    }
    # NaN expected values work here because numbers_close() treats NaN
    # as close to NaN:
    for (representation, values) in tests.items():
        # Without tag:
        num = ufloat_fromstr(representation)
        assert numbers_close(num.nominal_value, values[0])
        assert numbers_close(num.std_dev, values[1])
        assert num.tag is None
        # With a tag as positional argument:
        num = ufloat_fromstr(representation, 'test variable')
        assert numbers_close(num.nominal_value, values[0])
        assert numbers_close(num.std_dev, values[1])
        assert num.tag == 'test variable'
        # With a tag as keyword argument:
        num = ufloat_fromstr(representation, tag='test variable')
        assert numbers_close(num.nominal_value, values[0])
        assert numbers_close(num.std_dev, values[1])
        assert num.tag == 'test variable'
        ## Obsolete forms
        num = ufloat(representation) # Obsolete
        assert numbers_close(num.nominal_value, values[0])
        assert numbers_close(num.std_dev, values[1])
        assert num.tag is None
        # Call with a tag list argument:
        num = ufloat(representation, 'test variable') # Obsolete
        assert numbers_close(num.nominal_value, values[0])
        assert numbers_close(num.std_dev, values[1])
        assert num.tag == 'test variable'
        # Call with a tag keyword argument:
        num = ufloat(representation, tag='test variable') # Obsolete
        assert numbers_close(num.nominal_value, values[0])
        assert numbers_close(num.std_dev, values[1])
        assert num.tag == 'test variable'
###############################################################################
# Test of correctness of the fixed (usually analytical) derivatives:
def test_fixed_derivatives_basic_funcs():
    """
    Pre-calculated derivatives for operations on AffineScalarFunc.

    Each overloaded operator of AffineScalarFunc is compared against a
    numerical derivative through compare_derivatives().
    """
    def check_op(op, num_args):
        """
        Makes sure that the derivatives for function '__op__' of class
        AffineScalarFunc, which takes num_args arguments, are correct.
        If num_args is None, a correct value is calculated.
        """
        op_string = "__%s__" % op
        func = getattr(AffineScalarFunc, op_string)
        numerical_derivatives = uncert_core.NumericalDerivatives(
            # The __neg__ etc. methods of AffineScalarFunc only apply,
            # by definition, to AffineScalarFunc objects: we first map
            # possible scalar arguments (used for calculating
            # derivatives) to AffineScalarFunc objects:
            lambda *args: func(*map(uncert_core.to_affine_scalar, args)))
        compare_derivatives(func, numerical_derivatives, [num_args])
    # Operators that take 1 value:
    for op in uncert_core.modified_operators:
        check_op(op, 1)
    # Operators that take 2 values:
    for op in uncert_core.modified_ops_with_reflection:
        check_op(op, 2)
# Additional, more complex checks, for use with the nose unit testing
# framework.
def test_copy():
    """
    Standard copy module integration.

    Checks shallow and deep copies of Variable and AffineScalarFunc
    objects, preservation (or deliberate breaking) of correlations,
    and the robustness of copies against garbage collection of the
    original variables (weak-reference handling).
    """
    # gc is imported locally because it is only needed for the
    # weak-reference check at the end:
    import gc
    x = ufloat(3, 0.1)
    assert x == x
    y = copy.copy(x)
    assert x != y
    assert not(x == y)
    assert y in y.derivatives.keys() # y must not copy the dependence on x
    z = copy.deepcopy(x)
    assert x != z
    # Copy tests on expressions:
    t = x + 2*z
    # t depends on x:
    assert x in t.derivatives
    # The relationship between the copy of an expression and the
    # original variables should be preserved:
    t_copy = copy.copy(t)
    # Shallow copy: the variables on which t depends are not copied:
    assert x in t_copy.derivatives
    assert (uncert_core.covariance_matrix([t, z]) ==
            uncert_core.covariance_matrix([t_copy, z]))
    # However, the relationship between a deep copy and the original
    # variables should be broken, since the deep copy created new,
    # independent variables:
    t_deepcopy = copy.deepcopy(t)
    assert x not in t_deepcopy.derivatives
    assert (uncert_core.covariance_matrix([t, z]) !=
            uncert_core.covariance_matrix([t_deepcopy, z]))
    # Test of implementations with weak references:
    # Weak references: destroying a variable should never destroy the
    # integrity of its copies (which would happen if the copy keeps a
    # weak reference to the original, in its derivatives member: the
    # weak reference to the original would become invalid):
    del x
    gc.collect()
    assert y in list(y.derivatives.keys())
## Classes for the pickling tests (put at the module level, so that
## they can be unpickled):
# Subclass without slots:
class NewVariable_dict(uncert_core.Variable):
    """Variable subclass without __slots__ (attributes live in __dict__)."""
    pass
# Subclass with slots defined by a tuple:
class NewVariable_slots_tuple(uncert_core.Variable):
    """Variable subclass whose __slots__ is defined by a tuple."""
    __slots__ = ('new_attr',)
# Subclass with slots defined by a string:
class NewVariable_slots_str(uncert_core.Variable):
    """Variable subclass whose __slots__ is a single string (legal form)."""
    __slots__ = 'new_attr'
def test_pickling():
    """
    Standard pickle module integration.

    Covers pickling of Variable and AffineScalarFunc objects
    (correlations must survive a round trip), of subclasses with and
    without __slots__, and of __slots__/__dict__ shadowing corner
    cases.
    """
    import pickle
    x = ufloat(2, 0.1)
    x_unpickled = pickle.loads(pickle.dumps(x))
    assert x != x_unpickled # Pickling creates copies
    ## Tests with correlations and AffineScalarFunc objects:
    f = 2*x
    assert isinstance(f, AffineScalarFunc)
    (f_unpickled, x_unpickled2) = pickle.loads(pickle.dumps((f, x)))
    # Correlations must be preserved:
    assert f_unpickled - x_unpickled2 - x_unpickled2 == 0
    ## Tests with subclasses:
    for subclass in (NewVariable_dict, NewVariable_slots_tuple,
                     NewVariable_slots_str):
        x = subclass(3, 0.14)
        # Pickling test with possibly uninitialized slots:
        pickle.loads(pickle.dumps(x))
        # Unpickling test:
        x.new_attr = 'New attr value'
        x_unpickled = pickle.loads(pickle.dumps(x))
        # Must exist (from the slots of the parent class):
        x_unpickled.nominal_value
        x_unpickled.new_attr # Must exist
    ##
    # Corner case test: when an attribute is present both in __slots__
    # and in __dict__, it is first looked up from the slots
    # (references:
    # http://docs.python.org/2/reference/datamodel.html#invoking-descriptors,
    # http://stackoverflow.com/a/15139208/42973). As a consequence,
    # the pickling process must pickle the correct value (i.e., not
    # the value from __dict__):
    x = NewVariable_dict(3, 0.14)
    x._nominal_value = 'in slots'
    # Corner case: __dict__ key which is also a slot name (it is
    # shadowed by the corresponding slot, so this is very unusual,
    # though):
    x.__dict__['_nominal_value'] = 'in dict'
    # Additional __dict__ attribute:
    x.dict_attr = 'dict attribute'
    x_unpickled = pickle.loads(pickle.dumps(x))
    # We make sure that the data is still there and untouched:
    assert x_unpickled._nominal_value == 'in slots'
    assert x_unpickled.__dict__ == x.__dict__
    ##
    # Corner case that should have no impact on the code but which is
    # not prevented by the documentation: case of constant linear
    # terms (the potential gotcha is that if the linear_combo
    # attribute is empty, __getstate__()'s result could be false, and
    # so __setstate__() would not be called and the original empty
    # linear combination would not be set in linear_combo.
    x = uncert_core.LinearCombination({})
    assert pickle.loads(pickle.dumps(x)).linear_combo == {}
def test_int_div():
    """Floor division of a number with uncertainty by an integer."""
    # Floats are used throughout, because derivatives can otherwise be
    # meaningless:
    result = ufloat(3.9, 2)//2
    assert result.nominal_value == 1.
    # The linear-error model assumes small uncertainties; the large
    # uncertainty in ufloat(3.9, 2) violates that assumption, so a
    # zero standard deviation (piecewise-constant floor division) is
    # the correct outcome:
    assert result.std_dev == 0.0
def test_comparison_ops():
    """
    Test of comparison operators.

    Checks deterministic comparisons against plain numbers and other
    uncertain numbers, then stochastically verifies the semantics of
    all comparison operators (a comparison is True iff it holds over
    an infinitesimal neighborhood of the nominal values).
    """
    import random
    # Operations on quantities equivalent to Python numbers must still
    # be correct:
    a = ufloat(-3, 0)
    b = ufloat(10, 0)
    c = ufloat(10, 0)
    assert a < b
    assert a < 3
    assert 3 < b # This is first given to int.__lt__()
    assert b == c
    x = ufloat(3, 0.1)
    # One constraint is that usual Python code for inequality testing
    # still work in a reasonable way (for instance, it is generally
    # desirable that functions defined by different formulas on
    # different intervals can still do "if 0 < x < 1:...". This
    # supposes again that errors are "small" (as for the estimate of
    # the standard error).
    assert x > 1
    # The limit case is not obvious:
    assert not(x >= 3)
    assert not(x < 3)
    assert x == x
    # Comparison between Variable and AffineScalarFunc:
    assert x == x + 0
    # Comparison between 2 _different_ AffineScalarFunc objects
    # representing the same value:
    assert x/2 == x/2
    # With uncorrelated result that have the same behavior (value and
    # standard error):
    assert 2*ufloat(1, 0.1) != ufloat(2, 0.2)
    # Comparison between 2 _different_ Variable objects
    # that are uncorrelated:
    assert x != ufloat(3, 0.1)
    assert x != ufloat(3, 0.2)
    # Comparison to other types should work:
    assert x != None # Not comparable
    assert x-x == 0 # Comparable, even though the types are different
    assert x != [1, 2]
    ####################
    # Checks of the semantics of logical operations: they return True
    # iff they are always True when the parameters vary in an
    # infinitesimal interval inside sigma (sigma == 0 is a special
    # case):
    def test_all_comparison_ops(x, y):
        """
        Takes two Variable objects.
        Fails if any comparison operation fails to follow the proper
        semantics: a comparison only returns True if the correspond float
        comparison results are True for all the float values taken by
        the variables (of x and y) when they vary in an infinitesimal
        neighborhood within their uncertainty.
        This test is stochastic: it may, exceptionally, fail for
        correctly implemented comparison operators.
        """
        import random
        def random_float(var):
            """
            Returns a random value for Variable var, in an
            infinitesimal interval withing its uncertainty. The case
            of a zero uncertainty is special.
            """
            return ((random.random()-0.5) * min(var.std_dev, 1e-5)
                    + var.nominal_value)
        # All operations are tested:
        for op in ["__%s__" % name
                   for name in('ne', 'eq', 'lt', 'le', 'gt', 'ge')]:
            try:
                float_func = getattr(float, op)
            except AttributeError: # Python 2.3's floats don't have __ne__
                continue
            # Determination of the correct truth value of func(x, y):
            sampled_results = []
            # The "main" value is an important particular case, and
            # the starting value for the final result
            # (correct_result):
            sampled_results.append(float_func(x.nominal_value, y.nominal_value))
            for check_num in range(50): # Many points checked
                sampled_results.append(float_func(random_float(x),
                                                  random_float(y)))
            min_result = min(sampled_results)
            max_result = max(sampled_results)
            if min_result == max_result:
                correct_result = min_result
            else:
                # Almost all results must be True, for the final value
                # to be True:
                num_min_result = sampled_results.count(min_result)
                # 1 exception is considered OK:
                correct_result = (num_min_result == 1)
            try:
                assert correct_result == getattr(x, op)(y)
            except AssertionError:
                print("Sampling results:", sampled_results)
                raise Exception("Semantic value of %s %s (%s) %s not"
                                " correctly reproduced."
                                % (x, op, y, correct_result))
    # With different numbers:
    test_all_comparison_ops(ufloat(3, 0.1),
                            ufloat(-2, 0.1))
    test_all_comparison_ops(ufloat(0, 0), # Special number
                            ufloat(1, 1))
    test_all_comparison_ops(ufloat(0, 0), # Special number
                            ufloat(0, 0.1))
    # With identical numbers:
    test_all_comparison_ops(ufloat(0, 0),
                            ufloat(0, 0))
    test_all_comparison_ops(ufloat(1, 1),
                            ufloat(1, 1))
def test_logic():
    """Truth value (bool()/__nonzero__) of numbers with uncertainties."""
    nonzero = ufloat(3, 0)
    exact_zero = ufloat(0, 0)
    zero_with_unc = ufloat(0, 0.1)
    wide = ufloat(-1, 2)
    # Only an exactly-zero value with zero uncertainty is falsy: truth
    # is decided on an infinitesimal neighborhood of the nominal value,
    # not on the full uncertainty interval.
    assert bool(nonzero) == True
    assert bool(exact_zero) == False
    assert bool(zero_with_unc) == True
    assert bool(wide) == True
def test_obsolete():
    '''
    Tests some obsolete interfaces of numbers with uncertainties
    (set_std_dev(), and std_dev as a callable).
    '''
    x = ufloat(3, 0.1)
    # Obsolete function, protected against automatic modification:
    x.set_std_dev.__call__(0.2) # Obsolete
    # In the obsolete interface, std_dev was a method, so the
    # attribute must still be callable:
    x_std_dev = x.std_dev
    assert x_std_dev() == 0.2 # Obsolete call
def test_basic_access_to_data():
    """
    Access to data from Variable and AffineScalarFunc objects:
    tag, nominal_value, std_dev, error_components(), derivatives,
    std_dev assignment rules, and std_score().
    """
    x = ufloat(3.14, 0.01, "x var")
    assert x.tag == "x var"
    assert x.nominal_value == 3.14
    assert x.std_dev == 0.01
    # Case of AffineScalarFunc objects:
    y = x + 0
    assert type(y) == AffineScalarFunc
    assert y.nominal_value == 3.14
    assert y.std_dev == 0.01
    # Details on the sources of error:
    a = ufloat(-1, 0.001)
    y = 2*x + 3*x + 2 + a
    error_sources = y.error_components()
    assert len(error_sources) == 2 # 'a' and 'x'
    assert error_sources[x] == 0.05
    assert error_sources[a] == 0.001
    # Derivative values should be available:
    assert y.derivatives[x] == 5
    # Modification of the standard deviation of variables:
    x.std_dev = 1
    assert y.error_components()[x] == 5 # New error contribution!
    # Calculated values with uncertainties should not have a settable
    # standard deviation:
    y = 2*x
    try:
        y.std_dev = 1
    except AttributeError:
        pass
    else:
        raise Exception(
            "std_dev should not be settable for calculated results")
    # Calculation of deviations in units of the standard deviations:
    assert 10/x.std_dev == x.std_score(10 + x.nominal_value)
    # "In units of the standard deviation" is not always meaningful:
    x.std_dev = 0
    try:
        x.std_score(1)
    except ValueError:
        pass # Normal behavior
    # NOTE(review): if std_score(1) raises nothing (or a different
    # exception type), this final check passes or fails silently —
    # an 'else: raise' clause may be worth adding.
def test_correlations():
    """Correlations between variables."""
    exact_one = ufloat(1, 0)
    base = ufloat(4, 0.1)
    derived = base*2 + exact_one
    # The derived quantity carries an uncertainty:
    assert derived.std_dev != 0
    # Subtracting a fully correlated expression must cancel the
    # uncertainty exactly (no "naive" quadratic addition of errors):
    residual = derived - (base*2 + 1)
    assert residual.nominal_value == 0
    assert residual.std_dev == 0
def test_no_coercion():
    """
    float() coercion of a Variable must be impossible, as it is for
    complex numbers: silently dropping the uncertainty would be wrong.
    """
    value = ufloat(4, 1)
    try:
        assert float(value) == 4
    except TypeError:
        # Expected: the conversion is forbidden.
        return
    raise Exception("Conversion to float() should fail with TypeError")
def test_wrapped_func_no_args_no_kwargs():
    '''
    Wrap a function that takes only positional-or-keyword parameters.
    '''
    # Reference implementation, which handles uncertainties natively:
    def f_auto_unc(x, y):
        return 2*x+umath.sin(y)
    # Like f_auto_unc, but does not accept numbers with uncertainties:
    def f(x, y):
        assert not isinstance(x, uncert_core.UFloat)
        assert not isinstance(y, uncert_core.UFloat)
        return f_auto_unc(x, y)
    x = uncert_core.ufloat(1, 0.1)
    y = uncert_core.ufloat(10, 2)
    ### Automatic numerical derivatives:
    ## Fully automatic numerical derivatives:
    f_wrapped = uncert_core.wrap(f)
    assert ufloats_close(f_auto_unc(x, y), f_wrapped(x, y))
    # Call with keyword arguments:
    assert ufloats_close(f_auto_unc(y=y, x=x), f_wrapped(y=y, x=x))
    ## Automatic additional derivatives for non-defined derivatives,
    ## and explicit None derivative:
    f_wrapped = uncert_core.wrap(f, [None]) # No derivative for y
    assert ufloats_close(f_auto_unc(x, y), f_wrapped(x, y))
    # Call with keyword arguments:
    assert ufloats_close(f_auto_unc(y=y, x=x), f_wrapped(y=y, x=x))
    ### Explicit derivatives:
    ## Fully defined derivatives:
    f_wrapped = uncert_core.wrap(f, [lambda x, y: 2,
                                     lambda x, y: math.cos(y)])
    assert ufloats_close(f_auto_unc(x, y), f_wrapped(x, y))
    # Call with keyword arguments:
    assert ufloats_close(f_auto_unc(y=y, x=x), f_wrapped(y=y, x=x))
    ## Automatic additional derivatives for non-defined derivatives:
    f_wrapped = uncert_core.wrap(f, [lambda x, y: 2]) # No derivative for y
    assert ufloats_close(f_auto_unc(x, y), f_wrapped(x, y))
    # Call with keyword arguments:
    assert ufloats_close(f_auto_unc(y=y, x=x), f_wrapped(y=y, x=x))
def test_wrapped_func_args_no_kwargs():
    '''
    Wrap a function that takes only positional-or-keyword and
    var-positional parameters.
    '''
    # Reference implementation, which handles uncertainties natively:
    def f_auto_unc(x, y, *args):
        return 2*x+umath.sin(y)+3*args[1]
    # Like f_auto_unc, but does not accept numbers with uncertainties:
    def f(x, y, *args):
        assert not any(isinstance(value, uncert_core.UFloat)
                       for value in [x, y] + list(args))
        return f_auto_unc(x, y, *args)
    x = uncert_core.ufloat(1, 0.1)
    y = uncert_core.ufloat(10, 2)
    s = 'string arg'
    z = uncert_core.ufloat(100, 3)
    args = [s, z, s] # var-positional parameters
    ### Automatic numerical derivatives:
    ## Fully automatic numerical derivatives:
    f_wrapped = uncert_core.wrap(f)
    assert ufloats_close(f_auto_unc(x, y, *args), f_wrapped(x, y, *args))
    ## Automatic additional derivatives for non-defined derivatives,
    ## and explicit None derivative:
    f_wrapped = uncert_core.wrap(f, [None]) # No derivative for y
    assert ufloats_close(f_auto_unc(x, y, *args), f_wrapped(x, y, *args))
    ### Explicit derivatives:
    ## Fully defined derivatives (the None entry skips the
    ## non-differentiable string argument args[0]):
    f_wrapped = uncert_core.wrap(f, [lambda x, y, *args: 2,
                                     lambda x, y, *args: math.cos(y),
                                     None,
                                     lambda x, y, *args: 3])
    assert ufloats_close(f_auto_unc(x, y, *args), f_wrapped(x, y, *args))
    ## Automatic additional derivatives for non-defined derivatives:
    # No derivative for y:
    f_wrapped = uncert_core.wrap(f, [lambda x, y, *args: 2])
    assert ufloats_close(f_auto_unc(x, y, *args), f_wrapped(x, y, *args))
def test_wrapped_func_no_args_kwargs():
    '''
    Wrap a function that takes only positional-or-keyword and
    var-keyword parameters.
    '''
    # Reference implementation, which handles uncertainties natively:
    def f_auto_unc(x, y, **kwargs):
        return 2*x+umath.sin(y)+3*kwargs['z']
    # Like f_auto_unc, but does not accept numbers with uncertainties:
    def f(x, y, **kwargs):
        assert not any(isinstance(value, uncert_core.UFloat)
                       for value in [x, y] + list(kwargs.values()))
        return f_auto_unc(x, y, **kwargs)
    x = uncert_core.ufloat(1, 0.1)
    y = uncert_core.ufloat(10, 2)
    s = 'string arg'
    z = uncert_core.ufloat(100, 3)
    kwargs = {'s': s, 'z': z} # Arguments not in signature
    ### Automatic numerical derivatives:
    ## Fully automatic numerical derivatives:
    f_wrapped = uncert_core.wrap(f)
    assert ufloats_close(f_auto_unc(x, y, **kwargs),
                         f_wrapped(x, y, **kwargs))
    # Call with keyword arguments:
    assert ufloats_close(f_auto_unc(y=y, x=x, **kwargs),
                         f_wrapped(y=y, x=x, **kwargs))
    ## Automatic additional derivatives for non-defined derivatives,
    ## and explicit None derivative:
    # No derivative for positional-or-keyword parameter y, no
    # derivative for optional-keyword parameter z:
    f_wrapped = uncert_core.wrap(f, [None])
    assert ufloats_close(f_auto_unc(x, y, **kwargs),
                         f_wrapped(x, y, **kwargs))
    # Call with keyword arguments:
    assert ufloats_close(f_auto_unc(y=y, x=x, **kwargs),
                         f_wrapped(y=y, x=x, **kwargs))
    # No derivative for positional-or-keyword parameter y, no
    # derivative for optional-keyword parameter z:
    f_wrapped = uncert_core.wrap(f, [None], {'z': None})
    assert ufloats_close(f_auto_unc(x, y, **kwargs),
                         f_wrapped(x, y, **kwargs))
    # Call with keyword arguments:
    assert ufloats_close(f_auto_unc(y=y, x=x, **kwargs),
                         f_wrapped(y=y, x=x, **kwargs))
    # No derivative for positional-or-keyword parameter y, derivative
    # for optional-keyword parameter z:
    f_wrapped = uncert_core.wrap(f, [None],
                                 {'z': lambda x, y, **kwargs: 3})
    assert ufloats_close(f_auto_unc(x, y, **kwargs),
                         f_wrapped(x, y, **kwargs))
    # Call with keyword arguments:
    assert ufloats_close(f_auto_unc(y=y, x=x, **kwargs),
                         f_wrapped(y=y, x=x, **kwargs))
    ### Explicit derivatives:
    ## Fully defined derivatives:
    # Bug fix: the derivative dictionary key used to be the typo 'z:',
    # which never matched the 'z' keyword argument, so the explicit
    # derivative for z was silently never exercised:
    f_wrapped = uncert_core.wrap(
        f,
        [lambda x, y, **kwargs: 2, lambda x, y, **kwargs: math.cos(y)],
        {'z': lambda x, y, **kwargs: 3})
    assert ufloats_close(f_auto_unc(x, y, **kwargs),
                         f_wrapped(x, y, **kwargs))
    # Call with keyword arguments:
    assert ufloats_close(f_auto_unc(y=y, x=x, **kwargs),
                         f_wrapped(y=y, x=x, **kwargs))
    ## Automatic additional derivatives for non-defined derivatives:
    # No derivative for y or z:
    f_wrapped = uncert_core.wrap(f, [lambda x, y, **kwargs: 2])
    assert ufloats_close(f_auto_unc(x, y, **kwargs),
                         f_wrapped(x, y, **kwargs))
    # Call with keyword arguments:
    assert ufloats_close(f_auto_unc(y=y, x=x, **kwargs),
                         f_wrapped(y=y, x=x, **kwargs))
def test_wrapped_func_args_kwargs():
    '''
    Wrap a function that takes positional-or-keyword, var-positional
    and var-keyword parameters.
    '''
    # Reference implementation, which handles uncertainties natively:
    def f_auto_unc(x, y, *args, **kwargs):
        return 2*x+umath.sin(y)+4*args[1]+3*kwargs['z']
    # Like f_auto_unc, but does not accept numbers with uncertainties:
    def f(x, y, *args, **kwargs):
        assert not any(isinstance(value, uncert_core.UFloat)
                       for value in [x, y]+list(args)+list(kwargs.values()))
        return f_auto_unc(x, y, *args, **kwargs)
    x = uncert_core.ufloat(1, 0.1)
    y = uncert_core.ufloat(10, 2)
    t = uncert_core.ufloat(1000, 4)
    s = 'string arg'
    z = uncert_core.ufloat(100, 3)
    args = [s, t, s]
    kwargs = {'u': s, 'z': z} # Arguments not in signature
    ### Automatic numerical derivatives:
    ## Fully automatic numerical derivatives:
    f_wrapped = uncert_core.wrap(f)
    assert ufloats_close(f_auto_unc(x, y, *args, **kwargs),
                         f_wrapped(x, y, *args, **kwargs), tolerance=1e-5)
    ## Automatic additional derivatives for non-defined derivatives,
    ## and explicit None derivative:
    # No derivative for positional-or-keyword parameter y, no
    # derivative for optional-keyword parameter z:
    f_wrapped = uncert_core.wrap(f, [None, None, None,
                                     lambda x, y, *args, **kwargs: 4])
    assert ufloats_close(f_auto_unc(x, y, *args, **kwargs),
                         f_wrapped(x, y, *args, **kwargs), tolerance=1e-5)
    # No derivative for positional-or-keyword parameter y, no
    # derivative for optional-keyword parameter z:
    f_wrapped = uncert_core.wrap(f, [None], {'z': None})
    assert ufloats_close(f_auto_unc(x, y, *args, **kwargs),
                         f_wrapped(x, y, *args, **kwargs), tolerance=1e-5)
    # No derivative for positional-or-keyword parameter y, derivative
    # for optional-keyword parameter z:
    f_wrapped = uncert_core.wrap(f, [None],
                                 {'z': lambda x, y, *args, **kwargs: 3})
    assert ufloats_close(f_auto_unc(x, y, *args, **kwargs),
                         f_wrapped(x, y, *args, **kwargs), tolerance=1e-5)
    ### Explicit derivatives:
    ## Fully defined derivatives:
    # Bug fix: the derivative dictionary key used to be the typo 'z:',
    # which never matched the 'z' keyword argument, so the explicit
    # derivative for z was silently never exercised:
    f_wrapped = uncert_core.wrap(
        f,
        [lambda x, y, *args, **kwargs: 2,
         lambda x, y, *args, **kwargs: math.cos(y)],
        {'z': lambda x, y, *args, **kwargs: 3})
    assert ufloats_close(f_auto_unc(x, y, *args, **kwargs),
                         f_wrapped(x, y, *args, **kwargs), tolerance=1e-5)
    ## Automatic additional derivatives for non-defined derivatives:
    # No derivative for y or z:
    f_wrapped = uncert_core.wrap(f, [lambda x, y, *args, **kwargs: 2])
    assert ufloats_close(f_auto_unc(x, y, *args, **kwargs),
                         f_wrapped(x, y, *args, **kwargs), tolerance=1e-5)
def test_wrapped_func():
    """
    Test uncertainty-aware functions obtained through wrapping.

    Checks that wrapping preserves exact results (including the result
    type) for certain arguments, propagates correlations, and honors
    explicit and implicit derivatives with non-numerical arguments.
    """
    ########################################
    # Function which can automatically handle numbers with
    # uncertainties:
    def f_auto_unc(angle, *list_var):
        return umath.cos(angle) + sum(list_var)
    def f(angle, *list_var):
        # We make sure that this function is only ever called with
        # numbers with no uncertainty (since it is wrapped):
        assert not isinstance(angle, uncert_core.UFloat)
        assert not any(isinstance(arg, uncert_core.UFloat)
                       for arg in list_var)
        return f_auto_unc(angle, *list_var)
    f_wrapped = uncert_core.wrap(f)
    my_list = [1, 2, 3]
    ########################################
    # Test of a wrapped function that only calls the original
    # function: it should obtain the exact same result:
    assert f_wrapped(0, *my_list) == f(0, *my_list)
    # 1 == 1 +/- 0, so the type must be checked too:
    assert type(f_wrapped(0, *my_list)) == type(f(0, *my_list))
    ########################################
    # Call with uncertainties:
    angle = uncert_core.ufloat(1, 0.1)
    list_value = uncert_core.ufloat(3, 0.2)
    # The random variables must be the same (full correlation):
    assert ufloats_close(f_wrapped(angle, *[1, angle]),
                         f_auto_unc(angle, *[1, angle]))
    assert ufloats_close(f_wrapped(angle, *[list_value, angle]),
                         f_auto_unc(angle, *[list_value, angle]))
    ########################################
    # Non-numerical arguments, and explicit and implicit derivatives:
    def f(x, y, z, t, u):
        return x+2*z+3*t+4*u
    f_wrapped = uncert_core.wrap(
        f, [lambda *args: 1, None, lambda *args:2, None]) # No deriv. for u
    assert f_wrapped(10, 'string argument', 1, 0, 0) == 12
    x = uncert_core.ufloat(10, 1)
    # Each of x's four appearances contributes its coefficient (1, 2,
    # 3 and 4) to the total standard deviation:
    assert numbers_close(f_wrapped(x, 'string argument', x, x, x).std_dev,
                         (1+2+3+4)*x.std_dev)
def test_wrap_with_kwargs():
    '''
    Tests wrap() on functions with keyword arguments.
    Includes both wrapping a function that takes optional keyword
    arguments and calling a wrapped function with keyword arguments
    (optional or not).
    '''
    # Version of f() that automatically works with numbers with
    # uncertainties:
    def f_auto_unc(x, y, *args, **kwargs):
        return x + umath.sin(y) + 2*args[0] + 3*kwargs['t']
    # We also add keyword arguments in the function which is wrapped:
    def f(x, y, *args, **kwargs):
        # We make sure that f is not called directly with a number with
        # uncertainty:
        for value in [x, y]+list(args)+list(kwargs.values()):
            assert not isinstance(value, uncert_core.UFloat)
        return f_auto_unc(x, y, *args, **kwargs)
    f_wrapped = uncert_core.wrap(f)
    x = ufloat(1, 0.1)
    y = ufloat(10, 0.11)
    z = ufloat(100, 0.111)
    t = ufloat(0.1, 0.1111)
    assert ufloats_close(f_wrapped(x, y, z, t=t),
                         f_auto_unc(x, y, z, t=t), tolerance=1e-5)
    ########################################
    # We make sure that analytical derivatives are indeed used. We
    # also test the automatic handling of additional *args arguments
    # beyond the number of supplied derivatives.
    f_wrapped2 = uncert_core.wrap(
        f, [None, lambda x, y, *args, **kwargs: math.cos(y)])
    # The derivatives must be perfectly identical:
    # The *args parameter of f() is given as a keyword argument, so as
    # to try to confuse the code:
    assert (f_wrapped2(x, y, z, t=t).derivatives[y]
            == f_auto_unc(x, y, z, t=t).derivatives[y])
    # Derivatives supplied through the keyword-parameter dictionary of
    # derivatives, and also derivatives supplied for the
    # var-positional arguments (*args[0]):
    f_wrapped3 = uncert_core.wrap(
        f,
        [None, None, lambda x, y, *args, **kwargs: 2],
        {'t': lambda x, y, *args, **kwargs: 3})
    # The derivatives should be exactly the same, because they are
    # obtained with the exact same analytic formula:
    assert (f_wrapped3(x, y, z, t=t).derivatives[z]
            == f_auto_unc(x, y, z, t=t).derivatives[z])
    assert (f_wrapped3(x, y, z, t=t).derivatives[t]
            == f_auto_unc(x, y, z, t=t).derivatives[t])
    ########################################
    # Making sure that user-supplied derivatives are indeed called:
    class FunctionCalled(Exception):
        '''
        Raised to signal that a function is indeed called.
        '''
        pass
    def failing_func(x, y, *args, **kwargs):
        raise FunctionCalled
    f_wrapped4 = uncert_core.wrap(
        f,
        [None, failing_func],
        {'t': failing_func})
    # y uncertain => the derivative for y must be evaluated:
    try:
        f_wrapped4(x, 3.14, z, t=t)
    except FunctionCalled:
        pass
    else:
        raise Exception('User-supplied derivative should be called')
    try:
        f_wrapped4(x, y, z, t=3.14)
    except FunctionCalled:
        pass
    else:
        raise Exception('User-supplied derivative should be called')
    # Both y and t are exact floats => neither derivative is needed:
    try:
        f_wrapped4(x, 3.14, z, t=3.14)
    except FunctionCalled:
        raise Exception('User-supplied derivative should *not* be called')
###############################################################################
def test_access_to_std_dev():
    """Uniform access to the standard deviation."""
    var = ufloat(1, 0.1)
    derived = 2*var
    # Variable and AffineScalarFunc objects expose their own std_dev
    # through the uniform uncert_core.std_dev() accessor:
    for quantity in (var, derived):
        assert uncert_core.std_dev(quantity) == quantity.std_dev
    # Objects that carry no uncertainty report a zero standard deviation:
    for plain_object in ([], None):
        assert uncert_core.std_dev(plain_object) == 0
###############################################################################
def test_covariances():
    """Covariance matrix of linearly related variables."""
    base = ufloat(1, 0.1)
    shifted = -2*base+10
    scaled = -3*base
    matrix = uncert_core.covariance_matrix([base, shifted, scaled])
    # Diagonal entries are the variances: (0.1)^2, (2*0.1)^2, (3*0.1)^2:
    for index, variance in enumerate((0.01, 0.04, 0.09)):
        assert numbers_close(matrix[index][index], variance)
    # Off-diagonal covariance between base and -2*base+10:
    assert numbers_close(matrix[0][1], -0.02)
###############################################################################
def test_power_all_cases():
    """
    Check every (value, derivative) case of x**p for the built-in pow().
    """
    power_all_cases(pow)
def power_all_cases(op):
    '''
    Checks all cases for the value and derivatives of power-like
    operator op (op is typically the built-in pow(), or math.pow()).

    Checks only the details of special results like 0, 1 or NaN.

    Different cases for the value of x**p and its derivatives are
    tested by dividing the (x, p) plane with:

    - x < 0, x = 0, x > 0
    - p integer or not, p < 0, p = 0, p > 0

    (not all combinations are distinct: for instance x > 0 gives
    identical formulas for all p).
    '''

    # Fixture variables covering each region of the (x, p) plane:
    zero = ufloat(0, 0.1)
    zero2 = ufloat(0, 0.1)
    one = ufloat(1, 0.1)
    positive = ufloat(0.3, 0.01)
    positive2 = ufloat(0.3, 0.01)
    negative = ufloat(-0.3, 0.01)
    integer = ufloat(-3, 0)  # Exact (zero-uncertainty) integral exponent
    non_int_larger_than_one = ufloat(3.1, 0.01)
    positive_smaller_than_one = ufloat(0.3, 0.01)

    ## negative**integer

    result = op(negative, integer)
    assert not isnan(result.derivatives[negative])
    # The derivative with respect to the exponent is expected to be NaN
    # when the base is negative:
    assert isnan(result.derivatives[integer])

    # Limit cases:
    result = op(negative, one)
    assert result.derivatives[negative] == 1
    assert isnan(result.derivatives[one])

    result = op(negative, zero)
    assert result.derivatives[negative] == 0
    assert isnan(result.derivatives[zero])

    ## negative**non-integer

    ## zero**...

    result = op(zero, non_int_larger_than_one)
    assert isnan(result.derivatives[zero])
    assert result.derivatives[non_int_larger_than_one] == 0

    # Special cases:
    result = op(zero, one)
    assert result.derivatives[zero] == 1
    assert result.derivatives[one] == 0

    result = op(zero, 2*one)
    assert result.derivatives[zero] == 0
    assert result.derivatives[one] == 0

    result = op(zero, positive_smaller_than_one)
    assert isnan(result.derivatives[zero])
    assert result.derivatives[positive_smaller_than_one] == 0

    result = op(zero, zero2)
    assert result.derivatives[zero] == 0
    assert isnan(result.derivatives[zero2])

    ## positive**...: this is a quite regular case where the value and
    ## the derivatives are all defined.

    result = op(positive, positive2)
    assert not isnan(result.derivatives[positive])
    assert not isnan(result.derivatives[positive2])

    result = op(positive, zero)
    assert result.derivatives[positive] == 0
    assert not isnan(result.derivatives[zero])

    result = op(positive, negative)
    assert not isnan(result.derivatives[positive])
    assert not isnan(result.derivatives[negative])
###############################################################################
def test_power_special_cases():
    '''
    Checks special cases of x**p, including the exceptions that must be
    raised for invalid (base, exponent) combinations.
    '''
    power_special_cases(pow)

    # We want the same behavior for numbers with uncertainties and for
    # math.pow() at their nominal values:
    positive = ufloat(0.3, 0.01)
    negative = ufloat(-0.3, 0.01)

    # http://stackoverflow.com/questions/10282674/difference-between-the-built-in-pow-and-math-pow-for-floats-in-python

    # 0**negative must raise, whether the base is exact...
    try:
        pow(ufloat(0, 0), negative)
    except ZeroDivisionError:
        pass
    else:
        raise Exception("A proper exception should have been raised")

    # ... or carries an uncertainty:
    try:
        pow(ufloat(0, 0.1), negative)
    except ZeroDivisionError:
        pass
    else:
        raise Exception('A proper exception should have been raised')

    # negative**non-integer must raise too:
    try:
        # NOTE(review): `result` is unused; only the exception matters here.
        result = pow(negative, positive)
    except ValueError:
        # The reason why it should also fail in Python 3 is that the
        # result of Python 3 is a complex number, which uncertainties
        # does not handle (no uncertainties on complex numbers). In
        # Python 2, this should always fail, since Python 2 does not
        # know how to calculate it.
        pass
    else:
        raise Exception('A proper exception should have been raised')
def power_special_cases(op):
    '''
    Checks special cases of the uncertainty power operator op (where
    op is typically the built-in pow or uncertainties.umath.pow).

    The values x = 0, x = 1 and x = NaN are special, as are null,
    integral and NaN values of p.
    '''

    zero = ufloat(0, 0)  # Exact zero
    one = ufloat(1, 0)  # Exact one
    p = ufloat(0.3, 0.01)  # Generic positive exponent with uncertainty

    # 0**positive == 0, for both a plain float and an exact ufloat base:
    assert op(0, p) == 0
    assert op(zero, p) == 0

    # The outcome of 1**nan and nan**0 was undefined before Python
    # 2.6 (http://docs.python.org/library/math.html#math.pow):
    assert op(float('nan'), zero) == 1.0
    assert op(one, float('nan')) == 1.0

    # ...**0 == 1.0 (plain-integer exponent):
    assert op(p, 0) == 1.0
    assert op(zero, 0) == 1.0
    assert op((-p), 0) == 1.0

    # ...**zero (exact ufloat exponent):
    assert op((-10.3), zero) == 1.0
    assert op(0, zero) == 1.0
    assert op(0.3, zero) == 1.0
    assert op((-p), zero) == 1.0
    assert op(zero, zero) == 1.0
    assert op(p, zero) == 1.0

    # one**... == 1.0 for plain numeric exponents:
    assert op(one, -3) == 1.0
    assert op(one, -3.1) == 1.0
    assert op(one, 0) == 1.0
    assert op(one, 3) == 1.0
    assert op(one, 3.1) == 1.0

    # ... with two numbers with uncertainties:
    assert op(one, (-p)) == 1.0
    assert op(one, zero) == 1.0
    assert op(one, p) == 1.0

    # 1**... == 1.0 (plain-float base):
    assert op(1., (-p)) == 1.0
    assert op(1., zero) == 1.0
    assert op(1., p) == 1.0
def test_power_wrt_ref():
    """
    Special cases of the built-in pow(), checked against itself as the
    reference power operator.
    """
    power_wrt_ref(pow, pow)
def power_wrt_ref(op, ref_op):
    """
    Checks special cases of the uncertainty power operator op (where
    op is typically the built-in pow or uncertainties.umath.pow), by
    comparing its results to the reference power operator ref_op
    (which is typically the built-in pow or math.pow).
    """
    # A negative base with an uncertainty can be raised to an
    # integral power:
    assert op(ufloat(-1.1, 0.1), -9).nominal_value == ref_op(-1.1, -9)

    # Bases with no uncertainty should give the same result as plain
    # numbers (both an integer and a float base are exercised):
    for base in (-1, -1.1):
        assert op(ufloat(base, 0), 9) == ref_op(base, 9)
###############################################################################
def test_PDG_precision():
    """
    Test of the calculation of the number of significant digits for
    the uncertainty (Particle Data Group rounding rules).
    """
    # Each magnitude regime covers the 3 cases of the rounding rules:
    cases = [
        # (std_dev, (significant digits, rounded std_dev))
        # Very big floats:
        (1.7976931348623157e308, (2, 1.7976931348623157e308)),
        (0.5e308, (1, 0.5e308)),
        (0.9976931348623157e+308, (2, 1e308)),
        # Very small floats:
        (1.3e-323, (2, 1.3e-323)),
        (5e-324, (1, 5e-324)),
        (9.99e-324, (2, 1e-323)),
    ]
    for std_dev, expected in cases:
        assert uncert_core.PDG_precision(std_dev) == expected
def test_repr():
    """Test the representation of numbers with uncertainty."""
    # 0.25 is a power of 2, so the uncertainty can be exactly
    # represented:
    value = ufloat(3.14159265358979, 0.25)
    assert repr(value) == '3.14159265358979+/-0.25'

    # Exact number (zero uncertainty):
    exact = ufloat(3.14159265358979, 0)
    assert repr(exact) == '3.14159265358979+/-0'

    # A tagged variable displays its tag:
    tagged = ufloat(3, 1, "length")
    assert repr(tagged) == '< length = 3.0+/-1.0 >'
def test_format():
    '''Test the formatting of numbers with uncertainty.'''

    # The way NaN is formatted with F, E and G depends on the version
    # of Python (NAN for Python 2.5+ at least):
    NaN_EFG = '%F' % float('nan')

    # !! The way NaN is formatted with F, E and G might depend on the
    # version of Python, if it is like NaN (could be tested with
    # Python 2.3 or 2.4 vs Python 2.7):
    Inf_EFG = '%F' % float('inf')

    # Tests of each point of the docstring of
    # AffineScalarFunc.__format__() in turn, mostly in the same order.

    # The LaTeX tests do not use the customization of
    # uncert_core.GROUP_SYMBOLS and uncert_core.EXP_PRINT: this
    # way, problems in the customization themselves are caught.

    # NOTE(review): several (nominal value, uncertainty) keys appear
    # more than once in this dict literal; since equal keys collide,
    # each later duplicate silently *replaces* the earlier entry, so
    # the earlier format cases are never exercised.  (Keys built with
    # float('nan') do not collide because NaN != NaN, but the
    # duplicated (float('-inf'), float('inf')) keys do.)  Each
    # duplicate is flagged below; merging them would restore the lost
    # cases.
    tests = {  # (Nominal value, uncertainty): {format: result,...}

        # Usual float formatting, and individual widths, etc.:
        (3.1415, 0.0001): {
            '*^+7.2f': '*+3.14*+/-*0.00**',
            '+07.2f': '+003.14+/-0000.00',  # 0 fill
            '>10f': ' 3.141500+/- 0.000100',  # Width and align
            '11.3e': ' 3.142e+00+/- 0.000e+00',  # Duplicated exponent
            '0.4e': '3.1415e+00+/-0.0000e+00'  # Forced double exponent
        },

        # Full generalization of float formatting:
        # NOTE(review): duplicate key — replaces the entry just above.
        (3.1415, 0.0001): {
            '+09.2uf': '+03.14150+/-000.00010',
            # Alignment is not available with the % formatting
            # operator of Python < 2.6:
            '*^+9.2uf': '+3.14150*+/-*0.00010*',
            '>9f': ' 3.14150+/- 0.00010'  # Width and align
        },

        # Number of digits of the uncertainty fixed:
        (123.456789, 0.00123): {
            '.1uf': '123.457+/-0.001',
            '.2uf': '123.4568+/-0.0012',
            '.3uf': '123.45679+/-0.00123',
            '.2ue': '(1.234568+/-0.000012)e+02'
        },

        # Sign handling:
        (-123.456789, 0.00123): {
            '.1uf': '-123.457+/-0.001',
            '.2uf': '-123.4568+/-0.0012',
            '.3uf': '-123.45679+/-0.00123',
            '.2ue': '(-1.234568+/-0.000012)e+02'
        },

        # Uncertainty larger than the nominal value:
        (12.3, 456.78): {
            '': '12+/-457',
            '.1uf': '12+/-457',
            '.4uf': '12.3+/-456.8'
        },

        # ... Same thing, but with an exponent:
        # NOTE(review): duplicate key — replaces the entry just above.
        (12.3, 456.78): {
            '.1ue': '(0+/-5)e+02',
            '.4ue': '(0.123+/-4.568)e+02',
            '.4ueS': '0.123(4.568)e+02'
        },

        (23456.789123, 1234.56789123): {
            '.6gS': '23456.8(1234.6)'
        },

        # Test of the various float formats: the nominal value should
        # have a similar representation as if it were directly
        # represented as a float:
        (1234567.89, 0.1): {
            '.0e': '(1+/-0)e+06',
            'e': '(1.23456789+/-0.00000010)e+06',
            'E': '(1.23456789+/-0.00000010)E+06',
            'f': '1234567.89+/-0.10',
            'F': '1234567.89+/-0.10',
            'g': '1234567.89+/-0.10',
            'G': '1234567.89+/-0.10',
            '%': '(123456789+/-10)%'
        },
        (1234567.89, 4.3): {
            'g': '1234568+/-4'
        },
        (1234567.89, 43): {  # Case where g triggers the exponent notation
            'g': '(1.23457+/-0.00004)e+06',
            'G': '(1.23457+/-0.00004)E+06'
        },

        # NOTE(review): third occurrence of this key — replaces the two
        # earlier (3.1415, 0.0001) entries.
        (3.1415, 0.0001): {
            '+09.2uf': '+03.14150+/-000.00010'
        },

        (1234.56789, 0.1): {
            '.0f': '(1234+/-0.)',  # Approximate error indicated with "."
            'e': '(1.23456+/-0.00010)e+03',
            'E': '(1.23456+/-0.00010)E+03',
            # NOTE(review): 'f' and 'F' are each listed twice in this
            # inner dict; the duplicates are redundant.
            'f': '1234.57+/-0.10',
            'F': '1234.57+/-0.10',
            'f': '1234.57+/-0.10',
            'F': '1234.57+/-0.10',
            '%': '123457+/-10%'
        },

        # Percent notation:
        (0.42, 0.0055): {
            # Because '%' does 0.0055*100, the value
            # 0.5499999999999999 is obtained, which rounds to 0.5. The
            # original rounded value is 0.006. The same behavior is
            # found in Python 2.7: '{:.1%}'.format(0.0055) is '0.5%'.
            '.1u%': '(42.0+/-0.5)%',
            '.1u%S': '42.0(5)%',
            '%P': u'(42.0±0.5)%'
        },

        # Particle Data Group automatic convention, including limit cases:
        (1.2345678, 0.354): {'': '1.23+/-0.35'},
        (1.2345678, 0.3549): {'': '1.23+/-0.35'},
        (1.2345678, 0.355): {'': '1.2+/-0.4'},
        (1.5678, 0.355): {'': '1.6+/-0.4'},
        (1.2345678, 0.09499): {'': '1.23+/-0.09'},
        (1.2345678, 0.095): {'': '1.23+/-0.10'},

        # Automatic extension of the uncertainty up to the decimal
        # point:
        (1000, 123): {
            '.1uf': '1000+/-123',
            # The nominal value has 1 <= mantissa < 10. The precision
            # is the number of significant digits of the uncertainty:
            '.1ue': '(1.0+/-0.1)e+03'
        },

        # Spectroscopic notation:
        (-1.23, 3.4): {
            'S': '-1.2(3.4)',
            '.2ufS': '-1.2(3.4)',
            '.3ufS': '-1.23(3.40)',
        },
        (-123.456, 0.123): {
            'S': '-123.46(12)',
            '.1ufS': '-123.5(1)',
            '.2ufS': '-123.46(12)',
            '.3ufS': '-123.456(123)',
        },
        (-123.456, 0.567): {
            'S': '-123.5(6)',
            '.1ufS': '-123.5(6)',
            '.2ufS': '-123.46(57)',
            '.3ufS': '-123.456(567)',
        },
        (-123.456, 0.004): {
            # The decimal point shows that the uncertainty is not
            # exact:
            '.2fS': '-123.46(0.00)'
        },

        # LaTeX notation:
        #
        # NOTE(review): duplicate of the (1234.56789, 0.1) key above —
        # replaces it, so the plain e/E/f/F/% cases there are lost.
        (1234.56789, 0.1): {
            'eL': r'\left(1.23457 \pm 0.00010\right) \times 10^{3}',
            'EL': r'\left(1.23457 \pm 0.00010\right) \times 10^{3}',
            # NOTE(review): 'fL' and 'FL' are each listed twice in this
            # inner dict; the duplicates are redundant.
            'fL': '1234.57 \pm 0.10',
            'FL': '1234.57 \pm 0.10',
            'fL': '1234.57 \pm 0.10',
            'FL': '1234.57 \pm 0.10',
            '%L': r'\left(123457 \pm 10\right) \%'
        },
        #
        # ... combined with the spectroscopic notation:
        # NOTE(review): duplicate of the (-1.23, 3.4) key above —
        # replaces it.
        (-1.23, 3.4): {
            'SL': '-1.2(3.4)',
            'LS': '-1.2(3.4)',
            '.2ufSL': '-1.2(3.4)',
            '.2ufLS': '-1.2(3.4)'
        },

        # Special cases for the uncertainty (0, nan) and format
        # strings (extension S, L, U,..., global width, etc.).
        #
        # Python 3.2 and 3.3 give 1.4e-12*1e+12 = 1.4000000000000001
        # instead of 1.4 for Python 3.1. The problem does not appear
        # with 1.2, so 1.2 is used.
        (-1.2e-12, 0): {
            '12.2gPL': u' -1.2×10⁻¹²± 0',
            # Pure "width" formats are not accepted by the % operator,
            # and only %-compatible formats are accepted, for Python <
            # 2.6:
            '13S': ' -1.2(0)e-12',
            '10P': u'-1.2×10⁻¹²± 0',
            'L': r'\left(-1.2 \pm 0\right) \times 10^{-12}',
            # No factored exponent, LaTeX
            '1L': r'-1.2 \times 10^{-12} \pm 0',
            'SL': r'-1.2(0) \times 10^{-12}',
            'SP': u'-1.2(0)×10⁻¹²'
        },

        # Python 3.2 and 3.3 give 1.4e-12*1e+12 = 1.4000000000000001
        # instead of 1.4 for Python 3.1. The problem does not appear
        # with 1.2, so 1.2 is used.
        (-1.2e-12, float('nan')): {
            '.2uG': '(-1.2+/-%s)E-12' % NaN_EFG,  # u ignored, format used
            '15GS': ' -1.2(%s)E-12' % NaN_EFG,
            'SL': r'-1.2(\mathrm{nan}) \times 10^{-12}',  # LaTeX NaN
            # Pretty-print priority, but not for NaN:
            'PSL': u'-1.2(\mathrm{nan})×10⁻¹²',
            'L': r'\left(-1.2 \pm \mathrm{nan}\right) \times 10^{-12}',
            # Uppercase NaN and LaTeX:
            '.1EL': (r'\left(-1.2 \pm \mathrm{%s}\right) \times 10^{-12}'
                     % NaN_EFG),
            '10': ' -1.2e-12+/- nan',
            '15S': ' -1.2(nan)e-12'
        },

        (3.14e-10, 0.01e-10): {
            # Character (Unicode) strings:
            u'P': u'(3.140±0.010)×10⁻¹⁰',  # PDG rules: 2 digits
            u'PL': u'(3.140±0.010)×10⁻¹⁰',  # Pretty-print has higher priority
            # Truncated non-zero uncertainty:
            '.1e': '(3.1+/-0.0)e-10',
            '.1eS': '3.1(0.0)e-10'
        },

        # Some special cases:
        (1, float('nan')): {
            'g': '1+/-nan',
            'G': '1+/-%s' % NaN_EFG,
            '%': '(100.000000+/-nan)%',  # The % format type is like f
            # Should be the same as '+05', for floats, but is not, in
            # Python 2.7:
            '+05g': '+0001+/-00nan',
            # 5 is the *minimal* width, 6 is the default number of
            # digits after the decimal point:
            '+05%': '(+100.000000+/-00nan)%',
            # There is a difference between '{}'.format(1.) and
            # '{:g}'.format(1.), which is not fully obvious in the
            # documentation, which indicates that a None format type
            # is like g. The reason is that the empty format string is
            # actually interpreted as str(), and that str() does not
            # have to behave like g ('{}'.format(1.234567890123456789)
            # and '{:g}'.format(1.234567890123456789) are different).
            '': '1.0+/-nan',
            # This is ugly, but consistent with
            # '{:+05}'.format(float('nan')) and format(1.) (which
            # differs from format(1)!):
            '+05': '+01.0+/-00nan'
        },

        (9.9, 0.1): {
            '.1ue': '(9.9+/-0.1)e+00',
            '.0fS': '10(0.)'
        },
        (9.99, 0.1): {
            # The precision has an effect on the exponent, like for
            # floats:
            '.2ue': '(9.99+/-0.10)e+00',  # Same exponent as for 9.99 alone
            '.1ue': '(1.00+/-0.01)e+01'  # Same exponent as for 9.99 alone
        },

        # 0 uncertainty: nominal value displayed like a float:
        (1.2345, 0): {
            '.2ue': '(1.23+/-0)e+00',
            '1.2ue': '1.23e+00+/-0',  # No factored exponent
            '.2uf': '1.23+/-0',
            '.2ufS': '1.23(0)',
            '.2fS': '1.23(0)',
            'g': '1.2345+/-0',
            '': '1.2345+/-0'
        },

        # Alignment and filling characters (supported in Python 2.6+):
        (3.1415e10, 0): {
            '<15': '31415000000.0 +/-0 ',
            '<20S': '31415000000.0(0) ',
            # Trying to trip the format parsing with a fill character
            # which is an alignment character:
            '=>15': '==31415000000.0+/-==============0'
        },

        (1234.56789, 0): {
            '1.2ue': '1.23e+03+/-0',  # u ignored
            '1.2e': '1.23e+03+/-0',
            # Default precision = 6
            'eL': r'\left(1.234568 \pm 0\right) \times 10^{3}',
            'EL': r'\left(1.234568 \pm 0\right) \times 10^{3}',
            'fL': '1234.567890 \pm 0',
            'FL': '1234.567890 \pm 0',
            '%L': r'\left(123456.789000 \pm 0\right) \%'
        },

        (1e5, 0): {
            'g': '100000+/-0'
        },

        (1e6, 0): {
            # A default precision of 6 is used because the uncertainty
            # cannot be used for defining a default precision (it does
            # not have a magnitude):
            'g': '(1+/-0)e+06'
        },

        (1e6+10, 0): {
            # A default precision of 6 is used because the uncertainty
            # cannot be used for defining a default precision (it does
            # not have a magnitude):
            'g': '(1.00001+/-0)e+06'
        },

        # Rounding of the uncertainty that "changes" the number of
        # significant digits:
        (1, 0.994): {
            '.3uf': '1.000+/-0.994',
            '.2uf': '1.00+/-0.99',
            '.1uf': '1+/-1'  # Discontinuity in the number of digits
        },

        (12.3, 2.3): {
            '.2ufS': '12.3(2.3)'  # Decimal point on the uncertainty
        },

        # NOTE(review): duplicate key — replaces the entry just above.
        (12.3, 2.3): {
            '.1ufS': '12(2)'  # No decimal point on the uncertainty
        },

        (0, 0): {  # Make defining the first significant digit problematic
            '.1f': '0.0+/-0',  # Simple float formatting
            'g': '0+/-0'
        },

        (1.2e-34, 5e-67): {
            '.6g': '(1.20000+/-0.00000)e-34',
            '13.6g': ' 1.20000e-34+/- 0.00000e-34',
            '13.6G': ' 1.20000E-34+/- 0.00000E-34',
            '.6GL': r'\left(1.20000 \pm 0.00000\right) \times 10^{-34}'
        },

        (float('nan'), 100): {  # NaN *nominal value*
            '': 'nan+/-100.0',  # Like '{}'.format(100.)
            'g': 'nan+/-100',  # Like '{:g}'.format(100.)
            '.1e': '(nan+/-1.0)e+02',  # Similar to 1±nan
            '.1E': '(%s+/-1.0)E+02' % NaN_EFG,
            '.1ue': '(nan+/-1)e+02',
            '10.1e': ' nan+/- 1.0e+02'
        },

        (float('nan'), 1e8): {  # NaN *nominal value*
            '': 'nan+/-100000000.0',  # Like '{}'.format(1e8)
            'g': '(nan+/-1)e+08',  # Like '{:g}'.format(1e8)
            '.1e': '(nan+/-1.0)e+08',
            '.1E': '(%s+/-1.0)E+08' % NaN_EFG,
            '.1ue': '(nan+/-1)e+08',
            '10.1e': ' nan+/- 1.0e+08'  # 'nane+08' would be strange
        },

        (float('nan'), 123456789): {  # NaN *nominal value*
            '': 'nan+/-123456789.0',  # Similar to '{}'.format(123456789.)
            'g': '(nan+/-1.23457)e+08',  # Similar to '{:g}'.format(123456789.)
            '.1e': '(nan+/-1.2)e+08',
            '.1E': '(%s+/-1.2)E+08' % NaN_EFG,
            '.1ue': '(nan+/-1)e+08',
            '.1ueL': r'\left(\mathrm{nan} \pm 1\right) \times 10^{8}',
            '10.1e': ' nan+/- 1.2e+08',
            '10.1eL': r'\mathrm{nan} \pm 1.2 \times 10^{8}'
        },

        (float('nan'), float('nan')): {  # *Double* NaN
            '': 'nan+/-nan',
            '.1e': 'nan+/-nan',
            '.1E': '%s+/-%s' % (NaN_EFG, NaN_EFG),
            '.1ue': 'nan+/-nan',
            'EL': r'\mathrm{%s} \pm \mathrm{%s}' % (NaN_EFG, NaN_EFG)
        },

        (float('inf'), 100): {  # Inf *nominal value*
            '': 'inf+/-100.0',  # Like '{}'.format(100.)
            'g': 'inf+/-100',  # Like '{:g}'.format(100.)
            '.1e': '(inf+/-1.0)e+02',  # Similar to 1±inf
            '.1E': '(%s+/-1.0)E+02' % Inf_EFG,
            '.1ue': '(inf+/-1)e+02',
            '10.1e': ' inf+/- 1.0e+02'
        },

        (float('inf'), 1e8): {  # Inf *nominal value*
            '': 'inf+/-100000000.0',  # Like '{}'.format(1e8)
            'g': '(inf+/-1)e+08',  # Like '{:g}'.format(1e8)
            '.1e': '(inf+/-1.0)e+08',
            '.1E': '(%s+/-1.0)E+08' % Inf_EFG,
            '.1ue': '(inf+/-1)e+08',
            '10.1e': ' inf+/- 1.0e+08'  # 'infe+08' would be strange
        },

        (float('inf'), 123456789): {  # Inf *nominal value*
            '': 'inf+/-123456789.0',  # Similar to '{}'.format(123456789.)
            'g': '(inf+/-1.23457)e+08',  # Similar to '{:g}'.format(123456789.)
            '.1e': '(inf+/-1.2)e+08',
            '.1E': '(%s+/-1.2)E+08' % Inf_EFG,
            '.1ue': '(inf+/-1)e+08',
            '.1ueL': r'\left(\infty \pm 1\right) \times 10^{8}',
            '10.1e': ' inf+/- 1.2e+08',
            '10.1eL': r' \infty \pm 1.2 \times 10^{8}'
        },

        (float('inf'), float('inf')): {  # *Double* Inf
            '': 'inf+/-inf',
            '.1e': 'inf+/-inf',
            '.1E': '%s+/-%s' % (Inf_EFG, Inf_EFG),
            '.1ue': 'inf+/-inf',
            'EL': r'\infty \pm \infty'
        },

        # Like the tests for +infinity, but for -infinity:
        (float('-inf'), 100): {  # Inf *nominal value*
            '': '-inf+/-100.0',  # Like '{}'.format(100.)
            'g': '-inf+/-100',  # Like '{:g}'.format(100.)
            '.1e': '(-inf+/-1.0)e+02',  # Similar to 1±inf
            '.1E': '(-%s+/-1.0)E+02' % Inf_EFG,
            '.1ue': '(-inf+/-1)e+02',
            '10.1e': ' -inf+/- 1.0e+02'
        },

        (float('-inf'), 1e8): {  # Inf *nominal value*
            '': '-inf+/-100000000.0',  # Like '{}'.format(1e8)
            'g': '(-inf+/-1)e+08',  # Like '{:g}'.format(1e8)
            '.1e': '(-inf+/-1.0)e+08',
            '.1E': '(-%s+/-1.0)E+08' % Inf_EFG,
            '.1ue': '(-inf+/-1)e+08',
            '10.1e': ' -inf+/- 1.0e+08'  # 'infe+08' would be strange
        },

        (float('-inf'), 123456789): {  # Inf *nominal value*
            '': '-inf+/-123456789.0',  # Similar to '{}'.format(123456789.)
            'g': '(-inf+/-1.23457)e+08',  # Similar to '{:g}'.format(123456789.)
            '.1e': '(-inf+/-1.2)e+08',
            '.1E': '(-%s+/-1.2)E+08' % Inf_EFG,
            '.1ue': '(-inf+/-1)e+08',
            '.1ueL': r'\left(-\infty \pm 1\right) \times 10^{8}',
            '10.1e': ' -inf+/- 1.2e+08',
            '10.1eL': r' -\infty \pm 1.2 \times 10^{8}'
        },

        (float('-inf'), float('inf')): {  # *Double* Inf
            '': '-inf+/-inf',
            '.1e': '-inf+/-inf',
            '.1E': '-%s+/-%s' % (Inf_EFG, Inf_EFG),
            '.1ue': '-inf+/-inf',
            'EL': r'-\infty \pm \infty'
        },

        # The Particle Data Group convention trumps the "at least one
        # digit past the decimal point" for Python floats, but only
        # with a non-zero uncertainty:
        (724.2, 26.4): {
            '': '724+/-26'
        },
        (724, 0): {
            '': '724.0+/-0'
        },

        # More NaN and infinity, in particular with LaTeX and various
        # options:
        # NOTE(review): duplicate of the (float('-inf'), float('inf'))
        # key above (inf == inf, so these tuples collide) — replaces it.
        (float('-inf'), float('inf')): {
            'S': '-inf(inf)',
            'LS': '-\infty(\infty)',
            'L': '-\infty \pm \infty',
            'LP': u'-\infty±\infty',
            # The following is consistent with Python's own
            # formatting, which depends on the version of Python:
            # formatting float("-inf") with format(..., "020") gives
            # '-0000000000000000inf' with Python 2.7, but
            # '-00000000000000.0inf' with Python 2.6. However, Python
            # 2.6 gives the better, Python 2.7 form when format()ting
            # with "020g" instead, so this formatting would be better,
            # in principle, and similarly for "%020g" % ... Thus,
            # Python's format() breaks the official rule according to
            # which no format type is equivalent to "g", for
            # floats. If the better behavior was needed, internal
            # formatting could in principle force the "g" formatting
            # type when none is given; however, Python does not
            # actually fully treat the none format type in the same
            # was as the "g" format, so this solution cannot be used,
            # as it would break other formatting behaviors in this
            # code. It is thus best to mimic the native behavior of
            # none type formatting (even if it does not look so good
            # in Python 2.6).
            '020S': format(float("-inf"), '015')+'(inf)'
        },

        (-float('nan'), float('inf')): {
            'S': 'nan(inf)',
            'LS': '\mathrm{nan}(\infty)',
            'L': '\mathrm{nan} \pm \infty',
            'LP': u'\mathrm{nan}±\infty'
        },

        # Leading zeroes in the shorthand notation:
        (-2, 3): {
            "020S": "-000000000002.0(3.0)"
        }

    }

    # ',' format option: introduced in Python 2.7
    if sys.version_info >= (2, 7):

        tests.update({
            (1234.56789, 0.012): {
                ',.1uf': '1,234.57+/-0.01'
            },

            (123456.789123, 1234.5678): {
                ',f': '123,457+/-1,235',  # Particle Data Group convention
                ',.4f': '123,456.7891+/-1,234.5678'
            }
        })

    # True if we can detect that the Jython interpreter is running this code:
    try:
        jython_detected = sys.subversion[0] == 'Jython'
    except AttributeError:
        jython_detected = False

    for (values, representations) in tests.items():

        value = ufloat(*values)

        for (format_spec, result) in representations.items():

            # print "FORMATTING {} WITH '{}'".format(repr(value), format_spec)

            # Jython 2.5.2 does not always represent NaN as nan or NAN
            # in the CPython way: for example, '%.2g' % float('nan')
            # is '\ufffd'. The test is skipped, in this case:
            if jython_detected and (
                isnan(value.std_dev) or isnan(value.nominal_value)):
                continue

            # Call that works with Python < 2.6 too:
            representation = value.format(format_spec)

            assert representation == result, (
                # The representation is used, for terminal that do not
                # support some characters like ±, and superscripts:
                'Incorrect representation %r for format %r of %r:'
                ' %r expected.'
                % (representation, format_spec, value, result))

            # An empty format string is like calling str()
            # (http://docs.python.org/2/library/string.html#formatspec):
            if not format_spec:
                assert representation == str(value), (
                    'Empty format should give the same thing as str():'
                    ' %s obtained instead of %s'
                    % (representation, str(value)))

            # Parsing back into a number with uncertainty (unless the
            # LaTeX or comma notation is used):
            if (not set(format_spec).intersection('L,*%')  # * = fill with *
                # "0nan"
                and '0nan' not in representation.lower()
                # "0inf"
                and '0inf' not in representation.lower()
                # Specific case:
                and '=====' not in representation):

                value_back = ufloat_fromstr(representation)

                # The original number and the new one should be consistent
                # with each other:
                try:

                    # The nominal value can be rounded to 0 when the
                    # uncertainty is larger (because p digits on the
                    # uncertainty can still show 0.00... for the
                    # nominal value). The relative error is infinite,
                    # so this should not cause an error:
                    if value_back.nominal_value:
                        assert numbers_close(value.nominal_value,
                                             value_back.nominal_value, 2.4e-1)

                    # If the uncertainty is zero, then the relative
                    # change can be large:
                    assert numbers_close(value.std_dev,
                                         value_back.std_dev, 3e-1)

                except AssertionError:
                    # !! The following string formatting requires
                    # str() to work (to not raise an exception) on the
                    # values (which have a non-standard class):
                    raise AssertionError(
                        'Original value %s and value %s parsed from %r'
                        ' (obtained through format specification %r)'
                        ' are not close enough'
                        % (value, value_back, representation, format_spec))
def test_unicode_format():
    '''Test of the unicode formatting of numbers with uncertainties'''
    value = ufloat(3.14159265358979, 0.25)
    # Both the plain and the pretty-print ('P') formats must
    # interpolate into text as str objects:
    for format_spec in ('', 'P'):
        assert isinstance(u'Résultat = %s' % value.format(format_spec), str)
###############################################################################
# The tests below require NumPy, which is an optional package:
try:
import numpy
except ImportError:
pass
else:
def arrays_close(m1, m2, precision=1e-4):
    """
    Returns True iff m1 and m2 are almost equal, where elements
    can be either floats or AffineScalarFunc objects.

    Two independent AffineScalarFunc objects are deemed equal if
    both their nominal value and uncertainty are equal (up to the
    given precision).

    m1, m2 -- NumPy arrays.

    precision -- precision passed through to
    uncertainties.test_uncertainties.numbers_close().
    """
    # ! numpy.allclose() is similar to this function, but does not
    # work on arrays that contain numbers with uncertainties, because
    # of the isinf() function.

    # .flat iterates element-wise regardless of the arrays' shapes;
    # NOTE(review): assumes m1 and m2 hold the same number of
    # elements — zip() would silently truncate otherwise.
    for (elmt1, elmt2) in zip(m1.flat, m2.flat):

        # For a simpler comparison, both elements are
        # converted to AffineScalarFunc objects:
        elmt1 = uncert_core.to_affine_scalar(elmt1)
        elmt2 = uncert_core.to_affine_scalar(elmt2)

        if not numbers_close(elmt1.nominal_value,
                             elmt2.nominal_value, precision):
            return False

        if not numbers_close(elmt1.std_dev,
                             elmt2.std_dev, precision):
            return False

    return True
def test_numpy_comparison():
    "Comparison with a NumPy array."

    x = ufloat(1, 0.1)

    # Comparison with a different type:
    assert x != [x, x]

    # NumPy arrays can be compared, through element-wise
    # comparisons. Numbers with uncertainties should yield the
    # same kind of results as pure floats (i.e., a NumPy array,
    # etc.).

    # We test the comparison operators both for the uncertainties
    # package *and* the NumPy package:

    # Equalities, etc.: the comparisons must broadcast and return an
    # array whose length matches the NumPy operand:
    assert len(x == numpy.arange(10)) == 10
    assert len(numpy.arange(10) == x) == 10
    assert len(x != numpy.arange(10)) == 10
    assert len(numpy.arange(10) != x) == 10
    assert len(x == numpy.array([x, x, x])) == 3
    assert len(numpy.array([x, x, x]) == x) == 3
    assert numpy.all(x == numpy.array([x, x, x]))

    # Inequalities (both orders of operands, all four operators):
    assert len(x < numpy.arange(10)) == 10
    assert len(numpy.arange(10) > x) == 10
    assert len(x <= numpy.arange(10)) == 10
    assert len(numpy.arange(10) >= x) == 10
    assert len(x > numpy.arange(10)) == 10
    assert len(numpy.arange(10) < x) == 10
    assert len(x >= numpy.arange(10)) == 10
    assert len(numpy.arange(10) <= x) == 10

    # More detailed test, that shows that the comparisons are
    # meaningful (x >= 0, but not x <= 1):
    assert numpy.all((x >= numpy.arange(3)) == [True, False, False])
def test_correlated_values():
    """
    Correlated variables.

    Test through the input of the (full) covariance matrix.
    """

    u = uncert_core.ufloat(1, 0.1)
    cov = uncert_core.covariance_matrix([u])
    # "1" is used instead of u.nominal_value because
    # u.nominal_value might return a float. The idea is to force
    # the new variable u2 to be defined through an integer nominal
    # value:
    u2, = uncert_core.correlated_values([1], cov)
    expr = 2*u2  # Calculations with u2 should be possible, like with u

    ####################

    # Covariances between output and input variables:

    x = ufloat(1, 0.1)
    y = ufloat(2, 0.3)
    z = -3*x+y

    covs = uncert_core.covariance_matrix([x, y, z])

    # Test of the diagonal covariance elements:
    assert arrays_close(
        numpy.array([v.std_dev**2 for v in (x, y, z)]),
        numpy.array(covs).diagonal())

    # "Inversion" of the covariance matrix: creation of new
    # variables:
    (x_new, y_new, z_new) = uncert_core.correlated_values(
        [x.nominal_value, y.nominal_value, z.nominal_value],
        covs,
        tags = ['x', 'y', 'z'])

    # Even the uncertainties should be correctly reconstructed:
    assert arrays_close(numpy.array((x, y, z)),
                        numpy.array((x_new, y_new, z_new)))

    # ... and the covariances too:
    assert arrays_close(
        numpy.array(covs),
        numpy.array(uncert_core.covariance_matrix([x_new, y_new, z_new])))

    # The linear relation z = -3*x + y must survive the round trip:
    assert arrays_close(
        numpy.array([z_new]), numpy.array([-3*x_new+y_new]))

    ####################

    # ... as well as functional relations:

    u = ufloat(1, 0.05)
    v = ufloat(10, 0.1)
    sum_value = u+2*v

    # Covariance matrices:
    cov_matrix = uncert_core.covariance_matrix([u, v, sum_value])

    # Correlated variables can be constructed from a covariance
    # matrix, if NumPy is available:
    (u2, v2, sum2) = uncert_core.correlated_values(
        [x.nominal_value for x in [u, v, sum_value]],
        cov_matrix)

    # arrays_close() is used instead of numbers_close() because
    # it compares uncertainties too:
    assert arrays_close(numpy.array([u]), numpy.array([u2]))
    assert arrays_close(numpy.array([v]), numpy.array([v2]))
    assert arrays_close(numpy.array([sum_value]), numpy.array([sum2]))
    # The functional relation sum2 == u2 + 2*v2 must hold exactly:
    assert arrays_close(numpy.array([0]),
                        numpy.array([sum2-(u2+2*v2)]))

    # Spot checks of the correlation matrix:
    corr_matrix = uncert_core.correlation_matrix([u, v, sum_value])
    assert numbers_close(corr_matrix[0,0], 1)
    assert numbers_close(corr_matrix[1,2], 2*v.std_dev/sum_value.std_dev)

    ####################

    # Test of numerical robustness despite wildly different
    # orders of magnitude (see
    # https://github.com/lebigot/uncertainties/issues/95):
    cov = numpy.diag([1e-70, 1e-70, 1e10])
    cov[0, 1] = cov[1, 0] = 0.9e-70
    cov[[0, 1], 2] = -3e-34
    cov[2, [0, 1]] = -3e-34
    variables = uncert_core.correlated_values([0]*3, cov)

    # Since the numbers are very small, we need to compare them
    # in a stricter way, that handles the case of a 0 variance
    # in `variables`:
    assert numbers_close(
        1e66*cov[0,0], 1e66*variables[0].s**2, tolerance=1e-5)
    assert numbers_close(
        1e66*cov[1,1], 1e66*variables[1].s**2, tolerance=1e-5)

    ####################

    # 0 variances are a bit special, since the correlation matrix
    # cannot be calculated naively, so we test that there is no
    # specific problem in this case:
    cov = numpy.diag([0, 0, 10])
    nom_values = [1, 2, 3]
    variables = uncert_core.correlated_values(nom_values, cov)

    for (variable, nom_value, variance) in zip(
        variables, nom_values, cov.diagonal()):

        assert numbers_close(variable.n, nom_value)
        assert numbers_close(variable.s**2, variance)

    assert arrays_close(
        cov,
        numpy.array(uncert_core.covariance_matrix(variables)))
def test_correlated_values_correlation_mat():
    '''
    Tests the input of correlated value.

    Test through their correlation matrix (instead of the
    covariance matrix).
    '''

    x = ufloat(1, 0.1)
    y = ufloat(2, 0.3)
    z = -3*x+y

    cov_mat = uncert_core.covariance_matrix([x, y, z])

    # Standard deviations, read off the diagonal of the covariance matrix:
    std_devs = numpy.sqrt(numpy.array(cov_mat).diagonal())

    # Correlation matrix: covariance scaled by both standard deviations
    # (row-wise and column-wise):
    corr_mat = cov_mat/std_devs/std_devs[numpy.newaxis].T

    # We make sure that the correlation matrix is indeed symmetric
    # (comparing it with its transpose):
    assert (corr_mat-corr_mat.T).max() <= 1e-15
    # We make sure that there are indeed ones on the diagonal:
    assert (corr_mat.diagonal()-1).max() <= 1e-15

    # We try to recover the correlated variables through the
    # correlation matrix (not through the covariance matrix):

    nominal_values = [v.nominal_value for v in (x, y, z)]
    std_devs = [v.std_dev for v in (x, y, z)]
    x2, y2, z2 = uncert_core.correlated_values_norm(
        list(zip(nominal_values, std_devs)), corr_mat)

    # arrays_close() is used instead of numbers_close() because
    # it compares uncertainties too:

    # Test of individual variables:
    assert arrays_close(numpy.array([x]), numpy.array([x2]))
    assert arrays_close(numpy.array([y]), numpy.array([y2]))
    assert arrays_close(numpy.array([z]), numpy.array([z2]))

    # Partial correlation test: the reconstructed z2 must obey the same
    # linear relation as the original z = -3*x + y:
    assert arrays_close(numpy.array([0]), numpy.array([z2-(-3*x2+y2)]))

    # Test of the full covariance matrix:
    assert arrays_close(
        numpy.array(cov_mat),
        numpy.array(uncert_core.covariance_matrix([x2, y2, z2])))
|
# coding=utf-8
"""
Tests of the code in uncertainties/__init__.py.
These tests can be run through the Nose testing framework.
(c) 2010-2016 by <NAME> (EOL).
"""
from __future__ import division
from __future__ import print_function
# Standard modules
from builtins import str
from builtins import zip
from builtins import map
from builtins import range
import copy
import weakref
import math
from math import isnan, isinf
import random
import sys
# 3rd-party modules
# import nose.tools
# Local modules
import uncertainties.core as uncert_core
from uncertainties.core import ufloat, AffineScalarFunc, ufloat_fromstr
from uncertainties import umath
# The following information is useful for making sure that the right
# version of Python is running the tests (for instance with the Travis
# Continuous Integration system):
print("Testing with Python", sys.version)
###############################################################################
# Utilities for unit testing
def numbers_close(x, y, tolerance=1e-6):
    """
    Returns True if the given floats are close enough.

    The given tolerance is the relative difference allowed, or the absolute
    difference, if one of the numbers is 0.

    NaN is allowed: it is considered close to itself.
    """

    # !!! Python 3.5+ has math.isclose(): maybe it could be used here.

    # The zero case is split off first so that the relative test below
    # never divides by zero (NaN could otherwise appear silently):
    if x == 0 or y == 0:
        # Absolute comparison against the (at most one) non-zero value:
        return abs(x or y) < tolerance

    # Infinities and NaN are only "close" to themselves:
    if isinf(x):
        return isinf(y)
    if isnan(x):
        return isnan(y)

    # Symmetric form of the relative-difference test:
    return 2*abs(x - y)/(abs(x) + abs(y)) < tolerance
def ufloats_close(x, y, tolerance=1e-6):
    '''
    Tests if two numbers with uncertainties are close, as random
    variables: this is stronger than testing whether their nominal
    value and standard deviation are close.

    The tolerance is applied to both the nominal value and the
    standard deviation of the difference between the numbers.
    '''

    # Closeness as random variables means that the *difference* is
    # (almost) exactly zero, in both value and uncertainty:
    difference = x - y
    if not numbers_close(difference.nominal_value, 0, tolerance):
        return False
    return numbers_close(difference.std_dev, 0, tolerance)
class DerivativesDiffer(Exception):
    """
    Raised by compare_derivatives() when an analytical derivative
    differs too much from its numerical approximation.
    """
    pass
def compare_derivatives(func, numerical_derivatives,
                        num_args_list=None):
    """
    Checks the derivatives of a function 'func' (as returned by the
    wrap() wrapper), by comparing them to the
    'numerical_derivatives' functions.

    Raises a DerivativesDiffer exception in case of problem.

    These functions all take the number of arguments listed in
    num_args_list. If num_args is None, it is automatically obtained.

    Tests are done on random arguments.
    """

    # Wrapped functions expose the original name through .name;
    # plain functions through .__name__:
    try:
        funcname = func.name
    except AttributeError:
        funcname = func.__name__

    # print "Testing", func.__name__

    if not num_args_list:

        # Detecting automatically the correct number of arguments is not
        # always easy (because not all values are allowed, etc.):

        num_args_table = {
            'atanh': [1],
            'log': [1, 2]  # Both numbers of arguments are tested
        }
        if funcname in num_args_table:
            num_args_list = num_args_table[funcname]
        else:

            num_args_list = []

            # We loop until we find reasonable function arguments:
            # We get the number of arguments by trial and error:
            for num_args in range(10):
                try:
                    #! Giving integer arguments is good for preventing
                    # certain functions from failing even though num_args
                    # is their correct number of arguments
                    # (e.g. math.ldexp(x, i), where i must be an integer)
                    func(*(1,)*num_args)
                except TypeError:
                    pass  # Not the right number of arguments
                else:  # No error
                    # num_args is a good number of arguments for func:
                    num_args_list.append(num_args)

            if not num_args_list:
                raise Exception("Can't find a reasonable number of arguments"
                                " for function '%s'." % funcname)

    for num_args in num_args_list:

        # Argument numbers that will have a random integer value:
        integer_arg_nums = set()

        if funcname == 'ldexp':
            # The second argument must be an integer:
            integer_arg_nums.add(1)

        # The loop below retries with new random arguments (and possibly
        # more integer-valued positions) until one full comparison pass
        # succeeds:
        while True:
            try:
                # We include negative numbers, for more thorough tests:
                args = []
                for arg_num in range(num_args):
                    if arg_num in integer_arg_nums:
                        args.append(random.choice(range(-10, 10)))
                    else:
                        args.append(
                            uncert_core.Variable(random.random()*4-2, 0))

                # 'args', but as scalar values:
                args_scalar = [uncert_core.nominal_value(v)
                               for v in args]

                func_approx = func(*args)

                # Some functions yield simple Python constants, after
                # wrapping in wrap(): no test has to be performed.
                # Some functions also yield tuples...
                if isinstance(func_approx, AffineScalarFunc):

                    # We compare all derivatives:
                    for (arg_num, (arg, numerical_deriv)) in (
                            enumerate(zip(args, numerical_derivatives))):

                        # Some arguments might not be differentiable:
                        if isinstance(arg, int):
                            continue

                        fixed_deriv_value = func_approx.derivatives[arg]

                        num_deriv_value = numerical_deriv(*args_scalar)

                        # This message is useful: the user can see that
                        # tests are really performed (instead of not being
                        # performed, silently):
                        print("Testing derivative #%d of %s at %s" % (
                            arg_num, funcname, args_scalar))

                        if not numbers_close(fixed_deriv_value,
                                             num_deriv_value, 1e-4):

                            # It is possible that the result is NaN:
                            if not isnan(func_approx):
                                raise DerivativesDiffer(
                                    "Derivative #%d of function '%s' may be"
                                    " wrong: at args = %s,"
                                    " value obtained = %.16f,"
                                    " while numerical approximation = %.16f."
                                    % (arg_num, funcname, args,
                                       fixed_deriv_value, num_deriv_value))

            except ValueError as err:  # Arguments out of range, or of wrong type
                # Factorial(real) lands here:
                if str(err).startswith('factorial'):
                    integer_arg_nums = set([0])
                continue  # We try with different arguments
            # Some arguments might have to be integers, for instance:
            except TypeError as err:
                if len(integer_arg_nums) == num_args:
                    raise Exception("Incorrect testing procedure: unable to "
                                    "find correct argument values for %s: %s"
                                    % (funcname, err))
                # Another argument might be forced to be an integer:
                integer_arg_nums.add(random.choice(range(num_args)))
            else:
                # We have found reasonable arguments, and the test passed:
                break
###############################################################################
def test_value_construction():
    '''
    Tests the various means of constructing a constant number with
    uncertainty *without a string* (see test_ufloat_fromstr(), for this).
    '''

    ## Simple construction:
    x = ufloat(3, 0.14)
    assert x.nominal_value == 3
    assert x.std_dev == 0.14
    assert x.tag is None

    # ... with tag as positional argument:
    x = ufloat(3, 0.14, 'pi')
    assert x.nominal_value == 3
    assert x.std_dev == 0.14
    assert x.tag == 'pi'

    # ... with tag keyword:
    x = ufloat(3, 0.14, tag='pi')
    assert x.nominal_value == 3
    assert x.std_dev == 0.14
    assert x.tag == 'pi'

    ## Comparison with the obsolete tuple form:

    # The following tuple is stored in a variable instead of being
    # repeated in the calls below, so that the automatic code update
    # does not replace ufloat((3, 0.14)) by ufloat(3, 14): the goal
    # here is to make sure that the obsolete form gives the same
    # result as the new form.

    representation = (3, 0.14)  # Obsolete representation

    x = ufloat(3, 0.14)
    x2 = ufloat(representation)  # Obsolete
    assert x.nominal_value == x2.nominal_value
    assert x.std_dev == x2.std_dev
    assert x.tag is None
    assert x2.tag is None

    # With tag as positional argument:
    x = ufloat(3, 0.14, "pi")
    x2 = ufloat(representation, "pi")  # Obsolete
    assert x.nominal_value == x2.nominal_value
    assert x.std_dev == x2.std_dev
    assert x.tag == 'pi'
    assert x2.tag == 'pi'

    # With tag keyword:
    x = ufloat(3, 0.14, tag="pi")
    x2 = ufloat(representation, tag="pi")  # Obsolete
    assert x.nominal_value == x2.nominal_value
    assert x.std_dev == x2.std_dev
    assert x.tag == 'pi'
    assert x2.tag == 'pi'

    # Negative standard deviations should be caught in a nice way
    # (with the right exception):
    try:
        x = ufloat(3, -0.1)
    except uncert_core.NegativeStdDev:
        pass
    else:
        # Bug fix: without this else clause, the test silently passed
        # even when no exception was raised at all:
        raise Exception("NegativeStdDev exception expected")

    try:
        # Obsolete form:
        x = ufloat((3, -0.1))
    except uncert_core.NegativeStdDev:
        pass
    else:
        raise Exception("NegativeStdDev exception expected")

    ## Incorrect forms should not raise any deprecation warning, but
    ## raise an exception:
    try:
        ufloat(1)  # Form that has never been allowed
    except Exception:  # Any error is acceptable; silence is not
        pass
    else:
        raise Exception("An exception should be raised")
def test_ufloat_fromstr():
    "Input of numbers with uncertainties as a string"

    # String representation, and numerical values:
    tests = {
        "-1.23(3.4)": (-1.23, 3.4),  # (Nominal value, error)
        " -1.23(3.4) ": (-1.23, 3.4),  # Spaces ignored
        "-1.34(5)": (-1.34, 0.05),
        "1(6)": (1, 6),
        "3(4.2)": (3, 4.2),
        "-9(2)": (-9, 2),
        "1234567(1.2)": (1234567, 1.2),
        "12.345(15)": (12.345, 0.015),
        "-12.3456(78)e-6": (-12.3456e-6, 0.0078e-6),
        "0.29": (0.29, 0.01),
        "31.": (31, 1),
        "-31.": (-31, 1),
        # The following tests that the ufloat() routine does
        # not consider '31' like the tuple ('3', '1'), which would
        # make it expect two numbers (instead of 2 1-character
        # strings):
        "31": (31, 1),
        "-3.1e10": (-3.1e10, 0.1e10),
        "169.0(7)": (169, 0.7),
        "-0.1+/-1": (-0.1, 1),
        "-13e-2+/-1e2": (-13e-2, 1e2),
        '-14.(15)': (-14, 15),
        '-100.0(15)': (-100, 1.5),
        '14.(15)': (14, 15),
        # Global exponent:
        '(3.141+/-0.001)E+02': (314.1, 0.1),
        ## Pretty-print notation:
        # ± sign, global exponent (not pretty-printed):
        u'(3.141±0.001)E+02': (314.1, 0.1),
        # ± sign, individual exponent:
        u'3.141E+02±0.001e2': (314.1, 0.1),
        # ± sign, times symbol, superscript (= full pretty-print):
        u'(3.141 ± 0.001) × 10²': (314.1, 0.1),
        # NaN uncertainty:
        u'(3.141±nan)E+02': (314.1, float('nan')),
        '3.141e+02+/-nan': (314.1, float('nan')),
        '3.4(nan)e10': (3.4e10, float('nan')),
        # NaN value:
        'nan+/-3.14e2': (float('nan'), 314),
        # "Double-floats"
        '(-3.1415 +/- 1e-4)e+200': (-3.1415e200, 1e196),
        '(-3.1415e-10 +/- 1e-4)e+200': (-3.1415e190, 1e196),
        # Special float representation:
        '-3(0.)': (-3, 0)
    }

    # The NaN entries above work because numbers_close() treats NaN as
    # close to itself:
    for (representation, values) in tests.items():

        # Without tag:
        num = ufloat_fromstr(representation)
        assert numbers_close(num.nominal_value, values[0])
        assert numbers_close(num.std_dev, values[1])
        assert num.tag is None

        # With a tag as positional argument:
        num = ufloat_fromstr(representation, 'test variable')
        assert numbers_close(num.nominal_value, values[0])
        assert numbers_close(num.std_dev, values[1])
        assert num.tag == 'test variable'

        # With a tag as keyword argument:
        num = ufloat_fromstr(representation, tag='test variable')
        assert numbers_close(num.nominal_value, values[0])
        assert numbers_close(num.std_dev, values[1])
        assert num.tag == 'test variable'

        ## Obsolete forms

        num = ufloat(representation)  # Obsolete
        assert numbers_close(num.nominal_value, values[0])
        assert numbers_close(num.std_dev, values[1])
        assert num.tag is None

        # Call with a tag list argument:
        num = ufloat(representation, 'test variable')  # Obsolete
        assert numbers_close(num.nominal_value, values[0])
        assert numbers_close(num.std_dev, values[1])
        assert num.tag == 'test variable'

        # Call with a tag keyword argument:
        num = ufloat(representation, tag='test variable')  # Obsolete
        assert numbers_close(num.nominal_value, values[0])
        assert numbers_close(num.std_dev, values[1])
        assert num.tag == 'test variable'
###############################################################################
# Test of correctness of the fixed (usually analytical) derivatives:
def test_fixed_derivatives_basic_funcs():
    """
    Pre-calculated derivatives for operations on AffineScalarFunc.
    """

    def check_op(op, num_args):
        """
        Makes sure that the derivatives for function '__op__' of class
        AffineScalarFunc, which takes num_args arguments, are correct.

        If num_args is None, a correct value is calculated.
        """

        op_string = "__%s__" % op
        func = getattr(AffineScalarFunc, op_string)
        numerical_derivatives = uncert_core.NumericalDerivatives(
            # The __neg__ etc. methods of AffineScalarFunc only apply,
            # by definition, to AffineScalarFunc objects: we first map
            # possible scalar arguments (used for calculating
            # derivatives) to AffineScalarFunc objects:
            lambda *args: func(*map(uncert_core.to_affine_scalar, args)))
        compare_derivatives(func, numerical_derivatives, [num_args])

    # Operators that take 1 value:
    for op in uncert_core.modified_operators:
        check_op(op, 1)

    # Operators that take 2 values:
    for op in uncert_core.modified_ops_with_reflection:
        check_op(op, 2)
# Additional, more complex checks, for use with the nose unit testing
# framework.
def test_copy():
    "Standard copy module integration"
    import gc

    x = ufloat(3, 0.1)
    assert x == x

    # Copies of a Variable are independent random variables, so they
    # do not compare equal to the original:
    y = copy.copy(x)
    assert x != y
    assert not(x == y)
    assert y in y.derivatives.keys()  # y must not copy the dependence on x

    z = copy.deepcopy(x)
    assert x != z

    # Copy tests on expressions:
    t = x + 2*z
    # t depends on x:
    assert x in t.derivatives

    # The relationship between the copy of an expression and the
    # original variables should be preserved:
    t_copy = copy.copy(t)
    # Shallow copy: the variables on which t depends are not copied:
    assert x in t_copy.derivatives
    assert (uncert_core.covariance_matrix([t, z]) ==
            uncert_core.covariance_matrix([t_copy, z]))

    # However, the relationship between a deep copy and the original
    # variables should be broken, since the deep copy created new,
    # independent variables:
    t_deepcopy = copy.deepcopy(t)
    assert x not in t_deepcopy.derivatives
    assert (uncert_core.covariance_matrix([t, z]) !=
            uncert_core.covariance_matrix([t_deepcopy, z]))

    # Test of implementations with weak references:

    # Weak references: destroying a variable should never destroy the
    # integrity of its copies (which would happen if the copy keeps a
    # weak reference to the original, in its derivatives member: the
    # weak reference to the original would become invalid):
    del x

    # gc.collect() forces the destruction of x now, so that the check
    # below is meaningful even on garbage collectors that delay it:
    gc.collect()

    assert y in list(y.derivatives.keys())
## Classes for the pickling tests (put at the module level, so that
## they can be unpickled):
# Subclass without slots:
class NewVariable_dict(uncert_core.Variable):
    """Subclass of Variable without __slots__ (instances get a __dict__)."""
    pass
# Subclass with slots defined by a tuple:
class NewVariable_slots_tuple(uncert_core.Variable):
    """Subclass of Variable whose __slots__ is declared as a tuple."""
    __slots__ = ('new_attr',)
# Subclass with slots defined by a string:
class NewVariable_slots_str(uncert_core.Variable):
    """Subclass of Variable whose __slots__ is declared as a single string."""
    __slots__ = 'new_attr'
def test_pickling():
    "Standard pickle module integration."

    import pickle

    x = ufloat(2, 0.1)

    x_unpickled = pickle.loads(pickle.dumps(x))

    assert x != x_unpickled  # Pickling creates copies

    ## Tests with correlations and AffineScalarFunc objects:
    f = 2*x
    assert isinstance(f, AffineScalarFunc)
    # f and x are pickled *together*, so their correlation can survive
    # the round trip:
    (f_unpickled, x_unpickled2) = pickle.loads(pickle.dumps((f, x)))
    # Correlations must be preserved:
    assert f_unpickled - x_unpickled2 - x_unpickled2 == 0

    ## Tests with subclasses:

    for subclass in (NewVariable_dict, NewVariable_slots_tuple,
                     NewVariable_slots_str):

        x = subclass(3, 0.14)

        # Pickling test with possibly uninitialized slots:
        pickle.loads(pickle.dumps(x))

        # Unpickling test:
        x.new_attr = 'New attr value'
        x_unpickled = pickle.loads(pickle.dumps(x))
        # Must exist (from the slots of the parent class):
        x_unpickled.nominal_value
        x_unpickled.new_attr  # Must exist

    ##

    # Corner case test: when an attribute is present both in __slots__
    # and in __dict__, it is first looked up from the slots
    # (references:
    # http://docs.python.org/2/reference/datamodel.html#invoking-descriptors,
    # http://stackoverflow.com/a/15139208/42973). As a consequence,
    # the pickling process must pickle the correct value (i.e., not
    # the value from __dict__):
    x = NewVariable_dict(3, 0.14)
    x._nominal_value = 'in slots'
    # Corner case: __dict__ key which is also a slot name (it is
    # shadowed by the corresponding slot, so this is very unusual,
    # though):
    x.__dict__['_nominal_value'] = 'in dict'
    # Additional __dict__ attribute:
    x.dict_attr = 'dict attribute'

    x_unpickled = pickle.loads(pickle.dumps(x))
    # We make sure that the data is still there and untouched:
    assert x_unpickled._nominal_value == 'in slots'
    assert x_unpickled.__dict__ == x.__dict__

    ##

    # Corner case that should have no impact on the code but which is
    # not prevented by the documentation: case of constant linear
    # terms (the potential gotcha is that if the linear_combo
    # attribute is empty, __getstate__()'s result could be false, and
    # so __setstate__() would not be called and the original empty
    # linear combination would not be set in linear_combo.
    x = uncert_core.LinearCombination({})
    assert pickle.loads(pickle.dumps(x)).linear_combo == {}
def test_int_div():
    "Integer division"

    # Floats are used throughout, because derivatives on integer
    # operations can otherwise be meaningless:
    quotient = ufloat(3.9, 2)//2
    assert quotient.nominal_value == 1.

    # The uncertainty of the ufloat() above is large, which violates
    # the small-error assumption of the package; under that assumption
    # the floor operation is locally flat, so the following holds:
    assert quotient.std_dev == 0.0
def test_comparison_ops():
    "Test of comparison operators"

    import random

    # Operations on quantities equivalent to Python numbers must still
    # be correct:
    a = ufloat(-3, 0)
    b = ufloat(10, 0)
    c = ufloat(10, 0)
    assert a < b
    assert a < 3
    assert 3 < b  # This is first given to int.__lt__()
    assert b == c

    x = ufloat(3, 0.1)

    # One constraint is that usual Python code for inequality testing
    # still work in a reasonable way (for instance, it is generally
    # desirable that functions defined by different formulas on
    # different intervals can still do "if 0 < x < 1:...". This
    # supposes again that errors are "small" (as for the estimate of
    # the standard error).
    assert x > 1

    # The limit case is not obvious:
    assert not(x >= 3)
    assert not(x < 3)

    assert x == x
    # Comparaison between Variable and AffineScalarFunc:
    assert x == x + 0
    # Comparaison between 2 _different_ AffineScalarFunc objects
    # representing the same value:
    assert x/2 == x/2
    # With uncorrelated result that have the same behavior (value and
    # standard error):
    assert 2*ufloat(1, 0.1) != ufloat(2, 0.2)
    # Comparaison between 2 _different_ Variable objects
    # that are uncorrelated:
    assert x != ufloat(3, 0.1)

    assert x != ufloat(3, 0.2)

    # Comparison to other types should work:
    assert x != None  # Not comparable
    assert x-x == 0  # Comparable, even though the types are different
    assert x != [1, 2]

    ####################

    # Checks of the semantics of logical operations: they return True
    # iff they are always True when the parameters vary in an
    # infinitesimal interval inside sigma (sigma == 0 is a special
    # case):

    def test_all_comparison_ops(x, y):
        """
        Takes two Variable objects.

        Fails if any comparison operation fails to follow the proper
        semantics: a comparison only returns True if the correspond float
        comparison results are True for all the float values taken by
        the variables (of x and y) when they vary in an infinitesimal
        neighborhood within their uncertainty.

        This test is stochastic: it may, exceptionally, fail for
        correctly implemented comparison operators.
        """

        import random

        def random_float(var):
            """
            Returns a random value for Variable var, in an
            infinitesimal interval withing its uncertainty. The case
            of a zero uncertainty is special.
            """
            return ((random.random()-0.5) * min(var.std_dev, 1e-5)
                    + var.nominal_value)

        # All operations are tested:
        for op in ["__%s__" % name
                   for name in('ne', 'eq', 'lt', 'le', 'gt', 'ge')]:

            try:
                float_func = getattr(float, op)
            except AttributeError:  # Python 2.3's floats don't have __ne__
                continue

            # Determination of the correct truth value of func(x, y):

            sampled_results = []

            # The "main" value is an important particular case, and
            # the starting value for the final result
            # (correct_result):

            sampled_results.append(float_func(x.nominal_value, y.nominal_value))

            for check_num in range(50):  # Many points checked
                sampled_results.append(float_func(random_float(x),
                                                  random_float(y)))

            min_result = min(sampled_results)
            max_result = max(sampled_results)

            if min_result == max_result:
                correct_result = min_result
            else:

                # Almost all results must be True, for the final value
                # to be True:
                num_min_result = sampled_results.count(min_result)

                # 1 exception is considered OK:
                correct_result = (num_min_result == 1)

            try:
                assert correct_result == getattr(x, op)(y)
            except AssertionError:
                print("Sampling results:", sampled_results)
                raise Exception("Semantic value of %s %s (%s) %s not"
                                " correctly reproduced."
                                % (x, op, y, correct_result))

    # With different numbers:
    test_all_comparison_ops(ufloat(3, 0.1),
                            ufloat(-2, 0.1))
    test_all_comparison_ops(ufloat(0, 0),  # Special number
                            ufloat(1, 1))
    test_all_comparison_ops(ufloat(0, 0),  # Special number
                            ufloat(0, 0.1))
    # With identical numbers:
    test_all_comparison_ops(ufloat(0, 0),
                            ufloat(0, 0))
    test_all_comparison_ops(ufloat(1, 1),
                            ufloat(1, 1))
def test_logic():
    "Boolean logic: __nonzero__, bool."

    nonzero_exact = ufloat(3, 0)
    zero_exact = ufloat(0, 0)
    zero_uncertain = ufloat(0, 0.1)
    negative = ufloat(-1, 2)

    # A number with uncertainty is falsy only when it is exactly zero
    # in an infinitesimal neighborhood of its nominal value:
    assert bool(nonzero_exact) == True
    assert bool(zero_exact) == False
    assert bool(zero_uncertain) == True
    # Only the infinitesimal neighborhood matters, so a negative
    # nominal value is truthy regardless of the uncertainty:
    assert bool(negative) == True
def test_obsolete():
    'Tests some obsolete creation of number with uncertainties'
    x = ufloat(3, 0.1)

    # Obsolete function, protected against automatic modification
    # (the explicit __call__ keeps code-update scripts from rewriting
    # this deprecated setter):
    x.set_std_dev.__call__(0.2)  # Obsolete

    x_std_dev = x.std_dev
    # In the obsolete interface, the std_dev attribute also behaves as
    # a callable returning the standard deviation:
    assert x_std_dev() == 0.2  # Obsolete call
def test_basic_access_to_data():
    "Access to data from Variable and AffineScalarFunc objects."

    x = ufloat(3.14, 0.01, "x var")
    assert x.tag == "x var"
    assert x.nominal_value == 3.14
    assert x.std_dev == 0.01

    # Case of AffineScalarFunc objects:
    y = x + 0
    assert type(y) == AffineScalarFunc
    assert y.nominal_value == 3.14
    assert y.std_dev == 0.01

    # Details on the sources of error:
    a = ufloat(-1, 0.001)
    y = 2*x + 3*x + 2 + a
    error_sources = y.error_components()
    assert len(error_sources) == 2  # 'a' and 'x'
    assert error_sources[x] == 0.05
    assert error_sources[a] == 0.001

    # Derivative values should be available:
    assert y.derivatives[x] == 5

    # Modification of the standard deviation of variables:
    x.std_dev = 1
    assert y.error_components()[x] == 5  # New error contribution!

    # Calculated values with uncertainties should not have a settable
    # standard deviation:
    y = 2*x
    try:
        y.std_dev = 1
    except AttributeError:
        pass
    else:
        raise Exception(
            "std_dev should not be settable for calculated results")

    # Calculation of deviations in units of the standard deviations:
    assert 10/x.std_dev == x.std_score(10 + x.nominal_value)

    # "In units of the standard deviation" is not always meaningful:
    x.std_dev = 0
    try:
        x.std_score(1)
    except ValueError:
        pass  # Normal behavior
    else:
        # Bug fix: without this else clause, the test silently passed
        # even when std_score() did not raise for a zero std_dev:
        raise Exception("ValueError expected for zero std_dev")
def test_correlations():
    "Correlations between variables"

    offset = ufloat(1, 0)
    base = ufloat(4, 0.1)
    combined = base*2 + offset

    # A naive, uncorrelated propagation would always add uncertainties:
    assert combined.std_dev != 0

    # Subtracting an equivalent expression built from the same variable
    # must cancel *exactly*, thanks to correlation tracking:
    residual = combined - (base*2 + 1)
    assert residual.nominal_value == 0
    assert residual.std_dev == 0
def test_no_coercion():
    """
    Coercion of Variable object to a simple float.

    The coercion should be impossible, like for complex numbers.
    """

    value = ufloat(4, 1)

    # float() on a number with uncertainty must raise, exactly as it
    # does for complex numbers:
    try:
        assert float(value) == 4
    except TypeError:
        pass  # Expected behavior
    else:
        raise Exception("Conversion to float() should fail with TypeError")
def test_wrapped_func_no_args_no_kwargs():
    '''
    Wrap a function that takes only positional-or-keyword parameters.
    '''

    # Reference function that can handle numbers with uncertainties
    # natively (through umath):
    def f_auto_unc(x, y):
        return 2*x+umath.sin(y)

    # Like f_auto_unc, but does not accept numbers with uncertainties:
    def f(x, y):
        assert not isinstance(x, uncert_core.UFloat)
        assert not isinstance(y, uncert_core.UFloat)
        return f_auto_unc(x, y)

    x = uncert_core.ufloat(1, 0.1)
    y = uncert_core.ufloat(10, 2)

    ### Automatic numerical derivatives:

    ## Fully automatic numerical derivatives:
    f_wrapped = uncert_core.wrap(f)
    assert ufloats_close(f_auto_unc(x, y), f_wrapped(x, y))

    # Call with keyword arguments:
    assert ufloats_close(f_auto_unc(y=y, x=x), f_wrapped(y=y, x=x))

    ## Automatic additional derivatives for non-defined derivatives,
    ## and explicit None derivative:
    f_wrapped = uncert_core.wrap(f, [None])  # No derivative for y
    assert ufloats_close(f_auto_unc(x, y), f_wrapped(x, y))

    # Call with keyword arguments:
    assert ufloats_close(f_auto_unc(y=y, x=x), f_wrapped(y=y, x=x))

    ### Explicit derivatives:

    ## Fully defined derivatives (df/dx = 2, df/dy = cos(y)):
    f_wrapped = uncert_core.wrap(f, [lambda x, y: 2,
                                     lambda x, y: math.cos(y)])
    assert ufloats_close(f_auto_unc(x, y), f_wrapped(x, y))

    # Call with keyword arguments:
    assert ufloats_close(f_auto_unc(y=y, x=x), f_wrapped(y=y, x=x))

    ## Automatic additional derivatives for non-defined derivatives:
    f_wrapped = uncert_core.wrap(f, [lambda x, y: 2])  # No derivative for y
    assert ufloats_close(f_auto_unc(x, y), f_wrapped(x, y))

    # Call with keyword arguments:
    assert ufloats_close(f_auto_unc(y=y, x=x), f_wrapped(y=y, x=x))
def test_wrapped_func_args_no_kwargs():
    '''
    Wrap a function that takes only positional-or-keyword and
    var-positional parameters.
    '''

    # Reference function that can handle numbers with uncertainties
    # natively (through umath):
    def f_auto_unc(x, y, *args):
        return 2*x+umath.sin(y)+3*args[1]

    # Like f_auto_unc, but does not accept numbers with uncertainties:
    def f(x, y, *args):
        assert not any(isinstance(value, uncert_core.UFloat)
                       for value in [x, y] + list(args))
        return f_auto_unc(x, y, *args)

    x = uncert_core.ufloat(1, 0.1)
    y = uncert_core.ufloat(10, 2)
    s = 'string arg'
    z = uncert_core.ufloat(100, 3)

    args = [s, z, s]  # var-positional parameters

    ### Automatic numerical derivatives:

    ## Fully automatic numerical derivatives:
    f_wrapped = uncert_core.wrap(f)
    assert ufloats_close(f_auto_unc(x, y, *args), f_wrapped(x, y, *args))

    ## Automatic additional derivatives for non-defined derivatives,
    ## and explicit None derivative:
    f_wrapped = uncert_core.wrap(f, [None])  # No derivative for y
    assert ufloats_close(f_auto_unc(x, y, *args), f_wrapped(x, y, *args))

    ### Explicit derivatives:

    ## Fully defined derivatives (the None entry is for the
    ## non-differentiable string argument args[0]):
    f_wrapped = uncert_core.wrap(f, [lambda x, y, *args: 2,
                                     lambda x, y, *args: math.cos(y),
                                     None,
                                     lambda x, y, *args: 3])
    assert ufloats_close(f_auto_unc(x, y, *args), f_wrapped(x, y, *args))

    ## Automatic additional derivatives for non-defined derivatives:

    # No derivative for y:
    f_wrapped = uncert_core.wrap(f, [lambda x, y, *args: 2])
    assert ufloats_close(f_auto_unc(x, y, *args), f_wrapped(x, y, *args))
def test_wrapped_func_no_args_kwargs():
    '''
    Wrap a function that takes only positional-or-keyword and
    var-keyword parameters.
    '''

    # Reference function that can handle numbers with uncertainties
    # natively (through umath):
    def f_auto_unc(x, y, **kwargs):
        return 2*x+umath.sin(y)+3*kwargs['z']

    # Like f_auto_unc, but does not accept numbers with uncertainties:
    def f(x, y, **kwargs):
        assert not any(isinstance(value, uncert_core.UFloat)
                       for value in [x, y] + list(kwargs.values()))
        return f_auto_unc(x, y, **kwargs)

    x = uncert_core.ufloat(1, 0.1)
    y = uncert_core.ufloat(10, 2)
    s = 'string arg'
    z = uncert_core.ufloat(100, 3)

    kwargs = {'s': s, 'z': z}  # Arguments not in signature

    ### Automatic numerical derivatives:

    ## Fully automatic numerical derivatives:
    f_wrapped = uncert_core.wrap(f)
    assert ufloats_close(f_auto_unc(x, y, **kwargs),
                         f_wrapped(x, y, **kwargs))

    # Call with keyword arguments:
    assert ufloats_close(f_auto_unc(y=y, x=x, **kwargs),
                         f_wrapped(y=y, x=x, **kwargs))

    ## Automatic additional derivatives for non-defined derivatives,
    ## and explicit None derivative:

    # No derivative for positional-or-keyword parameter y, no
    # derivative for optional-keyword parameter z:
    f_wrapped = uncert_core.wrap(f, [None])
    assert ufloats_close(f_auto_unc(x, y, **kwargs),
                         f_wrapped(x, y, **kwargs))

    # Call with keyword arguments:
    assert ufloats_close(f_auto_unc(y=y, x=x, **kwargs),
                         f_wrapped(y=y, x=x, **kwargs))

    # No derivative for positional-or-keyword parameter y, no
    # derivative for optional-keyword parameter z:
    f_wrapped = uncert_core.wrap(f, [None], {'z': None})
    assert ufloats_close(f_auto_unc(x, y, **kwargs),
                         f_wrapped(x, y, **kwargs))

    # Call with keyword arguments:
    assert ufloats_close(f_auto_unc(y=y, x=x, **kwargs),
                         f_wrapped(y=y, x=x, **kwargs))

    # No derivative for positional-or-keyword parameter y, derivative
    # for optional-keyword parameter z:
    f_wrapped = uncert_core.wrap(f, [None],
                                 {'z': lambda x, y, **kwargs: 3})
    assert ufloats_close(f_auto_unc(x, y, **kwargs),
                         f_wrapped(x, y, **kwargs))

    # Call with keyword arguments:
    assert ufloats_close(f_auto_unc(y=y, x=x, **kwargs),
                         f_wrapped(y=y, x=x, **kwargs))

    ### Explicit derivatives:

    ## Fully defined derivatives:
    # NOTE(review): the key 'z:' below contains a colon, so it cannot
    # match the keyword argument 'z'; this explicit derivative is
    # presumably never used (wrap() would fall back to a numerical
    # derivative) — likely a typo for 'z'. Confirm against wrap()'s
    # handling of keyword-derivative dictionaries before changing.
    f_wrapped = uncert_core.wrap(
        f,
        [lambda x, y, **kwargs: 2, lambda x, y, **kwargs: math.cos(y)],
        {'z:': lambda x, y, **kwargs: 3})
    assert ufloats_close(f_auto_unc(x, y, **kwargs),
                         f_wrapped(x, y, **kwargs))

    # Call with keyword arguments:
    assert ufloats_close(f_auto_unc(y=y, x=x, **kwargs),
                         f_wrapped(y=y, x=x, **kwargs))

    ## Automatic additional derivatives for non-defined derivatives:

    # No derivative for y or z:
    f_wrapped = uncert_core.wrap(f, [lambda x, y, **kwargs: 2])
    assert ufloats_close(f_auto_unc(x, y, **kwargs),
                         f_wrapped(x, y, **kwargs))

    # Call with keyword arguments:
    assert ufloats_close(f_auto_unc(y=y, x=x, **kwargs),
                         f_wrapped(y=y, x=x, **kwargs))
def test_wrapped_func_args_kwargs():
    '''
    Wrap a function that takes positional-or-keyword, var-positional
    and var-keyword parameters.
    '''

    # Reference function that can handle numbers with uncertainties
    # natively (through umath):
    def f_auto_unc(x, y, *args, **kwargs):
        return 2*x+umath.sin(y)+4*args[1]+3*kwargs['z']

    # Like f_auto_unc, but does not accept numbers with uncertainties:
    def f(x, y, *args, **kwargs):
        assert not any(isinstance(value, uncert_core.UFloat)
                       for value in [x, y]+list(args)+list(kwargs.values()))
        return f_auto_unc(x, y, *args, **kwargs)

    x = uncert_core.ufloat(1, 0.1)
    y = uncert_core.ufloat(10, 2)
    t = uncert_core.ufloat(1000, 4)
    s = 'string arg'
    z = uncert_core.ufloat(100, 3)

    args = [s, t, s]
    kwargs = {'u': s, 'z': z}  # Arguments not in signature

    ### Automatic numerical derivatives:

    ## Fully automatic numerical derivatives (the looser tolerance
    ## accommodates numerical differentiation error):
    f_wrapped = uncert_core.wrap(f)

    assert ufloats_close(f_auto_unc(x, y, *args, **kwargs),
                         f_wrapped(x, y, *args, **kwargs), tolerance=1e-5)

    ## Automatic additional derivatives for non-defined derivatives,
    ## and explicit None derivative:

    # No derivative for positional-or-keyword parameter y, no
    # derivative for optional-keyword parameter z:
    f_wrapped = uncert_core.wrap(f, [None, None, None,
                                     lambda x, y, *args, **kwargs: 4])
    assert ufloats_close(f_auto_unc(x, y, *args, **kwargs),
                         f_wrapped(x, y, *args, **kwargs), tolerance=1e-5)

    # No derivative for positional-or-keyword parameter y, no
    # derivative for optional-keyword parameter z:
    f_wrapped = uncert_core.wrap(f, [None], {'z': None})
    assert ufloats_close(f_auto_unc(x, y, *args, **kwargs),
                         f_wrapped(x, y, *args, **kwargs), tolerance=1e-5)

    # No derivative for positional-or-keyword parameter y, derivative
    # for optional-keyword parameter z:
    f_wrapped = uncert_core.wrap(f, [None],
                                 {'z': lambda x, y, *args, **kwargs: 3})
    assert ufloats_close(f_auto_unc(x, y, *args, **kwargs),
                         f_wrapped(x, y, *args, **kwargs), tolerance=1e-5)

    ### Explicit derivatives:

    ## Fully defined derivatives:
    # NOTE(review): as in test_wrapped_func_no_args_kwargs(), the key
    # 'z:' (with a colon) cannot match the keyword argument 'z', so
    # this explicit derivative is presumably unused — likely a typo
    # for 'z'. Confirm against wrap() before changing.
    f_wrapped = uncert_core.wrap(
        f,
        [lambda x, y, *args, **kwargs: 2,
         lambda x, y, *args, **kwargs: math.cos(y)],
        {'z:': lambda x, y, *args, **kwargs: 3})

    assert ufloats_close(f_auto_unc(x, y, *args, **kwargs),
                         f_wrapped(x, y, *args, **kwargs), tolerance=1e-5)

    ## Automatic additional derivatives for non-defined derivatives:

    # No derivative for y or z:
    f_wrapped = uncert_core.wrap(f, [lambda x, y, *args, **kwargs: 2])
    assert ufloats_close(f_auto_unc(x, y, *args, **kwargs),
                         f_wrapped(x, y, *args, **kwargs), tolerance=1e-5)
def test_wrapped_func():
    """
    Test uncertainty-aware functions obtained through wrapping.
    """

    ########################################

    # Function which can automatically handle numbers with
    # uncertainties:
    def f_auto_unc(angle, *list_var):
        return umath.cos(angle) + sum(list_var)

    def f(angle, *list_var):
        # We make sure that this function is only ever called with
        # numbers with no uncertainty (since it is wrapped):
        assert not isinstance(angle, uncert_core.UFloat)
        assert not any(isinstance(arg, uncert_core.UFloat)
                       for arg in list_var)
        return f_auto_unc(angle, *list_var)

    f_wrapped = uncert_core.wrap(f)

    my_list = [1, 2, 3]

    ########################################
    # Test of a wrapped function that only calls the original
    # function: it should obtain the exact same result:
    assert f_wrapped(0, *my_list) == f(0, *my_list)
    # 1 == 1 +/- 0, so the type must be checked too (the wrapped call
    # with exact inputs must return a plain number, not a UFloat):
    assert type(f_wrapped(0, *my_list)) == type(f(0, *my_list))

    ########################################
    # Call with uncertainties:

    angle = uncert_core.ufloat(1, 0.1)
    list_value = uncert_core.ufloat(3, 0.2)

    # The random variables must be the same (full correlation):
    # `angle` appears both as the positional argument and inside the
    # *args list, so the wrapped and the reference results must share
    # the exact same underlying Variable objects.

    assert ufloats_close(f_wrapped(angle, *[1, angle]),
                         f_auto_unc(angle, *[1, angle]))

    assert ufloats_close(f_wrapped(angle, *[list_value, angle]),
                         f_auto_unc(angle, *[list_value, angle]))

    ########################################
    # Non-numerical arguments, and explicit and implicit derivatives:
    def f(x, y, z, t, u):
        return x+2*z+3*t+4*u

    f_wrapped = uncert_core.wrap(
        f, [lambda *args: 1, None, lambda *args: 2, None])  # No deriv. for u

    assert f_wrapped(10, 'string argument', 1, 0, 0) == 12

    x = uncert_core.ufloat(10, 1)

    # All four numeric parameters receive the same variable x, so the
    # standard deviations of the partial contributions (1+2+3+4) add up
    # linearly:
    assert numbers_close(f_wrapped(x, 'string argument', x, x, x).std_dev,
                         (1+2+3+4)*x.std_dev)
def test_wrap_with_kwargs():
    '''
    Tests wrap() on functions with keyword arguments.

    Includes both wrapping a function that takes optional keyword
    arguments and calling a wrapped function with keyword arguments
    (optional or not).
    '''

    # Version of f() that automatically works with numbers with
    # uncertainties:
    def f_auto_unc(x, y, *args, **kwargs):
        return x + umath.sin(y) + 2*args[0] + 3*kwargs['t']

    # We also add keyword arguments in the function which is wrapped:
    def f(x, y, *args, **kwargs):
        # We make sure that f is not called directly with a number with
        # uncertainty:
        for value in [x, y]+list(args)+list(kwargs.values()):
            assert not isinstance(value, uncert_core.UFloat)
        return f_auto_unc(x, y, *args, **kwargs)

    f_wrapped = uncert_core.wrap(f)

    x = ufloat(1, 0.1)
    y = ufloat(10, 0.11)
    z = ufloat(100, 0.111)
    t = ufloat(0.1, 0.1111)

    assert ufloats_close(f_wrapped(x, y, z, t=t),
                         f_auto_unc(x, y, z, t=t), tolerance=1e-5)

    ########################################
    # We make sure that analytical derivatives are indeed used. We
    # also test the automatic handling of additional *args arguments
    # beyond the number of supplied derivatives.

    f_wrapped2 = uncert_core.wrap(
        f, [None, lambda x, y, *args, **kwargs: math.cos(y)])

    # The derivatives must be perfectly identical:

    # The *args parameter of f() is given as a keyword argument, so as
    # to try to confuse the code:

    assert (f_wrapped2(x, y, z, t=t).derivatives[y]
            == f_auto_unc(x, y, z, t=t).derivatives[y])

    # Derivatives supplied through the keyword-parameter dictionary of
    # derivatives, and also derivatives supplied for the
    # var-positional arguments (*args[0]):

    f_wrapped3 = uncert_core.wrap(
        f,
        [None, None, lambda x, y, *args, **kwargs: 2],
        {'t': lambda x, y, *args, **kwargs: 3})

    # The derivatives should be exactly the same, because they are
    # obtained with the exact same analytic formula:
    assert (f_wrapped3(x, y, z, t=t).derivatives[z]
            == f_auto_unc(x, y, z, t=t).derivatives[z])
    assert (f_wrapped3(x, y, z, t=t).derivatives[t]
            == f_auto_unc(x, y, z, t=t).derivatives[t])

    ########################################
    # Making sure that user-supplied derivatives are indeed called:

    class FunctionCalled(Exception):
        '''
        Raised to signal that a function is indeed called.
        '''
        pass

    def failing_func(x, y, *args, **kwargs):
        raise FunctionCalled

    f_wrapped4 = uncert_core.wrap(
        f,
        [None, failing_func],
        {'t': failing_func})

    # y carries an uncertainty, so its (failing) derivative must be
    # evaluated:
    try:
        f_wrapped4(x, 3.14, z, t=t)
    except FunctionCalled:
        pass
    else:
        raise Exception('User-supplied derivative should be called')

    # t carries an uncertainty, so its (failing) derivative must be
    # evaluated:
    try:
        f_wrapped4(x, y, z, t=3.14)
    except FunctionCalled:
        pass
    else:
        raise Exception('User-supplied derivative should be called')

    # Neither y nor t carries an uncertainty: no failing derivative
    # should ever be evaluated:
    try:
        f_wrapped4(x, 3.14, z, t=3.14)
    except FunctionCalled:
        raise Exception('User-supplied derivative should *not* be called')
###############################################################################
def test_access_to_std_dev():
    """Uniform access to the standard deviation."""

    variable = ufloat(1, 0.1)
    derived = 2*variable

    # The module-level accessor must agree with the attribute, both for
    # a Variable and for a quantity derived from one (AffineScalarFunc):
    for quantity in (variable, derived):
        assert uncert_core.std_dev(quantity) == quantity.std_dev

    # Objects that carry no uncertainty are reported with a zero
    # standard deviation:
    for plain_object in ([], None):
        assert uncert_core.std_dev(plain_object) == 0
###############################################################################
def test_covariances():
    """Covariance matrix of fully correlated quantities."""

    base = ufloat(1, 0.1)
    scaled_shifted = -2*base+10
    scaled = -3*base

    matrix = uncert_core.covariance_matrix([base, scaled_shifted, scaled])

    # Diagonal elements are the variances: (0.1)**2, (2*0.1)**2,
    # (3*0.1)**2:
    for index, expected_variance in enumerate([0.01, 0.04, 0.09]):
        assert numbers_close(matrix[index][index], expected_variance)

    # Off-diagonal element between base and -2*base+10:
    assert numbers_close(matrix[0][1], -0.02)
###############################################################################
def test_power_all_cases():
    """Exercise every (x, p) regime of x**p through the built-in pow()."""
    power_all_cases(pow)
def power_all_cases(op):
    '''
    Checks all cases for the value and derivatives of power-like
    operator op (op is typically the built-in pow(), or math.pow()).

    Checks only the details of special results like 0, 1 or NaN).

    Different cases for the value of x**p and its derivatives are
    tested by dividing the (x, p) plane with:

    - x < 0, x = 0, x > 0
    - p integer or not, p < 0, p = 0, p > 0

    (not all combinations are distinct: for instance x > 0 gives
    identical formulas for all p).
    '''

    # Distinct Variable objects are needed even for equal values
    # (e.g. zero vs zero2), because the derivatives are looked up by
    # variable identity below:
    zero = ufloat(0, 0.1)
    zero2 = ufloat(0, 0.1)
    one = ufloat(1, 0.1)
    positive = ufloat(0.3, 0.01)
    positive2 = ufloat(0.3, 0.01)
    negative = ufloat(-0.3, 0.01)
    integer = ufloat(-3, 0)
    non_int_larger_than_one = ufloat(3.1, 0.01)
    positive_smaller_than_one = ufloat(0.3, 0.01)

    ## negative**integer

    result = op(negative, integer)
    assert not isnan(result.derivatives[negative])
    assert isnan(result.derivatives[integer])

    # Limit cases:
    result = op(negative, one)
    assert result.derivatives[negative] == 1
    assert isnan(result.derivatives[one])

    result = op(negative, zero)
    assert result.derivatives[negative] == 0
    assert isnan(result.derivatives[zero])

    ## negative**non-integer

    ## zero**...

    result = op(zero, non_int_larger_than_one)
    assert isnan(result.derivatives[zero])
    assert result.derivatives[non_int_larger_than_one] == 0

    # Special cases:
    result = op(zero, one)
    assert result.derivatives[zero] == 1
    assert result.derivatives[one] == 0

    result = op(zero, 2*one)
    assert result.derivatives[zero] == 0
    assert result.derivatives[one] == 0

    result = op(zero, positive_smaller_than_one)
    assert isnan(result.derivatives[zero])
    assert result.derivatives[positive_smaller_than_one] == 0

    result = op(zero, zero2)
    assert result.derivatives[zero] == 0
    assert isnan(result.derivatives[zero2])

    ## positive**...: this is a quite regular case where the value and
    ## the derivatives are all defined.

    result = op(positive, positive2)
    assert not isnan(result.derivatives[positive])
    assert not isnan(result.derivatives[positive2])

    result = op(positive, zero)
    assert result.derivatives[positive] == 0
    assert not isnan(result.derivatives[zero])

    result = op(positive, negative)
    assert not isnan(result.derivatives[positive])
    assert not isnan(result.derivatives[negative])
###############################################################################
def test_power_special_cases():
    '''
    Checks special cases of x**p.
    '''

    power_special_cases(pow)

    # We want the same behavior for numbers with uncertainties and for
    # math.pow() at their nominal values:

    positive = ufloat(0.3, 0.01)
    negative = ufloat(-0.3, 0.01)

    # 0**negative raises, like for plain floats; see
    # http://stackoverflow.com/questions/10282674/difference-between-the-built-in-pow-and-math-pow-for-floats-in-python

    try:
        pow(ufloat(0, 0), negative)
    except ZeroDivisionError:
        pass
    else:
        raise Exception("A proper exception should have been raised")

    try:
        pow(ufloat(0, 0.1), negative)
    except ZeroDivisionError:
        pass
    else:
        raise Exception('A proper exception should have been raised')

    try:
        result = pow(negative, positive)
    except ValueError:
        # The reason why it should also fail in Python 3 is that the
        # result of Python 3 is a complex number, which uncertainties
        # does not handle (no uncertainties on complex numbers). In
        # Python 2, this should always fail, since Python 2 does not
        # know how to calculate it.
        pass
    else:
        raise Exception('A proper exception should have been raised')
def power_special_cases(op):
    '''
    Checks special cases of the uncertainty power operator op (where
    op is typically the built-in pow or uncertainties.umath.pow).

    The values x = 0, x = 1 and x = NaN are special, as are null,
    integral and NaN values of p.
    '''

    # Exact (zero-uncertainty) special bases/exponents:
    zero = ufloat(0, 0)
    one = ufloat(1, 0)
    p = ufloat(0.3, 0.01)

    assert op(0, p) == 0
    assert op(zero, p) == 0

    # The outcome of 1**nan and nan**0 was undefined before Python
    # 2.6 (http://docs.python.org/library/math.html#math.pow):
    assert op(float('nan'), zero) == 1.0
    assert op(one, float('nan')) == 1.0

    # …**0 == 1.0:
    assert op(p, 0) == 1.0
    assert op(zero, 0) == 1.0
    assert op((-p), 0) == 1.0

    # …**zero:
    assert op((-10.3), zero) == 1.0
    assert op(0, zero) == 1.0
    assert op(0.3, zero) == 1.0
    assert op((-p), zero) == 1.0
    assert op(zero, zero) == 1.0
    assert op(p, zero) == 1.0

    # one**… == 1.0
    assert op(one, -3) == 1.0
    assert op(one, -3.1) == 1.0
    assert op(one, 0) == 1.0
    assert op(one, 3) == 1.0
    assert op(one, 3.1) == 1.0

    # … with two numbers with uncertainties:
    assert op(one, (-p)) == 1.0
    assert op(one, zero) == 1.0
    assert op(one, p) == 1.0

    # 1**… == 1.0:
    assert op(1., (-p)) == 1.0
    assert op(1., zero) == 1.0
    assert op(1., p) == 1.0
def test_power_wrt_ref():
    """Compare the built-in pow() on uncertain numbers against itself on floats."""
    power_wrt_ref(pow, pow)
def power_wrt_ref(op, ref_op):
    """
    Checks special cases of the uncertainty power operator op (where
    op is typically the built-in pow or uncertainties.umath.pow), by
    comparing its results to the reference power operator ref_op
    (which is typically the built-in pow or math.pow).
    """
    # An uncertain negative base can be raised to a (negative) integral
    # power; the nominal value must match the reference operator:
    assert op(ufloat(-1.1, 0.1), -9).nominal_value == ref_op(-1.1, -9)

    # Numbers with a zero uncertainty must reproduce the reference
    # results exactly:
    for base, exponent in ((-1, 9), (-1.1, 9)):
        assert op(ufloat(base, 0), exponent) == ref_op(base, exponent)
###############################################################################
def test_PDG_precision():
    '''
    Test of the calculation of the number of significant digits for
    the uncertainty.
    '''
    # Each group covers the 3 branches of the Particle Data Group
    # rounding rules, at both extremes of the float range:
    cases = [
        # Very big floats:
        (1.7976931348623157e308, (2, 1.7976931348623157e308)),
        (0.5e308, (1, 0.5e308)),
        (0.9976931348623157e+308, (2, 1e308)),
        # Very small floats:
        (1.3e-323, (2, 1.3e-323)),
        (5e-324, (1, 5e-324)),
        (9.99e-324, (2, 1e-323)),
    ]

    for std_dev, expected in cases:
        assert uncert_core.PDG_precision(std_dev) == expected
def test_repr():
    '''Test the representation of numbers with uncertainty.'''

    # 0.25 is a power of 2, so the uncertainty is represented exactly
    # as a float and the output is predictable:
    number = ufloat(3.14159265358979, 0.25)
    assert repr(number) == '3.14159265358979+/-0.25'

    number = ufloat(3.14159265358979, 0)
    assert repr(number) == '3.14159265358979+/-0'

    # A tagged variable uses a different, bracketed representation:
    tagged = ufloat(3, 1, "length")
    assert repr(tagged) == '< length = 3.0+/-1.0 >'
def test_format():
    '''Test the formatting of numbers with uncertainty.'''

    # The way NaN is formatted with F, E and G depends on the version
    # of Python (NAN for Python 2.5+ at least):
    NaN_EFG = '%F' % float('nan')

    # !! The way NaN is formatted with F, E and G might depend on the
    # version of Python, if it is like NaN (could be tested with
    # Python 2.3 or 2.4 vs Python 2.7):
    Inf_EFG = '%F' % float('inf')

    # Tests of each point of the docstring of
    # AffineScalarFunc.__format__() in turn, mostly in the same order.

    # The LaTeX tests do not use the customization of
    # uncert_core.GROUP_SYMBOLS and uncert_core.EXP_PRINT: this
    # way, problems in the customization themselves are caught.

    # NOTE(review): several expected strings below exercise width /
    # alignment and therefore contain runs of spaces; the space counts
    # in this copy look collapsed — verify them against the upstream
    # test file.

    tests = {  # (Nominal value, uncertainty): {format: result,...}

        # Usual float formatting, and individual widths, etc.:
        (3.1415, 0.0001): {
            '*^+7.2f': '*+3.14*+/-*0.00**',
            '+07.2f': '+003.14+/-0000.00',  # 0 fill
            '>10f': ' 3.141500+/- 0.000100',  # Width and align
            '11.3e': ' 3.142e+00+/- 0.000e+00',  # Duplicated exponent
            '0.4e': '3.1415e+00+/-0.0000e+00'  # Forced double exponent
        },

        # Full generalization of float formatting:
        # NOTE(review): duplicate key — this entry overwrites the one
        # just above, whose format tests are therefore never run.
        (3.1415, 0.0001): {
            '+09.2uf': '+03.14150+/-000.00010',
            # Alignment is not available with the % formatting
            # operator of Python < 2.6:
            '*^+9.2uf': '+3.14150*+/-*0.00010*',
            '>9f': ' 3.14150+/- 0.00010'  # Width and align
        },

        # Number of digits of the uncertainty fixed:
        (123.456789, 0.00123): {
            '.1uf': '123.457+/-0.001',
            '.2uf': '123.4568+/-0.0012',
            '.3uf': '123.45679+/-0.00123',
            '.2ue': '(1.234568+/-0.000012)e+02'
        },
        # Sign handling:
        (-123.456789, 0.00123): {
            '.1uf': '-123.457+/-0.001',
            '.2uf': '-123.4568+/-0.0012',
            '.3uf': '-123.45679+/-0.00123',
            '.2ue': '(-1.234568+/-0.000012)e+02'
        },

        # Uncertainty larger than the nominal value:
        (12.3, 456.78): {
            '': '12+/-457',
            '.1uf': '12+/-457',
            '.4uf': '12.3+/-456.8'
        },

        # ... Same thing, but with an exponent:
        # NOTE(review): duplicate key — overwrites the entry above.
        (12.3, 456.78): {
            '.1ue': '(0+/-5)e+02',
            '.4ue': '(0.123+/-4.568)e+02',
            '.4ueS': '0.123(4.568)e+02'
        },

        (23456.789123, 1234.56789123): {
            '.6gS': '23456.8(1234.6)'
        },

        # Test of the various float formats: the nominal value should
        # have a similar representation as if it were directly
        # represented as a float:
        (1234567.89, 0.1): {
            '.0e': '(1+/-0)e+06',
            'e': '(1.23456789+/-0.00000010)e+06',
            'E': '(1.23456789+/-0.00000010)E+06',
            'f': '1234567.89+/-0.10',
            'F': '1234567.89+/-0.10',
            'g': '1234567.89+/-0.10',
            'G': '1234567.89+/-0.10',
            '%': '(123456789+/-10)%'
        },
        (1234567.89, 4.3): {
            'g': '1234568+/-4'
        },
        (1234567.89, 43): {  # Case where g triggers the exponent notation
            'g': '(1.23457+/-0.00004)e+06',
            'G': '(1.23457+/-0.00004)E+06'
        },

        # NOTE(review): third occurrence of this key — overwrites the
        # two earlier (3.1415, 0.0001) entries above.
        (3.1415, 0.0001): {
            '+09.2uf': '+03.14150+/-000.00010'
        },

        (1234.56789, 0.1): {
            '.0f': '(1234+/-0.)',  # Approximate error indicated with "."
            'e': '(1.23456+/-0.00010)e+03',
            'E': '(1.23456+/-0.00010)E+03',
            'f': '1234.57+/-0.10',
            'F': '1234.57+/-0.10',
            # NOTE(review): 'f' and 'F' are repeated below inside this
            # same dict literal — the duplicates are redundant:
            'f': '1234.57+/-0.10',
            'F': '1234.57+/-0.10',
            '%': '123457+/-10%'
        },

        # Percent notation:
        (0.42, 0.0055): {
            # Because '%' does 0.0055*100, the value
            # 0.5499999999999999 is obtained, which rounds to 0.5. The
            # original rounded value is 0.006. The same behavior is
            # found in Python 2.7: '{:.1%}'.format(0.0055) is '0.5%'.
            '.1u%': '(42.0+/-0.5)%',
            '.1u%S': '42.0(5)%',
            '%P': u'(42.0±0.5)%'
        },

        # Particle Data Group automatic convention, including limit cases:
        (1.2345678, 0.354): {'': '1.23+/-0.35'},
        (1.2345678, 0.3549): {'': '1.23+/-0.35'},
        (1.2345678, 0.355): {'': '1.2+/-0.4'},
        (1.5678, 0.355): {'': '1.6+/-0.4'},
        (1.2345678, 0.09499): {'': '1.23+/-0.09'},
        (1.2345678, 0.095): {'': '1.23+/-0.10'},

        # Automatic extension of the uncertainty up to the decimal
        # point:
        (1000, 123): {
            '.1uf': '1000+/-123',
            # The nominal value has 1 <= mantissa < 10. The precision
            # is the number of significant digits of the uncertainty:
            '.1ue': '(1.0+/-0.1)e+03'
        },

        # Spectroscopic notation:
        (-1.23, 3.4): {
            'S': '-1.2(3.4)',
            '.2ufS': '-1.2(3.4)',
            '.3ufS': '-1.23(3.40)',
        },
        (-123.456, 0.123): {
            'S': '-123.46(12)',
            '.1ufS': '-123.5(1)',
            '.2ufS': '-123.46(12)',
            '.3ufS': '-123.456(123)',
        },
        (-123.456, 0.567): {
            'S': '-123.5(6)',
            '.1ufS': '-123.5(6)',
            '.2ufS': '-123.46(57)',
            '.3ufS': '-123.456(567)',
        },
        (-123.456, 0.004): {
            # The decimal point shows that the uncertainty is not
            # exact:
            '.2fS': '-123.46(0.00)'
        },

        # LaTeX notation:
        #
        # NOTE(review): duplicate key — overwrites the earlier
        # (1234.56789, 0.1) entry, whose tests are never run.
        (1234.56789, 0.1): {
            'eL': r'\left(1.23457 \pm 0.00010\right) \times 10^{3}',
            'EL': r'\left(1.23457 \pm 0.00010\right) \times 10^{3}',
            'fL': '1234.57 \pm 0.10',
            'FL': '1234.57 \pm 0.10',
            # NOTE(review): 'fL' and 'FL' repeated within this dict:
            'fL': '1234.57 \pm 0.10',
            'FL': '1234.57 \pm 0.10',
            '%L': r'\left(123457 \pm 10\right) \%'
        },
        #
        # ... combined with the spectroscopic notation:
        # NOTE(review): duplicate key — overwrites the earlier
        # (-1.23, 3.4) entry.
        (-1.23, 3.4): {
            'SL': '-1.2(3.4)',
            'LS': '-1.2(3.4)',
            '.2ufSL': '-1.2(3.4)',
            '.2ufLS': '-1.2(3.4)'
        },

        # Special cases for the uncertainty (0, nan) and format
        # strings (extension S, L, U,..., global width, etc.).
        #
        # Python 3.2 and 3.3 give 1.4e-12*1e+12 = 1.4000000000000001
        # instead of 1.4 for Python 3.1. The problem does not appear
        # with 1.2, so 1.2 is used.
        (-1.2e-12, 0): {
            '12.2gPL': u' -1.2×10⁻¹²± 0',
            # Pure "width" formats are not accepted by the % operator,
            # and only %-compatible formats are accepted, for Python <
            # 2.6:
            '13S': ' -1.2(0)e-12',
            '10P': u'-1.2×10⁻¹²± 0',
            'L': r'\left(-1.2 \pm 0\right) \times 10^{-12}',
            # No factored exponent, LaTeX
            '1L': r'-1.2 \times 10^{-12} \pm 0',
            'SL': r'-1.2(0) \times 10^{-12}',
            'SP': u'-1.2(0)×10⁻¹²'
        },

        # Python 3.2 and 3.3 give 1.4e-12*1e+12 = 1.4000000000000001
        # instead of 1.4 for Python 3.1. The problem does not appear
        # with 1.2, so 1.2 is used.
        (-1.2e-12, float('nan')): {
            '.2uG': '(-1.2+/-%s)E-12' % NaN_EFG,  # u ignored, format used
            '15GS': ' -1.2(%s)E-12' % NaN_EFG,
            'SL': r'-1.2(\mathrm{nan}) \times 10^{-12}',  # LaTeX NaN
            # Pretty-print priority, but not for NaN:
            'PSL': u'-1.2(\mathrm{nan})×10⁻¹²',
            'L': r'\left(-1.2 \pm \mathrm{nan}\right) \times 10^{-12}',
            # Uppercase NaN and LaTeX:
            '.1EL': (r'\left(-1.2 \pm \mathrm{%s}\right) \times 10^{-12}'
                     % NaN_EFG),
            '10': ' -1.2e-12+/- nan',
            '15S': ' -1.2(nan)e-12'
        },

        (3.14e-10, 0.01e-10): {
            # Character (Unicode) strings:
            u'P': u'(3.140±0.010)×10⁻¹⁰',  # PDG rules: 2 digits
            u'PL': u'(3.140±0.010)×10⁻¹⁰',  # Pretty-print has higher priority
            # Truncated non-zero uncertainty:
            '.1e': '(3.1+/-0.0)e-10',
            '.1eS': '3.1(0.0)e-10'
        },

        # Some special cases:
        (1, float('nan')): {
            'g': '1+/-nan',
            'G': '1+/-%s' % NaN_EFG,
            '%': '(100.000000+/-nan)%',  # The % format type is like f
            # Should be the same as '+05', for floats, but is not, in
            # Python 2.7:
            '+05g': '+0001+/-00nan',
            # 5 is the *minimal* width, 6 is the default number of
            # digits after the decimal point:
            '+05%': '(+100.000000+/-00nan)%',
            # There is a difference between '{}'.format(1.) and
            # '{:g}'.format(1.), which is not fully obvious in the
            # documentation, which indicates that a None format type
            # is like g. The reason is that the empty format string is
            # actually interpreted as str(), and that str() does not
            # have to behave like g ('{}'.format(1.234567890123456789)
            # and '{:g}'.format(1.234567890123456789) are different).
            '': '1.0+/-nan',
            # This is ugly, but consistent with
            # '{:+05}'.format(float('nan')) and format(1.) (which
            # differs from format(1)!):
            '+05': '+01.0+/-00nan'
        },

        (9.9, 0.1): {
            '.1ue': '(9.9+/-0.1)e+00',
            '.0fS': '10(0.)'
        },

        (9.99, 0.1): {
            # The precision has an effect on the exponent, like for
            # floats:
            '.2ue': '(9.99+/-0.10)e+00',  # Same exponent as for 9.99 alone
            '.1ue': '(1.00+/-0.01)e+01'  # Same exponent as for 9.99 alone
        },

        # 0 uncertainty: nominal value displayed like a float:
        (1.2345, 0): {
            '.2ue': '(1.23+/-0)e+00',
            '1.2ue': '1.23e+00+/-0',  # No factored exponent
            '.2uf': '1.23+/-0',
            '.2ufS': '1.23(0)',
            '.2fS': '1.23(0)',
            'g': '1.2345+/-0',
            '': '1.2345+/-0'
        },

        # Alignment and filling characters (supported in Python 2.6+):
        (3.1415e10, 0): {
            '<15': '31415000000.0 +/-0 ',
            '<20S': '31415000000.0(0) ',
            # Trying to trip the format parsing with a fill character
            # which is an alignment character:
            '=>15': '==31415000000.0+/-==============0'
        },

        (1234.56789, 0): {
            '1.2ue': '1.23e+03+/-0',  # u ignored
            '1.2e': '1.23e+03+/-0',
            # Default precision = 6
            'eL': r'\left(1.234568 \pm 0\right) \times 10^{3}',
            'EL': r'\left(1.234568 \pm 0\right) \times 10^{3}',
            'fL': '1234.567890 \pm 0',
            'FL': '1234.567890 \pm 0',
            '%L': r'\left(123456.789000 \pm 0\right) \%'
        },

        (1e5, 0): {
            'g': '100000+/-0'
        },

        (1e6, 0): {
            # A default precision of 6 is used because the uncertainty
            # cannot be used for defining a default precision (it does
            # not have a magnitude):
            'g': '(1+/-0)e+06'
        },

        (1e6+10, 0): {
            # A default precision of 6 is used because the uncertainty
            # cannot be used for defining a default precision (it does
            # not have a magnitude):
            'g': '(1.00001+/-0)e+06'
        },

        # Rounding of the uncertainty that "changes" the number of
        # significant digits:
        (1, 0.994): {
            '.3uf': '1.000+/-0.994',
            '.2uf': '1.00+/-0.99',
            '.1uf': '1+/-1'  # Discontinuity in the number of digits
        },

        (12.3, 2.3): {
            '.2ufS': '12.3(2.3)'  # Decimal point on the uncertainty
        },

        # NOTE(review): duplicate key — overwrites the entry above, so
        # the '.2ufS' case is never run.
        (12.3, 2.3): {
            '.1ufS': '12(2)'  # No decimal point on the uncertainty
        },

        (0, 0): {  # Make defining the first significant digit problematic
            '.1f': '0.0+/-0',  # Simple float formatting
            'g': '0+/-0'
        },

        (1.2e-34, 5e-67): {
            '.6g': '(1.20000+/-0.00000)e-34',
            '13.6g': ' 1.20000e-34+/- 0.00000e-34',
            '13.6G': ' 1.20000E-34+/- 0.00000E-34',
            '.6GL': r'\left(1.20000 \pm 0.00000\right) \times 10^{-34}'
        },

        (float('nan'), 100): {  # NaN *nominal value*
            '': 'nan+/-100.0',  # Like '{}'.format(100.)
            'g': 'nan+/-100',  # Like '{:g}'.format(100.)
            '.1e': '(nan+/-1.0)e+02',  # Similar to 1±nan
            '.1E': '(%s+/-1.0)E+02' % NaN_EFG,
            '.1ue': '(nan+/-1)e+02',
            '10.1e': ' nan+/- 1.0e+02'
        },
        (float('nan'), 1e8): {  # NaN *nominal value*
            '': 'nan+/-100000000.0',  # Like '{}'.format(1e8)
            'g': '(nan+/-1)e+08',  # Like '{:g}'.format(1e8)
            '.1e': '(nan+/-1.0)e+08',
            '.1E': '(%s+/-1.0)E+08' % NaN_EFG,
            '.1ue': '(nan+/-1)e+08',
            '10.1e': ' nan+/- 1.0e+08'  # 'nane+08' would be strange
        },
        (float('nan'), 123456789): {  # NaN *nominal value*
            '': 'nan+/-123456789.0',  # Similar to '{}'.format(123456789.)
            'g': '(nan+/-1.23457)e+08',  # Similar to '{:g}'.format(123456789.)
            '.1e': '(nan+/-1.2)e+08',
            '.1E': '(%s+/-1.2)E+08' % NaN_EFG,
            '.1ue': '(nan+/-1)e+08',
            '.1ueL': r'\left(\mathrm{nan} \pm 1\right) \times 10^{8}',
            '10.1e': ' nan+/- 1.2e+08',
            '10.1eL': r'\mathrm{nan} \pm 1.2 \times 10^{8}'
        },
        (float('nan'), float('nan')): {  # *Double* NaN
            '': 'nan+/-nan',
            '.1e': 'nan+/-nan',
            '.1E': '%s+/-%s' % (NaN_EFG, NaN_EFG),
            '.1ue': 'nan+/-nan',
            'EL': r'\mathrm{%s} \pm \mathrm{%s}' % (NaN_EFG, NaN_EFG)
        },

        (float('inf'), 100): {  # Inf *nominal value*
            '': 'inf+/-100.0',  # Like '{}'.format(100.)
            'g': 'inf+/-100',  # Like '{:g}'.format(100.)
            '.1e': '(inf+/-1.0)e+02',  # Similar to 1±inf
            '.1E': '(%s+/-1.0)E+02' % Inf_EFG,
            '.1ue': '(inf+/-1)e+02',
            '10.1e': ' inf+/- 1.0e+02'
        },
        (float('inf'), 1e8): {  # Inf *nominal value*
            '': 'inf+/-100000000.0',  # Like '{}'.format(1e8)
            'g': '(inf+/-1)e+08',  # Like '{:g}'.format(1e8)
            '.1e': '(inf+/-1.0)e+08',
            '.1E': '(%s+/-1.0)E+08' % Inf_EFG,
            '.1ue': '(inf+/-1)e+08',
            '10.1e': ' inf+/- 1.0e+08'  # 'infe+08' would be strange
        },
        (float('inf'), 123456789): {  # Inf *nominal value*
            '': 'inf+/-123456789.0',  # Similar to '{}'.format(123456789.)
            'g': '(inf+/-1.23457)e+08',  # Similar to '{:g}'.format(123456789.)
            '.1e': '(inf+/-1.2)e+08',
            '.1E': '(%s+/-1.2)E+08' % Inf_EFG,
            '.1ue': '(inf+/-1)e+08',
            '.1ueL': r'\left(\infty \pm 1\right) \times 10^{8}',
            '10.1e': ' inf+/- 1.2e+08',
            '10.1eL': r' \infty \pm 1.2 \times 10^{8}'
        },
        (float('inf'), float('inf')): {  # *Double* Inf
            '': 'inf+/-inf',
            '.1e': 'inf+/-inf',
            '.1E': '%s+/-%s' % (Inf_EFG, Inf_EFG),
            '.1ue': 'inf+/-inf',
            'EL': r'\infty \pm \infty'
        },

        # Like the tests for +infinity, but for -infinity:
        (float('-inf'), 100): {  # Inf *nominal value*
            '': '-inf+/-100.0',  # Like '{}'.format(100.)
            'g': '-inf+/-100',  # Like '{:g}'.format(100.)
            '.1e': '(-inf+/-1.0)e+02',  # Similar to 1±inf
            '.1E': '(-%s+/-1.0)E+02' % Inf_EFG,
            '.1ue': '(-inf+/-1)e+02',
            '10.1e': ' -inf+/- 1.0e+02'
        },
        (float('-inf'), 1e8): {  # Inf *nominal value*
            '': '-inf+/-100000000.0',  # Like '{}'.format(1e8)
            'g': '(-inf+/-1)e+08',  # Like '{:g}'.format(1e8)
            '.1e': '(-inf+/-1.0)e+08',
            '.1E': '(-%s+/-1.0)E+08' % Inf_EFG,
            '.1ue': '(-inf+/-1)e+08',
            '10.1e': ' -inf+/- 1.0e+08'  # 'infe+08' would be strange
        },
        (float('-inf'), 123456789): {  # Inf *nominal value*
            '': '-inf+/-123456789.0',  # Similar to '{}'.format(123456789.)
            'g': '(-inf+/-1.23457)e+08',  # Similar to '{:g}'.format(123456789.)
            '.1e': '(-inf+/-1.2)e+08',
            '.1E': '(-%s+/-1.2)E+08' % Inf_EFG,
            '.1ue': '(-inf+/-1)e+08',
            '.1ueL': r'\left(-\infty \pm 1\right) \times 10^{8}',
            '10.1e': ' -inf+/- 1.2e+08',
            '10.1eL': r' -\infty \pm 1.2 \times 10^{8}'
        },
        (float('-inf'), float('inf')): {  # *Double* Inf
            '': '-inf+/-inf',
            '.1e': '-inf+/-inf',
            '.1E': '-%s+/-%s' % (Inf_EFG, Inf_EFG),
            '.1ue': '-inf+/-inf',
            'EL': r'-\infty \pm \infty'
        },

        # The Particle Data Group convention trumps the "at least one
        # digit past the decimal point" for Python floats, but only
        # with a non-zero uncertainty:
        (724.2, 26.4): {
            '': '724+/-26'
        },
        (724, 0): {
            '': '724.0+/-0'
        },

        # More NaN and infinity, in particular with LaTeX and various
        # options:
        # NOTE(review): duplicate key — (-inf, +inf) compares equal to
        # the entry above, so this dict overwrites it and those tests
        # are never run.
        (float('-inf'), float('inf')): {
            'S': '-inf(inf)',
            'LS': '-\infty(\infty)',
            'L': '-\infty \pm \infty',
            'LP': u'-\infty±\infty',
            # The following is consistent with Python's own
            # formatting, which depends on the version of Python:
            # formatting float("-inf") with format(..., "020") gives
            # '-0000000000000000inf' with Python 2.7, but
            # '-00000000000000.0inf' with Python 2.6. However, Python
            # 2.6 gives the better, Python 2.7 form when format()ting
            # with "020g" instead, so this formatting would be better,
            # in principle, and similarly for "%020g" % ... Thus,
            # Python's format() breaks the official rule according to
            # which no format type is equivalent to "g", for
            # floats. If the better behavior was needed, internal
            # formatting could in principle force the "g" formatting
            # type when none is given; however, Python does not
            # actually fully treat the none format type in the same
            # was as the "g" format, so this solution cannot be used,
            # as it would break other formatting behaviors in this
            # code. It is thus best to mimic the native behavior of
            # none type formatting (even if it does not look so good
            # in Python 2.6).
            '020S': format(float("-inf"), '015')+'(inf)'
        },

        (-float('nan'), float('inf')): {
            'S': 'nan(inf)',
            'LS': '\mathrm{nan}(\infty)',
            'L': '\mathrm{nan} \pm \infty',
            'LP': u'\mathrm{nan}±\infty'
        },

        # Leading zeroes in the shorthand notation:
        (-2, 3): {
            "020S": "-000000000002.0(3.0)"
        }

    }

    # ',' format option: introduced in Python 2.7
    if sys.version_info >= (2, 7):

        tests.update({
            (1234.56789, 0.012): {
                ',.1uf': '1,234.57+/-0.01'
            },

            (123456.789123, 1234.5678): {
                ',f': '123,457+/-1,235',  # Particle Data Group convention
                ',.4f': '123,456.7891+/-1,234.5678'
            }
        })

    # True if we can detect that the Jython interpreter is running this code:
    try:
        jython_detected = sys.subversion[0] == 'Jython'
    except AttributeError:
        jython_detected = False

    for (values, representations) in tests.items():

        value = ufloat(*values)

        for (format_spec, result) in representations.items():

            # print "FORMATTING {} WITH '{}'".format(repr(value), format_spec)

            # Jython 2.5.2 does not always represent NaN as nan or NAN
            # in the CPython way: for example, '%.2g' % float('nan')
            # is '\ufffd'. The test is skipped, in this case:
            if jython_detected and (
                    isnan(value.std_dev) or isnan(value.nominal_value)):
                continue

            # Call that works with Python < 2.6 too:
            representation = value.format(format_spec)

            assert representation == result, (
                # The representation is used, for terminal that do not
                # support some characters like ±, and superscripts:
                'Incorrect representation %r for format %r of %r:'
                ' %r expected.'
                % (representation, format_spec, value, result))

            # An empty format string is like calling str()
            # (http://docs.python.org/2/library/string.html#formatspec):
            if not format_spec:
                assert representation == str(value), (
                    'Empty format should give the same thing as str():'
                    ' %s obtained instead of %s'
                    % (representation, str(value)))

            # Parsing back into a number with uncertainty (unless the
            # LaTeX or comma notation is used):
            if (not set(format_spec).intersection('L,*%')  # * = fill with *
                # "0nan"
                and '0nan' not in representation.lower()
                # "0inf"
                and '0inf' not in representation.lower()
                # Specific case:
                and '=====' not in representation):

                value_back = ufloat_fromstr(representation)

                # The original number and the new one should be consistent
                # with each other:
                try:

                    # The nominal value can be rounded to 0 when the
                    # uncertainty is larger (because p digits on the
                    # uncertainty can still show 0.00... for the
                    # nominal value). The relative error is infinite,
                    # so this should not cause an error:
                    if value_back.nominal_value:
                        assert numbers_close(value.nominal_value,
                                             value_back.nominal_value, 2.4e-1)

                    # If the uncertainty is zero, then the relative
                    # change can be large:
                    assert numbers_close(value.std_dev,
                                         value_back.std_dev, 3e-1)

                except AssertionError:
                    # !! The following string formatting requires
                    # str() to work (to not raise an exception) on the
                    # values (which have a non-standard class):
                    raise AssertionError(
                        'Original value %s and value %s parsed from %r'
                        ' (obtained through format specification %r)'
                        ' are not close enough'
                        % (value, value_back, representation, format_spec))
def test_unicode_format():
    '''Test of the unicode formatting of numbers with uncertainties'''
    value = ufloat(3.14159265358979, 0.25)
    # Both the plain and the pretty-print ('P') formats must embed
    # into a unicode string and yield a str:
    for spec in ('', 'P'):
        assert isinstance(u'Résultat = %s' % value.format(spec), str)
###############################################################################
# The tests below require NumPy, which is an optional package:
try:
import numpy
except ImportError:
pass
else:
def arrays_close(m1, m2, precision=1e-4):
"""
Returns True iff m1 and m2 are almost equal, where elements
can be either floats or AffineScalarFunc objects.
Two independent AffineScalarFunc objects are deemed equal if
both their nominal value and uncertainty are equal (up to the
given precision).
m1, m2 -- NumPy arrays.
precision -- precision passed through to
uncertainties.test_uncertainties.numbers_close().
"""
# ! numpy.allclose() is similar to this function, but does not
# work on arrays that contain numbers with uncertainties, because
# of the isinf() function.
for (elmt1, elmt2) in zip(m1.flat, m2.flat):
# For a simpler comparison, both elements are
# converted to AffineScalarFunc objects:
elmt1 = uncert_core.to_affine_scalar(elmt1)
elmt2 = uncert_core.to_affine_scalar(elmt2)
if not numbers_close(elmt1.nominal_value,
elmt2.nominal_value, precision):
return False
if not numbers_close(elmt1.std_dev,
elmt2.std_dev, precision):
return False
return True
    def test_numpy_comparison():
        "Comparison with a NumPy array."

        x = ufloat(1, 0.1)

        # Comparison with a different type:
        assert x != [x, x]

        # NumPy arrays can be compared, through element-wise
        # comparisons. Numbers with uncertainties should yield the
        # same kind of results as pure floats (i.e., a NumPy array,
        # etc.).

        # We test the comparison operators both for the uncertainties
        # package *and* the NumPy package:

        # Equalities, etc.: each comparison must broadcast to an array
        # of the other operand's length:
        assert len(x == numpy.arange(10)) == 10
        assert len(numpy.arange(10) == x) == 10
        assert len(x != numpy.arange(10)) == 10
        assert len(numpy.arange(10) != x) == 10
        assert len(x == numpy.array([x, x, x])) == 3
        assert len(numpy.array([x, x, x]) == x) == 3
        assert numpy.all(x == numpy.array([x, x, x]))

        # Inequalities:
        assert len(x < numpy.arange(10)) == 10
        assert len(numpy.arange(10) > x) == 10
        assert len(x <= numpy.arange(10)) == 10
        assert len(numpy.arange(10) >= x) == 10
        assert len(x > numpy.arange(10)) == 10
        assert len(numpy.arange(10) < x) == 10
        assert len(x >= numpy.arange(10)) == 10
        assert len(numpy.arange(10) <= x) == 10

        # More detailed test, that shows that the comparisons are
        # meaningful (x >= 0, but not x <= 1):
        assert numpy.all((x >= numpy.arange(3)) == [True, False, False])
def test_correlated_values():
    """
    Correlated variables.

    Test through the input of the (full) covariance matrix.
    """

    # Round trip of a single variable through its (1x1) covariance
    # matrix:
    u = uncert_core.ufloat(1, 0.1)
    cov = uncert_core.covariance_matrix([u])
    # "1" is used instead of u.nominal_value because
    # u.nominal_value might return a float. The idea is to force
    # the new variable u2 to be defined through an integer nominal
    # value:
    u2, = uncert_core.correlated_values([1], cov)
    expr = 2*u2  # Calculations with u2 should be possible, like with u

    ####################

    # Covariances between output and input variables:

    x = ufloat(1, 0.1)
    y = ufloat(2, 0.3)
    z = -3*x+y

    covs = uncert_core.covariance_matrix([x, y, z])

    # Test of the diagonal covariance elements (they must be the
    # variances, i.e. the squared standard deviations):
    assert arrays_close(
        numpy.array([v.std_dev**2 for v in (x, y, z)]),
        numpy.array(covs).diagonal())

    # "Inversion" of the covariance matrix: creation of new
    # variables:
    (x_new, y_new, z_new) = uncert_core.correlated_values(
        [x.nominal_value, y.nominal_value, z.nominal_value],
        covs,
        tags = ['x', 'y', 'z'])

    # Even the uncertainties should be correctly reconstructed:
    assert arrays_close(numpy.array((x, y, z)),
                        numpy.array((x_new, y_new, z_new)))

    # ... and the covariances too:
    assert arrays_close(
        numpy.array(covs),
        numpy.array(uncert_core.covariance_matrix([x_new, y_new, z_new])))

    # The linear relationship z = -3*x + y must also be recovered:
    assert arrays_close(
        numpy.array([z_new]), numpy.array([-3*x_new+y_new]))

    ####################

    # ... as well as functional relations:

    u = ufloat(1, 0.05)
    v = ufloat(10, 0.1)
    sum_value = u+2*v

    # Covariance matrices:
    cov_matrix = uncert_core.covariance_matrix([u, v, sum_value])

    # Correlated variables can be constructed from a covariance
    # matrix, if NumPy is available:
    (u2, v2, sum2) = uncert_core.correlated_values(
        [x.nominal_value for x in [u, v, sum_value]],
        cov_matrix)

    # arrays_close() is used instead of numbers_close() because
    # it compares uncertainties too:
    assert arrays_close(numpy.array([u]), numpy.array([u2]))
    assert arrays_close(numpy.array([v]), numpy.array([v2]))
    assert arrays_close(numpy.array([sum_value]), numpy.array([sum2]))
    # The functional relation sum = u + 2*v must be recovered exactly:
    assert arrays_close(numpy.array([0]),
                        numpy.array([sum2-(u2+2*v2)]))

    # Spot checks of the correlation matrix:
    corr_matrix = uncert_core.correlation_matrix([u, v, sum_value])
    assert numbers_close(corr_matrix[0,0], 1)
    assert numbers_close(corr_matrix[1,2], 2*v.std_dev/sum_value.std_dev)

    ####################

    # Test of numerical robustness despite wildly different
    # orders of magnitude (see
    # https://github.com/lebigot/uncertainties/issues/95):
    cov = numpy.diag([1e-70, 1e-70, 1e10])
    cov[0, 1] = cov[1, 0] = 0.9e-70
    cov[[0, 1], 2] = -3e-34
    cov[2, [0, 1]] = -3e-34
    variables = uncert_core.correlated_values([0]*3, cov)

    # Since the numbers are very small, we need to compare them
    # in a stricter way, that handles the case of a 0 variance
    # in `variables` (hence the 1e66 rescaling before comparing):
    assert numbers_close(
        1e66*cov[0,0], 1e66*variables[0].s**2, tolerance=1e-5)
    assert numbers_close(
        1e66*cov[1,1], 1e66*variables[1].s**2, tolerance=1e-5)

    ####################

    # 0 variances are a bit special, since the correlation matrix
    # cannot be calculated naively, so we test that there is no
    # specific problem in this case:
    cov = numpy.diag([0, 0, 10])
    nom_values = [1, 2, 3]
    variables = uncert_core.correlated_values(nom_values, cov)

    # Each reconstructed variable must carry its requested nominal
    # value and variance (including the exactly-zero variances):
    for (variable, nom_value, variance) in zip(
        variables, nom_values, cov.diagonal()):
        assert numbers_close(variable.n, nom_value)
        assert numbers_close(variable.s**2, variance)

    assert arrays_close(
        cov,
        numpy.array(uncert_core.covariance_matrix(variables)))
def test_correlated_values_correlation_mat():
    '''
    Tests the input of correlated values through their correlation
    matrix (instead of through their covariance matrix).
    '''

    a = ufloat(1, 0.1)
    b = ufloat(2, 0.3)
    c = -3*a+b

    originals = (a, b, c)

    cov_mat = uncert_core.covariance_matrix(originals)

    # The correlation matrix is derived from the covariance matrix by
    # dividing out the standard deviations on both axes:
    std_devs = numpy.sqrt(numpy.array(cov_mat).diagonal())
    corr_mat = cov_mat/std_devs/std_devs[numpy.newaxis].T

    # We make sure that the correlation matrix is indeed diagonal:
    assert (corr_mat-corr_mat.T).max() <= 1e-15
    # We make sure that there are indeed ones on the diagonal:
    assert (corr_mat.diagonal()-1).max() <= 1e-15

    # We try to recover the correlated variables through the
    # correlation matrix (not through the covariance matrix):
    nominal_values = [v.nominal_value for v in originals]
    std_devs = [v.std_dev for v in originals]
    a2, b2, c2 = uncert_core.correlated_values_norm(
        list(zip(nominal_values, std_devs)), corr_mat)

    # arrays_close() is used instead of numbers_close() because
    # it compares uncertainties too:

    # Test of individual variables:
    for (orig, recovered) in zip(originals, (a2, b2, c2)):
        assert arrays_close(numpy.array([orig]), numpy.array([recovered]))

    # Partial correlation test (the linear relation c = -3*a + b must
    # also hold between the recovered variables):
    assert arrays_close(numpy.array([0]), numpy.array([c2-(-3*a2+b2)]))

    # Test of the full covariance matrix:
    assert arrays_close(
        numpy.array(cov_mat),
        numpy.array(uncert_core.covariance_matrix([a2, b2, c2])))
|
en
| 0.786069
|
# coding=utf-8 Tests of the code in uncertainties/__init__.py. These tests can be run through the Nose testing framework. (c) 2010-2016 by <NAME> (EOL). # Standard modules # 3rd-party modules # import nose.tools # Local modules # The following information is useful for making sure that the right # version of Python is running the tests (for instance with the Travis # Continuous Integration system): ############################################################################### # Utilities for unit testing Returns True if the given floats are close enough. The given tolerance is the relative difference allowed, or the absolute difference, if one of the numbers is 0. NaN is allowed: it is considered close to itself. # !!! Python 3.5+ has math.isclose(): maybe it could be used here. # Instead of using a try and ZeroDivisionError, we do a test, # NaN could appear silently: # Symmetric form of the test: # Either x or y is zero Tests if two numbers with uncertainties are close, as random variables: this is stronger than testing whether their nominal value and standard deviation are close. The tolerance is applied to both the nominal value and the standard deviation of the difference between the numbers. Checks the derivatives of a function 'func' (as returned by the wrap() wrapper), by comparing them to the 'numerical_derivatives' functions. Raises a DerivativesDiffer exception in case of problem. These functions all take the number of arguments listed in num_args_list. If num_args is None, it is automatically obtained. Tests are done on random arguments. # print "Testing", func.__name__ # Detecting automatically the correct number of arguments is not # always easy (because not all values are allowed, etc.): # Both numbers of arguments are tested # We loop until we find reasonable function arguments: # We get the number of arguments by trial and error: #! 
Giving integer arguments is good for preventing # certain functions from failing even though num_args # is their correct number of arguments # (e.g. math.ldexp(x, i), where i must be an integer) # Not the right number of arguments # No error # num_args is a good number of arguments for func: # Argument numbers that will have a random integer value: # The second argument must be an integer: # We include negative numbers, for more thorough tests: # 'args', but as scalar values: # Some functions yield simple Python constants, after # wrapping in wrap(): no test has to be performed. # Some functions also yield tuples... # We compare all derivatives: # Some arguments might not be differentiable: # This message is useful: the user can see that # tests are really performed (instead of not being # performed, silently): #%d of %s at %s" % ( # It is possible that the result is NaN: #%d of function '%s' may be" # Arguments out of range, or of wrong type # Factorial(real) lands here: # We try with different arguments # Some arguments might have to be integers, for instance: # Another argument might be forced to be an integer: # We have found reasonable arguments, and the test passed: ############################################################################### Tests the various means of constructing a constant number with uncertainty *without a string* (see test_ufloat_fromstr(), for this). ## Simple construction: # ... with tag as positional argument: # ... with tag keyword: ## Comparison with the obsolete tuple form: # The following tuple is stored in a variable instead of being # repeated in the calls below, so that the automatic code update # does not replace ufloat((3, 0.14)) by ufloat(3, 14): the goal # here is to make sure that the obsolete form gives the same # result as the new form. 
# Obsolete representation # Obsolete # With tag as positional argument: # Obsolete # With tag keyword: # Obsolete # Negative standard deviations should be caught in a nice way # (with the right exception): # Obsolete form: ## Incorrect forms should not raise any deprecation warning, but ## raise an exception: # Form that has never been allowed # String representation, and numerical values: # (Nominal value, error) # Spaces ignored # The following tests that the ufloat() routine does # not consider '31' like the tuple ('3', '1'), which would # make it expect two numbers (instead of 2 1-character # strings): # Global exponent: ## Pretty-print notation: # ± sign, global exponent (not pretty-printed): # ± sign, individual exponent: # ± sign, times symbol, superscript (= full pretty-print): # NaN uncertainty: # NaN value: # "Double-floats" # Special float representation: # Without tag: # With a tag as positional argument: # With a tag as keyword argument: ## Obsolete forms # Obsolete # Call with a tag list argument: # Obsolete # Call with a tag keyword argument: # Obsolete ############################################################################### # Test of correctness of the fixed (usually analytical) derivatives: Pre-calculated derivatives for operations on AffineScalarFunc. Makes sure that the derivatives for function '__op__' of class AffineScalarFunc, which takes num_args arguments, are correct. If num_args is None, a correct value is calculated. # The __neg__ etc. methods of AffineScalarFunc only apply, # by definition, to AffineScalarFunc objects: we first map # possible scalar arguments (used for calculating # derivatives) to AffineScalarFunc objects: # Operators that take 1 value: # Operators that take 2 values: # Additional, more complex checks, for use with the nose unit testing # framework. 
# y must not copy the dependence on x # Copy tests on expressions: # t depends on x: # The relationship between the copy of an expression and the # original variables should be preserved: # Shallow copy: the variables on which t depends are not copied: # However, the relationship between a deep copy and the original # variables should be broken, since the deep copy created new, # independent variables: # Test of implementations with weak references: # Weak references: destroying a variable should never destroy the # integrity of its copies (which would happen if the copy keeps a # weak reference to the original, in its derivatives member: the # weak reference to the original would become invalid): ## Classes for the pickling tests (put at the module level, so that ## they can be unpickled): # Subclass without slots: # Subclass with slots defined by a tuple: # Subclass with slots defined by a string: # Pickling creates copies ## Tests with correlations and AffineScalarFunc objects: # Correlations must be preserved: ## Tests with subclasses: # Pickling test with possibly uninitialized slots: # Unpickling test: # Must exist (from the slots of the parent class): # Must exist ## # Corner case test: when an attribute is present both in __slots__ # and in __dict__, it is first looked up from the slots # (references: # http://docs.python.org/2/reference/datamodel.html#invoking-descriptors, # http://stackoverflow.com/a/15139208/42973). 
As a consequence, # the pickling process must pickle the correct value (i.e., not # the value from __dict__): # Corner case: __dict__ key which is also a slot name (it is # shadowed by the corresponding slot, so this is very unusual, # though): # Additional __dict__ attribute: # We make sure that the data is still there and untouched: ## # Corner case that should have no impact on the code but which is # not prevented by the documentation: case of constant linear # terms (the potential gotcha is that if the linear_combo # attribute is empty, __getstate__()'s result could be false, and # so __setstate__() would not be called and the original empty # linear combination would not be set in linear_combo. # We perform all operations on floats, because derivatives can # otherwise be meaningless: # All errors are supposed to be small, so the ufloat() # in x violates the assumption. Therefore, the following is # correct: # Operations on quantities equivalent to Python numbers must still # be correct: # This is first given to int.__lt__() # One constraint is that usual Python code for inequality testing # still work in a reasonable way (for instance, it is generally # desirable that functions defined by different formulas on # different intervals can still do "if 0 < x < 1:...". This # supposes again that errors are "small" (as for the estimate of # the standard error). 
# The limit case is not obvious: # Comparaison between Variable and AffineScalarFunc: # Comparaison between 2 _different_ AffineScalarFunc objects # representing the same value: # With uncorrelated result that have the same behavior (value and # standard error): # Comparaison between 2 _different_ Variable objects # that are uncorrelated: # Comparison to other types should work: # Not comparable # Comparable, even though the types are different #################### # Checks of the semantics of logical operations: they return True # iff they are always True when the parameters vary in an # infinitesimal interval inside sigma (sigma == 0 is a special # case): Takes two Variable objects. Fails if any comparison operation fails to follow the proper semantics: a comparison only returns True if the correspond float comparison results are True for all the float values taken by the variables (of x and y) when they vary in an infinitesimal neighborhood within their uncertainty. This test is stochastic: it may, exceptionally, fail for correctly implemented comparison operators. Returns a random value for Variable var, in an infinitesimal interval withing its uncertainty. The case of a zero uncertainty is special. 
# All operations are tested: # Python 2.3's floats don't have __ne__ # Determination of the correct truth value of func(x, y): # The "main" value is an important particular case, and # the starting value for the final result # (correct_result): # Many points checked # Almost all results must be True, for the final value # to be True: # 1 exception is considered OK: # With different numbers: # Special number # Special number # With identical numbers: # Only infinitseimal neighborhood are used # Obsolete function, protected against automatic modification: # Obsolete # Obsolete call # Case of AffineScalarFunc objects: # Details on the sources of error: # 'a' and 'x' # Derivative values should be available: # Modification of the standard deviation of variables: # New error contribution! # Calculated values with uncertainties should not have a settable # standard deviation: # Calculation of deviations in units of the standard deviations: # "In units of the standard deviation" is not always meaningful: # Normal behavior # Correlations cancel "naive" additions of uncertainties: Coercion of Variable object to a simple float. The coercion should be impossible, like for complex numbers. Wrap a function that takes only positional-or-keyword parameters. # Like f_auto_unc, but does not accept numbers with uncertainties: ### Automatic numerical derivatives: ## Fully automatic numerical derivatives: # Call with keyword arguments: ## Automatic additional derivatives for non-defined derivatives, ## and explicit None derivative: # No derivative for y # Call with keyword arguments: ### Explicit derivatives: ## Fully defined derivatives: # Call with keyword arguments: ## Automatic additional derivatives for non-defined derivatives: # No derivative for y # Call with keyword arguments: Wrap a function that takes only positional-or-keyword and var-positional parameters. 
# Like f_auto_unc, but does not accept numbers with uncertainties: # var-positional parameters ### Automatic numerical derivatives: ## Fully automatic numerical derivatives: ## Automatic additional derivatives for non-defined derivatives, ## and explicit None derivative: # No derivative for y ### Explicit derivatives: ## Fully defined derivatives: ## Automatic additional derivatives for non-defined derivatives: # No derivative for y: Wrap a function that takes only positional-or-keyword and var-keyword parameters. # Like f_auto_unc, but does not accept numbers with uncertainties: # Arguments not in signature ### Automatic numerical derivatives: ## Fully automatic numerical derivatives: # Call with keyword arguments: ## Automatic additional derivatives for non-defined derivatives, ## and explicit None derivative: # No derivative for positional-or-keyword parameter y, no # derivative for optional-keyword parameter z: # Call with keyword arguments: # No derivative for positional-or-keyword parameter y, no # derivative for optional-keyword parameter z: # Call with keyword arguments: # No derivative for positional-or-keyword parameter y, derivative # for optional-keyword parameter z: # Call with keyword arguments: ### Explicit derivatives: ## Fully defined derivatives: # Call with keyword arguments: ## Automatic additional derivatives for non-defined derivatives: # No derivative for y or z: # Call with keyword arguments: Wrap a function that takes positional-or-keyword, var-positional and var-keyword parameters. 
# Like f_auto_unc, but does not accept numbers with uncertainties: # Arguments not in signature ### Automatic numerical derivatives: ## Fully automatic numerical derivatives: ## Automatic additional derivatives for non-defined derivatives, ## and explicit None derivative: # No derivative for positional-or-keyword parameter y, no # derivative for optional-keyword parameter z: # No derivative for positional-or-keyword parameter y, no # derivative for optional-keyword parameter z: # No derivative for positional-or-keyword parameter y, derivative # for optional-keyword parameter z: ### Explicit derivatives: ## Fully defined derivatives: ## Automatic additional derivatives for non-defined derivatives: # No derivative for y or z: Test uncertainty-aware functions obtained through wrapping. ######################################## # Function which can automatically handle numbers with # uncertainties: # We make sure that this function is only ever called with # numbers with no uncertainty (since it is wrapped): ######################################## # Test of a wrapped function that only calls the original # function: it should obtain the exact same result: # 1 == 1 +/- 0, so the type must be checked too: ######################################## # Call with uncertainties: # The random variables must be the same (full correlation): ######################################## # Non-numerical arguments, and explicit and implicit derivatives: # No deriv. for u Tests wrap() on functions with keyword arguments. Includes both wrapping a function that takes optional keyword arguments and calling a wrapped function with keyword arguments (optional or not). # Version of f() that automatically works with numbers with # uncertainties: # We also add keyword arguments in the function which is wrapped: # We make sure that f is not called directly with a number with # uncertainty: ######################################## # We make sure that analytical derivatives are indeed used. 
We # also test the automatic handling of additional *args arguments # beyond the number of supplied derivatives. # The derivatives must be perfectly identical: # The *args parameter of f() is given as a keyword argument, so as # to try to confuse the code: # Derivatives supplied through the keyword-parameter dictionary of # derivatives, and also derivatives supplied for the # var-positional arguments (*args[0]): # The derivatives should be exactly the same, because they are # obtained with the exact same analytic formula: ######################################## # Making sure that user-supplied derivatives are indeed called: Raised to signal that a function is indeed called. ############################################################################### # std_dev for Variable and AffineScalarFunc objects: # std_dev for other objects: ############################################################################### # Diagonal elements are simple: # Non-diagonal elements: ############################################################################### Checks all cases for the value and derivatives of x**p. Checks all cases for the value and derivatives of power-like operator op (op is typically the built-in pow(), or math.pow()). Checks only the details of special results like 0, 1 or NaN). Different cases for the value of x**p and its derivatives are tested by dividing the (x, p) plane with: - x < 0, x = 0, x > 0 - p integer or not, p < 0, p = 0, p > 0 (not all combinations are distinct: for instance x > 0 gives identical formulas for all p). ## negative**integer # Limit cases: ## negative**non-integer ## zero**... # Special cases: ## positive**...: this is a quite regular case where the value and ## the derivatives are all defined. ############################################################################### Checks special cases of x**p. 
# We want the same behavior for numbers with uncertainties and for # math.pow() at their nominal values: # http://stackoverflow.com/questions/10282674/difference-between-the-built-in-pow-and-math-pow-for-floats-in-python # The reason why it should also fail in Python 3 is that the # result of Python 3 is a complex number, which uncertainties # does not handle (no uncertainties on complex numbers). In # Python 2, this should always fail, since Python 2 does not # know how to calculate it. Checks special cases of the uncertainty power operator op (where op is typically the built-in pow or uncertainties.umath.pow). The values x = 0, x = 1 and x = NaN are special, as are null, integral and NaN values of p. # The outcome of 1**nan and nan**0 was undefined before Python # 2.6 (http://docs.python.org/library/math.html#math.pow): # …**0 == 1.0: # …**zero: # one**… == 1.0 # … with two numbers with uncertainties: # 1**… == 1.0: Checks special cases of the built-in pow() power operator. Checks special cases of the uncertainty power operator op (where op is typically the built-in pow or uncertainties.umath.pow), by comparing its results to the reference power operator ref_op (which is typically the built-in pow or math.pow). # Negative numbers with uncertainty can be exponentiated to an # integral power: # Case of numbers with no uncertainty: should give the same result # as numbers with uncertainties: ############################################################################### Test of the calculation of the number of significant digits for the uncertainty. # The 3 cases of the rounding rules are covered in each case: # Very big floats: # Very small floats: Test the representation of numbers with uncertainty. # The uncertainty is a power of 2, so that it can be exactly # represented: # Tagging: Test the formatting of numbers with uncertainty. # The way NaN is formatted with F, E and G depends on the version # of Python (NAN for Python 2.5+ at least): # !! 
The way NaN is formatted with F, E and G might depend on the # version of Python, if it is like NaN (could be tested with # Python 2.3 or 2.4 vs Python 2.7): # Tests of each point of the docstring of # AffineScalarFunc.__format__() in turn, mostly in the same order. # The LaTeX tests do not use the customization of # uncert_core.GROUP_SYMBOLS and uncert_core.EXP_PRINT: this # way, problems in the customization themselves are caught. # (Nominal value, uncertainty): {format: result,...} # Usual float formatting, and individual widths, etc.: # 0 fill # Width and align # Duplicated exponent # Forced double exponent # Full generalization of float formatting: # Alignment is not available with the % formatting # operator of Python < 2.6: # Width and align # Number of digits of the uncertainty fixed: # Sign handling: # Uncertainty larger than the nominal value: # ... Same thing, but with an exponent: # Test of the various float formats: the nominal value should # have a similar representation as if it were directly # represented as a float: # Case where g triggers the exponent notation # Approximate error indicated with "." # Percent notation: # Because '%' does 0.0055*100, the value # 0.5499999999999999 is obtained, which rounds to 0.5. The # original rounded value is 0.006. The same behavior is # found in Python 2.7: '{:.1%}'.format(0.0055) is '0.5%'. # Particle Data Group automatic convention, including limit cases: # Automatic extension of the uncertainty up to the decimal # point: # The nominal value has 1 <= mantissa < 10. The precision # is the number of significant digits of the uncertainty: # Spectroscopic notation: # The decimal point shows that the uncertainty is not # exact: # LaTeX notation: # # # ... combined with the spectroscopic notation: # Special cases for the uncertainty (0, nan) and format # strings (extension S, L, U,..., global width, etc.). # # Python 3.2 and 3.3 give 1.4e-12*1e+12 = 1.4000000000000001 # instead of 1.4 for Python 3.1. 
The problem does not appear # with 1.2, so 1.2 is used. # Pure "width" formats are not accepted by the % operator, # and only %-compatible formats are accepted, for Python < # 2.6: # No factored exponent, LaTeX # Python 3.2 and 3.3 give 1.4e-12*1e+12 = 1.4000000000000001 # instead of 1.4 for Python 3.1. The problem does not appear # with 1.2, so 1.2 is used. # u ignored, format used # LaTeX NaN # Pretty-print priority, but not for NaN: # Uppercase NaN and LaTeX: # Character (Unicode) strings: # PDG rules: 2 digits # Pretty-print has higher priority # Truncated non-zero uncertainty: # Some special cases: # The % format type is like f # Should be the same as '+05', for floats, but is not, in # Python 2.7: # 5 is the *minimal* width, 6 is the default number of # digits after the decimal point: # There is a difference between '{}'.format(1.) and # '{:g}'.format(1.), which is not fully obvious in the # documentation, which indicates that a None format type # is like g. The reason is that the empty format string is # actually interpreted as str(), and that str() does not # have to behave like g ('{}'.format(1.234567890123456789) # and '{:g}'.format(1.234567890123456789) are different). # This is ugly, but consistent with # '{:+05}'.format(float('nan')) and format(1.) 
(which # differs from format(1)!): # The precision has an effect on the exponent, like for # floats: # Same exponent as for 9.99 alone # Same exponent as for 9.99 alone # 0 uncertainty: nominal value displayed like a float: # No factored exponent # Alignment and filling characters (supported in Python 2.6+): # Trying to trip the format parsing with a fill character # which is an alignment character: # u ignored # Default precision = 6 # A default precision of 6 is used because the uncertainty # cannot be used for defining a default precision (it does # not have a magnitude): # A default precision of 6 is used because the uncertainty # cannot be used for defining a default precision (it does # not have a magnitude): # Rounding of the uncertainty that "changes" the number of # significant digits: # Discontinuity in the number of digits # Decimal point on the uncertainty # No decimal point on the uncertainty # Make defining the first significant digit problematic # Simple float formatting # NaN *nominal value* # Like '{}'.format(100.) # Like '{:g}'.format(100.) # Similar to 1±nan # NaN *nominal value* # Like '{}'.format(1e8) # Like '{:g}'.format(1e8) # 'nane+08' would be strange # NaN *nominal value* # Similar to '{}'.format(123456789.) # Similar to '{:g}'.format(123456789.) # *Double* NaN # Inf *nominal value* # Like '{}'.format(100.) # Like '{:g}'.format(100.) # Similar to 1±inf # Inf *nominal value* # Like '{}'.format(1e8) # Like '{:g}'.format(1e8) # 'infe+08' would be strange # Inf *nominal value* # Similar to '{}'.format(123456789.) # Similar to '{:g}'.format(123456789.) # *Double* Inf # Like the tests for +infinity, but for -infinity: # Inf *nominal value* # Like '{}'.format(100.) # Like '{:g}'.format(100.) # Similar to 1±inf # Inf *nominal value* # Like '{}'.format(1e8) # Like '{:g}'.format(1e8) # 'infe+08' would be strange # Inf *nominal value* # Similar to '{}'.format(123456789.) # Similar to '{:g}'.format(123456789.) 
# *Double* Inf # The Particle Data Group convention trumps the "at least one # digit past the decimal point" for Python floats, but only # with a non-zero uncertainty: # More NaN and infinity, in particular with LaTeX and various # options: # The following is consistent with Python's own # formatting, which depends on the version of Python: # formatting float("-inf") with format(..., "020") gives # '-0000000000000000inf' with Python 2.7, but # '-00000000000000.0inf' with Python 2.6. However, Python # 2.6 gives the better, Python 2.7 form when format()ting # with "020g" instead, so this formatting would be better, # in principle, and similarly for "%020g" % ... Thus, # Python's format() breaks the official rule according to # which no format type is equivalent to "g", for # floats. If the better behavior was needed, internal # formatting could in principle force the "g" formatting # type when none is given; however, Python does not # actually fully treat the none format type in the same # was as the "g" format, so this solution cannot be used, # as it would break other formatting behaviors in this # code. It is thus best to mimic the native behavior of # none type formatting (even if it does not look so good # in Python 2.6). # Leading zeroes in the shorthand notation: # ',' format option: introduced in Python 2.7 # Particle Data Group convention # True if we can detect that the Jython interpreter is running this code: # print "FORMATTING {} WITH '{}'".format(repr(value), format_spec) # Jython 2.5.2 does not always represent NaN as nan or NAN # in the CPython way: for example, '%.2g' % float('nan') # is '\ufffd'. 
The test is skipped, in this case: # Call that works with Python < 2.6 too: # The representation is used, for terminal that do not # support some characters like ±, and superscripts: # An empty format string is like calling str() # (http://docs.python.org/2/library/string.html#formatspec): # Parsing back into a number with uncertainty (unless the # LaTeX or comma notation is used): # * = fill with * # "0nan" # "0inf" # Specific case: # The original number and the new one should be consistent # with each other: # The nominal value can be rounded to 0 when the # uncertainty is larger (because p digits on the # uncertainty can still show 0.00... for the # nominal value). The relative error is infinite, # so this should not cause an error: # If the uncertainty is zero, then the relative # change can be large: # !! The following string formatting requires # str() to work (to not raise an exception) on the # values (which have a non-standard class): Test of the unicode formatting of numbers with uncertainties ############################################################################### # The tests below require NumPy, which is an optional package: Returns True iff m1 and m2 are almost equal, where elements can be either floats or AffineScalarFunc objects. Two independent AffineScalarFunc objects are deemed equal if both their nominal value and uncertainty are equal (up to the given precision). m1, m2 -- NumPy arrays. precision -- precision passed through to uncertainties.test_uncertainties.numbers_close(). # ! numpy.allclose() is similar to this function, but does not # work on arrays that contain numbers with uncertainties, because # of the isinf() function. # For a simpler comparison, both elements are # converted to AffineScalarFunc objects: # Comparison with a different type: # NumPy arrays can be compared, through element-wise # comparisons. Numbers with uncertainties should yield the # same kind of results as pure floats (i.e., a NumPy array, # etc.). 
# We test the comparison operators both for the uncertainties # package *and* the NumPy package: # Equalities, etc.: # Inequalities: # More detailed test, that shows that the comparisons are # meaningful (x >= 0, but not x <= 1): Correlated variables. Test through the input of the (full) covariance matrix. # "1" is used instead of u.nominal_value because # u.nominal_value might return a float. The idea is to force # the new variable u2 to be defined through an integer nominal # value: # Calculations with u2 should be possible, like with u #################### # Covariances between output and input variables: # Test of the diagonal covariance elements: # "Inversion" of the covariance matrix: creation of new # variables: # Even the uncertainties should be correctly reconstructed: # ... and the covariances too: #################### # ... as well as functional relations: # Covariance matrices: # Correlated variables can be constructed from a covariance # matrix, if NumPy is available: # arrays_close() is used instead of numbers_close() because # it compares uncertainties too: # Spot checks of the correlation matrix: #################### # Test of numerical robustness despite wildly different # orders of magnitude (see # https://github.com/lebigot/uncertainties/issues/95): # Since the numbers are very small, we need to compare them # in a stricter way, that handles the case of a 0 variance # in `variables`: #################### # 0 variances are a bit special, since the correlation matrix # cannot be calculated naively, so we test that there is no # specific problem in this case: Tests the input of correlated value. Test through their correlation matrix (instead of the covariance matrix). 
# We make sure that the correlation matrix is indeed diagonal: # We make sure that there are indeed ones on the diagonal: # We try to recover the correlated variables through the # correlation matrix (not through the covariance matrix): # arrays_close() is used instead of numbers_close() because # it compares uncertainties too: # Test of individual variables: # Partial correlation test: # Test of the full covariance matrix:
| 2.774126
| 3
|
52digest.py
|
shaheen-k/homework
| 0
|
6629896
|
#!/usr/bin/env python3
# 52digest.py

# Write a program that performs an EcoRI digest on the SARS-COV2 genome
# The program should have 2 arguments
# 1. The genome file
# 2. The restriction pattern
# The output should be the sizes of the restriction fragments

import re
import sys

# Offset (in characters) into each recognition-site match at which the
# cut is made.
# NOTE(review): EcoRI cuts G^AATTC, i.e. 1 base into the site; the
# original code cut 2 bases in.  The value 2 is kept so the reported
# fragment sizes are unchanged -- confirm which offset is intended.
CUT_OFFSET = 2


def read_genbank_sequence(path):
    """Return the sequence of a GenBank file as one lowercase string.

    Only lines at and after the ORIGIN marker are used; on each such
    line the first word (the base-position number) is dropped and the
    remaining sequence words are concatenated.
    """
    seq = ""
    found_origin = False
    with open(path) as fp:
        # Stream the file line by line instead of fp.readlines(),
        # which would load the whole genome file into memory at once:
        for line in fp:
            if line.startswith("ORIGIN"):
                found_origin = True
            if found_origin:
                words = line.split()       # convert to words
                seq += "".join(words[1:])  # join w/ no characters btwn
    return seq


def digest(seq, pattern, cut_offset=CUT_OFFSET):
    """Return the list of fragment lengths from cutting seq.

    A cut is made `cut_offset` characters into every match of
    `pattern`; the trailing fragment (after the last cut, or the whole
    sequence if there is no match) is always included.
    """
    sizes = []
    start = 0
    for match in re.finditer(pattern, seq):
        cut = match.start() + cut_offset
        sizes.append(cut - start)
        start = cut
    sizes.append(len(seq) - start)
    return sizes


if __name__ == "__main__":
    # 1st argument: genome file; 2nd argument: restriction pattern.
    for size in digest(read_genbank_sequence(sys.argv[1]), sys.argv[2]):
        print(size)

"""
python3 52digest.py ../Data/sars-cov2.gb gaattc
1160
10573
5546
448
2550
2592
3569
2112
1069
289?
"""
|
#!/usr/bin/env python3
# 52digest.py
import re
import sys
# Write a program that performs an EcoRI digest on the SARS-COV2 genome
# The program should have 2 arguments
# 1. The genome file
# 2. The restriction pattern
# The output should be the sizes of the restriction fragments
#File Import
# Read the GenBank file named in argv[1]; sequence data starts at the
# "ORIGIN" line, where each data line is "<offset> <seq> <seq> ...".
seq = ""
found_origin = False
with open(sys.argv[1]) as fp:
    for line in fp.readlines():
        if line.startswith("ORIGIN"):
            found_origin = True
        if found_origin:
            words = line.split()  #convert to words
            seq += "".join(words[1:])  #join w/ no characters btwn
#genome = 'attatgaattcattagaattcattatcg'
#Restriction Enzyme Cut
# Each match of the pattern (argv[2]) marks a cut site; fragment length runs
# from the previous cut to match.start()+2.
# NOTE(review): EcoRI (G^AATTC) cuts after the first base, i.e. offset +1 --
# the +2 used here reproduces the recorded output below; confirm intent.
pattern = sys.argv[2]
start = 0
for match in re.finditer(pattern, seq):
    print(len(seq[start:match.start()+2]))
    start = match.start()+2
print(len(seq[start:]))
"""
python3 52digest.py ../Data/sars-cov2.gb gaattc
1160
10573
5546
448
2550
2592
3569
2112
1069
289?
"""
|
en
| 0.473859
|
#!/usr/bin/env python3 # 52digest.py # Write a program that performs an EcoRI digest on the SARS-COV2 genome # The program should have 2 arguments # 1. The genome file # 2. The restriction pattern # The output should be the sizes of the restriction fragments #File Import #convert to words #join w/ no characters btwn #genome = 'attatgaattcattagaattcattatcg' #Restriction Enzyme Cut python3 52digest.py ../Data/sars-cov2.gb gaattc 1160 10573 5546 448 2550 2592 3569 2112 1069 289?
| 3.466179
| 3
|
core/src/zeit/workflow/browser/metadata.py
|
rickdg/vivi
| 5
|
6629897
|
<filename>core/src/zeit/workflow/browser/metadata.py
import zope.cachedescriptors.property
import zope.component
import zope.viewlet.viewlet
import zeit.workflow.interfaces
class WorkflowPreview(zope.viewlet.viewlet.ViewletBase):
    """Viewlet rendering a read-only preview of an object's workflow fields.

    Renders nothing when the context cannot be adapted to IContentWorkflow.
    """

    # NOTE(review): zope.formlib.form is used here but not imported in the
    # visible import block -- presumably available transitively; confirm.
    fields = zope.formlib.form.FormFields(
        zeit.workflow.interfaces.IContentWorkflow)
    # Display widgets, built in update(); stays None without a workflow.
    widgets = None

    def update(self):
        # Only set up widgets when the context actually has a workflow.
        if self.workflow is not None:
            self.widgets = zope.formlib.form.setUpEditWidgets(
                self.fields, 'workflow', self.workflow, self.request,
                for_display=True)

    def render(self):
        # Suppress all output when update() produced no widgets.
        if not self.widgets:
            return u''
        return super(WorkflowPreview, self).render()

    @zope.cachedescriptors.property.Lazy
    def workflow(self):
        # May be None: the adapter call uses a None default.
        return zeit.workflow.interfaces.IContentWorkflow(self.context, None)
|
<filename>core/src/zeit/workflow/browser/metadata.py
import zope.cachedescriptors.property
import zope.component
import zope.viewlet.viewlet
import zeit.workflow.interfaces
class WorkflowPreview(zope.viewlet.viewlet.ViewletBase):
    """Viewlet rendering a read-only preview of an object's workflow fields.

    Renders nothing when the context cannot be adapted to IContentWorkflow.
    """

    # NOTE(review): zope.formlib.form is used here but not imported in the
    # visible import block -- presumably available transitively; confirm.
    fields = zope.formlib.form.FormFields(
        zeit.workflow.interfaces.IContentWorkflow)
    # Display widgets, built in update(); stays None without a workflow.
    widgets = None

    def update(self):
        # Only set up widgets when the context actually has a workflow.
        if self.workflow is not None:
            self.widgets = zope.formlib.form.setUpEditWidgets(
                self.fields, 'workflow', self.workflow, self.request,
                for_display=True)

    def render(self):
        # Suppress all output when update() produced no widgets.
        if not self.widgets:
            return u''
        return super(WorkflowPreview, self).render()

    @zope.cachedescriptors.property.Lazy
    def workflow(self):
        # May be None: the adapter call uses a None default.
        return zeit.workflow.interfaces.IContentWorkflow(self.context, None)
|
none
| 1
| 1.719259
| 2
|
|
orb_simulator/tools.py
|
dmguezjaviersnet/IA-Sim-Comp-Project
| 1
|
6629898
|
# NOTE(review): EPSILON is the empty string, defined before the imports --
# presumably a parser/grammar sentinel used elsewhere in the project; confirm.
EPSILON = ''
import pickle
from os import path, mkdir
from typing import Any, Tuple
from PIL import Image
import math
# COLORS
# RGB tuples used for drawing.
SELECT_BLUE_COLOR = (44, 176, 218)
WHITE_COLOR = (255, 255, 255)
GREEN_COLOR = (0, 255, 0)
BLACK_COLOR = (0, 0, 0)
RED_COLOR= (255, 0, 0)
SOLID_BLUE_COLOR = (0, 0, 255)
PLUM_COLOR = (221,160,221)
WINE_COLOR = (88, 24, 31)
LIGHT_GRAY = (211, 211, 211)
def make_pickle_file(file_name, data):
    """Serialize *data* into '<file_name>.pickle' (binary mode)."""
    target = f"{file_name}.pickle"
    with open(target, "wb") as handle:
        pickle.dump(data, handle)
def unpick_pickle_file(file_name):
    """Load and return the object stored in the pickle file *file_name*."""
    with open(file_name, 'rb') as source:
        return pickle.load(source)
def serialize_data(data, file_name: str):
    """Ensure ./serialized_data exists, then pickle *data* under *file_name*."""
    out_dir = './serialized_data'
    if not path.exists(out_dir):
        mkdir(out_dir)
    make_pickle_file(file_name, data)
def deserialize_data(file_name) -> Any:
    """Return the unpickled contents of *file_name*, or None if it is missing.

    (Annotation corrected: the previous ``Tuple[bool, Any]`` did not match
    the actual return -- the loaded object, or None when the file is absent.)
    """
    if path.exists(file_name):
        data = unpick_pickle_file(file_name)
        return data
    else: return None
def open_image(ori_path: str):
    """Open and return the PIL image at *ori_path*."""
    return Image.open(ori_path)
def generate_images_to_rotation(ori_path, des_path):
    """Rotate the image at *ori_path* in 15-degree steps, saving to *des_path*.

    NOTE(review): every iteration saves over the same *des_path*, so only the
    final rotation (360 degrees == original orientation) survives; the first
    saved angle is also 15, not 0. Looks like a bug -- confirm intent before
    changing (each step rotates the *original* image, not the previous step).
    """
    im = open_image(ori_path)
    #rotate image
    angle = 0
    while angle < 360:
        angle += 15
        new_img = im.rotate(angle)
        new_img.save(des_path)
def next_point_moving_in_elipse(point: Tuple[float, float], a, b, degree):
    """Offset *point* by the ellipse position (a*cos, b*sin) at angle *degree*.

    *degree* is given in degrees; *a* and *b* are the semi-axes.
    """
    px, py = point
    radians = degree * 2 * math.pi / 360
    return (px + (a * math.cos(radians)), py + (b * math.sin(radians)))
def round_off_wi_exceed(number):
    """Truncate *number* toward negative infinity at two decimal places."""
    scaled = math.floor(number * 100)
    return scaled / 100
|
# Duplicate copy of tools.py; review notes inline.
EPSILON = ''
import pickle
from os import path, mkdir
from typing import Any, Tuple
from PIL import Image
import math
# COLORS
SELECT_BLUE_COLOR = (44, 176, 218)
WHITE_COLOR = (255, 255, 255)
GREEN_COLOR = (0, 255, 0)
BLACK_COLOR = (0, 0, 0)
RED_COLOR= (255, 0, 0)
SOLID_BLUE_COLOR = (0, 0, 255)
PLUM_COLOR = (221,160,221)
WINE_COLOR = (88, 24, 31)
LIGHT_GRAY = (211, 211, 211)
def make_pickle_file(file_name, data):
    """Pickle *data* into '<file_name>.pickle'."""
    with open(f"{file_name}.pickle", "wb") as outfile:
        pickle.dump(data, outfile)
def unpick_pickle_file(file_name):
    """Return the object stored in the pickle file *file_name*."""
    with open(file_name, 'rb') as f:
        data = pickle.load(f)
    return data
def serialize_data(data, file_name: str):
    """Ensure ./serialized_data exists, then pickle *data*."""
    if not path.exists('./serialized_data'):
        mkdir('./serialized_data')
    make_pickle_file(file_name, data)
def deserialize_data(file_name) -> Tuple[bool, Any]:
    # NOTE(review): the annotation says Tuple[bool, Any], but the function
    # returns the loaded object or None.
    if path.exists(file_name):
        data = unpick_pickle_file(file_name)
        return data
    else: return None
def open_image(ori_path: str):
    """Open and return the PIL image at *ori_path*."""
    return Image.open(ori_path)
def generate_images_to_rotation(ori_path, des_path):
    # NOTE(review): each step saves over the same des_path, so only the last
    # (360-degree) rotation survives -- confirm intent.
    im = open_image(ori_path)
    #rotate image
    angle = 0
    while angle < 360:
        angle += 15
        new_img = im.rotate(angle)
        new_img.save(des_path)
def next_point_moving_in_elipse(point: Tuple[float, float], a, b, degree):
    """Offset *point* by the ellipse position (a*cos, b*sin) at *degree*."""
    new_x = point[0] + (a*math.cos(degree * 2 * math.pi / 360))
    new_y = point[1] + (b*math.sin(degree * 2 * math.pi / 360))
    return (new_x, new_y)
def round_off_wi_exceed(number):
    """Truncate *number* toward negative infinity at two decimals."""
    return math.floor(number*100)/100
|
en
| 0.433914
|
# COLORS #rotate image
| 2.747617
| 3
|
users/views.py
|
DavidBarcenas/django-photosgram
| 0
|
6629899
|
<filename>users/views.py
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
from django.shortcuts import redirect, render
from django.views.generic import DetailView, FormView
from django.urls import reverse, reverse_lazy
from django.contrib.auth.models import User
from django.views.generic.edit import UpdateView
from users.forms import SignupForm
from users.models import Profile
from posts.models import Post


class UserDetailView(LoginRequiredMixin, DetailView):
    """Detail page for a user, looked up by username (login required)."""

    template_name = 'users/detail.html'
    slug_field = 'username'
    slug_url_kwarg = 'username'
    queryset = User.objects.all()
    context_object_name = 'user'

    def get_context_data(self, **kwargs):
        """Add user's posts to context"""
        context = super().get_context_data(**kwargs)
        user = self.get_object()
        # Newest posts first.
        context ['posts'] = Post.objects.filter(user=user).order_by('-created')
        return context


# Create your views here.
def login_view(request):
    """Authenticate the POSTed credentials; re-render the form on failure."""
    if request.method == 'POST':
        username = request.POST['username']
        password = request.POST['password']
        user = authenticate(request, username=username, password=password)
        if user:
            login(request, user)
            return redirect('posts:feed')
        else:
            return render(request, 'users/login.html', {'error': 'Invalid username and/or password'})
    return render(request, 'users/login.html')


class SignupView(FormView):
    """Users signup view"""

    template_name = 'users/signup.html'
    form_class = SignupForm
    success_url = reverse_lazy('users:login')

    def form_valid(self, form):
        """Save form data"""
        form.save()
        return super().form_valid(form)


class UpdateProfileView(LoginRequiredMixin, UpdateView):
    """Update user profile"""

    template_name = 'users/update_profile.html'
    model = Profile
    fields = ['website', 'phone_number', 'biography', 'picture']

    def get_object(self):
        """Return user's profile"""
        return self.request.user.profile

    def get_success_url(self):
        """Return to user's profile"""
        username = self.object.user.username
        return reverse('users:detail', kwargs={'username': username})


@login_required
def logout_view(request):
    """Log the current user out and send them to the login page."""
    logout(request)
    return redirect('users:login')
|
<filename>users/views.py
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
from django.shortcuts import redirect, render
from django.views.generic import DetailView, FormView
from django.urls import reverse, reverse_lazy
from django.contrib.auth.models import User
from django.views.generic.edit import UpdateView
from users.forms import SignupForm
from users.models import Profile
from posts.models import Post


class UserDetailView(LoginRequiredMixin, DetailView):
    """Detail page for a user, looked up by username (login required)."""

    template_name = 'users/detail.html'
    slug_field = 'username'
    slug_url_kwarg = 'username'
    queryset = User.objects.all()
    context_object_name = 'user'

    def get_context_data(self, **kwargs):
        """Add user's posts to context"""
        context = super().get_context_data(**kwargs)
        user = self.get_object()
        # Newest posts first.
        context ['posts'] = Post.objects.filter(user=user).order_by('-created')
        return context


# Create your views here.
def login_view(request):
    """Authenticate the POSTed credentials; re-render the form on failure."""
    if request.method == 'POST':
        username = request.POST['username']
        password = request.POST['password']
        user = authenticate(request, username=username, password=password)
        if user:
            login(request, user)
            return redirect('posts:feed')
        else:
            return render(request, 'users/login.html', {'error': 'Invalid username and/or password'})
    return render(request, 'users/login.html')


class SignupView(FormView):
    """Users signup view"""

    template_name = 'users/signup.html'
    form_class = SignupForm
    success_url = reverse_lazy('users:login')

    def form_valid(self, form):
        """Save form data"""
        form.save()
        return super().form_valid(form)


class UpdateProfileView(LoginRequiredMixin, UpdateView):
    """Update user profile"""

    template_name = 'users/update_profile.html'
    model = Profile
    fields = ['website', 'phone_number', 'biography', 'picture']

    def get_object(self):
        """Return user's profile"""
        return self.request.user.profile

    def get_success_url(self):
        """Return to user's profile"""
        username = self.object.user.username
        return reverse('users:detail', kwargs={'username': username})


@login_required
def logout_view(request):
    """Log the current user out and send them to the login page."""
    logout(request)
    return redirect('users:login')
|
en
| 0.842549
|
Add user's posts to context # Create your views here. Users signup view Save form data Update user profile Return user's profile Return to user's profile
| 2.357182
| 2
|
openstack_dashboard/contrib/trove/content/database_clusters/cluster_manager.py
|
Tesora-Release/tesora-horizon
| 0
|
6629900
|
<reponame>Tesora-Release/tesora-horizon
# Copyright 2015 Tesora Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core import cache
def get(cluster_id):
    """Return the ClusterInstanceManager cached for *cluster_id*.

    Creates and caches a fresh manager on first access.
    """
    if not has_cluster(cluster_id):
        manager = ClusterInstanceManager(cluster_id)
        cache.cache.set(cluster_id, manager)
    return cache.cache.get(cluster_id)
def delete(cluster_id):
    """Drop *cluster_id*'s manager: clear its instances, then evict it."""
    manager = get(cluster_id)
    manager.clear_instances()
    cache.cache.delete(cluster_id)
def update(cluster_id, manager):
    """Persist *manager* in the cache under *cluster_id*."""
    cache.cache.set(cluster_id, manager)
def has_cluster(cluster_id):
    """Report whether a (truthy) manager is cached for *cluster_id*."""
    return bool(cache.cache.get(cluster_id))
class ClusterInstanceManager(object):
    """Tracks the ClusterInstance records attached to one cluster.

    Mutating operations (add/delete) persist the manager back to the cache
    via the module-level update() helper.
    """

    def __init__(self, cluster_id):
        self.cluster_id = cluster_id
        # Bug fix: 'instances' used to be a *class* attribute, so every
        # manager -- across all clusters -- shared one list. Make it a
        # per-instance attribute instead.
        self.instances = []

    def get_instances(self):
        """Return the list of tracked instances."""
        return self.instances

    def get_instance(self, id):
        """Return the tracked instance with the given id, or None."""
        for instance in self.instances:
            if instance.id == id:
                return instance
        return None

    def add_instance(self, id, name, flavor_id,
                     flavor, volume, type, related_to):
        """Create, track and persist a new ClusterInstance; return the list."""
        instance = ClusterInstance(id, name, flavor_id, flavor,
                                   volume, type, related_to)
        self.instances.append(instance)
        update(self.cluster_id, self)
        return self.instances

    def delete_instance(self, id):
        """Untrack the instance with the given id (if present) and persist."""
        instance = self.get_instance(id)
        if instance:
            self.instances.remove(instance)
            update(self.cluster_id, self)

    def clear_instances(self):
        """Empty this manager's instance list in place."""
        del self.instances[:]
class ClusterInstance(object):
    """Plain value object describing one instance of a cluster."""

    def __init__(self, id, name, flavor_id, flavor, volume, type, related_to):
        self.id = id
        self.name = name
        self.flavor_id = flavor_id
        self.flavor = flavor
        self.volume = volume
        self.type = type
        self.related_to = related_to
|
# Copyright 2015 Tesora Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core import cache
def get(cluster_id):
    """Return the cached ClusterInstanceManager for *cluster_id*, creating
    and caching one on first access."""
    if not has_cluster(cluster_id):
        manager = ClusterInstanceManager(cluster_id)
        cache.cache.set(cluster_id, manager)
    return cache.cache.get(cluster_id)
def delete(cluster_id):
    """Clear the manager's instances, then evict it from the cache."""
    manager = get(cluster_id)
    manager.clear_instances()
    cache.cache.delete(cluster_id)
def update(cluster_id, manager):
    """Persist *manager* in the cache under *cluster_id*."""
    cache.cache.set(cluster_id, manager)
def has_cluster(cluster_id):
    """Report whether a (truthy) manager is cached for *cluster_id*."""
    if cache.cache.get(cluster_id):
        return True
    else:
        return False
class ClusterInstanceManager(object):
    """Tracks the ClusterInstance records attached to one cluster."""
    # NOTE(review): class-level mutable list -- shared by every manager of
    # every cluster. Almost certainly intended to be per-instance state
    # assigned in __init__; flagged here, not changed.
    instances = []
    def __init__(self, cluster_id):
        self.cluster_id = cluster_id
    def get_instances(self):
        """Return the list of tracked instances."""
        return self.instances
    def get_instance(self, id):
        """Return the tracked instance with the given id, or None."""
        for instance in self.instances:
            if instance.id == id:
                return instance
        return None
    def add_instance(self, id, name, flavor_id,
                     flavor, volume, type, related_to):
        """Create, track and persist a new ClusterInstance; return the list."""
        instance = ClusterInstance(id, name, flavor_id, flavor,
                                   volume, type, related_to)
        self.instances.append(instance)
        update(self.cluster_id, self)
        return self.instances
    def delete_instance(self, id):
        """Untrack the instance with the given id (if present) and persist."""
        instance = self.get_instance(id)
        if instance:
            self.instances.remove(instance)
            update(self.cluster_id, self)
    def clear_instances(self):
        """Empty the shared instance list in place."""
        del self.instances[:]
class ClusterInstance(object):
    """Plain value object describing one instance of a cluster."""
    def __init__(self, id, name, flavor_id, flavor, volume, type, related_to):
        self.id = id
        self.name = name
        self.flavor_id = flavor_id
        self.flavor = flavor
        self.volume = volume
        self.type = type
        self.related_to = related_to
|
en
| 0.845182
|
# Copyright 2015 Tesora Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
| 1.782043
| 2
|
output/models/nist_data/atomic/decimal/schema_instance/nistschema_sv_iv_atomic_decimal_max_exclusive_2_xsd/__init__.py
|
tefra/xsdata-w3c-tests
| 1
|
6629901
|
from output.models.nist_data.atomic.decimal.schema_instance.nistschema_sv_iv_atomic_decimal_max_exclusive_2_xsd.nistschema_sv_iv_atomic_decimal_max_exclusive_2 import NistschemaSvIvAtomicDecimalMaxExclusive2
__all__ = [
"NistschemaSvIvAtomicDecimalMaxExclusive2",
]
|
from output.models.nist_data.atomic.decimal.schema_instance.nistschema_sv_iv_atomic_decimal_max_exclusive_2_xsd.nistschema_sv_iv_atomic_decimal_max_exclusive_2 import NistschemaSvIvAtomicDecimalMaxExclusive2
__all__ = [
"NistschemaSvIvAtomicDecimalMaxExclusive2",
]
|
none
| 1
| 1.06494
| 1
|
|
pycopula/main.py
|
merz9b/pycopula
| 71
|
6629902
|
<reponame>merz9b/pycopula<gh_stars>10-100
# Exploratory script: fits Clayton/Gaussian/Student copulas to mydata.csv,
# then plots CDF/PDF surfaces, a simulation scatter, and tail-concentration
# curves.
import sys
sys.path.insert(0, '..')
from pycopula.copula import *
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import pandas as pd
from matplotlib import cm
from pycopula.visualization import pdf_2d, cdf_2d, concentrationFunction
from pycopula.simulation import simulate
# NOTE(review): 'scipy' is referenced below without a visible import --
# presumably re-exported by pycopula.copula's star import; confirm.
data = pd.read_csv("mydata.csv").values[:,1:]
#plt.figure()
#plt.scatter(data[:,0], data[:,1], marker="x")
#plt.show()
#print(data.shape[1])
#print(data)
clayton = ArchimedeanCopula(family="clayton", dim=2)
indep = Copula(dim=2, name='frechet_up')
student = StudentCopula(dim=2)
gaussian = GaussianCopula(dim=2)
opti, params = clayton.fit(data, method='mle', marginals=[ scipy.stats.gamma, scipy.stats.expon ], hyper_param=[ { 'a': None, 'scale': 1.2 }, { 'scale': None } ], hyper_param_bounds=[ [0, None], [0, None]])
#gaussian.fit(data)
print(clayton)
#print(params)
#opti, params = clayton.fit(data, method='ifm', marginals=[ scipy.stats.gamma, scipy.stats.expon ], hyper_param=[ { 'a': None, 'scale': 1.2 }, { 'scale': None } ], hyper_param_bounds=[ [0, None], [0, None]])
#print(clayton)
#print(params)
#opti, params = gaussian.fit(data, method='mle', marginals=[ scipy.stats.gamma, scipy.stats.expon ], hyper_param=[ { 'a': None, 'scale': 1.2 }, { 'scale': None } ], hyper_param_bounds=[ [0, None], [0, None]])
gaussian.fit(data, method='cmle')
print(gaussian)
print(params)
#print(gaussian)
#sys.exit()
# Re-fit a fresh Clayton copula with CMLE (the MLE setup above is kept for
# reference).
clayton = ArchimedeanCopula(family="clayton", dim=2)
boundAlpha = [0, None] # Greater than 0
boundGamma = [0, None]
bounds = [ boundAlpha, boundGamma ]
paramX1 = { 'a': None, 'scale': 1.2 } # Hyper-parameters of first Gamma
paramX2 = { 'scale': None } # Hyper-parameters of second Gamma
hyperParams = [ paramX1, paramX2 ] # The hyper-parameters
gamma = scipy.stats.gamma # The Gamma distribution
# Fitting copula with MLE method and Gamma marginals distributions
#clayton.fit(data, method='ifm', marginals=[gamma, gamma], hyper_param=hyperParams, hyper_param_bounds=bounds)
clayton.fit(data, method='cmle')
u, v, carchi = pdf_2d(clayton, zclip=5)
u, v, Carchi = cdf_2d(clayton)
u, v, Cgauss = cdf_2d(gaussian)
u, v, cgauss = pdf_2d(gaussian, zclip=5)
u, v, Cstudent = cdf_2d(student)
u, v, cstudent = pdf_2d(student)
#sys.exit()
print(indep)
fig = plt.figure()
ax = fig.add_subplot(121, projection='3d', title="Student copula CDF")
X, Y = np.meshgrid(u, v)
#c[c>5]= np.nan
ax.set_zlim(0, 1)
#ax.set_zlim(0, 8)
ax.plot_surface(X, Y, Cstudent, cmap=cm.Blues)
ax.plot_wireframe(X, Y, Cstudent, color='black', alpha=0.3)
ax = fig.add_subplot(122, projection='3d', title="Student copula PDF")
X, Y = np.meshgrid(u, v)
ax.set_zlim(0, 5)
ax.plot_surface(X, Y, cstudent, cmap=cm.Blues)
ax.plot_wireframe(X, Y, cstudent, color='black', alpha=0.3)
ax = fig.add_subplot(122, title="Student copula PDF")
ax.contour(X, Y, cstudent, levels = np.arange(0,5,0.15))
gaussian.setCovariance([[1, 0.8], [0.8, 1]])
clayton.setParameter(0.85)
# NOTE(review): 3000 points from the *Student* copula are simulated, but the
# plot title below says "1000 points with Clayton copula" -- stale labels?
sim = simulate(student, 3000)
fig = plt.figure()
plt.contour(X, Y, cstudent, levels = np.arange(0,5,0.15), alpha=0.4)
plt.scatter([ s[0] for s in sim ], [s[1] for s in sim ], alpha=0.4, edgecolors='none')
plt.title("Simulation of 1000 points with Clayton copula")
plt.xlim(0, 1)
plt.ylim(0, 1)
downI, upI, tailDown, tailUp = concentrationFunction(sim)
ClaytonDown = [ student.concentrationDown(x) for x in downI ]
ClaytonUp = [ student.concentrationUp(x) for x in upI ]
plt.figure()
plt.plot(downI, tailDown, color='red', linewidth=3, label="Empirical concentration")
plt.plot(upI, tailUp, color='red', linewidth=3)
plt.plot(downI, ClaytonDown, color='blue', linewidth=1, label="Clayton concentration")
plt.plot(upI, ClaytonUp, color='blue', linewidth=1)
plt.plot([0.5, 0.5], [0, 1], color='gray', alpha=0.6, linestyle='--', linewidth=1)
plt.title("Lower-Upper tail dependence Coefficients")
plt.xlabel("Lower Tail Upper Tail")
plt.legend(loc=0)
plt.show()
|
# Exploratory script: fits Clayton/Gaussian/Student copulas to mydata.csv,
# then plots CDF/PDF surfaces, a simulation scatter, and tail-concentration
# curves.
import sys
sys.path.insert(0, '..')
from pycopula.copula import *
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import pandas as pd
from matplotlib import cm
from pycopula.visualization import pdf_2d, cdf_2d, concentrationFunction
from pycopula.simulation import simulate
# NOTE(review): 'scipy' is referenced below without a visible import --
# presumably re-exported by pycopula.copula's star import; confirm.
data = pd.read_csv("mydata.csv").values[:,1:]
#plt.figure()
#plt.scatter(data[:,0], data[:,1], marker="x")
#plt.show()
#print(data.shape[1])
#print(data)
clayton = ArchimedeanCopula(family="clayton", dim=2)
indep = Copula(dim=2, name='frechet_up')
student = StudentCopula(dim=2)
gaussian = GaussianCopula(dim=2)
opti, params = clayton.fit(data, method='mle', marginals=[ scipy.stats.gamma, scipy.stats.expon ], hyper_param=[ { 'a': None, 'scale': 1.2 }, { 'scale': None } ], hyper_param_bounds=[ [0, None], [0, None]])
#gaussian.fit(data)
print(clayton)
#print(params)
#opti, params = clayton.fit(data, method='ifm', marginals=[ scipy.stats.gamma, scipy.stats.expon ], hyper_param=[ { 'a': None, 'scale': 1.2 }, { 'scale': None } ], hyper_param_bounds=[ [0, None], [0, None]])
#print(clayton)
#print(params)
#opti, params = gaussian.fit(data, method='mle', marginals=[ scipy.stats.gamma, scipy.stats.expon ], hyper_param=[ { 'a': None, 'scale': 1.2 }, { 'scale': None } ], hyper_param_bounds=[ [0, None], [0, None]])
gaussian.fit(data, method='cmle')
print(gaussian)
print(params)
#print(gaussian)
#sys.exit()
# Re-fit a fresh Clayton copula with CMLE (the MLE setup above is kept for
# reference).
clayton = ArchimedeanCopula(family="clayton", dim=2)
boundAlpha = [0, None] # Greater than 0
boundGamma = [0, None]
bounds = [ boundAlpha, boundGamma ]
paramX1 = { 'a': None, 'scale': 1.2 } # Hyper-parameters of first Gamma
paramX2 = { 'scale': None } # Hyper-parameters of second Gamma
hyperParams = [ paramX1, paramX2 ] # The hyper-parameters
gamma = scipy.stats.gamma # The Gamma distribution
# Fitting copula with MLE method and Gamma marginals distributions
#clayton.fit(data, method='ifm', marginals=[gamma, gamma], hyper_param=hyperParams, hyper_param_bounds=bounds)
clayton.fit(data, method='cmle')
u, v, carchi = pdf_2d(clayton, zclip=5)
u, v, Carchi = cdf_2d(clayton)
u, v, Cgauss = cdf_2d(gaussian)
u, v, cgauss = pdf_2d(gaussian, zclip=5)
u, v, Cstudent = cdf_2d(student)
u, v, cstudent = pdf_2d(student)
#sys.exit()
print(indep)
fig = plt.figure()
ax = fig.add_subplot(121, projection='3d', title="Student copula CDF")
X, Y = np.meshgrid(u, v)
#c[c>5]= np.nan
ax.set_zlim(0, 1)
#ax.set_zlim(0, 8)
ax.plot_surface(X, Y, Cstudent, cmap=cm.Blues)
ax.plot_wireframe(X, Y, Cstudent, color='black', alpha=0.3)
ax = fig.add_subplot(122, projection='3d', title="Student copula PDF")
X, Y = np.meshgrid(u, v)
ax.set_zlim(0, 5)
ax.plot_surface(X, Y, cstudent, cmap=cm.Blues)
ax.plot_wireframe(X, Y, cstudent, color='black', alpha=0.3)
ax = fig.add_subplot(122, title="Student copula PDF")
ax.contour(X, Y, cstudent, levels = np.arange(0,5,0.15))
gaussian.setCovariance([[1, 0.8], [0.8, 1]])
clayton.setParameter(0.85)
# NOTE(review): 3000 points from the *Student* copula are simulated, but the
# plot title below says "1000 points with Clayton copula" -- stale labels?
sim = simulate(student, 3000)
fig = plt.figure()
plt.contour(X, Y, cstudent, levels = np.arange(0,5,0.15), alpha=0.4)
plt.scatter([ s[0] for s in sim ], [s[1] for s in sim ], alpha=0.4, edgecolors='none')
plt.title("Simulation of 1000 points with Clayton copula")
plt.xlim(0, 1)
plt.ylim(0, 1)
downI, upI, tailDown, tailUp = concentrationFunction(sim)
ClaytonDown = [ student.concentrationDown(x) for x in downI ]
ClaytonUp = [ student.concentrationUp(x) for x in upI ]
plt.figure()
plt.plot(downI, tailDown, color='red', linewidth=3, label="Empirical concentration")
plt.plot(upI, tailUp, color='red', linewidth=3)
plt.plot(downI, ClaytonDown, color='blue', linewidth=1, label="Clayton concentration")
plt.plot(upI, ClaytonUp, color='blue', linewidth=1)
plt.plot([0.5, 0.5], [0, 1], color='gray', alpha=0.6, linestyle='--', linewidth=1)
plt.title("Lower-Upper tail dependence Coefficients")
plt.xlabel("Lower Tail Upper Tail")
plt.legend(loc=0)
plt.show()
|
en
| 0.163439
|
#plt.figure() #plt.scatter(data[:,0], data[:,1], marker="x") #plt.show() #print(data.shape[1]) #print(data) #gaussian.fit(data) #print(params) #opti, params = clayton.fit(data, method='ifm', marginals=[ scipy.stats.gamma, scipy.stats.expon ], hyper_param=[ { 'a': None, 'scale': 1.2 }, { 'scale': None } ], hyper_param_bounds=[ [0, None], [0, None]]) #print(clayton) #print(params) #opti, params = gaussian.fit(data, method='mle', marginals=[ scipy.stats.gamma, scipy.stats.expon ], hyper_param=[ { 'a': None, 'scale': 1.2 }, { 'scale': None } ], hyper_param_bounds=[ [0, None], [0, None]]) #print(gaussian) #sys.exit() # Greater than 0 # Hyper-parameters of first Gamma # Hyper-parameters of second Gamma # The hyper-parameters # The Gamma distribution # Fitting copula with MLE method and Gamma marginals distributions #clayton.fit(data, method='ifm', marginals=[gamma, gamma], hyper_param=hyperParams, hyper_param_bounds=bounds) #sys.exit() #c[c>5]= np.nan #ax.set_zlim(0, 8)
| 2.627425
| 3
|
locomotion_analysis/src/levy.py
|
sciple/neurobau
| 2
|
6629903
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 14 10:45:00 2017
@author: lex
Levy distribution
"""
import numpy as np
from Path import Path
from load_excel_data import read_xls_data
def compute_distances(list_of_coordinates):
    """Histogram (20 bins over [0, 0.1]) the step distances of each sheet.

    Every element of *list_of_coordinates* is wrapped in a Path object and
    its per-step distances are binned; one count array per sheet is returned.
    """
    return [
        np.histogram(Path(coords).distances, bins=20, range=(0, 0.1))[0]
        for coords in list_of_coordinates
    ]
# Script body: load every sheet of the tracking export and histogram the
# step distances for comparison with a Levy distribution.
fname = 'Raw data-algea_h2o-Trial 11.xlsx'
xls_data = read_xls_data('../data/'+fname)
#p = Path(xls_data[0])
data_out = compute_distances(xls_data)
# NOTE(review): c, m and x below are never used in the visible code.
c=1
m = 0.05
x = np.linspace(0.0001,0.1)
import random
import math
def levy_distro(mu):
    """Draw one sample from a Levy distribution (from the Harris Nature paper).

    A uniform angle on [-pi/2, pi/2] is combined with a unit-exponential
    deviate to produce the sample; *mu* is the distribution exponent.
    """
    angle = random.uniform(-0.5 * math.pi, 0.5 * math.pi)
    expo = -math.log(random.uniform(0.0, 1.0))
    numerator = math.sin((mu - 1.0) * angle)
    denominator = math.pow(math.cos(angle), (1.0 / (mu - 1.0)))
    scale = math.pow((math.cos((2.0 - mu) * angle) / expo), ((2.0 - mu) / (mu - 1.0)))
    return (numerator / denominator) * scale
#np.savetxt('../output/levy_'+fname+'.txt',np.array(data_out).T,delimiter='\t',newline='\n')
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 14 10:45:00 2017
@author: lex
Levy distribution
"""
import numpy as np
from Path import Path
from load_excel_data import read_xls_data
def compute_distances(list_of_coordinates):
    """Histogram (20 bins over [0, 0.1]) the step distances of each sheet."""
    # stores curvatures for every sheet in xls file
    list_of_densities = []
    for coordinates in list_of_coordinates:
        # read in coordinates and convert to path object
        p = Path(coordinates)
        list_of_densities.append(np.histogram(p.distances,bins=20,range=(0,0.1))[0])
    return list_of_densities
fname = 'Raw data-algea_h2o-Trial 11.xlsx'
xls_data = read_xls_data('../data/'+fname)
#p = Path(xls_data[0])
data_out = compute_distances(xls_data)
# NOTE(review): c, m and x are never used in the visible code.
c=1
m = 0.05
x = np.linspace(0.0001,0.1)
import random
import math
def levy_distro(mu):
    ''' From the Harris Nature paper. '''
    # uniform distribution, in range [-0.5pi, 0.5pi]
    x = random.uniform(-0.5 * math.pi, 0.5 * math.pi)
    # y has a unit exponential distribution.
    y = -math.log(random.uniform(0.0, 1.0))
    a = math.sin( (mu - 1.0) * x ) / (math.pow(math.cos(x), (1.0 / (mu - 1.0))))
    b = math.pow( (math.cos((2.0 - mu) * x) / y), ((2.0 - mu) / (mu - 1.0)) )
    z = a * b
    return z
#np.savetxt('../output/levy_'+fname+'.txt',np.array(data_out).T,delimiter='\t',newline='\n')
|
en
| 0.606437
|
#!/usr/bin/env python3 # -*- coding: utf-8 -*- Created on Thu Dec 14 10:45:00 2017 @author: lex Levy distribution # stores curvatures for every sheet in xls file # read in coordinates and convert to path object #p = Path(xls_data[0]) From the Harris Nature paper. # uniform distribution, in range [-0.5pi, 0.5pi] # y has a unit exponential distribution. #np.savetxt('../output/levy_'+fname+'.txt',np.array(data_out).T,delimiter='\t',newline='\n')
| 2.984054
| 3
|
command_line/merge.py
|
ndevenish/dials-fork
| 0
|
6629904
|
<filename>command_line/merge.py
# coding: utf-8
"""
Command line script to allow merging and truncating of a dials dataset.
"""
from __future__ import absolute_import, division, print_function
import logging
import sys
from six.moves import cStringIO as StringIO
from libtbx import phil
from dials.algorithms.merging.merge import (
make_MAD_merged_mtz_file,
make_merged_mtz_file,
merge_and_truncate,
)
from dials.util import Sorry, log, show_mail_handle_errors
from dials.util.export_mtz import match_wavelengths
from dials.util.options import OptionParser, reflections_and_experiments_from_files
from dials.util.version import dials_version
help_message = """
Merge scaled dials data.
Examples::
dials.merge scaled.expt scaled.refl
dials.merge scaled.expt scaled.refl truncate=False
"""
logger = logging.getLogger("dials")
phil_scope = phil.parse(
"""
assess_space_group = True
.type = bool
.help = "Option to assess space group by testing presence of axial reflections"
anomalous = True
.type = bool
.help = "Output anomalous as well as mean intensities."
truncate = True
.type = bool
.help = "Option to perform truncation on merged data."
d_min = None
.type = float
.help = "High resolution limit to apply to the data."
d_max = None
.type = float
.help = "Low resolution limit to apply to the data."
combine_partials = True
.type = bool
.help = "Combine partials that have the same partial id into one
reflection, with an updated partiality given by the sum of the
individual partialities."
partiality_threshold=0.4
.type = float
.help = "All reflections with partiality values above the partiality
threshold will be retained. This is done after any combination of
partials if applicable."
n_residues = 200
.type = int
.help = "Number of residues to use in Wilson scaling"
merging {
use_internal_variance = False
.type = bool
n_bins = 20
.type = int(value_min=5)
anomalous = False
.type = bool
.help = "Option to control whether reported merging stats are anomalous."
}
reporting {
wilson_stats = True
.type = bool
.help = "Option to turn off reporting of Wilson statistics"
merging_stats = True
.type = bool
.help = "Option to turn off reporting of merging statistics."
}
output {
log = dials.merge.log
.type = str
mtz = merged.mtz
.type = str
.help = "Filename to use for mtz output."
crystal_names = XTAL
.type = strings
.help = "Crystal name to be used in MTZ file output (multiple names
allowed for MAD datasets)"
project_name = AUTOMATIC
.type = str
.help = "Project name to be used in MTZ file output"
dataset_names = NATIVE
.type = strings
.help = "Dataset name to be used in MTZ file output (multiple names
allowed for MAD datasets)"
}
include scope cctbx.french_wilson.master_phil
""",
process_includes=True,
)
def merge_data_to_mtz(params, experiments, reflections):
    """Merge data (at each wavelength) and write to an mtz file object."""
    # Group experiments by wavelength; a multi-wavelength (MAD) dataset is
    # merged per-wavelength into one combined MTZ.
    wavelengths = match_wavelengths(experiments)
    if len(wavelengths) > 1:
        # NOTE(review): "Wavlength" typo lives in the runtime log string;
        # left untouched here since this edit is documentation-only.
        logger.info(
            "Multiple wavelengths found: \n%s",
            "\n".join(
                " Wavlength: %.5f, experiment numbers: %s "
                % (k, ",".join(map(str, v)))
                for k, v in wavelengths.items()
            ),
        )
        return make_MAD_merged_mtz_file(params, experiments, reflections, wavelengths)
    # Single-wavelength path: merge/truncate, then build a plain merged MTZ.
    merged_data = merge_and_truncate(params, experiments, reflections)
    return make_merged_mtz_file(*((params, list(wavelengths)[0]) + merged_data))
@show_mail_handle_errors()
def run(args=None):
    """Run dials.merge from the command line.

    Parses arguments, validates that exactly one scaled reflection table
    (with scaling columns present) was supplied, merges the data and writes
    the resulting mtz file.

    Args:
        args: Optional list of command-line arguments (defaults to sys.argv).
    """
    usage = """Usage: dials.merge scaled.refl scaled.expt [options]"""
    parser = OptionParser(
        usage=usage,
        read_experiments=True,
        read_reflections=True,
        phil=phil_scope,
        check_format=False,
        epilog=help_message,
    )
    params, options = parser.parse_args(args=args, show_diff_phil=False)
    # Without both experiments and reflections there is nothing to merge.
    if not params.input.experiments or not params.input.reflections:
        parser.print_help()
        sys.exit()
    reflections, experiments = reflections_and_experiments_from_files(
        params.input.reflections, params.input.experiments
    )
    log.config(verbosity=options.verbose, logfile=params.output.log)
    logger.info(dials_version())
    # Report any parameters the user changed from their defaults.
    diff_phil = parser.diff_phil.as_str()
    if diff_phil != "":
        logger.info("The following parameters have been modified:\n")
        logger.info(diff_phil)
    ### Assert that all data have been scaled with dials - should only be
    # able to input one reflection table and experimentlist that are
    # matching and scaled together.
    if len(reflections) != 1:
        raise Sorry(
            """Only data scaled together as a single reflection dataset
can be processed with dials.merge"""
        )
    # The presence of these columns indicates the data have been scaled.
    for k in [
        "intensity.scale.value",
        "intensity.scale.variance",
        "inverse_scale_factor",
    ]:
        if k not in reflections[0]:
            raise Sorry(
                """%s not found in the reflection table.
Only scaled data can be processed with dials.merge"""
                % k
            )
    try:
        mtz_file = merge_data_to_mtz(params, experiments, reflections)
    except ValueError as e:
        # Surface merging failures as a user-facing error, not a traceback.
        raise Sorry(e)
    logger.info("\nWriting reflections to %s", (params.output.mtz))
    out = StringIO()
    mtz_file.show_summary(out=out)
    logger.info(out.getvalue())
    mtz_file.write(params.output.mtz)
if __name__ == "__main__":
run()
|
<filename>command_line/merge.py
# coding: utf-8
"""
Command line script to allow merging and truncating of a dials dataset.
"""
from __future__ import absolute_import, division, print_function
import logging
import sys
from six.moves import cStringIO as StringIO
from libtbx import phil
from dials.algorithms.merging.merge import (
make_MAD_merged_mtz_file,
make_merged_mtz_file,
merge_and_truncate,
)
from dials.util import Sorry, log, show_mail_handle_errors
from dials.util.export_mtz import match_wavelengths
from dials.util.options import OptionParser, reflections_and_experiments_from_files
from dials.util.version import dials_version
help_message = """
Merge scaled dials data.
Examples::
dials.merge scaled.expt scaled.refl
dials.merge scaled.expt scaled.refl truncate=False
"""
logger = logging.getLogger("dials")
phil_scope = phil.parse(
"""
assess_space_group = True
.type = bool
.help = "Option to assess space group by testing presence of axial reflections"
anomalous = True
.type = bool
.help = "Output anomalous as well as mean intensities."
truncate = True
.type = bool
.help = "Option to perform truncation on merged data."
d_min = None
.type = float
.help = "High resolution limit to apply to the data."
d_max = None
.type = float
.help = "Low resolution limit to apply to the data."
combine_partials = True
.type = bool
.help = "Combine partials that have the same partial id into one
reflection, with an updated partiality given by the sum of the
individual partialities."
partiality_threshold=0.4
.type = float
.help = "All reflections with partiality values above the partiality
threshold will be retained. This is done after any combination of
partials if applicable."
n_residues = 200
.type = int
.help = "Number of residues to use in Wilson scaling"
merging {
use_internal_variance = False
.type = bool
n_bins = 20
.type = int(value_min=5)
anomalous = False
.type = bool
.help = "Option to control whether reported merging stats are anomalous."
}
reporting {
wilson_stats = True
.type = bool
.help = "Option to turn off reporting of Wilson statistics"
merging_stats = True
.type = bool
.help = "Option to turn off reporting of merging statistics."
}
output {
log = dials.merge.log
.type = str
mtz = merged.mtz
.type = str
.help = "Filename to use for mtz output."
crystal_names = XTAL
.type = strings
.help = "Crystal name to be used in MTZ file output (multiple names
allowed for MAD datasets)"
project_name = AUTOMATIC
.type = str
.help = "Project name to be used in MTZ file output"
dataset_names = NATIVE
.type = strings
.help = "Dataset name to be used in MTZ file output (multiple names
allowed for MAD datasets)"
}
include scope cctbx.french_wilson.master_phil
""",
process_includes=True,
)
def merge_data_to_mtz(params, experiments, reflections):
    """Merge data (at each wavelength) and write to an mtz file object.

    Args:
        params: Phil parameters controlling merging/truncation and output.
        experiments: An ExperimentList of scaled experiments.
        reflections: A list containing a single scaled reflection table.

    Returns:
        An iotbx mtz object containing the merged data.
    """
    # Group experiment indices by their beam wavelength.
    wavelengths = match_wavelengths(experiments)
    if len(wavelengths) > 1:
        logger.info(
            "Multiple wavelengths found: \n%s",
            "\n".join(
                # Fix: log message previously misspelled "Wavlength".
                " Wavelength: %.5f, experiment numbers: %s "
                % (k, ",".join(map(str, v)))
                for k, v in wavelengths.items()
            ),
        )
        # MAD data: merge each wavelength separately into one multi-dataset mtz.
        return make_MAD_merged_mtz_file(params, experiments, reflections, wavelengths)
    merged_data = merge_and_truncate(params, experiments, reflections)
    return make_merged_mtz_file(*((params, list(wavelengths)[0]) + merged_data))
@show_mail_handle_errors()
def run(args=None):
"""Run the merging from the command-line."""
usage = """Usage: dials.merge scaled.refl scaled.expt [options]"""
parser = OptionParser(
usage=usage,
read_experiments=True,
read_reflections=True,
phil=phil_scope,
check_format=False,
epilog=help_message,
)
params, options = parser.parse_args(args=args, show_diff_phil=False)
if not params.input.experiments or not params.input.reflections:
parser.print_help()
sys.exit()
reflections, experiments = reflections_and_experiments_from_files(
params.input.reflections, params.input.experiments
)
log.config(verbosity=options.verbose, logfile=params.output.log)
logger.info(dials_version())
diff_phil = parser.diff_phil.as_str()
if diff_phil != "":
logger.info("The following parameters have been modified:\n")
logger.info(diff_phil)
### Assert that all data have been scaled with dials - should only be
# able to input one reflection table and experimentlist that are
# matching and scaled together.
if len(reflections) != 1:
raise Sorry(
"""Only data scaled together as a single reflection dataset
can be processed with dials.merge"""
)
for k in [
"intensity.scale.value",
"intensity.scale.variance",
"inverse_scale_factor",
]:
if k not in reflections[0]:
raise Sorry(
"""%s not found in the reflection table.
Only scaled data can be processed with dials.merge"""
% k
)
try:
mtz_file = merge_data_to_mtz(params, experiments, reflections)
except ValueError as e:
raise Sorry(e)
logger.info("\nWriting reflections to %s", (params.output.mtz))
out = StringIO()
mtz_file.show_summary(out=out)
logger.info(out.getvalue())
mtz_file.write(params.output.mtz)
if __name__ == "__main__":
run()
|
en
| 0.776621
|
# coding: utf-8 Command line script to allow merging and truncating of a dials dataset. Merge scaled dials data. Examples:: dials.merge scaled.expt scaled.refl dials.merge scaled.expt scaled.refl truncate=False assess_space_group = True .type = bool .help = "Option to assess space group by testing presence of axial reflections" anomalous = True .type = bool .help = "Output anomalous as well as mean intensities." truncate = True .type = bool .help = "Option to perform truncation on merged data." d_min = None .type = float .help = "High resolution limit to apply to the data." d_max = None .type = float .help = "Low resolution limit to apply to the data." combine_partials = True .type = bool .help = "Combine partials that have the same partial id into one reflection, with an updated partiality given by the sum of the individual partialities." partiality_threshold=0.4 .type = float .help = "All reflections with partiality values above the partiality threshold will be retained. This is done after any combination of partials if applicable." n_residues = 200 .type = int .help = "Number of residues to use in Wilson scaling" merging { use_internal_variance = False .type = bool n_bins = 20 .type = int(value_min=5) anomalous = False .type = bool .help = "Option to control whether reported merging stats are anomalous." } reporting { wilson_stats = True .type = bool .help = "Option to turn off reporting of Wilson statistics" merging_stats = True .type = bool .help = "Option to turn off reporting of merging statistics." } output { log = dials.merge.log .type = str mtz = merged.mtz .type = str .help = "Filename to use for mtz output." 
crystal_names = XTAL .type = strings .help = "Crystal name to be used in MTZ file output (multiple names allowed for MAD datasets)" project_name = AUTOMATIC .type = str .help = "Project name to be used in MTZ file output" dataset_names = NATIVE .type = strings .help = "Dataset name to be used in MTZ file output (multiple names allowed for MAD datasets)" } include scope cctbx.french_wilson.master_phil Merge data (at each wavelength) and write to an mtz file object. Run the merging from the command-line. Usage: dials.merge scaled.refl scaled.expt [options] ### Assert that all data have been scaled with dials - should only be # able to input one reflection table and experimentlist that are # matching and scaled together. Only data scaled together as a single reflection dataset can be processed with dials.merge %s not found in the reflection table. Only scaled data can be processed with dials.merge
| 2.294281
| 2
|
server/apis/sale.py
|
AAULAN/kiosk
| 0
|
6629905
|
<filename>server/apis/sale.py
from flask import request
from flask_restplus import Namespace, Resource, fields
from core.database import get_db_products, update_db_product, get_db_sales, add_db_sales, delete_db_sales
from datetime import datetime
api = Namespace('sales', description='Sale related operations')
sale_input = api.model('Sale_request', {
'product': fields.Integer(required=True, description='The product identifier'),
'amount': fields.Integer(description='The amount purchased', default=1)
})
sale_output = api.model('Sale_response', {
'id': fields.Integer(description='The sale identifier'),
'product': fields.Integer(description='The product identifier'),
'amount': fields.Integer(description='The amount purchased'),
'payment': fields.Float(description='Total price for the sale'),
'timestamp': fields.DateTime(description='The timestamp of the purchase')
})
@api.route('/')
class Sales(Resource):
    @api.doc('Retrieve all sales')
    @api.param('from', '[OPTIONAL] Start time (ISO 8601) of sales to fetch')
    @api.param('to', '[OPTIONAL] End time (ISO 8601) of sales to fetch')
    @api.marshal_list_with(sale_output)
    @api.response(400, 'Invalid time period')
    def get(self):
        """List sales, optionally restricted to a [from, to] time span.

        'from' and 'to' must be supplied together as ISO 8601 timestamps;
        a single bound or an unparsable timestamp yields HTTP 400.
        """
        from_t = request.args.get('from')
        to_t = request.args.get('to')
        if not from_t and not to_t:
            return [db_sale.serialize for db_sale in get_db_sales()]
        if not from_t or not to_t:
            # Only one bound was given; open-ended intervals are unsupported.
            api.abort(400)
        try:
            # Bug fix: datetime.fromisoformat raises ValueError on malformed
            # input (it never returns None), so the old falsy check was dead
            # code and bad timestamps escaped as a server error instead of 400.
            timespan = {
                'from': datetime.fromisoformat(from_t),
                'to': datetime.fromisoformat(to_t),
            }
        except ValueError:
            api.abort(400)
        return [db_sale.serialize for db_sale in get_db_sales(timespan=timespan)]

    @api.doc('Add a sale')
    @api.expect(sale_input)
    @api.response(201, "Sale added")
    @api.response(400, "Malformed request or product out of stock")
    @api.response(404, "Product not found")
    def post(self):
        """Record a sale, decrementing product stock (and collection siblings)."""
        payload = request.json
        if not payload or 'product' not in payload:
            api.abort(400, 'Malformed request')
        amount = payload.get('amount', 1)
        if amount <= 0:
            api.abort(400, "'amount' must be a positive number")
        db_product = get_db_products(product_id=payload['product'])
        if not db_product:
            api.abort(404)
        # A stock value of -1 marks an unlimited product; only finite stock
        # is checked and decremented.
        if not db_product.stock == -1:
            if db_product.stock - amount < 0:
                api.abort(400, 'Not enough stock')
            db_product.stock -= amount
            # Products in the same collection share stock; keep them in sync.
            if db_product.collection and db_product.collection != "":
                collection_products = get_db_products(collection=db_product.collection)
                for product in collection_products:
                    product.stock = db_product.stock
                    update_db_product(product.id, product.serialize)
        db_sale = {
            'product': payload['product'],
            'amount': amount,
            'payment': db_product.price * amount,
            'timestamp': datetime.utcnow()
        }
        update_db_product(db_product.id, db_product.serialize)
        add_db_sales(db_sale)
        return {'result': 'success'}, 201
@api.route('/<int:product>')
@api.param('product', 'The id for the product of which to get all sales')
class Product(Resource):
    # NOTE(review): this route ('/<int:product>') has the same URL shape as
    # the Sale route ('/<int:sale>') below - verify which resource
    # flask-restplus actually dispatches GET/DELETE requests to.
    @api.doc('Get total sales for a product')
    @api.response(404, 'No sales for product')
    def get(self, product):
        """Return the product plus the total amount sold across all its sales."""
        count = 0
        db_product = get_db_products(product_id=product)
        product_sale = get_db_sales(product_id=product)
        if not product_sale:
            api.abort(404)
        # Sum the purchased amount over every sale of this product.
        for prod in product_sale:
            count += prod.amount
        return {'product': db_product.serialize, 'count': count}
@api.route('/<int:sale>')
@api.param('sale', 'The id for the sale to delete')
class Sale(Resource):
    @api.doc('Delete a sale')
    @api.response(404, 'Sale not found')
    def delete(self, sale):
        """Delete a sale and restore the sold amount to the product's stock."""
        db_sale = get_db_sales(sale_id=sale)
        # Bug fix: db_sale must be checked before dereferencing
        # db_sale.product - a missing sale previously raised AttributeError
        # (HTTP 500) instead of returning 404.
        if not db_sale:
            api.abort(404)
        db_product = get_db_products(db_sale.product)
        if not db_product:
            api.abort(404)
        # Stock of -1 marks an unlimited product; only finite stock is restored.
        if not db_product.stock == -1:
            db_product.stock += db_sale.amount
            # Products in the same collection share stock; keep them in sync.
            if db_product.collection and db_product.collection != "":
                collection_products = get_db_products(collection=db_product.collection)
                for product in collection_products:
                    product.stock = db_product.stock
                    update_db_product(product.id, product.serialize)
        update_db_product(db_product.id, db_product.serialize)
        delete_db_sales(sale)
        return {'result': 'success'}
|
<filename>server/apis/sale.py
from flask import request
from flask_restplus import Namespace, Resource, fields
from core.database import get_db_products, update_db_product, get_db_sales, add_db_sales, delete_db_sales
from datetime import datetime
api = Namespace('sales', description='Sale related operations')
sale_input = api.model('Sale_request', {
'product': fields.Integer(required=True, description='The product identifier'),
'amount': fields.Integer(description='The amount purchased', default=1)
})
sale_output = api.model('Sale_response', {
'id': fields.Integer(description='The sale identifier'),
'product': fields.Integer(description='The product identifier'),
'amount': fields.Integer(description='The amount purchased'),
'payment': fields.Float(description='Total price for the sale'),
'timestamp': fields.DateTime(description='The timestamp of the purchase')
})
@api.route('/')
class Sales(Resource):
    @api.doc('Retrieve all sales')
    @api.param('from', '[OPTIONAL] Start time (ISO 8601) of sales to fetch')
    @api.param('to', '[OPTIONAL] End time (ISO 8601) of sales to fetch')
    @api.marshal_list_with(sale_output)
    @api.response(400, 'Invalid time period')
    def get(self):
        """List sales, optionally restricted to a [from, to] time span.

        'from' and 'to' must be supplied together as ISO 8601 timestamps;
        a single bound or an unparsable timestamp yields HTTP 400.
        """
        from_t = request.args.get('from')
        to_t = request.args.get('to')
        if not from_t and not to_t:
            return [db_sale.serialize for db_sale in get_db_sales()]
        if not from_t or not to_t:
            # Only one bound was given; open-ended intervals are unsupported.
            api.abort(400)
        try:
            # Bug fix: datetime.fromisoformat raises ValueError on malformed
            # input (it never returns None), so the old falsy check was dead
            # code and bad timestamps escaped as a server error instead of 400.
            timespan = {
                'from': datetime.fromisoformat(from_t),
                'to': datetime.fromisoformat(to_t),
            }
        except ValueError:
            api.abort(400)
        return [db_sale.serialize for db_sale in get_db_sales(timespan=timespan)]

    @api.doc('Add a sale')
    @api.expect(sale_input)
    @api.response(201, "Sale added")
    @api.response(400, "Malformed request or product out of stock")
    @api.response(404, "Product not found")
    def post(self):
        """Record a sale, decrementing product stock (and collection siblings)."""
        payload = request.json
        if not payload or 'product' not in payload:
            api.abort(400, 'Malformed request')
        amount = payload.get('amount', 1)
        if amount <= 0:
            api.abort(400, "'amount' must be a positive number")
        db_product = get_db_products(product_id=payload['product'])
        if not db_product:
            api.abort(404)
        # A stock value of -1 marks an unlimited product; only finite stock
        # is checked and decremented.
        if not db_product.stock == -1:
            if db_product.stock - amount < 0:
                api.abort(400, 'Not enough stock')
            db_product.stock -= amount
            # Products in the same collection share stock; keep them in sync.
            if db_product.collection and db_product.collection != "":
                collection_products = get_db_products(collection=db_product.collection)
                for product in collection_products:
                    product.stock = db_product.stock
                    update_db_product(product.id, product.serialize)
        db_sale = {
            'product': payload['product'],
            'amount': amount,
            'payment': db_product.price * amount,
            'timestamp': datetime.utcnow()
        }
        update_db_product(db_product.id, db_product.serialize)
        add_db_sales(db_sale)
        return {'result': 'success'}, 201
@api.route('/<int:product>')
@api.param('product', 'The id for the product of which to get all sales')
class Product(Resource):
@api.doc('Get total sales for a product')
@api.response(404, 'No sales for product')
def get(self, product):
count = 0
db_product = get_db_products(product_id=product)
product_sale = get_db_sales(product_id=product)
if not product_sale:
api.abort(404)
for prod in product_sale:
count += prod.amount
return {'product': db_product.serialize, 'count': count}
@api.route('/<int:sale>')
@api.param('sale', 'The id for the sale to delete')
class Sale(Resource):
    @api.doc('Delete a sale')
    @api.response(404, 'Sale not found')
    def delete(self, sale):
        """Delete a sale and restore the sold amount to the product's stock."""
        db_sale = get_db_sales(sale_id=sale)
        # Bug fix: db_sale must be checked before dereferencing
        # db_sale.product - a missing sale previously raised AttributeError
        # (HTTP 500) instead of returning 404.
        if not db_sale:
            api.abort(404)
        db_product = get_db_products(db_sale.product)
        if not db_product:
            api.abort(404)
        # Stock of -1 marks an unlimited product; only finite stock is restored.
        if not db_product.stock == -1:
            db_product.stock += db_sale.amount
            # Products in the same collection share stock; keep them in sync.
            if db_product.collection and db_product.collection != "":
                collection_products = get_db_products(collection=db_product.collection)
                for product in collection_products:
                    product.stock = db_product.stock
                    update_db_product(product.id, product.serialize)
        update_db_product(db_product.id, db_product.serialize)
        delete_db_sales(sale)
        return {'result': 'success'}
|
none
| 1
| 2.603843
| 3
|
|
docs/gallery/plot_hs.py
|
wavespectra/wavespectra
| 19
|
6629906
|
"""
Calculate and plot Hs
=====================

Plots Hs calculated from spectra dataset
"""
import matplotlib.pyplot as plt

from wavespectra import read_ww3

# Load the WW3 spectra example dataset shipped with the documentation.
dset = read_ww3("../_static/ww3file.nc")

fig = plt.figure(figsize=(8, 4))
# Significant wave height computed from the spectra via the spec accessor.
hs = dset.spec.hs()
# Plot Hs as a time series.
p = hs.plot.line(x="time")
|
"""
Calculate and plot Hs
=====================
Plots Hs calculated from spectra dataset
"""
import matplotlib.pyplot as plt
from wavespectra import read_ww3
dset = read_ww3("../_static/ww3file.nc")
fig = plt.figure(figsize=(8, 4))
hs = dset.spec.hs()
p = hs.plot.line(x="time")
|
en
| 0.829223
|
Calculate and plot Hs ===================== Plots Hs calculated from spectra dataset
| 2.74844
| 3
|
internet_speed_test.py
|
JoeThomas-git/JTpython
| 0
|
6629907
|
# This script tests your internet speed (download/upload/ping).
import speedtest

st = speedtest.Speedtest()

try:
    option = int(input('''What speed do you want to test?
1) Download
2) Upload
3) Ping
Your Choice: '''))
except ValueError:
    # Robustness fix: a non-numeric answer previously crashed with an
    # unhandled ValueError; treat it as an invalid choice instead.
    option = 0

if option == 1:
    # speedtest reports bits/s; divide by 1e6 for megabits per second.
    dl = (st.download() / 1000000)
    print('Your download speed is %.2f mb/s' % dl)
    if dl <= 20:
        print("Pretty slow internet bud.")
    elif dl >= 20:
        print("Not bad!")
elif option == 2:
    ul = (st.upload() / 1000000)
    print('Your upload speed is %.2f mb/s' % ul)
    if ul <= 10:
        print("Sucks bud.")
    elif ul >= 10:
        print("You're killing it!")
elif option == 3:
    # Populating an empty server list selects the best available servers
    # before reporting the measured ping.
    servernames = []
    st.get_servers(servernames)
    print(st.results.ping)
else:
    print("Not a valid answer, please enter a value 1-3.")
|
#This script tests your internet speed (download/upload/ping).
import speedtest
st = speedtest.Speedtest()
option = int(input('''What speed do you want to test?
1) Download
2) Upload
3) Ping
Your Choice: '''))
if option == 1:
dl = (st.download()/1000000)
print('Your download speed is %.2f mb/s' % dl)
if dl <= 20:
print("Pretty slow internet bud.")
elif dl >= 20:
print("Not bad!")
elif option == 2:
ul = (st.upload()/1000000)
print('Your upload speed is %.2f mb/s' % ul)
if ul <= 10:
print("Sucks bud.")
elif ul >= 10:
print("You're killing it!")
elif option == 3:
servernames = []
st.get_servers(servernames)
print(st.results.ping)
else:
print("Not a valid answer, please enter a value 1-3.")
|
en
| 0.687044
|
#This script tests your internet speed (download/upload/ping). What speed do you want to test? 1) Download 2) Upload 3) Ping Your Choice:
| 3.74365
| 4
|
framework/__init__.py
|
wbqhb/SEPC
| 2
|
6629908
|
<gh_stars>1-10
from .framework import Framework
|
from .framework import Framework
|
none
| 1
| 1.120122
| 1
|
|
scribbler/src/libs/parseurl.py
|
yellowheroes/scribbler
| 0
|
6629909
|
<reponame>yellowheroes/scribbler<gh_stars>0
'''
Created on 27 May 2019
@author: Robert
'''
'''
invoke: UrlParse(environ)
instance variables:
script_name = name of currently executing python script
path_info = path up to ?
path = a list of seperate path strings without /'s
query_string = the query string
NOT YET AVAILABLE: params = the key-value pairs of the query string - e.g. ?name=john&age=36
'''
class UrlParse:
    """Parse a WSGI ``environ`` mapping into convenient URL components.

    Attributes set by :meth:`parse`:
        script_name: name of the currently executing script (SCRIPT_NAME).
        path_info: the raw request path up to '?'.
        path: list of non-empty path segments (no slashes).
        query_string: the raw query string ('' when absent).
    """

    def __init__(self, environ=None):
        # environ is the WSGI environment dictionary; default to an empty
        # mapping so a missing environ yields empty components instead of
        # raising inside parse().
        self.environ = environ if environ is not None else {}
        self.parse()

    def parse(self):
        """Populate the URL component attributes from ``self.environ``."""
        self.script_name = self.environ.get('SCRIPT_NAME', '')  # where are we now
        self.path_info = self.environ.get('PATH_INFO', '')
        # Split the path into its constituent parts and drop the empty
        # strings produced by leading/trailing/duplicate slashes.
        self.path = self.path_info.split('/')
        self.path = list(filter(None, self.path))
        # Bug fix: query_string was previously only assigned when a
        # QUERY_STRING key was present, so reading it otherwise raised
        # AttributeError (contradicting the documented attribute set).
        # Default to the empty string.
        self.query_string = self.environ.get('QUERY_STRING', '')
|
'''
Created on 27 May 2019
@author: Robert
'''
'''
invoke: UrlParse(environ)
instance variables:
script_name = name of currently executing python script
path_info = path up to ?
path = a list of seperate path strings without /'s
query_string = the query string
NOT YET AVAILABLE: params = the key-value pairs of the query string - e.g. ?name=john&age=36
'''
class UrlParse:
    """Parse a WSGI ``environ`` mapping into convenient URL components.

    Attributes set by :meth:`parse`:
        script_name: name of the currently executing script (SCRIPT_NAME).
        path_info: the raw request path up to '?'.
        path: list of non-empty path segments (no slashes).
        query_string: the raw query string ('' when absent).
    """

    def __init__(self, environ=None):
        # environ is the WSGI environment dictionary; default to an empty
        # mapping so a missing environ yields empty components instead of
        # raising inside parse().
        self.environ = environ if environ is not None else {}
        self.parse()

    def parse(self):
        """Populate the URL component attributes from ``self.environ``."""
        self.script_name = self.environ.get('SCRIPT_NAME', '')  # where are we now
        self.path_info = self.environ.get('PATH_INFO', '')
        # Split the path into its constituent parts and drop the empty
        # strings produced by leading/trailing/duplicate slashes.
        self.path = self.path_info.split('/')
        self.path = list(filter(None, self.path))
        # Bug fix: query_string was previously only assigned when a
        # QUERY_STRING key was present, so reading it otherwise raised
        # AttributeError (contradicting the documented attribute set).
        # Default to the empty string.
        self.query_string = self.environ.get('QUERY_STRING', '')
|
en
| 0.576506
|
Created on 27 May 2019
@author: Robert invoke: UrlParse(environ)
instance variables:
script_name = name of currently executing python script
path_info = path up to ?
path = a list of seperate path strings without /'s
query_string = the query string
NOT YET AVAILABLE: params = the key-value pairs of the query string - e.g. ?name=john&age=36 # environ is a dictionary #path = environ['PATH_INFO'] #print('path : ' + path) #print('path variable type : ', type(path)) #split_path = path.split('/') # where are we now # split path in constituent parts - returns a list of strings # remove empty strings from path and prepend a slash # get query string
| 3.316803
| 3
|
utils/make_per_run_histograms.py
|
BlackHershey/asl-scripts
| 0
|
6629910
|
import argparse
import re
import matplotlib
matplotlib.use('Qt4Agg') # must be set prior to pyplot import
from cycler import cycler
from image_utils import get_num_frames
from matplotlib import pyplot
import numpy as np
from os import chdir, getcwd, listdir, remove
from os.path import join, exists
from subprocess import call
PROJECT_DIR = '/net/zfs-black/BLACK/black/MPDP'
def make_per_run_histograms(patids):
    """Plot per-frame CBF intensity histograms for each asl run of each patid.

    For every participant directory under PROJECT_DIR, iterates over its
    asl* run directories, computes a per-frame histogram of the shifted,
    brain-masked CBF image with img_hist_4dfp, and shows all frames of a
    run on a single figure.

    Args:
        patids: Iterable of participant directory names (e.g. 'MPD001').
    """
    # Cycle linestyle x color so many frames stay visually distinguishable.
    colors = pyplot.rcParams['axes.prop_cycle'].by_key()['color']
    pyplot.rc('axes', prop_cycle=(cycler('linestyle', ['-', ':']) * cycler('color', colors)))
    for patid in patids:
        chdir(join(PROJECT_DIR, patid))
        asl_runs = [ d for d in listdir(getcwd()) if d.startswith('asl') ]
        asl_runs.sort()
        for run in asl_runs:
            chdir(run)
            hist_img = '_'.join([patid, run, 'per-frame_histogram_shifted.png'])
            # if exists(hist_img):
            # 	chdir('..')
            # 	continue
            img = '_'.join([patid, 'a' + run[-1], 'xr3d_atl_brainmasked_cbf_shifted_msk'])
            num_frames = get_num_frames(img + '.4dfp.ifh')
            frames = range(1, num_frames+1)
            for i in frames:
                # img_hist_4dfp writes a .hist text file for frame i
                # (range -61..121, 183 bins, masked by the atlas mask).
                hist_file = '.'.join([img + '_vol' + str(i), 'hist'])
                call(['img_hist_4dfp', img, '-h', '-r-61to121', '-b183', '-m../atlas/' + patid +'_asl_xr3d_atl_dfndm.4dfp.img', '-f' + str(i)])
                data = [ [], [] ]
                with open(hist_file, 'r') as f:
                    for line in f:
                        # Skip header/comment lines in the histogram output.
                        if line.startswith('#'):
                            continue
                        line_arr = line.split()
                        data[0].append(float(line_arr[0]))
                        data[1].append(float(line_arr[1]))
                pyplot.plot(data[0], data[1])
                remove(hist_file)  # the histogram file is only a temporary
            pyplot.xlim(-60,120) # remove axis padding
            pyplot.ylim(0,2000) # remove axis padding
            pyplot.figlegend(['frame' + str(i) for i in frames])
            pyplot.show()
            #pyplot.savefig('_'.join([patid, run, 'per-frame_histogram_shifted.png']))
            pyplot.close()
            chdir('..')
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='plot intensity histogram for each aslrun')
    parser.add_argument('-p', '--patids', nargs='+', help='limit plot to certain participants (default is to include all)')
    args = parser.parse_args()
    # Default to every MPD### directory under the project when no patids given.
    patids = args.patids if args.patids else [ d for d in listdir(PROJECT_DIR) if re.match('MPD\d{3}', d) ]
    make_per_run_histograms(patids)
|
import argparse
import re
import matplotlib
matplotlib.use('Qt4Agg') # must be set prior to pyplot import
from cycler import cycler
from image_utils import get_num_frames
from matplotlib import pyplot
import numpy as np
from os import chdir, getcwd, listdir, remove
from os.path import join, exists
from subprocess import call
PROJECT_DIR = '/net/zfs-black/BLACK/black/MPDP'
def make_per_run_histograms(patids):
colors = pyplot.rcParams['axes.prop_cycle'].by_key()['color']
pyplot.rc('axes', prop_cycle=(cycler('linestyle', ['-', ':']) * cycler('color', colors)))
for patid in patids:
chdir(join(PROJECT_DIR, patid))
asl_runs = [ d for d in listdir(getcwd()) if d.startswith('asl') ]
asl_runs.sort()
for run in asl_runs:
chdir(run)
hist_img = '_'.join([patid, run, 'per-frame_histogram_shifted.png'])
# if exists(hist_img):
# chdir('..')
# continue
img = '_'.join([patid, 'a' + run[-1], 'xr3d_atl_brainmasked_cbf_shifted_msk'])
num_frames = get_num_frames(img + '.4dfp.ifh')
frames = range(1, num_frames+1)
for i in frames:
hist_file = '.'.join([img + '_vol' + str(i), 'hist'])
call(['img_hist_4dfp', img, '-h', '-r-61to121', '-b183', '-m../atlas/' + patid +'_asl_xr3d_atl_dfndm.4dfp.img', '-f' + str(i)])
data = [ [], [] ]
with open(hist_file, 'r') as f:
for line in f:
if line.startswith('#'):
continue
line_arr = line.split()
data[0].append(float(line_arr[0]))
data[1].append(float(line_arr[1]))
pyplot.plot(data[0], data[1])
remove(hist_file)
pyplot.xlim(-60,120) # remove axis padding
pyplot.ylim(0,2000) # remove axis padding
pyplot.figlegend(['frame' + str(i) for i in frames])
pyplot.show()
#pyplot.savefig('_'.join([patid, run, 'per-frame_histogram_shifted.png']))
pyplot.close()
chdir('..')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='plot intensity histogram for each aslrun')
parser.add_argument('-p', '--patids', nargs='+', help='limit plot to certain participants (default is to include all)')
args = parser.parse_args()
patids = args.patids if args.patids else [ d for d in listdir(PROJECT_DIR) if re.match('MPD\d{3}', d) ]
make_per_run_histograms(patids)
|
en
| 0.245666
|
# must be set prior to pyplot import # if exists(hist_img): # chdir('..') # continue # remove axis padding # remove axis padding #pyplot.savefig('_'.join([patid, run, 'per-frame_histogram_shifted.png']))
| 2.03688
| 2
|
tests/functional/test_management_user.py
|
bcgov/nr-optimize-objstor-admin-
| 0
|
6629911
|
<gh_stars>0
import time
from ecsclient import schemas
from ecsclient.common.exceptions import ECSClientException
from tests import functional
class TestManagementUser(functional.BaseTestCase):
    """Functional tests for the ECS management-user client API.

    Uses timestamped user names so runs against a shared ECS instance do
    not collide with each other.
    """

    def __init__(self, *args, **kwargs):
        super(TestManagementUser, self).__init__(*args, **kwargs)
        # Unique per-run user names to avoid clashes between test runs.
        self.management_user_1 = "functional-tests-managementuser-%s" % int(time.time())
        self.management_user_2 = self.management_user_1 + "_second"

    def setUp(self):
        """Create the first management user that most tests operate on."""
        super(TestManagementUser, self).setUp()
        self.client.management_user.create(
            self.management_user_1, password="<PASSWORD>"
        )

    def tearDown(self):
        """Best-effort deletion of both users; missing users are ignored."""
        super(TestManagementUser, self).tearDown()
        for management_user in [self.management_user_1, self.management_user_2]:
            try:
                self.client.management_user.delete(management_user)
            except ECSClientException:
                # The user may never have been created or was already deleted.
                pass

    def test_management_user_list(self):
        """Listing users returns a payload matching the MANAGEMENT_USERS schema."""
        response = self.client.management_user.list()
        self.assertValidSchema(response, schemas.MANAGEMENT_USERS)

    def test_management_user_get(self):
        """Fetching a user returns its record with the expected userId."""
        response = self.client.management_user.get(self.management_user_1)
        self.assertValidSchema(response, schemas.MANAGEMENT_USER)
        self.assertEqual(response["userId"], self.management_user_1)

    def test_management_user_create(self):
        """Creating a user with admin/monitor roles reflects those flags."""
        response = self.client.management_user.create(
            self.management_user_2,
            password="<PASSWORD>",
            is_system_admin=True,
            is_system_monitor=True,
        )
        self.assertValidSchema(response, schemas.MANAGEMENT_USER)
        self.assertEqual(response["userId"], self.management_user_2)
        self.assertTrue(response["isSystemAdmin"])
        self.assertTrue(response["isSystemMonitor"])

    def test_management_user_delete(self):
        """Deleting a user makes a subsequent get raise ECSClientException."""
        self.client.management_user.delete(self.management_user_1)
        f = self.client.management_user.get
        self.assertRaises(ECSClientException, f, self.management_user_1)

    def test_management_user_update(self):
        """Updating a user can grant the admin and monitor roles."""
        response = self.client.management_user.get(self.management_user_1)
        self.assertFalse(response["isSystemAdmin"])
        self.assertFalse(response["isSystemMonitor"])
        self.client.management_user.update(
            self.management_user_1,
            password="<PASSWORD>",
            is_system_admin=True,
            is_system_monitor=True,
        )
        response = self.client.management_user.get(self.management_user_1)
        self.assertTrue(response["isSystemAdmin"])
        self.assertTrue(response["isSystemMonitor"])
|
import time
from ecsclient import schemas
from ecsclient.common.exceptions import ECSClientException
from tests import functional
class TestManagementUser(functional.BaseTestCase):
def __init__(self, *args, **kwargs):
super(TestManagementUser, self).__init__(*args, **kwargs)
self.management_user_1 = "functional-tests-managementuser-%s" % int(time.time())
self.management_user_2 = self.management_user_1 + "_second"
def setUp(self):
super(TestManagementUser, self).setUp()
self.client.management_user.create(
self.management_user_1, password="<PASSWORD>"
)
def tearDown(self):
super(TestManagementUser, self).tearDown()
for management_user in [self.management_user_1, self.management_user_2]:
try:
self.client.management_user.delete(management_user)
except ECSClientException:
pass
def test_management_user_list(self):
response = self.client.management_user.list()
self.assertValidSchema(response, schemas.MANAGEMENT_USERS)
def test_management_user_get(self):
response = self.client.management_user.get(self.management_user_1)
self.assertValidSchema(response, schemas.MANAGEMENT_USER)
self.assertEqual(response["userId"], self.management_user_1)
def test_management_user_create(self):
response = self.client.management_user.create(
self.management_user_2,
password="<PASSWORD>",
is_system_admin=True,
is_system_monitor=True,
)
self.assertValidSchema(response, schemas.MANAGEMENT_USER)
self.assertEqual(response["userId"], self.management_user_2)
self.assertTrue(response["isSystemAdmin"])
self.assertTrue(response["isSystemMonitor"])
def test_management_user_delete(self):
self.client.management_user.delete(self.management_user_1)
f = self.client.management_user.get
self.assertRaises(ECSClientException, f, self.management_user_1)
def test_management_user_update(self):
response = self.client.management_user.get(self.management_user_1)
self.assertFalse(response["isSystemAdmin"])
self.assertFalse(response["isSystemMonitor"])
self.client.management_user.update(
self.management_user_1,
password="<PASSWORD>",
is_system_admin=True,
is_system_monitor=True,
)
response = self.client.management_user.get(self.management_user_1)
self.assertTrue(response["isSystemAdmin"])
self.assertTrue(response["isSystemMonitor"])
|
none
| 1
| 2.414539
| 2
|
|
model/ops_test.py
|
rdh1115/cog
| 38
|
6629912
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for model/ops.py"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
import numpy as np
import tensorflow as tf
from model import ops
class OpsTest(unittest.TestCase):
    """Unit tests for the custom ops in model/ops.py."""

    def testConv2DByBatch(self):
        """conv2d_by_batch with a 1x1 filter must match a per-sample matmul."""
        tf.reset_default_graph()
        bs = 4            # batch size
        in_channels = 3
        h, w = 7, 7       # spatial size
        fh, fw = 1, 1     # 1x1 filter so the numpy reference is a plain dot product
        out_channels = 5
        inputs = tf.placeholder('float', [bs, h, w, in_channels])
        filters = tf.placeholder('float', [bs, fh, fw, in_channels, out_channels])
        outputs = ops.conv2d_by_batch(inputs, filters, (1, 1, 1, 1), 'SAME')
        inputs_ = np.random.randn(bs, h, w, in_channels)
        filters_ = np.random.rand(bs, fh, fw, in_channels, out_channels)
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            outputs_ = sess.run(outputs,
                                feed_dict={inputs: inputs_, filters: filters_})
        # Reference: for a 1x1 filter, the conv reduces to inputs_[i] @ filters_[i, 0, 0].
        tmp = list()
        for i in range(bs):
            tmp.append(np.dot(inputs_[i], filters_[i, 0, 0]))
        outputs_2 = np.array(tmp)
        self.assertTrue(np.mean(abs(outputs_-outputs_2)) < 1e-6)
# Standard test entry point.
if __name__ == '__main__':
    unittest.main()
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for model/ops.py"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
import numpy as np
import tensorflow as tf
from model import ops
class OpsTest(unittest.TestCase):
    """Unit tests for the custom ops in model/ops.py."""

    def testConv2DByBatch(self):
        """conv2d_by_batch with a 1x1 filter must match a per-sample matmul."""
        tf.reset_default_graph()
        bs = 4            # batch size
        in_channels = 3
        h, w = 7, 7       # spatial size
        fh, fw = 1, 1     # 1x1 filter so the numpy reference is a plain dot product
        out_channels = 5
        inputs = tf.placeholder('float', [bs, h, w, in_channels])
        filters = tf.placeholder('float', [bs, fh, fw, in_channels, out_channels])
        outputs = ops.conv2d_by_batch(inputs, filters, (1, 1, 1, 1), 'SAME')
        inputs_ = np.random.randn(bs, h, w, in_channels)
        filters_ = np.random.rand(bs, fh, fw, in_channels, out_channels)
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            outputs_ = sess.run(outputs,
                                feed_dict={inputs: inputs_, filters: filters_})
        # Reference: for a 1x1 filter, the conv reduces to inputs_[i] @ filters_[i, 0, 0].
        tmp = list()
        for i in range(bs):
            tmp.append(np.dot(inputs_[i], filters_[i, 0, 0]))
        outputs_2 = np.array(tmp)
        self.assertTrue(np.mean(abs(outputs_-outputs_2)) < 1e-6)
# Standard test entry point.
if __name__ == '__main__':
    unittest.main()
|
en
| 0.817396
|
# Copyright 2018 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== Tests for model/ops.py
| 2.190769
| 2
|
dataset/convert_tfrecords.py
|
simenvg/SSD.TensorFlow
| 1
|
6629913
|
# Copyright 2018 <NAME>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import os
import random
import sys
import threading
import xml.etree.ElementTree as xml_tree
import shutil
import numpy as np
import six
import tensorflow as tf
'''How to organize your dataset folder:
VOCROOT/
|->VOC2007/
| |->Annotations/
| |->ImageSets/
| |->...
|->VOC2012/
| |->Annotations/
| |->ImageSets/
| |->...
|->VOC2007TEST/
| |->Annotations/
| |->...
'''
# Command-line configuration. DATA_PATH is the root holding the staged
# datasets; shard counts must divide evenly by num_threads (checked in main).
tf.app.flags.DEFINE_string('DATA_PATH', '~/data',
                           'All datas directory')
tf.app.flags.DEFINE_string('train_splits', 'VOC2007, VOC2012',
                           'Comma-separated list of the training data sub-directory')
tf.app.flags.DEFINE_string('validation_splits', 'VOC2007TEST',
                           'Comma-separated list of the validation data sub-directory')
tf.app.flags.DEFINE_string('output_directory', '/media/rs/7A0EE8880EE83EAF/Detections/SSD/dataset/tfrecords',
                           'Output data directory')
tf.app.flags.DEFINE_integer('train_shards', 16,
                            'Number of shards in training TFRecord files.')
tf.app.flags.DEFINE_integer('validation_shards', 16,
                            'Number of shards in validation TFRecord files.')
tf.app.flags.DEFINE_integer('num_threads', 8,
                            'Number of threads to preprocess the images.')
# Fixed seed so record shuffling is reproducible across runs.
RANDOM_SEED = 180428
FLAGS = tf.app.flags.FLAGS
# Populated at runtime by setup_train_data(); 'none' is the background class.
classes = []
def set_training_datasets():
    """Interactively choose which dataset folders under FLAGS.DATA_PATH to use.

    Lists the sub-directories of FLAGS.DATA_PATH, prompts the user for a
    space-separated list of indices, and returns the corresponding paths.

    Returns:
        list of str: FLAGS.DATA_PATH joined with each chosen sub-directory.
    """
    try:
        datasets = os.listdir(FLAGS.DATA_PATH)
    except OSError as e:
        # Narrowed from a bare Exception: only a missing/unreadable directory
        # is expected here. sys.exit replaces the interactive-only exit().
        print('No folder named ~/data')
        print('Exception: ', e)
        sys.exit(1)
    if not datasets:
        # Previously execution fell through to a prompt with nothing to
        # choose from; there is no valid selection, so stop here.
        print('No datasets in ~/data, run config_new_dataset.py on your dataset and move the dataset folder to ~/data')
        sys.exit(1)
    for i, name in enumerate(datasets):
        print('[', i, ']', name)
    user_input = str(input(
        'Input the number for the datasets you wish to train on, separate numbers with space: ')).split()
    return [os.path.join(FLAGS.DATA_PATH, datasets[int(dataset_index)])
            for dataset_index in user_input]
def setup_tmp_folder():
    """Recreate a clean FLAGS.DATA_PATH/tmp tree for staging train/test data.

    Removes any previous tmp tree, then creates
    tmp/{test,train}/{Annotations,JPEGImages}.

    Returns:
        [tmp_path, tmp_train_path, tmp_test_path]
    """
    tmp_root = os.path.join(FLAGS.DATA_PATH, 'tmp')
    test_dir = os.path.join(tmp_root, 'test')
    train_dir = os.path.join(tmp_root, 'train')
    # Start from scratch: wipe any leftovers from a previous run.
    if os.path.exists(tmp_root):
        shutil.rmtree(tmp_root, ignore_errors=True)
    for folder in (tmp_root, test_dir, train_dir):
        os.makedirs(folder)
    for split_dir in (test_dir, train_dir):
        for sub in ('Annotations', 'JPEGImages'):
            os.makedirs(os.path.join(split_dir, sub))
    return [tmp_root, train_dir, test_dir]
def setup_train_data():
    """Stage the selected datasets into the tmp train/test folders.

    Copies every .jpg into JPEGImages and every .xml into Annotations for
    both the 'train' and 'test' split of each chosen dataset, then collects
    the label set found in the staged annotations.

    Returns:
        list of str: class names, with 'none' (background) at index 0.
    """
    datasets = set_training_datasets()
    _, tmp_folder_train_path, tmp_folder_test_path = setup_tmp_folder()
    # Train and test splits are handled identically; loop instead of
    # duplicating the copy logic verbatim as before.
    for dataset in datasets:
        for split, dest in (('train', tmp_folder_train_path),
                            ('test', tmp_folder_test_path)):
            _copy_split(os.path.join(dataset, split), dest)
    return get_classes([os.path.join(tmp_folder_test_path, 'Annotations'),
                        os.path.join(tmp_folder_train_path, 'Annotations')])


def _copy_split(src_dir, dest_dir):
    """Copy one split's .jpg files to JPEGImages and .xml files to Annotations."""
    for filename in os.listdir(src_dir):
        src = os.path.join(src_dir, filename)
        if filename.endswith('.jpg'):
            shutil.copy2(src, os.path.join(dest_dir, 'JPEGImages'))
        elif filename.endswith('.xml'):
            shutil.copy2(src, os.path.join(dest_dir, 'Annotations'))
def get_classes(paths):
    """Collect object class names from all Pascal-VOC XML files in *paths*.

    Args:
        paths: iterable of directories containing annotation XML files.

    Returns:
        list of str: unique class names in first-seen order, with the
        background class 'none' at index 0, so a label's index in this list
        is its integer id (see _find_image_bounding_boxes).
    """
    # Local name deliberately does not shadow the module-level `classes`.
    found = ['none']
    for path in paths:
        for filename in os.listdir(path):
            # Only annotation files; anything else is skipped (the old
            # `else: continue` at the end of the loop was dead code).
            if not filename.endswith('.xml'):
                continue
            root = xml_tree.parse(os.path.join(path, filename)).getroot()
            for obj in root.findall('object'):
                label = obj.find('name').text
                if label not in found:
                    found.append(label)
    return found
def generate_classes_file(path, classes):
    """Write the class list to <path>/classes.txt, one name per line.

    Args:
        path: directory in which to create classes.txt.
        classes: list of class-name strings.
    """
    # 'with' guarantees the file is closed even on error; joining avoids the
    # old per-element "is this the last item?" comparison, which dropped a
    # newline whenever a duplicate of the final class appeared earlier.
    with open(os.path.join(path, 'classes.txt'), 'w') as class_file:
        class_file.write('\n'.join(classes))
def _int64_feature(value):
    """Wrap *value* (an int or a list of ints) as an Int64List Feature."""
    values = value if isinstance(value, list) else [value]
    return tf.train.Feature(int64_list=tf.train.Int64List(value=values))
def _float_feature(value):
    """Wrap *value* (a float or a list of floats) as a FloatList Feature."""
    values = value if isinstance(value, list) else [value]
    return tf.train.Feature(float_list=tf.train.FloatList(value=values))
def _bytes_list_feature(value):
    """Wrap *value* (bytes or a list of bytes) as a BytesList Feature."""
    values = value if isinstance(value, list) else [value]
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=values))
def _bytes_feature(value):
    """Wrap *value* (str or bytes) as a single-element BytesList Feature."""
    # BytesList only accepts byte strings, so encode text first.
    payload = (six.binary_type(value, encoding='utf-8')
               if isinstance(value, six.string_types) else value)
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[payload]))
def _convert_to_example(filename, image_name, image_buffer, bboxes, labels, labels_text,
                        difficult, truncated, height, width):
    """Build an Example proto for one annotated image.

    Args:
        filename: string, path to the image file (not stored in the proto).
        image_name: string, bare image filename stored in 'image/filename'.
        image_buffer: string, JPEG-encoded image bytes.
        bboxes: list of (ymin, xmin, ymax, xmax) tuples, already normalized
            to [0, 1] by the image size.
        labels: list of int class indices, one per bbox.
        labels_text: list of bytes class names, one per bbox.
        difficult: list of ints flagging difficult boxes.
        truncated: list of ints flagging truncated boxes.
        height: integer, image height in pixels.
        width: integer, image width in pixels.

    Returns:
        Example proto.
    """
    ymin = []
    xmin = []
    ymax = []
    xmax = []
    # Split the per-box tuples into parallel coordinate lists. A plain loop
    # replaces the old side-effecting list comprehension, which existed only
    # for its .append() calls and needed a pylint suppression.
    for b in bboxes:
        assert len(b) == 4
        ymin.append(b[0])
        xmin.append(b[1])
        ymax.append(b[2])
        xmax.append(b[3])
    channels = 3
    image_format = 'JPEG'
    example = tf.train.Example(features=tf.train.Features(feature={
        'image/height': _int64_feature(height),
        'image/width': _int64_feature(width),
        'image/channels': _int64_feature(channels),
        'image/shape': _int64_feature([height, width, channels]),
        'image/object/bbox/xmin': _float_feature(xmin),
        'image/object/bbox/xmax': _float_feature(xmax),
        'image/object/bbox/ymin': _float_feature(ymin),
        'image/object/bbox/ymax': _float_feature(ymax),
        'image/object/bbox/label': _int64_feature(labels),
        'image/object/bbox/label_text': _bytes_list_feature(labels_text),
        'image/object/bbox/difficult': _int64_feature(difficult),
        'image/object/bbox/truncated': _int64_feature(truncated),
        'image/format': _bytes_feature(image_format),
        'image/filename': _bytes_feature(image_name.encode('utf8')),
        'image/encoded': _bytes_feature(image_buffer)}))
    return example
class ImageCoder(object):
    """Helper class that provides TensorFlow image coding utilities.

    All conversions share one tf.Session; the placeholder/op pairs are built
    once in __init__ and fed per call.
    """

    def __init__(self):
        # Create a single Session to run all image coding calls.
        self._sess = tf.Session()
        # Initializes function that converts PNG to JPEG data.
        self._png_data = tf.placeholder(dtype=tf.string)
        image = tf.image.decode_png(self._png_data, channels=3)
        self._png_to_jpeg = tf.image.encode_jpeg(
            image, format='rgb', quality=100)
        # Initializes function that converts CMYK JPEG data to RGB JPEG data.
        self._cmyk_data = tf.placeholder(dtype=tf.string)
        # channels=0 lets the decoder use the channel count embedded in the JPEG.
        image = tf.image.decode_jpeg(self._cmyk_data, channels=0)
        self._cmyk_to_rgb = tf.image.encode_jpeg(
            image, format='rgb', quality=100)
        # Initializes function that decodes RGB JPEG data.
        self._decode_jpeg_data = tf.placeholder(dtype=tf.string)
        self._decode_jpeg = tf.image.decode_jpeg(
            self._decode_jpeg_data, channels=3)

    def png_to_jpeg(self, image_data):
        """Re-encode PNG bytes as JPEG bytes."""
        return self._sess.run(self._png_to_jpeg,
                              feed_dict={self._png_data: image_data})

    def cmyk_to_rgb(self, image_data):
        """Re-encode CMYK JPEG bytes as RGB JPEG bytes."""
        return self._sess.run(self._cmyk_to_rgb,
                              feed_dict={self._cmyk_data: image_data})

    def decode_jpeg(self, image_data):
        """Decode JPEG bytes to an HxWx3 image array."""
        image = self._sess.run(self._decode_jpeg,
                               feed_dict={self._decode_jpeg_data: image_data})
        assert len(image.shape) == 3
        assert image.shape[2] == 3
        return image
def _process_image(filename, coder):
    """Process a single image file.

    Args:
        filename: string, path to an image file e.g., '/path/to/example.JPG'.
        coder: instance of ImageCoder to provide TensorFlow image coding utils.

    Returns:
        image_buffer: string, JPEG encoding of RGB image.
        height: integer, image height in pixels.
        width: integer, image width in pixels.
    """
    # Read the image file.
    with tf.gfile.FastGFile(filename, 'rb') as f:
        image_data = f.read()
    # Decode the RGB JPEG — only to validate it and read its dimensions;
    # the raw JPEG bytes are what gets returned.
    image = coder.decode_jpeg(image_data)
    # Check that image converted to RGB
    assert len(image.shape) == 3
    height = image.shape[0]
    width = image.shape[1]
    assert image.shape[2] == 3
    return image_data, height, width
def _find_image_bounding_boxes(directory, cur_record, classes):
    """Find the bounding boxes for a given image file.

    Args:
        directory: string; the path of all datas.
        cur_record: list of strings; the first of which is the sub-directory of cur_record, the second is the image filename.
        classes: list of class names; a label's index in this list is its id.

    Returns:
        bboxes: List of (ymin, xmin, ymax, xmax) tuples, normalized to [0, 1].
        labels: List of labels for bounding box.
        labels_text: List of labels' name for bounding box.
        difficult: List of ints indicate the difficulty of that bounding box.
        truncated: List of ints indicate the truncation of that bounding box.
    """
    # NOTE(review): str.replace swaps every 'jpg' occurrence, not just the
    # extension — fine unless 'jpg' appears inside the basename; confirm.
    anna_file = os.path.join(
        directory, cur_record[0], 'Annotations', cur_record[1].replace('jpg', 'xml'))
    tree = xml_tree.parse(anna_file)
    root = tree.getroot()
    # Image shape, read as [height, width, depth].
    size = root.find('size')
    shape = [int(size.find('height').text),
             int(size.find('width').text),
             int(size.find('depth').text)]
    # Find annotations.
    bboxes = []
    labels = []
    labels_text = []
    difficult = []
    truncated = []
    for obj in root.findall('object'):
        label = obj.find('name').text
        labels.append(int(classes.index(label)))
        labels_text.append(label.encode('ascii'))
        # 'difficult' and 'truncated' are optional in the XML; default to 0.
        isdifficult = obj.find('difficult')
        if isdifficult is not None:
            difficult.append(int(isdifficult.text))
        else:
            difficult.append(0)
        istruncated = obj.find('truncated')
        if istruncated is not None:
            truncated.append(int(istruncated.text))
        else:
            truncated.append(0)
        bbox = obj.find('bndbox')
        # Normalize pixel coordinates by image height/width.
        bboxes.append((float(bbox.find('ymin').text) / shape[0],
                       float(bbox.find('xmin').text) / shape[1],
                       float(bbox.find('ymax').text) / shape[0],
                       float(bbox.find('xmax').text) / shape[1]
                       ))
    return bboxes, labels, labels_text, difficult, truncated
def _process_image_files_batch(coder, thread_index, ranges, name, directory, all_records, num_shards, classes):
    """Processes and saves list of images as TFRecord in 1 thread.

    Args:
        coder: instance of ImageCoder to provide TensorFlow image coding utils.
        thread_index: integer, unique batch to run index is within [0, len(ranges)).
        ranges: list of pairs of integers specifying ranges of each batches to
            analyze in parallel.
        name: string, unique identifier specifying the data set
        directory: string; the path of all datas
        all_records: list of string tuples; the first of each tuple is the sub-directory of the record, the second is the image filename.
        num_shards: integer number of shards for this data set.
        classes: list of class names; a label's index in this list is its id.
    """
    # Each thread produces N shards where N = int(num_shards / num_threads).
    # For instance, if num_shards = 128, and the num_threads = 2, then the first
    # thread would produce shards [0, 64).
    num_threads = len(ranges)
    assert not num_shards % num_threads
    num_shards_per_batch = int(num_shards / num_threads)
    # Evenly split this thread's record range across its shards.
    shard_ranges = np.linspace(ranges[thread_index][0],
                               ranges[thread_index][1],
                               num_shards_per_batch + 1).astype(int)
    num_files_in_thread = ranges[thread_index][1] - ranges[thread_index][0]
    counter = 0
    for s in range(num_shards_per_batch):
        # Generate a sharded version of the file name, e.g. 'train-00002-of-00010'
        shard = thread_index * num_shards_per_batch + s
        output_filename = '%s-%.5d-of-%.5d' % (name, shard, num_shards)
        output_file = os.path.join(FLAGS.output_directory, output_filename)
        writer = tf.python_io.TFRecordWriter(output_file)
        shard_counter = 0
        files_in_shard = np.arange(
            shard_ranges[s], shard_ranges[s + 1], dtype=int)
        for i in files_in_shard:
            cur_record = all_records[i]
            filename = os.path.join(
                directory, cur_record[0], 'JPEGImages', cur_record[1])
            bboxes, labels, labels_text, difficult, truncated = _find_image_bounding_boxes(
                directory, cur_record, classes)
            image_buffer, height, width = _process_image(filename, coder)
            example = _convert_to_example(filename, cur_record[1], image_buffer, bboxes, labels, labels_text,
                                          difficult, truncated, height, width)
            writer.write(example.SerializeToString())
            shard_counter += 1
            counter += 1
            # Periodic progress report.
            if not counter % 1000:
                print('%s [thread %d]: Processed %d of %d images in thread batch.' %
                      (datetime.now(), thread_index, counter, num_files_in_thread))
                sys.stdout.flush()
        writer.close()
        print('%s [thread %d]: Wrote %d images to %s' %
              (datetime.now(), thread_index, shard_counter, output_file))
        sys.stdout.flush()
        shard_counter = 0
    # NOTE(review): the '%d shards' placeholder below prints
    # num_files_in_thread, not the shard count — looks like a message bug.
    print('%s [thread %d]: Wrote %d images to %d shards.' %
          (datetime.now(), thread_index, counter, num_files_in_thread))
    sys.stdout.flush()
def _process_image_files(name, directory, all_records, num_shards, classes):
    """Process and save list of images as TFRecord of Example protos.

    Args:
        name: string, unique identifier specifying the data set
        directory: string; the path of all datas
        all_records: list of string tuples; the first of each tuple is the
            sub-directory of the record, the second is the image filename.
        num_shards: integer number of shards for this data set.
        classes: list of class names; a label's index in this list is its id.
    """
    # Break all images into batches with a [ranges[i][0], ranges[i][1]].
    # np.int was a deprecated alias removed in NumPy 1.24; the builtin int
    # is the documented replacement and behaves identically here.
    spacing = np.linspace(0, len(all_records),
                          FLAGS.num_threads + 1).astype(int)
    ranges = []
    for i in range(len(spacing) - 1):
        ranges.append([spacing[i], spacing[i + 1]])
    # Launch a thread for each batch.
    print('Launching %d threads for spacings: %s' %
          (FLAGS.num_threads, ranges))
    sys.stdout.flush()
    # Create a mechanism for monitoring when all threads are finished.
    coord = tf.train.Coordinator()
    # Create a generic TensorFlow-based utility for converting all image codings.
    coder = ImageCoder()
    threads = []  # (was initialized twice before; once is enough)
    for thread_index in range(len(ranges)):
        args = (coder, thread_index, ranges, name,
                directory, all_records, num_shards, classes)
        t = threading.Thread(target=_process_image_files_batch, args=args)
        t.start()
        threads.append(t)
    # Wait for all the threads to terminate.
    coord.join(threads)
    print('%s: Finished writing all %d images in data set.' %
          (datetime.now(), len(all_records)))
    sys.stdout.flush()
def _process_dataset(name, directory, all_splits, num_shards, classes):
    """Convert one complete data set into sharded TFRecord files.

    Args:
        name: string, unique identifier specifying the data set.
        directory: string, root path to the data set.
        all_splits: list of strings, sub-path to the data set.
        num_shards: integer number of shards for this data set.
        classes: list of class names used to map labels to indices.
    """
    all_records = []
    for split in all_splits:
        jpeg_file_path = os.path.join(directory, split, 'JPEGImages')
        for im_name in tf.gfile.ListDirectory(jpeg_file_path):
            if im_name.strip()[-3:] == 'jpg':
                all_records.append((split, im_name))
    # Deterministically shuffle the records so every shard gets a mix.
    shuffled_index = list(range(len(all_records)))
    random.seed(RANDOM_SEED)
    random.shuffle(shuffled_index)
    all_records = [all_records[i] for i in shuffled_index]
    _process_image_files(name, directory, all_records, num_shards, classes)
def parse_comma_list(args):
    """Split a comma-separated string into a list of whitespace-trimmed items."""
    return list(map(str.strip, args.split(',')))
def main(unused_argv):
    """Entry point: stage the data, then write the val and train TFRecords."""
    # Each thread writes an integral number of shards, so shard counts must
    # divide evenly by the thread count.
    assert not FLAGS.train_shards % FLAGS.num_threads, (
        'Please make the FLAGS.num_threads commensurate with FLAGS.train_shards')
    assert not FLAGS.validation_shards % FLAGS.num_threads, (
        'Please make the FLAGS.num_threads commensurate with '
        'FLAGS.validation_shards')
    print('Saving results to %s' % FLAGS.output_directory)
    # Run it!
    classes = setup_train_data()
    generate_classes_file(os.path.join(FLAGS.DATA_PATH, 'tmp'), classes)
    _process_dataset('val', FLAGS.DATA_PATH, ['tmp/test'],
                     FLAGS.validation_shards, classes)
    _process_dataset('train', FLAGS.DATA_PATH,
                     ['tmp/train'], FLAGS.train_shards, classes)


if __name__ == '__main__':
    tf.app.run()
|
# Copyright 2018 <NAME>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import os
import random
import sys
import threading
import xml.etree.ElementTree as xml_tree
import shutil
import numpy as np
import six
import tensorflow as tf
'''How to organize your dataset folder:
VOCROOT/
|->VOC2007/
| |->Annotations/
| |->ImageSets/
| |->...
|->VOC2012/
| |->Annotations/
| |->ImageSets/
| |->...
|->VOC2007TEST/
| |->Annotations/
| |->...
'''
tf.app.flags.DEFINE_string('DATA_PATH', '~/data',
'All datas directory')
tf.app.flags.DEFINE_string('train_splits', 'VOC2007, VOC2012',
'Comma-separated list of the training data sub-directory')
tf.app.flags.DEFINE_string('validation_splits', 'VOC2007TEST',
'Comma-separated list of the validation data sub-directory')
tf.app.flags.DEFINE_string('output_directory', '/media/rs/7A0EE8880EE83EAF/Detections/SSD/dataset/tfrecords',
'Output data directory')
tf.app.flags.DEFINE_integer('train_shards', 16,
'Number of shards in training TFRecord files.')
tf.app.flags.DEFINE_integer('validation_shards', 16,
'Number of shards in validation TFRecord files.')
tf.app.flags.DEFINE_integer('num_threads', 8,
'Number of threads to preprocess the images.')
RANDOM_SEED = 180428
FLAGS = tf.app.flags.FLAGS
classes = []
def set_training_datasets():
    """Interactively choose which dataset folders under FLAGS.DATA_PATH to use.

    Lists the sub-directories of FLAGS.DATA_PATH, prompts the user for a
    space-separated list of indices, and returns the corresponding paths.

    Returns:
        list of str: FLAGS.DATA_PATH joined with each chosen sub-directory.
    """
    try:
        datasets = os.listdir(FLAGS.DATA_PATH)
    except OSError as e:
        # Narrowed from a bare Exception: only a missing/unreadable directory
        # is expected here. sys.exit replaces the interactive-only exit().
        print('No folder named ~/data')
        print('Exception: ', e)
        sys.exit(1)
    if not datasets:
        # Previously execution fell through to a prompt with nothing to
        # choose from; there is no valid selection, so stop here.
        print('No datasets in ~/data, run config_new_dataset.py on your dataset and move the dataset folder to ~/data')
        sys.exit(1)
    for i, name in enumerate(datasets):
        print('[', i, ']', name)
    user_input = str(input(
        'Input the number for the datasets you wish to train on, separate numbers with space: ')).split()
    return [os.path.join(FLAGS.DATA_PATH, datasets[int(dataset_index)])
            for dataset_index in user_input]
def setup_tmp_folder():
    """Recreate a clean FLAGS.DATA_PATH/tmp tree for staging train/test data.

    Removes any previous tmp tree, then creates
    tmp/{test,train}/{Annotations,JPEGImages}.

    Returns:
        [tmp_path, tmp_train_path, tmp_test_path]
    """
    tmp_root = os.path.join(FLAGS.DATA_PATH, 'tmp')
    test_dir = os.path.join(tmp_root, 'test')
    train_dir = os.path.join(tmp_root, 'train')
    # Start from scratch: wipe any leftovers from a previous run.
    if os.path.exists(tmp_root):
        shutil.rmtree(tmp_root, ignore_errors=True)
    for folder in (tmp_root, test_dir, train_dir):
        os.makedirs(folder)
    for split_dir in (test_dir, train_dir):
        for sub in ('Annotations', 'JPEGImages'):
            os.makedirs(os.path.join(split_dir, sub))
    return [tmp_root, train_dir, test_dir]
def setup_train_data():
    """Stage the selected datasets into the tmp train/test folders.

    Copies every .jpg into JPEGImages and every .xml into Annotations for
    both the 'train' and 'test' split of each chosen dataset, then collects
    the label set found in the staged annotations.

    Returns:
        list of str: class names, with 'none' (background) at index 0.
    """
    datasets = set_training_datasets()
    _, tmp_folder_train_path, tmp_folder_test_path = setup_tmp_folder()
    # Train and test splits are handled identically; loop instead of
    # duplicating the copy logic verbatim as before.
    for dataset in datasets:
        for split, dest in (('train', tmp_folder_train_path),
                            ('test', tmp_folder_test_path)):
            _copy_split(os.path.join(dataset, split), dest)
    return get_classes([os.path.join(tmp_folder_test_path, 'Annotations'),
                        os.path.join(tmp_folder_train_path, 'Annotations')])


def _copy_split(src_dir, dest_dir):
    """Copy one split's .jpg files to JPEGImages and .xml files to Annotations."""
    for filename in os.listdir(src_dir):
        src = os.path.join(src_dir, filename)
        if filename.endswith('.jpg'):
            shutil.copy2(src, os.path.join(dest_dir, 'JPEGImages'))
        elif filename.endswith('.xml'):
            shutil.copy2(src, os.path.join(dest_dir, 'Annotations'))
def get_classes(paths):
    """Collect object class names from all Pascal-VOC XML files in *paths*.

    Args:
        paths: iterable of directories containing annotation XML files.

    Returns:
        list of str: unique class names in first-seen order, with the
        background class 'none' at index 0, so a label's index in this list
        is its integer id (see _find_image_bounding_boxes).
    """
    # Local name deliberately does not shadow the module-level `classes`.
    found = ['none']
    for path in paths:
        for filename in os.listdir(path):
            # Only annotation files; anything else is skipped (the old
            # `else: continue` at the end of the loop was dead code).
            if not filename.endswith('.xml'):
                continue
            root = xml_tree.parse(os.path.join(path, filename)).getroot()
            for obj in root.findall('object'):
                label = obj.find('name').text
                if label not in found:
                    found.append(label)
    return found
def generate_classes_file(path, classes):
    """Write the class list to <path>/classes.txt, one name per line.

    Args:
        path: directory in which to create classes.txt.
        classes: list of class-name strings.
    """
    # 'with' guarantees the file is closed even on error; joining avoids the
    # old per-element "is this the last item?" comparison, which dropped a
    # newline whenever a duplicate of the final class appeared earlier.
    with open(os.path.join(path, 'classes.txt'), 'w') as class_file:
        class_file.write('\n'.join(classes))
def _int64_feature(value):
    """Wrap *value* (an int or a list of ints) as an Int64List Feature."""
    values = value if isinstance(value, list) else [value]
    return tf.train.Feature(int64_list=tf.train.Int64List(value=values))
def _float_feature(value):
    """Wrap *value* (a float or a list of floats) as a FloatList Feature."""
    values = value if isinstance(value, list) else [value]
    return tf.train.Feature(float_list=tf.train.FloatList(value=values))
def _bytes_list_feature(value):
    """Wrap *value* (bytes or a list of bytes) as a BytesList Feature."""
    values = value if isinstance(value, list) else [value]
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=values))
def _bytes_feature(value):
    """Wrap *value* (str or bytes) as a single-element BytesList Feature."""
    # BytesList only accepts byte strings, so encode text first.
    payload = (six.binary_type(value, encoding='utf-8')
               if isinstance(value, six.string_types) else value)
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[payload]))
def _convert_to_example(filename, image_name, image_buffer, bboxes, labels, labels_text,
                        difficult, truncated, height, width):
    """Build an Example proto for one annotated image.

    Args:
        filename: string, path to the image file (not stored in the proto).
        image_name: string, bare image filename stored in 'image/filename'.
        image_buffer: string, JPEG-encoded image bytes.
        bboxes: list of (ymin, xmin, ymax, xmax) tuples, already normalized
            to [0, 1] by the image size.
        labels: list of int class indices, one per bbox.
        labels_text: list of bytes class names, one per bbox.
        difficult: list of ints flagging difficult boxes.
        truncated: list of ints flagging truncated boxes.
        height: integer, image height in pixels.
        width: integer, image width in pixels.

    Returns:
        Example proto.
    """
    ymin = []
    xmin = []
    ymax = []
    xmax = []
    # Split the per-box tuples into parallel coordinate lists. A plain loop
    # replaces the old side-effecting list comprehension, which existed only
    # for its .append() calls and needed a pylint suppression.
    for b in bboxes:
        assert len(b) == 4
        ymin.append(b[0])
        xmin.append(b[1])
        ymax.append(b[2])
        xmax.append(b[3])
    channels = 3
    image_format = 'JPEG'
    example = tf.train.Example(features=tf.train.Features(feature={
        'image/height': _int64_feature(height),
        'image/width': _int64_feature(width),
        'image/channels': _int64_feature(channels),
        'image/shape': _int64_feature([height, width, channels]),
        'image/object/bbox/xmin': _float_feature(xmin),
        'image/object/bbox/xmax': _float_feature(xmax),
        'image/object/bbox/ymin': _float_feature(ymin),
        'image/object/bbox/ymax': _float_feature(ymax),
        'image/object/bbox/label': _int64_feature(labels),
        'image/object/bbox/label_text': _bytes_list_feature(labels_text),
        'image/object/bbox/difficult': _int64_feature(difficult),
        'image/object/bbox/truncated': _int64_feature(truncated),
        'image/format': _bytes_feature(image_format),
        'image/filename': _bytes_feature(image_name.encode('utf8')),
        'image/encoded': _bytes_feature(image_buffer)}))
    return example
class ImageCoder(object):
    """Helper class that provides TensorFlow image coding utilities.

    All conversions share one tf.Session; the placeholder/op pairs are built
    once in __init__ and fed per call.
    """

    def __init__(self):
        # Create a single Session to run all image coding calls.
        self._sess = tf.Session()
        # Initializes function that converts PNG to JPEG data.
        self._png_data = tf.placeholder(dtype=tf.string)
        image = tf.image.decode_png(self._png_data, channels=3)
        self._png_to_jpeg = tf.image.encode_jpeg(
            image, format='rgb', quality=100)
        # Initializes function that converts CMYK JPEG data to RGB JPEG data.
        self._cmyk_data = tf.placeholder(dtype=tf.string)
        # channels=0 lets the decoder use the channel count embedded in the JPEG.
        image = tf.image.decode_jpeg(self._cmyk_data, channels=0)
        self._cmyk_to_rgb = tf.image.encode_jpeg(
            image, format='rgb', quality=100)
        # Initializes function that decodes RGB JPEG data.
        self._decode_jpeg_data = tf.placeholder(dtype=tf.string)
        self._decode_jpeg = tf.image.decode_jpeg(
            self._decode_jpeg_data, channels=3)

    def png_to_jpeg(self, image_data):
        """Re-encode PNG bytes as JPEG bytes."""
        return self._sess.run(self._png_to_jpeg,
                              feed_dict={self._png_data: image_data})

    def cmyk_to_rgb(self, image_data):
        """Re-encode CMYK JPEG bytes as RGB JPEG bytes."""
        return self._sess.run(self._cmyk_to_rgb,
                              feed_dict={self._cmyk_data: image_data})

    def decode_jpeg(self, image_data):
        """Decode JPEG bytes to an HxWx3 image array."""
        image = self._sess.run(self._decode_jpeg,
                               feed_dict={self._decode_jpeg_data: image_data})
        assert len(image.shape) == 3
        assert image.shape[2] == 3
        return image
def _process_image(filename, coder):
    """Process a single image file.

    Args:
        filename: string, path to an image file e.g., '/path/to/example.JPG'.
        coder: instance of ImageCoder to provide TensorFlow image coding utils.

    Returns:
        image_buffer: string, JPEG encoding of RGB image.
        height: integer, image height in pixels.
        width: integer, image width in pixels.
    """
    # Read the image file.
    with tf.gfile.FastGFile(filename, 'rb') as f:
        image_data = f.read()
    # Decode the RGB JPEG — only to validate it and read its dimensions;
    # the raw JPEG bytes are what gets returned.
    image = coder.decode_jpeg(image_data)
    # Check that image converted to RGB
    assert len(image.shape) == 3
    height = image.shape[0]
    width = image.shape[1]
    assert image.shape[2] == 3
    return image_data, height, width
def _find_image_bounding_boxes(directory, cur_record, classes):
    """Find the bounding boxes for a given image file.

    Args:
        directory: string; the path of all datas.
        cur_record: list of strings; the first of which is the sub-directory of cur_record, the second is the image filename.
        classes: list of class names; a label's index in this list is its id.

    Returns:
        bboxes: List of (ymin, xmin, ymax, xmax) tuples, normalized to [0, 1].
        labels: List of labels for bounding box.
        labels_text: List of labels' name for bounding box.
        difficult: List of ints indicate the difficulty of that bounding box.
        truncated: List of ints indicate the truncation of that bounding box.
    """
    # NOTE(review): str.replace swaps every 'jpg' occurrence, not just the
    # extension — fine unless 'jpg' appears inside the basename; confirm.
    anna_file = os.path.join(
        directory, cur_record[0], 'Annotations', cur_record[1].replace('jpg', 'xml'))
    tree = xml_tree.parse(anna_file)
    root = tree.getroot()
    # Image shape, read as [height, width, depth].
    size = root.find('size')
    shape = [int(size.find('height').text),
             int(size.find('width').text),
             int(size.find('depth').text)]
    # Find annotations.
    bboxes = []
    labels = []
    labels_text = []
    difficult = []
    truncated = []
    for obj in root.findall('object'):
        label = obj.find('name').text
        labels.append(int(classes.index(label)))
        labels_text.append(label.encode('ascii'))
        # 'difficult' and 'truncated' are optional in the XML; default to 0.
        isdifficult = obj.find('difficult')
        if isdifficult is not None:
            difficult.append(int(isdifficult.text))
        else:
            difficult.append(0)
        istruncated = obj.find('truncated')
        if istruncated is not None:
            truncated.append(int(istruncated.text))
        else:
            truncated.append(0)
        bbox = obj.find('bndbox')
        # Normalize pixel coordinates by image height/width.
        bboxes.append((float(bbox.find('ymin').text) / shape[0],
                       float(bbox.find('xmin').text) / shape[1],
                       float(bbox.find('ymax').text) / shape[0],
                       float(bbox.find('xmax').text) / shape[1]
                       ))
    return bboxes, labels, labels_text, difficult, truncated
def _process_image_files_batch(coder, thread_index, ranges, name, directory, all_records, num_shards, classes):
    """Processes and saves list of images as TFRecord in 1 thread.

    Args:
      coder: instance of ImageCoder to provide TensorFlow image coding utils.
      thread_index: integer, unique batch to run index is within [0, len(ranges)).
      ranges: list of pairs of integers specifying ranges of each batches to
        analyze in parallel.
      name: string, unique identifier specifying the data set.
      directory: string; the path of all datas.
      all_records: list of string tuples; the first of each tuple is the
        sub-directory of the record, the second is the image filename.
      num_shards: integer number of shards for this data set.
      classes: ordered list of class names used to map labels to integer ids.
    """
    # Each thread produces N shards where N = int(num_shards / num_threads).
    # For instance, if num_shards = 128, and the num_threads = 2, then the first
    # thread would produce shards [0, 64).
    num_threads = len(ranges)
    assert not num_shards % num_threads
    num_shards_per_batch = int(num_shards / num_threads)
    # Evenly partition this thread's record range into its shards.
    shard_ranges = np.linspace(ranges[thread_index][0],
                               ranges[thread_index][1],
                               num_shards_per_batch + 1).astype(int)
    num_files_in_thread = ranges[thread_index][1] - ranges[thread_index][0]
    counter = 0  # total images written by this thread
    for s in range(num_shards_per_batch):
        # Generate a sharded version of the file name, e.g. 'train-00002-of-00010'
        shard = thread_index * num_shards_per_batch + s
        output_filename = '%s-%.5d-of-%.5d' % (name, shard, num_shards)
        output_file = os.path.join(FLAGS.output_directory, output_filename)
        writer = tf.python_io.TFRecordWriter(output_file)
        shard_counter = 0  # images written into the current shard
        files_in_shard = np.arange(
            shard_ranges[s], shard_ranges[s + 1], dtype=int)
        for i in files_in_shard:
            cur_record = all_records[i]
            filename = os.path.join(
                directory, cur_record[0], 'JPEGImages', cur_record[1])
            # Parse the XML annotation and decode the JPEG for this record.
            bboxes, labels, labels_text, difficult, truncated = _find_image_bounding_boxes(
                directory, cur_record, classes)
            image_buffer, height, width = _process_image(filename, coder)
            example = _convert_to_example(filename, cur_record[1], image_buffer, bboxes, labels, labels_text,
                                          difficult, truncated, height, width)
            writer.write(example.SerializeToString())
            shard_counter += 1
            counter += 1
            if not counter % 1000:
                # Periodic progress report.
                print('%s [thread %d]: Processed %d of %d images in thread batch.' %
                      (datetime.now(), thread_index, counter, num_files_in_thread))
                sys.stdout.flush()
        writer.close()
        print('%s [thread %d]: Wrote %d images to %s' %
              (datetime.now(), thread_index, shard_counter, output_file))
        sys.stdout.flush()
        shard_counter = 0
    # BUGFIX: the summary previously reported the number of FILES in this
    # thread as the number of SHARDS; report the actual shard count.
    print('%s [thread %d]: Wrote %d images to %d shards.' %
          (datetime.now(), thread_index, counter, num_shards_per_batch))
    sys.stdout.flush()
def _process_image_files(name, directory, all_records, num_shards, classes):
    """Convert a list of images to sharded TFRecord files with worker threads.

    Args:
      name: string, unique identifier specifying the data set.
      directory: string; the path of all datas.
      all_records: list of string tuples; the first of each tuple is the
        sub-directory of the record, the second is the image filename.
      num_shards: integer number of shards for this data set.
      classes: ordered list of class names forwarded to each worker.
    """
    # Split the record list into FLAGS.num_threads contiguous index ranges.
    spacing = np.linspace(0, len(all_records),
                          FLAGS.num_threads + 1).astype(np.int)
    ranges = [[lo, hi] for lo, hi in zip(spacing[:-1], spacing[1:])]
    print('Launching %d threads for spacings: %s' %
          (FLAGS.num_threads, ranges))
    sys.stdout.flush()
    # Coordinator lets the main thread block until every worker finishes.
    coord = tf.train.Coordinator()
    # One ImageCoder is shared by all workers for image decoding.
    coder = ImageCoder()
    workers = []
    for batch_index in range(len(ranges)):
        worker = threading.Thread(
            target=_process_image_files_batch,
            args=(coder, batch_index, ranges, name,
                  directory, all_records, num_shards, classes))
        worker.start()
        workers.append(worker)
    # Wait for all the worker threads to terminate.
    coord.join(workers)
    print('%s: Finished writing all %d images in data set.' %
          (datetime.now(), len(all_records)))
    sys.stdout.flush()
def _process_dataset(name, directory, all_splits, num_shards, classes):
    """Process a complete data set and save it as a TFRecord.

    Args:
      name: string, unique identifier specifying the data set.
      directory: string, root path to the data set.
      all_splits: list of strings, sub-path to the data set.
      num_shards: integer number of shards for this data set.
      classes: ordered list of class names forwarded to the writers.
    """
    # Collect (split, filename) pairs for every .jpg under each split.
    records = []
    for split in all_splits:
        listing = tf.gfile.ListDirectory(
            os.path.join(directory, split, 'JPEGImages'))
        for im_name in listing:
            if im_name.strip()[-3:] == 'jpg':
                records.append((split, im_name))
    # Deterministically shuffle the records via a fixed-seed permutation.
    order = list(range(len(records)))
    random.seed(RANDOM_SEED)
    random.shuffle(order)
    records = [records[i] for i in order]
    _process_image_files(name, directory, records, num_shards, classes)
def parse_comma_list(args):
    """Split a comma-separated string into a list of whitespace-stripped tokens."""
    pieces = args.split(',')
    return list(map(str.strip, pieces))
def main(unused_argv):
    """Entry point: convert the train/validation splits into TFRecord shards."""
    # Shard counts must divide evenly among the worker threads.
    assert not FLAGS.train_shards % FLAGS.num_threads, (
        'Please make the FLAGS.num_threads commensurate with FLAGS.train_shards')
    assert not FLAGS.validation_shards % FLAGS.num_threads, (
        'Please make the FLAGS.num_threads commensurate with '
        'FLAGS.validation_shards')
    print('Saving results to %s' % FLAGS.output_directory)
    # Run it!
    # NOTE(review): presumably prepares the tmp/train and tmp/test splits and
    # returns the ordered class-name list -- confirm against setup_train_data.
    classes = setup_train_data()
    generate_classes_file(os.path.join(FLAGS.DATA_PATH, 'tmp'), classes)
    _process_dataset('val', FLAGS.DATA_PATH, ['tmp/test'],
                     FLAGS.validation_shards, classes)
    _process_dataset('train', FLAGS.DATA_PATH,
                     ['tmp/train'], FLAGS.train_shards, classes)
if __name__ == '__main__':
    # TF1 script entry point: parses FLAGS, then calls main().
    tf.app.run()
|
en
| 0.761366
|
# Copyright 2018 <NAME> # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= How to organize your dataset folder: VOCROOT/ |->VOC2007/ | |->Annotations/ | |->ImageSets/ | |->... |->VOC2012/ | |->Annotations/ | |->ImageSets/ | |->... |->VOC2007TEST/ | |->Annotations/ | |->... Wrapper for inserting int64 features into Example proto. Wrapper for inserting float features into Example proto. Wrapper for inserting a list of bytes features into Example proto. Wrapper for inserting bytes features into Example proto. Build an Example proto for an example. Args: filename: string, path to an image file, e.g., '/path/to/example.JPG' image_buffer: string, JPEG encoding of RGB image bboxes: List of bounding boxes for each image labels: List of labels for bounding box labels_text: List of labels' name for bounding box difficult: List of ints indicate the difficulty of that bounding box truncated: List of ints indicate the truncation of that bounding box height: integer, image height in pixels width: integer, image width in pixels Returns: Example proto # pylint: disable=expression-not-assigned # pylint: enable=expression-not-assigned Helper class that provides TensorFlow image coding utilities. # Create a single Session to run all image coding calls. # Initializes function that converts PNG to JPEG data. # Initializes function that converts CMYK JPEG data to RGB JPEG data. # Initializes function that decodes RGB JPEG data. 
Process a single image file. Args: filename: string, path to an image file e.g., '/path/to/example.JPG'. coder: instance of ImageCoder to provide TensorFlow image coding utils. Returns: image_buffer: string, JPEG encoding of RGB image. height: integer, image height in pixels. width: integer, image width in pixels. # Read the image file. # Decode the RGB JPEG. # Check that image converted to RGB Find the bounding boxes for a given image file. Args: directory: string; the path of all datas. cur_record: list of strings; the first of which is the sub-directory of cur_record, the second is the image filename. Returns: bboxes: List of bounding boxes for each image. labels: List of labels for bounding box. labels_text: List of labels' name for bounding box. difficult: List of ints indicate the difficulty of that bounding box. truncated: List of ints indicate the truncation of that bounding box. # Image shape. # Find annotations. Processes and saves list of images as TFRecord in 1 thread. Args: coder: instance of ImageCoder to provide TensorFlow image coding utils. thread_index: integer, unique batch to run index is within [0, len(ranges)). ranges: list of pairs of integers specifying ranges of each batches to analyze in parallel. name: string, unique identifier specifying the data set directory: string; the path of all datas all_records: list of string tuples; the first of each tuple is the sub-directory of the record, the second is the image filename. num_shards: integer number of shards for this data set. # Each thread produces N shards where N = int(num_shards / num_threads). # For instance, if num_shards = 128, and the num_threads = 2, then the first # thread would produce shards [0, 64). # Generate a sharded version of the file name, e.g. 'train-00002-of-00010' Process and save list of images as TFRecord of Example protos. 
Args: name: string, unique identifier specifying the data set directory: string; the path of all datas all_records: list of string tuples; the first of each tuple is the sub-directory of the record, the second is the image filename. num_shards: integer number of shards for this data set. # Break all images into batches with a [ranges[i][0], ranges[i][1]]. # Launch a thread for each batch. # Create a mechanism for monitoring when all threads are finished. # Create a generic TensorFlow-based utility for converting all image codings. # Wait for all the threads to terminate. Process a complete data set and save it as a TFRecord. Args: name: string, unique identifier specifying the data set. directory: string, root path to the data set. all_splits: list of strings, sub-path to the data set. num_shards: integer number of shards for this data set. # Run it!
| 1.910257
| 2
|
django_chat/apps/web/urls.py
|
ipeternella/django-chat
| 7
|
6629914
|
"""
Module with urls for the web app.
"""
from django.urls import re_path
from django_chat.apps.web import views
app_name = "web"
urlpatterns = [
re_path(r"^chat/(?P<chat_room>\w+)/(?P<chat_user>\w+)/$", views.index, name="index"),
]
|
"""
Module with urls for the web app.
"""
from django.urls import re_path
from django_chat.apps.web import views
app_name = "web"
urlpatterns = [
re_path(r"^chat/(?P<chat_room>\w+)/(?P<chat_user>\w+)/$", views.index, name="index"),
]
|
en
| 0.840864
|
Module with urls for the web app.
| 2.036558
| 2
|
py/test/utils/media_utils_unittest.py
|
arccode/factory
| 3
|
6629915
|
<gh_stars>1-10
#!/usr/bin/env python3
# Copyright 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#####
#
# NOTE: This test is currently broken and is blocklisted in the top-level
# Makefile.
#
#####
import logging
import os
import subprocess
import tempfile
import unittest
from cros.factory.test.utils.media_utils import MediaMonitor
from cros.factory.test.utils.media_utils import MountedMedia
from cros.factory.external import glib
from cros.factory.external import gtk
from cros.factory.external import pyudev
# udev constants
_UDEV_ACTION_INSERT = 'add'      # udev action string emitted on device insertion
_UDEV_ACTION_REMOVE = 'remove'   # udev action string emitted on device removal
# File name/content written and read back to verify a mount is usable.
_WRITING_TEST_FILENAME = 'media_utils_unittest.test'
_WRITING_TEST_STR = 'Unittest writing test...'
# Partition number used when simulating partitioned block devices.
_VIRTUAL_PATITION_NUMBER = 3
class TestMountedMedia(unittest.TestCase):
    """Tests for the MountedMedia context manager, backed by a loop device."""

    def setUp(self):
        """Creates a temp file to mock as a media device."""
        self._virtual_device = tempfile.NamedTemporaryFile(
            prefix='media_utils_unitttest')
        # Size the backing file to 1 MiB and format it as ext3.
        exit_code, ret = subprocess.getstatusoutput(
            'truncate -s 1048576 %s && mkfs -F -t ext3 %s' %
            (self._virtual_device.name, self._virtual_device.name))
        self.assertEqual(0, exit_code)
        # Attach the file to a free loop device so it behaves like real media.
        exit_code, ret = subprocess.getstatusoutput('losetup --show -f %s' %
                                                    self._virtual_device.name)
        self._free_loop_device = ret
        self.assertEqual(0, exit_code)

    def tearDown(self):
        # Detach the loop device, then delete the backing temp file.
        exit_code, ret = subprocess.getstatusoutput(
            'losetup -d %s' % self._free_loop_device)
        self.assertEqual(0, exit_code)
        self._virtual_device.close()

    def testFailToMount(self):
        """Tests the MountedMedia throws exceptions when it fails."""
        def with_wrapper():
            with MountedMedia('/dev/device_not_exist') as path:
                pass
        self.assertRaises(Exception, with_wrapper)

    def testNormalMount(self):
        """Tests mounting partition."""
        with MountedMedia(self._free_loop_device) as path:
            with open(os.path.join(path, _WRITING_TEST_FILENAME), 'w') as f:
                f.write(_WRITING_TEST_STR)
            with open(os.path.join(path, _WRITING_TEST_FILENAME), 'r') as f:
                self.assertEqual(_WRITING_TEST_STR, f.readline())

    def testPartitionMountSDA(self):
        """Tests mounting partition.
        This tests mounting partition with devices enumerated
        in alphabets (ex, sda).
        """
        # A symlink named like an sdXN partition lets MountedMedia treat the
        # loop device as partition N of an alphabetically-named disk.
        virtual_partition = tempfile.NamedTemporaryFile(
            prefix='virtual_partition',
            suffix='sdc%d' % _VIRTUAL_PATITION_NUMBER)
        exit_code, ret = subprocess.getstatusoutput(
            'ln -s -f %s %s' %
            (self._free_loop_device, virtual_partition.name))
        self.assertEqual(0, exit_code)
        # Strip the trailing partition digit to get the "device" path.
        with MountedMedia(virtual_partition.name[:-1],
                          _VIRTUAL_PATITION_NUMBER) as path:
            with open(os.path.join(path, _WRITING_TEST_FILENAME), 'w') as f:
                f.write(_WRITING_TEST_STR)
            with open(os.path.join(path, _WRITING_TEST_FILENAME), 'r') as f:
                self.assertEqual(_WRITING_TEST_STR, f.readline())
        virtual_partition.close()

    def testPartitionMountMMCBLK0(self):
        """Tests mounting partition.
        This tests mounting partition with devices enumerated
        in alphabets (ex, mmcblk0).
        """
        virtual_partition = tempfile.NamedTemporaryFile(
            prefix='virtual_partition',
            suffix='mmcblk0p%d' % _VIRTUAL_PATITION_NUMBER)
        exit_code, ret = subprocess.getstatusoutput(
            'ln -s -f %s %s' %
            (self._free_loop_device, virtual_partition.name))
        self.assertEqual(0, exit_code)
        # mmcblk-style names carry a 'p<N>' suffix, so strip two characters.
        with MountedMedia(virtual_partition.name[:-2],
                          _VIRTUAL_PATITION_NUMBER) as path:
            with open(os.path.join(path, _WRITING_TEST_FILENAME), 'w') as f:
                f.write(_WRITING_TEST_STR)
            with open(os.path.join(path, _WRITING_TEST_FILENAME), 'r') as f:
                self.assertEqual(_WRITING_TEST_STR, f.readline())
        virtual_partition.close()

    def testPartitionMountFloppy(self):
        """Tests mounting a device without partition table."""
        with MountedMedia(self._free_loop_device, 1) as path:
            with open(os.path.join(path, _WRITING_TEST_FILENAME), 'w') as f:
                f.write(_WRITING_TEST_STR)
            with open(os.path.join(path, _WRITING_TEST_FILENAME), 'r') as f:
                self.assertEqual(_WRITING_TEST_STR, f.readline())
class TestMediaMonitor(unittest.TestCase):
    """Tests MediaMonitor callbacks using simulated udev device events."""

    def setUp(self):
        """Creates a temp file to mock as a media device."""
        self._virtual_device = tempfile.NamedTemporaryFile(
            prefix='media_utils_unitttest')
        # Size the backing file to 1 MiB (no filesystem needed for this test).
        exit_code, ret = subprocess.getstatusoutput(
            'truncate -s 1048576 %s' % self._virtual_device.name)
        self.assertEqual(0, exit_code)
        # Attach the file to a free loop device so udev sees a block device.
        exit_code, ret = subprocess.getstatusoutput('losetup --show -f %s' %
                                                    self._virtual_device.name)
        self._free_loop_device = ret
        self.assertEqual(0, exit_code)

    def tearDown(self):
        # Detach the loop device, then delete the backing temp file.
        exit_code, ret = subprocess.getstatusoutput(
            'losetup -d %s' % self._free_loop_device)
        self.assertEqual(0, exit_code)
        self._virtual_device.close()

    def testMediaMonitor(self):
        """Emits fake insert/remove events and checks both callbacks fire."""
        def on_insert(dev_path):
            self.assertEqual(self._free_loop_device, dev_path)
            self._media_inserted = True
            gtk.main_quit()

        def on_remove(dev_path):
            self.assertEqual(self._free_loop_device, dev_path)
            self._media_removed = True
            gtk.main_quit()

        def one_time_timer_mock_insert():
            monitor._observer.emit('device-event',
                                   _UDEV_ACTION_INSERT,
                                   self._mock_device)
            return False  # returning False makes the glib timer one-shot

        def one_time_timer_mock_remove():
            monitor._observer.emit('device-event',
                                   _UDEV_ACTION_REMOVE,
                                   self._mock_device)
            return False  # returning False makes the glib timer one-shot

        self._media_inserted = False
        self._media_removed = False
        self._context = pyudev.Context()
        # Wrap the loop device in a pyudev Device to pass to the mock events.
        self._mock_device = pyudev.Device.from_name(
            self._context, 'block',
            os.path.basename(self._free_loop_device))
        # Start the monitor.
        TIMEOUT_SECOND = 1
        monitor = MediaMonitor('block', 'disk')
        monitor.start(on_insert=on_insert, on_remove=on_remove)
        # Simulating the insertion of a valid media device.
        timer_tag = glib.timeout_add_seconds(TIMEOUT_SECOND,
                                             one_time_timer_mock_insert)
        gtk.main()
        # Simulating the removal of a valid media device.
        glib.source_remove(timer_tag)
        timer_tag = glib.timeout_add_seconds(TIMEOUT_SECOND,
                                             one_time_timer_mock_remove)
        gtk.main()
        monitor.stop()
        self.assertEqual(True, self._media_inserted)
        self.assertEqual(True, self._media_removed)
if __name__ == '__main__':
    # Run every test case defined in this module.
    unittest.main()
|
#!/usr/bin/env python3
# Copyright 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#####
#
# NOTE: This test is currently broken and is blocklisted in the top-level
# Makefile.
#
#####
import logging
import os
import subprocess
import tempfile
import unittest
from cros.factory.test.utils.media_utils import MediaMonitor
from cros.factory.test.utils.media_utils import MountedMedia
from cros.factory.external import glib
from cros.factory.external import gtk
from cros.factory.external import pyudev
# udev constants
_UDEV_ACTION_INSERT = 'add'      # udev action string emitted on device insertion
_UDEV_ACTION_REMOVE = 'remove'   # udev action string emitted on device removal
# File name/content written and read back to verify a mount is usable.
_WRITING_TEST_FILENAME = 'media_utils_unittest.test'
_WRITING_TEST_STR = 'Unittest writing test...'
# Partition number used when simulating partitioned block devices.
_VIRTUAL_PATITION_NUMBER = 3
class TestMountedMedia(unittest.TestCase):
    """Tests for the MountedMedia context manager, backed by a loop device."""

    def setUp(self):
        """Creates a temp file to mock as a media device."""
        self._virtual_device = tempfile.NamedTemporaryFile(
            prefix='media_utils_unitttest')
        # Size the backing file to 1 MiB and format it as ext3.
        exit_code, ret = subprocess.getstatusoutput(
            'truncate -s 1048576 %s && mkfs -F -t ext3 %s' %
            (self._virtual_device.name, self._virtual_device.name))
        self.assertEqual(0, exit_code)
        # Attach the file to a free loop device so it behaves like real media.
        exit_code, ret = subprocess.getstatusoutput('losetup --show -f %s' %
                                                    self._virtual_device.name)
        self._free_loop_device = ret
        self.assertEqual(0, exit_code)

    def tearDown(self):
        # Detach the loop device, then delete the backing temp file.
        exit_code, ret = subprocess.getstatusoutput(
            'losetup -d %s' % self._free_loop_device)
        self.assertEqual(0, exit_code)
        self._virtual_device.close()

    def testFailToMount(self):
        """Tests the MountedMedia throws exceptions when it fails."""
        def with_wrapper():
            with MountedMedia('/dev/device_not_exist') as path:
                pass
        self.assertRaises(Exception, with_wrapper)

    def testNormalMount(self):
        """Tests mounting partition."""
        with MountedMedia(self._free_loop_device) as path:
            with open(os.path.join(path, _WRITING_TEST_FILENAME), 'w') as f:
                f.write(_WRITING_TEST_STR)
            with open(os.path.join(path, _WRITING_TEST_FILENAME), 'r') as f:
                self.assertEqual(_WRITING_TEST_STR, f.readline())

    def testPartitionMountSDA(self):
        """Tests mounting partition.
        This tests mounting partition with devices enumerated
        in alphabets (ex, sda).
        """
        # A symlink named like an sdXN partition lets MountedMedia treat the
        # loop device as partition N of an alphabetically-named disk.
        virtual_partition = tempfile.NamedTemporaryFile(
            prefix='virtual_partition',
            suffix='sdc%d' % _VIRTUAL_PATITION_NUMBER)
        exit_code, ret = subprocess.getstatusoutput(
            'ln -s -f %s %s' %
            (self._free_loop_device, virtual_partition.name))
        self.assertEqual(0, exit_code)
        # Strip the trailing partition digit to get the "device" path.
        with MountedMedia(virtual_partition.name[:-1],
                          _VIRTUAL_PATITION_NUMBER) as path:
            with open(os.path.join(path, _WRITING_TEST_FILENAME), 'w') as f:
                f.write(_WRITING_TEST_STR)
            with open(os.path.join(path, _WRITING_TEST_FILENAME), 'r') as f:
                self.assertEqual(_WRITING_TEST_STR, f.readline())
        virtual_partition.close()

    def testPartitionMountMMCBLK0(self):
        """Tests mounting partition.
        This tests mounting partition with devices enumerated
        in alphabets (ex, mmcblk0).
        """
        virtual_partition = tempfile.NamedTemporaryFile(
            prefix='virtual_partition',
            suffix='mmcblk0p%d' % _VIRTUAL_PATITION_NUMBER)
        exit_code, ret = subprocess.getstatusoutput(
            'ln -s -f %s %s' %
            (self._free_loop_device, virtual_partition.name))
        self.assertEqual(0, exit_code)
        # mmcblk-style names carry a 'p<N>' suffix, so strip two characters.
        with MountedMedia(virtual_partition.name[:-2],
                          _VIRTUAL_PATITION_NUMBER) as path:
            with open(os.path.join(path, _WRITING_TEST_FILENAME), 'w') as f:
                f.write(_WRITING_TEST_STR)
            with open(os.path.join(path, _WRITING_TEST_FILENAME), 'r') as f:
                self.assertEqual(_WRITING_TEST_STR, f.readline())
        virtual_partition.close()

    def testPartitionMountFloppy(self):
        """Tests mounting a device without partition table."""
        with MountedMedia(self._free_loop_device, 1) as path:
            with open(os.path.join(path, _WRITING_TEST_FILENAME), 'w') as f:
                f.write(_WRITING_TEST_STR)
            with open(os.path.join(path, _WRITING_TEST_FILENAME), 'r') as f:
                self.assertEqual(_WRITING_TEST_STR, f.readline())
class TestMediaMonitor(unittest.TestCase):
    """Tests MediaMonitor callbacks using simulated udev device events."""

    def setUp(self):
        """Creates a temp file to mock as a media device."""
        self._virtual_device = tempfile.NamedTemporaryFile(
            prefix='media_utils_unitttest')
        # Size the backing file to 1 MiB (no filesystem needed for this test).
        exit_code, ret = subprocess.getstatusoutput(
            'truncate -s 1048576 %s' % self._virtual_device.name)
        self.assertEqual(0, exit_code)
        # Attach the file to a free loop device so udev sees a block device.
        exit_code, ret = subprocess.getstatusoutput('losetup --show -f %s' %
                                                    self._virtual_device.name)
        self._free_loop_device = ret
        self.assertEqual(0, exit_code)

    def tearDown(self):
        # Detach the loop device, then delete the backing temp file.
        exit_code, ret = subprocess.getstatusoutput(
            'losetup -d %s' % self._free_loop_device)
        self.assertEqual(0, exit_code)
        self._virtual_device.close()

    def testMediaMonitor(self):
        """Emits fake insert/remove events and checks both callbacks fire."""
        def on_insert(dev_path):
            self.assertEqual(self._free_loop_device, dev_path)
            self._media_inserted = True
            gtk.main_quit()

        def on_remove(dev_path):
            self.assertEqual(self._free_loop_device, dev_path)
            self._media_removed = True
            gtk.main_quit()

        def one_time_timer_mock_insert():
            monitor._observer.emit('device-event',
                                   _UDEV_ACTION_INSERT,
                                   self._mock_device)
            return False  # returning False makes the glib timer one-shot

        def one_time_timer_mock_remove():
            monitor._observer.emit('device-event',
                                   _UDEV_ACTION_REMOVE,
                                   self._mock_device)
            return False  # returning False makes the glib timer one-shot

        self._media_inserted = False
        self._media_removed = False
        self._context = pyudev.Context()
        # Wrap the loop device in a pyudev Device to pass to the mock events.
        self._mock_device = pyudev.Device.from_name(
            self._context, 'block',
            os.path.basename(self._free_loop_device))
        # Start the monitor.
        TIMEOUT_SECOND = 1
        monitor = MediaMonitor('block', 'disk')
        monitor.start(on_insert=on_insert, on_remove=on_remove)
        # Simulating the insertion of a valid media device.
        timer_tag = glib.timeout_add_seconds(TIMEOUT_SECOND,
                                             one_time_timer_mock_insert)
        gtk.main()
        # Simulating the removal of a valid media device.
        glib.source_remove(timer_tag)
        timer_tag = glib.timeout_add_seconds(TIMEOUT_SECOND,
                                             one_time_timer_mock_remove)
        gtk.main()
        monitor.stop()
        self.assertEqual(True, self._media_inserted)
        self.assertEqual(True, self._media_removed)
if __name__ == '__main__':
    # Run every test case defined in this module.
    unittest.main()
|
en
| 0.855817
|
#!/usr/bin/env python3 # Copyright 2012 The Chromium OS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. ##### # # NOTE: This test is currently broken and is blocklisted in the top-level # Makefile. # ##### # udev constants Creates a temp file to mock as a media device. Tests the MountedMedia throws exceptions when it fails. Tests mounting partition. Tests mounting partition. This tests mounting partition with devices enumerated in alphabets (ex, sda). Tests mounting partition. This tests mounting partition with devices enumerated in alphabets (ex, mmcblk0). Tests mounting a device without partition table. Creates a temp file to mock as a media device. # Start the monitor. # Simulating the insertion of a valid media device. # Simulating the removal of a valid media device.
| 2.03079
| 2
|
antlir/tests/test_unshare.py
|
zeroxoneb/antlir
| 28
|
6629916
|
<filename>antlir/tests/test_unshare.py
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import re
import signal
import subprocess
import tempfile
import time
import unittest
from typing import Iterable
from unittest import mock
from ..unshare import Namespace, Unshare, nsenter_as_root, nsenter_as_user
# `user` omitted for reasons described in Unshare's docblock
# Entries under /proc/<pid>/ns/ that the tests compare inside vs. outside.
_NS_FILES = ["cgroup", "ipc", "mnt", "net", "pid", "uts"]
class UnshareTestCase(unittest.TestCase):
    """Tests for the Unshare context manager and the nsenter_* helpers."""

    def test_nsenter_wrappers(self):
        """With no Unshare, the wrappers just prefix (or not) with `sudo`."""
        self.assertEqual(("a", "b"), nsenter_as_user(None, "a", "b"))
        self.assertEqual(("sudo", "c", "d"), nsenter_as_root(None, "c", "d"))

    def _popen_sleep_forever(self, unshare: Unshare):
        """Start `sleep infinity` inside `unshare`; return (proc, inner PID)."""
        # We need the ready signal to know when we've actually executed the
        # payload -- otherwise, we might try to interact with it while we're
        # still at `nsenter`.
        proc = subprocess.Popen(
            nsenter_as_user(
                unshare,
                # pyre-fixme[6]: Expected `List[Variable[typing.AnyStr <: [str,
                # bytes]]]` for 2nd param but got `str`.
                "bash",
                # pyre-fixme[6]: Expected `List[Variable[typing.AnyStr <: [str,
                # bytes]]]` for 3rd param but got `str`.
                "-uec",
                # pyre-fixme[6]: Expected `List[Variable[typing.AnyStr <: [str,
                # bytes]]]` for 4th param but got `str`.
                "echo ready $$ ; exec sleep infinity",
            ),
            stdout=subprocess.PIPE,
        )
        # Wait for the child to start
        # pyre-fixme[16]: Optional type has no attribute `readline`.
        ready_and_pid = proc.stdout.readline().split(b" ")
        self.assertEqual(b"ready", ready_and_pid[0])
        # pyre-fixme[16]: Optional type has no attribute `close`.
        proc.stdout.close()  # `sudo` keeps stdout open, but will not write.
        # Returning the PID lets us clean up the `sleep infinity` when it is
        # not inside a PID namespace.
        return proc, int(ready_and_pid[1])

    def _check_ns_diff(self, unshare: Unshare, ns_diff: Iterable[str]):
        """Assert exactly the namespaces in `ns_diff` differ inside `unshare`."""
        # readlink on /proc/self/ns/* yields "kind:[inode]" identifiers.
        list_ns_cmd = [
            "readlink",
            *(f"/proc/self/ns/{name}" for name in _NS_FILES),
        ]
        in_ns, out_ns = [
            dict(
                ns_ino.split(":")
                for ns_ino in subprocess.check_output(cmd)
                .decode()
                .strip()
                .split("\n")
            )
            # pyre-fixme[6]: Expected `List[Variable[typing.AnyStr <: [str,
            # bytes]]]` for 2nd param but got `str`.
            for cmd in [list_ns_cmd, nsenter_as_root(unshare, *list_ns_cmd)]
        ]
        for ns in ns_diff:
            self.assertNotEqual(in_ns.pop(ns), out_ns.pop(ns), ns)
        # Every namespace not listed in ns_diff must be identical.
        self.assertEqual(in_ns, out_ns)

    def _kill_keepalive(self, unshare: Unshare):
        """SIGKILL the keepalive process that holds the namespaces open."""
        # We can kill the inner keepalive `cat` since it runs w/ our UID
        # Since it's an `init` of a PID namespace, we must use SIGKILL.
        cat_pid = int(
            # pyre-fixme[16]: Optional type has no attribute `group`.
            re.match(
                "^/proc/([0-9]+)/ns/",
                next(iter(unshare._namespace_to_file.values())).name,
            ).group(1)
        )
        print("Sending SIGKILL to", cat_pid)
        os.kill(cat_pid, signal.SIGKILL)

    def test_pid_namespace(self):
        """A process inside a PID namespace is reaped when the NS exits."""
        with Unshare([Namespace.PID]) as unshare:
            proc, _ = self._popen_sleep_forever(unshare)
            # Check that "as user" works.
            for arg, expected in (("-u", os.geteuid()), ("-g", os.getegid())):
                actual = int(
                    subprocess.check_output(nsenter_as_user(unshare, "id", arg))
                )
                self.assertEqual(expected, actual)
            time.sleep(2)  # Leave some time for `sleep` to exit erroneously
            self.assertEqual(None, proc.poll())  # Sleeps forever
            self._check_ns_diff(unshare, {"pid"})
        self.assertEqual(-signal.SIGKILL, proc.poll())  # Reaped by PID NS

    def test_pid_namespace_dead_keepalive(self):
        """Killing the keepalive tears the PID namespace down with it."""
        with Unshare([Namespace.PID]) as unshare:
            self._check_ns_diff(unshare, {"pid"})
            good_echo = nsenter_as_user(unshare, "echo")
            subprocess.check_call(good_echo)  # Will fail once the NS is dead
            proc, _ = self._popen_sleep_forever(unshare)
            time.sleep(2)  # Leave some time for `sleep` to exit erroneously
            self.assertEqual(None, proc.poll())  # Sleeps forever
            self._kill_keepalive(unshare)
            self.assertEqual(-signal.SIGKILL, proc.wait())  # The NS is dead
            # The `echo` command that worked above no longer works.
            with self.assertRaises(subprocess.CalledProcessError):
                subprocess.check_call(good_echo)

    def test_context_enter_error(self):
        "Exercise triggering __exit__ when __enter__ raises"
        unshare = Unshare([Namespace.MOUNT])  # This does not fail
        # Give bad arguments to the inner `sudo` to make the keepalive fail
        # quickly without outputting the inner PID.
        # Early failure caught by "assert not keepalive_proc.poll()"
        with mock.patch(
            "os.geteuid", side_effect="NOT-A-REAL-USER-ID"
        ), self.assertRaises(AssertionError):
            with unshare:
                raise AssertionError  # Guarantees __enter__ was what failed
        # The Unshare was left in a clean-ish state, which strongly suggests
        # that __exit__ ran, given that __enter__ immediately assigns to
        # `self._keepalive_proc`, and that did run (CalledProcessError).
        self.assertEqual(None, unshare._keepalive_proc)
        self.assertEqual(None, unshare._namespace_to_file)

    def test_no_namespaces(self):
        """
        A silly test that shows that unsharing nothing still works -- which
        is useful to distinguish self._namespace_to_file {} vs None. That
        said, people should just use nsenter_as_*(None, ...) instead.
        """
        with Unshare([]) as unshare:
            self._check_ns_diff(unshare, {})

    def test_multiple_namespaces(self):
        "Just a smoke test for multiple namespaces being entered at once"
        with Unshare([Namespace.PID, Namespace.MOUNT]) as unshare:
            self._check_ns_diff(unshare, {"mnt", "pid"})

    def test_mount_namespace(self):
        """Bind mounts made inside a mount namespace are invisible outside."""
        try:
            sleep_pid = None
            with tempfile.TemporaryDirectory() as mnt_src, tempfile.TemporaryDirectory() as mnt_dest1, tempfile.TemporaryDirectory() as mnt_dest2:  # noqa: E501
                with open(os.path.join(mnt_src, "cypa"), "w") as outfile:
                    outfile.write("kvoh")

                def check_mnt_dest(mnt_dest: str):
                    cypa = os.path.join(mnt_dest, "cypa")
                    # The outer NS cannot see the mount
                    self.assertFalse(os.path.exists(cypa))
                    # But we can read it from inside the namespace
                    self.assertEqual(
                        b"kvoh",
                        subprocess.check_output(
                            # pyre-fixme[6]: Expected
                            # `List[Variable[typing.AnyStr <: [str, bytes]]]`
                            # for 2nd param but got `str`.
                            nsenter_as_user(unshare, "cat", cypa)
                        ),
                    )

                with Unshare([Namespace.MOUNT]) as unshare:
                    # Without a PID namespace, this will outlive the
                    # __exit__ -- in fact, this process would leak but for
                    # our `finally`.
                    proc, sleep_pid = self._popen_sleep_forever(unshare)
                    subprocess.check_call(
                        nsenter_as_root(
                            unshare, "mount", mnt_src, mnt_dest1, "-o", "bind"
                        )
                    )
                    check_mnt_dest(mnt_dest1)
                    # Mount namespaces remain usable after the keepalive dies
                    self._kill_keepalive(unshare)
                    # We can make a second mount inside the namespace
                    subprocess.check_call(
                        nsenter_as_root(
                            unshare, "mount", mnt_src, mnt_dest2, "-o", "bind"
                        )
                    )
                    check_mnt_dest(mnt_dest2)
                    check_mnt_dest(mnt_dest1)  # The old mount is still good
                # Outside the context, nsenter cannot work. There's no way
                # to test the mounts are gone since we don't have any handle
                # by which to access them. That's the point.
                with self.assertRaisesRegex(
                    RuntimeError, "Must nsenter from inside an Unshare"
                ):
                    check_mnt_dest(mnt_dest1)
            time.sleep(2)  # Give some time for `sleep` to exit erroneously
            self.assertIs(None, proc.poll())  # Processes leak
        finally:
            # Ensure we don't leak the `sleep infinity` -- since it was
            # started via `sudo`, `subprocess` cannot kill it automatically.
            if sleep_pid:
                if proc.poll() is None:
                    os.kill(sleep_pid, signal.SIGTERM)
                proc.wait()

    def test_network_namespace(self):
        """A tap device created in a network namespace is invisible outside."""
        # create a network namespace and a tap device within it, ensuring that
        # it is only visible within the namespace
        with Unshare([Namespace.NETWORK]) as unshare:
            # does not already exist within the namespace
            self.assertNotIn(
                "ns-tap",
                subprocess.run(
                    nsenter_as_root(unshare, "ip", "link"),
                    check=True,
                    stdout=subprocess.PIPE,
                    stderr=subprocess.PIPE,
                    text=True,
                ).stdout,
            )
            subprocess.run(
                nsenter_as_root(
                    unshare,
                    "ip",
                    "tuntap",
                    "add",
                    "dev",
                    "ns-tap",
                    "mode",
                    "tap",
                ),
                check=True,
            )
            # visible inside the namespace
            self.assertIn(
                "ns-tap",
                subprocess.run(
                    nsenter_as_root(unshare, "ip", "link"),
                    check=True,
                    stdout=subprocess.PIPE,
                    stderr=subprocess.PIPE,
                    text=True,
                ).stdout,
            )
            # not visible outside the namespace
            self.assertNotIn(
                "ns-tap",
                subprocess.run(
                    ["ip", "link"],
                    check=True,
                    stdout=subprocess.PIPE,
                    stderr=subprocess.PIPE,
                    text=True,
                ).stdout,
            )

    def test_nsenter_without_sudo(self):
        """nsenter_without_sudo yields the same command minus the sudo prefix."""
        # This just creates the namespace and then compares commands generated
        # to confirm that the `sudo` is dropped. Since `nsenter` requires
        # root to enter a namespace, if we tried to actually run the command
        # it would surely break.
        with Unshare([Namespace.MOUNT]) as unshare:
            sudo_cmd = unshare.nsenter_as_root(["/bin/ls"])
            no_sudo_cmd = unshare.nsenter_without_sudo(["/bin/ls"])
            self.assertEqual(no_sudo_cmd, sudo_cmd[1:])
|
<filename>antlir/tests/test_unshare.py
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import re
import signal
import subprocess
import tempfile
import time
import unittest
from typing import Iterable
from unittest import mock
from ..unshare import Namespace, Unshare, nsenter_as_root, nsenter_as_user
# `user` omitted for reasons described in Unshare's docblock
_NS_FILES = ["cgroup", "ipc", "mnt", "net", "pid", "uts"]
class UnshareTestCase(unittest.TestCase):
    """Tests for the `Unshare` namespace helper and the `nsenter_*` wrappers.

    NOTE(review): most tests shell out via `sudo`/`nsenter`, so they require
    root-capable sudo on the test host — confirm for the target CI setup.
    """
    def test_nsenter_wrappers(self):
        """With no Unshare, the wrappers prepend nothing (user) / `sudo` (root)."""
        self.assertEqual(("a", "b"), nsenter_as_user(None, "a", "b"))
        self.assertEqual(("sudo", "c", "d"), nsenter_as_root(None, "c", "d"))
    def _popen_sleep_forever(self, unshare: Unshare):
        """Start `sleep infinity` inside `unshare`; return (Popen, inner PID)."""
        # We need the ready signal to know when we've actually executed the
        # payload -- otherwise, we might try to interact with it while we're
        # still at `nsenter`.
        proc = subprocess.Popen(
            nsenter_as_user(
                unshare,
                # pyre-fixme[6]: Expected `List[Variable[typing.AnyStr <: [str,
                # bytes]]]` for 2nd param but got `str`.
                "bash",
                # pyre-fixme[6]: Expected `List[Variable[typing.AnyStr <: [str,
                # bytes]]]` for 3rd param but got `str`.
                "-uec",
                # pyre-fixme[6]: Expected `List[Variable[typing.AnyStr <: [str,
                # bytes]]]` for 4th param but got `str`.
                "echo ready $$ ; exec sleep infinity",
            ),
            stdout=subprocess.PIPE,
        )
        # Wait for the child to start
        # pyre-fixme[16]: Optional type has no attribute `readline`.
        ready_and_pid = proc.stdout.readline().split(b" ")
        self.assertEqual(b"ready", ready_and_pid[0])
        # pyre-fixme[16]: Optional type has no attribute `close`.
        proc.stdout.close() # `sudo` keeps stdout open, but will not write.
        # Returning the PID lets us clean up the `sleep infinity` when it is
        # not inside a PID namespace.
        return proc, int(ready_and_pid[1])
    def _check_ns_diff(self, unshare: Unshare, ns_diff: Iterable[str]):
        """Assert that exactly the namespaces named in `ns_diff` differ
        between this process and the inside of `unshare`, comparing the
        `/proc/self/ns/*` readlink output of both sides."""
        list_ns_cmd = [
            "readlink",
            *(f"/proc/self/ns/{name}" for name in _NS_FILES),
        ]
        # Each readlink line looks like "name:[inode]"; splitting on ":"
        # yields (namespace name, inode) pairs for the dict below.
        in_ns, out_ns = [
            dict(
                ns_ino.split(":")
                for ns_ino in subprocess.check_output(cmd)
                .decode()
                .strip()
                .split("\n")
            )
            # pyre-fixme[6]: Expected `List[Variable[typing.AnyStr <: [str,
            # bytes]]]` for 2nd param but got `str`.
            for cmd in [list_ns_cmd, nsenter_as_root(unshare, *list_ns_cmd)]
        ]
        for ns in ns_diff:
            self.assertNotEqual(in_ns.pop(ns), out_ns.pop(ns), ns)
        self.assertEqual(in_ns, out_ns)
    def _kill_keepalive(self, unshare: Unshare):
        """SIGKILL the keepalive process that holds the namespaces open.

        The keepalive's PID is recovered from the `/proc/<pid>/ns/...` path
        of one of the namespace file handles held by `unshare`.
        """
        # We can kill the inner keepalive `cat` since it runs w/ our UID
        # Since it's an `init` of a PID namespace, we must use SIGKILL.
        cat_pid = int(
            # pyre-fixme[16]: Optional type has no attribute `group`.
            re.match(
                "^/proc/([0-9]+)/ns/",
                next(iter(unshare._namespace_to_file.values())).name,
            ).group(1)
        )
        print("Sending SIGKILL to", cat_pid)
        os.kill(cat_pid, signal.SIGKILL)
    def test_pid_namespace(self):
        """A PID namespace reaps its processes once the context exits."""
        with Unshare([Namespace.PID]) as unshare:
            proc, _ = self._popen_sleep_forever(unshare)
            # Check that "as user" works.
            for arg, expected in (("-u", os.geteuid()), ("-g", os.getegid())):
                actual = int(
                    subprocess.check_output(nsenter_as_user(unshare, "id", arg))
                )
                self.assertEqual(expected, actual)
            time.sleep(2) # Leave some time for `sleep` to exit erroneously
            self.assertEqual(None, proc.poll()) # Sleeps forever
            self._check_ns_diff(unshare, {"pid"})
        self.assertEqual(-signal.SIGKILL, proc.poll()) # Reaped by PID NS
    def test_pid_namespace_dead_keepalive(self):
        """Killing the keepalive kills the PID namespace and everything in it."""
        with Unshare([Namespace.PID]) as unshare:
            self._check_ns_diff(unshare, {"pid"})
            good_echo = nsenter_as_user(unshare, "echo")
            subprocess.check_call(good_echo) # Will fail once the NS is dead
            proc, _ = self._popen_sleep_forever(unshare)
            time.sleep(2) # Leave some time for `sleep` to exit erroneously
            self.assertEqual(None, proc.poll()) # Sleeps forever
            self._kill_keepalive(unshare)
            self.assertEqual(-signal.SIGKILL, proc.wait()) # The NS is dead
            # The `echo` command that worked above no longer works.
            with self.assertRaises(subprocess.CalledProcessError):
                subprocess.check_call(good_echo)
    def test_context_enter_error(self):
        "Exercise triggering __exit__ when __enter__ raises"
        unshare = Unshare([Namespace.MOUNT]) # This does not fail
        # Give bad arguments to the inner `sudo` to make the keepalive fail
        # quickly without outputting the inner PID.
        # Early failure caught by "assert not keepalive_proc.poll()"
        with mock.patch(
            "os.geteuid", side_effect="NOT-A-REAL-USER-ID"
        ), self.assertRaises(AssertionError):
            with unshare:
                raise AssertionError # Guarantees __enter__ was what failed
        # The Unshare was left in a clean-ish state, which strongly suggests
        # that __exit__ ran, given that __enter__ immediately assigns to
        # `self._keepalive_proc`, and that did run (CalledProcessError).
        self.assertEqual(None, unshare._keepalive_proc)
        self.assertEqual(None, unshare._namespace_to_file)
    def test_no_namespaces(self):
        """
        A silly test that shows that unsharing nothing still works -- which
        is useful to distinguish self._namespace_to_file {} vs None. That
        said, people should just use nsenter_as_*(None, ...) instead.
        """
        with Unshare([]) as unshare:
            self._check_ns_diff(unshare, {})
    def test_multiple_namespaces(self):
        "Just a smoke test for multiple namespaces being entered at once"
        with Unshare([Namespace.PID, Namespace.MOUNT]) as unshare:
            self._check_ns_diff(unshare, {"mnt", "pid"})
    def test_mount_namespace(self):
        """Bind mounts made inside a mount namespace are invisible outside,
        survive the keepalive's death, and become unreachable once the
        Unshare context exits."""
        try:
            sleep_pid = None
            with tempfile.TemporaryDirectory() as mnt_src, tempfile.TemporaryDirectory() as mnt_dest1, tempfile.TemporaryDirectory() as mnt_dest2: # noqa: E501
                with open(os.path.join(mnt_src, "cypa"), "w") as outfile:
                    outfile.write("kvoh")
                def check_mnt_dest(mnt_dest: str):
                    # Closure over `unshare`, assigned by the `with` below;
                    # only called after that assignment.
                    cypa = os.path.join(mnt_dest, "cypa")
                    # The outer NS cannot see the mount
                    self.assertFalse(os.path.exists(cypa))
                    # But we can read it from inside the namespace
                    self.assertEqual(
                        b"kvoh",
                        subprocess.check_output(
                            # pyre-fixme[6]: Expected
                            # `List[Variable[typing.AnyStr <: [str, bytes]]]`
                            # for 2nd param but got `str`.
                            nsenter_as_user(unshare, "cat", cypa)
                        ),
                    )
                with Unshare([Namespace.MOUNT]) as unshare:
                    # Without a PID namespace, this will outlive the
                    # __exit__ -- in fact, this process would leak but for
                    # our `finally`.
                    proc, sleep_pid = self._popen_sleep_forever(unshare)
                    subprocess.check_call(
                        nsenter_as_root(
                            unshare, "mount", mnt_src, mnt_dest1, "-o", "bind"
                        )
                    )
                    check_mnt_dest(mnt_dest1)
                    # Mount namespaces remain usable after the keepalive dies
                    self._kill_keepalive(unshare)
                    # We can make a second mount inside the namespace
                    subprocess.check_call(
                        nsenter_as_root(
                            unshare, "mount", mnt_src, mnt_dest2, "-o", "bind"
                        )
                    )
                    check_mnt_dest(mnt_dest2)
                    check_mnt_dest(mnt_dest1) # The old mount is still good
                # Outside the context, nsenter cannot work. There's no way
                # to test the mounts are gone since we don't have any handle
                # by which to access them. That's the point.
                with self.assertRaisesRegex(
                    RuntimeError, "Must nsenter from inside an Unshare"
                ):
                    check_mnt_dest(mnt_dest1)
                time.sleep(2) # Give some time for `sleep` to exit erroneously
                self.assertIs(None, proc.poll()) # Processes leak
        finally:
            # Ensure we don't leak the `sleep infinity` -- since it was
            # started via `sudo`, `subprocess` cannot kill it automatically.
            if sleep_pid:
                if proc.poll() is None:
                    os.kill(sleep_pid, signal.SIGTERM)
                proc.wait()
    def test_network_namespace(self):
        """A tap device created inside a network namespace is visible only
        from within that namespace."""
        # create a network namespace and a tap device within it, ensuring that
        # it is only visible within the namespace
        with Unshare([Namespace.NETWORK]) as unshare:
            # does not already exist within the namespace
            self.assertNotIn(
                "ns-tap",
                subprocess.run(
                    nsenter_as_root(unshare, "ip", "link"),
                    check=True,
                    stdout=subprocess.PIPE,
                    stderr=subprocess.PIPE,
                    text=True,
                ).stdout,
            )
            subprocess.run(
                nsenter_as_root(
                    unshare,
                    "ip",
                    "tuntap",
                    "add",
                    "dev",
                    "ns-tap",
                    "mode",
                    "tap",
                ),
                check=True,
            )
            # visible inside the namespace
            self.assertIn(
                "ns-tap",
                subprocess.run(
                    nsenter_as_root(unshare, "ip", "link"),
                    check=True,
                    stdout=subprocess.PIPE,
                    stderr=subprocess.PIPE,
                    text=True,
                ).stdout,
            )
            # not visible outside the namespace
            self.assertNotIn(
                "ns-tap",
                subprocess.run(
                    ["ip", "link"],
                    check=True,
                    stdout=subprocess.PIPE,
                    stderr=subprocess.PIPE,
                    text=True,
                ).stdout,
            )
    def test_nsenter_without_sudo(self):
        """`nsenter_without_sudo` output equals `nsenter_as_root` minus `sudo`."""
        # This just creates the namespace and then compares commands generated
        # to confirm that the `sudo` is dropped. Since `nsenter` requires
        # root to enter a namespace, if we tried to actually run the command
        # it would surely break.
        with Unshare([Namespace.MOUNT]) as unshare:
            sudo_cmd = unshare.nsenter_as_root(["/bin/ls"])
            no_sudo_cmd = unshare.nsenter_without_sudo(["/bin/ls"])
            self.assertEqual(no_sudo_cmd, sudo_cmd[1:])
|
en
| 0.870552
|
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # `user` omitted for reasons described in Unshare's docblock # We need the ready signal to know when we've actually executed the # payload -- otherwise, we might try to interact with it while we're # still at `nsenter`. # pyre-fixme[6]: Expected `List[Variable[typing.AnyStr <: [str, # bytes]]]` for 2nd param but got `str`. # pyre-fixme[6]: Expected `List[Variable[typing.AnyStr <: [str, # bytes]]]` for 3rd param but got `str`. # pyre-fixme[6]: Expected `List[Variable[typing.AnyStr <: [str, # bytes]]]` for 4th param but got `str`. # Wait for the child to start # pyre-fixme[16]: Optional type has no attribute `readline`. # pyre-fixme[16]: Optional type has no attribute `close`. # `sudo` keeps stdout open, but will not write. # Returning the PID lets us clean up the `sleep infinity` when it is # not inside a PID namespace. # pyre-fixme[6]: Expected `List[Variable[typing.AnyStr <: [str, # bytes]]]` for 2nd param but got `str`. # We can kill the inner keepalive `cat` since it runs w/ our UID # Since it's an `init` of a PID namespace, we must use SIGKILL. # pyre-fixme[16]: Optional type has no attribute `group`. # Check that "as user" works. # Leave some time for `sleep` to exit erroneously # Sleeps forever # Reaped by PID NS # Will fail once the NS is dead # Leave some time for `sleep` to exit erroneously # Sleeps forever # The NS is dead # The `echo` command that worked above no longer works. # This does not fail # Give bad arguments to the inner `sudo` to make the keepalive fail # quickly without outputting the inner PID. 
# Early failure caught by "assert not keepalive_proc.poll()" # Guarantees __enter__ was what failed # The Unshare was left in a clean-ish state, which strongly suggests # that __exit__ ran, given that __enter__ immediately assigns to # `self._keepalive_proc`, and that did run (CalledProcessError). A silly test that shows that unsharing nothing still works -- which is useful to distinguish self._namespace_to_file {} vs None. That said, people should just use nsenter_as_*(None, ...) instead. # noqa: E501 # The outer NS cannot see the mount # But we can read it from inside the namespace # pyre-fixme[6]: Expected # `List[Variable[typing.AnyStr <: [str, bytes]]]` # for 2nd param but got `str`. # Without a PID namespace, this will outlive the # __exit__ -- in fact, this process would leak but for # our `finally`. # Mount namespaces remain usable after the keepalive dies # We can make a second mount inside the namespace # The old mount is still good # Outside the context, nsenter cannot work. There's no way # to test the mounts are gone since we don't have any handle # by which to access them. That's the point. # Give some time for `sleep` to exit erroneously # Processes leak # Ensure we don't leak the `sleep infinity` -- since it was # started via `sudo`, `subprocess` cannot kill it automatically. # create a network namespace and a tap device within it, ensuring that # it is only visible within the namespace # does not already exist within the namespace # visible inside the namespace # not visible outside the namespace # This just creates the namespace and then compares commands generated # to confirm that the `sudo` is dropped. Since `nsenter` requires # root to enter a namespace, if we tried to actually run the command # it would surely break.
| 1.938626
| 2
|
scionlab/tasks.py
|
ManuelMeinen/scionlab
| 0
|
6629917
|
<reponame>ManuelMeinen/scionlab<gh_stars>0
# Copyright 2018 ETH Zurich
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Huey tasks config for scionlab project.
"""
import logging
import shlex
import subprocess
import huey.contrib.djhuey as huey
from django.conf import settings
def deploy_host_config(host):
    """
    Kick off an asynchronous configuration deployment for a managed host.

    Only triggers a task when the host actually has pending configuration
    changes; the version that ends up deployed may be any version newer
    than the current one.

    :param Host host:
    """
    assert host.managed
    # Guard: nothing to do unless a deployment is actually pending.
    if host.needs_config_deployment():
        _queue_or_trigger(host.ssh_host, host.uid, host.secret)
def _queue_or_trigger(ssh_host, host_id, host_secret):
    """
    Record that a deployment is wanted and start the deploy task, unless a
    task is already queued or executing for this host.

    Huey's key-value store is used so that at most one deploy task per host
    exists at any time; the "triggered" key makes a running task re-check
    before it finishes. The deployment itself runs asynchronously.

    :param str ssh_host: name to ssh to host
    :param str host_id: unique id of the Host object
    :param str host_secret: secret to authenticate request for this Host object
    """
    # Mark the host as needing a (re-)deployment.
    _put_if_empty(_key_deploy_host_triggered(host_id), True)
    # Only enqueue a new task if no task currently holds the "running" key.
    lock_acquired = _put_if_empty(_key_deploy_host_running(host_id), True)
    if lock_acquired:
        _deploy_host_config(ssh_host, host_id, host_secret)
@huey.task()
def _deploy_host_config(ssh_host, host_id, host_secret):
    """
    Task to deploy configuration to a managed scionlab host.
    Note: parameters are passed individually instead of as the full host object,
    because the parameters are serialised by huey.
    :param str ssh_host: name to ssh to host
    :param str host_id: id (primary key) of the Host object
    :param str host_secret: secret to authenticate request for this Host object
    """
    try:
        # Consume the "triggered" flag (huey's get() removes the key, which
        # is how the flag is reset for the next trigger).
        triggered = huey.HUEY.get(_key_deploy_host_triggered(host_id))
        # Check that the task was triggered since its last execution and it still needs deployment
        if triggered and _check_host_needs_config_deployment(host_id):
            # The task was triggered and needs execution, run it
            _invoke_ssh_scionlab_config(ssh_host, host_id, host_secret)
            # Schedule the task to be rerun no sooner than after the delay.
            # Returning here deliberately keeps the "running" key set, so no
            # second task can be enqueued while the rescheduled one is pending.
            _deploy_host_config.schedule(args=(ssh_host, host_id, host_secret),
                                         delay=settings.DEPLOYMENT_PERIOD)
            return
    except Exception as e:
        logging.error("Huey task _deploy_host_config failed with %s" % e)
    # task was not run or failed to run, release lock (get() pops the key)
    huey.HUEY.get(_key_deploy_host_running(host_id))
def _check_host_needs_config_deployment(host_id):
    """Return whether the Host with the given uid still needs a deployment."""
    # Local import: resolve the Django model only when the task actually runs.
    from scionlab.models.core import Host
    host = Host.objects.get(uid=host_id)
    return host.needs_config_deployment()
# this wrapper is missing from huey.api
def _put_if_empty(key, value):
    """Store `value` (pickled) under `key` only if the key is currently unset.

    Returns huey storage's put_if_empty result — presumably truthy when the
    value was newly stored (see the lock usage in _queue_or_trigger).
    """
    import pickle
    payload = pickle.dumps(value, pickle.HIGHEST_PROTOCOL)
    return huey.HUEY.storage.put_if_empty(key, payload)
def _key_deploy_host_running(host_id):
return 'scionlab_deploy_host_ongoing_' + host_id
def _key_deploy_host_triggered(host_id):
return 'scionlab_deploy_host_triggered_' + host_id
def _invoke_ssh_scionlab_config(ssh_host, host_id, host_secret):
    """
    Run `scionlab-config` on the remote host over ssh to pull and apply
    the current configuration.

    :param str ssh_host: name to ssh to host
    :param str host_id: id (primary key) of the Host object
    :param str host_secret: secret to authenticate request for this Host object
    """
    remote_command = (
        'scionlab-config'
        ' --host-id {host_id}'
        ' --host-secret {host_secret}'
        ' --url "{url}"'
    ).format(host_id=host_id, host_secret=host_secret, url=settings.SCIONLAB_SITE)
    args = ['ssh', '-F', settings.SSH_CONFIG_PATH, ssh_host, remote_command]
    # Log the exact invocation, shell-quoted for copy/paste debugging.
    logging.info(' '.join(shlex.quote(a) for a in args))
    subprocess.call(args)
|
# Copyright 2018 ETH Zurich
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Huey tasks config for scionlab project.
"""
import logging
import shlex
import subprocess
import huey.contrib.djhuey as huey
from django.conf import settings
def deploy_host_config(host):
    """
    Kick off an asynchronous configuration deployment for a managed host.

    Only triggers a task when the host actually has pending configuration
    changes; the version that ends up deployed may be any version newer
    than the current one.

    :param Host host:
    """
    assert host.managed
    # Guard: nothing to do unless a deployment is actually pending.
    if host.needs_config_deployment():
        _queue_or_trigger(host.ssh_host, host.uid, host.secret)
def _queue_or_trigger(ssh_host, host_id, host_secret):
    """
    Record that a deployment is wanted and start the deploy task, unless a
    task is already queued or executing for this host.

    Huey's key-value store is used so that at most one deploy task per host
    exists at any time; the "triggered" key makes a running task re-check
    before it finishes. The deployment itself runs asynchronously.

    :param str ssh_host: name to ssh to host
    :param str host_id: unique id of the Host object
    :param str host_secret: secret to authenticate request for this Host object
    """
    # Mark the host as needing a (re-)deployment.
    _put_if_empty(_key_deploy_host_triggered(host_id), True)
    # Only enqueue a new task if no task currently holds the "running" key.
    lock_acquired = _put_if_empty(_key_deploy_host_running(host_id), True)
    if lock_acquired:
        _deploy_host_config(ssh_host, host_id, host_secret)
@huey.task()
def _deploy_host_config(ssh_host, host_id, host_secret):
    """
    Task to deploy configuration to a managed scionlab host.
    Note: parameters are passed individually instead of as the full host object,
    because the parameters are serialised by huey.
    :param str ssh_host: name to ssh to host
    :param str host_id: id (primary key) of the Host object
    :param str host_secret: secret to authenticate request for this Host object
    """
    try:
        # Consume the "triggered" flag (huey's get() removes the key, which
        # is how the flag is reset for the next trigger).
        triggered = huey.HUEY.get(_key_deploy_host_triggered(host_id))
        # Check that the task was triggered since its last execution and it still needs deployment
        if triggered and _check_host_needs_config_deployment(host_id):
            # The task was triggered and needs execution, run it
            _invoke_ssh_scionlab_config(ssh_host, host_id, host_secret)
            # Schedule the task to be rerun no sooner than after the delay.
            # Returning here deliberately keeps the "running" key set, so no
            # second task can be enqueued while the rescheduled one is pending.
            _deploy_host_config.schedule(args=(ssh_host, host_id, host_secret),
                                         delay=settings.DEPLOYMENT_PERIOD)
            return
    except Exception as e:
        logging.error("Huey task _deploy_host_config failed with %s" % e)
    # task was not run or failed to run, release lock (get() pops the key)
    huey.HUEY.get(_key_deploy_host_running(host_id))
def _check_host_needs_config_deployment(host_id):
    """Return whether the Host with the given uid still needs a deployment."""
    # Local import: resolve the Django model only when the task actually runs.
    from scionlab.models.core import Host
    host = Host.objects.get(uid=host_id)
    return host.needs_config_deployment()
# this wrapper is missing from huey.api
def _put_if_empty(key, value):
    """Store `value` (pickled) under `key` only if the key is currently unset.

    Returns huey storage's put_if_empty result — presumably truthy when the
    value was newly stored (see the lock usage in _queue_or_trigger).
    """
    import pickle
    payload = pickle.dumps(value, pickle.HIGHEST_PROTOCOL)
    return huey.HUEY.storage.put_if_empty(key, payload)
def _key_deploy_host_running(host_id):
return 'scionlab_deploy_host_ongoing_' + host_id
def _key_deploy_host_triggered(host_id):
return 'scionlab_deploy_host_triggered_' + host_id
def _invoke_ssh_scionlab_config(ssh_host, host_id, host_secret):
    """
    Run `scionlab-config` on the remote host over ssh to pull and apply
    the current configuration.

    :param str ssh_host: name to ssh to host
    :param str host_id: id (primary key) of the Host object
    :param str host_secret: secret to authenticate request for this Host object
    """
    remote_command = (
        'scionlab-config'
        ' --host-id {host_id}'
        ' --host-secret {host_secret}'
        ' --url "{url}"'
    ).format(host_id=host_id, host_secret=host_secret, url=settings.SCIONLAB_SITE)
    args = ['ssh', '-F', settings.SSH_CONFIG_PATH, ssh_host, remote_command]
    # Log the exact invocation, shell-quoted for copy/paste debugging.
    logging.info(' '.join(shlex.quote(a) for a in args))
    subprocess.call(args)
|
en
| 0.885637
|
# Copyright 2018 ETH Zurich # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Huey tasks config for scionlab project. Initiates the configuration deployment for a managed scionlab host. Ensures that a task is only triggered when the configuration change requires a deployment. The deployment is run asynchronously, the version that will be deployed can be any version newer than the current one. :param Host host: # Double check that this is not a no-op: Queues and/or sets the trigger for the configuration deployment of a managed scionlab host. Ensures that only one such task is executing per host by enforcing that at most one deploy task per host is in the queue. The deployment is run asynchronously, the version that will be deployed can be any version newer than the current one. :param str ssh_host: name to ssh to host :param str host_id: unique id of the Host object :param str host_secret: secret to authenticate request for this Host object # Set the trigger for the task to run/re-run it if necessary. # Custom trickery with hueys key-value store: # ensure only one task per host is in the queue or executing at any time. Task to deploy configuration to a managed scionlab host. Note: parameters are passed individually instead of as the full host object, because the parameters are serialised by huey. 
:param str ssh_host: name to ssh to host :param str host_id: id (primary key) of the Host object :param str host_secret: secret to authenticate request for this Host object # Check that the task was triggered since its last execution and it still needs deployment # The task was triggered and needs execution, run it # Schedule the task to be rerun no sooner than after the delay # task was not run or failed to run, release lock # this wrapper is missing from huey.api Calls the actual ssh command to deploy the configuration to a managed scionlab host. :param str ssh_host: name to ssh to host :param str host_id: id (primary key) of the Host object :param str host_secret: secret to authenticate request for this Host object
| 1.856988
| 2
|
pyleecan/Methods/Simulation/MagFEMM/comp_flux_airgap.py
|
tobsen2code/pyleecan
| 0
|
6629918
|
from os.path import isfile
from shutil import copyfile
from numpy import zeros
from SciDataTool import Data1D
from ....Classes._FEMMHandler import _FEMMHandler
from ....Classes.OutMagFEMM import OutMagFEMM
from ....Functions.labels import STATOR_LAB
from ....Functions.FEMM.draw_FEMM import draw_FEMM
from ....Functions.MeshSolution.build_solution_data import build_solution_data
from ....Functions.MeshSolution.build_meshsolution import build_meshsolution
from ....Functions.MeshSolution.build_solution_vector import build_solution_vector
def comp_flux_airgap(self, output, axes_dict, Is_val=None, Ir_val=None):
    """Build and solve FEMM model to calculate and store magnetic quantities

    Parameters
    ----------
    self : MagFEMM
        a MagFEMM object
    output : Output
        an Output object
    axes_dict: {Data}
        Dict of axes used for magnetic calculation
    Is_val : optional
        stator current values forwarded to solve_FEMM as Is
        (semantics defined by solve_FEMM — TODO confirm; None keeps default)
    Ir_val : optional
        rotor current values forwarded to solve_FEMM as Ir
        (semantics defined by solve_FEMM — TODO confirm; None keeps default)

    Returns
    -------
    out_dict: dict
        Dict containing the following quantities:
            Br : ndarray
                Airgap radial flux density (Nt,Na) [T]
            Bt : ndarray
                Airgap tangential flux density (Nt,Na) [T]
            Tem : ndarray
                Electromagnetic torque over time (Nt,) [Nm]
            Phi_wind_stator : ndarray
                Stator winding flux (qs,Nt) [Wb]
            Phi_wind : dict
                Dict of winding fluxlinkage with respect to Machine.get_lam_list_label (qs,Nt) [Wb]
            meshsolution: MeshSolution
                MeshSolution object containing magnetic quantities B, H, mu for each time step
    """
    logger = self.get_logger()
    # Init output
    out_dict = dict()
    if output.mag.internal is None:
        output.mag.internal = OutMagFEMM()
    # Get time and angular axes
    Angle = axes_dict["angle"]
    Time = axes_dict["time"]
    # Set the angular symmetry factor according to the machine and check if it is anti-periodic
    sym, is_antiper_a = Angle.get_periodicity()
    # Import angular vector from Data object
    angle = Angle.get_values(
        is_oneperiod=self.is_periodicity_a,
        is_antiperiod=is_antiper_a and self.is_periodicity_a,
    )
    Na = angle.size
    # Check if the time axis is anti-periodic
    _, is_antiper_t = Time.get_periodicity()
    # Number of time steps
    time = Time.get_values(
        is_oneperiod=self.is_periodicity_t,
        is_antiperiod=is_antiper_t and self.is_periodicity_t,
    )
    Nt = time.size
    # Get rotor angular position
    angle_rotor = output.get_angle_rotor()[0:Nt]
    # Setup the FEMM simulation
    # Geometry building and assigning property in FEMM
    # Instanciate a new FEMM
    femm = _FEMMHandler()
    output.mag.internal.handler_list.append(femm)
    if self.import_file is None:
        path_femm = self.get_path_save_fem(output)
        logger.debug("Drawing machine in FEMM at " + path_femm)
        FEMM_dict = draw_FEMM(
            femm,
            output,
            is_mmfr=self.is_mmfr,
            is_mmfs=self.is_mmfs,
            sym=sym,
            is_antiper=is_antiper_a,
            type_calc_leakage=self.type_calc_leakage,
            is_remove_ventS=self.is_remove_ventS,
            is_remove_ventR=self.is_remove_ventR,
            is_remove_slotS=self.is_remove_slotS,
            is_remove_slotR=self.is_remove_slotR,
            type_BH_stator=self.type_BH_stator,
            type_BH_rotor=self.type_BH_rotor,
            kgeo_fineness=self.Kgeo_fineness,
            kmesh_fineness=self.Kmesh_fineness,
            user_FEMM_dict=self.FEMM_dict_enforced,
            path_save=path_femm,
            is_sliding_band=self.is_sliding_band,
            transform_list=self.transform_list,
            rotor_dxf=self.rotor_dxf,
            stator_dxf=self.stator_dxf,
            is_fast_draw=self.is_fast_draw,
            T_mag=self.T_mag,
        )
    else:
        logger.debug("Reusing the FEMM file: " + self.import_file)
        if output.mag.internal.FEMM_dict is not None:
            FEMM_dict = output.mag.internal.FEMM_dict
        else:
            FEMM_dict = self.FEMM_dict_enforced
    # Init flux arrays in out_dict
    out_dict["B_{rad}"] = zeros((Nt, Na))
    out_dict["B_{circ}"] = zeros((Nt, Na))
    # Init torque array in out_dict
    out_dict["Tem"] = zeros((Nt))
    # Init lamination winding flux list of arrays in out_dict
    machine = output.simu.machine
    out_dict["Phi_wind"] = {}
    axes_dict_elec = output.elec.axes_dict
    for label in machine.get_lam_list_label():
        if "phase_" + label in axes_dict_elec:
            qs = axes_dict_elec["phase_" + label].get_length(is_smallestperiod=True)
            out_dict["Phi_wind"][label] = zeros((Nt, qs))
    # delete 'Phi_wind' if empty
    if len(out_dict["Phi_wind"]) == 0:
        out_dict.pop("Phi_wind")
    # Solve for all time step and store all the results in out_dict
    if self.nb_worker > 1:
        # A Femm handler will be created for each worker
        femm.closefemm()
        output.mag.internal.handler_list.remove(femm)
        # With parallelization
        B_elem, H_elem, mu_elem, A_node, meshFEMM, groups = self.solve_FEMM_parallel(
            femm,
            output,
            out_dict,
            FEMM_dict=FEMM_dict,
            sym=sym,
            Nt=Nt,
            angle=angle,
            Is=Is_val,
            Ir=Ir_val,
            angle_rotor=angle_rotor,
            filename=self.import_file,
        )
    else:
        # Without parallelization
        B_elem, H_elem, mu_elem, A_node, meshFEMM, groups = self.solve_FEMM(
            femm,
            output,
            out_dict,
            FEMM_dict=FEMM_dict,
            sym=sym,
            Nt=Nt,
            angle=angle,
            Is=Is_val,
            Ir=Ir_val,
            angle_rotor=angle_rotor,
            is_close_femm=self.is_close_femm,
            filename=self.import_file,
        )
    # Store FEMM_dict in out_dict if FEMM file is not imported
    if self.import_file is None:
        # Especially useful to avoid redrawing machine in case of skew
        self.import_file = path_femm
        output.mag.internal.FEMM_dict = FEMM_dict
    # Store stator winding flux
    # BUGFIX: "Phi_wind" is popped above when no winding phase exists, so the
    # lookup must be guarded to avoid a KeyError for machines without windings.
    if "Phi_wind" in out_dict and STATOR_LAB + "-0" in out_dict["Phi_wind"]:
        out_dict["Phi_wind_stator"] = out_dict["Phi_wind"][STATOR_LAB + "-0"]
    # Store mesh data & solution
    if self.is_get_meshsolution and B_elem is not None:
        # Define axis
        Time = Time.copy()
        meshFEMM[0].sym = sym
        meshFEMM[0].is_antiper_a = is_antiper_a
        indices_cell = meshFEMM[0].cell["triangle"].indice
        Indices_Cell = Data1D(
            name="indice", values=indices_cell, is_components=True, is_overlay=False
        )
        Slice = axes_dict["z"]
        axis_list = [Time, Indices_Cell, Slice]
        B_sol = build_solution_vector(
            field=B_elem[:, :, None, :],  # quick fix for slice issue
            axis_list=axis_list,
            name="Magnetic Flux Density",
            symbol="B",
            unit="T",
        )
        H_sol = build_solution_vector(
            field=H_elem[:, :, None, :],
            axis_list=axis_list,
            name="Magnetic Field",
            symbol="H",
            unit="A/m",
        )
        mu_sol = build_solution_data(
            field=mu_elem[:, :, None],
            axis_list=axis_list,
            name="Magnetic Permeability",
            symbol="\mu",
            unit="H/m",
        )
        indices_nodes = meshFEMM[0].node.indice
        Indices_Nodes = Data1D(name="indice", values=indices_nodes, is_components=True)
        axis_list_node = [Time, Indices_Nodes]
        A_sol = build_solution_data(
            field=A_node,
            axis_list=axis_list_node,
            name="Magnetic Potential Vector",
            symbol="A_z",
            unit="T.m",
        )
        A_sol.type_cell = "node"
        list_solution = [B_sol, H_sol, mu_sol, A_sol]
        out_dict["meshsolution"] = build_meshsolution(
            list_solution=list_solution,
            label="FEMM 2D Magnetostatic",
            list_mesh=meshFEMM,
            group=groups,
        )
    return out_dict
|
from os.path import isfile
from shutil import copyfile
from numpy import zeros
from SciDataTool import Data1D
from ....Classes._FEMMHandler import _FEMMHandler
from ....Classes.OutMagFEMM import OutMagFEMM
from ....Functions.labels import STATOR_LAB
from ....Functions.FEMM.draw_FEMM import draw_FEMM
from ....Functions.MeshSolution.build_solution_data import build_solution_data
from ....Functions.MeshSolution.build_meshsolution import build_meshsolution
from ....Functions.MeshSolution.build_solution_vector import build_solution_vector
def comp_flux_airgap(self, output, axes_dict, Is_val=None, Ir_val=None):
    """Build and solve FEMM model to calculate and store magnetic quantities

    Parameters
    ----------
    self : MagFEMM
        a MagFEMM object
    output : Output
        an Output object
    axes_dict: {Data}
        Dict of axes used for magnetic calculation
    Is_val : ndarray, optional
        Stator current values to enforce in FEMM (presumably (qs, Nt);
        None to let the solver take currents from output) -- TODO confirm shape
    Ir_val : ndarray, optional
        Rotor current values to enforce in FEMM (None to let the solver
        take currents from output)

    Returns
    -------
    out_dict: dict
        Dict containing the following quantities:
            Br : ndarray
                Airgap radial flux density (Nt,Na) [T]
            Bt : ndarray
                Airgap tangential flux density (Nt,Na) [T]
            Tem : ndarray
                Electromagnetic torque over time (Nt,) [Nm]
            Phi_wind_stator : ndarray
                Stator winding flux (qs,Nt) [Wb]
            Phi_wind : dict
                Dict of winding fluxlinkage with respect to Machine.get_lam_list_label (qs,Nt) [Wb]
            meshsolution: MeshSolution
                MeshSolution object containing magnetic quantities B, H, mu for each time step
    """

    logger = self.get_logger()

    # Init output
    out_dict = dict()
    if output.mag.internal is None:
        output.mag.internal = OutMagFEMM()

    # Get time and angular axes
    Angle = axes_dict["angle"]
    Time = axes_dict["time"]

    # Set the angular symmetry factor according to the machine and check if
    # it is anti-periodic
    sym, is_antiper_a = Angle.get_periodicity()

    # Import angular vector from Data object
    angle = Angle.get_values(
        is_oneperiod=self.is_periodicity_a,
        is_antiperiod=is_antiper_a and self.is_periodicity_a,
    )
    Na = angle.size

    # Check if the time axis is anti-periodic
    _, is_antiper_t = Time.get_periodicity()

    # Number of time steps
    time = Time.get_values(
        is_oneperiod=self.is_periodicity_t,
        is_antiperiod=is_antiper_t and self.is_periodicity_t,
    )
    Nt = time.size

    # Get rotor angular position (truncated to the simulated time steps)
    angle_rotor = output.get_angle_rotor()[0:Nt]

    # Setup the FEMM simulation
    # Geometry building and assigning property in FEMM
    # Instanciate a new FEMM handler; kept in handler_list so it can be
    # closed later by the caller
    femm = _FEMMHandler()
    output.mag.internal.handler_list.append(femm)
    if self.import_file is None:
        path_femm = self.get_path_save_fem(output)
        logger.debug("Drawing machine in FEMM at " + path_femm)
        FEMM_dict = draw_FEMM(
            femm,
            output,
            is_mmfr=self.is_mmfr,
            is_mmfs=self.is_mmfs,
            sym=sym,
            is_antiper=is_antiper_a,
            type_calc_leakage=self.type_calc_leakage,
            is_remove_ventS=self.is_remove_ventS,
            is_remove_ventR=self.is_remove_ventR,
            is_remove_slotS=self.is_remove_slotS,
            is_remove_slotR=self.is_remove_slotR,
            type_BH_stator=self.type_BH_stator,
            type_BH_rotor=self.type_BH_rotor,
            kgeo_fineness=self.Kgeo_fineness,
            kmesh_fineness=self.Kmesh_fineness,
            user_FEMM_dict=self.FEMM_dict_enforced,
            path_save=path_femm,
            is_sliding_band=self.is_sliding_band,
            transform_list=self.transform_list,
            rotor_dxf=self.rotor_dxf,
            stator_dxf=self.stator_dxf,
            is_fast_draw=self.is_fast_draw,
            T_mag=self.T_mag,
        )
    else:
        logger.debug("Reusing the FEMM file: " + self.import_file)
        # Take the FEMM parameters from a previous run when available,
        # otherwise fall back on the user-enforced dict
        if output.mag.internal.FEMM_dict is not None:
            FEMM_dict = output.mag.internal.FEMM_dict
        else:
            FEMM_dict = self.FEMM_dict_enforced

    # Init flux arrays in out_dict (filled in-place by solve_FEMM*)
    out_dict["B_{rad}"] = zeros((Nt, Na))
    out_dict["B_{circ}"] = zeros((Nt, Na))

    # Init torque array in out_dict
    out_dict["Tem"] = zeros((Nt))

    # Init lamination winding flux list of arrays in out_dict
    machine = output.simu.machine
    out_dict["Phi_wind"] = {}
    axes_dict_elec = output.elec.axes_dict
    for label in machine.get_lam_list_label():
        if "phase_" + label in axes_dict_elec:
            qs = axes_dict_elec["phase_" + label].get_length(is_smallestperiod=True)
            out_dict["Phi_wind"][label] = zeros((Nt, qs))

    # delete 'Phi_wind' if empty
    if len(out_dict["Phi_wind"]) == 0:
        out_dict.pop("Phi_wind")

    # Solve for all time step and store all the results in out_dict
    if self.nb_worker > 1:
        # A Femm handler will be created for each worker
        femm.closefemm()
        output.mag.internal.handler_list.remove(femm)
        # With parallelization
        B_elem, H_elem, mu_elem, A_node, meshFEMM, groups = self.solve_FEMM_parallel(
            femm,
            output,
            out_dict,
            FEMM_dict=FEMM_dict,
            sym=sym,
            Nt=Nt,
            angle=angle,
            Is=Is_val,
            Ir=Ir_val,
            angle_rotor=angle_rotor,
            filename=self.import_file,
        )
    else:
        # Without parallelization
        B_elem, H_elem, mu_elem, A_node, meshFEMM, groups = self.solve_FEMM(
            femm,
            output,
            out_dict,
            FEMM_dict=FEMM_dict,
            sym=sym,
            Nt=Nt,
            angle=angle,
            Is=Is_val,
            Ir=Ir_val,
            angle_rotor=angle_rotor,
            is_close_femm=self.is_close_femm,
            filename=self.import_file,
        )

    # Store FEMM_dict in out_dict if FEMM file is not imported
    if self.import_file is None:
        # Especially useful to avoid redrawing machine in case of skew
        self.import_file = path_femm
        output.mag.internal.FEMM_dict = FEMM_dict

    # Store stator winding flux
    # BUGFIX: 'Phi_wind' may have been popped above when empty, so guard the
    # lookup instead of indexing out_dict["Phi_wind"] unconditionally
    if STATOR_LAB + "-0" in out_dict.get("Phi_wind", {}):
        out_dict["Phi_wind_stator"] = out_dict["Phi_wind"][STATOR_LAB + "-0"]

    # Store mesh data & solution
    if self.is_get_meshsolution and B_elem is not None:
        # Define axis
        Time = Time.copy()
        meshFEMM[0].sym = sym
        meshFEMM[0].is_antiper_a = is_antiper_a
        indices_cell = meshFEMM[0].cell["triangle"].indice
        Indices_Cell = Data1D(
            name="indice", values=indices_cell, is_components=True, is_overlay=False
        )
        Slice = axes_dict["z"]
        axis_list = [Time, Indices_Cell, Slice]
        B_sol = build_solution_vector(
            field=B_elem[:, :, None, :],  # quick fix for slice issue
            axis_list=axis_list,
            name="Magnetic Flux Density",
            symbol="B",
            unit="T",
        )
        H_sol = build_solution_vector(
            field=H_elem[:, :, None, :],
            axis_list=axis_list,
            name="Magnetic Field",
            symbol="H",
            unit="A/m",
        )
        mu_sol = build_solution_data(
            field=mu_elem[:, :, None],
            axis_list=axis_list,
            name="Magnetic Permeability",
            # raw string: "\mu" is an invalid escape sequence (SyntaxWarning
            # on recent Python); the runtime value is unchanged
            symbol=r"\mu",
            unit="H/m",
        )
        indices_nodes = meshFEMM[0].node.indice
        Indices_Nodes = Data1D(name="indice", values=indices_nodes, is_components=True)
        axis_list_node = [Time, Indices_Nodes]
        A_sol = build_solution_data(
            field=A_node,
            axis_list=axis_list_node,
            name="Magnetic Potential Vector",
            symbol="A_z",
            unit="T.m",
        )
        # Magnetic potential is defined per node, not per cell
        A_sol.type_cell = "node"
        list_solution = [B_sol, H_sol, mu_sol, A_sol]

        out_dict["meshsolution"] = build_meshsolution(
            list_solution=list_solution,
            label="FEMM 2D Magnetostatic",
            list_mesh=meshFEMM,
            group=groups,
        )

    return out_dict
|
en
| 0.731271
|
Build and solve FEMM model to calculate and store magnetic quantities Parameters ---------- self : MagFEMM a MagFEMM object output : Output an Output object axes_dict: {Data} Dict of axes used for magnetic calculation Returns ------- out_dict: dict Dict containing the following quantities: Br : ndarray Airgap radial flux density (Nt,Na) [T] Bt : ndarray Airgap tangential flux density (Nt,Na) [T] Tem : ndarray Electromagnetic torque over time (Nt,) [Nm] Phi_wind_stator : ndarray Stator winding flux (qs,Nt) [Wb] Phi_wind : dict Dict of winding fluxlinkage with respect to Machine.get_lam_list_label (qs,Nt) [Wb] meshsolution: MeshSolution MeshSolution object containing magnetic quantities B, H, mu for each time step # Init output # Get time and angular axes # Set the angular symmetry factor according to the machine and check if it is anti-periodic # Import angular vector from Data object # Check if the time axis is anti-periodic # Number of time steps # Get rotor angular position # Setup the FEMM simulation # Geometry building and assigning property in FEMM # Instanciate a new FEMM # Init flux arrays in out_dict # Init torque array in out_dict # Init lamination winding flux list of arrays in out_dict # delete 'Phi_wind' if empty # Solve for all time step and store all the results in out_dict # A Femm handler will be created for each worker # With parallelization # Without parallelization # Store FEMM_dict in out_dict if FEMM file is not imported # Especially useful to avoid redrawing machine in case of skew # Store stator winding flux # Store mesh data & solution # Define axis # quick fix for slice issue
| 2.357188
| 2
|
compressor/management/commands/compress.py
|
ikatson/django_compressor
| 0
|
6629919
|
# flake8: noqa
import os
import sys
from types import MethodType
from fnmatch import fnmatch
from optparse import make_option
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO # noqa
from django.core.management.base import NoArgsCommand, CommandError
from django.template import (Context, Template,
TemplateDoesNotExist, TemplateSyntaxError)
from django.utils.datastructures import SortedDict
from django.utils.importlib import import_module
from django.template.loader import get_template # noqa Leave this in to preload template locations
from django.template.defaulttags import IfNode
from django.template.loader_tags import (ExtendsNode, BlockNode,
BLOCK_CONTEXT_KEY)
try:
from django.template.loaders.cached import Loader as CachedLoader
except ImportError:
CachedLoader = None # noqa
from compressor.cache import get_offline_hexdigest, write_offline_manifest
from compressor.conf import settings
from compressor.exceptions import OfflineGenerationError
from compressor.templatetags.compress import CompressorNode
from compressor.utils import walk, any
def patched_render(self, context):
    # 'Fake' _render method that just returns the context instead of
    # rendering. It also checks whether the first node is an extend node or
    # not, to be able to handle complex inheritance chain.
    # Installed onto compiled parent templates by patched_get_parent; the
    # real rendering is deliberately skipped because only the block
    # structure (render_context) is needed for offline compression.
    self._render_firstnode = MethodType(patched_render_firstnode, self)
    self._render_firstnode(context)
    # Cleanup, uninstall our _render monkeypatch now that it has been called
    self._render = self._old_render
    return context
def patched_render_firstnode(self, context):
    # If this template has a ExtendsNode, we want to find out what
    # should be put in render_context to make the {% block ... %}
    # tags work.
    #
    # We can't fully render the base template(s) (we don't have the
    # full context vars - only what's necessary to render the compress
    # nodes!), therefore we hack the ExtendsNode we found, patching
    # its get_parent method so that rendering the ExtendsNode only
    # gives us the blocks content without doing any actual rendering.
    #
    # Returns the resulting extra context ({} when the template does not
    # extend anything), or None when rendering the extend node failed --
    # callers treat None as "skip this template".
    extra_context = {}
    try:
        firstnode = self.nodelist[0]
    except IndexError:
        # Empty template: nothing to inspect, no extra context.
        firstnode = None
    if isinstance(firstnode, ExtendsNode):
        # Propagate logging state so patched_get_parent/patched_render can
        # report errors from deeper in the inheritance chain.
        firstnode._log = self._log
        firstnode._log_verbosity = self._log_verbosity
        firstnode._old_get_parent = firstnode.get_parent
        firstnode.get_parent = MethodType(patched_get_parent, firstnode)
        try:
            extra_context = firstnode.render(context)
            context.render_context = extra_context.render_context
            # We aren't rendering {% block %} tags, but we want
            # {{ block.super }} inside {% compress %} inside {% block %}s to
            # work. Therefore, we need to pop() the last block context for
            # each block name, to emulate what would have been done if the
            # {% block %} had been fully rendered.
            for blockname in firstnode.blocks.keys():
                context.render_context[BLOCK_CONTEXT_KEY].pop(blockname)
        except (IOError, TemplateSyntaxError, TemplateDoesNotExist):
            # That first node we are trying to render might cause more errors
            # that we didn't catch when simply creating a Template instance
            # above, so we need to catch that (and ignore it, just like above)
            # as well.
            if self._log_verbosity > 0:
                self._log.write("Caught error when rendering extend node from "
                                "template %s\n" % getattr(self, 'name', self))
            return None
        finally:
            # Cleanup, uninstall our get_parent monkeypatch now that it has been called
            firstnode.get_parent = firstnode._old_get_parent
    return extra_context
def patched_get_parent(self, context):
    # Resolve the parent template via the original get_parent, then swap
    # its _render for the fake one (patched_render) so "rendering" it only
    # collects block/context information instead of producing output.
    # patched_render in turn inspects the parent's first node, which keeps
    # the whole inheritance chain covered.
    parent = self._old_get_parent(context)
    parent._log = self._log
    parent._log_verbosity = self._log_verbosity
    parent._old_render = parent._render
    parent._render = MethodType(patched_render, parent)
    return parent
class Command(NoArgsCommand):
help = "Compress content outside of the request/response cycle"
option_list = NoArgsCommand.option_list + (
make_option('--extension', '-e', action='append', dest='extensions',
help='The file extension(s) to examine (default: ".html", '
'separate multiple extensions with commas, or use -e '
'multiple times)'),
make_option('-f', '--force', default=False, action='store_true',
help="Force the generation of compressed content even if the "
"COMPRESS_ENABLED setting is not True.", dest='force'),
make_option('--follow-links', default=False, action='store_true',
help="Follow symlinks when traversing the COMPRESS_ROOT "
"(which defaults to MEDIA_ROOT). Be aware that using this "
"can lead to infinite recursion if a link points to a parent "
"directory of itself.", dest='follow_links'),
)
requires_model_validation = False
def get_loaders(self):
from django.template.loader import template_source_loaders
if template_source_loaders is None:
try:
from django.template.loader import (
find_template as finder_func)
except ImportError:
from django.template.loader import (
find_template_source as finder_func) # noqa
try:
# Force django to calculate template_source_loaders from
# TEMPLATE_LOADERS settings, by asking to find a dummy template
source, name = finder_func('test')
except TemplateDoesNotExist:
pass
# Reload template_source_loaders now that it has been calculated ;
# it should contain the list of valid, instanciated template loaders
# to use.
from django.template.loader import template_source_loaders
loaders = []
# If template loader is CachedTemplateLoader, return the loaders
# that it wraps around. So if we have
# TEMPLATE_LOADERS = (
# ('django.template.loaders.cached.Loader', (
# 'django.template.loaders.filesystem.Loader',
# 'django.template.loaders.app_directories.Loader',
# )),
# )
# The loaders will return django.template.loaders.filesystem.Loader
# and django.template.loaders.app_directories.Loader
for loader in template_source_loaders:
if CachedLoader is not None and isinstance(loader, CachedLoader):
loaders.extend(loader.loaders)
else:
loaders.append(loader)
return loaders
def compress(self, log=None, **options):
"""
Searches templates containing 'compress' nodes and compresses them
"offline" -- outside of the request/response cycle.
The result is cached with a cache-key derived from the content of the
compress nodes (not the content of the possibly linked files!).
"""
extensions = options.get('extensions')
extensions = self.handle_extensions(extensions or ['html'])
verbosity = int(options.get("verbosity", 0))
if not log:
log = StringIO()
if not settings.TEMPLATE_LOADERS:
raise OfflineGenerationError("No template loaders defined. You "
"must set TEMPLATE_LOADERS in your "
"settings.")
paths = set()
for loader in self.get_loaders():
try:
module = import_module(loader.__module__)
get_template_sources = getattr(module,
'get_template_sources', None)
if get_template_sources is None:
get_template_sources = loader.get_template_sources
paths.update(list(get_template_sources('')))
except (ImportError, AttributeError):
# Yeah, this didn't work out so well, let's move on
pass
if not paths:
raise OfflineGenerationError("No template paths found. None of "
"the configured template loaders "
"provided template paths. See "
"http://django.me/template-loaders "
"for more information on template "
"loaders.")
if verbosity > 1:
log.write("Considering paths:\n\t" + "\n\t".join(paths) + "\n")
templates = set()
for path in paths:
for root, dirs, files in walk(path,
followlinks=options.get('followlinks', False)):
templates.update(os.path.join(root, name)
for name in files if not name.startswith('.') and
any(fnmatch(name, "*%s" % glob) for glob in extensions))
if not templates:
raise OfflineGenerationError("No templates found. Make sure your "
"TEMPLATE_LOADERS and TEMPLATE_DIRS "
"settings are correct.")
if verbosity > 1:
log.write("Found templates:\n\t" + "\n\t".join(templates) + "\n")
compressor_nodes = SortedDict()
for template_name in templates:
try:
template_file = open(template_name)
try:
template = Template(template_file.read().decode(
settings.FILE_CHARSET))
finally:
template_file.close()
except IOError: # unreadable file -> ignore
if verbosity > 0:
log.write("Unreadable template at: %s\n" % template_name)
continue
except TemplateSyntaxError, e: # broken template -> ignore
if verbosity > 0:
log.write("Invalid template %s: %s\n" % (template_name, e))
continue
except TemplateDoesNotExist: # non existent template -> ignore
if verbosity > 0:
log.write("Non-existent template at: %s\n" % template_name)
continue
except UnicodeDecodeError:
if verbosity > 0:
log.write("UnicodeDecodeError while trying to read "
"template %s\n" % template_name)
nodes = list(self.walk_nodes(template))
if nodes:
template.template_name = template_name
compressor_nodes.setdefault(template, []).extend(nodes)
if not compressor_nodes:
raise OfflineGenerationError(
"No 'compress' template tags found in templates."
"Try running compress command with --follow-links and/or"
"--extension=EXTENSIONS")
if verbosity > 0:
log.write("Found 'compress' tags in:\n\t" +
"\n\t".join((t.template_name
for t in compressor_nodes.keys())) + "\n")
log.write("Compressing... ")
count = 0
results = []
offline_manifest = SortedDict()
for template, nodes in compressor_nodes.iteritems():
context = Context(settings.COMPRESS_OFFLINE_CONTEXT)
template._log = log
template._log_verbosity = verbosity
template._render_firstnode = MethodType(patched_render_firstnode, template)
extra_context = template._render_firstnode(context)
if extra_context is None:
# Something is wrong - ignore this template
continue
for node in nodes:
context.push()
if extra_context and node._block_name:
# Give a block context to the node if it was found inside
# a {% block %}.
context['block'] = context.render_context[BLOCK_CONTEXT_KEY].get_block(node._block_name)
if context['block']:
context['block'].context = context
key = get_offline_hexdigest(node.nodelist.render(context))
try:
result = node.render(context, forced=True)
except Exception, e:
raise CommandError("An error occured during rendering %s: "
"%s" % (template.template_name, e))
offline_manifest[key] = result
context.pop()
results.append(result)
count += 1
write_offline_manifest(offline_manifest)
log.write("done\nCompressed %d block(s) from %d template(s).\n" %
(count, len(compressor_nodes)))
return count, results
def get_nodelist(self, node):
if (isinstance(node, IfNode) and
hasattr(node, 'nodelist_true') and
hasattr(node, 'nodelist_false')):
return node.nodelist_true + node.nodelist_false
return getattr(node, "nodelist", [])
def walk_nodes(self, node, block_name=None):
for node in self.get_nodelist(node):
if isinstance(node, BlockNode):
block_name = node.name
if isinstance(node, CompressorNode) and node.is_offline_compression_enabled(forced=True):
node._block_name = block_name
yield node
else:
for node in self.walk_nodes(node, block_name=block_name):
yield node
def handle_extensions(self, extensions=('html',)):
"""
organizes multiple extensions that are separated with commas or
passed by using --extension/-e multiple times.
for example: running 'django-admin compress -e js,txt -e xhtml -a'
would result in a extension list: ['.js', '.txt', '.xhtml']
>>> handle_extensions(['.html', 'html,js,py,py,py,.py', 'py,.py'])
['.html', '.js']
>>> handle_extensions(['.html, txt,.tpl'])
['.html', '.tpl', '.txt']
"""
ext_list = []
for ext in extensions:
ext_list.extend(ext.replace(' ', '').split(','))
for i, ext in enumerate(ext_list):
if not ext.startswith('.'):
ext_list[i] = '.%s' % ext_list[i]
return set(ext_list)
def handle_noargs(self, **options):
if not settings.COMPRESS_ENABLED and not options.get("force"):
raise CommandError(
"Compressor is disabled. Set the COMPRESS_ENABLED "
"settting or use --force to override.")
if not settings.COMPRESS_OFFLINE:
if not options.get("force"):
raise CommandError(
"Offline compression is disabled. Set "
"COMPRESS_OFFLINE or use the --force to override.")
self.compress(sys.stdout, **options)
|
# flake8: noqa
import os
import sys
from types import MethodType
from fnmatch import fnmatch
from optparse import make_option
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO # noqa
from django.core.management.base import NoArgsCommand, CommandError
from django.template import (Context, Template,
TemplateDoesNotExist, TemplateSyntaxError)
from django.utils.datastructures import SortedDict
from django.utils.importlib import import_module
from django.template.loader import get_template # noqa Leave this in to preload template locations
from django.template.defaulttags import IfNode
from django.template.loader_tags import (ExtendsNode, BlockNode,
BLOCK_CONTEXT_KEY)
try:
from django.template.loaders.cached import Loader as CachedLoader
except ImportError:
CachedLoader = None # noqa
from compressor.cache import get_offline_hexdigest, write_offline_manifest
from compressor.conf import settings
from compressor.exceptions import OfflineGenerationError
from compressor.templatetags.compress import CompressorNode
from compressor.utils import walk, any
def patched_render(self, context):
# 'Fake' _render method that just returns the context instead of
# rendering. It also checks whether the first node is an extend node or
# not, to be able to handle complex inheritance chain.
self._render_firstnode = MethodType(patched_render_firstnode, self)
self._render_firstnode(context)
# Cleanup, uninstall our _render monkeypatch now that it has been called
self._render = self._old_render
return context
def patched_render_firstnode(self, context):
# If this template has a ExtendsNode, we want to find out what
# should be put in render_context to make the {% block ... %}
# tags work.
#
# We can't fully render the base template(s) (we don't have the
# full context vars - only what's necessary to render the compress
# nodes!), therefore we hack the ExtendsNode we found, patching
# its get_parent method so that rendering the ExtendsNode only
# gives us the blocks content without doing any actual rendering.
extra_context = {}
try:
firstnode = self.nodelist[0]
except IndexError:
firstnode = None
if isinstance(firstnode, ExtendsNode):
firstnode._log = self._log
firstnode._log_verbosity = self._log_verbosity
firstnode._old_get_parent = firstnode.get_parent
firstnode.get_parent = MethodType(patched_get_parent, firstnode)
try:
extra_context = firstnode.render(context)
context.render_context = extra_context.render_context
# We aren't rendering {% block %} tags, but we want
# {{ block.super }} inside {% compress %} inside {% block %}s to
# work. Therefore, we need to pop() the last block context for
# each block name, to emulate what would have been done if the
# {% block %} had been fully rendered.
for blockname in firstnode.blocks.keys():
context.render_context[BLOCK_CONTEXT_KEY].pop(blockname)
except (IOError, TemplateSyntaxError, TemplateDoesNotExist):
# That first node we are trying to render might cause more errors
# that we didn't catch when simply creating a Template instance
# above, so we need to catch that (and ignore it, just like above)
# as well.
if self._log_verbosity > 0:
self._log.write("Caught error when rendering extend node from "
"template %s\n" % getattr(self, 'name', self))
return None
finally:
# Cleanup, uninstall our get_parent monkeypatch now that it has been called
firstnode.get_parent = firstnode._old_get_parent
return extra_context
def patched_get_parent(self, context):
# Patch template returned by extendsnode's get_parent to make sure their
# _render method is just returning the context instead of actually
# rendering stuff.
# In addition, this follows the inheritance chain by looking if the first
# node of the template is an extend node itself.
compiled_template = self._old_get_parent(context)
compiled_template._log = self._log
compiled_template._log_verbosity = self._log_verbosity
compiled_template._old_render = compiled_template._render
compiled_template._render = MethodType(patched_render, compiled_template)
return compiled_template
class Command(NoArgsCommand):
help = "Compress content outside of the request/response cycle"
option_list = NoArgsCommand.option_list + (
make_option('--extension', '-e', action='append', dest='extensions',
help='The file extension(s) to examine (default: ".html", '
'separate multiple extensions with commas, or use -e '
'multiple times)'),
make_option('-f', '--force', default=False, action='store_true',
help="Force the generation of compressed content even if the "
"COMPRESS_ENABLED setting is not True.", dest='force'),
make_option('--follow-links', default=False, action='store_true',
help="Follow symlinks when traversing the COMPRESS_ROOT "
"(which defaults to MEDIA_ROOT). Be aware that using this "
"can lead to infinite recursion if a link points to a parent "
"directory of itself.", dest='follow_links'),
)
requires_model_validation = False
def get_loaders(self):
from django.template.loader import template_source_loaders
if template_source_loaders is None:
try:
from django.template.loader import (
find_template as finder_func)
except ImportError:
from django.template.loader import (
find_template_source as finder_func) # noqa
try:
# Force django to calculate template_source_loaders from
# TEMPLATE_LOADERS settings, by asking to find a dummy template
source, name = finder_func('test')
except TemplateDoesNotExist:
pass
# Reload template_source_loaders now that it has been calculated ;
# it should contain the list of valid, instanciated template loaders
# to use.
from django.template.loader import template_source_loaders
loaders = []
# If template loader is CachedTemplateLoader, return the loaders
# that it wraps around. So if we have
# TEMPLATE_LOADERS = (
# ('django.template.loaders.cached.Loader', (
# 'django.template.loaders.filesystem.Loader',
# 'django.template.loaders.app_directories.Loader',
# )),
# )
# The loaders will return django.template.loaders.filesystem.Loader
# and django.template.loaders.app_directories.Loader
for loader in template_source_loaders:
if CachedLoader is not None and isinstance(loader, CachedLoader):
loaders.extend(loader.loaders)
else:
loaders.append(loader)
return loaders
def compress(self, log=None, **options):
"""
Searches templates containing 'compress' nodes and compresses them
"offline" -- outside of the request/response cycle.
The result is cached with a cache-key derived from the content of the
compress nodes (not the content of the possibly linked files!).
"""
extensions = options.get('extensions')
extensions = self.handle_extensions(extensions or ['html'])
verbosity = int(options.get("verbosity", 0))
if not log:
log = StringIO()
if not settings.TEMPLATE_LOADERS:
raise OfflineGenerationError("No template loaders defined. You "
"must set TEMPLATE_LOADERS in your "
"settings.")
paths = set()
for loader in self.get_loaders():
try:
module = import_module(loader.__module__)
get_template_sources = getattr(module,
'get_template_sources', None)
if get_template_sources is None:
get_template_sources = loader.get_template_sources
paths.update(list(get_template_sources('')))
except (ImportError, AttributeError):
# Yeah, this didn't work out so well, let's move on
pass
if not paths:
raise OfflineGenerationError("No template paths found. None of "
"the configured template loaders "
"provided template paths. See "
"http://django.me/template-loaders "
"for more information on template "
"loaders.")
if verbosity > 1:
log.write("Considering paths:\n\t" + "\n\t".join(paths) + "\n")
templates = set()
for path in paths:
for root, dirs, files in walk(path,
followlinks=options.get('followlinks', False)):
templates.update(os.path.join(root, name)
for name in files if not name.startswith('.') and
any(fnmatch(name, "*%s" % glob) for glob in extensions))
if not templates:
raise OfflineGenerationError("No templates found. Make sure your "
"TEMPLATE_LOADERS and TEMPLATE_DIRS "
"settings are correct.")
if verbosity > 1:
log.write("Found templates:\n\t" + "\n\t".join(templates) + "\n")
compressor_nodes = SortedDict()
for template_name in templates:
try:
template_file = open(template_name)
try:
template = Template(template_file.read().decode(
settings.FILE_CHARSET))
finally:
template_file.close()
except IOError: # unreadable file -> ignore
if verbosity > 0:
log.write("Unreadable template at: %s\n" % template_name)
continue
except TemplateSyntaxError, e: # broken template -> ignore
if verbosity > 0:
log.write("Invalid template %s: %s\n" % (template_name, e))
continue
except TemplateDoesNotExist: # non existent template -> ignore
if verbosity > 0:
log.write("Non-existent template at: %s\n" % template_name)
continue
except UnicodeDecodeError:
if verbosity > 0:
log.write("UnicodeDecodeError while trying to read "
"template %s\n" % template_name)
nodes = list(self.walk_nodes(template))
if nodes:
template.template_name = template_name
compressor_nodes.setdefault(template, []).extend(nodes)
if not compressor_nodes:
raise OfflineGenerationError(
"No 'compress' template tags found in templates."
"Try running compress command with --follow-links and/or"
"--extension=EXTENSIONS")
if verbosity > 0:
log.write("Found 'compress' tags in:\n\t" +
"\n\t".join((t.template_name
for t in compressor_nodes.keys())) + "\n")
log.write("Compressing... ")
count = 0
results = []
offline_manifest = SortedDict()
for template, nodes in compressor_nodes.iteritems():
context = Context(settings.COMPRESS_OFFLINE_CONTEXT)
template._log = log
template._log_verbosity = verbosity
template._render_firstnode = MethodType(patched_render_firstnode, template)
extra_context = template._render_firstnode(context)
if extra_context is None:
# Something is wrong - ignore this template
continue
for node in nodes:
context.push()
if extra_context and node._block_name:
# Give a block context to the node if it was found inside
# a {% block %}.
context['block'] = context.render_context[BLOCK_CONTEXT_KEY].get_block(node._block_name)
if context['block']:
context['block'].context = context
key = get_offline_hexdigest(node.nodelist.render(context))
try:
result = node.render(context, forced=True)
except Exception, e:
raise CommandError("An error occured during rendering %s: "
"%s" % (template.template_name, e))
offline_manifest[key] = result
context.pop()
results.append(result)
count += 1
write_offline_manifest(offline_manifest)
log.write("done\nCompressed %d block(s) from %d template(s).\n" %
(count, len(compressor_nodes)))
return count, results
def get_nodelist(self, node):
if (isinstance(node, IfNode) and
hasattr(node, 'nodelist_true') and
hasattr(node, 'nodelist_false')):
return node.nodelist_true + node.nodelist_false
return getattr(node, "nodelist", [])
def walk_nodes(self, node, block_name=None):
for node in self.get_nodelist(node):
if isinstance(node, BlockNode):
block_name = node.name
if isinstance(node, CompressorNode) and node.is_offline_compression_enabled(forced=True):
node._block_name = block_name
yield node
else:
for node in self.walk_nodes(node, block_name=block_name):
yield node
def handle_extensions(self, extensions=('html',)):
"""
organizes multiple extensions that are separated with commas or
passed by using --extension/-e multiple times.
for example: running 'django-admin compress -e js,txt -e xhtml -a'
would result in a extension list: ['.js', '.txt', '.xhtml']
>>> handle_extensions(['.html', 'html,js,py,py,py,.py', 'py,.py'])
['.html', '.js']
>>> handle_extensions(['.html, txt,.tpl'])
['.html', '.tpl', '.txt']
"""
ext_list = []
for ext in extensions:
ext_list.extend(ext.replace(' ', '').split(','))
for i, ext in enumerate(ext_list):
if not ext.startswith('.'):
ext_list[i] = '.%s' % ext_list[i]
return set(ext_list)
def handle_noargs(self, **options):
if not settings.COMPRESS_ENABLED and not options.get("force"):
raise CommandError(
"Compressor is disabled. Set the COMPRESS_ENABLED "
"settting or use --force to override.")
if not settings.COMPRESS_OFFLINE:
if not options.get("force"):
raise CommandError(
"Offline compression is disabled. Set "
"COMPRESS_OFFLINE or use the --force to override.")
self.compress(sys.stdout, **options)
|
en
| 0.81914
|
# flake8: noqa # noqa # noqa Leave this in to preload template locations # noqa # 'Fake' _render method that just returns the context instead of # rendering. It also checks whether the first node is an extend node or # not, to be able to handle complex inheritance chain. # Cleanup, uninstall our _render monkeypatch now that it has been called # If this template has a ExtendsNode, we want to find out what # should be put in render_context to make the {% block ... %} # tags work. # # We can't fully render the base template(s) (we don't have the # full context vars - only what's necessary to render the compress # nodes!), therefore we hack the ExtendsNode we found, patching # its get_parent method so that rendering the ExtendsNode only # gives us the blocks content without doing any actual rendering. # We aren't rendering {% block %} tags, but we want # {{ block.super }} inside {% compress %} inside {% block %}s to # work. Therefore, we need to pop() the last block context for # each block name, to emulate what would have been done if the # {% block %} had been fully rendered. # That first node we are trying to render might cause more errors # that we didn't catch when simply creating a Template instance # above, so we need to catch that (and ignore it, just like above) # as well. # Cleanup, uninstall our get_parent monkeypatch now that it has been called # Patch template returned by extendsnode's get_parent to make sure their # _render method is just returning the context instead of actually # rendering stuff. # In addition, this follows the inheritance chain by looking if the first # node of the template is an extend node itself. # noqa # Force django to calculate template_source_loaders from # TEMPLATE_LOADERS settings, by asking to find a dummy template # Reload template_source_loaders now that it has been calculated ; # it should contain the list of valid, instanciated template loaders # to use. 
# If template loader is CachedTemplateLoader, return the loaders # that it wraps around. So if we have # TEMPLATE_LOADERS = ( # ('django.template.loaders.cached.Loader', ( # 'django.template.loaders.filesystem.Loader', # 'django.template.loaders.app_directories.Loader', # )), # ) # The loaders will return django.template.loaders.filesystem.Loader # and django.template.loaders.app_directories.Loader Searches templates containing 'compress' nodes and compresses them "offline" -- outside of the request/response cycle. The result is cached with a cache-key derived from the content of the compress nodes (not the content of the possibly linked files!). # Yeah, this didn't work out so well, let's move on # unreadable file -> ignore # broken template -> ignore # non existent template -> ignore # Something is wrong - ignore this template # Give a block context to the node if it was found inside # a {% block %}. organizes multiple extensions that are separated with commas or passed by using --extension/-e multiple times. for example: running 'django-admin compress -e js,txt -e xhtml -a' would result in a extension list: ['.js', '.txt', '.xhtml'] >>> handle_extensions(['.html', 'html,js,py,py,py,.py', 'py,.py']) ['.html', '.js'] >>> handle_extensions(['.html, txt,.tpl']) ['.html', '.tpl', '.txt']
| 1.898593
| 2
|
binding.gyp
|
Ali-Amir/ssvm-napi
| 0
|
6629920
|
{
"targets": [
{
"target_name": "<(module_name)",
"cflags_cc": [ "-std=c++17" ],
"cflags!": [ "-fno-exceptions", "-fno-rtti" ],
"cflags_cc!": [ "-fno-exceptions", "-fno-rtti" ],
"xcode_settings": {
"OTHER_CFLAGS": [ "-std=c++17"],
},
"link_settings": {
"libraries": [
"/usr/lib/llvm-10/lib/libLLVM.so",
"/usr/lib/llvm-10/lib/liblldELF.a",
"/usr/lib/llvm-10/lib/liblldCommon.a",
"/usr/lib/llvm-10/lib/liblldCore.a",
"/usr/lib/llvm-10/lib/liblldDriver.a",
"/usr/lib/llvm-10/lib/liblldReaderWriter.a",
"/usr/lib/llvm-10/lib/liblldYAML.a",
],
},
"sources": [
"src/addon.cc",
"src/bytecode.cc",
"src/options.cc",
"src/ssvmaddon.cc",
"src/utils.cc",
"ssvm-core/lib/aot/compiler.cpp",
"ssvm-core/lib/ast/description.cpp",
"ssvm-core/lib/ast/expression.cpp",
"ssvm-core/lib/ast/instruction.cpp",
"ssvm-core/lib/ast/module.cpp",
"ssvm-core/lib/ast/section.cpp",
"ssvm-core/lib/ast/segment.cpp",
"ssvm-core/lib/ast/type.cpp",
"ssvm-core/lib/common/hexstr.cpp",
"ssvm-core/lib/common/log.cpp",
"ssvm-core/lib/common/proposal.cpp",
"ssvm-core/lib/host/ssvm_process/processfunc.cpp",
"ssvm-core/lib/host/ssvm_process/processmodule.cpp",
"ssvm-core/lib/host/wasi/wasienv.cpp",
"ssvm-core/lib/host/wasi/wasifunc.cpp",
"ssvm-core/lib/host/wasi/wasimodule.cpp",
"ssvm-core/lib/interpreter/engine/control.cpp",
"ssvm-core/lib/interpreter/engine/engine.cpp",
"ssvm-core/lib/interpreter/engine/memory.cpp",
"ssvm-core/lib/interpreter/engine/proxy.cpp",
"ssvm-core/lib/interpreter/engine/table.cpp",
"ssvm-core/lib/interpreter/engine/variable.cpp",
"ssvm-core/lib/interpreter/instantiate/data.cpp",
"ssvm-core/lib/interpreter/instantiate/elem.cpp",
"ssvm-core/lib/interpreter/instantiate/export.cpp",
"ssvm-core/lib/interpreter/instantiate/function.cpp",
"ssvm-core/lib/interpreter/instantiate/global.cpp",
"ssvm-core/lib/interpreter/instantiate/import.cpp",
"ssvm-core/lib/interpreter/instantiate/memory.cpp",
"ssvm-core/lib/interpreter/instantiate/module.cpp",
"ssvm-core/lib/interpreter/instantiate/table.cpp",
"ssvm-core/lib/interpreter/helper.cpp",
"ssvm-core/lib/interpreter/interpreter.cpp",
"ssvm-core/lib/loader/filemgr.cpp",
"ssvm-core/lib/loader/ldmgr.cpp",
"ssvm-core/lib/loader/loader.cpp",
"ssvm-core/lib/loader/shared_library.cpp",
"ssvm-core/lib/validator/formchecker.cpp",
"ssvm-core/lib/validator/validator.cpp",
"ssvm-core/lib/vm/vm.cpp",
"ssvm-core/thirdparty/easyloggingpp/easylogging++.cc",
],
"include_dirs": [
"<!@(node -p \"require('node-addon-api').include\")",
"src",
"ssvm-core/include",
"ssvm-core/thirdparty",
"ssvm-core/thirdparty/googletest/include",
"/usr/lib/llvm-10/include",
],
'defines': [ 'NAPI_DISABLE_CPP_EXCEPTIONS' ],
},
{
"target_name": "action_after_build",
"type": "none",
"dependencies": [ "<(module_name)" ],
"copies": [
{
"files": [ "<(PRODUCT_DIR)/<(module_name).node" ],
"destination": "<(module_path)"
}
]
}
]
}
|
{
"targets": [
{
"target_name": "<(module_name)",
"cflags_cc": [ "-std=c++17" ],
"cflags!": [ "-fno-exceptions", "-fno-rtti" ],
"cflags_cc!": [ "-fno-exceptions", "-fno-rtti" ],
"xcode_settings": {
"OTHER_CFLAGS": [ "-std=c++17"],
},
"link_settings": {
"libraries": [
"/usr/lib/llvm-10/lib/libLLVM.so",
"/usr/lib/llvm-10/lib/liblldELF.a",
"/usr/lib/llvm-10/lib/liblldCommon.a",
"/usr/lib/llvm-10/lib/liblldCore.a",
"/usr/lib/llvm-10/lib/liblldDriver.a",
"/usr/lib/llvm-10/lib/liblldReaderWriter.a",
"/usr/lib/llvm-10/lib/liblldYAML.a",
],
},
"sources": [
"src/addon.cc",
"src/bytecode.cc",
"src/options.cc",
"src/ssvmaddon.cc",
"src/utils.cc",
"ssvm-core/lib/aot/compiler.cpp",
"ssvm-core/lib/ast/description.cpp",
"ssvm-core/lib/ast/expression.cpp",
"ssvm-core/lib/ast/instruction.cpp",
"ssvm-core/lib/ast/module.cpp",
"ssvm-core/lib/ast/section.cpp",
"ssvm-core/lib/ast/segment.cpp",
"ssvm-core/lib/ast/type.cpp",
"ssvm-core/lib/common/hexstr.cpp",
"ssvm-core/lib/common/log.cpp",
"ssvm-core/lib/common/proposal.cpp",
"ssvm-core/lib/host/ssvm_process/processfunc.cpp",
"ssvm-core/lib/host/ssvm_process/processmodule.cpp",
"ssvm-core/lib/host/wasi/wasienv.cpp",
"ssvm-core/lib/host/wasi/wasifunc.cpp",
"ssvm-core/lib/host/wasi/wasimodule.cpp",
"ssvm-core/lib/interpreter/engine/control.cpp",
"ssvm-core/lib/interpreter/engine/engine.cpp",
"ssvm-core/lib/interpreter/engine/memory.cpp",
"ssvm-core/lib/interpreter/engine/proxy.cpp",
"ssvm-core/lib/interpreter/engine/table.cpp",
"ssvm-core/lib/interpreter/engine/variable.cpp",
"ssvm-core/lib/interpreter/instantiate/data.cpp",
"ssvm-core/lib/interpreter/instantiate/elem.cpp",
"ssvm-core/lib/interpreter/instantiate/export.cpp",
"ssvm-core/lib/interpreter/instantiate/function.cpp",
"ssvm-core/lib/interpreter/instantiate/global.cpp",
"ssvm-core/lib/interpreter/instantiate/import.cpp",
"ssvm-core/lib/interpreter/instantiate/memory.cpp",
"ssvm-core/lib/interpreter/instantiate/module.cpp",
"ssvm-core/lib/interpreter/instantiate/table.cpp",
"ssvm-core/lib/interpreter/helper.cpp",
"ssvm-core/lib/interpreter/interpreter.cpp",
"ssvm-core/lib/loader/filemgr.cpp",
"ssvm-core/lib/loader/ldmgr.cpp",
"ssvm-core/lib/loader/loader.cpp",
"ssvm-core/lib/loader/shared_library.cpp",
"ssvm-core/lib/validator/formchecker.cpp",
"ssvm-core/lib/validator/validator.cpp",
"ssvm-core/lib/vm/vm.cpp",
"ssvm-core/thirdparty/easyloggingpp/easylogging++.cc",
],
"include_dirs": [
"<!@(node -p \"require('node-addon-api').include\")",
"src",
"ssvm-core/include",
"ssvm-core/thirdparty",
"ssvm-core/thirdparty/googletest/include",
"/usr/lib/llvm-10/include",
],
'defines': [ 'NAPI_DISABLE_CPP_EXCEPTIONS' ],
},
{
"target_name": "action_after_build",
"type": "none",
"dependencies": [ "<(module_name)" ],
"copies": [
{
"files": [ "<(PRODUCT_DIR)/<(module_name).node" ],
"destination": "<(module_path)"
}
]
}
]
}
|
none
| 1
| 1.270547
| 1
|
|
OneByteLdr.py
|
rakion99/OneByteLdr
| 0
|
6629921
|
<reponame>rakion99/OneByteLdr<gh_stars>0
import pymem
import re
pm = pymem.Pymem('csgo.exe')
# bypass NtOpenFile hook in csgo.exe
csgo = pymem.process.module_from_name(pm.process_handle,
'csgo.exe')
csgoModule = pm.read_bytes(csgo.lpBaseOfDll, csgo.SizeOfImage)
address = csgo.lpBaseOfDll + re.search(rb'.\x1A\xF6\x45\x0C\x20',
csgoModule).start()
pm.write_uchar(address, 0xEB if pm.read_uchar(address) == 0x74 else 0x74)
# bypass thread creation detection in DllMain of client.dll
client = pymem.process.module_from_name(pm.process_handle,
'client.dll')
clientModule = pm.read_bytes(client.lpBaseOfDll, client.SizeOfImage)
address = client.lpBaseOfDll + re.search(rb'.\x69\x6A\x00\x6A\x04',
clientModule).start()
pm.write_uchar(address, 0xEB if pm.read_uchar(address) == 0x74 else 0x74)
pm.close_process()
|
import pymem
import re
pm = pymem.Pymem('csgo.exe')
# bypass NtOpenFile hook in csgo.exe
csgo = pymem.process.module_from_name(pm.process_handle,
'csgo.exe')
csgoModule = pm.read_bytes(csgo.lpBaseOfDll, csgo.SizeOfImage)
address = csgo.lpBaseOfDll + re.search(rb'.\x1A\xF6\x45\x0C\x20',
csgoModule).start()
pm.write_uchar(address, 0xEB if pm.read_uchar(address) == 0x74 else 0x74)
# bypass thread creation detection in DllMain of client.dll
client = pymem.process.module_from_name(pm.process_handle,
'client.dll')
clientModule = pm.read_bytes(client.lpBaseOfDll, client.SizeOfImage)
address = client.lpBaseOfDll + re.search(rb'.\x69\x6A\x00\x6A\x04',
clientModule).start()
pm.write_uchar(address, 0xEB if pm.read_uchar(address) == 0x74 else 0x74)
pm.close_process()
|
en
| 0.91532
|
# bypass NtOpenFile hook in csgo.exe # bypass thread creation detection in DllMain of client.dll
| 1.906331
| 2
|
social_network/urls.py
|
eidelmanj/volunteer_app
| 0
|
6629922
|
<reponame>eidelmanj/volunteer_app<filename>social_network/urls.py
from django.conf.urls import patterns, url
from social_network import views
urlpatterns = patterns('',
# Main page URLS
url(r'^$', views.index, name='index'),
url(r'login/', views.log_in, name='log_in'),
url(r'authenticate/', views.authentication, name='authentication'),
url(r'create_account/', views.create_account, name='create_account'),
url(r'new_account_success/', views.new_account_success, name='new_account_success'),
url(r'job_search/', views.job_search, name='job_search'),
url(r'search_backend/', views.job_search_backend, name='job_search_backend'),
url(r'profile/', views.profile, name='profile'),
url(r'logout/', views.log_out, name='log_out'),
)
|
from django.conf.urls import patterns, url
from social_network import views
urlpatterns = patterns('',
# Main page URLS
url(r'^$', views.index, name='index'),
url(r'login/', views.log_in, name='log_in'),
url(r'authenticate/', views.authentication, name='authentication'),
url(r'create_account/', views.create_account, name='create_account'),
url(r'new_account_success/', views.new_account_success, name='new_account_success'),
url(r'job_search/', views.job_search, name='job_search'),
url(r'search_backend/', views.job_search_backend, name='job_search_backend'),
url(r'profile/', views.profile, name='profile'),
url(r'logout/', views.log_out, name='log_out'),
)
|
en
| 0.459698
|
# Main page URLS
| 1.906921
| 2
|
settings_test.py
|
srijyothsna/django-turtle-shell
| 1
|
6629923
|
from pathlib import Path
ROOT_DIR = Path(__file__).parent
DEBUG = True
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
}
}
SECRET_KEY = "whatever"
INSTALLED_APPS = (
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.sites",
"turtle_shell",
"graphene_django",
)
|
from pathlib import Path
ROOT_DIR = Path(__file__).parent
DEBUG = True
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
}
}
SECRET_KEY = "whatever"
INSTALLED_APPS = (
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.sites",
"turtle_shell",
"graphene_django",
)
|
none
| 1
| 1.589453
| 2
|
|
pyronear/models/mobilenet.py
|
JoaoFdC/PyroNear
| 0
|
6629924
|
#!usr/bin/python
# -*- coding: utf-8 -*-
from torchvision.models.mobilenet import MobileNetV2, model_urls as imagenet_urls
from torchvision.models.utils import load_state_dict_from_url
from .utils import cnn_model
__all__ = ['mobilenet_v2']
model_urls = {
'mobilenet_v2': 'https://srv-file7.gofile.io/download/RKagNy/mobilenet_v2-binary-classification.pth'
}
model_cut = -1
def mobilenet_v2(pretrained=False, progress=True, imagenet_pretrained=False,
num_classes=1, lin_features=512, dropout_prob=0.5,
bn_final=False, concat_pool=True, **kwargs):
r"""MobileNetV2 model from
`"MobileNetV2: Inverted Residuals and Linear Bottlenecks" <https://arxiv.org/abs/1801.04381>`_.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
imagenet_pretrained (bool, optional): should pretrained parameters be loaded on conv layers (ImageNet training)
num_classes (int, optional): number of output classes
lin_features (Union[int, list<int>], optional): number of nodes in intermediate layers of model's head
dropout_prob (float, optional): dropout probability of head FC layers
bn_final (bool, optional): should a batch norm be added after the last layer
concat_pool (bool, optional): should pooling be replaced by :mod:`pyronear.nn.AdaptiveConcatPool2d`
**kwargs: optional arguments of :mod:`torchvision.models.mobilenet.MobileNetV2`
"""
# Model creation
base_model = MobileNetV2(num_classes=num_classes, **kwargs)
# Imagenet pretraining
if imagenet_pretrained:
if pretrained:
raise ValueError('imagenet_pretrained cannot be set to True if pretrained=True')
state_dict = load_state_dict_from_url(imagenet_urls['mobilenet_v2'],
progress=progress)
# Remove FC params from dict
for key in ('classifier.1.weight', 'classifier.1.bias'):
state_dict.pop(key, None)
missing, unexpected = base_model.load_state_dict(state_dict, strict=False)
if any(unexpected) or any(not elt.startswith('classifier.') for elt in missing):
raise KeyError(f"Missing parameters: {missing}\nUnexpected parameters: {unexpected}")
# Cut at last conv layers
model = cnn_model(base_model, model_cut, base_model.classifier[1].in_features, num_classes,
lin_features, dropout_prob, bn_final=bn_final, concat_pool=concat_pool)
# Parameter loading
if pretrained:
state_dict = load_state_dict_from_url(model_urls['mobilenet_v2'],
progress=progress)
model.load_state_dict(state_dict)
return model
|
#!usr/bin/python
# -*- coding: utf-8 -*-
from torchvision.models.mobilenet import MobileNetV2, model_urls as imagenet_urls
from torchvision.models.utils import load_state_dict_from_url
from .utils import cnn_model
__all__ = ['mobilenet_v2']
model_urls = {
'mobilenet_v2': 'https://srv-file7.gofile.io/download/RKagNy/mobilenet_v2-binary-classification.pth'
}
model_cut = -1
def mobilenet_v2(pretrained=False, progress=True, imagenet_pretrained=False,
num_classes=1, lin_features=512, dropout_prob=0.5,
bn_final=False, concat_pool=True, **kwargs):
r"""MobileNetV2 model from
`"MobileNetV2: Inverted Residuals and Linear Bottlenecks" <https://arxiv.org/abs/1801.04381>`_.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
imagenet_pretrained (bool, optional): should pretrained parameters be loaded on conv layers (ImageNet training)
num_classes (int, optional): number of output classes
lin_features (Union[int, list<int>], optional): number of nodes in intermediate layers of model's head
dropout_prob (float, optional): dropout probability of head FC layers
bn_final (bool, optional): should a batch norm be added after the last layer
concat_pool (bool, optional): should pooling be replaced by :mod:`pyronear.nn.AdaptiveConcatPool2d`
**kwargs: optional arguments of :mod:`torchvision.models.mobilenet.MobileNetV2`
"""
# Model creation
base_model = MobileNetV2(num_classes=num_classes, **kwargs)
# Imagenet pretraining
if imagenet_pretrained:
if pretrained:
raise ValueError('imagenet_pretrained cannot be set to True if pretrained=True')
state_dict = load_state_dict_from_url(imagenet_urls['mobilenet_v2'],
progress=progress)
# Remove FC params from dict
for key in ('classifier.1.weight', 'classifier.1.bias'):
state_dict.pop(key, None)
missing, unexpected = base_model.load_state_dict(state_dict, strict=False)
if any(unexpected) or any(not elt.startswith('classifier.') for elt in missing):
raise KeyError(f"Missing parameters: {missing}\nUnexpected parameters: {unexpected}")
# Cut at last conv layers
model = cnn_model(base_model, model_cut, base_model.classifier[1].in_features, num_classes,
lin_features, dropout_prob, bn_final=bn_final, concat_pool=concat_pool)
# Parameter loading
if pretrained:
state_dict = load_state_dict_from_url(model_urls['mobilenet_v2'],
progress=progress)
model.load_state_dict(state_dict)
return model
|
en
| 0.603811
|
#!usr/bin/python # -*- coding: utf-8 -*- MobileNetV2 model from `"MobileNetV2: Inverted Residuals and Linear Bottlenecks" <https://arxiv.org/abs/1801.04381>`_. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr imagenet_pretrained (bool, optional): should pretrained parameters be loaded on conv layers (ImageNet training) num_classes (int, optional): number of output classes lin_features (Union[int, list<int>], optional): number of nodes in intermediate layers of model's head dropout_prob (float, optional): dropout probability of head FC layers bn_final (bool, optional): should a batch norm be added after the last layer concat_pool (bool, optional): should pooling be replaced by :mod:`pyronear.nn.AdaptiveConcatPool2d` **kwargs: optional arguments of :mod:`torchvision.models.mobilenet.MobileNetV2` # Model creation # Imagenet pretraining # Remove FC params from dict # Cut at last conv layers # Parameter loading
| 2.61992
| 3
|
app.py
|
lmateus/API_perfil
| 0
|
6629925
|
<filename>app.py
from flask import Flask, jsonify, request
from flask_cors import CORS
app = Flask(__name__)
CORS(app)
from products import profile_data
from profile_gdal_t import pruebaPerfil
from Perfil import Perfil
@app.route('/ping')
def ping():
return jsonify({"message":"pong"})
@app.route('/products')
def getProducts():
return jsonify(profile_data)
@app.route('/products',methods=['POST'])
def addProduct():
profile = Perfil(request.json["Punto_inicial"],request.json["Punto_final"])
profile.datos_perfil()
return jsonify(profile.profile_json)
if __name__ == '__main__':
app.run(debug=True, port=4000)
|
<filename>app.py
from flask import Flask, jsonify, request
from flask_cors import CORS
app = Flask(__name__)
CORS(app)
from products import profile_data
from profile_gdal_t import pruebaPerfil
from Perfil import Perfil
@app.route('/ping')
def ping():
return jsonify({"message":"pong"})
@app.route('/products')
def getProducts():
return jsonify(profile_data)
@app.route('/products',methods=['POST'])
def addProduct():
profile = Perfil(request.json["Punto_inicial"],request.json["Punto_final"])
profile.datos_perfil()
return jsonify(profile.profile_json)
if __name__ == '__main__':
app.run(debug=True, port=4000)
|
none
| 1
| 2.396137
| 2
|
|
game2048/models.py
|
JJHAirylin/2048-api
| 0
|
6629926
|
import keras
from keras.layers import Input, Dense, Conv2D, concatenate, Flatten, BatchNormalization, Activation
from keras.models import Model
import numpy as np
import sys
sys.path.append("./")
from game2048.getdata import read_data_all
# define model
inputs = Input((4,4,11))
conv = inputs
FILTERS = 128
conv41 = Conv2D(filters=FILTERS,kernel_size=(4, 1),kernel_initializer='he_uniform')(conv)
conv14 = Conv2D(filters=FILTERS,kernel_size=(1, 4),kernel_initializer='he_uniform')(conv)
conv22 = Conv2D(filters=FILTERS,kernel_size=(2, 2),kernel_initializer='he_uniform')(conv)
conv33 = Conv2D(filters=FILTERS,kernel_size=(3, 3),kernel_initializer='he_uniform')(conv)
conv44 = Conv2D(filters=FILTERS,kernel_size=(4, 4),kernel_initializer='he_uniform')(conv)
hidden = concatenate([Flatten()(conv41),Flatten()(conv14),Flatten()(conv22),Flatten()(conv33),Flatten()(conv44)])
x = BatchNormalization()(hidden)
x = Activation('relu')(hidden)
for width in[512,128]:
x = Dense(width,kernel_initializer='he_uniform')(x)
x = BatchNormalization()(x)
x = Activation('relu')(hidden)
outputs = Dense(4,activation='softmax')(x)
model = Model(inputs,outputs)
model.summary()
model.compile(optimizer='adam',loss='categorical_crossentropy',metrics=['accuracy'])
# get training data
x_train, y_train = read_data_all('TrainFor128-256.csv', 1)
model.fit(x_train, y_train, batch_size = 512, epochs=50)
model.save('model.h5')
|
import keras
from keras.layers import Input, Dense, Conv2D, concatenate, Flatten, BatchNormalization, Activation
from keras.models import Model
import numpy as np
import sys
sys.path.append("./")
from game2048.getdata import read_data_all
# define model
inputs = Input((4,4,11))
conv = inputs
FILTERS = 128
conv41 = Conv2D(filters=FILTERS,kernel_size=(4, 1),kernel_initializer='he_uniform')(conv)
conv14 = Conv2D(filters=FILTERS,kernel_size=(1, 4),kernel_initializer='he_uniform')(conv)
conv22 = Conv2D(filters=FILTERS,kernel_size=(2, 2),kernel_initializer='he_uniform')(conv)
conv33 = Conv2D(filters=FILTERS,kernel_size=(3, 3),kernel_initializer='he_uniform')(conv)
conv44 = Conv2D(filters=FILTERS,kernel_size=(4, 4),kernel_initializer='he_uniform')(conv)
hidden = concatenate([Flatten()(conv41),Flatten()(conv14),Flatten()(conv22),Flatten()(conv33),Flatten()(conv44)])
x = BatchNormalization()(hidden)
x = Activation('relu')(hidden)
for width in[512,128]:
x = Dense(width,kernel_initializer='he_uniform')(x)
x = BatchNormalization()(x)
x = Activation('relu')(hidden)
outputs = Dense(4,activation='softmax')(x)
model = Model(inputs,outputs)
model.summary()
model.compile(optimizer='adam',loss='categorical_crossentropy',metrics=['accuracy'])
# get training data
x_train, y_train = read_data_all('TrainFor128-256.csv', 1)
model.fit(x_train, y_train, batch_size = 512, epochs=50)
model.save('model.h5')
|
en
| 0.807843
|
# define model # get training data
| 2.906037
| 3
|
config.py
|
AndreaEdwards/dna_assembly_tools
| 0
|
6629927
|
<filename>config.py
#!/usr/bin/env python
__author__ = "<NAME>"
__copyright__ = "Copyright 2015, The LASER Project"
__credits__ = ["<NAME>"]
__license__ = "BSD"
__version__ = "0.1.0-dev"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
class ConfigurationManager(object):
""""""
def __init__(self):
self.web_port = 3005
config = ConfigurationManager()
|
<filename>config.py
#!/usr/bin/env python
__author__ = "<NAME>"
__copyright__ = "Copyright 2015, The LASER Project"
__credits__ = ["<NAME>"]
__license__ = "BSD"
__version__ = "0.1.0-dev"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
class ConfigurationManager(object):
""""""
def __init__(self):
self.web_port = 3005
config = ConfigurationManager()
|
ru
| 0.26433
|
#!/usr/bin/env python
| 1.807519
| 2
|
blog_api-project/posts/serializers.py
|
Muntasir-Mahmud/Blog-Api
| 0
|
6629928
|
from django.contrib.auth import get_user_model
from rest_framework import serializers
from . models import Post
class PostSerializer(serializers.ModelSerializer):
class Meta:
model = Post
fields = '__all__'
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = get_user_model()
fields = ('id', 'username',)
|
from django.contrib.auth import get_user_model
from rest_framework import serializers
from . models import Post
class PostSerializer(serializers.ModelSerializer):
class Meta:
model = Post
fields = '__all__'
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = get_user_model()
fields = ('id', 'username',)
|
none
| 1
| 2.11577
| 2
|
|
backend/auth0login/templatetags/poll_extras.py
|
UTMIST/WallStreetBots
| 4
|
6629929
|
<reponame>UTMIST/WallStreetBots<filename>backend/auth0login/templatetags/poll_extras.py
from django import template
register = template.Library()
@register.filter
def subtract(value, arg):
return float(value) - float(arg)
@register.filter
def multiply(value, arg):
return float(value) * float(arg)
@register.filter
def find_percent(change_today):
return round(float(change_today)*100, 2)
@register.filter
def compare(value, arg):
if float(value) > float(arg):
return True
@register.filter
def check_positive(value):
if float(value) > 0:
return True
@register.filter
def round_2(value):
return round(float(value), 2)
|
from django import template
register = template.Library()
@register.filter
def subtract(value, arg):
return float(value) - float(arg)
@register.filter
def multiply(value, arg):
return float(value) * float(arg)
@register.filter
def find_percent(change_today):
return round(float(change_today)*100, 2)
@register.filter
def compare(value, arg):
if float(value) > float(arg):
return True
@register.filter
def check_positive(value):
if float(value) > 0:
return True
@register.filter
def round_2(value):
return round(float(value), 2)
|
none
| 1
| 2.502308
| 3
|
|
py/instalog/plugins/input_archive.py
|
arccode/factory
| 3
|
6629930
|
#!/usr/bin/env python3
#
# Copyright 2016 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Input archive plugin.
Import all events from archives which are made by output_archive.
The archive name:
'InstalogEvents_' + year + month + day + hour + minute + second
The archive structure:
InstalogEvents_YYYYmmddHHMMSS.tar.gz
InstalogEvents_YYYYmmddHHMMSS/
events.json
attachments/ # Will not have this dir if no attachment.
000_${EVENT_0_ATTACHMENT_0_NAME}
000_${EVENT_0_ATTACHMENT_1_NAME}
001_${EVENT_1_ATTACHMENT_0_NAME}
001_${EVENT_1_ATTACHMENT_1_NAME}
...
"""
# TODO(kitching): Add a unittest.
import glob
import os
import tarfile
from cros.factory.instalog import datatypes
from cros.factory.instalog import plugin_base
from cros.factory.instalog.utils.arg_utils import Arg
from cros.factory.instalog.utils import file_utils
class InputArchive(plugin_base.InputPlugin):
ARGS = [
Arg('path', str,
'Path to the set of archives on disk. Uses glob syntax. '
'e.g. "/path/to/InstalogEvents_*.tar.gz"'),
]
def ExtractArchive(self, archive_path, tmp_path):
"""Extracts archive to tmp_path, and checks that events.json exists.
Returns:
json_path: The path of log file in tmp_path.
"""
with tarfile.open(archive_path, 'r:gz') as tar:
tar.extractall(tmp_path)
archive_name = os.path.basename(archive_path)
# Remove '.tar.gz'
dir_name = archive_name.split(os.extsep)[0]
json_path = os.path.join(tmp_path, dir_name, 'events.json')
if os.path.isfile(json_path):
return json_path
self.error('File "%s" does not have event.json', archive_name)
raise IOError
def ProcessArchive(self, archive_path):
"""Extracts archive to tmp_path, then parses and emits events within."""
self.info('Processing archive %s...', archive_path)
with file_utils.TempDirectory(prefix='input_archive_') as tmp_path:
try:
json_path = self.ExtractArchive(archive_path, tmp_path)
if not self.ParseAndEmit(json_path):
self.error('Emit failed!')
raise IOError
except Exception:
# We might not have permission to access this file, or there could be
# some other IO problem, or the tarfile was broken.
self.exception('Exception while accessing file, check permissions, '
'files in archive, and "path" argument.')
raise
def Main(self):
"""Main thread of the plugin."""
archive_paths = sorted(glob.glob(self.args.path))
self.info('Scanned for archives, %d files detected', len(archive_paths))
for archive_path in archive_paths:
self.ProcessArchive(archive_path)
self.info('Finished importing all archives')
def ParseAndEmit(self, path):
"""Parses lines in path to events, and emits to Instalog.
Returns:
Result from the Emit call (boolean representing its success).
"""
events = []
event_dir = os.path.dirname(path)
with open(path, 'r') as f:
for line in f:
event = self.ParseEvent(path, line)
for att_id, att_path in event.attachments.items():
event.attachments[att_id] = os.path.join(event_dir, att_path)
events.append(event)
self.info('Parsed %d events', len(events))
return self.Emit(events)
def ParseEvent(self, path, line):
"""Returns an Instalog Event parsed from line.
Args:
path: Path to the log file in question.
line: The JSON line to be parsed.
May include trailing \r and \n characters.
"""
try:
return datatypes.Event.Deserialize(line)
except Exception:
self.error('Encountered invalid line "%s" in %s, aborting import',
line.rstrip(), path, exc_info=True)
raise
if __name__ == '__main__':
plugin_base.main()
|
#!/usr/bin/env python3
#
# Copyright 2016 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Input archive plugin.
Import all events from archives which are made by output_archive.
The archive name:
'InstalogEvents_' + year + month + day + hour + minute + second
The archive structure:
InstalogEvents_YYYYmmddHHMMSS.tar.gz
InstalogEvents_YYYYmmddHHMMSS/
events.json
attachments/ # Will not have this dir if no attachment.
000_${EVENT_0_ATTACHMENT_0_NAME}
000_${EVENT_0_ATTACHMENT_1_NAME}
001_${EVENT_1_ATTACHMENT_0_NAME}
001_${EVENT_1_ATTACHMENT_1_NAME}
...
"""
# TODO(kitching): Add a unittest.
import glob
import os
import tarfile
from cros.factory.instalog import datatypes
from cros.factory.instalog import plugin_base
from cros.factory.instalog.utils.arg_utils import Arg
from cros.factory.instalog.utils import file_utils
class InputArchive(plugin_base.InputPlugin):
  """Input plugin that imports events from output_archive tarballs."""

  ARGS = [
      Arg('path', str,
          'Path to the set of archives on disk. Uses glob syntax. '
          'e.g. "/path/to/InstalogEvents_*.tar.gz"'),
  ]

  def ExtractArchive(self, archive_path, tmp_path):
    """Extracts archive to tmp_path, and checks that events.json exists.

    Args:
      archive_path: Path of the .tar.gz archive to extract.
      tmp_path: Directory to extract the archive into.

    Returns:
      json_path: The path of the log file in tmp_path.

    Raises:
      IOError: If the extracted archive does not contain an events.json.
    """
    # NOTE(review): extractall performs no member-path validation, so a
    # maliciously crafted archive could write outside tmp_path; acceptable
    # only if archives come from a trusted output_archive instance.
    with tarfile.open(archive_path, 'r:gz') as tar:
      tar.extractall(tmp_path)
    archive_name = os.path.basename(archive_path)
    # Remove '.tar.gz': the first extsep starts the compound extension.
    dir_name = archive_name.split(os.extsep)[0]
    json_path = os.path.join(tmp_path, dir_name, 'events.json')
    if os.path.isfile(json_path):
      return json_path
    # Bug fix: the file being checked for is 'events.json', so the error
    # message should name that file, not 'event.json'.
    self.error('File "%s" does not have events.json', archive_name)
    raise IOError

  def ProcessArchive(self, archive_path):
    """Extracts archive to tmp_path, then parses and emits events within."""
    self.info('Processing archive %s...', archive_path)
    with file_utils.TempDirectory(prefix='input_archive_') as tmp_path:
      try:
        json_path = self.ExtractArchive(archive_path, tmp_path)
        if not self.ParseAndEmit(json_path):
          self.error('Emit failed!')
          raise IOError
      except Exception:
        # We might not have permission to access this file, or there could be
        # some other IO problem, or the tarfile was broken.
        self.exception('Exception while accessing file, check permissions, '
                       'files in archive, and "path" argument.')
        raise

  def Main(self):
    """Main thread of the plugin: imports every archive matching `path`."""
    archive_paths = sorted(glob.glob(self.args.path))
    self.info('Scanned for archives, %d files detected', len(archive_paths))
    for archive_path in archive_paths:
      self.ProcessArchive(archive_path)
    self.info('Finished importing all archives')

  def ParseAndEmit(self, path):
    """Parses lines in path to events, and emits to Instalog.

    Attachment paths recorded in each event are rewritten to absolute paths
    under the directory containing the log file, so downstream plugins can
    open them directly.

    Returns:
      Result from the Emit call (boolean representing its success).
    """
    events = []
    event_dir = os.path.dirname(path)
    with open(path, 'r') as f:
      for line in f:
        event = self.ParseEvent(path, line)
        for att_id, att_path in event.attachments.items():
          event.attachments[att_id] = os.path.join(event_dir, att_path)
        events.append(event)
    self.info('Parsed %d events', len(events))
    return self.Emit(events)

  def ParseEvent(self, path, line):
    r"""Returns an Instalog Event parsed from line.

    Args:
      path: Path to the log file in question.
      line: The JSON line to be parsed.
            May include trailing \r and \n characters.
    """
    try:
      return datatypes.Event.Deserialize(line)
    except Exception:
      # Log the offending line for debugging, then re-raise to abort the
      # whole archive import (partial imports are not emitted).
      self.error('Encountered invalid line "%s" in %s, aborting import',
                 line.rstrip(), path, exc_info=True)
      raise
# Allow running the plugin directly as a standalone executable.
if __name__ == '__main__':
  plugin_base.main()
|
en
| 0.806461
|
#!/usr/bin/env python3 # # Copyright 2016 The Chromium OS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. Input archive plugin. Import all events from archives which are made by output_archive. The archive name: 'InstalogEvents_' + year + month + day + hour + minute + second The archive structure: InstalogEvents_YYYYmmddHHMMSS.tar.gz InstalogEvents_YYYYmmddHHMMSS/ events.json attachments/ # Will not have this dir if no attachment. 000_${EVENT_0_ATTACHMENT_0_NAME} 000_${EVENT_0_ATTACHMENT_1_NAME} 001_${EVENT_1_ATTACHMENT_0_NAME} 001_${EVENT_1_ATTACHMENT_1_NAME} ... # TODO(kitching): Add a unittest. Extracts archive to tmp_path, and checks that events.json exists. Returns: json_path: The path of log file in tmp_path. # Remove '.tar.gz' Extracts archive to tmp_path, then parses and emits events within. # We might not have permission to access this file, or there could be # some other IO problem, or the tarfile was broken. Main thread of the plugin. Parses lines in path to events, and emits to Instalog. Returns: Result from the Emit call (boolean representing its success). Returns an Instalog Event parsed from line. Args: path: Path to the log file in question. line: The JSON line to be parsed. May include trailing \r and \n characters.
| 2.307864
| 2
|
project/2222.py
|
louay-rouabeh/Energy-Management-Live-Dashboard
| 0
|
6629931
|
import base64
import datetime
import io
import plotly.graph_objs as go
import dash
from dash.dependencies import Input, Output, State
import dash_core_components as dcc
import dash_html_components as html
import dash_table
import pandas as pd
from sqlalchemy import create_engine
# INSERT INTO "data1.db" (data, month, meter) VALUES (6666,'11-UU',844)
colors = {"graphBackground": "#F5F5F5", "background": "#ffffff", "text": "#000000"}
engine = create_engine('sqlite:///data.db', echo=False)
# Dash
def generate_table(dataframe, max_rows=10):
    """Render *dataframe* as an html.Table showing at most *max_rows* rows."""
    header_row = html.Tr([html.Th(col) for col in dataframe.columns])
    visible_rows = min(len(dataframe), max_rows)
    body_rows = []
    for row_idx in range(visible_rows):
        cells = [html.Td(dataframe.iloc[row_idx][col]) for col in dataframe.columns]
        body_rows.append(html.Tr(cells))
    return html.Table([header_row] + body_rows)
app = dash.Dash()
# Page layout: an upload dropzone, a free-form SQL query box, and a data
# table plus a configurable bar chart, all fed from a hidden JSON store.
app.layout = html.Div([
    dcc.Upload(
        id="upload-data",
        children=html.Div(["Drag and Drop or ", html.A("Select Files")]),
        style={
            "width": "100%",
            "height": "60px",
            "lineHeight": "60px",
            "borderWidth": "1px",
            "borderStyle": "dashed",
            "borderRadius": "5px",
            "textAlign": "center",
            "margin": "10px",
        },
        # Allow multiple files to be uploaded
        multiple=True,
    ),
    # Hidden sink element targeted by the upload/save callback.
    html.P(id='saveSql', style={'display': 'none'}),
    dcc.Input(
        id='sql-query',
        value='SELECT * FROM "floor1.db"',
        style={'width': '100%'},
        type='text'
    ),
    html.Button('Run Query', id='run-query'),
    html.Hr(),
    html.Div([
        html.Div(id='table-container', className="four columns"),
        html.Div([
            html.Div([
                html.Div([
                    html.Label('Select X'),
                    dcc.Dropdown(
                        id='dropdown-x',
                        clearable=False,
                    )
                ], className="six columns"),
                html.Div([
                    html.Label('Select Y'),
                    dcc.Dropdown(
                        id='dropdown-y',
                        clearable=False,
                    )
                ], className="six columns")
            ], className="row"),
            html.Div(dcc.Graph(id='graph'), className="ten columns")
        ], className="eight columns")
    ], className="row"),
    # hidden store element
    html.Div(id='table-store', style={'display': 'none'})
])
@app.callback(Output('saveSql', 'children'), [
    Input('upload-data', 'contents'),
    Input('upload-data', 'filename')
])
def update_graph(contents, filename):
    # Persist the first uploaded file into SQLite as a side effect.
    # NOTE(review): despite the name, this callback draws no graph and
    # returns None, so the hidden 'saveSql' element is never populated —
    # confirm whether the return value was meant to carry a status message.
    if contents:
        # dcc.Upload with multiple=True delivers lists; only the first
        # upload is used here.
        contents = contents[0]
        filename = filename[0]
        df = parse_data(contents, filename)
        # First column becomes the index before writing to the DB table.
        df = df.set_index(df.columns[0])
        df.to_sql('floor1.db', con=engine, if_exists='replace')
def parse_data(contents, filename):
    """Decode an uploaded file (base64 data-URL) into a pandas DataFrame.

    Args:
        contents: Data-URL string of the form '<mime>;base64,<payload>'.
        filename: Original file name; its extension selects the parser.

    Returns:
        A DataFrame on success, or an html.Div carrying an error message.
    """
    _content_type, content_string = contents.split(",")
    decoded = base64.b64decode(content_string)
    try:
        if "csv" in filename:
            df = pd.read_csv(io.StringIO(decoded.decode("utf-8")))
        elif "xls" in filename:
            df = pd.read_excel(io.BytesIO(decoded))
        # Bug fix: the original condition was `"txt" or "tsv" in filename`,
        # which is always truthy ("txt" is a non-empty string), so every
        # non-csv/xls upload was silently parsed as whitespace-delimited
        # text. Test each extension explicitly and reject everything else.
        elif "txt" in filename or "tsv" in filename:
            df = pd.read_csv(io.StringIO(decoded.decode("utf-8")), delimiter=r"\s+")
        else:
            return html.Div(["There was an error processing this file."])
    except Exception as e:
        print(e)
        return html.Div(["There was an error processing this file."])
    return df
@app.callback(
    Output("output-data-upload", "children"),
    [Input("upload-data", "contents"), Input("upload-data", "filename")],
)
def update_table(contents, filename):
    # Render the first uploaded file as a DataTable plus a raw-content
    # preview of its data-URL.
    # NOTE(review): no element with id 'output-data-upload' exists in
    # app.layout above, so this callback's output has nowhere to land —
    # confirm whether the layout is missing a Div or this callback is dead.
    table = html.Div()
    if contents:
        contents = contents[0]
        filename = filename[0]
        df = parse_data(contents, filename)
        table = html.Div(
            [
                html.H5(filename),
                dash_table.DataTable(
                    data=df.to_dict("rows"),
                    columns=[{"name": i, "id": i} for i in df.columns],
                ),
                html.Hr(),
                html.Div("Raw Content"),
                # Only the first 200 characters of the (base64) data-URL.
                html.Pre(
                    contents[0:200] + "...",
                    style={"whiteSpace": "pre-wrap", "wordBreak": "break-all"},
                ),
            ]
        )
    return table
@app.callback(
    dash.dependencies.Output('table-store', 'children'),
    [dash.dependencies.Input('run-query', 'n_clicks')],
    state=[dash.dependencies.State('sql-query', 'value')])
def sql(number_of_times_button_has_been_clicked, sql_query):
    # Run the user-entered SQL against the SQLite engine and stash the
    # result, serialized to JSON, in the hidden 'table-store' div.
    # NOTE(review): the raw query string is executed verbatim; that is
    # acceptable for a local query console, but unsafe if this app is ever
    # exposed to untrusted users.
    dff = pd.read_sql_query(
        sql_query,
        engine
    )
    return dff.to_json()
@app.callback(
    dash.dependencies.Output('table-container', 'children'),
    [dash.dependencies.Input('table-store', 'children')])
def dff_to_table(dff_json):
    """Rebuild the stored JSON frame and render it as an HTML table."""
    stored_frame = pd.read_json(dff_json)
    return generate_table(stored_frame)
@app.callback(
    dash.dependencies.Output('graph', 'figure'),
    [dash.dependencies.Input('table-store', 'children'),
     dash.dependencies.Input('dropdown-x', 'value'),
     dash.dependencies.Input('dropdown-y', 'value')])
def update_figure(dff_json, dropdown_x, dropdown_y):
    """Build a bar-chart figure from the stored frame and dropdown choices.

    Renamed from ``dff_to_table``: the original duplicated the name of the
    table callback above, shadowing it at module level. Dash still worked
    only because callbacks are registered at decoration time, but the
    duplicate name was misleading and fragile.
    """
    dff = pd.read_json(dff_json)
    return {
        'data': [{
            'x': dff[dropdown_x],
            'y': dff[dropdown_y],
            'type': 'bar'
        }],
        'layout': {
            # Tight margins so the chart fills its grid column.
            'margin': {
                'l': 20,
                'r': 10,
                'b': 60,
                't': 10
            }
        }
    }
@app.callback(
    dash.dependencies.Output('dropdown-x', 'options'),
    [dash.dependencies.Input('table-store', 'children')])
def create_options_x(dff_json):
    """Offer every column of the stored frame as an x-axis choice."""
    column_names = pd.read_json(dff_json).columns
    return [{'label': name, 'value': name} for name in column_names]
@app.callback(
    dash.dependencies.Output('dropdown-y', 'options'),
    [dash.dependencies.Input('table-store', 'children')])
def create_options_y(dff_json):
    """Offer every column of the stored frame as a y-axis choice."""
    column_names = pd.read_json(dff_json).columns
    return [{'label': name, 'value': name} for name in column_names]
# Pull in the standard Dash example stylesheet that provides the
# "columns"/"row" grid classes used in the layout above.
app.css.append_css({"external_url": "https://codepen.io/chriddyp/pen/bWLwgP.css"})
if __name__ == '__main__':
    app.run_server(debug=True)
|
import base64
import datetime
import io
import plotly.graph_objs as go
import dash
from dash.dependencies import Input, Output, State
import dash_core_components as dcc
import dash_html_components as html
import dash_table
import pandas as pd
from sqlalchemy import create_engine
# INSERT INTO "data1.db" (data, month, meter) VALUES (6666,'11-UU',844)
colors = {"graphBackground": "#F5F5F5", "background": "#ffffff", "text": "#000000"}
engine = create_engine('sqlite:///data.db', echo=False)
# Dash
def generate_table(dataframe, max_rows=10):
return html.Table(
# Header
[html.Tr([html.Th(col) for col in dataframe.columns])] +
# Body
[html.Tr([
html.Td(dataframe.iloc[i][col]) for col in dataframe.columns
]) for i in range(min(len(dataframe), max_rows))]
)
app = dash.Dash()
app.layout = html.Div([
dcc.Upload(
id="upload-data",
children=html.Div(["Drag and Drop or ", html.A("Select Files")]),
style={
"width": "100%",
"height": "60px",
"lineHeight": "60px",
"borderWidth": "1px",
"borderStyle": "dashed",
"borderRadius": "5px",
"textAlign": "center",
"margin": "10px",
},
# Allow multiple files to be uploaded
multiple=True,
),
html.P(id='saveSql', style={'display': 'none'}),
dcc.Input(
id='sql-query',
value='SELECT * FROM "floor1.db"',
style={'width': '100%'},
type='text'
),
html.Button('Run Query', id='run-query'),
html.Hr(),
html.Div([
html.Div(id='table-container', className="four columns"),
html.Div([
html.Div([
html.Div([
html.Label('Select X'),
dcc.Dropdown(
id='dropdown-x',
clearable=False,
)
], className="six columns"),
html.Div([
html.Label('Select Y'),
dcc.Dropdown(
id='dropdown-y',
clearable=False,
)
], className="six columns")
], className="row"),
html.Div(dcc.Graph(id='graph'), className="ten columns")
], className="eight columns")
], className="row"),
# hidden store element
html.Div(id='table-store', style={'display': 'none'})
])
@app.callback(Output('saveSql', 'children'), [
Input('upload-data', 'contents'),
Input('upload-data', 'filename')
])
def update_graph(contents, filename):
if contents:
contents = contents[0]
filename = filename[0]
df = parse_data(contents, filename)
df = df.set_index(df.columns[0])
df.to_sql('floor1.db', con=engine, if_exists='replace')
def parse_data(contents, filename):
content_type, content_string = contents.split(",")
decoded = base64.b64decode(content_string)
try:
if "csv" in filename:
df = pd.read_csv(io.StringIO(decoded.decode("utf-8")))
elif "xls" in filename:
df = pd.read_excel(io.BytesIO(decoded))
elif "txt" or "tsv" in filename:
df = pd.read_csv(io.StringIO(decoded.decode("utf-8")), delimiter=r"\s+")
except Exception as e:
print(e)
return html.Div(["There was an error processing this file."])
return df
@app.callback(
Output("output-data-upload", "children"),
[Input("upload-data", "contents"), Input("upload-data", "filename")],
)
def update_table(contents, filename):
table = html.Div()
if contents:
contents = contents[0]
filename = filename[0]
df = parse_data(contents, filename)
table = html.Div(
[
html.H5(filename),
dash_table.DataTable(
data=df.to_dict("rows"),
columns=[{"name": i, "id": i} for i in df.columns],
),
html.Hr(),
html.Div("Raw Content"),
html.Pre(
contents[0:200] + "...",
style={"whiteSpace": "pre-wrap", "wordBreak": "break-all"},
),
]
)
return table
@app.callback(
dash.dependencies.Output('table-store', 'children'),
[dash.dependencies.Input('run-query', 'n_clicks')],
state=[dash.dependencies.State('sql-query', 'value')])
def sql(number_of_times_button_has_been_clicked, sql_query):
dff = pd.read_sql_query(
sql_query,
engine
)
return dff.to_json()
@app.callback(
dash.dependencies.Output('table-container', 'children'),
[dash.dependencies.Input('table-store', 'children')])
def dff_to_table(dff_json):
dff = pd.read_json(dff_json)
return generate_table(dff)
@app.callback(
dash.dependencies.Output('graph', 'figure'),
[dash.dependencies.Input('table-store', 'children'),
dash.dependencies.Input('dropdown-x', 'value'),
dash.dependencies.Input('dropdown-y', 'value')])
def dff_to_table(dff_json, dropdown_x, dropdown_y):
dff = pd.read_json(dff_json)
return {
'data': [{
'x': dff[dropdown_x],
'y': dff[dropdown_y],
'type': 'bar'
}],
'layout': {
'margin': {
'l': 20,
'r': 10,
'b': 60,
't': 10
}
}
}
@app.callback(
dash.dependencies.Output('dropdown-x', 'options'),
[dash.dependencies.Input('table-store', 'children')])
def create_options_x(dff_json):
dff = pd.read_json(dff_json)
return [{'label': i, 'value': i} for i in dff.columns]
@app.callback(
dash.dependencies.Output('dropdown-y', 'options'),
[dash.dependencies.Input('table-store', 'children')])
def create_options_y(dff_json):
dff = pd.read_json(dff_json)
return [{'label': i, 'value': i} for i in dff.columns]
app.css.append_css({"external_url": "https://codepen.io/chriddyp/pen/bWLwgP.css"})
if __name__ == '__main__':
app.run_server(debug=True)
|
en
| 0.594935
|
# INSERT INTO "data1.db" (data, month, meter) VALUES (6666,'11-UU',844) # Dash # Header # Body # Allow multiple files to be uploaded # hidden store element
| 2.692648
| 3
|
candycrush/candycrush.py
|
dzhou22/candykart-complexity
| 0
|
6629932
|
<reponame>dzhou22/candykart-complexity
from big_ol_pile_of_manim_imports import *
class Reduction(Scene):
def construct(self):
title=TextMobject("The Reduction").scale(2)
self.play(Write(title))
self.wait(2)
title2=TextMobject("The Reduction").to_corner(UL)
self.play(Transform(title,title2))
suppose=TextMobject(r"Suppose we are given a formula $\varphi$ with $n$ variables and $m$ clauses.").scale(0.75)
suppose.move_to(2*UP+LEFT)
self.play(FadeIn(suppose))
board=TextMobject("We use the gadgets to construct an equivalent gameboard.").scale(0.75)
board.move_to(UP+1.63*LEFT)
self.play(FadeIn(board))
self.wait(2)
self.play(FadeOut(suppose),FadeOut(board))
rect=Rectangle(height=1,width=2).move_to(DOWN*1.5)
rect2=Rectangle(height=1,width=2).move_to(DOWN*0.5)
rect3=Rectangle(height=1,width=2).move_to(UP*0.5)
rect4=Rectangle(height=1,width=2).move_to(UP*1.5)
l1=TextMobject("True").move_to(rect)
l2=TextMobject("False").move_to(rect2)
l3=TextMobject(r"$\neg x_i$").move_to(rect3)
l4=TextMobject(r"$x_i$").move_to(rect4)
self.play(FadeIn(rect),FadeIn(rect2),FadeIn(rect3),FadeIn(rect4),FadeIn(l1),FadeIn(l2),FadeIn(l3),FadeIn(l4))
self.wait()
varRect=Rectangle(height=4,width=1.5).move_to(3*LEFT)
self.play(ReplacementTransform(rect,varRect),ReplacementTransform(rect2,varRect),ReplacementTransform(rect3,varRect),ReplacementTransform(rect4,varRect),ReplacementTransform(l1,varRect),ReplacementTransform(l2,varRect),ReplacementTransform(l3,varRect),ReplacementTransform(l4,varRect))
self.wait()
self.remove(varRect)
newVarRect=Rectangle(height=4,width=1.5).move_to(3*LEFT)
self.add(newVarRect)
varRect2=Rectangle(height=4,width=1.5).move_to(5*LEFT)
varRect3=Rectangle(height=4,width=1.5).move_to(4*RIGHT)
dots=TextMobject(r"$\cdots$").scale(3)
self.play(ReplacementTransform(varRect.copy(),varRect2),ReplacementTransform(varRect,varRect3),Write(dots))
self.wait()
varRect4=Rectangle(height=4,width=3).move_to(4*LEFT)
self.play(ReplacementTransform(newVarRect,varRect4),ReplacementTransform(varRect2,varRect4),ReplacementTransform(varRect3,varRect4),ReplacementTransform(dots,varRect4))
self.remove(varRect4)
newVarRect4=Rectangle(height=4,width=3).move_to(4*LEFT)
self.add(newVarRect4) #variable rectangle
self.wait()
cRect=Rectangle(height=2,width=1).move_to(2*UP+RIGHT)
cRect2=Rectangle(height=2,width=1).move_to(2*RIGHT)
cRect3=Rectangle(height=2,width=1).move_to(2*DOWN+3*RIGHT)
self.play(Write(cRect),Write(cRect2),Write(cRect3))
self.wait()
wire1=Rectangle(height=0,width=3).move_to(1.5*UP+LEFT)
wire2=Rectangle(height=0,width=4).move_to(0.5*LEFT)
wire3=Rectangle(height=0,width=5).move_to(1.5*DOWN)
self.play(Write(wire1),Write(wire2),Write(wire3))
self.wait()
self.remove(newVarRect4)
self.remove(cRect)
self.remove(cRect2)
self.remove(cRect3)
self.remove(wire1)
self.remove(wire2)
self.remove(wire3)
self.wait()
dim=TextMobject(r"This board has width $O(n+m)$ and height $O(poly(n+m))$").move_to(0.5*UP)
dim2=TextMobject(r"so we can construct it in polynomial time.").move_to(0.5*DOWN)
self.play(Write(dim),Write(dim2))
self.wait()
self.play(FadeOut(dim),FadeOut(dim2))
self.wait()
text=TextMobject(r"Now pass this gameboard into $CANDYCRUSH$").move_to(0.5*UP)
text2=TextMobject(r"with $k=n$ and $s$ being the max score achievable.").move_to(0.5*DOWN)
self.play(Write(text),Write(text2))
self.wait()
text3=TextMobject(r"$CANDYCRUSH$ outputs $1$ iff $\varphi$ is satisfiable.").move_to(UP)
self.play(Transform(text,text3),Transform(text2,text3))
self.wait()
conclusion=TextMobject(r"So $3SAT\le_p CANDYCRUSH$.")
self.play(Write(conclusion))
self.wait()
conclusion2=TextMobject(r"Therefore, $CANDYCRUSH$ is $\mathbf{NP}$-hard.").move_to(DOWN)
self.play(Write(conclusion2))
self.play(FadeOut(conclusion),FadeOut(conclusion2),FadeOut(title),FadeOut(text),FadeOut(text2))
self.wait()
tit=TextMobject("Conclusion").to_corner(UL)
self.play(FadeIn(tit))
end=TextMobject(r"We have shown that $CANDYCRUSH\in\mathbf{NP}$").move_to(UP)
end2=TextMobject(r"and $CANDYCRUSH$ is $\mathbf{NP}$-hard.")
end3=TextMobject(r"Together, these imply that $CANDYCRUSH$ is $\mathbf{NP}$-complete.").move_to(DOWN)
self.play(Write(end))
self.play(Write(end2))
self.play(Write(end3))
sq=TextMobject(r"$\blacksquare$").scale(5).to_corner(DR)
self.wait()
self.add(sq)
self.wait()
self.play(FadeOut(tit),FadeOut(end),FadeOut(end2),FadeOut(end3),FadeOut(sq))
self.wait()
class Main(Scene):
def construct(self):
self.title()
self.definitions()
self.np()
npcomplete=TextMobject("Now we show that $CANDYCRUSH$ is $\mathbf{NP}$-hard.")
self.play(Write(npcomplete.to_edge(UP)))
method=TextMobject("We show this by a reduction from $3SAT$.")
method.to_edge(UP,buff=2.5)
self.play(Write(method))
gadgets=TextMobject("We will introduce a series of gadgets").to_edge(UP,buff=3.5)
gadgets2=TextMobject("that will allow us to convert any $3SAT$ instance").to_edge(UP,buff=4)
gadgets3=TextMobject("to an instance of $CANDYCRUSH$.").to_edge(UP,buff=4.5)
self.play(FadeIn(gadgets), FadeIn(gadgets2),FadeIn(gadgets3))
self.wait(3)
self.play(FadeOut(gadgets),FadeOut(gadgets2),FadeOut(gadgets3),FadeOut(method))
gadgets4 = TextMobject("Gadgets").scale(1.5).to_corner(UL)
self.play(Transform(npcomplete,gadgets4))
self.background()
self.truegadget()
#self.falsegadget()
self.assignmentneg()
self.assignmentpos()
self.wire()
self.clause()
title=TextMobject("How do we put it all together?").to_edge(UP).scale(1.5)
self.play(Transform(npcomplete, title))
def background(self):
back=TexMobject(r"""
\begin{matrix}
R & O & R & O & R\\
B & P & B & P & B\\
R & O & R & O & R\\
B & P & B & P & B\\
R & O & R & O & R
\end{matrix}
""")
text=TextMobject("Neutral Background").next_to(back,DOWN,buff=1)
back2=TexMobject(r"""
\begin{matrix}
R & O & R & O & R\\
B & P & & & B\\
R & & B & P & R\\
B & O & R & O & B\\
R & P & B & P & R
\end{matrix}
""")
back3=TexMobject(r"""
\begin{matrix}
R & O & R & O & R\\
B & P & G & G & B\\
R & G & B & P & R\\
B & O & R & O & B\\
R & P & B & P & R
\end{matrix}
""")
back4=TexMobject(r"""
\begin{matrix}
\cdot & \cdot & \cdot & \cdot & \cdot \\
\cdot & \cdot & \cdot & \cdot & \cdot \\
\cdot & \cdot & \cdot & \cdot & \cdot \\
\cdot & \cdot & \cdot & \cdot & \cdot \\
\cdot & \cdot & \cdot & \cdot & \cdot
\end{matrix}
""")
self.play(FadeIn(text))
self.play(FadeIn(back))
self.wait()
self.play(FadeOut(back),FadeIn(back2))
self.wait()
self.play(FadeOut(back2),FadeIn(back3))
self.wait()
self.play(FadeOut(back3),FadeIn(back4))
self.wait()
self.play(FadeOut(back4),FadeOut(text))
self.wait()
def title(self):
title=TextMobject("How hard is Candy Crush?").scale(2)
self.play(Write(title))
self.wait()
self.play(LaggedStart(FadeOutAndShiftDown, title))
answer=TextMobject("Answer: NP-complete").scale(1.5)
self.play(Write(answer))
self.wait()
self.play(FadeOut(answer))
def definitions(self):
grid = NumberPlane()
grid_title = TextMobject("Candy Crush is played on a grid").move_to(0.5*UP)
grid_title.scale(1.5)
self.add(grid, grid_title) # Make sure title is on top of grid
self.play(
FadeInFromDown(grid_title),
Write(grid), run_time=2.5
)
self.wait()
dimensions=TexMobject(r"\text{Call the dimensions } w \times h").scale(1.5)
dimensions.move_to(0.5*DOWN)
self.play(Transform(grid_title, dimensions))
self.wait(2)
self.remove(grid,grid_title)
text=TextMobject("Each square is filled with one of six different colored candies")
arr=TextMobject("R", "O", "Y", "G", "B", "P")
arr2=TextMobject("R", "O", "G", "Y", "B", "P")
arr.move_to(DOWN*1.5)
arr[0].set_color(RED)
arr[1].set_color(ORANGE)
arr[2].set_color("#FFFF00")
arr[3].set_color("00FF00")
arr[4].set_color(BLUE)
arr[5].set_color("DC28E2")
arr2.move_to(DOWN*1.5)
arr2[0].set_color(RED)
arr2[1].set_color(ORANGE)
arr2[3].set_color("#FFFF00")
arr2[2].set_color("00FF00")
arr2[4].set_color(BLUE)
arr2[5].set_color("DC28E2")
self.play(FadeIn(text))
self.play(Write(arr))
self.wait(1.5)
self.play(FadeOut(text))
text2=TextMobject("Each square is filled with one of six different colored candies")
text3=TextMobject("A player may swap two candies in neighboring squares")
text4=TextMobject("When a player forms a chain of 3 identical candies or more, the identical")
text5=TextMobject("candies are deleted and new ones fall from above and take their place.")
text6=TextMobject("The player's final score is equal to the number of chains deleted.")
text7=TextMobject("Define the $CANDYCRUSH$ problem as follows:")
problem=TextMobject("Given a gameboard and a number $k$ swaps, is a score $s$ achievable?")
self.play(FadeIn(text3))
self.play(Transform(arr,arr2))
self.wait(2)
self.play(FadeOut(text3))
self.play(FadeOut(arr))
self.play(FadeIn(text4.scale(0.75).move_to(0.25*UP)),FadeIn(text5.scale(0.75).move_to(0.25*DOWN)))
self.wait(2)
self.play(FadeOut(text4),FadeOut(text5))
self.play(FadeIn(text6))
self.wait(2)
self.play(FadeOut(text6))
self.play(FadeIn(text7))
self.wait(2)
self.play(FadeOut(text7))
self.play(FadeIn(problem.scale(0.85)))
self.wait(3)
self.play(FadeOut(problem))
def np(self):
text=TextMobject("First we show that $CANDYCRUSH\in\mathbf{NP}.$")
text.to_edge(UP)
self.play(Write(text))
text2=TextMobject("Given a sequence of moves, we can just play the game")
text3=TextMobject("then check if the final score is large enough.")
text2.to_edge(UP,buff=2)
text3.to_edge(UP,buff=2.5)
self.play(FadeIn(text2), FadeIn(text3))
self.wait()
text4=TextMobject("Each move takes constant time.")
text5=TextMobject("Comparing the final score can be done digit by digit")
text6=TextMobject("in $O(\log n)$ time.")
text4.to_edge(UP,buff=3.5)
text5.to_edge(UP,buff=4.5)
text6.to_edge(UP,buff=5)
self.play(FadeIn(text4))
self.wait()
self.play(FadeIn(text5), FadeIn(text6))
self.wait(2)
text7=TextMobject("Then overall, the $CANDYCRUSH$ problem is checkable in polynomial time.")
text7.to_edge(DOWN,buff=1.5).scale(0.75)
self.play(FadeIn(text7))
self.wait(2)
self.play(FadeOut(text),FadeOut(text2),FadeOut(text3),FadeOut(text4),FadeOut(text5),FadeOut(text6))
self.play(FadeOut(text7))
self.wait()
def clause(self):
title=TextMobject("Clause")
subtitle=TexMobject(r"""
\text{``anything such that the 4th column drops}\\
\text{only on a satisfying assignment''}\hspace{7mm}
""")
#subtitle.to_edge(UP,buff=4)
self.play(Write(title))
self.wait()
self.play(Transform(title,subtitle))
self.wait()
example=TexMobject(r"\text{For example: } \neg x_1 \vee x_2")
example.to_edge(UP,buff=0.5)
self.play(Transform(title,example))
self.wait()
matrix=TexMobject(r"""
\begin{matrix}
& G & G & \cdot & \cdot \\
\neg x_2 & \cdot & \cdot & \cdot & \cdot \\
& \cdot & \cdot & \cdot & \cdot \\
& \cdot & G & \cdot & \cdot \\
& \cdot & \cdot & \cdot & \cdot \\
& \cdot & \cdot & G & G \\
& \cdot & \cdot & \cdot & \cdot \\
& G & G & \cdot & \cdot \\
x_2 & \cdot & \cdot & \cdot & \cdot \\
& \cdot & \cdot & \cdot & \cdot \\
& \cdot & \cdot & \cdot & \cdot \\
x_1 & G & G & \cdot & \cdot \\
& \cdot & \cdot & \cdot & \cdot \\
& \cdot & G & \cdot & \cdot \\
& \cdot & \cdot & G & G \\
& \cdot & \cdot & \cdot & \cdot \\
\neg x_1 & G & G & \cdot & \cdot \\
\end{matrix}
""")
matrix.scale(0.5)
self.play(Write(matrix))
self.wait()
plus=TexMobject(r"+")
matrix2=TexMobject(r"""
\begin{matrix}
G & \cdot & \cdot \\
\cdot & G & G
\end{matrix}
""")
plus.move_to(2*RIGHT)
matrix2.move_to(4*RIGHT)
timesn=TexMobject(r"\bigg(\hspace{18mm}\bigg)\times N")
timesn.move_to(4.7*RIGHT)
self.play(Write(plus))
self.play(Write(matrix2),Write(timesn))
self.wait()
rect=Rectangle(height=2,width=2)
rect.move_to(6*RIGHT+2*DOWN)
self.play(Transform(matrix,rect),Transform(matrix2,rect),Transform(plus,rect),Transform(timesn,rect),Transform(title,rect),)
self.play(Write(TextMobject("clause").move_to(6*RIGHT+2*DOWN)))
self.wait()
def wire(self):
label=TextMobject("wire")
label2=TextMobject("wire")
row1_1=TexMobject(r"""
\begin{matrix}
\cdot \\
\cdot \\
G \\
\cdot \\
\cdot \\
\cdot \\
G
\end{matrix}
""")
row1_2=TexMobject(r"""
\begin{matrix}
\cdot \\
\cdot \\
\cdot \\
G \\
\cdot \\
\cdot \\
\cdot
\end{matrix}
""")
row1_3=TexMobject(r"""
\begin{matrix}
\cdot \\
\cdot \\
\cdot \\
\cdot \\
\cdot \\
\cdot \\
\cdot
\end{matrix}
""")
row1_1.move_to(LEFT)
row1_2.move_to(LEFT)
row1_3.move_to(LEFT)
row2_1=TexMobject(r"""
\begin{matrix}
\cdot \\
\cdot \\
G \\
\cdot \\
\cdot \\
\cdot \\
G
\end{matrix}
""")
row2_2=TexMobject(r"""
\begin{matrix}
\cdot \\
\cdot \\
\cdot \\
G \\
\cdot \\
\cdot \\
\cdot
\end{matrix}
""")
row2_3=TexMobject(r"""
\begin{matrix}
\cdot \\
\cdot \\
\cdot \\
\cdot \\
\cdot \\
\cdot \\
\cdot
\end{matrix}
""")
row3_1=TexMobject(r"""
\begin{matrix}
G \\
out \\
\cdot \\
G \\
\cdot \\
\cdot \\
\cdot
\end{matrix}
""")
row3_3=TexMobject(r"""
\begin{matrix}
\cdot \\
G \\
\cdot \\
\cdot \\
\cdot \\
\cdot \\
\cdot
\end{matrix}
""")
row3_1.move_to(RIGHT+0.1*UP)
row3_3.move_to(RIGHT)
rect = Rectangle(height=0,width=2)
rect.move_to(3.5*LEFT+2.25*DOWN)
label2.move_to(3.5*LEFT+2.75*DOWN)
label.next_to(row2_1,DOWN,buff=0.5)
self.play(Write(label))
self.wait()
self.play(FadeInFrom(row1_1, DOWN),FadeInFrom(row2_1, DOWN),FadeInFrom(row3_1, DOWN))
self.wait()
self.play(Transform(row1_1,row1_2),Transform(row2_1,row2_2))
self.wait()
self.play(Transform(row1_1,row1_3),Transform(row2_1,row2_3),Transform(row3_1,row3_3))
self.wait()
self.play(Transform(label,label2),Transform(row1_1,rect),Transform(row2_1,rect),Transform(row3_1,rect))
self.wait()
def assignmentpos(self):
label=TexMobject("x_i")
label2=TexMobject("x_i")
row1_1=TexMobject(r"""
\begin{matrix}
\cdot \\
\cdot \\
\cdot \\
\cdot \\
\cdot \\
G \\
\cdot
\end{matrix}
""")
row1_3=TexMobject(r"""
\begin{matrix}
\cdot \\
\cdot \\
\cdot \\
\cdot \\
\cdot \\
\cdot \\
\cdot
\end{matrix}
""")
row1_1.move_to(LEFT)
row1_3.move_to(LEFT)
row2_1=TexMobject(r"""
\begin{matrix}
\cdot \\
\cdot\\
\cdot \\
\cdot \\
\cdot \\
G \\
\cdot
\end{matrix}
""")
row2_3=TexMobject(r"""
\begin{matrix}
\cdot \\
\cdot\\
\cdot \\
\cdot \\
\cdot \\
\cdot \\
\cdot
\end{matrix}
""")
row3_1= TexMobject(r"""
\begin{matrix}
G \\
\cdot \\
G \\
\cdot \\
out \\
\cdot \\
\cdot
\end{matrix}
""")
row3_2= TexMobject(r"""
\begin{matrix}
\cdot \\
\cdot \\
\cdot \\
G \\
out \\
G \\
\cdot
\end{matrix}
""")
row3_3= TexMobject(r"""
\begin{matrix}
\cdot \\
\cdot \\
\cdot \\
\cdot \\
G \\
\cdot \\
\cdot
\end{matrix}
""")
row3_1.move_to(RIGHT+0.1*UP)
row3_2.move_to(RIGHT)
row3_3.move_to(RIGHT)
rect=Rectangle(height=1,width=2)
rect.move_to(6*LEFT+0.5*UP)
label2.move_to(6*LEFT+0.5*UP)
label.next_to(row2_1,DOWN,buff=0.5)
self.play(Write(label))
self.play(FadeInFrom(row1_1, DOWN),FadeInFrom(row2_1, DOWN),FadeInFrom(row3_1, DOWN))
self.wait()
self.play(Transform(row3_1,row3_2))
self.wait()
self.play(Transform(row1_1, row1_3),Transform(row2_1, row2_3),Transform(row3_1, row3_3))
self.wait()
self.play(Transform(row1_1,rect),Transform(row2_1,rect),Transform(row3_1,rect),Transform(label,label2))
self.wait()
def assignmentneg(self):
label=TexMobject("\\neg x_i")
label2=TexMobject("\\neg x_i")
row1_1=TexMobject(r"""
\begin{matrix}
\cdot \\
\cdot \\
\cdot \\
\cdot \\
G \\
\cdot
\end{matrix}
""")
row1_3=TexMobject(r"""
\begin{matrix}
\cdot \\
\cdot \\
\cdot \\
\cdot \\
\cdot \\
\cdot
\end{matrix}
""")
row1_1.move_to(LEFT)
row1_3.move_to(LEFT)
row2_1=TexMobject(r"""
\begin{matrix}
\cdot\\
G \\
\cdot \\
\cdot \\
\cdot \\
\cdot
\end{matrix}
""")
row2_2=TexMobject(r"""
\begin{matrix}
\cdot\\
\cdot \\
\cdot \\
\cdot \\
G \\
\cdot
\end{matrix}
""")
row2_3=TexMobject(r"""
\begin{matrix}
\cdot\\
\cdot \\
\cdot \\
\cdot \\
\cdot \\
\cdot
\end{matrix}
""")
row3_1= TexMobject(r"""
\begin{matrix}
G \\
out \\
\cdot \\
\cdot \\
G \\
\cdot
\end{matrix}
""")
row3_3= TexMobject(r"""
\begin{matrix}
\cdot \\
G \\
\cdot \\
\cdot \\
\cdot \\
\cdot
\end{matrix}
""")
row3_1.move_to(RIGHT+0.1*UP)
row3_3.move_to(RIGHT)
rect=Rectangle(height=1,width=2)
rect.move_to(6*LEFT+0.5*DOWN)
label2.move_to(6*LEFT+0.5*DOWN)
label.next_to(row2_2,DOWN,buff=0.5)
self.play(Write(label))
self.play(FadeInFrom(row1_1, DOWN),FadeInFrom(row2_1, DOWN),FadeInFrom(row3_1, DOWN))
self.wait()
self.play(Transform(row2_1,row2_2))
self.wait()
self.play(Transform(row1_1, row1_3),Transform(row2_1, row2_3),Transform(row3_1, row3_3))
self.wait()
self.play(Transform(row1_1,rect),Transform(row2_1,rect),Transform(row3_1,rect),Transform(label,label2))
self.wait()
def truegadget(self):
title=TextMobject("True")
title2=TextMobject("True")
row1_1=TexMobject(r"""
\begin{matrix}
\cdot \\
\cdot \\
\cdot \\
\cdot
\end{matrix}
""")
row1_1.move_to(LEFT)
row2_1=TexMobject(r"""
\begin{matrix}
\cdot\\
\cdot\\
G \\
G
\end{matrix}
""")
row2_1.move_to(0.05*DOWN)
row3_1=TexMobject(r"""
\begin{matrix}
G \\
G \\
\cdot \\
\cdot
\end{matrix}
""")
row3_1.move_to(RIGHT+0.1*UP)
row2_2=TexMobject(r"""
\begin{matrix}
\cdot \\
\cdot \\
\cdot \\
G
\end{matrix}
""")
row2_2.move_to(0.05*DOWN)
row3_2 = TexMobject(r"""
\begin{matrix}
G \\
G \\
G \\
\cdot
\end{matrix}
""")
row3_2.move_to(RIGHT+0.1*UP)
row2_3=TexMobject(r"""
\begin{matrix}
\cdot\\
\cdot \\
\cdot \\
G
\end{matrix}
""")
row2_3.move_to(0.05*DOWN)
row3_3 = TexMobject(r"""
\begin{matrix}
\cdot \\
\cdot \\
\cdot \\
\cdot
\end{matrix}
""")
row3_3.move_to(RIGHT)
square=Rectangle(height=1,width=2)
square.move_to(6*LEFT+2.5*DOWN)
title2.move_to(6*LEFT+2.5*DOWN)
title.next_to(row2_1,DOWN,buff=0.5)
self.play(Write(title),FadeInFrom(row1_1, UP),FadeInFrom(row2_1, UP),FadeInFrom(row3_1, UP))
self.wait()
self.play(Transform(row2_1,row2_2),Transform(row3_1,row3_2))
self.wait()
self.play(Transform(row2_1,row2_3),Transform(row3_1,row3_3))
self.wait()
self.play(Transform(row1_1,square),Transform(row2_1,square),Transform(row3_1,square),Transform(title,title2))
self.wait()
self.falsegadget()
def falsegadget(self):
title=TextMobject("False")
title2=TextMobject("False")
row1_1=TexMobject(r"""
\begin{matrix}
\cdot \\
\cdot \\
\cdot \\
\cdot
\end{matrix}
""")
row1_1.move_to(LEFT)
row2_1=TexMobject(r"""
\begin{matrix}
\cdot\\
\cdot\\
G \\
G
\end{matrix}
""")
row3_1=TexMobject(r"""
\begin{matrix}
G \\
G \\
\cdot \\
\cdot
\end{matrix}
""")
row3_1.move_to(RIGHT+0.1*UP)
row2_2=TexMobject(r"""
\begin{matrix}
\cdot \\
G \\
G \\
G
\end{matrix}
""")
row3_2 = TexMobject(r"""
\begin{matrix}
G \\
\cdot \\
\cdot \\
\cdot
\end{matrix}
""")
row3_2.move_to(RIGHT+0.1*UP)
row2_3=TexMobject(r"""
\begin{matrix}
\cdot\\
\cdot \\
\cdot \\
\cdot
\end{matrix}
""")
row3_3 = TexMobject(r"""
\begin{matrix}
G \\
\cdot \\
\cdot \\
\cdot
\end{matrix}
""")
row3_3.move_to(RIGHT+0.1*UP)
square=Rectangle(height=1,width=2)
square.move_to(6*LEFT+1.5*DOWN)
title2.move_to(6*LEFT+1.5*DOWN)
title.next_to(row2_1,DOWN,buff=0.5)
self.play(Write(title),FadeInFrom(row1_1, UP),FadeInFrom(row2_1, UP),FadeInFrom(row3_1, UP))
self.wait()
self.play(Transform(row2_1,row2_2),Transform(row3_1,row3_2))
self.wait()
self.play(Transform(row2_1,row2_3),Transform(row3_1,row3_3))
self.wait()
self.play(Transform(row1_1,square),Transform(row2_1,square),Transform(row3_1,square),Transform(title,title2))
self.wait()
class TrueGadget(Scene):
def construct(self):
title=TextMobject("True")
title2=TextMobject("True")
matrix1 = TexMobject(r"""
\begin{matrix}
\cdot & \cdot & G \\
\cdot & \cdot & G \\
\cdot & G & \cdot \\
\cdot & G & \cdot
\end{matrix}
""")
matrix2 = TexMobject(r"""
\begin{matrix}
\cdot & \cdot & G \\
\cdot & \cdot & G \\
\cdot & \cdot & G \\
\cdot & G & \cdot
\end{matrix}
""")
matrix3 = TexMobject(r"""
\begin{matrix}
\cdot & \cdot & \cdot \\
\cdot & \cdot & \cdot \\
\cdot & \cdot & \cdot \\
\cdot & G & \cdot
\end{matrix}
""")
square=Square()
square.move_to(6*LEFT+2.5*UP)
title2.move_to(6*LEFT+2.5*UP)
title.next_to(matrix1,DOWN,buff=0.5)
self.play(Write(title),FadeInFrom(matrix1, UP))
self.wait()
self.play(Transform(matrix1,matrix2))
self.wait()
self.play(Transform(matrix1, matrix3))
self.wait()
self.play(Transform(matrix1,square),Transform(title,title2))
self.wait()
class FalseGadget(Scene):
def construct(self):
title=TextMobject("False")
title2=TextMobject("False")
matrix1 = TexMobject(r"""
\begin{matrix}
\cdot & \cdot & G \\
\cdot & \cdot & G \\
\cdot & G & \cdot \\
\cdot & G & \cdot
\end{matrix}
""")
matrix2 = TexMobject(r"""
\begin{matrix}
\cdot & \cdot & G \\
\cdot & G & \cdot \\
\cdot & G & \cdot \\
\cdot & G & \cdot
\end{matrix}
""")
matrix3 = TexMobject(r"""
\begin{matrix}
\cdot & \cdot & G \\
\cdot & \cdot & \cdot \\
\cdot & \cdot & \cdot \\
\cdot & \cdot & \cdot
\end{matrix}
""")
square=Square()
square.move_to(6*LEFT+1.5*DOWN)
title2.move_to(6*LEFT+1.5*DOWN)
title.next_to(matrix1,DOWN,buff=0.5)
self.play(Write(title),FadeInFrom(matrix1, UP))
self.wait()
self.play(Transform(matrix1,matrix2))
self.wait()
self.play(Transform(matrix1, matrix3))
self.wait()
self.play(Transform(matrix1,square),Transform(title,title2))
self.wait()
|
from big_ol_pile_of_manim_imports import *
class Reduction(Scene):
def construct(self):
title=TextMobject("The Reduction").scale(2)
self.play(Write(title))
self.wait(2)
title2=TextMobject("The Reduction").to_corner(UL)
self.play(Transform(title,title2))
suppose=TextMobject(r"Suppose we are given a formula $\varphi$ with $n$ variables and $m$ clauses.").scale(0.75)
suppose.move_to(2*UP+LEFT)
self.play(FadeIn(suppose))
board=TextMobject("We use the gadgets to construct an equivalent gameboard.").scale(0.75)
board.move_to(UP+1.63*LEFT)
self.play(FadeIn(board))
self.wait(2)
self.play(FadeOut(suppose),FadeOut(board))
rect=Rectangle(height=1,width=2).move_to(DOWN*1.5)
rect2=Rectangle(height=1,width=2).move_to(DOWN*0.5)
rect3=Rectangle(height=1,width=2).move_to(UP*0.5)
rect4=Rectangle(height=1,width=2).move_to(UP*1.5)
l1=TextMobject("True").move_to(rect)
l2=TextMobject("False").move_to(rect2)
l3=TextMobject(r"$\neg x_i$").move_to(rect3)
l4=TextMobject(r"$x_i$").move_to(rect4)
self.play(FadeIn(rect),FadeIn(rect2),FadeIn(rect3),FadeIn(rect4),FadeIn(l1),FadeIn(l2),FadeIn(l3),FadeIn(l4))
self.wait()
varRect=Rectangle(height=4,width=1.5).move_to(3*LEFT)
self.play(ReplacementTransform(rect,varRect),ReplacementTransform(rect2,varRect),ReplacementTransform(rect3,varRect),ReplacementTransform(rect4,varRect),ReplacementTransform(l1,varRect),ReplacementTransform(l2,varRect),ReplacementTransform(l3,varRect),ReplacementTransform(l4,varRect))
self.wait()
self.remove(varRect)
newVarRect=Rectangle(height=4,width=1.5).move_to(3*LEFT)
self.add(newVarRect)
varRect2=Rectangle(height=4,width=1.5).move_to(5*LEFT)
varRect3=Rectangle(height=4,width=1.5).move_to(4*RIGHT)
dots=TextMobject(r"$\cdots$").scale(3)
self.play(ReplacementTransform(varRect.copy(),varRect2),ReplacementTransform(varRect,varRect3),Write(dots))
self.wait()
varRect4=Rectangle(height=4,width=3).move_to(4*LEFT)
self.play(ReplacementTransform(newVarRect,varRect4),ReplacementTransform(varRect2,varRect4),ReplacementTransform(varRect3,varRect4),ReplacementTransform(dots,varRect4))
self.remove(varRect4)
newVarRect4=Rectangle(height=4,width=3).move_to(4*LEFT)
self.add(newVarRect4) #variable rectangle
self.wait()
cRect=Rectangle(height=2,width=1).move_to(2*UP+RIGHT)
cRect2=Rectangle(height=2,width=1).move_to(2*RIGHT)
cRect3=Rectangle(height=2,width=1).move_to(2*DOWN+3*RIGHT)
self.play(Write(cRect),Write(cRect2),Write(cRect3))
self.wait()
wire1=Rectangle(height=0,width=3).move_to(1.5*UP+LEFT)
wire2=Rectangle(height=0,width=4).move_to(0.5*LEFT)
wire3=Rectangle(height=0,width=5).move_to(1.5*DOWN)
self.play(Write(wire1),Write(wire2),Write(wire3))
self.wait()
self.remove(newVarRect4)
self.remove(cRect)
self.remove(cRect2)
self.remove(cRect3)
self.remove(wire1)
self.remove(wire2)
self.remove(wire3)
self.wait()
dim=TextMobject(r"This board has width $O(n+m)$ and height $O(poly(n+m))$").move_to(0.5*UP)
dim2=TextMobject(r"so we can construct it in polynomial time.").move_to(0.5*DOWN)
self.play(Write(dim),Write(dim2))
self.wait()
self.play(FadeOut(dim),FadeOut(dim2))
self.wait()
text=TextMobject(r"Now pass this gameboard into $CANDYCRUSH$").move_to(0.5*UP)
text2=TextMobject(r"with $k=n$ and $s$ being the max score achievable.").move_to(0.5*DOWN)
self.play(Write(text),Write(text2))
self.wait()
text3=TextMobject(r"$CANDYCRUSH$ outputs $1$ iff $\varphi$ is satisfiable.").move_to(UP)
self.play(Transform(text,text3),Transform(text2,text3))
self.wait()
conclusion=TextMobject(r"So $3SAT\le_p CANDYCRUSH$.")
self.play(Write(conclusion))
self.wait()
conclusion2=TextMobject(r"Therefore, $CANDYCRUSH$ is $\mathbf{NP}$-hard.").move_to(DOWN)
self.play(Write(conclusion2))
self.play(FadeOut(conclusion),FadeOut(conclusion2),FadeOut(title),FadeOut(text),FadeOut(text2))
self.wait()
tit=TextMobject("Conclusion").to_corner(UL)
self.play(FadeIn(tit))
end=TextMobject(r"We have shown that $CANDYCRUSH\in\mathbf{NP}$").move_to(UP)
end2=TextMobject(r"and $CANDYCRUSH$ is $\mathbf{NP}$-hard.")
end3=TextMobject(r"Together, these imply that $CANDYCRUSH$ is $\mathbf{NP}$-complete.").move_to(DOWN)
self.play(Write(end))
self.play(Write(end2))
self.play(Write(end3))
sq=TextMobject(r"$\blacksquare$").scale(5).to_corner(DR)
self.wait()
self.add(sq)
self.wait()
self.play(FadeOut(tit),FadeOut(end),FadeOut(end2),FadeOut(end3),FadeOut(sq))
self.wait()
class Main(Scene):
def construct(self):
self.title()
self.definitions()
self.np()
npcomplete=TextMobject("Now we show that $CANDYCRUSH$ is $\mathbf{NP}$-hard.")
self.play(Write(npcomplete.to_edge(UP)))
method=TextMobject("We show this by a reduction from $3SAT$.")
method.to_edge(UP,buff=2.5)
self.play(Write(method))
gadgets=TextMobject("We will introduce a series of gadgets").to_edge(UP,buff=3.5)
gadgets2=TextMobject("that will allow us to convert any $3SAT$ instance").to_edge(UP,buff=4)
gadgets3=TextMobject("to an instance of $CANDYCRUSH$.").to_edge(UP,buff=4.5)
self.play(FadeIn(gadgets), FadeIn(gadgets2),FadeIn(gadgets3))
self.wait(3)
self.play(FadeOut(gadgets),FadeOut(gadgets2),FadeOut(gadgets3),FadeOut(method))
gadgets4 = TextMobject("Gadgets").scale(1.5).to_corner(UL)
self.play(Transform(npcomplete,gadgets4))
self.background()
self.truegadget()
#self.falsegadget()
self.assignmentneg()
self.assignmentpos()
self.wire()
self.clause()
title=TextMobject("How do we put it all together?").to_edge(UP).scale(1.5)
self.play(Transform(npcomplete, title))
def background(self):
back=TexMobject(r"""
\begin{matrix}
R & O & R & O & R\\
B & P & B & P & B\\
R & O & R & O & R\\
B & P & B & P & B\\
R & O & R & O & R
\end{matrix}
""")
text=TextMobject("Neutral Background").next_to(back,DOWN,buff=1)
back2=TexMobject(r"""
\begin{matrix}
R & O & R & O & R\\
B & P & & & B\\
R & & B & P & R\\
B & O & R & O & B\\
R & P & B & P & R
\end{matrix}
""")
back3=TexMobject(r"""
\begin{matrix}
R & O & R & O & R\\
B & P & G & G & B\\
R & G & B & P & R\\
B & O & R & O & B\\
R & P & B & P & R
\end{matrix}
""")
back4=TexMobject(r"""
\begin{matrix}
\cdot & \cdot & \cdot & \cdot & \cdot \\
\cdot & \cdot & \cdot & \cdot & \cdot \\
\cdot & \cdot & \cdot & \cdot & \cdot \\
\cdot & \cdot & \cdot & \cdot & \cdot \\
\cdot & \cdot & \cdot & \cdot & \cdot
\end{matrix}
""")
self.play(FadeIn(text))
self.play(FadeIn(back))
self.wait()
self.play(FadeOut(back),FadeIn(back2))
self.wait()
self.play(FadeOut(back2),FadeIn(back3))
self.wait()
self.play(FadeOut(back3),FadeIn(back4))
self.wait()
self.play(FadeOut(back4),FadeOut(text))
self.wait()
def title(self):
title=TextMobject("How hard is Candy Crush?").scale(2)
self.play(Write(title))
self.wait()
self.play(LaggedStart(FadeOutAndShiftDown, title))
answer=TextMobject("Answer: NP-complete").scale(1.5)
self.play(Write(answer))
self.wait()
self.play(FadeOut(answer))
def definitions(self):
grid = NumberPlane()
grid_title = TextMobject("Candy Crush is played on a grid").move_to(0.5*UP)
grid_title.scale(1.5)
self.add(grid, grid_title) # Make sure title is on top of grid
self.play(
FadeInFromDown(grid_title),
Write(grid), run_time=2.5
)
self.wait()
dimensions=TexMobject(r"\text{Call the dimensions } w \times h").scale(1.5)
dimensions.move_to(0.5*DOWN)
self.play(Transform(grid_title, dimensions))
self.wait(2)
self.remove(grid,grid_title)
text=TextMobject("Each square is filled with one of six different colored candies")
arr=TextMobject("R", "O", "Y", "G", "B", "P")
arr2=TextMobject("R", "O", "G", "Y", "B", "P")
arr.move_to(DOWN*1.5)
arr[0].set_color(RED)
arr[1].set_color(ORANGE)
arr[2].set_color("#FFFF00")
arr[3].set_color("00FF00")
arr[4].set_color(BLUE)
arr[5].set_color("DC28E2")
arr2.move_to(DOWN*1.5)
arr2[0].set_color(RED)
arr2[1].set_color(ORANGE)
arr2[3].set_color("#FFFF00")
arr2[2].set_color("00FF00")
arr2[4].set_color(BLUE)
arr2[5].set_color("DC28E2")
self.play(FadeIn(text))
self.play(Write(arr))
self.wait(1.5)
self.play(FadeOut(text))
text2=TextMobject("Each square is filled with one of six different colored candies")
text3=TextMobject("A player may swap two candies in neighboring squares")
text4=TextMobject("When a player forms a chain of 3 identical candies or more, the identical")
text5=TextMobject("candies are deleted and new ones fall from above and take their place.")
text6=TextMobject("The player's final score is equal to the number of chains deleted.")
text7=TextMobject("Define the $CANDYCRUSH$ problem as follows:")
problem=TextMobject("Given a gameboard and a number $k$ swaps, is a score $s$ achievable?")
self.play(FadeIn(text3))
self.play(Transform(arr,arr2))
self.wait(2)
self.play(FadeOut(text3))
self.play(FadeOut(arr))
self.play(FadeIn(text4.scale(0.75).move_to(0.25*UP)),FadeIn(text5.scale(0.75).move_to(0.25*DOWN)))
self.wait(2)
self.play(FadeOut(text4),FadeOut(text5))
self.play(FadeIn(text6))
self.wait(2)
self.play(FadeOut(text6))
self.play(FadeIn(text7))
self.wait(2)
self.play(FadeOut(text7))
self.play(FadeIn(problem.scale(0.85)))
self.wait(3)
self.play(FadeOut(problem))
def np(self):
text=TextMobject("First we show that $CANDYCRUSH\in\mathbf{NP}.$")
text.to_edge(UP)
self.play(Write(text))
text2=TextMobject("Given a sequence of moves, we can just play the game")
text3=TextMobject("then check if the final score is large enough.")
text2.to_edge(UP,buff=2)
text3.to_edge(UP,buff=2.5)
self.play(FadeIn(text2), FadeIn(text3))
self.wait()
text4=TextMobject("Each move takes constant time.")
text5=TextMobject("Comparing the final score can be done digit by digit")
text6=TextMobject("in $O(\log n)$ time.")
text4.to_edge(UP,buff=3.5)
text5.to_edge(UP,buff=4.5)
text6.to_edge(UP,buff=5)
self.play(FadeIn(text4))
self.wait()
self.play(FadeIn(text5), FadeIn(text6))
self.wait(2)
text7=TextMobject("Then overall, the $CANDYCRUSH$ problem is checkable in polynomial time.")
text7.to_edge(DOWN,buff=1.5).scale(0.75)
self.play(FadeIn(text7))
self.wait(2)
self.play(FadeOut(text),FadeOut(text2),FadeOut(text3),FadeOut(text4),FadeOut(text5),FadeOut(text6))
self.play(FadeOut(text7))
self.wait()
def clause(self):
title=TextMobject("Clause")
subtitle=TexMobject(r"""
\text{``anything such that the 4th column drops}\\
\text{only on a satisfying assignment''}\hspace{7mm}
""")
#subtitle.to_edge(UP,buff=4)
self.play(Write(title))
self.wait()
self.play(Transform(title,subtitle))
self.wait()
example=TexMobject(r"\text{For example: } \neg x_1 \vee x_2")
example.to_edge(UP,buff=0.5)
self.play(Transform(title,example))
self.wait()
matrix=TexMobject(r"""
\begin{matrix}
& G & G & \cdot & \cdot \\
\neg x_2 & \cdot & \cdot & \cdot & \cdot \\
& \cdot & \cdot & \cdot & \cdot \\
& \cdot & G & \cdot & \cdot \\
& \cdot & \cdot & \cdot & \cdot \\
& \cdot & \cdot & G & G \\
& \cdot & \cdot & \cdot & \cdot \\
& G & G & \cdot & \cdot \\
x_2 & \cdot & \cdot & \cdot & \cdot \\
& \cdot & \cdot & \cdot & \cdot \\
& \cdot & \cdot & \cdot & \cdot \\
x_1 & G & G & \cdot & \cdot \\
& \cdot & \cdot & \cdot & \cdot \\
& \cdot & G & \cdot & \cdot \\
& \cdot & \cdot & G & G \\
& \cdot & \cdot & \cdot & \cdot \\
\neg x_1 & G & G & \cdot & \cdot \\
\end{matrix}
""")
matrix.scale(0.5)
self.play(Write(matrix))
self.wait()
plus=TexMobject(r"+")
matrix2=TexMobject(r"""
\begin{matrix}
G & \cdot & \cdot \\
\cdot & G & G
\end{matrix}
""")
plus.move_to(2*RIGHT)
matrix2.move_to(4*RIGHT)
timesn=TexMobject(r"\bigg(\hspace{18mm}\bigg)\times N")
timesn.move_to(4.7*RIGHT)
self.play(Write(plus))
self.play(Write(matrix2),Write(timesn))
self.wait()
rect=Rectangle(height=2,width=2)
rect.move_to(6*RIGHT+2*DOWN)
self.play(Transform(matrix,rect),Transform(matrix2,rect),Transform(plus,rect),Transform(timesn,rect),Transform(title,rect),)
self.play(Write(TextMobject("clause").move_to(6*RIGHT+2*DOWN)))
self.wait()
def wire(self):
label=TextMobject("wire")
label2=TextMobject("wire")
row1_1=TexMobject(r"""
\begin{matrix}
\cdot \\
\cdot \\
G \\
\cdot \\
\cdot \\
\cdot \\
G
\end{matrix}
""")
row1_2=TexMobject(r"""
\begin{matrix}
\cdot \\
\cdot \\
\cdot \\
G \\
\cdot \\
\cdot \\
\cdot
\end{matrix}
""")
row1_3=TexMobject(r"""
\begin{matrix}
\cdot \\
\cdot \\
\cdot \\
\cdot \\
\cdot \\
\cdot \\
\cdot
\end{matrix}
""")
row1_1.move_to(LEFT)
row1_2.move_to(LEFT)
row1_3.move_to(LEFT)
row2_1=TexMobject(r"""
\begin{matrix}
\cdot \\
\cdot \\
G \\
\cdot \\
\cdot \\
\cdot \\
G
\end{matrix}
""")
row2_2=TexMobject(r"""
\begin{matrix}
\cdot \\
\cdot \\
\cdot \\
G \\
\cdot \\
\cdot \\
\cdot
\end{matrix}
""")
row2_3=TexMobject(r"""
\begin{matrix}
\cdot \\
\cdot \\
\cdot \\
\cdot \\
\cdot \\
\cdot \\
\cdot
\end{matrix}
""")
row3_1=TexMobject(r"""
\begin{matrix}
G \\
out \\
\cdot \\
G \\
\cdot \\
\cdot \\
\cdot
\end{matrix}
""")
row3_3=TexMobject(r"""
\begin{matrix}
\cdot \\
G \\
\cdot \\
\cdot \\
\cdot \\
\cdot \\
\cdot
\end{matrix}
""")
row3_1.move_to(RIGHT+0.1*UP)
row3_3.move_to(RIGHT)
rect = Rectangle(height=0,width=2)
rect.move_to(3.5*LEFT+2.25*DOWN)
label2.move_to(3.5*LEFT+2.75*DOWN)
label.next_to(row2_1,DOWN,buff=0.5)
self.play(Write(label))
self.wait()
self.play(FadeInFrom(row1_1, DOWN),FadeInFrom(row2_1, DOWN),FadeInFrom(row3_1, DOWN))
self.wait()
self.play(Transform(row1_1,row1_2),Transform(row2_1,row2_2))
self.wait()
self.play(Transform(row1_1,row1_3),Transform(row2_1,row2_3),Transform(row3_1,row3_3))
self.wait()
self.play(Transform(label,label2),Transform(row1_1,rect),Transform(row2_1,rect),Transform(row3_1,rect))
self.wait()
def assignmentpos(self):
label=TexMobject("x_i")
label2=TexMobject("x_i")
row1_1=TexMobject(r"""
\begin{matrix}
\cdot \\
\cdot \\
\cdot \\
\cdot \\
\cdot \\
G \\
\cdot
\end{matrix}
""")
row1_3=TexMobject(r"""
\begin{matrix}
\cdot \\
\cdot \\
\cdot \\
\cdot \\
\cdot \\
\cdot \\
\cdot
\end{matrix}
""")
row1_1.move_to(LEFT)
row1_3.move_to(LEFT)
row2_1=TexMobject(r"""
\begin{matrix}
\cdot \\
\cdot\\
\cdot \\
\cdot \\
\cdot \\
G \\
\cdot
\end{matrix}
""")
row2_3=TexMobject(r"""
\begin{matrix}
\cdot \\
\cdot\\
\cdot \\
\cdot \\
\cdot \\
\cdot \\
\cdot
\end{matrix}
""")
row3_1= TexMobject(r"""
\begin{matrix}
G \\
\cdot \\
G \\
\cdot \\
out \\
\cdot \\
\cdot
\end{matrix}
""")
row3_2= TexMobject(r"""
\begin{matrix}
\cdot \\
\cdot \\
\cdot \\
G \\
out \\
G \\
\cdot
\end{matrix}
""")
row3_3= TexMobject(r"""
\begin{matrix}
\cdot \\
\cdot \\
\cdot \\
\cdot \\
G \\
\cdot \\
\cdot
\end{matrix}
""")
row3_1.move_to(RIGHT+0.1*UP)
row3_2.move_to(RIGHT)
row3_3.move_to(RIGHT)
rect=Rectangle(height=1,width=2)
rect.move_to(6*LEFT+0.5*UP)
label2.move_to(6*LEFT+0.5*UP)
label.next_to(row2_1,DOWN,buff=0.5)
self.play(Write(label))
self.play(FadeInFrom(row1_1, DOWN),FadeInFrom(row2_1, DOWN),FadeInFrom(row3_1, DOWN))
self.wait()
self.play(Transform(row3_1,row3_2))
self.wait()
self.play(Transform(row1_1, row1_3),Transform(row2_1, row2_3),Transform(row3_1, row3_3))
self.wait()
self.play(Transform(row1_1,rect),Transform(row2_1,rect),Transform(row3_1,rect),Transform(label,label2))
self.wait()
def assignmentneg(self):
label=TexMobject("\\neg x_i")
label2=TexMobject("\\neg x_i")
row1_1=TexMobject(r"""
\begin{matrix}
\cdot \\
\cdot \\
\cdot \\
\cdot \\
G \\
\cdot
\end{matrix}
""")
row1_3=TexMobject(r"""
\begin{matrix}
\cdot \\
\cdot \\
\cdot \\
\cdot \\
\cdot \\
\cdot
\end{matrix}
""")
row1_1.move_to(LEFT)
row1_3.move_to(LEFT)
row2_1=TexMobject(r"""
\begin{matrix}
\cdot\\
G \\
\cdot \\
\cdot \\
\cdot \\
\cdot
\end{matrix}
""")
row2_2=TexMobject(r"""
\begin{matrix}
\cdot\\
\cdot \\
\cdot \\
\cdot \\
G \\
\cdot
\end{matrix}
""")
row2_3=TexMobject(r"""
\begin{matrix}
\cdot\\
\cdot \\
\cdot \\
\cdot \\
\cdot \\
\cdot
\end{matrix}
""")
row3_1= TexMobject(r"""
\begin{matrix}
G \\
out \\
\cdot \\
\cdot \\
G \\
\cdot
\end{matrix}
""")
row3_3= TexMobject(r"""
\begin{matrix}
\cdot \\
G \\
\cdot \\
\cdot \\
\cdot \\
\cdot
\end{matrix}
""")
row3_1.move_to(RIGHT+0.1*UP)
row3_3.move_to(RIGHT)
rect=Rectangle(height=1,width=2)
rect.move_to(6*LEFT+0.5*DOWN)
label2.move_to(6*LEFT+0.5*DOWN)
label.next_to(row2_2,DOWN,buff=0.5)
self.play(Write(label))
self.play(FadeInFrom(row1_1, DOWN),FadeInFrom(row2_1, DOWN),FadeInFrom(row3_1, DOWN))
self.wait()
self.play(Transform(row2_1,row2_2))
self.wait()
self.play(Transform(row1_1, row1_3),Transform(row2_1, row2_3),Transform(row3_1, row3_3))
self.wait()
self.play(Transform(row1_1,rect),Transform(row2_1,rect),Transform(row3_1,rect),Transform(label,label2))
self.wait()
def truegadget(self):
title=TextMobject("True")
title2=TextMobject("True")
row1_1=TexMobject(r"""
\begin{matrix}
\cdot \\
\cdot \\
\cdot \\
\cdot
\end{matrix}
""")
row1_1.move_to(LEFT)
row2_1=TexMobject(r"""
\begin{matrix}
\cdot\\
\cdot\\
G \\
G
\end{matrix}
""")
row2_1.move_to(0.05*DOWN)
row3_1=TexMobject(r"""
\begin{matrix}
G \\
G \\
\cdot \\
\cdot
\end{matrix}
""")
row3_1.move_to(RIGHT+0.1*UP)
row2_2=TexMobject(r"""
\begin{matrix}
\cdot \\
\cdot \\
\cdot \\
G
\end{matrix}
""")
row2_2.move_to(0.05*DOWN)
row3_2 = TexMobject(r"""
\begin{matrix}
G \\
G \\
G \\
\cdot
\end{matrix}
""")
row3_2.move_to(RIGHT+0.1*UP)
row2_3=TexMobject(r"""
\begin{matrix}
\cdot\\
\cdot \\
\cdot \\
G
\end{matrix}
""")
row2_3.move_to(0.05*DOWN)
row3_3 = TexMobject(r"""
\begin{matrix}
\cdot \\
\cdot \\
\cdot \\
\cdot
\end{matrix}
""")
row3_3.move_to(RIGHT)
square=Rectangle(height=1,width=2)
square.move_to(6*LEFT+2.5*DOWN)
title2.move_to(6*LEFT+2.5*DOWN)
title.next_to(row2_1,DOWN,buff=0.5)
self.play(Write(title),FadeInFrom(row1_1, UP),FadeInFrom(row2_1, UP),FadeInFrom(row3_1, UP))
self.wait()
self.play(Transform(row2_1,row2_2),Transform(row3_1,row3_2))
self.wait()
self.play(Transform(row2_1,row2_3),Transform(row3_1,row3_3))
self.wait()
self.play(Transform(row1_1,square),Transform(row2_1,square),Transform(row3_1,square),Transform(title,title2))
self.wait()
self.falsegadget()
def falsegadget(self):
title=TextMobject("False")
title2=TextMobject("False")
row1_1=TexMobject(r"""
\begin{matrix}
\cdot \\
\cdot \\
\cdot \\
\cdot
\end{matrix}
""")
row1_1.move_to(LEFT)
row2_1=TexMobject(r"""
\begin{matrix}
\cdot\\
\cdot\\
G \\
G
\end{matrix}
""")
row3_1=TexMobject(r"""
\begin{matrix}
G \\
G \\
\cdot \\
\cdot
\end{matrix}
""")
row3_1.move_to(RIGHT+0.1*UP)
row2_2=TexMobject(r"""
\begin{matrix}
\cdot \\
G \\
G \\
G
\end{matrix}
""")
row3_2 = TexMobject(r"""
\begin{matrix}
G \\
\cdot \\
\cdot \\
\cdot
\end{matrix}
""")
row3_2.move_to(RIGHT+0.1*UP)
row2_3=TexMobject(r"""
\begin{matrix}
\cdot\\
\cdot \\
\cdot \\
\cdot
\end{matrix}
""")
row3_3 = TexMobject(r"""
\begin{matrix}
G \\
\cdot \\
\cdot \\
\cdot
\end{matrix}
""")
row3_3.move_to(RIGHT+0.1*UP)
square=Rectangle(height=1,width=2)
square.move_to(6*LEFT+1.5*DOWN)
title2.move_to(6*LEFT+1.5*DOWN)
title.next_to(row2_1,DOWN,buff=0.5)
self.play(Write(title),FadeInFrom(row1_1, UP),FadeInFrom(row2_1, UP),FadeInFrom(row3_1, UP))
self.wait()
self.play(Transform(row2_1,row2_2),Transform(row3_1,row3_2))
self.wait()
self.play(Transform(row2_1,row2_3),Transform(row3_1,row3_3))
self.wait()
self.play(Transform(row1_1,square),Transform(row2_1,square),Transform(row3_1,square),Transform(title,title2))
self.wait()
class TrueGadget(Scene):
def construct(self):
title=TextMobject("True")
title2=TextMobject("True")
matrix1 = TexMobject(r"""
\begin{matrix}
\cdot & \cdot & G \\
\cdot & \cdot & G \\
\cdot & G & \cdot \\
\cdot & G & \cdot
\end{matrix}
""")
matrix2 = TexMobject(r"""
\begin{matrix}
\cdot & \cdot & G \\
\cdot & \cdot & G \\
\cdot & \cdot & G \\
\cdot & G & \cdot
\end{matrix}
""")
matrix3 = TexMobject(r"""
\begin{matrix}
\cdot & \cdot & \cdot \\
\cdot & \cdot & \cdot \\
\cdot & \cdot & \cdot \\
\cdot & G & \cdot
\end{matrix}
""")
square=Square()
square.move_to(6*LEFT+2.5*UP)
title2.move_to(6*LEFT+2.5*UP)
title.next_to(matrix1,DOWN,buff=0.5)
self.play(Write(title),FadeInFrom(matrix1, UP))
self.wait()
self.play(Transform(matrix1,matrix2))
self.wait()
self.play(Transform(matrix1, matrix3))
self.wait()
self.play(Transform(matrix1,square),Transform(title,title2))
self.wait()
class FalseGadget(Scene):
def construct(self):
title=TextMobject("False")
title2=TextMobject("False")
matrix1 = TexMobject(r"""
\begin{matrix}
\cdot & \cdot & G \\
\cdot & \cdot & G \\
\cdot & G & \cdot \\
\cdot & G & \cdot
\end{matrix}
""")
matrix2 = TexMobject(r"""
\begin{matrix}
\cdot & \cdot & G \\
\cdot & G & \cdot \\
\cdot & G & \cdot \\
\cdot & G & \cdot
\end{matrix}
""")
matrix3 = TexMobject(r"""
\begin{matrix}
\cdot & \cdot & G \\
\cdot & \cdot & \cdot \\
\cdot & \cdot & \cdot \\
\cdot & \cdot & \cdot
\end{matrix}
""")
square=Square()
square.move_to(6*LEFT+1.5*DOWN)
title2.move_to(6*LEFT+1.5*DOWN)
title.next_to(matrix1,DOWN,buff=0.5)
self.play(Write(title),FadeInFrom(matrix1, UP))
self.wait()
self.play(Transform(matrix1,matrix2))
self.wait()
self.play(Transform(matrix1, matrix3))
self.wait()
self.play(Transform(matrix1,square),Transform(title,title2))
self.wait()
|
de
| 0.351608
|
#variable rectangle #self.falsegadget() \begin{matrix} R & O & R & O & R\\ B & P & B & P & B\\ R & O & R & O & R\\ B & P & B & P & B\\ R & O & R & O & R \end{matrix} \begin{matrix} R & O & R & O & R\\ B & P & & & B\\ R & & B & P & R\\ B & O & R & O & B\\ R & P & B & P & R \end{matrix} \begin{matrix} R & O & R & O & R\\ B & P & G & G & B\\ R & G & B & P & R\\ B & O & R & O & B\\ R & P & B & P & R \end{matrix} \begin{matrix} \cdot & \cdot & \cdot & \cdot & \cdot \\ \cdot & \cdot & \cdot & \cdot & \cdot \\ \cdot & \cdot & \cdot & \cdot & \cdot \\ \cdot & \cdot & \cdot & \cdot & \cdot \\ \cdot & \cdot & \cdot & \cdot & \cdot \end{matrix} # Make sure title is on top of grid \text{``anything such that the 4th column drops}\\ \text{only on a satisfying assignment''}\hspace{7mm} #subtitle.to_edge(UP,buff=4) \begin{matrix} & G & G & \cdot & \cdot \\ \neg x_2 & \cdot & \cdot & \cdot & \cdot \\ & \cdot & \cdot & \cdot & \cdot \\ & \cdot & G & \cdot & \cdot \\ & \cdot & \cdot & \cdot & \cdot \\ & \cdot & \cdot & G & G \\ & \cdot & \cdot & \cdot & \cdot \\ & G & G & \cdot & \cdot \\ x_2 & \cdot & \cdot & \cdot & \cdot \\ & \cdot & \cdot & \cdot & \cdot \\ & \cdot & \cdot & \cdot & \cdot \\ x_1 & G & G & \cdot & \cdot \\ & \cdot & \cdot & \cdot & \cdot \\ & \cdot & G & \cdot & \cdot \\ & \cdot & \cdot & G & G \\ & \cdot & \cdot & \cdot & \cdot \\ \neg x_1 & G & G & \cdot & \cdot \\ \end{matrix} \begin{matrix} G & \cdot & \cdot \\ \cdot & G & G \end{matrix} \begin{matrix} \cdot \\ \cdot \\ G \\ \cdot \\ \cdot \\ \cdot \\ G \end{matrix} \begin{matrix} \cdot \\ \cdot \\ \cdot \\ G \\ \cdot \\ \cdot \\ \cdot \end{matrix} \begin{matrix} \cdot \\ \cdot \\ \cdot \\ \cdot \\ \cdot \\ \cdot \\ \cdot \end{matrix} \begin{matrix} \cdot \\ \cdot \\ G \\ \cdot \\ \cdot \\ \cdot \\ G \end{matrix} \begin{matrix} \cdot \\ \cdot \\ \cdot \\ G \\ \cdot \\ \cdot \\ \cdot \end{matrix} \begin{matrix} \cdot \\ \cdot \\ \cdot \\ \cdot \\ \cdot \\ \cdot \\ \cdot \end{matrix} \begin{matrix} G \\ out \\ 
\cdot \\ G \\ \cdot \\ \cdot \\ \cdot \end{matrix} \begin{matrix} \cdot \\ G \\ \cdot \\ \cdot \\ \cdot \\ \cdot \\ \cdot \end{matrix} \begin{matrix} \cdot \\ \cdot \\ \cdot \\ \cdot \\ \cdot \\ G \\ \cdot \end{matrix} \begin{matrix} \cdot \\ \cdot \\ \cdot \\ \cdot \\ \cdot \\ \cdot \\ \cdot \end{matrix} \begin{matrix} \cdot \\ \cdot\\ \cdot \\ \cdot \\ \cdot \\ G \\ \cdot \end{matrix} \begin{matrix} \cdot \\ \cdot\\ \cdot \\ \cdot \\ \cdot \\ \cdot \\ \cdot \end{matrix} \begin{matrix} G \\ \cdot \\ G \\ \cdot \\ out \\ \cdot \\ \cdot \end{matrix} \begin{matrix} \cdot \\ \cdot \\ \cdot \\ G \\ out \\ G \\ \cdot \end{matrix} \begin{matrix} \cdot \\ \cdot \\ \cdot \\ \cdot \\ G \\ \cdot \\ \cdot \end{matrix} \begin{matrix} \cdot \\ \cdot \\ \cdot \\ \cdot \\ G \\ \cdot \end{matrix} \begin{matrix} \cdot \\ \cdot \\ \cdot \\ \cdot \\ \cdot \\ \cdot \end{matrix} \begin{matrix} \cdot\\ G \\ \cdot \\ \cdot \\ \cdot \\ \cdot \end{matrix} \begin{matrix} \cdot\\ \cdot \\ \cdot \\ \cdot \\ G \\ \cdot \end{matrix} \begin{matrix} \cdot\\ \cdot \\ \cdot \\ \cdot \\ \cdot \\ \cdot \end{matrix} \begin{matrix} G \\ out \\ \cdot \\ \cdot \\ G \\ \cdot \end{matrix} \begin{matrix} \cdot \\ G \\ \cdot \\ \cdot \\ \cdot \\ \cdot \end{matrix} \begin{matrix} \cdot \\ \cdot \\ \cdot \\ \cdot \end{matrix} \begin{matrix} \cdot\\ \cdot\\ G \\ G \end{matrix} \begin{matrix} G \\ G \\ \cdot \\ \cdot \end{matrix} \begin{matrix} \cdot \\ \cdot \\ \cdot \\ G \end{matrix} \begin{matrix} G \\ G \\ G \\ \cdot \end{matrix} \begin{matrix} \cdot\\ \cdot \\ \cdot \\ G \end{matrix} \begin{matrix} \cdot \\ \cdot \\ \cdot \\ \cdot \end{matrix} \begin{matrix} \cdot \\ \cdot \\ \cdot \\ \cdot \end{matrix} \begin{matrix} \cdot\\ \cdot\\ G \\ G \end{matrix} \begin{matrix} G \\ G \\ \cdot \\ \cdot \end{matrix} \begin{matrix} \cdot \\ G \\ G \\ G \end{matrix} \begin{matrix} G \\ \cdot \\ \cdot \\ \cdot \end{matrix} \begin{matrix} \cdot\\ \cdot \\ \cdot \\ \cdot \end{matrix} \begin{matrix} G \\ \cdot \\ \cdot \\ 
\cdot \end{matrix} \begin{matrix} \cdot & \cdot & G \\ \cdot & \cdot & G \\ \cdot & G & \cdot \\ \cdot & G & \cdot \end{matrix} \begin{matrix} \cdot & \cdot & G \\ \cdot & \cdot & G \\ \cdot & \cdot & G \\ \cdot & G & \cdot \end{matrix} \begin{matrix} \cdot & \cdot & \cdot \\ \cdot & \cdot & \cdot \\ \cdot & \cdot & \cdot \\ \cdot & G & \cdot \end{matrix} \begin{matrix} \cdot & \cdot & G \\ \cdot & \cdot & G \\ \cdot & G & \cdot \\ \cdot & G & \cdot \end{matrix} \begin{matrix} \cdot & \cdot & G \\ \cdot & G & \cdot \\ \cdot & G & \cdot \\ \cdot & G & \cdot \end{matrix} \begin{matrix} \cdot & \cdot & G \\ \cdot & \cdot & \cdot \\ \cdot & \cdot & \cdot \\ \cdot & \cdot & \cdot \end{matrix}
| 2.694425
| 3
|
testpy/test_repr.py
|
goolor/testpy
| 0
|
6629933
|
<filename>testpy/test_repr.py
#!/usr/bin/env python3
#-*- coding: utf-8 -*-
str1=r"'test string' new line"
print("Original is:"+str1)
print()
print("After repr is:"+repr(str1))
print()
#print("After eval is:"+eval(str1))
print()
#print("After repr(eval is:"+repr(eval(str1)))
print()
print("After eval(repr is:"+eval(repr(str1)))
|
<filename>testpy/test_repr.py
#!/usr/bin/env python3
#-*- coding: utf-8 -*-
str1=r"'test string' new line"
print("Original is:"+str1)
print()
print("After repr is:"+repr(str1))
print()
#print("After eval is:"+eval(str1))
print()
#print("After repr(eval is:"+repr(eval(str1)))
print()
print("After eval(repr is:"+eval(repr(str1)))
|
en
| 0.094816
|
#!/usr/bin/env python3 #-*- coding: utf-8 -*- #print("After eval is:"+eval(str1)) #print("After repr(eval is:"+repr(eval(str1)))
| 3.076
| 3
|
vanquisher/network/__init__.py
|
Gustavo6046/vanquisher-old
| 0
|
6629934
|
<reponame>Gustavo6046/vanquisher-old<gh_stars>0
"""
The Python implementation of the networking protocol that Vanquisher
uses to communicate the playsim state between server and clients.
"""
|
"""
The Python implementation of the networking protocol that Vanquisher
uses to communicate the playsim state between server and clients.
"""
|
en
| 0.898511
|
The Python implementation of the networking protocol that Vanquisher uses to communicate the playsim state between server and clients.
| 1.58745
| 2
|
DummyNews/generator.py
|
videah/dummynews
| 0
|
6629935
|
import random
from datetime import datetime
from DummyNews.models import User, Role, Post, Comment, JobListing, BanAppeal, Ban, GDPRRequest, DMCATakeDown, Report
from faker import Factory
from faker.providers import lorem, internet
fake = Factory.create()
fake.add_provider(lorem)
fake.add_provider(internet)
rnd = lambda a: a[random.randint(0, len(a) -1)]
class RandomUser(object):
Roles = ["Admin", "Moderator", "User"]
def __init__(self, db_session):
self.db = db_session
def generate(self, n):
users = []
roles = self._roles(RandomUser.Roles)
rnd = lambda a: a[random.randint(0, len(a) -1)]
for _ in range(n):
users.append(User(email=fake.email(), username=fake.user_name(), roles = [rnd(list(roles.values()))], active=1, password='<PASSWORD>'))
self.db.session.add(users[len(users)-1])
self.db.session.commit()
return users
def _roles(self, roles):
roles = {role: Role(name=role, description='Lorem ipsum sit amet') for role in roles}
for r in roles.values(): self.db.session.add(r)
self.db.session.commit()
return roles
def get_or_create_users(db, n):
users = User.query.all()
if len(users) < n:
users += RandomUser(db).generate(n - len(users))
return users
class CommentGenerator(object):
def __init__(self, db_session):
self.db = db_session
def generate(self, n, parent=0, replies=False):
parent = None if parent == 0 else parent
comments = []
for _ in range(n):
c = Comment(text=fake.sentence(nb_words=15), parent_id=parent)
if replies:
c.replies = self.generate(10, parent=c.id, replies=False)
self.db.session.add(c)
comments.append(c)
self.db.session.commit()
return comments
class RandomPost(object):
def __init__(self, db_session):
self.db = db_session
def generate(self, n):
posts = []
users = get_or_create_users(self.db, 10)
for _ in range(n):
comments = CommentGenerator(db_session=self.db).generate(10, parent=0, replies=True)
posts.append( Post(
poster_id=rnd(users).id, title=fake.sentence(nb_words=10),
link='https://placehold.it/400x70', score=random.randint(32, 256), comments=comments
))
self.db.session.add(posts[len(posts) -1])
self.db.session.commit()
def get_or_create_posts(db, n):
posts = Post.query.all()
if len(posts) < n:
posts += RandomPost(db).generate(n - len(posts))
return posts
class RandomJobListing(object):
def __init__(self, db_session):
self.db = db_session
def generate(self, n):
jobs = []
users = get_or_create_users(self.db, 10)
for _ in range(n):
jobs.append(JobListing(poster_id=rnd(users).id, title=fake.sentence(nb_words=10)))
self.db.session.add(jobs[len(jobs) -1])
self.db.session.commit()
def get_or_create_jobs(db, n):
jobs = JobListing.query.all()
if len(jobs) < n:
jobs += RandomJobListing(db).generate(n - len(jobs))
return jobs
def get_or_create_comments(db, n):
comments = Comment.query.all()
if len(comments) < n:
comments += RandomJobListing(db).generate(n - len(comments))
return comments
class RandomBan(object):
def __init__(self, db_session):
self.db = db_session
def generate(self, n):
users = get_or_create_users(self.db, 10)
bans = []
for _ in range(n):
bans.append(Ban(
user_id = rnd(users).id,
ban_date = datetime.now(), expiry_date = datetime.now(),
post = rnd([True, False]),
comment = rnd([True, False]), vote = rnd([i for i in range(100)])
))
self.db.session.add(bans[len(bans)-1])
self.db.session.commit()
class RandomBanAppeal(object):
    """Seeds the database with random BanAppeal rows."""

    def __init__(self, db_session):
        self.db = db_session

    def get_or_create_bans(self, n):
        """Return at least *n* Ban rows, generating the shortfall.

        Bug fix: the original generated Users (via RandomUser) when bans
        were missing; it must generate Bans.
        """
        bans = Ban.query.all()
        if len(bans) < n:
            bans += RandomBan(self.db).generate(n - len(bans))
        return bans

    def generate(self, n):
        """Create and persist *n* ban appeals; return the created appeals.

        Bug fix: the original returned None (and ended with a stray `pass`).
        """
        appeals = []
        bans = self.get_or_create_bans(n)
        for _ in range(n):
            ban = rnd(bans)
            appeal = BanAppeal(
                ban_id=ban.id,
                user_id=ban.user_id,
                appeal_reason=fake.sentence(nb_words=20),
                creation_date=datetime.now(),
            )
            appeals.append(appeal)
            self.db.session.add(appeal)
        self.db.session.commit()
        return appeals
class RandomGDPR(object):
    """Seeds the database with random GDPRRequest rows."""

    def __init__(self, db_session):
        self.db = db_session

    def generate(self, n):
        """Create, persist and return *n* GDPR requests."""
        users = get_or_create_users(self.db, n)
        requests = []
        while len(requests) < n:
            req = GDPRRequest(
                user_id=rnd(users).id,
                creation_date=datetime.now(),
                fulfillment_date=datetime.now(),
            )
            self.db.session.add(req)
            requests.append(req)
        self.db.session.commit()
        return requests
class RandomDMCA(object):
    """Seeds the database with random DMCATakeDown rows."""

    def __init__(self, db_session):
        self.db = db_session

    def generate(self, n):
        """Create, persist and return *n* DMCA take-down requests."""
        takedowns = []
        for _ in range(n):
            td = DMCATakeDown(
                issuer_name=fake.sentence(nb_words=3),
                issuer_reason=fake.sentence(nb_words=30),
                creation_date=datetime.now(),
                fulfillment_date=datetime.now(),
            )
            self.db.session.add(td)
            takedowns.append(td)
        self.db.session.commit()
        return takedowns
class RandomReport(object):
    # Seeds the database with Report rows: each iteration creates one post
    # report, one job report and one comment report (3*n reports total).
    def __init__(self, db_session):
        self.db = db_session

    def generate(self, n):
        # Create and persist 3*n reports; return them.
        reports = []
        for _ in range(n):
            for r in [self.report_post(), self.report_job(), self.report_comment()]:
                self.db.session.add(r)
                reports.append(r)
        self.db.session.commit()
        return reports

    def report_post(self):
        # Build (but do not persist) a report against a random post.
        return Report(
            reason=fake.sentence(nb_words=20),
            reporter_id=rnd(get_or_create_users(self.db, 10)).id,
            post_id=rnd(get_or_create_posts(self.db, 10)).id,
            creation_date=datetime.now()
        )

    def report_job(self):
        # Build (but do not persist) a report against a random job listing.
        return Report(
            reason=fake.sentence(nb_words=20),
            reporter_id=rnd(get_or_create_users(self.db, 10)).id,
            job_id=rnd(get_or_create_jobs(self.db, 10)).id,
            creation_date=datetime.now()
        )

    def report_comment(self):
        # Build (but do not persist) a report against a random comment.
        return Report(
            reason=fake.sentence(nb_words=20),
            reporter_id=rnd(get_or_create_users(self.db, 10)).id,
            comment_id=rnd(get_or_create_comments(self.db, 10)).id,
            creation_date=datetime.now()
        )
|
import random
from datetime import datetime

from DummyNews.models import User, Role, Post, Comment, JobListing, BanAppeal, Ban, GDPRRequest, DMCATakeDown, Report
from faker import Factory
from faker.providers import lorem, internet

# Shared Faker instance used by every generator in this module.
fake = Factory.create()
fake.add_provider(lorem)
fake.add_provider(internet)

# Pick a uniformly random element from a non-empty sequence.
rnd = lambda a: a[random.randint(0, len(a) - 1)]
class RandomUser(object):
    # Seeds the database with random User rows (and the Role rows they need).
    Roles = ["Admin", "Moderator", "User"]  # role names created by _roles()

    def __init__(self, db_session):
        self.db = db_session

    def generate(self, n):
        # Create and persist n users, each with one randomly chosen role;
        # return the created users.
        users = []
        roles = self._roles(RandomUser.Roles)
        # NOTE(review): shadows the module-level `rnd` helper with an
        # identical local definition — the local copy looks redundant.
        rnd = lambda a: a[random.randint(0, len(a) - 1)]
        for _ in range(n):
            users.append(User(email=fake.email(), username=fake.user_name(), roles=[rnd(list(roles.values()))], active=1, password='<PASSWORD>'))
            self.db.session.add(users[len(users) - 1])
        self.db.session.commit()
        return users

    def _roles(self, roles):
        # Create and persist one Role row per name; return {name: Role}.
        roles = {role: Role(name=role, description='Lorem ipsum sit amet') for role in roles}
        for r in roles.values():
            self.db.session.add(r)
        self.db.session.commit()
        return roles
def get_or_create_users(db, n):
    # Return at least n User rows, generating the shortfall with RandomUser.
    users = User.query.all()
    if len(users) < n:
        users += RandomUser(db).generate(n - len(users))
    return users
class CommentGenerator(object):
    # Seeds the database with random Comment rows, optionally with one
    # level of nested replies.
    def __init__(self, db_session):
        self.db = db_session

    def generate(self, n, parent=0, replies=False):
        # Create and persist n comments; parent=0 means "top-level comment".
        # Returns the created comments.
        parent = None if parent == 0 else parent
        comments = []
        for _ in range(n):
            c = Comment(text=fake.sentence(nb_words=15), parent_id=parent)
            if replies:
                # NOTE(review): c has not been flushed yet, so c.id is
                # presumably still None here — the replies may end up
                # parented to NULL; confirm against the ORM configuration.
                c.replies = self.generate(10, parent=c.id, replies=False)
            self.db.session.add(c)
            comments.append(c)
        self.db.session.commit()
        return comments
class RandomPost(object):
    """Seeds the database with random Post rows (each with nested comments)."""

    def __init__(self, db_session):
        self.db = db_session

    def generate(self, n):
        """Create and persist *n* posts; return the list of created posts.

        Bug fix: the original never returned ``posts``, so callers doing
        ``posts += RandomPost(db).generate(...)`` raised a TypeError.
        """
        posts = []
        users = get_or_create_users(self.db, 10)
        for _ in range(n):
            comments = CommentGenerator(db_session=self.db).generate(10, parent=0, replies=True)
            post = Post(
                poster_id=rnd(users).id,
                title=fake.sentence(nb_words=10),
                link='https://placehold.it/400x70',
                score=random.randint(32, 256),
                comments=comments,
            )
            posts.append(post)
            self.db.session.add(post)
        self.db.session.commit()
        return posts
def get_or_create_posts(db, n):
    # Return at least n Post rows; generate the shortfall with RandomPost.
    # NOTE(review): RandomPost.generate in this file returns None, so the
    # `posts +=` below raises TypeError whenever posts are missing — confirm.
    posts = Post.query.all()
    if len(posts) < n:
        posts += RandomPost(db).generate(n - len(posts))
    return posts
class RandomJobListing(object):
    """Seeds the database with random JobListing rows."""

    def __init__(self, db_session):
        self.db = db_session

    def generate(self, n):
        """Create and persist *n* job listings; return the created listings.

        Bug fix: the original returned None, breaking get_or_create_jobs().
        """
        jobs = []
        users = get_or_create_users(self.db, 10)
        for _ in range(n):
            job = JobListing(poster_id=rnd(users).id, title=fake.sentence(nb_words=10))
            jobs.append(job)
            self.db.session.add(job)
        self.db.session.commit()
        return jobs
def get_or_create_jobs(db, n):
    # Return at least n JobListing rows; generate the shortfall.
    # NOTE(review): RandomJobListing.generate in this file returns None, so
    # the `jobs +=` below raises TypeError when jobs are missing — confirm.
    jobs = JobListing.query.all()
    if len(jobs) < n:
        jobs += RandomJobListing(db).generate(n - len(jobs))
    return jobs
def get_or_create_comments(db, n):
    """Return at least *n* Comment rows, generating the shortfall.

    Bug fix: the original called RandomJobListing, which creates job
    listings, not comments; use CommentGenerator instead.
    """
    comments = Comment.query.all()
    if len(comments) < n:
        comments += CommentGenerator(db_session=db).generate(n - len(comments))
    return comments
class RandomBan(object):
    """Seeds the database with random Ban rows."""

    def __init__(self, db_session):
        self.db = db_session

    def generate(self, n):
        """Create and persist *n* bans; return the created bans.

        Bug fix: the original returned None, so callers (e.g.
        RandomBanAppeal) could not accumulate the generated bans.
        """
        users = get_or_create_users(self.db, 10)
        bans = []
        for _ in range(n):
            ban = Ban(
                user_id=rnd(users).id,
                ban_date=datetime.now(),
                expiry_date=datetime.now(),
                post=rnd([True, False]),
                comment=rnd([True, False]),
                # idiom: direct randint instead of picking from a temp list
                vote=random.randint(0, 99),
            )
            bans.append(ban)
            self.db.session.add(ban)
        self.db.session.commit()
        return bans
class RandomBanAppeal(object):
    """Seeds the database with random BanAppeal rows."""

    def __init__(self, db_session):
        self.db = db_session

    def get_or_create_bans(self, n):
        """Return at least *n* Ban rows, generating the shortfall.

        Bug fix: the original generated Users (via RandomUser) when bans
        were missing; it must generate Bans.
        """
        bans = Ban.query.all()
        if len(bans) < n:
            bans += RandomBan(self.db).generate(n - len(bans))
        return bans

    def generate(self, n):
        """Create and persist *n* ban appeals; return the created appeals.

        Bug fix: the original returned None (and ended with a stray `pass`).
        """
        appeals = []
        bans = self.get_or_create_bans(n)
        for _ in range(n):
            ban = rnd(bans)
            appeal = BanAppeal(
                ban_id=ban.id,
                user_id=ban.user_id,
                appeal_reason=fake.sentence(nb_words=20),
                creation_date=datetime.now(),
            )
            appeals.append(appeal)
            self.db.session.add(appeal)
        self.db.session.commit()
        return appeals
class RandomGDPR(object):
    """Seeds the database with random GDPRRequest rows."""

    def __init__(self, db_session):
        self.db = db_session

    def generate(self, n):
        """Create, persist and return *n* GDPR requests."""
        users = get_or_create_users(self.db, n)
        requests = []
        while len(requests) < n:
            req = GDPRRequest(
                user_id=rnd(users).id,
                creation_date=datetime.now(),
                fulfillment_date=datetime.now(),
            )
            self.db.session.add(req)
            requests.append(req)
        self.db.session.commit()
        return requests
class RandomDMCA(object):
    """Seeds the database with random DMCATakeDown rows."""

    def __init__(self, db_session):
        self.db = db_session

    def generate(self, n):
        """Create, persist and return *n* DMCA take-down requests."""
        takedowns = []
        for _ in range(n):
            td = DMCATakeDown(
                issuer_name=fake.sentence(nb_words=3),
                issuer_reason=fake.sentence(nb_words=30),
                creation_date=datetime.now(),
                fulfillment_date=datetime.now(),
            )
            self.db.session.add(td)
            takedowns.append(td)
        self.db.session.commit()
        return takedowns
class RandomReport(object):
    # Seeds the database with Report rows: each iteration creates one post
    # report, one job report and one comment report (3*n reports total).
    def __init__(self, db_session):
        self.db = db_session

    def generate(self, n):
        # Create and persist 3*n reports; return them.
        reports = []
        for _ in range(n):
            for r in [self.report_post(), self.report_job(), self.report_comment()]:
                self.db.session.add(r)
                reports.append(r)
        self.db.session.commit()
        return reports

    def report_post(self):
        # Build (but do not persist) a report against a random post.
        return Report(
            reason=fake.sentence(nb_words=20),
            reporter_id=rnd(get_or_create_users(self.db, 10)).id,
            post_id=rnd(get_or_create_posts(self.db, 10)).id,
            creation_date=datetime.now()
        )

    def report_job(self):
        # Build (but do not persist) a report against a random job listing.
        return Report(
            reason=fake.sentence(nb_words=20),
            reporter_id=rnd(get_or_create_users(self.db, 10)).id,
            job_id=rnd(get_or_create_jobs(self.db, 10)).id,
            creation_date=datetime.now()
        )

    def report_comment(self):
        # Build (but do not persist) a report against a random comment.
        return Report(
            reason=fake.sentence(nb_words=20),
            reporter_id=rnd(get_or_create_users(self.db, 10)).id,
            comment_id=rnd(get_or_create_comments(self.db, 10)).id,
            creation_date=datetime.now()
        )
|
none
| 1
| 2.475774
| 2
|
|
stubs.min/System/ComponentModel/__init___parts/NestedContainer.py
|
ricardyn/ironpython-stubs
| 1
|
6629936
|
<reponame>ricardyn/ironpython-stubs<gh_stars>1-10
class NestedContainer(Container, IContainer, IDisposable, INestedContainer):
    """
    Provides the base implementation for the System.ComponentModel.INestedContainer interface,which enables containers to have an owning component.

    NestedContainer(owner: IComponent)
    """
    # NOTE: auto-generated IronPython stub of System.ComponentModel.NestedContainer.
    # Every method body is intentionally `pass`; only signatures/docs matter.

    def CreateSite(self, *args):
        """
        CreateSite(self: NestedContainer,component: IComponent,name: str) -> ISite

        Creates a site for the component within the container.

        component: The System.ComponentModel.IComponent to create a site for.
        name: The name to assign to component,or null to skip the name assignment.
        Returns: The newly created System.ComponentModel.ISite.
        """
        pass

    def Dispose(self):
        """
        Dispose(self: NestedContainer,disposing: bool)

        Releases the resources used by the nested container.

        disposing: true to release both managed and unmanaged resources; false to release only
        unmanaged resources.
        """
        pass

    def GetService(self, *args):
        """
        GetService(self: NestedContainer,service: Type) -> object

        Gets the service object of the specified type,if it is available.

        service: The System.Type of the service to retrieve.
        Returns: An System.Object that implements the requested service,or null if the service
        cannot be resolved.
        """
        pass

    def RemoveWithoutUnsiting(self, *args):
        """
        RemoveWithoutUnsiting(self: Container,component: IComponent)

        Removes a component from the System.ComponentModel.Container without setting
        System.ComponentModel.IComponent.Site to null.

        component: The component to remove.
        """
        pass

    def ValidateName(self, *args):
        """
        ValidateName(self: Container,component: IComponent,name: str)

        Determines whether the component name is unique for this container.

        component: The named component.
        name: The component name to validate.
        """
        pass

    def __enter__(self, *args):
        """ __enter__(self: IDisposable) -> object """
        pass

    def __exit__(self, *args):
        """ __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
        pass

    def __init__(self, *args):
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        pass

    @staticmethod
    def __new__(self, owner):
        """ __new__(cls: type,owner: IComponent) """
        pass

    Owner = property(lambda self: object(), lambda self, v: None, lambda self: None)
    """Gets the owning component for this nested container.

    Get: Owner(self: NestedContainer) -> IComponent
    """

    OwnerName = property(lambda self: object(), lambda self, v: None, lambda self: None)
    """Gets the name of the owning component.
    """
|
class NestedContainer(Container, IContainer, IDisposable, INestedContainer):
    """
    Provides the base implementation for the System.ComponentModel.INestedContainer interface,which enables containers to have an owning component.

    NestedContainer(owner: IComponent)
    """
    # NOTE: auto-generated IronPython stub of System.ComponentModel.NestedContainer.
    # Every method body is intentionally `pass`; only signatures/docs matter.

    def CreateSite(self, *args):
        """
        CreateSite(self: NestedContainer,component: IComponent,name: str) -> ISite

        Creates a site for the component within the container.

        component: The System.ComponentModel.IComponent to create a site for.
        name: The name to assign to component,or null to skip the name assignment.
        Returns: The newly created System.ComponentModel.ISite.
        """
        pass

    def Dispose(self):
        """
        Dispose(self: NestedContainer,disposing: bool)

        Releases the resources used by the nested container.

        disposing: true to release both managed and unmanaged resources; false to release only
        unmanaged resources.
        """
        pass

    def GetService(self, *args):
        """
        GetService(self: NestedContainer,service: Type) -> object

        Gets the service object of the specified type,if it is available.

        service: The System.Type of the service to retrieve.
        Returns: An System.Object that implements the requested service,or null if the service
        cannot be resolved.
        """
        pass

    def RemoveWithoutUnsiting(self, *args):
        """
        RemoveWithoutUnsiting(self: Container,component: IComponent)

        Removes a component from the System.ComponentModel.Container without setting
        System.ComponentModel.IComponent.Site to null.

        component: The component to remove.
        """
        pass

    def ValidateName(self, *args):
        """
        ValidateName(self: Container,component: IComponent,name: str)

        Determines whether the component name is unique for this container.

        component: The named component.
        name: The component name to validate.
        """
        pass

    def __enter__(self, *args):
        """ __enter__(self: IDisposable) -> object """
        pass

    def __exit__(self, *args):
        """ __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
        pass

    def __init__(self, *args):
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        pass

    @staticmethod
    def __new__(self, owner):
        """ __new__(cls: type,owner: IComponent) """
        pass

    Owner = property(lambda self: object(), lambda self, v: None, lambda self: None)
    """Gets the owning component for this nested container.

    Get: Owner(self: NestedContainer) -> IComponent
    """

    OwnerName = property(lambda self: object(), lambda self, v: None, lambda self: None)
    """Gets the name of the owning component.
    """
|
en
| 0.618441
|
Provides the base implementation for the System.ComponentModel.INestedContainer interface,which enables containers to have an owning component.
NestedContainer(owner: IComponent) CreateSite(self: NestedContainer,component: IComponent,name: str) -> ISite
Creates a site for the component within the container.
component: The System.ComponentModel.IComponent to create a site for.
name: The name to assign to component,or null to skip the name assignment.
Returns: The newly created System.ComponentModel.ISite. Dispose(self: NestedContainer,disposing: bool)
Releases the resources used by the nested container.
disposing: true to release both managed and unmanaged resources; false to release only
unmanaged resources. GetService(self: NestedContainer,service: Type) -> object
Gets the service object of the specified type,if it is available.
service: The System.Type of the service to retrieve.
Returns: An System.Object that implements the requested service,or null if the service
cannot be resolved. RemoveWithoutUnsiting(self: Container,component: IComponent)
Removes a component from the System.ComponentModel.Container without setting
System.ComponentModel.IComponent.Site to null.
component: The component to remove. ValidateName(self: Container,component: IComponent,name: str)
Determines whether the component name is unique for this container.
component: The named component.
name: The component name to validate. __enter__(self: IDisposable) -> object __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature __new__(cls: type,owner: IComponent) Gets the owning component for this nested container.
Get: Owner(self: NestedContainer) -> IComponent Gets the name of the owning component.
| 2.518811
| 3
|
doc_summarizer/input_handler/read_txt.py
|
tufengxu/DocumentSummarizer
| 0
|
6629937
|
<filename>doc_summarizer/input_handler/read_txt.py
# ignore all empty lines
def read_txt(txt):
    """Read lines from an open text file (or any iterable of strings),
    stripping leading/trailing newlines and skipping empty lines.

    Bug fix: the old implementation kept empty lines even though the
    comment above promises to ignore them.
    """
    lines = []
    for line in txt:
        stripped = line.strip("\n")
        if stripped:
            lines.append(stripped)
    return lines
if __name__ == '__main__':
    # Demo: load the example file and print each stored line with its index.
    with open("example.txt", "r") as my_txt:
        stored = read_txt(my_txt)
    for idx, item in enumerate(stored):
        print("Line ID: ", idx, " Line: ", item)
|
<filename>doc_summarizer/input_handler/read_txt.py
# ignore all empty lines
def read_txt(txt):
    """Read lines from an open text file (or any iterable of strings),
    stripping leading/trailing newlines and skipping empty lines.

    Bug fix: the old implementation kept empty lines even though the
    comment above promises to ignore them.
    """
    lines = []
    for line in txt:
        stripped = line.strip("\n")
        if stripped:
            lines.append(stripped)
    return lines
if __name__ == '__main__':
    # Demo: load the example file and print each stored line with its index.
    with open("example.txt", "r") as my_txt:
        stored = read_txt(my_txt)
    for idx, item in enumerate(stored):
        print("Line ID: ", idx, " Line: ", item)
|
en
| 0.356686
|
# ignore all empty lines
| 3.835516
| 4
|
shamiko/proc_utils.py
|
bonprosoft/shamiko
| 12
|
6629938
|
import os
from typing import Optional
import psutil
def _get_proc(pid):
    # type: (int) -> Optional[psutil.Process]
    """Return the psutil.Process for *pid*, or None if it no longer exists."""
    try:
        return psutil.Process(pid)
    except psutil.NoSuchProcess:
        return None
def pid_exists(pid):
    # type: (int) -> bool
    """Thin wrapper around psutil.pid_exists."""
    return psutil.pid_exists(pid)
def guess_executable(pid):
    # type: (int) -> Optional[str]
    """Best-effort absolute path to the executable behind *pid* (None if gone)."""
    process = _get_proc(pid)
    if process is not None:
        return os.path.abspath(process.exe())
    return None
def guess_context_dir(pid):
    # type: (int) -> Optional[str]
    """Best-effort absolute working directory of *pid* (None if gone)."""
    process = _get_proc(pid)
    if process is not None:
        return os.path.abspath(process.cwd())
    return None
|
import os
from typing import Optional
import psutil
def _get_proc(pid):
    # type: (int) -> Optional[psutil.Process]
    """Return the psutil.Process for *pid*, or None if it no longer exists."""
    try:
        return psutil.Process(pid)
    except psutil.NoSuchProcess:
        return None
def pid_exists(pid):
    # type: (int) -> bool
    """Thin wrapper around psutil.pid_exists."""
    return psutil.pid_exists(pid)
def guess_executable(pid):
    # type: (int) -> Optional[str]
    """Best-effort absolute path to the executable behind *pid* (None if gone)."""
    process = _get_proc(pid)
    if process is not None:
        return os.path.abspath(process.exe())
    return None
def guess_context_dir(pid):
    # type: (int) -> Optional[str]
    """Best-effort absolute working directory of *pid* (None if gone)."""
    process = _get_proc(pid)
    if process is not None:
        return os.path.abspath(process.cwd())
    return None
|
en
| 0.508718
|
# type: (int) -> Optional[psutil.Process] # type: (int) -> bool # type: (int) -> Optional[str] # type: (int) -> Optional[str]
| 2.508101
| 3
|
dbt_sugar/core/task/base.py
|
jessica-ol/dbt-sugar
| 94
|
6629939
|
<gh_stars>10-100
"""API definition for Task-like objects."""
import abc
import os
import re
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple, Union
from dbt_sugar.core.clients.dbt import DbtProfile
from dbt_sugar.core.clients.yaml_helpers import open_yaml, save_yaml
from dbt_sugar.core.config.config import DbtSugarConfig
from dbt_sugar.core.connectors.postgres_connector import PostgresConnector
from dbt_sugar.core.connectors.redshift_connector import RedshiftConnector
from dbt_sugar.core.connectors.snowflake_connector import SnowflakeConnector
from dbt_sugar.core.flags import FlagParser
from dbt_sugar.core.logger import GLOBAL_LOGGER as logger
COLUMN_NOT_DOCUMENTED = "No description for this column."
MODEL_NOT_DOCUMENTED = "No description for this model."
DEFAULT_EXCLUDED_FOLDERS_PATTERN = r"\/target\/|\/dbt_modules\/"
DEFAULT_EXCLUDED_YML_FILES = r"dbt_project.yml|packages.yml"
DB_CONNECTORS = {
"postgres": PostgresConnector,
"snowflake": SnowflakeConnector,
"redshift": RedshiftConnector,
}
class BaseTask(abc.ABC):
    """Sets up basic API for task-like classes."""

    def __init__(
        self,
        flags: FlagParser,
        dbt_path: Path,
        sugar_config: DbtSugarConfig,
        dbt_profile: DbtProfile,
    ) -> None:
        self.repository_path = dbt_path
        self._sugar_config = sugar_config
        self._flags = flags
        self._dbt_profile = dbt_profile
        # Populated by class methods
        self._excluded_folders_from_search_pattern: str = self.setup_paths_exclusion()
        # model name -> path of the schema.yml that documents it
        self.all_dbt_models: Dict[str, Path] = {}
        # column name -> last seen description (shared across all models)
        self.dbt_definitions: Dict[str, str] = {}
        # model name -> list of {"name": column, "tests": [...]} entries
        self.dbt_tests: Dict[str, List[Dict[str, Any]]] = {}
        self.build_descriptions_dictionary()

    def get_connector(self) -> Union[PostgresConnector, SnowflakeConnector, RedshiftConnector]:
        """Instantiate the DB connector that matches the dbt profile's `type`."""
        dbt_credentials = self._dbt_profile.profile
        connector = DB_CONNECTORS.get(dbt_credentials.get("type", ""))
        if not connector:
            raise NotImplementedError(
                f"Connector '{dbt_credentials.get('type')}' is not implemented."
            )
        return connector(dbt_credentials)

    def setup_paths_exclusion(self) -> str:
        """Appends excluded_folders to the default folder exclusion patten."""
        if self._sugar_config.dbt_project_info["excluded_folders"]:
            excluded_folders_from_search_pattern: str = r"\/|\/".join(
                self._sugar_config.dbt_project_info["excluded_folders"]
            )
            return fr"{DEFAULT_EXCLUDED_FOLDERS_PATTERN}|\/{excluded_folders_from_search_pattern}\/"
        else:
            return DEFAULT_EXCLUDED_FOLDERS_PATTERN

    def get_column_description_from_dbt_definitions(self, column_name: str) -> str:
        """Searches for the description of a column in all the descriptions in DBT.

        Args:
            column_name (str): column name to get the description from.

        Returns:
            str: with the description of the column.
        """
        return self.dbt_definitions.get(column_name, COLUMN_NOT_DOCUMENTED)

    def get_documented_columns(
        self, schema_content: Dict[str, Any], model_name: str
    ) -> Dict[str, str]:
        """Method to get the documented columns from a model in a schema.yml.

        Args:
            content (Dict[str, Any]): content of the schema.yml.
            model_name (str): model name to get the columns from.

        Returns:
            Dict[str, str]: with the columns names and descriptions documented.
        """
        documented_columns = {}
        for model in schema_content.get("models", []):
            if model["name"] == model_name:
                for column in model.get("columns", []):
                    # A column counts as documented only when its description
                    # differs from the placeholder text.
                    if column.get("description", COLUMN_NOT_DOCUMENTED) != COLUMN_NOT_DOCUMENTED:
                        documented_columns[column["name"]] = column["description"]
        return documented_columns

    def column_has_primary_key_tests(
        self, schema_content: Dict[str, Any], model_name: str, column_name: str
    ) -> Optional[bool]:
        """Method to check that the column with the primary key have the unique and not_null tests.

        Args:
            schema_content (Dict[str, Any]): content of the schema.yml.
            model_name (str): model name to check.
            column_name (str): column name with the primary key.

        Returns:
            Optional[bool]: True if the column have unique and not_null tests,
            False if is missing one of them, None if the column don't exists.
        """
        for model in schema_content.get("models", []):
            if model["name"] == model_name:
                for column in model.get("columns", []):
                    if column.get("name", "") == column_name:
                        column_tests = column.get("tests", [])
                        return "unique" in column_tests and "not_null" in column_tests
        return None

    def get_not_documented_columns(
        self, schema_content: Dict[str, Any], model_name: str
    ) -> Dict[str, str]:
        """Method to get the undocumented columns from a model in a schema.yml.

        Args:
            schema_content (Dict[str, Any]): content of the schema.yml.
            model_name (str): model name to get the columns from.

        Returns:
            Dict[str, str]: with the columns names and descriptions undocumented.
        """
        not_documented_columns = {}
        for model in schema_content.get("models", []):
            if model["name"] == model_name:
                for column in model.get("columns", []):
                    if column.get("description", COLUMN_NOT_DOCUMENTED) == COLUMN_NOT_DOCUMENTED:
                        not_documented_columns[column["name"]] = COLUMN_NOT_DOCUMENTED
        return not_documented_columns

    def combine_two_list_without_duplicates(self, list1: List[Any], list2: List[Any]) -> List[Any]:
        """
        Method to combine two list without duplicates.

        Args:
            list1 (List[Any]): First list with any value.
            list2 (List[Any]): Second list with any value.

        Returns:
            List[Any]: with the combine lists.
        """
        if not list1:
            return list2
        # NOTE: mutates and returns list2 (items from list1 are appended to it).
        for item in list1:
            if item not in list2:
                list2.append(item)
        return list2

    def update_model_description_test_tags(
        self,
        path_file: Path,
        model_name: str,
        dict_column_description_to_update: Dict[str, Dict[str, Any]],
    ):
        """
        Method to update a schema.yml with a Dict of columns names, tests, and tags.

        Args:
            path_file (Path): Path of the schema.yml file to update.
            model_name (str): Name of the model to update.
            dict_column_description_to_update (Dict[str, Dict[str, Any]]): Dict with the column name with
                the description, tags and tests to update.
        """
        content = open_yaml(
            path_file,
            preserve_yaml_order=self._sugar_config.config.get("preserve_yaml_order", False),
        )
        for model in content.get("models", []):
            if model["name"] == model_name:
                for column in model.get("columns", []):
                    column_name = column["name"]
                    if column_name in dict_column_description_to_update:
                        # Update the description
                        description = dict_column_description_to_update[column_name].get(
                            "description"
                        )
                        if description:
                            column["description"] = description

                        # Update the tests without duplicating them.
                        tests = dict_column_description_to_update[column_name].get("tests")
                        if tests:
                            column["tests"] = self.combine_two_list_without_duplicates(
                                column.get("tests", []), tests
                            )

                        # Update the tags without duplicating them.
                        tags = dict_column_description_to_update[column_name].get("tags")
                        if tags:
                            column["tags"] = self.combine_two_list_without_duplicates(
                                column.get("tags", []), tags
                            )
        save_yaml(
            path_file,
            content,
            preserve_yaml_order=self._sugar_config.config.get("preserve_yaml_order", False),
        )

    def update_column_description_from_schema(
        self, path_file: Path, dict_column_description_to_update: Dict[str, Dict[str, Any]]
    ) -> None:
        """Method to update a schema.yml with a Dict of columns names and description.

        Args:
            path_file (Path): Path to the schema.yml file to update the columns descriptions from.
            dict_column_description_to_update (Dict[str, Dict[str, Any]]): Dict with the column name with
                the description to update.
        """
        content = open_yaml(
            path_file,
            preserve_yaml_order=self._sugar_config.config.get("preserve_yaml_order", False),
        )
        # Unlike update_model_description_test_tags, this updates the column in
        # EVERY model of the file, not just one model.
        for model in content.get("models", []):
            for column in model.get("columns", []):
                column_name = column["name"]
                if column_name in dict_column_description_to_update:
                    new_description = dict_column_description_to_update[column_name].get(
                        "description"
                    )
                    if new_description:
                        column["description"] = new_description
        save_yaml(
            path_file,
            content,
            preserve_yaml_order=self._sugar_config.config.get("preserve_yaml_order", False),
        )

    def update_column_descriptions(
        self, dict_column_description_to_update: Dict[str, Dict[str, Any]]
    ) -> None:
        """Method to update all the schema.ymls from a dbt project with a Dict of columns names and description.

        Args:
            dict_column_description_to_update (Dict[str, Dict[str, Any]]): Dict with the column name with
                the description to update.
        """
        for root, _, files in os.walk(self.repository_path):
            if not re.search(self._excluded_folders_from_search_pattern, root):
                # Keep only yml files that are not project/package config files.
                files = [
                    f
                    for f in files
                    if f.lower().endswith(".yml")
                    and not re.search(DEFAULT_EXCLUDED_YML_FILES, f.lower())
                ]
                for file in files:
                    path_file = Path(os.path.join(root, file))
                    self.update_column_description_from_schema(
                        path_file, dict_column_description_to_update
                    )

    def update_test_in_dbt_tests(self, model_name: str, column: Dict[str, Any]) -> None:
        """Update a column tests in the global tests dictionary.

        Args:
            model_name (str): with the model name.
            column (Dict[str, Any]): column information.
        """
        if model_name not in self.dbt_tests:
            self.dbt_tests[model_name] = [
                {"name": column["name"], "tests": column.get("tests", [])}
            ]
        else:
            self.dbt_tests[model_name].append(
                {"name": column["name"], "tests": column.get("tests", [])}
            )

    def update_description_in_dbt_descriptions(
        self, column_name: str, column_description: str
    ) -> None:
        """Update a column description in the global description dictionary.

        Args:
            column_name (str): column name to update.
            column_description (str): column description to update.
        """
        if not column_description:
            column_description = COLUMN_NOT_DOCUMENTED
        self.dbt_definitions[column_name] = column_description

    def remove_excluded_models(self, content: Dict[str, Any]) -> Optional[List[Dict[str, Any]]]:
        """Removes models that are excluded_models from the models dict"""
        models = content.get("models", [])
        # if self._sugar_config.dbt_project_info.get("excluded_models"):
        logger.debug(models)
        if models:
            return [
                model_dict
                for model_dict in models
                if model_dict["name"] not in self._sugar_config.dbt_project_info["excluded_models"]
            ]
        return None

    def read_file(self, filename_path: Path) -> str:
        """
        Method to read a file.

        Args:
            filename_path (Path): full path to the file we want to read.

        Returns:
            str: content of the file (empty string when the file does not exist).
        """
        content = ""
        if Path(filename_path).exists():
            with open(filename_path, "r") as reader:
                content = reader.read()
        return content

    def load_descriptions_from_a_schema_file(
        self, content: Dict[str, Any], path_schema: Path
    ) -> None:
        """Load the columns descriptions from a schema.yml into the global descriptions cache.

        This cache is used so that we can homogenise descriptions across models and import
        already documented ones.

        Args:
            content (Dict[str, Any]): content of the schema.yaml.
        """
        if not content:
            return
        models = self.remove_excluded_models(content)
        if not models:
            return
        for model in models:
            self.all_dbt_models[model["name"]] = path_schema
            for column in model.get("columns", []):
                column_description = column.get("description", None)
                self.update_description_in_dbt_descriptions(column["name"], column_description)
                self.update_test_in_dbt_tests(model["name"], column)

    def get_file_path_from_sql_model(self, model_name: str) -> Optional[Path]:
        """Get the complete file path from a model name.

        Args:
            model_name (str): with the model name to find.

        Returns:
            Optional[Path]: Path of the SQL file, None if the file doens't exists.
        """
        for root, _, files in os.walk(self.repository_path):
            if not re.search(self._excluded_folders_from_search_pattern, root):
                for file_name in files:
                    file_name = file_name.lower()
                    if file_name == f"{model_name}.sql" and not re.search(
                        DEFAULT_EXCLUDED_YML_FILES, file_name
                    ):
                        return Path(os.path.join(root, file_name))
        return None

    def build_descriptions_dictionary(self) -> None:
        """Load the columns descriptions from all schema files in a dbt project.

        This is purely responsble for building the knowledge of all possible definitions.
        In other words it is independent from the documentation orchestration.
        This happens in the `doc` task
        """
        for root, _, files in os.walk(self.repository_path):
            if not re.search(self._excluded_folders_from_search_pattern, root):
                files = [
                    f
                    for f in files
                    if f.lower().endswith(".yml")
                    and not re.search(DEFAULT_EXCLUDED_YML_FILES, f.lower())
                ]
                for file in files:
                    path_file = Path(os.path.join(root, file))
                    content = open_yaml(
                        path_file,
                        preserve_yaml_order=self._sugar_config.config.get(
                            "preserve_yaml_order", False
                        ),
                    )
                    logger.debug(path_file)
                    if content.get("models"):
                        self.load_descriptions_from_a_schema_file(content, path_file)

    def is_model_in_schema_content(self, content, model_name) -> bool:
        """Method to check if a model exists in a schema.yaml content.

        Args:
            content (Dict[str, Any]): content of the schema.yaml.
            model_name (str): model name to search.

        Returns:
            boolean: is true if the model is present in the schema.yaml.
        """
        if not content:
            return False
        return any(model["name"] == model_name for model in content.get("models", []))

    def find_model_schema_file(self, model_name: str) -> Tuple[Optional[Path], bool, bool]:
        # Locate the schema.yml that should document `model_name`.
        # Returns (schema_file_path, schema_file_exists, is_already_documented).
        # NOTE(review): the state flags are re-initialised for every walked
        # directory, so results only reflect the directory containing the
        # model's .sql file — confirm this is intended.
        for root, _, files in os.walk(self.repository_path):
            if not re.search(self._excluded_folders_from_search_pattern, root):
                schema_file_path = None
                model_file_found = False
                schema_file_exists = False
                is_already_documented = False
                for file in files:
                    # check the model file exists and if it does return the path
                    # of the schema.yml it's in.
                    if file == f"{model_name}.sql":
                        model_file_found = True
                        logger.debug(f"Found sql file for '{model_name}'")
                        schema_file_path = self.all_dbt_models.get(model_name, None)

                # if it's not in a schema file, then it's not documented and we
                # need to create a schema.yml "dummy" to place it in.
                if not schema_file_path and model_file_found:
                    logger.debug(
                        f"'{model_name}' was not contained in a schema file. Creating one at {root}"
                    )
                    schema_file_path = Path(os.path.join(root, "schema.yml"))
                    # check whether there is a schema file already present
                    schema_file_exists = False
                    if schema_file_path.exists():
                        schema_file_exists = True
                    return (schema_file_path, schema_file_exists, is_already_documented)

                if schema_file_path and model_file_found:
                    logger.debug(
                        f"'{model_name}' found in '{schema_file_path}', we'll update entry."
                    )
                    is_already_documented = True
                    schema_file_exists = True
                    return (schema_file_path, schema_file_exists, is_already_documented)
        return None, False, False

    def is_exluded_model(self, model_name: str) -> bool:
        # NOTE(review): despite the bool return type this either raises (when
        # the model IS excluded) or returns True; the name's typo ("exluded")
        # is kept for interface compatibility.
        if model_name in self._sugar_config.dbt_project_info.get("excluded_models", []):
            raise ValueError(
                f"You decided to exclude '{model_name}' from dbt-sugar's scope. "
                f"You run `{self._flags.task}` on it you will need to remove "
                "it from the excluded_models list in the sugar_config.yml"
            )
        return True

    @abc.abstractmethod
    def run(self) -> int:
        """Orchestrator method that calls all the needed stuff to run a documentation task."""
        ...
|
"""API definition for Task-like objects."""
import abc
import os
import re
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple, Union
from dbt_sugar.core.clients.dbt import DbtProfile
from dbt_sugar.core.clients.yaml_helpers import open_yaml, save_yaml
from dbt_sugar.core.config.config import DbtSugarConfig
from dbt_sugar.core.connectors.postgres_connector import PostgresConnector
from dbt_sugar.core.connectors.redshift_connector import RedshiftConnector
from dbt_sugar.core.connectors.snowflake_connector import SnowflakeConnector
from dbt_sugar.core.flags import FlagParser
from dbt_sugar.core.logger import GLOBAL_LOGGER as logger
# Placeholder descriptions used when a column/model carries no documentation yet.
COLUMN_NOT_DOCUMENTED = "No description for this column."
MODEL_NOT_DOCUMENTED = "No description for this model."
# Regex fragments: folders never searched during os.walk, and yml files that
# never contain model schemas.
DEFAULT_EXCLUDED_FOLDERS_PATTERN = r"\/target\/|\/dbt_modules\/"
DEFAULT_EXCLUDED_YML_FILES = r"dbt_project.yml|packages.yml"
# Maps the dbt profile "type" field to the matching database connector class.
DB_CONNECTORS = {
    "postgres": PostgresConnector,
    "snowflake": SnowflakeConnector,
    "redshift": RedshiftConnector,
}
class BaseTask(abc.ABC):
    """Sets up basic API for task-like classes.

    Walks the dbt repository once at construction time to build module-wide
    caches of model locations, column descriptions and column tests.
    """

    def __init__(
        self,
        flags: FlagParser,
        dbt_path: Path,
        sugar_config: DbtSugarConfig,
        dbt_profile: DbtProfile,
    ) -> None:
        # Root of the dbt repository that is walked for .sql and .yml files.
        self.repository_path = dbt_path
        self._sugar_config = sugar_config
        self._flags = flags
        self._dbt_profile = dbt_profile
        # Populated by class methods
        self._excluded_folders_from_search_pattern: str = self.setup_paths_exclusion()
        # model name -> Path of the schema.yml in which the model is documented.
        self.all_dbt_models: Dict[str, Path] = {}
        # column name -> description, aggregated across all schema files.
        self.dbt_definitions: Dict[str, str] = {}
        # model name -> list of {"name": <column>, "tests": [...]} entries.
        self.dbt_tests: Dict[str, List[Dict[str, Any]]] = {}
        self.build_descriptions_dictionary()

    def get_connector(self) -> Union[PostgresConnector, SnowflakeConnector, RedshiftConnector]:
        """Instantiate the database connector matching the dbt profile "type".

        Raises:
            NotImplementedError: when the profile's database type has no
                entry in ``DB_CONNECTORS``.
        """
        dbt_credentials = self._dbt_profile.profile
        connector = DB_CONNECTORS.get(dbt_credentials.get("type", ""))
        if not connector:
            raise NotImplementedError(
                f"Connector '{dbt_credentials.get('type')}' is not implemented."
            )
        return connector(dbt_credentials)

    def setup_paths_exclusion(self) -> str:
        """Appends excluded_folders to the default folder exclusion pattern."""
        if self._sugar_config.dbt_project_info["excluded_folders"]:
            # Join user-provided folder names into a /a/|/b/ style alternation.
            excluded_folders_from_search_pattern: str = r"\/|\/".join(
                self._sugar_config.dbt_project_info["excluded_folders"]
            )
            return fr"{DEFAULT_EXCLUDED_FOLDERS_PATTERN}|\/{excluded_folders_from_search_pattern}\/"
        else:
            return DEFAULT_EXCLUDED_FOLDERS_PATTERN

    def get_column_description_from_dbt_definitions(self, column_name: str) -> str:
        """Searches for the description of a column in all the descriptions in DBT.

        Args:
            column_name (str): column name to get the description from.

        Returns:
            str: the description of the column, or the "not documented"
                placeholder when the column is unknown.
        """
        return self.dbt_definitions.get(column_name, COLUMN_NOT_DOCUMENTED)

    def get_documented_columns(
        self, schema_content: Dict[str, Any], model_name: str
    ) -> Dict[str, str]:
        """Method to get the documented columns from a model in a schema.yml.

        Args:
            schema_content (Dict[str, Any]): content of the schema.yml.
            model_name (str): model name to get the columns from.

        Returns:
            Dict[str, str]: column names mapped to their (real) descriptions.
        """
        documented_columns = {}
        for model in schema_content.get("models", []):
            if model["name"] == model_name:
                for column in model.get("columns", []):
                    # A column counts as documented only when its description
                    # differs from the placeholder text.
                    if column.get("description", COLUMN_NOT_DOCUMENTED) != COLUMN_NOT_DOCUMENTED:
                        documented_columns[column["name"]] = column["description"]
        return documented_columns

    def column_has_primary_key_tests(
        self, schema_content: Dict[str, Any], model_name: str, column_name: str
    ) -> Optional[bool]:
        """Check that the primary-key column has the unique and not_null tests.

        Args:
            schema_content (Dict[str, Any]): content of the schema.yml.
            model_name (str): model name to check.
            column_name (str): column name with the primary key.

        Returns:
            Optional[bool]: True if the column has both unique and not_null
                tests, False if it is missing at least one of them, None if
                the column does not exist.
        """
        for model in schema_content.get("models", []):
            if model["name"] == model_name:
                for column in model.get("columns", []):
                    if column.get("name", "") == column_name:
                        column_tests = column.get("tests", [])
                        return "unique" in column_tests and "not_null" in column_tests
        return None

    def get_not_documented_columns(
        self, schema_content: Dict[str, Any], model_name: str
    ) -> Dict[str, str]:
        """Method to get the undocumented columns from a model in a schema.yml.

        Args:
            schema_content (Dict[str, Any]): content of the schema.yml.
            model_name (str): model name to get the columns from.

        Returns:
            Dict[str, str]: undocumented column names mapped to the
                placeholder description.
        """
        not_documented_columns = {}
        for model in schema_content.get("models", []):
            if model["name"] == model_name:
                for column in model.get("columns", []):
                    if column.get("description", COLUMN_NOT_DOCUMENTED) == COLUMN_NOT_DOCUMENTED:
                        not_documented_columns[column["name"]] = COLUMN_NOT_DOCUMENTED
        return not_documented_columns

    def combine_two_list_without_duplicates(self, list1: List[Any], list2: List[Any]) -> List[Any]:
        """
        Method to combine two lists without duplicates.

        NOTE: ``list2`` is mutated in place and returned.

        Args:
            list1 (List[Any]): First list with any value.
            list2 (List[Any]): Second list with any value.

        Returns:
            List[Any]: the combined list.
        """
        if not list1:
            return list2
        for item in list1:
            if item not in list2:
                list2.append(item)
        return list2

    def update_model_description_test_tags(
        self,
        path_file: Path,
        model_name: str,
        dict_column_description_to_update: Dict[str, Dict[str, Any]],
    ):
        """
        Method to update a schema.yml with a Dict of columns names, tests, and tags.

        Args:
            path_file (Path): Path of the schema.yml file to update.
            model_name (str): Name of the model to update.
            dict_column_description_to_update (Dict[str, Dict[str, Any]]): Dict with the column name with
                the description, tags and tests to update.
        """
        content = open_yaml(
            path_file,
            preserve_yaml_order=self._sugar_config.config.get("preserve_yaml_order", False),
        )
        for model in content.get("models", []):
            if model["name"] == model_name:
                for column in model.get("columns", []):
                    column_name = column["name"]
                    if column_name in dict_column_description_to_update:
                        # Update the description
                        description = dict_column_description_to_update[column_name].get(
                            "description"
                        )
                        if description:
                            column["description"] = description
                        # Update the tests without duplicating them.
                        tests = dict_column_description_to_update[column_name].get("tests")
                        if tests:
                            column["tests"] = self.combine_two_list_without_duplicates(
                                column.get("tests", []), tests
                            )
                        # Update the tags without duplicating them.
                        tags = dict_column_description_to_update[column_name].get("tags")
                        if tags:
                            column["tags"] = self.combine_two_list_without_duplicates(
                                column.get("tags", []), tags
                            )
        save_yaml(
            path_file,
            content,
            preserve_yaml_order=self._sugar_config.config.get("preserve_yaml_order", False),
        )

    def update_column_description_from_schema(
        self, path_file: Path, dict_column_description_to_update: Dict[str, Dict[str, Any]]
    ) -> None:
        """Method to update a schema.yml with a Dict of columns names and description.

        Applies to every model in the file, not just one.

        Args:
            path_file (Path): Path to the schema.yml file to update the columns descriptions from.
            dict_column_description_to_update (Dict[str, Dict[str, Any]]): Dict with the column name with
                the description to update.
        """
        content = open_yaml(
            path_file,
            preserve_yaml_order=self._sugar_config.config.get("preserve_yaml_order", False),
        )
        for model in content.get("models", []):
            for column in model.get("columns", []):
                column_name = column["name"]
                if column_name in dict_column_description_to_update:
                    new_description = dict_column_description_to_update[column_name].get(
                        "description"
                    )
                    if new_description:
                        column["description"] = new_description
        save_yaml(
            path_file,
            content,
            preserve_yaml_order=self._sugar_config.config.get("preserve_yaml_order", False),
        )

    def update_column_descriptions(
        self, dict_column_description_to_update: Dict[str, Dict[str, Any]]
    ) -> None:
        """Update all the schema.ymls of a dbt project from a columns/descriptions Dict.

        Args:
            dict_column_description_to_update (Dict[str, Dict[str, Any]]): Dict with the column name with
                the description to update.
        """
        for root, _, files in os.walk(self.repository_path):
            if not re.search(self._excluded_folders_from_search_pattern, root):
                # Keep only schema-bearing yml files (skip dbt_project.yml etc.).
                files = [
                    f
                    for f in files
                    if f.lower().endswith(".yml")
                    and not re.search(DEFAULT_EXCLUDED_YML_FILES, f.lower())
                ]
                for file in files:
                    path_file = Path(os.path.join(root, file))
                    self.update_column_description_from_schema(
                        path_file, dict_column_description_to_update
                    )

    def update_test_in_dbt_tests(self, model_name: str, column: Dict[str, Any]) -> None:
        """Update a column tests in the global tests dictionary.

        Args:
            model_name (str): with the model name.
            column (Dict[str, Any]): column information.
        """
        if model_name not in self.dbt_tests:
            self.dbt_tests[model_name] = [
                {"name": column["name"], "tests": column.get("tests", [])}
            ]
        else:
            self.dbt_tests[model_name].append(
                {"name": column["name"], "tests": column.get("tests", [])}
            )

    def update_description_in_dbt_descriptions(
        self, column_name: str, column_description: str
    ) -> None:
        """Update a column description in the global description dictionary.

        Args:
            column_name (str): column name to update.
            column_description (str): column description to update; falls back
                to the "not documented" placeholder when empty/None.
        """
        if not column_description:
            column_description = COLUMN_NOT_DOCUMENTED
        self.dbt_definitions[column_name] = column_description

    def remove_excluded_models(self, content: Dict[str, Any]) -> Optional[List[Dict[str, Any]]]:
        """Removes models that are excluded_models from the models dict.

        Args:
            content (Dict[str, Any]): parsed content of a schema.yml file.

        Returns:
            Optional[List[Dict[str, Any]]]: remaining model entries, or None
                when the file declares no models.
        """
        models = content.get("models", [])
        logger.debug(models)
        if models:
            return [
                model_dict
                for model_dict in models
                if model_dict["name"] not in self._sugar_config.dbt_project_info["excluded_models"]
            ]
        return None

    def read_file(self, filename_path: Path) -> str:
        """
        Method to read a file.

        Args:
            filename_path (Path): full path to the file we want to read.

        Returns:
            str: content of the file, or "" when the file does not exist.
        """
        content = ""
        if Path(filename_path).exists():
            with open(filename_path, "r") as reader:
                content = reader.read()
        return content

    def load_descriptions_from_a_schema_file(
        self, content: Dict[str, Any], path_schema: Path
    ) -> None:
        """Load the columns descriptions from a schema.yml into the global descriptions cache.

        This cache is used so that we can homogenise descriptions across models and import
        already documented ones.

        Args:
            content (Dict[str, Any]): content of the schema.yaml.
            path_schema (Path): path of the schema file, recorded per model in
                ``all_dbt_models``.
        """
        if not content:
            return
        models = self.remove_excluded_models(content)
        if not models:
            return
        for model in models:
            self.all_dbt_models[model["name"]] = path_schema
            for column in model.get("columns", []):
                column_description = column.get("description", None)
                self.update_description_in_dbt_descriptions(column["name"], column_description)
                self.update_test_in_dbt_tests(model["name"], column)

    def get_file_path_from_sql_model(self, model_name: str) -> Optional[Path]:
        """Get the complete file path from a model name.

        Args:
            model_name (str): with the model name to find.

        Returns:
            Optional[Path]: Path of the SQL file, None if the file doesn't exist.
        """
        for root, _, files in os.walk(self.repository_path):
            if not re.search(self._excluded_folders_from_search_pattern, root):
                for file_name in files:
                    # Case-insensitive match on the model's .sql filename.
                    file_name = file_name.lower()
                    if file_name == f"{model_name}.sql" and not re.search(
                        DEFAULT_EXCLUDED_YML_FILES, file_name
                    ):
                        return Path(os.path.join(root, file_name))
        return None

    def build_descriptions_dictionary(self) -> None:
        """Load the columns descriptions from all schema files in a dbt project.

        This is purely responsible for building the knowledge of all possible definitions.
        In other words it is independent from the documentation orchestration.
        This happens in the `doc` task
        """
        for root, _, files in os.walk(self.repository_path):
            if not re.search(self._excluded_folders_from_search_pattern, root):
                files = [
                    f
                    for f in files
                    if f.lower().endswith(".yml")
                    and not re.search(DEFAULT_EXCLUDED_YML_FILES, f.lower())
                ]
                for file in files:
                    path_file = Path(os.path.join(root, file))
                    content = open_yaml(
                        path_file,
                        preserve_yaml_order=self._sugar_config.config.get(
                            "preserve_yaml_order", False
                        ),
                    )
                    logger.debug(path_file)
                    if content.get("models"):
                        self.load_descriptions_from_a_schema_file(content, path_file)

    def is_model_in_schema_content(self, content: Dict[str, Any], model_name: str) -> bool:
        """Method to check if a model exists in a schema.yaml content.

        Args:
            content (Dict[str, Any]): content of the schema.yaml.
            model_name (str): model name to search.

        Returns:
            bool: True if the model is present in the schema.yaml.
        """
        if not content:
            return False
        return any(model["name"] == model_name for model in content.get("models", []))

    def find_model_schema_file(self, model_name: str) -> Tuple[Optional[Path], bool, bool]:
        """Locate (or plan the creation of) the schema.yml documenting `model_name`.

        Returns:
            Tuple[Optional[Path], bool, bool]:
                (schema_file_path, schema_file_exists, is_already_documented).
                (None, False, False) when the model's .sql file was not found.
        """
        for root, _, files in os.walk(self.repository_path):
            if not re.search(self._excluded_folders_from_search_pattern, root):
                schema_file_path = None
                model_file_found = False
                schema_file_exists = False
                is_already_documented = False
                for file in files:
                    # check the model file exists and if it does return the path
                    # of the schema.yml it's in.
                    if file == f"{model_name}.sql":
                        model_file_found = True
                        logger.debug(f"Found sql file for '{model_name}'")
                        schema_file_path = self.all_dbt_models.get(model_name, None)
                # if it's not in a schema file, then it's not documented and we
                # need to create a schema.yml "dummy" to place it in.
                if not schema_file_path and model_file_found:
                    logger.debug(
                        f"'{model_name}' was not contained in a schema file. Creating one at {root}"
                    )
                    schema_file_path = Path(os.path.join(root, "schema.yml"))
                    # check whether there is a schema file already present
                    schema_file_exists = False
                    if schema_file_path.exists():
                        schema_file_exists = True
                    return (schema_file_path, schema_file_exists, is_already_documented)
                if schema_file_path and model_file_found:
                    logger.debug(
                        f"'{model_name}' found in '{schema_file_path}', we'll update entry."
                    )
                    is_already_documented = True
                    schema_file_exists = True
                    return (schema_file_path, schema_file_exists, is_already_documented)
        return None, False, False

    def is_exluded_model(self, model_name: str) -> bool:
        """Ensure `model_name` is not in the config's excluded_models list.

        NOTE(review): the name keeps the historical "exluded" spelling;
        renaming would break callers.

        Raises:
            ValueError: when the model is excluded from dbt-sugar's scope.

        Returns:
            bool: True when the model is not excluded.
        """
        if model_name in self._sugar_config.dbt_project_info.get("excluded_models", []):
            raise ValueError(
                f"You decided to exclude '{model_name}' from dbt-sugar's scope. "
                f"You run `{self._flags.task}` on it you will need to remove "
                "it from the excluded_models list in the sugar_config.yml"
            )
        return True

    @abc.abstractmethod
    def run(self) -> int:
        """Orchestrator method that calls all the needed stuff to run a documentation task.

        Returns:
            int: status code produced by the concrete task implementation.
        """
        ...
|
en
| 0.694786
|
API definition for Task-like objects. Sets up basic API for task-like classes. # Populated by class methods Appends excluded_folders to the default folder exclusion patten. Searches for the description of a column in all the descriptions in DBT. Args: column_name (str): column name to get the description from. Returns: str: with the description of the column. Method to get the documented columns from a model in a schema.yml. Args: content (Dict[str, Any]): content of the schema.yml. model_name (str): model name to get the columns from. Returns: Dict[str, str]: with the columns names and descriptions documented. Method to check that the column with the primary key have the unique and not_null tests. Args: schema_content (Dict[str, Any]): content of the schema.yml. model_name (str): model name to check. column_name (str): column name with the primary key. Returns: Optional[bool]: True if the column have unique and not_null tests, False if is missing one of them, None if the column don't exists. Method to get the undocumented columns from a model in a schema.yml. Args: schema_content (Dict[str, Any]): content of the schema.yml. model_name (str): model name to get the columns from. Returns: Dict[str, str]: with the columns names and descriptions undocumented. Method to combine two list without duplicates. Args: list1 (List[Any]): First list with any value. list2 (List[Any]): Second list with any value. Returns: List[Any]: with the combine lists. Method to update a schema.yml with a Dict of columns names, tests, and tags. Args: path_file (Path): Path of the schema.yml file to update. model_name (str): Name of the model to update. dict_column_description_to_update (Dict[str, Dict[str, Any]]): Dict with the column name with the description, tags and tests to update. # Update the description # Update the tests without duplicating them. # Update the tags without duplicating them. Method to update a schema.yml with a Dict of columns names and description. 
Args: path_file (Path): Path to the schema.yml file to update the columns descriptions from. dict_column_description_to_update (Dict[str, Dict[str, Any]]): Dict with the column name with the description to update. Method to update all the schema.ymls from a dbt project with a Dict of columns names and description. Args: dict_column_description_to_update (Dict[str, Dict[str, Any]]): Dict with the column name with the description to update. Update a column tests in the global tests dictionary. Args: model_name (str): with the model name. column (Dict[str, Any]): column information. Update a column description in the global description dictionary. Args: column_name (str): column name to update. column_description (str): column description to update. Removes models that are excluded_models from the models dict # if self._sugar_config.dbt_project_info.get("excluded_models"): Method to read a file. Args: filename_path (Path): full path to the file we want to read. Returns: str: content of the file. Load the columns descriptions from a schema.yml into the global descriptions cache. This cache is used so that we can homogenise descriptions across models and import already documented ones. Args: content (Dict[str, Any]): content of the schema.yaml. Get the complete file path from a model name. Args: model_name (str): with the model name to find. Returns: Optional[Path]: Path of the SQL file, None if the file doens't exists. Load the columns descriptions from all schema files in a dbt project. This is purely responsble for building the knowledge of all possible definitions. In other words it is independent from the documentation orchestration. This happens in the `doc` task Method to check if a model exists in a schema.yaml content. Args: content (Dict[str, Any]): content of the schema.yaml. model_name (str): model name to search. Returns: boolean: is true if the model is present in the schema.yaml. 
# check the model file exists and if it does return the path # of the schema.yml it's in. # if it's not in a schema file, then it's not documented and we # need to create a schema.yml "dummy" to place it in. # check whether there is a schema file already present Orchestrator method that calls all the needed stuff to run a documentation task.
| 2.09981
| 2
|
src/read_enem/read_enem.py
|
changing-official/enem-reader
| 0
|
6629940
|
class ReaderCSV:
    """Placeholder CSV reader; actual parsing is not implemented yet."""

    def read(self, file):
        """Pretend to ingest *file*; always reports success."""
        return True

    def output(self):
        """Return the parsed content.

        TODO: install pandas, read the CSV file with it and return the
        rows in string[] format. Currently returns an empty string.
        """
        return ""
|
class ReaderCSV:
    """Stub CSV reader awaiting a pandas-backed implementation."""

    def read(self, file):
        # No parsing happens yet; unconditionally signal success.
        return True

    def output(self):
        # TODO: call pandas to read the csv file and return it in
        # string[] format; empty string until then.
        return ""
|
en
| 0.486492
|
# install pandas #call pandas reading csv file #return csvfile in string[] format
| 3.186335
| 3
|
experiments/buffers/buffers.py
|
mukerjee/sdrt
| 7
|
6629941
|
#!/usr/bin/env python
import sys
sys.path.insert(0, '/etalon/experiments')
import buffer_common
import click_common
import common
def main():
    """Run the static and resize switch-buffer sweeps, once per CC mode."""
    cnfs = buffer_common.gen_static_sweep(2, 7) + buffer_common.gen_resize_sweep(0, 8000, 500)
    # Use the first experiment's CC mode, or "reno" if no CC mode is specified.
    # This avoid unnecessarily restarting the cluster.
    common.initializeExperiment('flowgrindd', cnfs[0].get("cc", "reno"))
    # For every configuration, add a copy that uses reTCP as the CC mode. Put
    # the new configurations at the end so that the CC mode needs to be changed
    # only once.
    # BUG FIX: dict() accepts at most one positional mapping, so
    # dict(cnf, {'cc': "retcp"}) raised TypeError at runtime. Override the
    # "cc" key with a keyword argument instead.
    cnfs += [dict(cnf, cc="retcp") for cnf in cnfs]
    tot = len(cnfs)
    for cnt, cnf in enumerate(cnfs, 1):
        print('--- running test type {}...'.format(cnf['type']))
        print('--- setting switch buffer size to {}...'.format(
            cnf['buffer_size']))
        click_common.setConfig(cnf)
        print('--- done...')
        print("--- experiment {} of {}".format(cnt, tot))
        common.flowgrind(settings={"flows": [{"src": "r1", "dst": "r2"}]})
    common.finishExperiment()


if __name__ == "__main__":
    main()
|
#!/usr/bin/env python
import sys
sys.path.insert(0, '/etalon/experiments')
import buffer_common
import click_common
import common
def main():
    """Sweep switch buffer configurations and run flowgrind for each CC mode."""
    cnfs = buffer_common.gen_static_sweep(2, 7) + buffer_common.gen_resize_sweep(0, 8000, 500)
    # Use the first experiment's CC mode, or "reno" if no CC mode is specified.
    # This avoid unnecessarily restarting the cluster.
    common.initializeExperiment('flowgrindd', cnfs[0].get("cc", "reno"))
    # For every configuration, add a copy that uses reTCP as the CC mode. Put
    # the new configurations at the end so that the CC mode needs to be changed
    # only once.
    # BUG FIX: dict(cnf, {'cc': "retcp"}) is a TypeError (dict takes at most
    # one positional mapping); use a keyword override to set "cc".
    cnfs += [dict(cnf, cc="retcp") for cnf in cnfs]
    tot = len(cnfs)
    for cnt, cnf in enumerate(cnfs, 1):
        print('--- running test type {}...'.format(cnf['type']))
        print('--- setting switch buffer size to {}...'.format(
            cnf['buffer_size']))
        click_common.setConfig(cnf)
        print('--- done...')
        print("--- experiment {} of {}".format(cnt, tot))
        common.flowgrind(settings={"flows": [{"src": "r1", "dst": "r2"}]})
    common.finishExperiment()


if __name__ == "__main__":
    main()
|
en
| 0.812862
|
#!/usr/bin/env python # Use the first experiment's CC mode, or "reno" if no CC mode is specified. # This avoid unnecessarily restarting the cluster. # For every configuration, add a copy that uses reTCP as the CC mode. Put # the new configurations at the end so that the CC mode needs to be changed # only once.
| 1.871264
| 2
|
find_attitude/tests/test_find_attitude.py
|
sot/find_attitude
| 0
|
6629942
|
<reponame>sot/find_attitude
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import print_function, division
from pprint import pprint
import numpy as np
import agasc
from Ska.quatutil import radec2yagzag
from Quaternion import Quat
from astropy.io import ascii
from find_attitude.find_attitude import (get_dists_yag_zag, find_attitude_solutions,
get_stars_from_text)
def get_stars(ra=119.98, dec=-78, roll=0, select=slice(None, 8), brightest=True,
              sigma_1axis=0.4, sigma_mag=0.2):
    """Build a synthetic observed-star table for attitude (ra, dec, roll).

    Queries the AGASC catalog in a 1-degree cone, filters to usable stars
    (5 < MAG_ACA < 10.5, ASPQ1 == 0), selects ``select`` of them (brightest
    first or randomly shuffled), and adds Gaussian noise to the yag/zag
    positions (``sigma_1axis``, arcsec) and magnitudes (``sigma_mag``).
    """
    stars = agasc.get_agasc_cone(ra, dec, 1.0)
    ok = (stars['MAG_ACA'] > 5) & (stars['ASPQ1'] == 0) & (stars['MAG_ACA'] < 10.5)
    stars = stars[ok]
    if brightest:
        stars.sort('MAG_ACA')
    else:
        index = np.arange(len(stars))
        np.random.shuffle(index)
        stars = stars[index]
    stars = stars[select].copy()
    # Project proper-motion-corrected RA/Dec into ACA yag/zag (degrees).
    yags, zags = radec2yagzag(stars['RA_PMCORR'], stars['DEC_PMCORR'], Quat([ra, dec, roll]))
    stars['YAG_ERR'] = np.random.normal(scale=sigma_1axis, size=len(stars))
    stars['ZAG_ERR'] = np.random.normal(scale=sigma_1axis, size=len(stars))
    stars['MAG_ERROR'] = np.random.normal(scale=sigma_mag, size=len(stars))
    # Convert degrees to arcsec and apply the simulated measurement errors.
    stars['YAG'] = yags * 3600 + stars['YAG_ERR']
    stars['ZAG'] = zags * 3600 + stars['ZAG_ERR']
    stars['MAG_ACA'] += stars['MAG_ERROR']
    stars['RA'] = stars['RA_PMCORR']
    stars['DEC'] = stars['DEC_PMCORR']
    stars = stars['AGASC_ID', 'RA', 'DEC', 'YAG', 'YAG_ERR', 'ZAG', 'ZAG_ERR',
                  'MAG_ACA', 'MAG_ERROR']
    return stars
def find_overlapping_distances(min_n_overlap=3, tolerance=3.0):
    """Search random attitudes until one yields star-pair distances that overlap.

    Draws random (ra, dec, roll) attitudes, computes all pairwise yag/zag
    distances of the synthetic stars, and returns the first attitude with at
    least ``min_n_overlap`` distance pairs closer than ``tolerance`` arcsec.
    NOTE: loops forever until a qualifying attitude is found.
    """
    while True:
        ra = np.random.uniform(0, 360)
        dec = np.random.uniform(-90, 90)
        roll = np.random.uniform(0, 360)
        print(ra, dec, roll)
        stars = get_stars(ra, dec, roll, sigma_1axis=0.001, sigma_mag=0.2)
        dist_table = get_dists_yag_zag(stars['YAG'], stars['ZAG'])
        dists = dist_table['dists']
        n_overlap = 0
        # Count pairs of pairwise distances that are nearly degenerate.
        for i, d0 in enumerate(dists):
            for d1 in dists[i + 1:]:
                if np.abs(d0 - d1) < tolerance:
                    n_overlap += 1
        if n_overlap >= min_n_overlap:
            return ra, dec, roll, stars, dists
def test_overlapping_distances(tolerance=3.0):
    """
    Test a case where distance 5-7 = 864.35 and 5-1 is 865.808
    """
    # Globals are kept for interactive post-mortem inspection of failures.
    global ra, dec, roll, stars, agasc_id_star_maps, g_geom_match, g_dist_match
    ra, dec, roll = (131.1371559426714, 65.25369723989581, 112.4351393383257)  # 3 overlaps, 3.0 tol
    stars = get_stars(ra, dec, roll, sigma_1axis=0.004, sigma_mag=0.2, brightest=True)
    solutions = find_attitude_solutions(stars)
    check_output(solutions, stars, ra, dec, roll)
def _test_random(n_iter=1, sigma_1axis=0.4, sigma_mag=0.2, brightest=True):
    """Manually-run stress test: solve ``n_iter`` random attitudes (not collected by pytest)."""
    for _ in range(n_iter):
        # Globals kept for interactive debugging after a failure.
        global ra, dec, roll, stars, agasc_id_star_maps, g_geom_match, g_dist_match
        ra = np.random.uniform(0, 360)
        dec = np.random.uniform(-90, 90)
        roll = np.random.uniform(0, 360)
        stars = get_stars(ra, dec, roll, sigma_1axis=sigma_1axis,
                          sigma_mag=sigma_mag, brightest=brightest)
        solutions = find_attitude_solutions(stars)
        check_output(solutions, stars, ra, dec, roll)
def test_multiple_solutions():
    """Solve a fixed star field known to produce multiple candidate solutions."""
    global stars, solutions
    ra, dec, roll = 190.3286989834239, 22.698443628394102, 111.51056234863053
    # Frozen star observations captured from a previous get_stars() run so the
    # test is deterministic.
    stars = ascii.read("""
    AGASC_ID RA DEC YAG YAG_ERR ZAG ZAG_ERR MAG_ACA MAG_ERROR
    260863544 189.758890214 22.6594185253 567.401869049 0.615027692698 1811.01764078 -0.293256251994 6.47267 0.280073674992
    189804208 191.093682446 21.9926851164 -3294.27782744 -0.181642428933 -1445.81915638 0.812100708212 7.94189 0.199006301588
    189800592 190.343952117 21.8305748811 -2925.81601843 -0.381447575 1098.70907129 0.455625742141 9.0004 0.265371250728
    260856472 190.787449674 23.2708932848 1363.51998803 0.294404741658 -2168.30508446 -0.261280462907 8.88882 -0.186751057505
    260858632 190.173364997 22.9716741441 1103.78516827 -0.370985727476 118.089547978 -0.14060714735 9.50322 0.12366348923
    189811352 190.148298323 22.1508704117 -1612.84681732 0.241090402814 1282.01534928 -0.309449604245 9.61865 0.234064435566
    260838832 190.053449838 23.5688063028 3249.58724799 0.683018406574 -304.398501017 -0.173295600276 9.46925 -0.0731231730791
    260851360 190.235827288 22.625601259 -130.370520041 0.3501954033 383.398889404 0.168175442249 9.73112 0.0393770657269
    """)
    solutions = find_attitude_solutions(stars)
    check_output(solutions, stars, ra, dec, roll)
def check_output(solutions, stars, ra, dec, roll):
    """Assert that exactly one good-fit solution matches the true attitude.

    Compares each fitted attitude against the input (ra, dec, roll): the delta
    quaternion must be within 40 arcsec in roll and 1 arcsec in pitch/yaw, and
    every matched star's AGASC ID must agree with the input table.
    """
    print('*********************************************')
    print()
    for solution in solutions:
        att_fit = solution['att_fit']
        att_in = Quat([ra, dec, roll])
        d_att = att_in.inv() * att_fit
        # Small-angle approximation: quaternion components -> arcsec errors.
        d_roll, d_pitch, d_yaw, _ = 2 * np.degrees(d_att.q) * 3600.
        print('============================')
        print('Input: RA Dec Roll = {} {} {}'.format(ra, dec, roll))
        print('Solve: RA Dec Roll = {} {} {}'.format(*att_fit.equatorial))
        print(solution['summary'])
        if solution['bad_fit']:
            print('BAD FIT!')
            continue
        # NOTE(review): these bounds do not take abs(), so large negative
        # errors would pass — confirm whether abs() was intended.
        assert d_roll < 40.
        assert d_pitch < 1.
        assert d_yaw < 1.
        # Only compare rows where a catalog match was found.
        ok = ~solution['summary']['m_agasc_id'].mask
        sok = solution['summary'][ok]
        assert np.all(sok['AGASC_ID'] == sok['m_agasc_id'])
    assert sum(1 for s in solutions if not s['bad_fit']) == 1
    print('*********************************************\n')
def test_ra_dec_roll(ra=115.770455413, dec=-77.6580358662, roll=86.4089128685, brightest=True,
                     provide_mags=True, sigma_1axis=0.4, sigma_mag=0.2):
    """Solve a single fixed attitude end-to-end and check the solution.

    NOTE(review): ``provide_mags`` is accepted but never used in the body.
    """
    global stars, agasc_id_star_maps, g_geom_match, g_dist_match, solutions
    stars = get_stars(ra, dec, roll, sigma_1axis=sigma_1axis, sigma_mag=sigma_mag,
                      brightest=brightest)
    solutions = find_attitude_solutions(stars)
    check_output(solutions, stars, ra, dec, roll)
def test_get_stars_from_greta():
    """Parse a GRETA telemetry snippet into a star table and solve its attitude."""
    # Raw GRETA ACA telemetry dump: only rows with type STAR should survive.
    text = """
OBSID 17595 AOACINTT 1697.656 AOACPRGS 0 AOCINTNP ENAB AODITHR3 3.7928e-05
AOFWAIT NOWT Acquisition
ACA IMAGE Status Image Fid Lt Centroid Angle Star AOACASEQ KALM Success GLOBAL STATUS
MEAS # Flags Functn Flag Y Z Mag AOFSTAR GUID AORFSTR1 1 AOACPWRF OK
IMAGE 0 0 FID TRAK FID 922.53 -1004.43 7.2 AONSTARS 5 AORFSTR2 4 AOACRAMF OK
IMAGE 1 1 FID TRAK FID 2141.40 896.38 7.2 AOKALSTR 5 AOACROMF OK
IMAGE 2 2 FID TRAK FID -1825.12 893.98 7.1 ENTRY 0 ID AOACSUMF OK
IMAGE 3 3 STAR TRAK STAR 223.33 55.83 9.7 SUCCESS FLAGS ENTRY 1 ID AOACHIBK OK
IMAGE 4 4 STAR TRAK STAR -453.10 -2084.10 9.6 AOACQSUC SUC ENTRY 2 ID
IMAGE 5 5 STAR TRAK STAR -1255.12 196.58 9.2 AOGDESUC SUC ENTRY 3 ID AOACCALF OK
IMAGE 6 6 STAR TRAK STAR 598.18 2287.97 9.6 AOBRTSUC SUC ENTRY 4 ID AOACRSET OK
IMAGE 7 7 STAR TRAK STAR 2311.45 1140.60 9.8 AOFIDSUC SUC ENTRY 5 ID AOACSNTY OK
AOACRPT 1 ENTRY 6 ID
AORSTART ENAB ENTRY 7 NOID
"""
    # Expected parse: the five STAR slots with their yag/zag/mag values.
    expected = """
slot type function fid YAG ZAG MAG_ACA
---- ---- -------- ---- -------- ------- -------
3 STAR TRAK STAR 223.33 55.83 9.7
4 STAR TRAK STAR -453.1 -2084.1 9.6
5 STAR TRAK STAR -1255.12 196.58 9.2
6 STAR TRAK STAR 598.18 2287.97 9.6
7 STAR TRAK STAR 2311.45 1140.6 9.8
"""
    stars = get_stars_from_text(text)
    expected_stars = ascii.read(expected, format='fixed_width_two_line', guess=False)
    assert all(np.all(stars[name] == expected_stars[name])
               for name in ('slot', 'YAG', 'ZAG', 'MAG_ACA'))
    # A real telemetry sample should yield exactly one attitude solution.
    solutions = find_attitude_solutions(stars, tolerance=2.5)
    assert len(solutions) == 1
    solution = solutions[0]
    pprint(solution)
    print('RA, Dec, Roll', solutions[0]['att_fit'].equatorial)
def test_get_stars_from_table():
    """Parse a plain whitespace table into a star table and solve its attitude."""
    text = """
slot yag zag mag
3 223.33 55.83 9.7
4 -453.1 -2084.1 9.6
5 -1255.12 196.58 9.2
6 598.18 2287.97 9.6
7 2311.45 1140.60 9.8
"""
    # Same five stars as the GRETA test, with normalized column names.
    expected = """
slot YAG ZAG MAG_ACA
---- -------- ------- -------
3 223.33 55.83 9.7
4 -453.1 -2084.1 9.6
5 -1255.12 196.58 9.2
6 598.18 2287.97 9.6
7 2311.45 1140.6 9.8
"""
    stars = get_stars_from_text(text)
    expected_stars = ascii.read(expected, format='fixed_width_two_line', guess=False)
    assert all(np.all(stars[name] == expected_stars[name])
               for name in ('slot', 'YAG', 'ZAG', 'MAG_ACA'))
    solutions = find_attitude_solutions(stars, tolerance=2.5)
    assert len(solutions) == 1
    solution = solutions[0]
    pprint(solution)
    print('RA, Dec, Roll', solutions[0]['att_fit'].equatorial)
def check_at_time(time, qatt=None):
    """Fetch real ACA telemetry near ``time`` and verify the attitude solution.

    Pulls one minute of engineering-archive data, extracts the tracked STAR
    slots, solves the attitude, and asserts the fit agrees with telemetry
    (``qatt`` overrides the telemetered attitude when given).
    """
    # Local imports: these heavy Ska dependencies are only needed here.
    from Ska.engarchive import fetch
    from Chandra.Time import DateTime
    from astropy.table import Table
    msids_all = []
    msids = {}
    typs = ('fid', 'yan', 'zan', 'mag')
    for typ in typs:
        msids[typ] = ['aoac{}{}'.format(typ, slot) for slot in range(8)]
        msids_all.extend(msids[typ])
    msids_all.extend(['aoattqt1', 'aoattqt2', 'aoattqt3', 'aoattqt4'])
    tstart = DateTime(time).secs
    tstop = tstart + 60
    dat = fetch.MSIDset(msids_all, tstart, tstop)
    dat.interpolate(2.05)
    # Take a single time sample (index 5) for every MSID.
    sample = {msid: dat[msid].vals[5] for msid in msids_all}
    vals = {}
    # Keep only slots currently tracking a STAR (not a fid light).
    slots = [slot for slot in range(8)
             if sample['aoacfid{}'.format(slot)] == 'STAR']
    for typ in typs:
        vals[typ] = [sample['aoac{}{}'.format(typ, slot)] for slot in range(8)
                     if sample['aoacfid{}'.format(slot)] == 'STAR']
    stars = Table([slots, vals['yan'], vals['zan'], vals['mag']],
                  names=['slots', 'YAG', 'ZAG', 'MAG_ACA'])
    if qatt is None:
        qatt = Quat([dat['aoattqt{}'.format(i+1)].vals[5] for i in range(4)])
    ra, dec, roll = qatt.equatorial  # NOTE(review): unpacked but unused below
    solutions = find_attitude_solutions(stars)
    assert len(solutions) == 1
    solution = solutions[0]
    dq = qatt.inv() * solution['att_fit']
    print(solution['att_fit'].equatorial)
    print(solution['summary'])
    # Delta-quaternion components scaled to arcsec must be small.
    assert abs(dq.q[0] * 2 * 3600) < 60  # arcsec
    assert abs(dq.q[1] * 2 * 3600) < 1.5  # arcsec
    assert abs(dq.q[2] * 2 * 3600) < 1.5
def test_at_times():
    """Run check_at_time over a set of historical epochs from MCC results."""
    mcc_results = """
    2015:007:03:00:00 2015:007:03:05:00 - brute
    2015:100:00:00:00 2015:100:00:05:00
    2015:110:00:00:00 2015:110:00:05:00 - brute
    2015:121:00:00:00 2015:121:00:05:00 - brute
    2015:130:00:00:00 2015:130:00:05:00
    2015:152:00:00:00 2015:152:00:05:00
    2015:156:00:00:00 2015:156:00:05:00
    2015:170:00:00:00 2015:170:00:05:00"""
    # Only the start time of each row is used.
    times = [line.split()[0] for line in mcc_results.strip().splitlines()]
    qatts = [None] * len(times)
    # First epoch needs an explicit attitude because telemetry is bad there.
    qatts[0] = Quat([300.6576081, 66.73096392, 347.56528804])  # Telem aoattqt* are wrong
    for time, qatt in zip(times, qatts):
        check_at_time(time, qatt)
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import print_function, division
from pprint import pprint
import numpy as np
import agasc
from Ska.quatutil import radec2yagzag
from Quaternion import Quat
from astropy.io import ascii
from find_attitude.find_attitude import (get_dists_yag_zag, find_attitude_solutions,
get_stars_from_text)
def get_stars(ra=119.98, dec=-78, roll=0, select=slice(None, 8), brightest=True,
              sigma_1axis=0.4, sigma_mag=0.2):
    """Return a noisy synthetic star table for the attitude (ra, dec, roll)."""
    stars = agasc.get_agasc_cone(ra, dec, 1.0)
    # Usable stars only: reasonable magnitude and no aspect-quality flag.
    ok = (stars['MAG_ACA'] > 5) & (stars['ASPQ1'] == 0) & (stars['MAG_ACA'] < 10.5)
    stars = stars[ok]
    if brightest:
        stars.sort('MAG_ACA')
    else:
        index = np.arange(len(stars))
        np.random.shuffle(index)
        stars = stars[index]
    stars = stars[select].copy()
    yags, zags = radec2yagzag(stars['RA_PMCORR'], stars['DEC_PMCORR'], Quat([ra, dec, roll]))
    # Gaussian measurement noise on positions (arcsec) and magnitudes.
    stars['YAG_ERR'] = np.random.normal(scale=sigma_1axis, size=len(stars))
    stars['ZAG_ERR'] = np.random.normal(scale=sigma_1axis, size=len(stars))
    stars['MAG_ERROR'] = np.random.normal(scale=sigma_mag, size=len(stars))
    stars['YAG'] = yags * 3600 + stars['YAG_ERR']
    stars['ZAG'] = zags * 3600 + stars['ZAG_ERR']
    stars['MAG_ACA'] += stars['MAG_ERROR']
    stars['RA'] = stars['RA_PMCORR']
    stars['DEC'] = stars['DEC_PMCORR']
    stars = stars['AGASC_ID', 'RA', 'DEC', 'YAG', 'YAG_ERR', 'ZAG', 'ZAG_ERR',
                  'MAG_ACA', 'MAG_ERROR']
    return stars
def find_overlapping_distances(min_n_overlap=3, tolerance=3.0):
    """Search random attitudes for a field with near-degenerate pair distances.

    Draws random (ra, dec, roll) attitudes until one produces at least
    ``min_n_overlap`` pairs of star-to-star distances that agree within
    ``tolerance`` arcsec.  Used to manufacture hard test cases for the
    attitude solver.  Runs until a match is found (no iteration limit).

    :returns: tuple (ra, dec, roll, stars, dists)
    """
    while True:
        ra = np.random.uniform(0, 360)
        dec = np.random.uniform(-90, 90)
        roll = np.random.uniform(0, 360)
        print(ra, dec, roll)
        # Very low positional noise so any distance degeneracy is intrinsic.
        stars = get_stars(ra, dec, roll, sigma_1axis=0.001, sigma_mag=0.2)
        dist_table = get_dists_yag_zag(stars['YAG'], stars['ZAG'])
        dists = dist_table['dists']
        # Count pairs of pairwise distances that agree within tolerance.
        n_overlap = 0
        for i, d0 in enumerate(dists):
            for d1 in dists[i + 1:]:
                if np.abs(d0 - d1) < tolerance:
                    n_overlap += 1
        if n_overlap >= min_n_overlap:
            return ra, dec, roll, stars, dists
def test_overlapping_distances(tolerance=3.0):
    """
    Test a case where distance 5-7 = 864.35 and 5-1 is 865.808
    """
    # Globals kept for interactive post-mortem inspection after a failure.
    global ra, dec, roll, stars, agasc_id_star_maps, g_geom_match, g_dist_match
    # Attitude previously found via find_overlapping_distances().
    ra, dec, roll = (131.1371559426714, 65.25369723989581, 112.4351393383257)  # 3 overlaps, 3.0 tol
    stars = get_stars(ra, dec, roll, sigma_1axis=0.004, sigma_mag=0.2, brightest=True)
    solutions = find_attitude_solutions(stars)
    check_output(solutions, stars, ra, dec, roll)
def _test_random(n_iter=1, sigma_1axis=0.4, sigma_mag=0.2, brightest=True):
    """Solve attitudes for random synthetic star fields.

    Leading underscore keeps this out of default pytest collection; run
    it manually for stress testing.
    """
    for _ in range(n_iter):
        # Globals kept for interactive post-mortem inspection.
        global ra, dec, roll, stars, agasc_id_star_maps, g_geom_match, g_dist_match
        ra = np.random.uniform(0, 360)
        dec = np.random.uniform(-90, 90)
        roll = np.random.uniform(0, 360)
        stars = get_stars(ra, dec, roll, sigma_1axis=sigma_1axis,
                          sigma_mag=sigma_mag, brightest=brightest)
        solutions = find_attitude_solutions(stars)
        check_output(solutions, stars, ra, dec, roll)
def test_multiple_solutions():
    """Solve a star field known to yield multiple candidate solutions.

    check_output() asserts that exactly one of the candidates is a good
    fit.  The embedded table is pre-computed noisy star data for the
    given attitude; keep it verbatim.
    """
    global stars, solutions
    ra, dec, roll = 190.3286989834239, 22.698443628394102, 111.51056234863053
    stars = ascii.read("""
AGASC_ID RA DEC YAG YAG_ERR ZAG ZAG_ERR MAG_ACA MAG_ERROR
260863544 189.758890214 22.6594185253 567.401869049 0.615027692698 1811.01764078 -0.293256251994 6.47267 0.280073674992
189804208 191.093682446 21.9926851164 -3294.27782744 -0.181642428933 -1445.81915638 0.812100708212 7.94189 0.199006301588
189800592 190.343952117 21.8305748811 -2925.81601843 -0.381447575 1098.70907129 0.455625742141 9.0004 0.265371250728
260856472 190.787449674 23.2708932848 1363.51998803 0.294404741658 -2168.30508446 -0.261280462907 8.88882 -0.186751057505
260858632 190.173364997 22.9716741441 1103.78516827 -0.370985727476 118.089547978 -0.14060714735 9.50322 0.12366348923
189811352 190.148298323 22.1508704117 -1612.84681732 0.241090402814 1282.01534928 -0.309449604245 9.61865 0.234064435566
260838832 190.053449838 23.5688063028 3249.58724799 0.683018406574 -304.398501017 -0.173295600276 9.46925 -0.0731231730791
260851360 190.235827288 22.625601259 -130.370520041 0.3501954033 383.398889404 0.168175442249 9.73112 0.0393770657269
""")
    solutions = find_attitude_solutions(stars)
    check_output(solutions, stars, ra, dec, roll)
def check_output(solutions, stars, ra, dec, roll):
    """Validate attitude solutions against the known input attitude.

    For each solution, the delta quaternion between the true attitude and
    the fitted attitude is converted to (roll, pitch, yaw) errors in
    arcsec via the small-angle relation err ~= 2 * q_i.  A good fit must
    agree within 40 arcsec in roll and 1 arcsec in pitch/yaw, and its
    matched AGASC IDs must be self-consistent.  Exactly one solution
    overall may be a good fit.

    :param solutions: list of solution dicts from find_attitude_solutions
    :param stars: input star table (unused here; kept for call symmetry)
    :param ra: true right ascension (deg)
    :param dec: true declination (deg)
    :param roll: true roll (deg)
    """
    print('*********************************************')
    print()
    for solution in solutions:
        att_fit = solution['att_fit']
        att_in = Quat([ra, dec, roll])
        d_att = att_in.inv() * att_fit
        # Small-angle conversion of quaternion components to arcsec.
        d_roll, d_pitch, d_yaw, _ = 2 * np.degrees(d_att.q) * 3600.
        print('============================')
        print('Input: RA Dec Roll = {} {} {}'.format(ra, dec, roll))
        print('Solve: RA Dec Roll = {} {} {}'.format(*att_fit.equatorial))
        print(solution['summary'])
        if solution['bad_fit']:
            print('BAD FIT!')
            continue
        # BUG FIX: the deltas are signed, so compare magnitudes.  Without
        # abs() a large *negative* error would silently pass (the sibling
        # check_at_time() already uses abs() for the same comparison).
        assert abs(d_roll) < 40.
        assert abs(d_pitch) < 1.
        assert abs(d_yaw) < 1.
        # Every matched row must map back to the same AGASC ID.
        ok = ~solution['summary']['m_agasc_id'].mask
        sok = solution['summary'][ok]
        assert np.all(sok['AGASC_ID'] == sok['m_agasc_id'])
    # Exactly one solution may be a good fit.
    assert sum(1 for s in solutions if not s['bad_fit']) == 1
    print('*********************************************\n')
def test_ra_dec_roll(ra=115.770455413, dec=-77.6580358662, roll=86.4089128685, brightest=True,
                     provide_mags=True, sigma_1axis=0.4, sigma_mag=0.2):
    """Solve a known attitude from a synthetic star field and check the result.

    NOTE(review): ``provide_mags`` is accepted but never used in the body --
    confirm whether magnitudes were meant to be optional here.
    """
    # Globals kept for interactive post-mortem inspection.
    global stars, agasc_id_star_maps, g_geom_match, g_dist_match, solutions
    stars = get_stars(ra, dec, roll, sigma_1axis=sigma_1axis, sigma_mag=sigma_mag,
                      brightest=brightest)
    solutions = find_attitude_solutions(stars)
    check_output(solutions, stars, ra, dec, roll)
def test_get_stars_from_greta():
    """Parse a GRETA ACA telemetry snapshot into a star table and solve it.

    The two multi-line strings are data (parser input and expected parsed
    output); keep them verbatim.  Only slots whose function is STAR (not
    FID) should appear in the parsed table.
    """
    text = """
OBSID 17595 AOACINTT 1697.656 AOACPRGS 0 AOCINTNP ENAB AODITHR3 3.7928e-05
AOFWAIT NOWT Acquisition
ACA IMAGE Status Image Fid Lt Centroid Angle Star AOACASEQ KALM Success GLOBAL STATUS
MEAS # Flags Functn Flag Y Z Mag AOFSTAR GUID AORFSTR1 1 AOACPWRF OK
IMAGE 0 0 FID TRAK FID 922.53 -1004.43 7.2 AONSTARS 5 AORFSTR2 4 AOACRAMF OK
IMAGE 1 1 FID TRAK FID 2141.40 896.38 7.2 AOKALSTR 5 AOACROMF OK
IMAGE 2 2 FID TRAK FID -1825.12 893.98 7.1 ENTRY 0 ID AOACSUMF OK
IMAGE 3 3 STAR TRAK STAR 223.33 55.83 9.7 SUCCESS FLAGS ENTRY 1 ID AOACHIBK OK
IMAGE 4 4 STAR TRAK STAR -453.10 -2084.10 9.6 AOACQSUC SUC ENTRY 2 ID
IMAGE 5 5 STAR TRAK STAR -1255.12 196.58 9.2 AOGDESUC SUC ENTRY 3 ID AOACCALF OK
IMAGE 6 6 STAR TRAK STAR 598.18 2287.97 9.6 AOBRTSUC SUC ENTRY 4 ID AOACRSET OK
IMAGE 7 7 STAR TRAK STAR 2311.45 1140.60 9.8 AOFIDSUC SUC ENTRY 5 ID AOACSNTY OK
AOACRPT 1 ENTRY 6 ID
AORSTART ENAB ENTRY 7 NOID
"""
    expected = """
slot type function fid YAG ZAG MAG_ACA
---- ---- -------- ---- -------- ------- -------
3 STAR TRAK STAR 223.33 55.83 9.7
4 STAR TRAK STAR -453.1 -2084.1 9.6
5 STAR TRAK STAR -1255.12 196.58 9.2
6 STAR TRAK STAR 598.18 2287.97 9.6
7 STAR TRAK STAR 2311.45 1140.6 9.8
"""
    stars = get_stars_from_text(text)
    expected_stars = ascii.read(expected, format='fixed_width_two_line', guess=False)
    # Parsed table must match the expected star rows column-for-column.
    assert all(np.all(stars[name] == expected_stars[name])
               for name in ('slot', 'YAG', 'ZAG', 'MAG_ACA'))
    solutions = find_attitude_solutions(stars, tolerance=2.5)
    # Exactly one attitude solution is expected for this field.
    assert len(solutions) == 1
    solution = solutions[0]
    pprint(solution)
    print('RA, Dec, Roll', solutions[0]['att_fit'].equatorial)
def test_get_stars_from_table():
    """Parse a simple whitespace-delimited star table and solve it.

    The two multi-line strings are data (parser input and expected parsed
    output); keep them verbatim.
    """
    text = """
slot yag zag mag
3 223.33 55.83 9.7
4 -453.1 -2084.1 9.6
5 -1255.12 196.58 9.2
6 598.18 2287.97 9.6
7 2311.45 1140.60 9.8
"""
    expected = """
slot YAG ZAG MAG_ACA
---- -------- ------- -------
3 223.33 55.83 9.7
4 -453.1 -2084.1 9.6
5 -1255.12 196.58 9.2
6 598.18 2287.97 9.6
7 2311.45 1140.6 9.8
"""
    stars = get_stars_from_text(text)
    expected_stars = ascii.read(expected, format='fixed_width_two_line', guess=False)
    # Parsed table must match the expected rows column-for-column.
    assert all(np.all(stars[name] == expected_stars[name])
               for name in ('slot', 'YAG', 'ZAG', 'MAG_ACA'))
    solutions = find_attitude_solutions(stars, tolerance=2.5)
    # Exactly one attitude solution is expected for this field.
    assert len(solutions) == 1
    solution = solutions[0]
    pprint(solution)
    print('RA, Dec, Roll', solutions[0]['att_fit'].equatorial)
def check_at_time(time, qatt=None):
    """Fetch ACA telemetry near ``time`` and verify find_attitude_solutions().

    Tracked-star slots, yag/zag angles and magnitudes are sampled from the
    engineering archive, a solution is computed, and its attitude is
    compared to ``qatt`` (or, when ``qatt`` is None, to the onboard
    aoattqt* attitude telemetry).

    :param time: any Chandra.Time-compatible time specifier
    :param qatt: reference attitude Quat, or None to use telemetry
    """
    # Local imports: heavyweight archive dependencies needed only here.
    from Ska.engarchive import fetch
    from Chandra.Time import DateTime
    from astropy.table import Table
    msids_all = []
    msids = {}
    # Per-slot MSID families: image function (fid/star), y-angle, z-angle, mag.
    typs = ('fid', 'yan', 'zan', 'mag')
    for typ in typs:
        msids[typ] = ['aoac{}{}'.format(typ, slot) for slot in range(8)]
        msids_all.extend(msids[typ])
    # Onboard attitude quaternion components.
    msids_all.extend(['aoattqt1', 'aoattqt2', 'aoattqt3', 'aoattqt4'])
    tstart = DateTime(time).secs
    tstop = tstart + 60
    dat = fetch.MSIDset(msids_all, tstart, tstop)
    dat.interpolate(2.05)
    # Take one sample a few readouts in, past any interpolation edge effects.
    sample = {msid: dat[msid].vals[5] for msid in msids_all}
    vals = {}
    # Keep only slots that are tracking stars (exclude fid lights).
    slots = [slot for slot in range(8)
             if sample['aoacfid{}'.format(slot)] == 'STAR']
    for typ in typs:
        vals[typ] = [sample['aoac{}{}'.format(typ, slot)] for slot in range(8)
                     if sample['aoacfid{}'.format(slot)] == 'STAR']
    stars = Table([slots, vals['yan'], vals['zan'], vals['mag']],
                  names=['slots', 'YAG', 'ZAG', 'MAG_ACA'])
    if qatt is None:
        # Reference attitude from onboard quaternion telemetry.
        qatt = Quat([dat['aoattqt{}'.format(i+1)].vals[5] for i in range(4)])
    ra, dec, roll = qatt.equatorial
    solutions = find_attitude_solutions(stars)
    assert len(solutions) == 1
    solution = solutions[0]
    # Delta quaternion between reference and fitted attitude.
    dq = qatt.inv() * solution['att_fit']
    print(solution['att_fit'].equatorial)
    print(solution['summary'])
    # NOTE(review): unlike check_output() there is no np.degrees() here,
    # so the "arcsec" labels below are not literal unit conversions --
    # confirm the intended tolerance units.
    assert abs(dq.q[0] * 2 * 3600) < 60  # arcsec
    assert abs(dq.q[1] * 2 * 3600) < 1.5  # arcsec
    assert abs(dq.q[2] * 2 * 3600) < 1.5
def test_at_times():
    """Run check_at_time() against a set of historical epochs.

    The epochs come from MCC maneuver results; lines marked "- brute"
    presumably required brute-force matching -- TODO confirm.  Only the
    first (start-time) column of each line is used.
    """
    # Multi-line string is parsed data below; keep verbatim.
    mcc_results = """
2015:007:03:00:00 2015:007:03:05:00 - brute
2015:100:00:00:00 2015:100:00:05:00
2015:110:00:00:00 2015:110:00:05:00 - brute
2015:121:00:00:00 2015:121:00:05:00 - brute
2015:130:00:00:00 2015:130:00:05:00
2015:152:00:00:00 2015:152:00:05:00
2015:156:00:00:00 2015:156:00:05:00
2015:170:00:00:00 2015:170:00:05:00"""
    # First whitespace-delimited token of each line is the start time.
    times = [line.split()[0] for line in mcc_results.strip().splitlines()]
    # Default None => compare against onboard attitude telemetry.
    qatts = [None] * len(times)
    # For the first epoch the telemetered attitude is known-bad, so the
    # correct reference attitude is supplied explicitly.
    qatts[0] = Quat([300.6576081, 66.73096392, 347.56528804])  # Telem aoattqt* are wrong
    for time, qatt in zip(times, qatts):
        check_at_time(time, qatt)
|
en
| 0.269963
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst Test a case where distance 5-7 = 864.35 and 5-1 is 865.808 # 3 overlaps, 3.0 tol AGASC_ID RA DEC YAG YAG_ERR ZAG ZAG_ERR MAG_ACA MAG_ERROR 260863544 189.758890214 22.6594185253 567.401869049 0.615027692698 1811.01764078 -0.293256251994 6.47267 0.280073674992 189804208 191.093682446 21.9926851164 -3294.27782744 -0.181642428933 -1445.81915638 0.812100708212 7.94189 0.199006301588 189800592 190.343952117 21.8305748811 -2925.81601843 -0.381447575 1098.70907129 0.455625742141 9.0004 0.265371250728 260856472 190.787449674 23.2708932848 1363.51998803 0.294404741658 -2168.30508446 -0.261280462907 8.88882 -0.186751057505 260858632 190.173364997 22.9716741441 1103.78516827 -0.370985727476 118.089547978 -0.14060714735 9.50322 0.12366348923 189811352 190.148298323 22.1508704117 -1612.84681732 0.241090402814 1282.01534928 -0.309449604245 9.61865 0.234064435566 260838832 190.053449838 23.5688063028 3249.58724799 0.683018406574 -304.398501017 -0.173295600276 9.46925 -0.0731231730791 260851360 190.235827288 22.625601259 -130.370520041 0.3501954033 383.398889404 0.168175442249 9.73112 0.0393770657269 OBSID 17595 AOACINTT 1697.656 AOACPRGS 0 AOCINTNP ENAB AODITHR3 3.7928e-05 AOFWAIT NOWT Acquisition ACA IMAGE Status Image Fid Lt Centroid Angle Star AOACASEQ KALM Success GLOBAL STATUS MEAS # Flags Functn Flag Y Z Mag AOFSTAR GUID AORFSTR1 1 AOACPWRF OK IMAGE 0 0 FID TRAK FID 922.53 -1004.43 7.2 AONSTARS 5 AORFSTR2 4 AOACRAMF OK IMAGE 1 1 FID TRAK FID 2141.40 896.38 7.2 AOKALSTR 5 AOACROMF OK IMAGE 2 2 FID TRAK FID -1825.12 893.98 7.1 ENTRY 0 ID AOACSUMF OK IMAGE 3 3 STAR TRAK STAR 223.33 55.83 9.7 SUCCESS FLAGS ENTRY 1 ID AOACHIBK OK IMAGE 4 4 STAR TRAK STAR -453.10 -2084.10 9.6 AOACQSUC SUC ENTRY 2 ID IMAGE 5 5 STAR TRAK STAR -1255.12 196.58 9.2 AOGDESUC SUC ENTRY 3 ID AOACCALF OK IMAGE 6 6 STAR TRAK STAR 598.18 2287.97 9.6 AOBRTSUC SUC ENTRY 4 ID AOACRSET OK IMAGE 7 7 STAR TRAK STAR 2311.45 1140.60 9.8 AOFIDSUC SUC 
ENTRY 5 ID AOACSNTY OK AOACRPT 1 ENTRY 6 ID AORSTART ENAB ENTRY 7 NOID slot type function fid YAG ZAG MAG_ACA ---- ---- -------- ---- -------- ------- ------- 3 STAR TRAK STAR 223.33 55.83 9.7 4 STAR TRAK STAR -453.1 -2084.1 9.6 5 STAR TRAK STAR -1255.12 196.58 9.2 6 STAR TRAK STAR 598.18 2287.97 9.6 7 STAR TRAK STAR 2311.45 1140.6 9.8 slot yag zag mag 3 223.33 55.83 9.7 4 -453.1 -2084.1 9.6 5 -1255.12 196.58 9.2 6 598.18 2287.97 9.6 7 2311.45 1140.60 9.8 slot YAG ZAG MAG_ACA ---- -------- ------- ------- 3 223.33 55.83 9.7 4 -453.1 -2084.1 9.6 5 -1255.12 196.58 9.2 6 598.18 2287.97 9.6 7 2311.45 1140.6 9.8 # arcsec # arcsec 2015:007:03:00:00 2015:007:03:05:00 - brute 2015:100:00:00:00 2015:100:00:05:00 2015:110:00:00:00 2015:110:00:05:00 - brute 2015:121:00:00:00 2015:121:00:05:00 - brute 2015:130:00:00:00 2015:130:00:05:00 2015:152:00:00:00 2015:152:00:05:00 2015:156:00:00:00 2015:156:00:05:00 2015:170:00:00:00 2015:170:00:05:00 # Telem aoattqt* are wrong
| 2.018033
| 2
|
gated_linear_networks/examples/bernoulli_mnist.py
|
kawa-work/deepmind-research
| 10,110
|
6629943
|
# Lint as: python3
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Online MNIST classification example with Bernoulli GLN."""
from absl import app
from absl import flags
import haiku as hk
import jax
import jax.numpy as jnp
import rlax
from gated_linear_networks import bernoulli
from gated_linear_networks.examples import utils
MAX_TRAIN_STEPS = flags.DEFINE_integer(
name='max_train_steps',
default=None,
help='Maximum number of training steps to perform (None=no limit)',
)
# Small example network, achieves ~95% test set accuracy =======================
# Network parameters.
NUM_LAYERS = flags.DEFINE_integer(
name='num_layers',
default=2,
help='Number of network layers',
)
NEURONS_PER_LAYER = flags.DEFINE_integer(
name='neurons_per_layer',
default=100,
help='Number of neurons per layer',
)
CONTEXT_DIM = flags.DEFINE_integer(
name='context_dim',
default=1,
help='Context vector size',
)
# Learning rate schedule.
MAX_LR = flags.DEFINE_float(
name='max_lr',
default=0.003,
help='Maximum learning rate',
)
LR_CONSTANT = flags.DEFINE_float(
name='lr_constant',
default=1.0,
help='Learning rate constant parameter',
)
LR_DECAY = flags.DEFINE_float(
name='lr_decay',
default=0.1,
help='Learning rate decay parameter',
)
# Logging parameters.
EVALUATE_EVERY = flags.DEFINE_integer(
name='evaluate_every',
default=1000,
help='Number of training steps per evaluation epoch',
)
def main(unused_argv):
  """Train a one-vs-all Bernoulli GLN on MNIST and report test accuracy."""
  # Load MNIST dataset =========================================================
  # batch_size=-1 loads each full split into memory as a single batch.
  mnist_data, info = utils.load_deskewed_mnist(
      name='mnist', batch_size=-1, with_info=True)
  num_classes = info.features['label'].num_classes
  (train_images, train_labels) = (mnist_data['train']['image'],
                                  mnist_data['train']['label'])
  (test_images, test_labels) = (mnist_data['test']['image'],
                                mnist_data['test']['label'])

  # Build a (binary) GLN classifier ============================================
  def network_factory():
    # Builds one binary classifier: a GLN with a final single-neuron layer,
    # aggregated by its last neuron.

    def gln_factory():
      output_sizes = [NEURONS_PER_LAYER.value] * NUM_LAYERS.value + [1]
      return bernoulli.GatedLinearNetwork(
          output_sizes=output_sizes, context_dim=CONTEXT_DIM.value)

    return bernoulli.LastNeuronAggregator(gln_factory)

  def extract_features(image):
    # Standardize with a running mean/std estimate; the +1. guards against
    # division by a near-zero stddev.
    mean, stddev = utils.MeanStdEstimator()(image)
    standardized_img = (image - mean) / (stddev + 1.)
    inputs = rlax.sigmoid(standardized_img)
    side_info = standardized_img
    return inputs, side_info

  def inference_fn(image, *args, **kwargs):
    inputs, side_info = extract_features(image)
    return network_factory().inference(inputs, side_info, *args, **kwargs)

  def update_fn(image, *args, **kwargs):
    inputs, side_info = extract_features(image)
    return network_factory().update(inputs, side_info, *args, **kwargs)

  init_, inference_ = hk.without_apply_rng(
      hk.transform_with_state(inference_fn))
  _, update_ = hk.without_apply_rng(hk.transform_with_state(update_fn))

  # Map along class dimension to create a one-vs-all classifier ================
  @jax.jit
  def init(dummy_image, key):
    """One-vs-all classifier init fn."""
    dummy_images = jnp.stack([dummy_image] * num_classes, axis=0)
    keys = jax.random.split(key, num_classes)
    return jax.vmap(init_, in_axes=(0, 0))(keys, dummy_images)

  @jax.jit
  def accuracy(params, state, image, label):
    """One-vs-all classifier inference fn."""
    fn = jax.vmap(inference_, in_axes=(0, 0, None))
    predictions, unused_state = fn(params, state, image)
    # Predicted class = the binary classifier with the highest output.
    return (jnp.argmax(predictions) == label).astype(jnp.float32)

  @jax.jit
  def update(params, state, step, image, label):
    """One-vs-all classifier update fn."""
    # Learning rate schedules.
    learning_rate = jnp.minimum(
        MAX_LR.value, LR_CONSTANT.value / (1. + LR_DECAY.value * step))
    # Update weights and report log-loss.
    targets = hk.one_hot(jnp.asarray(label), num_classes)
    fn = jax.vmap(update_, in_axes=(0, 0, None, 0, None))
    out = fn(params, state, image, targets, learning_rate)
    (params, unused_predictions, log_loss), state = out
    return (jnp.mean(log_loss), params), state

  # Train on train split =======================================================
  dummy_image = train_images[0]
  params, state = init(dummy_image, jax.random.PRNGKey(42))
  # Online training: one gradient-free GLN update per example.
  for step, (image, label) in enumerate(zip(train_images, train_labels), 1):
    (unused_loss, params), state = update(
        params,
        state,
        step,
        image,
        label,
    )
    # Evaluate on test split ===================================================
    if not step % EVALUATE_EVERY.value:
      batch_accuracy = jax.vmap(accuracy, in_axes=(None, None, 0, 0))
      accuracies = batch_accuracy(params, state, test_images, test_labels)
      total_accuracy = float(jnp.mean(accuracies))
      # Report statistics.
      print({
          'step': step,
          'accuracy': float(total_accuracy),
      })
    if MAX_TRAIN_STEPS.value is not None and step >= MAX_TRAIN_STEPS.value:
      return
if __name__ == '__main__':
app.run(main)
|
# Lint as: python3
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Online MNIST classification example with Bernoulli GLN."""
from absl import app
from absl import flags
import haiku as hk
import jax
import jax.numpy as jnp
import rlax
from gated_linear_networks import bernoulli
from gated_linear_networks.examples import utils
MAX_TRAIN_STEPS = flags.DEFINE_integer(
name='max_train_steps',
default=None,
help='Maximum number of training steps to perform (None=no limit)',
)
# Small example network, achieves ~95% test set accuracy =======================
# Network parameters.
NUM_LAYERS = flags.DEFINE_integer(
name='num_layers',
default=2,
help='Number of network layers',
)
NEURONS_PER_LAYER = flags.DEFINE_integer(
name='neurons_per_layer',
default=100,
help='Number of neurons per layer',
)
CONTEXT_DIM = flags.DEFINE_integer(
name='context_dim',
default=1,
help='Context vector size',
)
# Learning rate schedule.
MAX_LR = flags.DEFINE_float(
name='max_lr',
default=0.003,
help='Maximum learning rate',
)
LR_CONSTANT = flags.DEFINE_float(
name='lr_constant',
default=1.0,
help='Learning rate constant parameter',
)
LR_DECAY = flags.DEFINE_float(
name='lr_decay',
default=0.1,
help='Learning rate decay parameter',
)
# Logging parameters.
EVALUATE_EVERY = flags.DEFINE_integer(
name='evaluate_every',
default=1000,
help='Number of training steps per evaluation epoch',
)
def main(unused_argv):
  """Train a one-vs-all Bernoulli GLN on MNIST and report test accuracy."""
  # Load MNIST dataset =========================================================
  # batch_size=-1 loads each full split into memory as a single batch.
  mnist_data, info = utils.load_deskewed_mnist(
      name='mnist', batch_size=-1, with_info=True)
  num_classes = info.features['label'].num_classes
  (train_images, train_labels) = (mnist_data['train']['image'],
                                  mnist_data['train']['label'])
  (test_images, test_labels) = (mnist_data['test']['image'],
                                mnist_data['test']['label'])

  # Build a (binary) GLN classifier ============================================
  def network_factory():
    # Builds one binary classifier: a GLN with a final single-neuron layer,
    # aggregated by its last neuron.

    def gln_factory():
      output_sizes = [NEURONS_PER_LAYER.value] * NUM_LAYERS.value + [1]
      return bernoulli.GatedLinearNetwork(
          output_sizes=output_sizes, context_dim=CONTEXT_DIM.value)

    return bernoulli.LastNeuronAggregator(gln_factory)

  def extract_features(image):
    # Standardize with a running mean/std estimate; the +1. guards against
    # division by a near-zero stddev.
    mean, stddev = utils.MeanStdEstimator()(image)
    standardized_img = (image - mean) / (stddev + 1.)
    inputs = rlax.sigmoid(standardized_img)
    side_info = standardized_img
    return inputs, side_info

  def inference_fn(image, *args, **kwargs):
    inputs, side_info = extract_features(image)
    return network_factory().inference(inputs, side_info, *args, **kwargs)

  def update_fn(image, *args, **kwargs):
    inputs, side_info = extract_features(image)
    return network_factory().update(inputs, side_info, *args, **kwargs)

  init_, inference_ = hk.without_apply_rng(
      hk.transform_with_state(inference_fn))
  _, update_ = hk.without_apply_rng(hk.transform_with_state(update_fn))

  # Map along class dimension to create a one-vs-all classifier ================
  @jax.jit
  def init(dummy_image, key):
    """One-vs-all classifier init fn."""
    dummy_images = jnp.stack([dummy_image] * num_classes, axis=0)
    keys = jax.random.split(key, num_classes)
    return jax.vmap(init_, in_axes=(0, 0))(keys, dummy_images)

  @jax.jit
  def accuracy(params, state, image, label):
    """One-vs-all classifier inference fn."""
    fn = jax.vmap(inference_, in_axes=(0, 0, None))
    predictions, unused_state = fn(params, state, image)
    # Predicted class = the binary classifier with the highest output.
    return (jnp.argmax(predictions) == label).astype(jnp.float32)

  @jax.jit
  def update(params, state, step, image, label):
    """One-vs-all classifier update fn."""
    # Learning rate schedules.
    learning_rate = jnp.minimum(
        MAX_LR.value, LR_CONSTANT.value / (1. + LR_DECAY.value * step))
    # Update weights and report log-loss.
    targets = hk.one_hot(jnp.asarray(label), num_classes)
    fn = jax.vmap(update_, in_axes=(0, 0, None, 0, None))
    out = fn(params, state, image, targets, learning_rate)
    (params, unused_predictions, log_loss), state = out
    return (jnp.mean(log_loss), params), state

  # Train on train split =======================================================
  dummy_image = train_images[0]
  params, state = init(dummy_image, jax.random.PRNGKey(42))
  # Online training: one gradient-free GLN update per example.
  for step, (image, label) in enumerate(zip(train_images, train_labels), 1):
    (unused_loss, params), state = update(
        params,
        state,
        step,
        image,
        label,
    )
    # Evaluate on test split ===================================================
    if not step % EVALUATE_EVERY.value:
      batch_accuracy = jax.vmap(accuracy, in_axes=(None, None, 0, 0))
      accuracies = batch_accuracy(params, state, test_images, test_labels)
      total_accuracy = float(jnp.mean(accuracies))
      # Report statistics.
      print({
          'step': step,
          'accuracy': float(total_accuracy),
      })
    if MAX_TRAIN_STEPS.value is not None and step >= MAX_TRAIN_STEPS.value:
      return
if __name__ == '__main__':
app.run(main)
|
en
| 0.678061
|
# Lint as: python3 # Copyright 2020 DeepMind Technologies Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Online MNIST classification example with Bernoulli GLN. # Small example network, achieves ~95% test set accuracy ======================= # Network parameters. # Learning rate schedule. # Logging parameters. # Load MNIST dataset ========================================================= # Build a (binary) GLN classifier ============================================ # Map along class dimension to create a one-vs-all classifier ================ One-vs-all classifier init fn. One-vs-all classifier inference fn. One-vs-all classifier update fn. # Learning rate schedules. # Update weights and report log-loss. # Train on train split ======================================================= # Evaluate on test split =================================================== # Report statistics.
| 2.300989
| 2
|
src/asymmetric_jwt_auth/repos/django.py
|
crgwbr/asymmetric-jwt-auth
| 18
|
6629944
|
from typing import Union
from django.contrib.auth import get_user_model
from django.contrib.auth.models import User
from jwt.exceptions import PyJWKClientError
from .base import BaseUserRepository, BasePublicKeyRepository
from ..tokens import UntrustedToken, Token
from .. import models
class DjangoUserRepository(BaseUserRepository):
    """User repository backed by the Django auth user model."""

    def __init__(self):
        # Resolve the active user model (honors AUTH_USER_MODEL).
        self.User = get_user_model()

    def get_user(self, username: str) -> Union[None, User]:
        """
        Get a Django user by username.

        Returns None when no user with that username exists.
        """
        try:
            return self.User.objects.get(username=username)
        except self.User.DoesNotExist:
            pass
        return None
class DjangoPublicKeyListRepository(BasePublicKeyRepository):
    """Verify tokens against the user's stored PublicKey rows."""

    def attempt_to_verify_token(
        self, user: User, untrusted_token: UntrustedToken
    ) -> Union[Token, None]:
        """
        Attempt to verify a JWT for the given user using public keys from the PublicKey model.

        Returns the verified Token, or None when no registered key verifies it.
        """
        # Try each registered key until one verifies the signature.
        for user_key in models.PublicKey.objects.filter(user=user).all():
            public_key = user_key.get_key()
            token = untrusted_token.verify(public_key=public_key)
            if token:
                # Record key usage (audit trail) before returning.
                user_key.update_last_used_datetime()
                return token
        return None
class DjangoJWKSRepository(BasePublicKeyRepository):
    """Verify tokens using keys fetched from the user's trusted JWKS endpoint."""

    def attempt_to_verify_token(
        self, user: User, untrusted_token: UntrustedToken
    ) -> Union[Token, None]:
        """
        Attempt to verify a JWT for the given user using public keys the user's JWKS endpoint.

        Returns the verified Token, or None when the user has no trusted
        endpoint, no matching key is found, or verification fails.
        """
        # A user may or may not have a trusted JWKS endpoint configured.
        jwks_endpoint = models.JWKSEndpointTrust.objects.filter(user=user).first()
        if not jwks_endpoint:
            return None
        try:
            public_key = jwks_endpoint.get_signing_key(untrusted_token)
        except PyJWKClientError:
            # No matching key or fetch failure: treat as unverifiable.
            return None
        token = untrusted_token.verify(public_key=public_key)
        if token:
            return token
        return None
|
from typing import Union
from django.contrib.auth import get_user_model
from django.contrib.auth.models import User
from jwt.exceptions import PyJWKClientError
from .base import BaseUserRepository, BasePublicKeyRepository
from ..tokens import UntrustedToken, Token
from .. import models
class DjangoUserRepository(BaseUserRepository):
    """User repository backed by the Django auth user model."""

    def __init__(self):
        # Resolve the active user model (honors AUTH_USER_MODEL).
        self.User = get_user_model()

    def get_user(self, username: str) -> Union[None, User]:
        """
        Get a Django user by username.

        Returns None when no user with that username exists.
        """
        try:
            return self.User.objects.get(username=username)
        except self.User.DoesNotExist:
            pass
        return None
class DjangoPublicKeyListRepository(BasePublicKeyRepository):
    """Verify tokens against the user's stored PublicKey rows."""

    def attempt_to_verify_token(
        self, user: User, untrusted_token: UntrustedToken
    ) -> Union[Token, None]:
        """
        Attempt to verify a JWT for the given user using public keys from the PublicKey model.

        Returns the verified Token, or None when no registered key verifies it.
        """
        # Try each registered key until one verifies the signature.
        for user_key in models.PublicKey.objects.filter(user=user).all():
            public_key = user_key.get_key()
            token = untrusted_token.verify(public_key=public_key)
            if token:
                # Record key usage (audit trail) before returning.
                user_key.update_last_used_datetime()
                return token
        return None
class DjangoJWKSRepository(BasePublicKeyRepository):
    """Verify tokens using keys fetched from the user's trusted JWKS endpoint."""

    def attempt_to_verify_token(
        self, user: User, untrusted_token: UntrustedToken
    ) -> Union[Token, None]:
        """
        Attempt to verify a JWT for the given user using public keys the user's JWKS endpoint.

        Returns the verified Token, or None when the user has no trusted
        endpoint, no matching key is found, or verification fails.
        """
        # A user may or may not have a trusted JWKS endpoint configured.
        jwks_endpoint = models.JWKSEndpointTrust.objects.filter(user=user).first()
        if not jwks_endpoint:
            return None
        try:
            public_key = jwks_endpoint.get_signing_key(untrusted_token)
        except PyJWKClientError:
            # No matching key or fetch failure: treat as unverifiable.
            return None
        token = untrusted_token.verify(public_key=public_key)
        if token:
            return token
        return None
|
en
| 0.826863
|
Get a Django user by username Attempt to verify a JWT for the given user using public keys from the PublicKey model. Attempt to verify a JWT for the given user using public keys the user's JWKS endpoint.
| 2.396127
| 2
|
mai/cli.py
|
zalando-stups/mia
| 9
|
6629945
|
<filename>mai/cli.py
import click
import os
import keyring
import yaml
import aws_saml_login.saml
import time
import mai
from aws_saml_login import authenticate, assume_role, write_aws_credentials
from clickclick import Action, choice, error, AliasedGroup, info, print_table, OutputFormat
CONFIG_DIR_PATH = click.get_app_dir('mai')
CONFIG_FILE_PATH = os.path.join(CONFIG_DIR_PATH, 'mai.yaml')
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
def print_version(ctx, param, value):
    """Click eager callback: print the Mai version and exit."""
    # Do nothing when the flag is absent or click is parsing for completion.
    if not value or ctx.resilient_parsing:
        return
    click.echo('Mai {}'.format(mai.__version__))
    ctx.exit()
output_option = click.option('-o', '--output', type=click.Choice(['text', 'json', 'tsv']), default='text',
help='Use alternative output format')
@click.group(cls=AliasedGroup, invoke_without_command=True, context_settings=CONTEXT_SETTINGS)
@click.option('--config-file', '-c', help='Use alternative configuration file',
              default=CONFIG_FILE_PATH, metavar='PATH')
@click.option('-V', '--version', is_flag=True, callback=print_version, expose_value=False, is_eager=True,
              help='Print the current version number and exit.')
@click.option('--awsprofile', help='Profilename in ~/.aws/credentials', default='default', show_default=True)
@click.pass_context
def cli(ctx, config_file, awsprofile):
    """Root command group: load the YAML config and, when invoked without
    a subcommand, log in using the default (or first) profile."""
    path = os.path.abspath(os.path.expanduser(config_file))
    data = {}
    if os.path.exists(path):
        with open(path, 'rb') as fd:
            data = yaml.safe_load(fd)
    # Shared state passed to subcommands via ctx.obj / @click.pass_obj.
    ctx.obj = {'config': data,
               'config-file': path,
               'config-dir': os.path.dirname(path),
               'last-update-filename': os.path.join(os.path.dirname(path), 'last_update.yaml')}
    if not ctx.invoked_subcommand:
        if not data:
            raise click.UsageError('No profile configured. Use "mai create .." to create a new profile.')
        profile = None
        # The 'global' section holds cross-profile settings such as the default.
        if 'global' in data:
            profile = data['global'].get('default_profile')
        if not profile:
            # Fall back to the alphabetically first real profile.
            profile = sorted([k for k in data.keys() if k != 'global'])[0]
        login_with_profile(ctx.obj, profile, data.get(profile), awsprofile)
@cli.command('list')
@output_option
@click.pass_obj
def list_profiles(obj, output):
    '''List profiles'''
    if obj['config']:
        rows = []
        # NOTE(review): the 'global' settings section is iterated like a
        # profile here -- confirm whether it should be filtered out.
        for name, config in obj['config'].items():
            row = {
                'name': name,
                'role': get_role_label(config.get('saml_role')),
                'url': config.get('saml_identity_provider_url'),
                'user': config.get('saml_user')}
            rows.append(row)
        rows.sort(key=lambda r: r['name'])
        with OutputFormat(output):
            # Column order: sorted keys = name, role, url, user.
            print_table(sorted(rows[0].keys()), rows)
def get_role_label(role):
    """Format a SAML role triple as a human-readable label.

    >>> get_role_label(('arn:aws:iam::123:saml-provider/Shibboleth',\
        'arn:aws:iam::123:role/Shibboleth-PowerUser', 'zalando-stups'))
    'AWS Account 123 (zalando-stups): Shibboleth-PowerUser'
    """
    if not role:
        return ''
    _provider_arn, role_arn, account_alias = role
    # Role ARN layout: arn:aws:iam::<account>:role/<role-name>
    account_number = role_arn.split(':')[4]
    role_name = role_arn.split('/')[-1]
    return 'AWS Account {} ({}): {}'.format(account_number, account_alias, role_name)
@cli.command()
@click.argument('profile-name')
@click.option('--url', prompt='Identity provider URL')
@click.option('-U', '--user', envvar='SAML_USER', prompt='SAML username')
@click.pass_obj
def create(obj, profile_name, url, user):
    '''Create a new profile'''
    # Allow the user to omit the URL scheme.
    if not url.startswith('http'):
        url = 'https://{}'.format(url)
    saml_xml, roles = saml_login(user, url)
    if not roles:
        error('No roles found')
        exit(1)
    if len(roles) == 1:
        role = roles[0]
        # Account alias can be missing; fall back to the profile name.
        if role[2] is None:
            role = (role[0], role[1], profile_name)
    else:
        # Multiple roles: let the user pick one interactively.
        role = choice('Please select one role', [(r, get_role_label(r)) for r in sorted(roles)])
    data = obj['config']
    if not data:
        data = {}
    data[profile_name] = {
        'saml_identity_provider_url': url,
        'saml_role': role,
        'saml_user': user
    }
    path = obj['config-file']
    with Action('Storing new profile in {}..'.format(path)):
        os.makedirs(obj['config-dir'], exist_ok=True)
        with open(path, 'w') as fd:
            yaml.safe_dump(data, fd)
@cli.command('create-all')
@click.option('--url', prompt='Identity provider URL')
@click.option('-U', '--user', envvar='SAML_USER', prompt='SAML username')
@click.pass_obj
def create_all(obj, url, user):
    '''Create for all roles a new own profile'''
    # Allow the user to omit the URL scheme.
    if not url.startswith('http'):
        url = 'https://{}'.format(url)
    saml_xml, roles = saml_login(user, url)
    if not roles:
        error('No roles found')
        exit(1)
    data = obj['config']
    if not data:
        data = {}
    if len(roles) == 1:
        # Single role with no account alias: name the profile 'default'.
        if roles[0][2] is None:
            roles = [(roles[0][0], roles[0][1], 'default')]
    for r in sorted(roles):
        provider_arn, role_arn, name = r
        name = name or 'unknown'  # name is sometimes missing
        # Profile name derived from account alias + role ARN suffixes.
        profile_name = '{}-{}'.format(name.split('-', maxsplit=1)[-1], role_arn.split('-', maxsplit=1)[-1])
        data[profile_name] = {
            'saml_identity_provider_url': url,
            'saml_role': r,
            'saml_user': user
        }
    path = obj['config-file']
    with Action('Storing new profile in {}..'.format(path)):
        os.makedirs(obj['config-dir'], exist_ok=True)
        with open(path, 'w') as fd:
            yaml.safe_dump(data, fd)
@cli.command('set-default')
@click.argument('profile-name')
@click.pass_obj
def set_default(obj, profile_name):
    '''Set default profile'''
    data = obj['config']
    if not data or profile_name not in data:
        raise click.UsageError('Profile "{}" does not exist'.format(profile_name))
    # The 'global' section holds cross-profile settings.
    data['global'] = {
        'default_profile': profile_name
    }
    path = obj['config-file']
    with Action('Storing configuration in {}..'.format(path)):
        os.makedirs(obj['config-dir'], exist_ok=True)
        with open(path, 'w') as fd:
            yaml.safe_dump(data, fd)
def saml_login(user, url):
    """Authenticate *user* against the SAML identity provider at *url*.

    The password is cached in the system keyring; on authentication failure
    the cached value is discarded and the user is re-prompted until a login
    succeeds.  Returns a ``(saml_xml, roles)`` tuple from ``authenticate``.
    """
    ring_user = '{}@{}'.format(user, url)
    # NOTE(review): '<PASSWORD>' looks like a redacted keyring service name
    # (dataset anonymization artifact) — confirm the real service string.
    saml_password = keyring.get_password('<PASSWORD>', ring_user)
    saml_xml = None
    while not saml_xml:
        if not saml_password:
            saml_password = click.prompt('Please enter your SAML password', hide_input=True)
        with Action('Authenticating against {url}..\n', url=url) as act:
            try:
                saml_xml, roles = authenticate(url, user, saml_password)
            except aws_saml_login.saml.AuthenticationFailed:
                act.error('Authentication Failed')
                info('Please check your username/password and try again.')
                # Force a fresh prompt on the next loop iteration.
                saml_password = None
    # Only a password that actually worked is stored back in the keyring.
    keyring.set_password('<PASSWORD>', ring_user, saml_password)
    return saml_xml, roles
def login_with_profile(obj, profile, config, awsprofile):
    """Log in via SAML for one stored profile and write temporary AWS
    credentials, then persist the login timestamp for later refresh checks.

    Raises click.UsageError when the profile is missing its identity
    provider URL or SAML username.
    """
    url = config.get('saml_identity_provider_url')
    user = config.get('saml_user')
    role = config.get('saml_role')
    if not url:
        raise click.UsageError('Missing identity provider URL')
    if not user:
        raise click.UsageError('Missing SAML username')
    saml_xml, roles = saml_login(user, url)
    with Action('Assuming role {role}..', role=get_role_label(role)) as action:
        try:
            # role is a (provider_arn, role_arn, name) triple.
            key_id, secret, session_token = assume_role(saml_xml,
                                                        role[0], role[1])
        except aws_saml_login.saml.AssumeRoleFailed as e:
            action.fatal_error(str(e))
    with Action('Writing temporary AWS credentials..'):
        write_aws_credentials(awsprofile, key_id, secret, session_token)
        # Record when (and with which profile) we last logged in; the
        # `login --refresh` and `require` commands read this file.
        with open(obj['last-update-filename'], 'w') as fd:
            yaml.safe_dump({'timestamp': time.time(), 'profile': profile}, fd)
@cli.command('delete')
@click.argument('profile-name')
@click.pass_obj
def delete(obj, profile_name):
    '''Delete profile'''
    path = obj['config-file']
    if not obj['config'] or profile_name not in obj['config']:
        raise click.UsageError('Profile "{}" does not exist'.format(profile_name))
    del obj['config'][profile_name]
    # Rewrite the whole config file without the removed profile.
    with Action('Deleting profile from {}..'.format(path)):
        os.makedirs(obj['config-dir'], exist_ok=True)
        with open(path, 'w') as fd:
            yaml.safe_dump(obj['config'], fd)
@cli.command()
@click.argument('profile', nargs=-1)
@click.option('-r', '--refresh', is_flag=True, help='Keep running and refresh access tokens automatically')
@click.option('--awsprofile', help='Profilename in ~/.aws/credentials', default='default', show_default=True)
@click.pass_obj
def login(obj, profile, refresh, awsprofile):
    '''Login with given profile(s)'''
    repeat = True
    while repeat:
        last_update = get_last_update(obj)
        # No profile given on the command line: fall back to the last-used one.
        if 'profile' in last_update and last_update['profile'] and not profile:
            profile = [last_update['profile']]
        for prof in profile:
            if prof not in obj['config']:
                raise click.UsageError('Profile "{}" does not exist'.format(prof))
            login_with_profile(obj, prof, obj['config'][prof], awsprofile)
        if refresh:
            last_update = get_last_update(obj)
            # Refresh at 90% of the (presumed 1h) credential lifetime —
            # TODO confirm against the actual STS session duration.
            wait_time = 3600 * 0.9
            with Action('Waiting {} minutes before refreshing credentials..'
                        .format(round(((last_update['timestamp']+wait_time)-time.time()) / 60))) as act:
                while time.time() < last_update['timestamp'] + wait_time:
                    try:
                        time.sleep(120)
                    except KeyboardInterrupt:
                        # do not show "EXCEPTION OCCURRED" for CTRL+C
                        repeat = False
                        break
                    act.progress()
        else:
            repeat = False
@cli.command()
@click.argument('profile', nargs=-1)
@click.option('--awsprofile', help='Profilename in ~/.aws/credentials', default='default', show_default=True)
@click.pass_context
def require(context, profile, awsprofile):
    '''Login if necessary'''
    last_update = get_last_update(context.obj)
    # Re-login when credentials are near expiry (90% of 1h) or when a
    # different profile than the last-used one was requested.
    time_remaining = last_update['timestamp'] + 3600 * 0.9 - time.time()
    if time_remaining < 0 or (profile and profile[0] != last_update['profile']):
        context.invoke(login, profile=profile, refresh=False, awsprofile=awsprofile)
def get_last_update(obj):
    """Return the persisted last-login state dict.

    Reads the YAML state file written by ``login_with_profile`` and falls
    back to ``{'timestamp': 0}`` when the file is missing, unreadable,
    malformed, or empty.
    """
    try:
        with open(obj['last-update-filename'], 'rb') as fd:
            last_update = yaml.safe_load(fd)
    except (OSError, yaml.YAMLError):
        # Missing or corrupt state file: treat as "never logged in".
        # (The old bare `except:` also swallowed KeyboardInterrupt/SystemExit.)
        last_update = None
    # safe_load returns None for an empty file; never hand None to callers.
    return last_update or {'timestamp': 0}
def main():
    # Console-script entry point: delegate to the click command group.
    cli()
|
<filename>mai/cli.py
import click
import os
import keyring
import yaml
import aws_saml_login.saml
import time
import mai
from aws_saml_login import authenticate, assume_role, write_aws_credentials
from clickclick import Action, choice, error, AliasedGroup, info, print_table, OutputFormat
CONFIG_DIR_PATH = click.get_app_dir('mai')
CONFIG_FILE_PATH = os.path.join(CONFIG_DIR_PATH, 'mai.yaml')
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
def print_version(ctx, param, value):
    """Eager click callback: print the Mai version and exit the process."""
    # resilient_parsing is set during shell completion — do nothing then.
    if not value or ctx.resilient_parsing:
        return
    click.echo('Mai {}'.format(mai.__version__))
    ctx.exit()
output_option = click.option('-o', '--output', type=click.Choice(['text', 'json', 'tsv']), default='text',
help='Use alternative output format')
@click.group(cls=AliasedGroup, invoke_without_command=True, context_settings=CONTEXT_SETTINGS)
@click.option('--config-file', '-c', help='Use alternative configuration file',
              default=CONFIG_FILE_PATH, metavar='PATH')
@click.option('-V', '--version', is_flag=True, callback=print_version, expose_value=False, is_eager=True,
              help='Print the current version number and exit.')
@click.option('--awsprofile', help='Profilename in ~/.aws/credentials', default='default', show_default=True)
@click.pass_context
def cli(ctx, config_file, awsprofile):
    """Root command group: load the YAML config into ctx.obj and, when no
    subcommand is given, log in with the default (or first) profile."""
    path = os.path.abspath(os.path.expanduser(config_file))
    data = {}
    if os.path.exists(path):
        with open(path, 'rb') as fd:
            data = yaml.safe_load(fd)
    # ctx.obj is shared with every subcommand via @click.pass_obj.
    ctx.obj = {'config': data,
               'config-file': path,
               'config-dir': os.path.dirname(path),
               'last-update-filename': os.path.join(os.path.dirname(path), 'last_update.yaml')}
    if not ctx.invoked_subcommand:
        if not data:
            raise click.UsageError('No profile configured. Use "mai create .." to create a new profile.')
        profile = None
        # Prefer the explicitly configured default; else the first profile
        # name in sorted order (the 'global' section is not a profile).
        if 'global' in data:
            profile = data['global'].get('default_profile')
        if not profile:
            profile = sorted([k for k in data.keys() if k != 'global'])[0]
        login_with_profile(ctx.obj, profile, data.get(profile), awsprofile)
@cli.command('list')
@output_option
@click.pass_obj
def list_profiles(obj, output):
    '''List profiles'''
    if obj['config']:
        rows = []
        # One table row per configured profile.
        for name, config in obj['config'].items():
            row = {
                'name': name,
                'role': get_role_label(config.get('saml_role')),
                'url': config.get('saml_identity_provider_url'),
                'user': config.get('saml_user')}
            rows.append(row)
        rows.sort(key=lambda r: r['name'])
        with OutputFormat(output):
            print_table(sorted(rows[0].keys()), rows)
def get_role_label(role):
    """Render a (provider_arn, role_arn, account_name) triple as a
    human-readable label; a falsy role yields an empty string.

    >>> get_role_label(('arn:aws:iam::123:saml-provider/Shibboleth',\
 'arn:aws:iam::123:role/Shibboleth-PowerUser', 'zalando-stups'))
    'AWS Account 123 (zalando-stups): Shibboleth-PowerUser'
    """
    if not role:
        return ''
    _, role_arn, account_name = role
    # ARN layout: arn:aws:iam::<account-number>:role/<role-name>
    account_number = role_arn.split(':')[4]
    role_name = role_arn.rsplit('/', 1)[-1]
    return 'AWS Account {} ({}): {}'.format(account_number, account_name, role_name)
@cli.command()
@click.argument('profile-name')
@click.option('--url', prompt='Identity provider URL')
@click.option('-U', '--user', envvar='SAML_USER', prompt='SAML username')
@click.pass_obj
def create(obj, profile_name, url, user):
    '''Create a new profile'''
    # Allow the user to omit the scheme; default to HTTPS.
    if not url.startswith('http'):
        url = 'https://{}'.format(url)
    saml_xml, roles = saml_login(user, url)
    if not roles:
        error('No roles found')
        exit(1)
    if len(roles) == 1:
        role = roles[0]
        # Account name missing: substitute the profile name.
        if role[2] is None:
            role = (role[0], role[1], profile_name)
    else:
        role = choice('Please select one role', [(r, get_role_label(r)) for r in sorted(roles)])
    data = obj['config']
    if not data:
        data = {}
    data[profile_name] = {
        'saml_identity_provider_url': url,
        'saml_role': role,
        'saml_user': user
    }
    path = obj['config-file']
    with Action('Storing new profile in {}..'.format(path)):
        os.makedirs(obj['config-dir'], exist_ok=True)
        with open(path, 'w') as fd:
            yaml.safe_dump(data, fd)
@cli.command('create-all')
@click.option('--url', prompt='Identity provider URL')
@click.option('-U', '--user', envvar='SAML_USER', prompt='SAML username')
@click.pass_obj
def create_all(obj, url, user):
    '''Create for all roles a new own profile

    Logs in once, then stores one profile per SAML role in the config file.
    '''
    # Allow the user to omit the scheme; default to HTTPS.
    if not url.startswith('http'):
        url = 'https://{}'.format(url)
    saml_xml, roles = saml_login(user, url)
    if not roles:
        error('No roles found')
        exit(1)
    data = obj['config']
    if not data:
        data = {}
    if len(roles) == 1:
        # Single role with no account name: use a generic profile name.
        if roles[0][2] is None:
            roles = [(roles[0][0], roles[0][1], 'default')]
    for r in sorted(roles):
        provider_arn, role_arn, name = r
        name = name or 'unknown' # name is sometimes missing
        # Profile name is derived from the account name and role ARN suffixes.
        # NOTE(review): splitting the role ARN on '-' assumes a '<prefix>-<role>'
        # naming scheme — confirm against real role ARNs.
        profile_name = '{}-{}'.format(name.split('-', maxsplit=1)[-1], role_arn.split('-', maxsplit=1)[-1])
        data[profile_name] = {
            'saml_identity_provider_url': url,
            'saml_role': r,
            'saml_user': user
        }
    path = obj['config-file']
    with Action('Storing new profile in {}..'.format(path)):
        os.makedirs(obj['config-dir'], exist_ok=True)
        with open(path, 'w') as fd:
            yaml.safe_dump(data, fd)
@cli.command('set-default')
@click.argument('profile-name')
@click.pass_obj
def set_default(obj, profile_name):
    '''Set default profile'''
    data = obj['config']
    if not data or profile_name not in data:
        raise click.UsageError('Profile "{}" does not exist'.format(profile_name))
    # The reserved 'global' section holds cross-profile settings.
    data['global'] = {
        'default_profile': profile_name
    }
    path = obj['config-file']
    with Action('Storing configuration in {}..'.format(path)):
        os.makedirs(obj['config-dir'], exist_ok=True)
        with open(path, 'w') as fd:
            yaml.safe_dump(data, fd)
def saml_login(user, url):
    """Authenticate *user* against the SAML identity provider at *url*.

    The password is cached in the system keyring; on authentication failure
    the cached value is discarded and the user is re-prompted until a login
    succeeds.  Returns a ``(saml_xml, roles)`` tuple from ``authenticate``.
    """
    ring_user = '{}@{}'.format(user, url)
    # NOTE(review): '<PASSWORD>' looks like a redacted keyring service name
    # (dataset anonymization artifact) — confirm the real service string.
    saml_password = keyring.get_password('<PASSWORD>', ring_user)
    saml_xml = None
    while not saml_xml:
        if not saml_password:
            saml_password = click.prompt('Please enter your SAML password', hide_input=True)
        with Action('Authenticating against {url}..\n', url=url) as act:
            try:
                saml_xml, roles = authenticate(url, user, saml_password)
            except aws_saml_login.saml.AuthenticationFailed:
                act.error('Authentication Failed')
                info('Please check your username/password and try again.')
                # Force a fresh prompt on the next loop iteration.
                saml_password = None
    # Only a password that actually worked is stored back in the keyring.
    keyring.set_password('<PASSWORD>', ring_user, saml_password)
    return saml_xml, roles
def login_with_profile(obj, profile, config, awsprofile):
    """Log in via SAML for one stored profile and write temporary AWS
    credentials, then persist the login timestamp for later refresh checks.

    Raises click.UsageError when the profile is missing its identity
    provider URL or SAML username.
    """
    url = config.get('saml_identity_provider_url')
    user = config.get('saml_user')
    role = config.get('saml_role')
    if not url:
        raise click.UsageError('Missing identity provider URL')
    if not user:
        raise click.UsageError('Missing SAML username')
    saml_xml, roles = saml_login(user, url)
    with Action('Assuming role {role}..', role=get_role_label(role)) as action:
        try:
            # role is a (provider_arn, role_arn, name) triple.
            key_id, secret, session_token = assume_role(saml_xml,
                                                        role[0], role[1])
        except aws_saml_login.saml.AssumeRoleFailed as e:
            action.fatal_error(str(e))
    with Action('Writing temporary AWS credentials..'):
        write_aws_credentials(awsprofile, key_id, secret, session_token)
        # Record when (and with which profile) we last logged in; the
        # `login --refresh` and `require` commands read this file.
        with open(obj['last-update-filename'], 'w') as fd:
            yaml.safe_dump({'timestamp': time.time(), 'profile': profile}, fd)
@cli.command('delete')
@click.argument('profile-name')
@click.pass_obj
def delete(obj, profile_name):
    '''Delete profile'''
    path = obj['config-file']
    if not obj['config'] or profile_name not in obj['config']:
        raise click.UsageError('Profile "{}" does not exist'.format(profile_name))
    del obj['config'][profile_name]
    # Rewrite the whole config file without the removed profile.
    with Action('Deleting profile from {}..'.format(path)):
        os.makedirs(obj['config-dir'], exist_ok=True)
        with open(path, 'w') as fd:
            yaml.safe_dump(obj['config'], fd)
@cli.command()
@click.argument('profile', nargs=-1)
@click.option('-r', '--refresh', is_flag=True, help='Keep running and refresh access tokens automatically')
@click.option('--awsprofile', help='Profilename in ~/.aws/credentials', default='default', show_default=True)
@click.pass_obj
def login(obj, profile, refresh, awsprofile):
    '''Login with given profile(s)'''
    repeat = True
    while repeat:
        last_update = get_last_update(obj)
        # No profile given on the command line: fall back to the last-used one.
        if 'profile' in last_update and last_update['profile'] and not profile:
            profile = [last_update['profile']]
        for prof in profile:
            if prof not in obj['config']:
                raise click.UsageError('Profile "{}" does not exist'.format(prof))
            login_with_profile(obj, prof, obj['config'][prof], awsprofile)
        if refresh:
            last_update = get_last_update(obj)
            # Refresh at 90% of the (presumed 1h) credential lifetime —
            # TODO confirm against the actual STS session duration.
            wait_time = 3600 * 0.9
            with Action('Waiting {} minutes before refreshing credentials..'
                        .format(round(((last_update['timestamp']+wait_time)-time.time()) / 60))) as act:
                while time.time() < last_update['timestamp'] + wait_time:
                    try:
                        time.sleep(120)
                    except KeyboardInterrupt:
                        # do not show "EXCEPTION OCCURRED" for CTRL+C
                        repeat = False
                        break
                    act.progress()
        else:
            repeat = False
@cli.command()
@click.argument('profile', nargs=-1)
@click.option('--awsprofile', help='Profilename in ~/.aws/credentials', default='default', show_default=True)
@click.pass_context
def require(context, profile, awsprofile):
    '''Login if necessary'''
    last_update = get_last_update(context.obj)
    # Re-login when credentials are near expiry (90% of 1h) or when a
    # different profile than the last-used one was requested.
    time_remaining = last_update['timestamp'] + 3600 * 0.9 - time.time()
    if time_remaining < 0 or (profile and profile[0] != last_update['profile']):
        context.invoke(login, profile=profile, refresh=False, awsprofile=awsprofile)
def get_last_update(obj):
    """Return the persisted last-login state dict.

    Reads the YAML state file written by ``login_with_profile`` and falls
    back to ``{'timestamp': 0}`` when the file is missing, unreadable,
    malformed, or empty.
    """
    try:
        with open(obj['last-update-filename'], 'rb') as fd:
            last_update = yaml.safe_load(fd)
    except (OSError, yaml.YAMLError):
        # Missing or corrupt state file: treat as "never logged in".
        # (The old bare `except:` also swallowed KeyboardInterrupt/SystemExit.)
        last_update = None
    # safe_load returns None for an empty file; never hand None to callers.
    return last_update or {'timestamp': 0}
def main():
    # Console-script entry point: delegate to the click command group.
    cli()
|
en
| 0.555732
|
List profiles >>> get_role_label(('arn:aws:iam::123:saml-provider/Shibboleth',\ 'arn:aws:iam::123:role/Shibboleth-PowerUser', 'zalando-stups')) 'AWS Account 123 (zalando-stups): Shibboleth-PowerUser' Create a new profile Create for all roles a new own profile # name is sometimes missing Set default profile Delete profile Login with given profile(s) # do not show "EXCEPTION OCCURRED" for CTRL+C Login if necessary
| 1.868457
| 2
|
labelbox/data/serialization/ndjson/converter.py
|
Cyniikal/labelbox-python
| 0
|
6629946
|
<gh_stars>0
import logging
from typing import Any, Dict, Generator, Iterable
from ...annotation_types.collection import LabelCollection, LabelGenerator
from .label import NDLabel
logger = logging.getLogger(__name__)
class NDJsonConverter:
    """Converts between the ndjson prediction-import format and the common
    labelbox annotation types, via the NDLabel pydantic model."""

    @staticmethod
    def deserialize(json_data: Iterable[Dict[str, Any]]) -> LabelGenerator:
        """
        Converts ndjson data (prediction import format) into the common labelbox format.

        Args:
            json_data: An iterable representing the ndjson data
        Returns:
            LabelGenerator containing the ndjson data.
        """
        data = NDLabel(**{'annotations': json_data})
        res = data.to_common()
        return res

    @staticmethod
    def serialize(
            labels: LabelCollection) -> Generator[Dict[str, Any], None, None]:
        """
        Converts a labelbox common object to the labelbox ndjson format (prediction import format)

        Note that this function might fail for objects that are not supported by mal.
        Not all edge cases are handling by custom exceptions, if you get a cryptic pydantic error message it is probably due to this.
        We will continue to improve the error messages and add helper functions to deal with this.

        Args:
            labels: Either a LabelList or a LabelGenerator
        Returns:
            A generator for accessing the ndjson representation of the data
        """
        for example in NDLabel.from_common(labels):
            yield example.dict(by_alias=True)
|
import logging
from typing import Any, Dict, Generator, Iterable
from ...annotation_types.collection import LabelCollection, LabelGenerator
from .label import NDLabel
logger = logging.getLogger(__name__)
class NDJsonConverter:
    """Converts between the ndjson prediction-import format and the common
    labelbox annotation types, via the NDLabel pydantic model."""

    @staticmethod
    def deserialize(json_data: Iterable[Dict[str, Any]]) -> LabelGenerator:
        """
        Converts ndjson data (prediction import format) into the common labelbox format.

        Args:
            json_data: An iterable representing the ndjson data
        Returns:
            LabelGenerator containing the ndjson data.
        """
        parsed = NDLabel(annotations=json_data)
        return parsed.to_common()

    @staticmethod
    def serialize(
            labels: LabelCollection) -> Generator[Dict[str, Any], None, None]:
        """
        Converts a labelbox common object to the labelbox ndjson format (prediction import format)

        Note that this function might fail for objects that are not supported by mal.
        Not all edge cases are handling by custom exceptions, if you get a cryptic pydantic error message it is probably due to this.
        We will continue to improve the error messages and add helper functions to deal with this.

        Args:
            labels: Either a LabelList or a LabelGenerator
        Returns:
            A generator for accessing the ndjson representation of the data
        """
        for annotation in NDLabel.from_common(labels):
            yield annotation.dict(by_alias=True)
|
en
| 0.699081
|
Converts ndjson data (prediction import format) into the common labelbox format. Args: json_data: An iterable representing the ndjson data Returns: LabelGenerator containing the ndjson data. Converts a labelbox common object to the labelbox ndjson format (prediction import format) Note that this function might fail for objects that are not supported by mal. Not all edge cases are handling by custom exceptions, if you get a cryptic pydantic error message it is probably due to this. We will continue to improve the error messages and add helper functions to deal with this. Args: labels: Either a LabelList or a LabelGenerator Returns: A generator for accessing the ndjson representation of the data
| 2.369802
| 2
|
client-programs/contrib/metagenscope/metagenscope/autometa.py
|
LongTailBio/pangea-django
| 0
|
6629947
|
import pandas as pd
import logging
from pandas.api.types import is_string_dtype
from pandas.api.types import is_numeric_dtype
from sklearn.cluster import dbscan
from requests.exceptions import HTTPError
from .modules.constants import FASTKRAKEN2_NAMES
from .modules.parse_utils import (
proportions,
run_pca,
parse_taxa_report,
group_taxa_report,
)
from .data_utils import sample_module_field
logger = logging.getLogger(__name__)
def sample_has_modules(sample):
    """Return True when the sample exposes every required analysis module field."""
    has_all = True
    # FASTKRAKEN2_NAMES is a 5-tuple; only module name and field are checked here.
    for module_name, field, _, _, _ in [FASTKRAKEN2_NAMES]:
        try:
            sample_module_field(sample, module_name, field)
        except KeyError:
            has_all = False
    return has_all
def pc1_median(samples, taxa_matrix):
    """Tag each sample's mgs_metadata with whether it falls above or below
    the median of the first principal component of the taxa matrix."""
    pc1 = run_pca(taxa_matrix, n_comp=1)['C0']
    for sample in samples:
        # Default survives only if sample.name is missing from pc1's index.
        pcval = 'Not Found in PC1'
        if pc1[sample.name] >= pc1.median():
            pcval = 'Above PC1 Median'
        elif pc1[sample.name] < pc1.median():
            pcval = 'Below PC1 Median'
        sample.mgs_metadata['MGS - PC1'] = pcval
def pca_dbscan(samples, taxa_matrix):
    """Annotate each sample with a DBSCAN cluster label computed in PCA space.

    Stores 'Cluster <n>' (or 'Noise' for DBSCAN outliers) under the
    'MGS - PCA-DBSCAN' key of sample.mgs_metadata.
    """
    # Project onto at most 10 components (fewer when the matrix is narrower).
    pca = run_pca(taxa_matrix, n_comp=min(10, taxa_matrix.shape[1]))
    _, cluster_labels = dbscan(pca, eps=0.1, min_samples=3)
    for i, sample in enumerate(samples):
        label_val = cluster_labels[i]
        # DBSCAN marks noise points with a negative (-1) label.
        if label_val < 0:
            label = 'Noise'  # plain literal; the f-string prefix was pointless
        else:
            label = f'Cluster {label_val}'
        sample.mgs_metadata['MGS - PCA-DBSCAN'] = label
def add_taxa_auto_metadata(samples, grp):
    """Derive automatic metadata variables (PC1 median, PCA-DBSCAN cluster)
    from the group's taxa abundance report and attach them to each sample."""
    samples = [sample for sample in samples if sample_has_modules(sample)]
    try:
        taxa_matrix = group_taxa_report(
            grp,
            module_name='cap2::capalyzer::kraken2_taxa',
            field_name='read_counts',
        )(samples)
    except HTTPError:
        # Full kraken2 report unavailable: fall back to the fast variant.
        taxa_matrix = group_taxa_report(
            grp,
            module_name='cap2::capalyzer::fast_kraken2_taxa',
            field_name='report',
        )(samples)
    # Keep only samples that actually appear in the parsed matrix.
    parsed_sample_names = set(taxa_matrix.index.to_list())
    samples = [sample for sample in samples if sample.name in parsed_sample_names]
    logger.info('Adding PCA median variable...')
    pc1_median(samples, taxa_matrix)
    logger.info('done.')
    logger.info('Adding PCA DBSCAN variable...')
    pca_dbscan(samples, taxa_matrix)
    logger.info('done.')
def regularize_metadata(samples):
    """Normalize each sample's metadata into categorical buckets.

    Numeric columns are binned into low/medium/high terciles; string columns
    have rare values (fewer than max(2, n_samples // 100) occurrences)
    collapsed into 'Other'.  Missing values become 'Unknown'.  The result is
    stored on each sample as ``sample.mgs_metadata`` (samples absent from
    the frame get an empty dict).
    """
    logger.info('Regularizing metadata...')
    meta = pd.DataFrame.from_dict(
        {sample.name: sample.metadata for sample in samples},
        orient='index'
    )

    def regularize_numeric(col):
        # Tercile binning; qcut raises on columns with too few distinct
        # values, in which case the raw values are kept.
        try:
            col = pd.qcut(col, 3, labels=["low", "medium", "high"], duplicates='drop')
        except ValueError:
            pass
        col = col.astype(str)
        # astype(str) turns NaN into the string 'nan'; map it to 'Unknown'.
        col = col.map(lambda x: 'Unknown' if x.lower() == 'nan' else x)
        col = col.fillna('Unknown')
        return col

    def regularize_categorical(col):
        col = col.fillna('Unknown')
        # Collapse values rarer than ~1% of samples (min 2) into 'Other'.
        min_size = max(2, col.shape[0] // 100)
        counts = col.value_counts()
        others = list(counts[counts < min_size].index)
        col = col.map(lambda el: 'Other' if el in others else el)
        return col

    def regularize_col(col):
        if is_numeric_dtype(col):
            return regularize_numeric(col)
        if is_string_dtype(col):
            return regularize_categorical(col)
        return col

    meta = meta.apply(regularize_col, axis=0)
    meta = meta.fillna('Unknown')
    for sample in samples:
        try:
            sample_meta = meta.loc[sample.name].to_dict()
        except KeyError:
            # Sample contributed no metadata columns.
            sample_meta = {}
        # Plain assignment: the old try/except KeyError around setattr was
        # dead code (setattr cannot raise KeyError, and `pass` after `raise`
        # was unreachable).
        sample.mgs_metadata = sample_meta
    logger.info('done.')
|
import pandas as pd
import logging
from pandas.api.types import is_string_dtype
from pandas.api.types import is_numeric_dtype
from sklearn.cluster import dbscan
from requests.exceptions import HTTPError
from .modules.constants import FASTKRAKEN2_NAMES
from .modules.parse_utils import (
proportions,
run_pca,
parse_taxa_report,
group_taxa_report,
)
from .data_utils import sample_module_field
logger = logging.getLogger(__name__)
def sample_has_modules(sample):
    """Return True when the sample exposes every required analysis module field."""
    has_all = True
    # FASTKRAKEN2_NAMES is a 5-tuple; only module name and field are checked here.
    for module_name, field, _, _, _ in [FASTKRAKEN2_NAMES]:
        try:
            sample_module_field(sample, module_name, field)
        except KeyError:
            has_all = False
    return has_all
def pc1_median(samples, taxa_matrix):
    """Tag each sample's mgs_metadata with whether it falls above or below
    the median of the first principal component of the taxa matrix."""
    pc1 = run_pca(taxa_matrix, n_comp=1)['C0']
    for sample in samples:
        # Default survives only if sample.name is missing from pc1's index.
        pcval = 'Not Found in PC1'
        if pc1[sample.name] >= pc1.median():
            pcval = 'Above PC1 Median'
        elif pc1[sample.name] < pc1.median():
            pcval = 'Below PC1 Median'
        sample.mgs_metadata['MGS - PC1'] = pcval
def pca_dbscan(samples, taxa_matrix):
    """Annotate each sample with a DBSCAN cluster label computed in PCA space.

    Stores 'Cluster <n>' (or 'Noise' for DBSCAN outliers) under the
    'MGS - PCA-DBSCAN' key of sample.mgs_metadata.
    """
    # Project onto at most 10 components (fewer when the matrix is narrower).
    pca = run_pca(taxa_matrix, n_comp=min(10, taxa_matrix.shape[1]))
    _, cluster_labels = dbscan(pca, eps=0.1, min_samples=3)
    for i, sample in enumerate(samples):
        label_val = cluster_labels[i]
        # DBSCAN marks noise points with a negative (-1) label.
        if label_val < 0:
            label = 'Noise'  # plain literal; the f-string prefix was pointless
        else:
            label = f'Cluster {label_val}'
        sample.mgs_metadata['MGS - PCA-DBSCAN'] = label
def add_taxa_auto_metadata(samples, grp):
    """Derive automatic metadata variables (PC1 median, PCA-DBSCAN cluster)
    from the group's taxa abundance report and attach them to each sample."""
    samples = [sample for sample in samples if sample_has_modules(sample)]
    try:
        taxa_matrix = group_taxa_report(
            grp,
            module_name='cap2::capalyzer::kraken2_taxa',
            field_name='read_counts',
        )(samples)
    except HTTPError:
        # Full kraken2 report unavailable: fall back to the fast variant.
        taxa_matrix = group_taxa_report(
            grp,
            module_name='cap2::capalyzer::fast_kraken2_taxa',
            field_name='report',
        )(samples)
    # Keep only samples that actually appear in the parsed matrix.
    parsed_sample_names = set(taxa_matrix.index.to_list())
    samples = [sample for sample in samples if sample.name in parsed_sample_names]
    logger.info('Adding PCA median variable...')
    pc1_median(samples, taxa_matrix)
    logger.info('done.')
    logger.info('Adding PCA DBSCAN variable...')
    pca_dbscan(samples, taxa_matrix)
    logger.info('done.')
def regularize_metadata(samples):
    """Normalize each sample's metadata into categorical buckets.

    Numeric columns are binned into low/medium/high terciles; string columns
    have rare values (fewer than max(2, n_samples // 100) occurrences)
    collapsed into 'Other'.  Missing values become 'Unknown'.  The result is
    stored on each sample as ``sample.mgs_metadata`` (samples absent from
    the frame get an empty dict).
    """
    logger.info('Regularizing metadata...')
    meta = pd.DataFrame.from_dict(
        {sample.name: sample.metadata for sample in samples},
        orient='index'
    )

    def regularize_numeric(col):
        # Tercile binning; qcut raises on columns with too few distinct
        # values, in which case the raw values are kept.
        try:
            col = pd.qcut(col, 3, labels=["low", "medium", "high"], duplicates='drop')
        except ValueError:
            pass
        col = col.astype(str)
        # astype(str) turns NaN into the string 'nan'; map it to 'Unknown'.
        col = col.map(lambda x: 'Unknown' if x.lower() == 'nan' else x)
        col = col.fillna('Unknown')
        return col

    def regularize_categorical(col):
        col = col.fillna('Unknown')
        # Collapse values rarer than ~1% of samples (min 2) into 'Other'.
        min_size = max(2, col.shape[0] // 100)
        counts = col.value_counts()
        others = list(counts[counts < min_size].index)
        col = col.map(lambda el: 'Other' if el in others else el)
        return col

    def regularize_col(col):
        if is_numeric_dtype(col):
            return regularize_numeric(col)
        if is_string_dtype(col):
            return regularize_categorical(col)
        return col

    meta = meta.apply(regularize_col, axis=0)
    meta = meta.fillna('Unknown')
    for sample in samples:
        try:
            sample_meta = meta.loc[sample.name].to_dict()
        except KeyError:
            # Sample contributed no metadata columns.
            sample_meta = {}
        # Plain assignment: the old try/except KeyError around setattr was
        # dead code (setattr cannot raise KeyError, and `pass` after `raise`
        # was unreachable).
        sample.mgs_metadata = sample_meta
    logger.info('done.')
|
none
| 1
| 2.367307
| 2
|
|
GPGPY/config/config.py
|
caweinshenker/GPGPY
| 0
|
6629948
|
#MIT License
#
#Copyright (c) 2016 <NAME>
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
import re
import sys
from .config_constants import *
class Config(object):
    """Mutable mapping of GPGPU-Sim configuration options to string values."""

    def __init__(self):
        self._load_defaults()

    def __getitem__(self, key):
        return self._config_dict[key]

    def __setitem__(self, key, value):
        # Values are always stored as strings so they can be written verbatim.
        try:
            self._config_dict[key] = str(value)
        except TypeError as e:
            print("Key or value must have a string format")
            raise(e)

    def __str__(self):
        return str(self._config_dict)

    def _load_defaults(self):
        """Load None as default value for all configuration options"""
        self._config_dict = {}
        for default_config in default_configs:
            for config_option in default_config:
                self._config_dict[config_option] = None

    def load_config(self, config_filename):
        """Parse a config file of "option value..." lines into the dict.

        '#' comment lines and blank lines are skipped; unknown options are
        kept but reported.  Raises ValueError when an option has no argument.
        """
        # `with` closes the file even on error (it was leaked before).
        with open(config_filename, "r") as config_file:
            for line in config_file:
                if re.match(r"(^#|^\s*\n)", line):
                    continue  # Skip comments and blank lines
                try:
                    tokens = line.split()  # split once instead of three times
                    option, value = tokens[0], " ".join(tokens[1:])
                    if len(tokens) < 2:
                        raise(ValueError)
                    if option not in self._config_dict:
                        print("Warning: {} is not a default option for GPGPU-Sim".format(option))
                        unrecognized_configuration_options.add(option)
                    self._config_dict[option] = value
                except ValueError as e:
                    print("ERROR: Option {} found, but no arguments were given".format(option))
                    raise(e)

    def write(self, config_filename):
        """Write all set options to *config_filename*, grouped per default config."""
        def _write_option(config_file, option):
            # Skip unset parameters to accept GPGPU-sim defaults
            if self._config_dict[option] is not None:
                config_file.write(option + " ")
                config_file.write(self._config_dict[option] + " \n")
        def _write_header(config_file, config):
            config_file.write("###########################\n")
            config_file.write("# " + config.name + "\n")
        # `with` guarantees the file is flushed/closed even if a write fails.
        with open(config_filename, "w") as config_file:
            for config in default_configs:
                _write_header(config_file, config)
                for option in config:
                    _write_option(config_file, option)
|
#MIT License
#
#Copyright (c) 2016 <NAME>
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
import re
import sys
from .config_constants import *
class Config(object):
    """Mutable mapping of GPGPU-Sim configuration options to string values."""

    def __init__(self):
        self._load_defaults()

    def __getitem__(self, key):
        return self._config_dict[key]

    def __setitem__(self, key, value):
        # Values are always stored as strings so they can be written verbatim.
        try:
            self._config_dict[key] = str(value)
        except TypeError as e:
            print("Key or value must have a string format")
            raise(e)

    def __str__(self):
        return str(self._config_dict)

    def _load_defaults(self):
        """Load None as default value for all configuration options"""
        self._config_dict = {}
        for default_config in default_configs:
            for config_option in default_config:
                self._config_dict[config_option] = None

    def load_config(self, config_filename):
        """Parse a config file of "option value..." lines into the dict.

        '#' comment lines and blank lines are skipped; unknown options are
        kept but reported.  Raises ValueError when an option has no argument.
        """
        # `with` closes the file even on error (it was leaked before).
        with open(config_filename, "r") as config_file:
            for line in config_file:
                if re.match(r"(^#|^\s*\n)", line):
                    continue  # Skip comments and blank lines
                try:
                    tokens = line.split()  # split once instead of three times
                    option, value = tokens[0], " ".join(tokens[1:])
                    if len(tokens) < 2:
                        raise(ValueError)
                    if option not in self._config_dict:
                        print("Warning: {} is not a default option for GPGPU-Sim".format(option))
                        unrecognized_configuration_options.add(option)
                    self._config_dict[option] = value
                except ValueError as e:
                    print("ERROR: Option {} found, but no arguments were given".format(option))
                    raise(e)

    def write(self, config_filename):
        """Write all set options to *config_filename*, grouped per default config."""
        def _write_option(config_file, option):
            # Skip unset parameters to accept GPGPU-sim defaults
            if self._config_dict[option] is not None:
                config_file.write(option + " ")
                config_file.write(self._config_dict[option] + " \n")
        def _write_header(config_file, config):
            config_file.write("###########################\n")
            config_file.write("# " + config.name + "\n")
        # `with` guarantees the file is flushed/closed even if a write fails.
        with open(config_filename, "w") as config_file:
            for config in default_configs:
                _write_header(config_file, config)
                for option in config:
                    _write_option(config_file, option)
|
en
| 0.689462
|
#MIT License # #Copyright (c) 2016 <NAME> # #Permission is hereby granted, free of charge, to any person obtaining a copy #of this software and associated documentation files (the "Software"), to deal #in the Software without restriction, including without limitation the rights #to use, copy, modify, merge, publish, distribute, sublicense, and/or sell #copies of the Software, and to permit persons to whom the Software is #furnished to do so, subject to the following conditions: # #The above copyright notice and this permission notice shall be included in all #copies or substantial portions of the Software. # #THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, #OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE #SOFTWARE. Load None as default value for all configuration options #|^\s*\n)", line): continue #Skip comments and blank lines #Skip unset parameters to accept GPGPU-sim defaults ##########################\n")
| 2.140841
| 2
|
anndata/tests/test_io_conversion.py
|
chris-rands/anndata
| 0
|
6629949
|
"""
This file contains tests for conversion made during io.
"""
import h5py
import numpy as np
import pytest
from scipy import sparse
import anndata as ad
from anndata.tests.helpers import gen_adata, assert_equal
@pytest.fixture(
    params=[sparse.csr_matrix, sparse.csc_matrix, np.array],
    ids=["scipy-csr", "scipy-csc", "np-array"],
)
def mtx_format(request):
    """Parametrized X container: sparse CSR, sparse CSC, or dense ndarray."""
    return request.param
@pytest.fixture(
    params=[sparse.csr_matrix, sparse.csc_matrix], ids=["scipy-csr", "scipy-csc"],
)
def spmtx_format(request):
    """Parametrized sparse X container: CSR or CSC only."""
    return request.param
@pytest.fixture(params=[("raw/X",), ("X",), ("X", "raw/X")])
def to_convert(request):
    """Parametrized tuple of h5ad dataset keys to convert (X and/or raw/X)."""
    return request.param
def test_sparse_to_dense_disk(tmp_path, mtx_format, to_convert):
    """Writing with as_dense= stores the chosen keys as dense HDF5 datasets,
    from both an in-memory and a backed AnnData, and round-trips equal."""
    mem_pth = tmp_path / "orig.h5ad"
    dense_from_mem_pth = tmp_path / "dense_mem.h5ad"
    dense_from_disk_pth = tmp_path / "dense_disk.h5ad"
    mem = gen_adata((50, 50), mtx_format)
    mem.raw = mem
    mem.write_h5ad(mem_pth)
    disk = ad.read_h5ad(mem_pth, backed="r")
    mem.write_h5ad(dense_from_mem_pth, as_dense=to_convert)
    disk.write_h5ad(dense_from_disk_pth, as_dense=to_convert)
    # The converted keys must be plain (dense) h5py Datasets on disk.
    with h5py.File(dense_from_mem_pth, "r") as f:
        for k in to_convert:
            assert isinstance(f[k], h5py.Dataset)
    with h5py.File(dense_from_disk_pth, "r") as f:
        for k in to_convert:
            assert isinstance(f[k], h5py.Dataset)
    # Both dense files read back equal to the original, backed or not.
    for backed in [None, "r"]:
        from_mem = ad.read_h5ad(dense_from_mem_pth, backed=backed)
        from_disk = ad.read_h5ad(dense_from_disk_pth, backed=backed)
        assert_equal(mem, from_mem)
        assert_equal(mem, from_disk)
        assert_equal(disk, from_mem)
        assert_equal(disk, from_disk)
def test_sparse_to_dense_inplace(tmp_path, spmtx_format):
    """A backed (r+) AnnData can rewrite its own file with dense X/raw.X
    without changing values; in-memory objects keep their original types."""
    pth = tmp_path / "adata.h5ad"
    orig = gen_adata((50, 50), spmtx_format)
    orig.raw = orig
    orig.write(pth)
    backed = ad.read_h5ad(pth, backed="r+")
    backed.write(as_dense=("X", "raw/X"))
    new = ad.read_h5ad(pth)
    assert_equal(orig, new)
    assert_equal(backed, new)
    # Freshly read file yields dense arrays ...
    assert isinstance(new.X, np.ndarray)
    assert isinstance(new.raw.X, np.ndarray)
    # ... while the original stays sparse and the backed view stays on disk.
    assert isinstance(orig.X, spmtx_format)
    assert isinstance(orig.raw.X, spmtx_format)
    assert isinstance(backed.X, h5py.Dataset)
    assert isinstance(backed.raw.X, h5py.Dataset)
def test_sparse_to_dense_errors(tmp_path):
    """Invalid ``as_dense`` values passed to ``write_h5ad`` are rejected."""
    adata = ad.AnnData(X=sparse.random(50, 50, format="csr"))
    adata.layers["like_X"] = adata.X.copy()
    # BUG FIX: both ValueError cases previously shared a single
    # ``pytest.raises`` block, so the second write never executed (the first
    # raise exits the block). Each expected error gets its own block.
    with pytest.raises(ValueError):
        # NOTE(review): ("raw/X") is a plain string, not a 1-tuple —
        # presumably deliberate, exercising the no-raw / malformed case.
        adata.write_h5ad(tmp_path / "failure.h5ad", as_dense=("raw/X"))
    with pytest.raises(ValueError):
        adata.write_h5ad(tmp_path / "failure.h5ad", as_dense=("raw", "X"))
    with pytest.raises(NotImplementedError):
        # Densifying arbitrary layers is not supported.
        adata.write_h5ad(tmp_path / "failure.h5ad", as_dense=("layers/like_X"))
def test_dense_to_sparse_memory(tmp_path, spmtx_format, to_convert):
    """Reading with ``as_sparse`` converts dense on-disk arrays to the
    requested sparse format without altering the stored values."""
    dense_pth = tmp_path / "dense.h5ad"
    orig = gen_adata((50, 50), np.array)
    orig.raw = orig
    orig.write_h5ad(dense_pth)
    # Sanity check: nothing is sparse before conversion.
    assert not isinstance(orig.X, sparse.spmatrix)
    assert not isinstance(orig.raw.X, sparse.spmatrix)

    loaded = ad.read_h5ad(dense_pth, as_sparse=to_convert, as_sparse_fmt=spmtx_format)

    if "X" in to_convert:
        assert isinstance(loaded.X, spmtx_format)
    if "raw/X" in to_convert:
        assert isinstance(loaded.raw.X, spmtx_format)
    assert_equal(orig, loaded)
def test_dense_to_sparse_errors(tmp_path):
    """Unsupported ``as_sparse`` requests raise ``NotImplementedError``."""
    dense_pth = tmp_path / "dense.h5ad"
    adata = ad.AnnData(X=np.ones((50, 50)))
    adata.layers["like_X"] = adata.X.copy()
    adata.write(dense_pth)

    # COO is rejected as a target format.
    with pytest.raises(NotImplementedError):
        ad.read_h5ad(dense_pth, as_sparse=("X",), as_sparse_fmt=sparse.coo_matrix)
    # Converting arbitrary layers is rejected.
    with pytest.raises(NotImplementedError):
        ad.read_h5ad(dense_pth, as_sparse=("layers/like_X",))
|
"""
This file contains tests for conversions made during I/O.
"""
import h5py
import numpy as np
import pytest
from scipy import sparse
import anndata as ad
from anndata.tests.helpers import gen_adata, assert_equal
# Parametrizes tests over the container used for X: sparse CSR, sparse CSC,
# or a dense numpy array.
@pytest.fixture(
    params=[sparse.csr_matrix, sparse.csc_matrix, np.array],
    ids=["scipy-csr", "scipy-csc", "np-array"],
)
def mtx_format(request):
    """Constructor used to build the X matrix of generated AnnData objects."""
    return request.param
# Sparse-only variant of ``mtx_format``: CSR and CSC, for tests where a
# dense source/target makes no sense.
@pytest.fixture(
    params=[sparse.csr_matrix, sparse.csc_matrix], ids=["scipy-csr", "scipy-csc"],
)
def spmtx_format(request):
    """Sparse matrix constructor (CSR or CSC)."""
    return request.param
# h5ad keys whose storage format should be converted: raw/X, X, or both.
@pytest.fixture(params=[("raw/X",), ("X",), ("X", "raw/X")])
def to_convert(request):
    """Tuple of keys to pass as ``as_dense`` / ``as_sparse``."""
    return request.param
def test_sparse_to_dense_disk(tmp_path, mtx_format, to_convert):
    """Writing with ``as_dense`` stores the requested keys as plain HDF5
    datasets, whether the writer is an in-memory or a backed AnnData, and
    the data round-trips unchanged through both backed and non-backed reads.
    """
    source_pth = tmp_path / "orig.h5ad"
    dense_mem_pth = tmp_path / "dense_mem.h5ad"
    dense_disk_pth = tmp_path / "dense_disk.h5ad"

    mem = gen_adata((50, 50), mtx_format)
    mem.raw = mem
    mem.write_h5ad(source_pth)
    disk = ad.read_h5ad(source_pth, backed="r")

    mem.write_h5ad(dense_mem_pth, as_dense=to_convert)
    disk.write_h5ad(dense_disk_pth, as_dense=to_convert)

    # Every converted key must now be a plain dataset on disk.
    for pth in (dense_mem_pth, dense_disk_pth):
        with h5py.File(pth, "r") as f:
            for key in to_convert:
                assert isinstance(f[key], h5py.Dataset)

    # Contents survive the round trip regardless of how they are re-read.
    for mode in (None, "r"):
        reread_mem = ad.read_h5ad(dense_mem_pth, backed=mode)
        reread_disk = ad.read_h5ad(dense_disk_pth, backed=mode)
        for reference in (mem, disk):
            assert_equal(reference, reread_mem)
            assert_equal(reference, reread_disk)
def test_sparse_to_dense_inplace(tmp_path, spmtx_format):
    """Writing a backed AnnData with ``as_dense`` rewrites X and raw/X in
    its own file, preserving the values while densifying the storage."""
    pth = tmp_path / "adata.h5ad"
    orig = gen_adata((50, 50), spmtx_format)
    orig.raw = orig
    orig.write(pth)

    backed = ad.read_h5ad(pth, backed="r+")
    backed.write(as_dense=("X", "raw/X"))

    reread = ad.read_h5ad(pth)
    assert_equal(orig, reread)
    assert_equal(backed, reread)

    # Storage classes: the fresh read is dense, the in-memory original is
    # still sparse, and the backed object exposes the on-disk datasets.
    for obj, expected_type in (
        (reread, np.ndarray),
        (orig, spmtx_format),
        (backed, h5py.Dataset),
    ):
        assert isinstance(obj.X, expected_type)
        assert isinstance(obj.raw.X, expected_type)
def test_sparse_to_dense_errors(tmp_path):
    """Invalid ``as_dense`` values passed to ``write_h5ad`` are rejected."""
    adata = ad.AnnData(X=sparse.random(50, 50, format="csr"))
    adata.layers["like_X"] = adata.X.copy()
    # BUG FIX: both ValueError cases previously shared a single
    # ``pytest.raises`` block, so the second write never executed (the first
    # raise exits the block). Each expected error gets its own block.
    with pytest.raises(ValueError):
        # NOTE(review): ("raw/X") is a plain string, not a 1-tuple —
        # presumably deliberate, exercising the no-raw / malformed case.
        adata.write_h5ad(tmp_path / "failure.h5ad", as_dense=("raw/X"))
    with pytest.raises(ValueError):
        adata.write_h5ad(tmp_path / "failure.h5ad", as_dense=("raw", "X"))
    with pytest.raises(NotImplementedError):
        # Densifying arbitrary layers is not supported.
        adata.write_h5ad(tmp_path / "failure.h5ad", as_dense=("layers/like_X"))
def test_dense_to_sparse_memory(tmp_path, spmtx_format, to_convert):
    """Reading with ``as_sparse`` converts dense on-disk arrays to the
    requested sparse format without altering the stored values."""
    dense_pth = tmp_path / "dense.h5ad"
    orig = gen_adata((50, 50), np.array)
    orig.raw = orig
    orig.write_h5ad(dense_pth)
    # Sanity check: nothing is sparse before conversion.
    assert not isinstance(orig.X, sparse.spmatrix)
    assert not isinstance(orig.raw.X, sparse.spmatrix)

    loaded = ad.read_h5ad(dense_pth, as_sparse=to_convert, as_sparse_fmt=spmtx_format)

    if "X" in to_convert:
        assert isinstance(loaded.X, spmtx_format)
    if "raw/X" in to_convert:
        assert isinstance(loaded.raw.X, spmtx_format)
    assert_equal(orig, loaded)
def test_dense_to_sparse_errors(tmp_path):
    """Unsupported ``as_sparse`` requests raise ``NotImplementedError``."""
    dense_pth = tmp_path / "dense.h5ad"
    adata = ad.AnnData(X=np.ones((50, 50)))
    adata.layers["like_X"] = adata.X.copy()
    adata.write(dense_pth)

    # COO is rejected as a target format.
    with pytest.raises(NotImplementedError):
        ad.read_h5ad(dense_pth, as_sparse=("X",), as_sparse_fmt=sparse.coo_matrix)
    # Converting arbitrary layers is rejected.
    with pytest.raises(NotImplementedError):
        ad.read_h5ad(dense_pth, as_sparse=("layers/like_X",))
|
en
| 0.983252
|
This file contains tests for conversion made during io.
| 2.113482
| 2
|
Simple Chatty Bot/Part1.py
|
seakun/Python-Projects
| 6
|
6629950
|
# Stage 1 of the chatty bot: greet the user, then ask for and echo their name.
print('Hello! My name is Aid.')
print('I was created in 2020.')
print('Please, remind me your name.')
# reading a name
your_name = input()
# BUG FIX: the original printed the literal text "{yourName}" — the string
# was not an f-string and no name was ever read despite the comment above.
print(f'What a great name you have, {your_name}!')
|
# Stage 1 of the chatty bot: greet the user, then ask for and echo their name.
print('Hello! My name is Aid.')
print('I was created in 2020.')
print('Please, remind me your name.')
# reading a name
your_name = input()
# BUG FIX: the original printed the literal text "{yourName}" — the string
# was not an f-string and no name was ever read despite the comment above.
print(f'What a great name you have, {your_name}!')
|
en
| 0.725912
|
# reading a name
| 2.852493
| 3
|